Merge branch 'master' into feature/crypto

Change-Id: I6e601dafa31850ccaf031cedc656238c6fda9c62
This commit is contained in:
Alistair Coles 2015-09-02 17:15:10 +01:00
commit 257e468e9b
121 changed files with 7803 additions and 4218 deletions

View File

@ -78,3 +78,6 @@ Jaivish Kothari <jaivish.kothari@nectechnologies.in> <janonymous.codevulture@gma
Michael Matur <michael.matur@gmail.com> Michael Matur <michael.matur@gmail.com>
Kazuhiro Miyahara <miyahara.kazuhiro@lab.ntt.co.jp> Kazuhiro Miyahara <miyahara.kazuhiro@lab.ntt.co.jp>
Alexandra Settle <alexandra.settle@rackspace.com> Alexandra Settle <alexandra.settle@rackspace.com>
Kenichiro Matsuda <matsuda_kenichi@jp.fujitsu.com>
Atsushi Sakai <sakaia@jp.fujitsu.com>
Takashi Natsume <natsume.takashi@lab.ntt.co.jp>

19
AUTHORS
View File

@ -26,6 +26,7 @@ Chuck Thier (cthier@gmail.com)
Contributors Contributors
------------ ------------
Mehdi Abaakouk (mehdi.abaakouk@enovance.com) Mehdi Abaakouk (mehdi.abaakouk@enovance.com)
Timur Alperovich (timur.alperovich@gmail.com)
Jesse Andrews (anotherjesse@gmail.com) Jesse Andrews (anotherjesse@gmail.com)
Joe Arnold (joe@swiftstack.com) Joe Arnold (joe@swiftstack.com)
Ionuț Arțăriși (iartarisi@suse.cz) Ionuț Arțăriși (iartarisi@suse.cz)
@ -47,6 +48,7 @@ Tim Burke (tim.burke@gmail.com)
Brian D. Burns (iosctr@gmail.com) Brian D. Burns (iosctr@gmail.com)
Devin Carlen (devin.carlen@gmail.com) Devin Carlen (devin.carlen@gmail.com)
Thierry Carrez (thierry@openstack.org) Thierry Carrez (thierry@openstack.org)
Carlos Cavanna (ccavanna@ca.ibm.com)
Emmanuel Cazenave (contact@emcaz.fr) Emmanuel Cazenave (contact@emcaz.fr)
Mahati Chamarthy (mahati.chamarthy@gmail.com) Mahati Chamarthy (mahati.chamarthy@gmail.com)
Zap Chang (zapchang@gmail.com) Zap Chang (zapchang@gmail.com)
@ -55,6 +57,7 @@ Ray Chen (oldsharp@163.com)
Harshit Chitalia (harshit@acelio.com) Harshit Chitalia (harshit@acelio.com)
Brian Cline (bcline@softlayer.com) Brian Cline (bcline@softlayer.com)
Alistair Coles (alistair.coles@hp.com) Alistair Coles (alistair.coles@hp.com)
Clément Contini (ccontini@cloudops.com)
Brian Curtin (brian.curtin@rackspace.com) Brian Curtin (brian.curtin@rackspace.com)
Thiago da Silva (thiago@redhat.com) Thiago da Silva (thiago@redhat.com)
Julien Danjou (julien@danjou.info) Julien Danjou (julien@danjou.info)
@ -64,6 +67,7 @@ Cedric Dos Santos (cedric.dos.sant@gmail.com)
Gerry Drudy (gerry.drudy@hp.com) Gerry Drudy (gerry.drudy@hp.com)
Morgan Fainberg (morgan.fainberg@gmail.com) Morgan Fainberg (morgan.fainberg@gmail.com)
ZhiQiang Fan (aji.zqfan@gmail.com) ZhiQiang Fan (aji.zqfan@gmail.com)
Oshrit Feder (oshritf@il.ibm.com)
Mike Fedosin (mfedosin@mirantis.com) Mike Fedosin (mfedosin@mirantis.com)
Ricardo Ferreira (ricardo.sff@gmail.com) Ricardo Ferreira (ricardo.sff@gmail.com)
Flaper Fesp (flaper87@gmail.com) Flaper Fesp (flaper87@gmail.com)
@ -91,8 +95,10 @@ Dan Hersam (dan.hersam@hp.com)
Derek Higgins (derekh@redhat.com) Derek Higgins (derekh@redhat.com)
Alex Holden (alex@alexjonasholden.com) Alex Holden (alex@alexjonasholden.com)
Edward Hope-Morley (opentastic@gmail.com) Edward Hope-Morley (opentastic@gmail.com)
Charles Hsu (charles0126@gmail.com)
Joanna H. Huang (joanna.huitzu.huang@gmail.com) Joanna H. Huang (joanna.huitzu.huang@gmail.com)
Kun Huang (gareth@unitedstack.com) Kun Huang (gareth@unitedstack.com)
Bill Huber (wbhuber@us.ibm.com)
Matthieu Huin (mhu@enovance.com) Matthieu Huin (mhu@enovance.com)
Hodong Hwang (hodong.hwang@kt.com) Hodong Hwang (hodong.hwang@kt.com)
Motonobu Ichimura (motonobu@gmail.com) Motonobu Ichimura (motonobu@gmail.com)
@ -126,6 +132,7 @@ John Leach (john@johnleach.co.uk)
Ed Leafe (ed.leafe@rackspace.com) Ed Leafe (ed.leafe@rackspace.com)
Thomas Leaman (thomas.leaman@hp.com) Thomas Leaman (thomas.leaman@hp.com)
Eohyung Lee (liquidnuker@gmail.com) Eohyung Lee (liquidnuker@gmail.com)
Zhao Lei (zhaolei@cn.fujitsu.com)
Jamie Lennox (jlennox@redhat.com) Jamie Lennox (jlennox@redhat.com)
Tong Li (litong01@us.ibm.com) Tong Li (litong01@us.ibm.com)
Changbin Liu (changbin.liu@gmail.com) Changbin Liu (changbin.liu@gmail.com)
@ -136,10 +143,12 @@ Zhongyue Luo (zhongyue.nah@intel.com)
Paul Luse (paul.e.luse@intel.com) Paul Luse (paul.e.luse@intel.com)
Christopher MacGown (chris@pistoncloud.com) Christopher MacGown (chris@pistoncloud.com)
Dragos Manolescu (dragosm@hp.com) Dragos Manolescu (dragosm@hp.com)
Ben Martin (blmartin@us.ibm.com)
Steve Martinelli (stevemar@ca.ibm.com) Steve Martinelli (stevemar@ca.ibm.com)
Juan J. Martinez (juan@memset.com) Juan J. Martinez (juan@memset.com)
Marcelo Martins (btorch@gmail.com) Marcelo Martins (btorch@gmail.com)
Dolph Mathews (dolph.mathews@gmail.com) Dolph Mathews (dolph.mathews@gmail.com)
Kenichiro Matsuda (matsuda_kenichi@jp.fujitsu.com)
Michael Matur (michael.matur@gmail.com) Michael Matur (michael.matur@gmail.com)
Donagh McCabe (donagh.mccabe@hp.com) Donagh McCabe (donagh.mccabe@hp.com)
Andy McCrae (andy.mccrae@gmail.com) Andy McCrae (andy.mccrae@gmail.com)
@ -151,11 +160,13 @@ Jola Mirecka (jola.mirecka@hp.com)
Kazuhiro Miyahara (miyahara.kazuhiro@lab.ntt.co.jp) Kazuhiro Miyahara (miyahara.kazuhiro@lab.ntt.co.jp)
Daisuke Morita (morita.daisuke@lab.ntt.co.jp) Daisuke Morita (morita.daisuke@lab.ntt.co.jp)
Dirk Mueller (dirk@dmllr.de) Dirk Mueller (dirk@dmllr.de)
Takashi Natsume (natsume.takashi@lab.ntt.co.jp)
Russ Nelson (russ@crynwr.com) Russ Nelson (russ@crynwr.com)
Maru Newby (mnewby@internap.com) Maru Newby (mnewby@internap.com)
Newptone (xingchao@unitedstack.com) Newptone (xingchao@unitedstack.com)
Colin Nicholson (colin.nicholson@iomart.com) Colin Nicholson (colin.nicholson@iomart.com)
Zhenguo Niu (zhenguo@unitedstack.com) Zhenguo Niu (zhenguo@unitedstack.com)
Ondrej Novy (ondrej.novy@firma.seznam.cz)
Timothy Okwii (tokwii@cisco.com) Timothy Okwii (tokwii@cisco.com)
Matthew Oliver (matt@oliver.net.au) Matthew Oliver (matt@oliver.net.au)
Hisashi Osanai (osanai.hisashi@jp.fujitsu.com) Hisashi Osanai (osanai.hisashi@jp.fujitsu.com)
@ -169,18 +180,24 @@ Constantine Peresypkin (constantine.peresypk@rackspace.com)
Dieter Plaetinck (dieter@vimeo.com) Dieter Plaetinck (dieter@vimeo.com)
Dan Prince (dprince@redhat.com) Dan Prince (dprince@redhat.com)
Sarvesh Ranjan (saranjan@cisco.com) Sarvesh Ranjan (saranjan@cisco.com)
Falk Reimann (falk.reimann@sap.com)
Brian Reitz (brian.reitz@oracle.com)
Felipe Reyes (freyes@tty.cl) Felipe Reyes (freyes@tty.cl)
Janie Richling (jrichli@us.ibm.com) Janie Richling (jrichli@us.ibm.com)
Matt Riedemann (mriedem@us.ibm.com) Matt Riedemann (mriedem@us.ibm.com)
Li Riqiang (lrqrun@gmail.com) Li Riqiang (lrqrun@gmail.com)
Rafael Rivero (rafael@cloudscaling.com) Rafael Rivero (rafael@cloudscaling.com)
Victor Rodionov (victor.rodionov@nexenta.com) Victor Rodionov (victor.rodionov@nexenta.com)
Eran Rom (eranr@il.ibm.com)
Aaron Rosen (arosen@nicira.com) Aaron Rosen (arosen@nicira.com)
Brent Roskos (broskos@internap.com) Brent Roskos (broskos@internap.com)
Hamdi Roumani (roumani@ca.ibm.com)
Shilla Saebi (shilla.saebi@gmail.com) Shilla Saebi (shilla.saebi@gmail.com)
Atsushi Sakai (sakaia@jp.fujitsu.com)
Cristian A Sanchez (cristian.a.sanchez@intel.com) Cristian A Sanchez (cristian.a.sanchez@intel.com)
Christian Schwede (cschwede@redhat.com) Christian Schwede (cschwede@redhat.com)
Mark Seger (Mark.Seger@hp.com) Mark Seger (Mark.Seger@hp.com)
Azhagu Selvan SP (tamizhgeek@gmail.com)
Alexandra Settle (alexandra.settle@rackspace.com) Alexandra Settle (alexandra.settle@rackspace.com)
Andrew Clay Shafer (acs@parvuscaptus.com) Andrew Clay Shafer (acs@parvuscaptus.com)
Mitsuhiro SHIGEMATSU (shigematsu.mitsuhiro@lab.ntt.co.jp) Mitsuhiro SHIGEMATSU (shigematsu.mitsuhiro@lab.ntt.co.jp)
@ -198,6 +215,7 @@ Jeremy Stanley (fungi@yuggoth.org)
Mauro Stettler (mauro.stettler@gmail.com) Mauro Stettler (mauro.stettler@gmail.com)
Tobias Stevenson (tstevenson@vbridges.com) Tobias Stevenson (tstevenson@vbridges.com)
Victor Stinner (vstinner@redhat.com) Victor Stinner (vstinner@redhat.com)
Akihito Takai (takaiak@nttdata.co.jp)
Pearl Yajing Tan (pearl.y.tan@seagate.com) Pearl Yajing Tan (pearl.y.tan@seagate.com)
Yuriy Taraday (yorik.sar@gmail.com) Yuriy Taraday (yorik.sar@gmail.com)
Monty Taylor (mordred@inaugust.com) Monty Taylor (mordred@inaugust.com)
@ -231,5 +249,6 @@ Guang Yee (guang.yee@hp.com)
Pete Zaitcev (zaitcev@kotori.zaitcev.us) Pete Zaitcev (zaitcev@kotori.zaitcev.us)
Hua Zhang (zhuadl@cn.ibm.com) Hua Zhang (zhuadl@cn.ibm.com)
Jian Zhang (jian.zhang@intel.com) Jian Zhang (jian.zhang@intel.com)
Kai Zhang (zakir.exe@gmail.com)
Ning Zhang (ning@zmanda.com) Ning Zhang (ning@zmanda.com)
Yuan Zhou (yuan.zhou@intel.com) Yuan Zhou (yuan.zhou@intel.com)

165
CHANGELOG
View File

@ -1,4 +1,133 @@
swift (2.3.0) swift (2.4.0)
* Dependency changes
- Added six requirement. This is part of an ongoing effort to add
support for Python 3.
- Dropped support for Python 2.6.
* Config changes
- Recent versions of Python restrict the number of headers allowed in a
request to 100. This number may be too low for custom middleware. The
new "extra_header_count" config value in swift.conf can be used to
increase the number of headers allowed.
- Renamed "run_pause" setting to "interval" (current configs with
run_pause still work). Future versions of Swift may remove the
run_pause setting.
* Versioned writes middleware
The versioned writes feature has been refactored and reimplemented as
middleware. You should explicitly add the versioned_writes middleware to
your proxy pipeline, but do not remove or disable the existing container
server config setting ("allow_versions"), if it is currently enabled.
The existing container server config setting enables existing
containers to continue being versioned. Please see
http://swift.openstack.org/middleware.html#how-to-enable-object-versioning-in-a-swift-cluster
for further upgrade notes.
* Allow 1+ object-servers-per-disk deployment
Enabled by a new > 0 integer config value, "servers_per_port" in the
[DEFAULT] config section for object-server and/or replication server
configs. The setting's integer value determines how many different
object-server workers handle requests for any single unique local port
in the ring. In this mode, the parent swift-object-server process
continues to run as the original user (i.e. root if low-port binding
is required), binds to all ports as defined in the ring, and forks off
the specified number of workers per listen socket. The child, per-port
servers drop privileges and behave pretty much how object-server workers
always have, except that because the ring has unique ports per disk, the
object-servers will only be handling requests for a single disk. The
parent process detects dead servers and restarts them (with the correct
listen socket), starts missing servers when an updated ring file is
found with a device on the server with a new port, and kills extraneous
servers when their port is found to no longer be in the ring. The ring
files are stat'ed at most every "ring_check_interval" seconds, as
configured in the object-server config (same default of 15s).
In testing, this deployment configuration (with a value of 3) lowers
request latency, improves requests per second, and isolates slow disk
IO as compared to the existing "workers" setting. To use this, each
device must be added to the ring using a different port.
* Do container listing updates in another (green)thread
The object server has learned the "container_update_timeout" setting
(with a default of 1 second). This value is the number of seconds that
the object server will wait for the container server to update the
listing before returning the status of the object PUT operation.
Previously, the object server would wait up to 3 seconds for the
container server response. The new behavior dramatically lowers object
PUT latency when container servers in the cluster are busy (e.g. when
the container is very large). Setting the value too low may result in a
client PUT'ing an object and not being able to immediately find it in
listings. Setting it too high will increase latency for clients when
container servers are busy.
* TempURL fixes (closes CVE-2015-5223)
Do not allow PUT tempurls to create pointers to other data.
Specifically, disallow the creation of DLO object manifests via a PUT
tempurl. This prevents discoverability attacks which can use any PUT
tempurl to probe for private data by creating a DLO object manifest and
then using the PUT tempurl to head the object.
* Ring changes
- Partition placement no longer uses the port number to place
partitions. This improves dispersion in small clusters running one
object server per drive, and it does not affect dispersion in
clusters running one object server per server.
- Added ring-builder-analyzer tool to more easily test and analyze a
series of ring management operations.
- Stop moving partitions unnecessarily when overload is on.
* Significant improvements and bug fixes have been made to erasure code
support. This feature is suitable for beta testing, but it is not yet
ready for broad production usage.
* Bulk upload now treats user xattrs on files in the given archive as
object metadata on the resulting created objects.
* Emit warning log in object replicator if "handoffs_first" or
"handoff_delete" is set.
* Enable object replicator's failure count in swift-recon.
* Added storage policy support to dispersion tools.
* Support keystone v3 domains in swift-dispersion.
* Added domain_remap information to the /info endpoint.
* Added support for a "default_reseller_prefix" in domain_remap
middleware config.
* Allow SLO PUTs to forgo per-segment integrity checks. Previously, each
segment referenced in the manifest also needed the correct etag and
bytes setting. These fields now allow the "null" value to skip those
particular checks on the given segment.
* Allow rsync to use compression via a "rsync_compress" config. If set to
true, compression is only enabled for an rsync to a device in a
different region. In some cases, this can speed up cross-region
replication data transfer.
* Added time synchronization check in swift-recon (the --time option).
* The account reaper now runs faster on large accounts.
* Various other minor bug fixes and improvements.
swift (2.3.0, OpenStack Kilo)
* Erasure Code support (beta) * Erasure Code support (beta)
@ -58,6 +187,7 @@ swift (2.3.0)
* Various other minor bug fixes and improvements. * Various other minor bug fixes and improvements.
swift (2.2.2) swift (2.2.2)
* Data placement changes * Data placement changes
@ -117,6 +247,7 @@ swift (2.2.2)
* Various other minor bug fixes and improvements. * Various other minor bug fixes and improvements.
swift (2.2.1) swift (2.2.1)
* Swift now rejects object names with Unicode surrogates. * Swift now rejects object names with Unicode surrogates.
@ -164,7 +295,7 @@ swift (2.2.1)
* Various other minor bug fixes and improvements. * Various other minor bug fixes and improvements.
swift (2.2.0) swift (2.2.0, OpenStack Juno)
* Added support for Keystone v3 auth. * Added support for Keystone v3 auth.
@ -338,7 +469,7 @@ swift (2.0.0)
* Various other minor bug fixes and improvements * Various other minor bug fixes and improvements
swift (1.13.1) swift (1.13.1, OpenStack Icehouse)
* Change the behavior of CORS responses to better match the spec * Change the behavior of CORS responses to better match the spec
@ -605,7 +736,7 @@ swift (1.11.0)
* Various other bug fixes and improvements * Various other bug fixes and improvements
swift (1.10.0) swift (1.10.0, OpenStack Havana)
* Added support for pooling memcache connections * Added support for pooling memcache connections
@ -776,7 +907,7 @@ swift (1.9.0)
* Various other minor bug fixes and improvements * Various other minor bug fixes and improvements
swift (1.8.0) swift (1.8.0, OpenStack Grizzly)
* Make rings' replica count adjustable * Make rings' replica count adjustable
@ -947,7 +1078,7 @@ swift (1.7.5)
* Various other minor bug fixes and improvements * Various other minor bug fixes and improvements
swift (1.7.4) swift (1.7.4, OpenStack Folsom)
* Fix issue where early client disconnects may have caused a memory leak * Fix issue where early client disconnects may have caused a memory leak
@ -962,14 +1093,14 @@ swift (1.7.0)
Serialize RingData in a versioned, custom format which is a combination Serialize RingData in a versioned, custom format which is a combination
of a JSON-encoded header and .tostring() dumps of the of a JSON-encoded header and .tostring() dumps of the
replica2part2dev_id arrays. This format deserializes hundreds of times replica2part2dev_id arrays. This format deserializes hundreds of times
faster than rings serialized with Python 2.7's pickle (a significant faster than rings serialized with Python 2.7's pickle (a significant
performance regression for ring loading between Python 2.6 and Python performance regression for ring loading between Python 2.6 and Python
2.7). Fixes bug 1031954. 2.7). Fixes bug 1031954.
The new implementation is backward-compatible; if a ring The new implementation is backward-compatible; if a ring
does not begin with a new-style magic string, it is assumed to be an does not begin with a new-style magic string, it is assumed to be an
old-style pickle-dumped ring and is handled as before. So new Swift old-style pickle-dumped ring and is handled as before. So new Swift
code can read old rings, but old Swift code will not be able to read code can read old rings, but old Swift code will not be able to read
newly-serialized rings. newly-serialized rings.
@ -1153,7 +1284,7 @@ swift (1.5.0)
* Various other minor bug fixes and improvements * Various other minor bug fixes and improvements
swift (1.4.8) swift (1.4.8, OpenStack Essex)
* Added optional max_containers_per_account restriction * Added optional max_containers_per_account restriction
@ -1296,7 +1427,7 @@ swift (1.4.4)
* Query only specific zone via swift-recon. * Query only specific zone via swift-recon.
swift (1.4.3) swift (1.4.3, OpenStack Diablo)
* Additional quarantine catching code. * Additional quarantine catching code.
@ -1421,3 +1552,15 @@ swift (1.4.0)
* Stats uploaders now allow overrides for source_filename_pattern and * Stats uploaders now allow overrides for source_filename_pattern and
new_log_cutoff values. new_log_cutoff values.
---
Changelog entries for previous versions are incomplete
swift (1.3.0, OpenStack Cactus)
swift (1.2.0, OpenStack Bexar)
swift (1.1.0, OpenStack Austin)
swift (1.0.0, Initial Release)

149
bandit.yaml Normal file
View File

@ -0,0 +1,149 @@
# optional: after how many files to update progress
#show_progress_every: 100
# optional: plugins directory name
#plugins_dir: 'plugins'
# optional: plugins discovery name pattern
plugin_name_pattern: '*.py'
# optional: terminal escape sequences to display colors
#output_colors:
# DEFAULT: '\033[0m'
# HEADER: '\033[95m'
# LOW: '\033[94m'
# MEDIUM: '\033[93m'
# HIGH: '\033[91m'
# optional: log format string
#log_format: "[%(module)s]\t%(levelname)s\t%(message)s"
# globs of files which should be analyzed
include:
- '*.py'
# a list of strings, which if found in the path will cause files to be
# excluded
# for example /tests/ - to remove all files in tests directory
#exclude_dirs:
# - '/tests/'
#configured for swift
profiles:
gate:
include:
- blacklist_calls
- blacklist_imports
- exec_used
- linux_commands_wildcard_injection
- request_with_no_cert_validation
- set_bad_file_permissions
- subprocess_popen_with_shell_equals_true
- ssl_with_bad_version
- password_config_option_not_marked_secret
# - any_other_function_with_shell_equals_true
# - ssl_with_bad_defaults
# - jinja2_autoescape_false
# - use_of_mako_templates
# - subprocess_without_shell_equals_true
# - any_other_function_with_shell_equals_true
# - start_process_with_a_shell
# - start_process_with_no_shell
# - hardcoded_sql_expressions
# - hardcoded_tmp_directory
# - linux_commands_wildcard_injection
#For now some items are commented which could be included as per use later.
blacklist_calls:
bad_name_sets:
# - pickle:
# qualnames: [pickle.loads, pickle.load, pickle.Unpickler,
# cPickle.loads, cPickle.load, cPickle.Unpickler]
# level: LOW
# message: "Pickle library appears to be in use, possible security
#issue."
- marshal:
qualnames: [marshal.load, marshal.loads]
message: "Deserialization with the marshal module is possibly
dangerous."
# - md5:
# qualnames: [hashlib.md5]
# level: LOW
# message: "Use of insecure MD5 hash function."
- mktemp_q:
qualnames: [tempfile.mktemp]
message: "Use of insecure and deprecated function (mktemp)."
# - eval:
# qualnames: [eval]
# level: LOW
# message: "Use of possibly insecure function - consider using safer
#ast.literal_eval."
- mark_safe:
names: [mark_safe]
message: "Use of mark_safe() may expose cross-site scripting
vulnerabilities and should be reviewed."
- httpsconnection:
qualnames: [httplib.HTTPSConnection]
message: "Use of HTTPSConnection does not provide security, see
https://wiki.openstack.org/wiki/OSSN/OSSN-0033"
- yaml_load:
qualnames: [yaml.load]
message: "Use of unsafe yaml load. Allows instantiation of
arbitrary objects. Consider yaml.safe_load()."
- urllib_urlopen:
qualnames: [urllib.urlopen, urllib.urlretrieve, urllib.URLopener,
urllib.FancyURLopener, urllib2.urlopen, urllib2.Request]
message: "Audit url open for permitted schemes. Allowing use of
file:/ or custom schemes is often unexpected."
- paramiko_injection:
qualnames: [paramiko.exec_command, paramiko.invoke_shell]
message: "Paramiko exec_command() and invoke_shell() usage may
expose command injection vulnerabilities and should be reviewed."
shell_injection:
# Start a process using the subprocess module, or one of its wrappers.
subprocess: [subprocess.Popen, subprocess.call, subprocess.check_call,
subprocess.check_output, utils.execute,
utils.execute_with_timeout]
# Start a process with a function vulnerable to shell injection.
shell: [os.system, os.popen, os.popen2, os.popen3, os.popen4,
popen2.popen2, popen2.popen3, popen2.popen4, popen2.Popen3,
popen2.Popen4, commands.getoutput, commands.getstatusoutput]
# Start a process with a function that is not vulnerable to shell
# injection.
no_shell: [os.execl, os.execle, os.execlp, os.execlpe, os.execv,os.execve,
os.execvp, os.execvpe, os.spawnl, os.spawnle, os.spawnlp,
os.spawnlpe, os.spawnv, os.spawnve, os.spawnvp, os.spawnvpe,
os.startfile]
blacklist_imports:
bad_import_sets:
- telnet:
imports: [telnetlib]
level: HIGH
message: "Telnet is considered insecure. Use SSH or some other
encrypted protocol."
- info_libs:
imports: [Crypto]
level: LOW
message: "Consider possible security implications associated with
#{module} module."
hardcoded_password:
word_list: "wordlist/default-passwords"
ssl_with_bad_version:
bad_protocol_versions:
- 'PROTOCOL_SSLv2'
- 'SSLv2_METHOD'
- 'SSLv23_METHOD'
- 'PROTOCOL_SSLv3' # strict option
- 'PROTOCOL_TLSv1' # strict option
- 'SSLv3_METHOD' # strict option
- 'TLSv1_METHOD' # strict option
password_config_option_not_marked_secret:
function_names:
- oslo.config.cfg.StrOpt
- oslo_config.cfg.StrOpt

View File

@ -1,4 +1,4 @@
#!/usr/bin/python #!/usr/bin/env python
# Licensed under the Apache License, Version 2.0 (the "License"); you may not # Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy # use this file except in compliance with the License. You may obtain a copy
# of the License at # of the License at

View File

@ -1,4 +1,4 @@
#!/usr/bin/python #!/usr/bin/env python
# Licensed under the Apache License, Version 2.0 (the "License"); you may not # Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy # use this file except in compliance with the License. You may obtain a copy
# of the License at # of the License at

View File

@ -1,4 +1,4 @@
#!/usr/bin/python #!/usr/bin/env python
# Copyright (c) 2010-2012 OpenStack Foundation # Copyright (c) 2010-2012 OpenStack Foundation
# #
# Licensed under the Apache License, Version 2.0 (the "License"); # Licensed under the Apache License, Version 2.0 (the "License");

View File

@ -26,6 +26,7 @@ except ImportError:
from eventlet import GreenPool, hubs, patcher, Timeout from eventlet import GreenPool, hubs, patcher, Timeout
from eventlet.pools import Pool from eventlet.pools import Pool
from eventlet.green import urllib2
from swift.common import direct_client from swift.common import direct_client
try: try:
@ -176,9 +177,10 @@ def object_dispersion_report(coropool, connpool, account, object_ring,
try: try:
objects = [o['name'] for o in conn.get_container( objects = [o['name'] for o in conn.get_container(
container, prefix='dispersion_', full_listing=True)[1]] container, prefix='dispersion_', full_listing=True)[1]]
except ClientException as err: except urllib2.HTTPError as err:
if err.http_status != 404: if err.getcode() != 404:
raise raise
print >>stderr, 'No objects to query. Has ' \ print >>stderr, 'No objects to query. Has ' \
'swift-dispersion-populate been run?' 'swift-dispersion-populate been run?'
stderr.flush() stderr.flush()

View File

@ -1,4 +1,4 @@
#!/usr/bin/python #!/usr/bin/env python
# Copyright (c) 2014 Christian Schwede <christian.schwede@enovance.com> # Copyright (c) 2014 Christian Schwede <christian.schwede@enovance.com>
# #
# Licensed under the Apache License, Version 2.0 (the "License"); # Licensed under the Apache License, Version 2.0 (the "License");

View File

@ -1,4 +1,4 @@
#!/usr/bin/python #!/usr/bin/env python
# Copyright (c) 2014 Christian Schwede <christian.schwede@enovance.com> # Copyright (c) 2014 Christian Schwede <christian.schwede@enovance.com>
# #
# Licensed under the Apache License, Version 2.0 (the "License"); # Licensed under the Apache License, Version 2.0 (the "License");

View File

@ -1,4 +1,4 @@
#!/usr/bin/python #!/usr/bin/env python
# Copyright (c) 2015 Samuel Merritt <sam@swiftstack.com> # Copyright (c) 2015 Samuel Merritt <sam@swiftstack.com>
# #
# Licensed under the Apache License, Version 2.0 (the "License"); # Licensed under the Apache License, Version 2.0 (the "License");

View File

@ -129,6 +129,8 @@ Logging address. The default is /dev/log.
Request timeout to external services. The default is 3 seconds. Request timeout to external services. The default is 3 seconds.
.IP \fBconn_timeout\fR .IP \fBconn_timeout\fR
Connection timeout to external services. The default is 0.5 seconds. Connection timeout to external services. The default is 0.5 seconds.
.IP \fBcontainer_update_timeout\fR
Time to wait while sending a container update on object update. The default is 1 second.
.RE .RE
.PD .PD

View File

@ -25,7 +25,7 @@
.SH SYNOPSIS .SH SYNOPSIS
.LP .LP
.B swift-recon .B swift-recon
\ <server_type> [-v] [--suppress] [-a] [-r] [-u] [-d] [-l] [--md5] [--auditor] [--updater] [--expirer] [--sockstat] \ <server_type> [-v] [--suppress] [-a] [-r] [-u] [-d] [-l] [-T] [--md5] [--auditor] [--updater] [--expirer] [--sockstat]
.SH DESCRIPTION .SH DESCRIPTION
.PP .PP
@ -80,8 +80,10 @@ Get md5sum of servers ring and compare to local copy
Get cluster socket usage stats Get cluster socket usage stats
.IP "\fB--driveaudit\fR" .IP "\fB--driveaudit\fR"
Get drive audit error stats Get drive audit error stats
.IP "\fB-T, --time\fR"
Check time synchronization
.IP "\fB--all\fR" .IP "\fB--all\fR"
Perform all checks. Equivalent to \-arudlq \-\-md5 Perform all checks. Equivalent to \-arudlqT \-\-md5
.IP "\fB--region=REGION\fR" .IP "\fB--region=REGION\fR"
Only query servers in specified region Only query servers in specified region
.IP "\fB-z ZONE, --zone=ZONE\fR" .IP "\fB-z ZONE, --zone=ZONE\fR"

View File

@ -9,7 +9,6 @@ user = <your-user-name>
log_facility = LOG_LOCAL2 log_facility = LOG_LOCAL2
recon_cache_path = /var/cache/swift recon_cache_path = /var/cache/swift
eventlet_debug = true eventlet_debug = true
allow_versions = true
[pipeline:main] [pipeline:main]
pipeline = recon container-server pipeline = recon container-server

View File

@ -9,7 +9,6 @@ user = <your-user-name>
log_facility = LOG_LOCAL3 log_facility = LOG_LOCAL3
recon_cache_path = /var/cache/swift2 recon_cache_path = /var/cache/swift2
eventlet_debug = true eventlet_debug = true
allow_versions = true
[pipeline:main] [pipeline:main]
pipeline = recon container-server pipeline = recon container-server

View File

@ -9,7 +9,6 @@ user = <your-user-name>
log_facility = LOG_LOCAL4 log_facility = LOG_LOCAL4
recon_cache_path = /var/cache/swift3 recon_cache_path = /var/cache/swift3
eventlet_debug = true eventlet_debug = true
allow_versions = true
[pipeline:main] [pipeline:main]
pipeline = recon container-server pipeline = recon container-server

View File

@ -9,7 +9,6 @@ user = <your-user-name>
log_facility = LOG_LOCAL5 log_facility = LOG_LOCAL5
recon_cache_path = /var/cache/swift4 recon_cache_path = /var/cache/swift4
eventlet_debug = true eventlet_debug = true
allow_versions = true
[pipeline:main] [pipeline:main]
pipeline = recon container-server pipeline = recon container-server

View File

@ -0,0 +1,5 @@
[saio]
key = changeme
key2 = changeme
cluster_saio_endpoint = http://127.0.0.1:8080/v1/

View File

@ -37,7 +37,7 @@ interval = 300
# config value # config value
# processes = 0 # processes = 0
# process is which of the parts a particular process will work on # process is which of the parts a particular process will work on
# process can also be specified on the command line and will overide the config # process can also be specified on the command line and will override the config
# value # value
# process is "zero based", if you want to use 3 processes, you should run # process is "zero based", if you want to use 3 processes, you should run
# processes with process set to 0, 1, and 2 # processes with process set to 0, 1, and 2

View File

@ -9,7 +9,7 @@ eventlet_debug = true
[pipeline:main] [pipeline:main]
# Yes, proxy-logging appears twice. This is so that # Yes, proxy-logging appears twice. This is so that
# middleware-originated requests get logged too. # middleware-originated requests get logged too.
pipeline = catch_errors gatekeeper healthcheck proxy-logging cache bulk tempurl ratelimit crossdomain tempauth staticweb container-quotas account-quotas slo dlo proxy-logging proxy-server pipeline = catch_errors gatekeeper healthcheck proxy-logging cache bulk tempurl ratelimit crossdomain container_sync tempauth staticweb container-quotas account-quotas slo dlo versioned_writes proxy-logging proxy-server
[filter:catch_errors] [filter:catch_errors]
use = egg:swift#catch_errors use = egg:swift#catch_errors
@ -35,6 +35,10 @@ use = egg:swift#dlo
[filter:slo] [filter:slo]
use = egg:swift#slo use = egg:swift#slo
[filter:container_sync]
use = egg:swift#container_sync
current = //saio/saio_endpoint
[filter:tempurl] [filter:tempurl]
use = egg:swift#tempurl use = egg:swift#tempurl
@ -60,6 +64,10 @@ use = egg:swift#memcache
[filter:gatekeeper] [filter:gatekeeper]
use = egg:swift#gatekeeper use = egg:swift#gatekeeper
[filter:versioned_writes]
use = egg:swift#versioned_writes
allow_versioned_writes = true
[app:proxy-server] [app:proxy-server]
use = egg:swift#proxy use = egg:swift#proxy
allow_account_management = true allow_account_management = true

View File

@ -154,6 +154,10 @@ until it has been resolved. If the drive is going to be replaced immediately,
then it is just best to replace the drive, format it, remount it, and let then it is just best to replace the drive, format it, remount it, and let
replication fill it up. replication fill it up.
After the drive is unmounted, make sure the mount point is owned by root
(root:root 755). This ensures that rsync will not try to replicate into the
root drive once the failed drive is unmounted.
If the drive can't be replaced immediately, then it is best to leave it If the drive can't be replaced immediately, then it is best to leave it
unmounted, and set the device weight to 0. This will allow all the unmounted, and set the device weight to 0. This will allow all the
replicas that were on that drive to be replicated elsewhere until the drive replicas that were on that drive to be replicated elsewhere until the drive
@ -545,18 +549,22 @@ Request URI Description
/recon/sockstat returns consumable info from /proc/net/sockstat|6 /recon/sockstat returns consumable info from /proc/net/sockstat|6
/recon/devices returns list of devices and devices dir i.e. /srv/node /recon/devices returns list of devices and devices dir i.e. /srv/node
/recon/async returns count of async pending /recon/async returns count of async pending
/recon/replication returns object replication times (for backward compatibility) /recon/replication returns object replication info (for backward compatibility)
/recon/replication/<type> returns replication info for given type (account, container, object) /recon/replication/<type> returns replication info for given type (account, container, object)
/recon/auditor/<type> returns auditor stats on last reported scan for given type (account, container, object) /recon/auditor/<type> returns auditor stats on last reported scan for given type (account, container, object)
/recon/updater/<type> returns last updater sweep times for given type (container, object) /recon/updater/<type> returns last updater sweep times for given type (container, object)
========================= ======================================================================================== ========================= ========================================================================================
Note that 'object_replication_last' and 'object_replication_time' in object
replication info are considered to be transitional and will be removed in
the subsequent releases. Use 'replication_last' and 'replication_time' instead.
This information can also be queried via the swift-recon command line utility:: This information can also be queried via the swift-recon command line utility::
fhines@ubuntu:~$ swift-recon -h fhines@ubuntu:~$ swift-recon -h
Usage: Usage:
usage: swift-recon <server_type> [-v] [--suppress] [-a] [-r] [-u] [-d] usage: swift-recon <server_type> [-v] [--suppress] [-a] [-r] [-u] [-d]
[-l] [--md5] [--auditor] [--updater] [--expirer] [--sockstat] [-l] [-T] [--md5] [--auditor] [--updater] [--expirer] [--sockstat]
<server_type> account|container|object <server_type> account|container|object
Defaults to object server. Defaults to object server.
@ -579,7 +587,8 @@ This information can also be queried via the swift-recon command line utility::
-q, --quarantined Get cluster quarantine stats -q, --quarantined Get cluster quarantine stats
--md5 Get md5sum of servers ring and compare to local copy --md5 Get md5sum of servers ring and compare to local copy
--sockstat Get cluster socket usage stats --sockstat Get cluster socket usage stats
--all Perform all checks. Equal to -arudlq --md5 --sockstat -T, --time Check time synchronization
--all Perform all checks. Equal to -arudlqT --md5 --sockstat
-z ZONE, --zone=ZONE Only query servers in specified zone -z ZONE, --zone=ZONE Only query servers in specified zone
-t SECONDS, --timeout=SECONDS -t SECONDS, --timeout=SECONDS
Time to wait for a response from a server Time to wait for a response from a server

View File

@ -390,6 +390,13 @@ max_header_size 8192 max_header_size is the max number of bytes in
See also include_service_catalog in See also include_service_catalog in
proxy-server.conf-sample (documented in proxy-server.conf-sample (documented in
overview_auth.rst). overview_auth.rst).
extra_header_count 0 By default the maximum number of allowed
headers depends on the number of max
allowed metadata settings plus a default
value of 32 for regular http headers.
If for some reason this is not enough (custom
middleware for example) it can be increased
with the extra_header_count constraint.
=================== ========== ============================================= =================== ========== =============================================
--------------------------- ---------------------------
@ -405,76 +412,86 @@ The following configuration options are available:
[DEFAULT] [DEFAULT]
=================== ========== ============================================= ======================== ========== ==========================================
Option Default Description Option Default Description
------------------- ---------- --------------------------------------------- ------------------------ ---------- ------------------------------------------
swift_dir /etc/swift Swift configuration directory swift_dir /etc/swift Swift configuration directory
devices /srv/node Parent directory of where devices are mounted devices /srv/node Parent directory of where devices are
mount_check true Whether or not check if the devices are mounted
mounted to prevent accidentally writing mount_check true Whether or not check if the devices are
to the root device mounted to prevent accidentally writing
bind_ip 0.0.0.0 IP Address for server to bind to to the root device
bind_port 6000 Port for server to bind to bind_ip 0.0.0.0 IP Address for server to bind to
bind_timeout 30 Seconds to attempt bind before giving up bind_port 6000 Port for server to bind to
workers auto Override the number of pre-forked workers bind_timeout 30 Seconds to attempt bind before giving up
that will accept connections. If set it workers auto Override the number of pre-forked workers
should be an integer, zero means no fork. If that will accept connections. If set it
unset, it will try to default to the number should be an integer, zero means no fork.
of effective cpu cores and fallback to one. If unset, it will try to default to the
Increasing the number of workers helps slow number of effective cpu cores and fallback
filesystem operations in one request from to one. Increasing the number of workers
negatively impacting other requests, but only helps slow filesystem operations in one
the :ref:`servers_per_port request from negatively impacting other
<server-per-port-configuration>` requests, but only the
option provides complete I/O isolation with :ref:`servers_per_port
no measurable overhead. <server-per-port-configuration>` option
servers_per_port 0 If each disk in each storage policy ring has provides complete I/O isolation with no
unique port numbers for its "ip" value, you measurable overhead.
can use this setting to have each servers_per_port 0 If each disk in each storage policy ring
object-server worker only service requests has unique port numbers for its "ip"
for the single disk matching the port in the value, you can use this setting to have
ring. The value of this setting determines each object-server worker only service
how many worker processes run for each port requests for the single disk matching the
(disk) in the ring. If you have 24 disks port in the ring. The value of this
per server, and this setting is 4, then setting determines how many worker
each storage node will have 1 + (24 * 4) = processes run for each port (disk) in the
97 total object-server processes running. ring. If you have 24 disks per server, and
This gives complete I/O isolation, drastically this setting is 4, then each storage node
reducing the impact of slow disks on storage will have 1 + (24 * 4) = 97 total
node performance. The object-replicator and object-server processes running. This
object-reconstructor need to see this setting gives complete I/O isolation, drastically
too, so it must be in the [DEFAULT] section. reducing the impact of slow disks on
See :ref:`server-per-port-configuration`. storage node performance. The
max_clients 1024 Maximum number of clients one worker can object-replicator and object-reconstructor
process simultaneously (it will actually need to see this setting too, so it must
accept(2) N + 1). Setting this to one (1) be in the [DEFAULT] section.
will only handle one request at a time, See :ref:`server-per-port-configuration`.
without accepting another request max_clients 1024 Maximum number of clients one worker can
concurrently. process simultaneously (it will actually
disable_fallocate false Disable "fast fail" fallocate checks if the accept(2) N + 1). Setting this to one (1)
underlying filesystem does not support it. will only handle one request at a time,
log_max_line_length 0 Caps the length of log lines to the without accepting another request
value given; no limit if set to 0, the concurrently.
default. disable_fallocate false Disable "fast fail" fallocate checks if
log_custom_handlers None Comma-separated list of functions to call the underlying filesystem does not support
to setup custom log handlers. it.
eventlet_debug false If true, turn on debug logging for eventlet log_max_line_length 0 Caps the length of log lines to the
fallocate_reserve 0 You can set fallocate_reserve to the number of value given; no limit if set to 0, the
bytes you'd like fallocate to reserve, whether default.
there is space for the given file size or not. log_custom_handlers None Comma-separated list of functions to call
This is useful for systems that behave badly to setup custom log handlers.
when they completely run out of space; you can eventlet_debug false If true, turn on debug logging for
make the services pretend they're out of space eventlet
early. fallocate_reserve 0 You can set fallocate_reserve to the
conn_timeout 0.5 Time to wait while attempting to connect to number of bytes you'd like fallocate to
another backend node. reserve, whether there is space for the
node_timeout 3 Time to wait while sending each chunk of data given file size or not. This is useful for
to another backend node. systems that behave badly when they
client_timeout 60 Time to wait while receiving each chunk of completely run out of space; you can
data from a client or another backend node. make the services pretend they're out of
network_chunk_size 65536 Size of chunks to read/write over the network space early.
disk_chunk_size 65536 Size of chunks to read/write to disk conn_timeout 0.5 Time to wait while attempting to connect
=================== ========== ============================================= to another backend node.
node_timeout 3 Time to wait while sending each chunk of
data to another backend node.
client_timeout 60 Time to wait while receiving each chunk of
data from a client or another backend node
network_chunk_size 65536 Size of chunks to read/write over the
network
disk_chunk_size 65536 Size of chunks to read/write to disk
container_update_timeout 1 Time to wait while sending a container
update on object update.
======================== ========== ==========================================
.. _object-server-options: .. _object-server-options:
@ -1229,6 +1246,10 @@ For a standard swift install, all data drives are mounted directly under
be sure to set the `devices` config option in all of the server configs to be sure to set the `devices` config option in all of the server configs to
point to the correct directory. point to the correct directory.
The mount points for each drive in /srv/node/ should be owned by the root user
almost exclusively (root:root 755). This is required to prevent rsync from
syncing files into the root drive in the event a drive is unmounted.
Swift uses system calls to reserve space for new objects being written into Swift uses system calls to reserve space for new objects being written into
the system. If your filesystem does not support `fallocate()` or the system. If your filesystem does not support `fallocate()` or
`posix_fallocate()`, be sure to set the `disable_fallocate = true` config `posix_fallocate()`, be sure to set the `disable_fallocate = true` config

View File

@ -95,6 +95,16 @@ another device when creating the VM, and follow these instructions:
# **Make sure to include the trailing slash after /srv/$x/** # **Make sure to include the trailing slash after /srv/$x/**
for x in {1..4}; do sudo chown -R ${USER}:${USER} /srv/$x/; done for x in {1..4}; do sudo chown -R ${USER}:${USER} /srv/$x/; done
Note: We create the mount points and mount the storage disk under
/mnt/sdb1. This disk will contain one directory per simulated swift node,
each owned by the current swift user.
We then create symlinks to these directories under /srv.
If the disk sdb is unmounted, files will not be written under
/srv/\*, because the symbolic link destination /mnt/sdb1/* will not
exist. This prevents disk sync operations from writing to the root
partition in the event a drive is unmounted.
#. Next, skip to :ref:`common-dev-section`. #. Next, skip to :ref:`common-dev-section`.
@ -135,6 +145,15 @@ these instructions:
# **Make sure to include the trailing slash after /srv/$x/** # **Make sure to include the trailing slash after /srv/$x/**
for x in {1..4}; do sudo chown -R ${USER}:${USER} /srv/$x/; done for x in {1..4}; do sudo chown -R ${USER}:${USER} /srv/$x/; done
Note: We create the mount points and mount the loopback file under
/mnt/sdb1. This file will contain one directory per simulated swift node,
each owned by the current swift user.
We then create symlinks to these directories under /srv.
If the loopback file is unmounted, files will not be written under
/srv/\*, because the symbolic link destination /mnt/sdb1/* will not
exist. This prevents disk sync operations from writing to the root
partition in the event a drive is unmounted.
.. _common-dev-section: .. _common-dev-section:
@ -352,6 +371,10 @@ commands are as follows:
.. literalinclude:: /../saio/swift/container-reconciler.conf .. literalinclude:: /../saio/swift/container-reconciler.conf
#. ``/etc/swift/container-sync-realms.conf``
.. literalinclude:: /../saio/swift/container-sync-realms.conf
#. ``/etc/swift/account-server/1.conf`` #. ``/etc/swift/account-server/1.conf``
.. literalinclude:: /../saio/swift/account-server/1.conf .. literalinclude:: /../saio/swift/account-server/1.conf

View File

@ -118,11 +118,41 @@ After you proposed your changes to Swift, you can track the review in:
* `<https://review.openstack.org>`_ * `<https://review.openstack.org>`_
.. _post-rebase-instructions:
------------------------
Post rebase instructions
------------------------
After rebasing, the following steps should be performed to rebuild the swift
installation. Note that these commands should be performed from the root of the
swift repo directory (e.g. $HOME/swift/):
``sudo python setup.py develop``
``sudo pip install -r test-requirements.txt``
If using TOX, depending on the changes made during the rebase, you may need to
rebuild the TOX environment (generally this will be the case if
test-requirements.txt was updated such that a new version of a package is
required), this can be accomplished using the '-r' argument to the TOX cli:
``tox -r``
You can include any of the other TOX arguments as well, for example, to run the
pep8 suite and rebuild the TOX environment the following can be used:
``tox -r -e pep8``
The rebuild option only needs to be specified once for a particular build (e.g.
pep8), that is further invocations of the same build will not require this
until the next rebase.
--------------- ---------------
Troubleshooting Troubleshooting
--------------- ---------------
You may run into the following error when starting Swift if you rebase You may run into the following errors when starting Swift if you rebase
your commit using: your commit using:
``git rebase`` ``git rebase``
@ -143,10 +173,32 @@ your commit using:
pkg_resources.DistributionNotFound: swift==2.3.1.devXXX pkg_resources.DistributionNotFound: swift==2.3.1.devXXX
(where XXX represents a dev version of Swift). (where XXX represents a dev version of Swift).
.. code-block:: python
Traceback (most recent call last):
File "/usr/local/bin/swift-proxy-server", line 10, in <module>
execfile(__file__)
File "/home/swift/swift/bin/swift-proxy-server", line 23, in <module>
sys.exit(run_wsgi(conf_file, 'proxy-server', **options))
File "/home/swift/swift/swift/common/wsgi.py", line 888, in run_wsgi
loadapp(conf_path, global_conf=global_conf)
File "/home/swift/swift/swift/common/wsgi.py", line 390, in loadapp
func(PipelineWrapper(ctx))
File "/home/swift/swift/swift/proxy/server.py", line 602, in modify_wsgi_pipeline
ctx = pipe.create_filter(filter_name)
File "/home/swift/swift/swift/common/wsgi.py", line 329, in create_filter
global_conf=self.context.global_conf)
File "/usr/lib/python2.7/dist-packages/paste/deploy/loadwsgi.py", line 296, in loadcontext
global_conf=global_conf)
File "/usr/lib/python2.7/dist-packages/paste/deploy/loadwsgi.py", line 328, in _loadegg
return loader.get_context(object_type, name, global_conf)
File "/usr/lib/python2.7/dist-packages/paste/deploy/loadwsgi.py", line 620, in get_context
object_type, name=name)
File "/usr/lib/python2.7/dist-packages/paste/deploy/loadwsgi.py", line 659, in find_egg_entry_point
for prot in protocol_options] or '(no entry points)'))))
LookupError: Entry point 'versioned_writes' not found in egg 'swift' (dir: /home/swift/swift; protocols: paste.filter_factory, paste.filter_app_factory; entry_points: )
This happens because `git rebase` will retrieve code for a different version of This happens because `git rebase` will retrieve code for a different version of
Swift in the development stream, but the start scripts under `/usr/local/bin` have Swift in the development stream, but the start scripts under `/usr/local/bin` have
not been updated. The solution is to execute the following command under the swift not been updated. The solution is to follow the steps described in the
directory (which contains `setup.py`): :ref:`post-rebase-instructions` section.
``sudo python setup.py develop``

View File

@ -6,6 +6,13 @@ Please refer to the latest official
`Openstack Installation Guides <http://docs.openstack.org/#install-guides>`_ `Openstack Installation Guides <http://docs.openstack.org/#install-guides>`_
for the most up-to-date documentation. for the most up-to-date documentation.
Object Storage installation guide for Openstack Kilo
----------------------------------------------------
* `openSUSE 13.2 and SUSE Linux Enterprise Server 12 <http://docs.openstack.org/kilo/install-guide/install/zypper/content/ch_swift.html>`_
* `RHEL 7, CentOS 7, and Fedora 21 <http://docs.openstack.org/kilo/install-guide/install/yum/content/ch_swift.html>`_
* `Ubuntu 14.04 <http://docs.openstack.org/kilo/install-guide/install/apt/content/ch_swift.html>`_
Object Storage installation guide for Openstack Juno Object Storage installation guide for Openstack Juno
---------------------------------------------------- ----------------------------------------------------

View File

@ -102,6 +102,7 @@ DLO :ref:`dynamic-large-objects`
LE :ref:`list_endpoints` LE :ref:`list_endpoints`
KS :ref:`keystoneauth` KS :ref:`keystoneauth`
RL :ref:`ratelimit` RL :ref:`ratelimit`
VW :ref:`versioned_writes`
======================= ============================= ======================= =============================

View File

@ -155,6 +155,15 @@ Name Check (Forbidden Character Filter)
:members: :members:
:show-inheritance: :show-inheritance:
.. _versioned_writes:
Object Versioning
=================
.. automodule:: swift.common.middleware.versioned_writes
:members:
:show-inheritance:
Proxy Logging Proxy Logging
============= =============

View File

@ -1,89 +1,6 @@
=================
Object Versioning Object Versioning
================= =================
-------- .. automodule:: swift.common.middleware.versioned_writes
Overview :members:
-------- :show-inheritance:
Object versioning in swift is implemented by setting a flag on the container
to tell swift to version all objects in the container. The flag is the
``X-Versions-Location`` header on the container, and its value is the
container where the versions are stored. It is recommended to use a different
``X-Versions-Location`` container for each container that is being versioned.
When data is ``PUT`` into a versioned container (a container with the
versioning flag turned on), the existing data in the file is redirected to a
new object and the data in the ``PUT`` request is saved as the data for the
versioned object. The new object name (for the previous version) is
``<versions_container>/<length><object_name>/<timestamp>``, where ``length``
is the 3-character zero-padded hexadecimal length of the ``<object_name>`` and
``<timestamp>`` is the timestamp of when the previous version was created.
A ``GET`` to a versioned object will return the current version of the object
without having to do any request redirects or metadata lookups.
A ``POST`` to a versioned object will update the object metadata as normal,
but will not create a new version of the object. In other words, new versions
are only created when the content of the object changes.
A ``DELETE`` to a versioned object will only remove the current version of the
object. If you have 5 total versions of the object, you must delete the
object 5 times to completely remove the object.
Note: A large object manifest file cannot be versioned, but a large object
manifest may point to versioned segments.
--------------------------------------------------
How to Enable Object Versioning in a Swift Cluster
--------------------------------------------------
Set ``allow_versions`` to ``True`` in the container server config.
-----------------------
Examples Using ``curl``
-----------------------
First, create a container with the ``X-Versions-Location`` header or add the
header to an existing container. Also make sure the container referenced by
the ``X-Versions-Location`` exists. In this example, the name of that
container is "versions"::
curl -i -XPUT -H "X-Auth-Token: <token>" \
-H "X-Versions-Location: versions" http://<storage_url>/container
curl -i -XPUT -H "X-Auth-Token: <token>" http://<storage_url>/versions
Create an object (the first version)::
curl -i -XPUT --data-binary 1 -H "X-Auth-Token: <token>" \
http://<storage_url>/container/myobject
Now create a new version of that object::
curl -i -XPUT --data-binary 2 -H "X-Auth-Token: <token>" \
http://<storage_url>/container/myobject
See a listing of the older versions of the object::
curl -i -H "X-Auth-Token: <token>" \
http://<storage_url>/versions?prefix=008myobject/
Now delete the current version of the object and see that the older version is
gone::
curl -i -XDELETE -H "X-Auth-Token: <token>" \
http://<storage_url>/container/myobject
curl -i -H "X-Auth-Token: <token>" \
http://<storage_url>/versions?prefix=008myobject/
---------------------------------------------------
How to Disable Object Versioning in a Swift Cluster
---------------------------------------------------
If you want to disable all functionality, set ``allow_versions`` back to
``False`` in the container server config.
Disable versioning a versioned container (x is any value except empty)::
curl -i -XPOST -H "X-Auth-Token: <token>" \
-H "X-Remove-Versions-Location: x" http://<storage_url>/container

View File

@ -41,7 +41,7 @@
# config value # config value
# processes = 0 # processes = 0
# process is which of the parts a particular process will work on # process is which of the parts a particular process will work on
# process can also be specified on the command line and will overide the config # process can also be specified on the command line and will override the config
# value # value
# process is "zero based", if you want to use 3 processes, you should run # process is "zero based", if you want to use 3 processes, you should run
# processes with process set to 0, 1, and 2 # processes with process set to 0, 1, and 2

View File

@ -60,6 +60,8 @@ bind_port = 6000
# conn_timeout = 0.5 # conn_timeout = 0.5
# Time to wait while sending each chunk of data to another backend node. # Time to wait while sending each chunk of data to another backend node.
# node_timeout = 3 # node_timeout = 3
# Time to wait while sending a container update on object update.
# container_update_timeout = 1.0
# Time to wait while receiving each chunk of data from a client or another # Time to wait while receiving each chunk of data from a client or another
# backend node. # backend node.
# client_timeout = 60 # client_timeout = 60

View File

@ -77,7 +77,7 @@ bind_port = 8080
# eventlet_debug = false # eventlet_debug = false
[pipeline:main] [pipeline:main]
pipeline = catch_errors gatekeeper healthcheck proxy-logging cache container_sync bulk tempurl ratelimit tempauth container-quotas account-quotas slo dlo proxy-logging proxy-server pipeline = catch_errors gatekeeper healthcheck proxy-logging cache container_sync bulk tempurl ratelimit tempauth container-quotas account-quotas slo dlo versioned_writes proxy-logging proxy-server
[app:proxy-server] [app:proxy-server]
use = egg:swift#proxy use = egg:swift#proxy
@ -703,3 +703,14 @@ use = egg:swift#xprofile
# #
# unwind the iterator of applications # unwind the iterator of applications
# unwind = false # unwind = false
# Note: Put after slo, dlo in the pipeline.
# If you don't put it in the pipeline, it will be inserted automatically.
[filter:versioned_writes]
use = egg:swift#versioned_writes
# Enables using versioned writes middleware and exposing configuration
# settings via HTTP GET /info.
# WARNING: Setting this option bypasses the "allow_versions" option
# in the container configuration file, which will be eventually
# deprecated. See documentation for more details.
# allow_versioned_writes = false

View File

@ -134,7 +134,7 @@ default = yes
# headers. If for some reason this is not enough (custom middleware for # headers. If for some reason this is not enough (custom middleware for
# example) it can be increased with the extra_header_count constraint. # example) it can be increased with the extra_header_count constraint.
#extra_header_count = 32 #extra_header_count = 0
# max_object_name_length is the max number of bytes in the utf8 encoding # max_object_name_length is the max number of bytes in the utf8 encoding

View File

@ -10,4 +10,4 @@ pastedeploy>=1.3.3
simplejson>=2.0.9 simplejson>=2.0.9
six>=1.9.0 six>=1.9.0
xattr>=0.4 xattr>=0.4
PyECLib>=1.0.7 PyECLib==1.0.7 # BSD

View File

@ -95,8 +95,8 @@ paste.filter_factory =
gatekeeper = swift.common.middleware.gatekeeper:filter_factory gatekeeper = swift.common.middleware.gatekeeper:filter_factory
container_sync = swift.common.middleware.container_sync:filter_factory container_sync = swift.common.middleware.container_sync:filter_factory
xprofile = swift.common.middleware.xprofile:filter_factory xprofile = swift.common.middleware.xprofile:filter_factory
versioned_writes = swift.common.middleware.versioned_writes:filter_factory
fake_footers = swift.common.middleware.fake_footers:filter_factory fake_footers = swift.common.middleware.fake_footers:filter_factory
test_fake_footers = swift.common.middleware.test_fake_footers:filter_factory
[build_sphinx] [build_sphinx]
all_files = 1 all_files = 1

View File

@ -380,6 +380,7 @@ class AccountBroker(DatabaseBroker):
:returns: list of tuples of (name, object_count, bytes_used, 0) :returns: list of tuples of (name, object_count, bytes_used, 0)
""" """
delim_force_gte = False
(marker, end_marker, prefix, delimiter) = utf8encode( (marker, end_marker, prefix, delimiter) = utf8encode(
marker, end_marker, prefix, delimiter) marker, end_marker, prefix, delimiter)
self._commit_puts_stale_ok() self._commit_puts_stale_ok()
@ -392,12 +393,17 @@ class AccountBroker(DatabaseBroker):
query = """ query = """
SELECT name, object_count, bytes_used, 0 SELECT name, object_count, bytes_used, 0
FROM container FROM container
WHERE deleted = 0 AND """ WHERE """
query_args = [] query_args = []
if end_marker: if end_marker:
query += ' name < ? AND' query += ' name < ? AND'
query_args.append(end_marker) query_args.append(end_marker)
if marker and marker >= prefix: if delim_force_gte:
query += ' name >= ? AND'
query_args.append(marker)
# Always set back to False
delim_force_gte = False
elif marker and marker >= prefix:
query += ' name > ? AND' query += ' name > ? AND'
query_args.append(marker) query_args.append(marker)
elif prefix: elif prefix:
@ -437,6 +443,8 @@ class AccountBroker(DatabaseBroker):
end = name.find(delimiter, len(prefix)) end = name.find(delimiter, len(prefix))
if end > 0: if end > 0:
marker = name[:end] + chr(ord(delimiter) + 1) marker = name[:end] + chr(ord(delimiter) + 1)
# we want result to be inclusive of delim+1
delim_force_gte = True
dir_name = name[:end + 1] dir_name = name[:end + 1]
if dir_name != orig_marker: if dir_name != orig_marker:
results.append([dir_name, 0, 0, 1]) results.append([dir_name, 0, 0, 1])

View File

@ -15,10 +15,12 @@
import os import os
import random import random
import socket
from swift import gettext_ as _ from swift import gettext_ as _
from logging import DEBUG from logging import DEBUG
from math import sqrt from math import sqrt
from time import time from time import time
from hashlib import md5
import itertools import itertools
from eventlet import GreenPool, sleep, Timeout from eventlet import GreenPool, sleep, Timeout
@ -70,6 +72,7 @@ class AccountReaper(Daemon):
self.node_timeout = int(conf.get('node_timeout', 10)) self.node_timeout = int(conf.get('node_timeout', 10))
self.conn_timeout = float(conf.get('conn_timeout', 0.5)) self.conn_timeout = float(conf.get('conn_timeout', 0.5))
self.myips = whataremyips(conf.get('bind_ip', '0.0.0.0')) self.myips = whataremyips(conf.get('bind_ip', '0.0.0.0'))
self.bind_port = int(conf.get('bind_port', 0))
self.concurrency = int(conf.get('concurrency', 25)) self.concurrency = int(conf.get('concurrency', 25))
self.container_concurrency = self.object_concurrency = \ self.container_concurrency = self.object_concurrency = \
sqrt(self.concurrency) sqrt(self.concurrency)
@ -79,6 +82,7 @@ class AccountReaper(Daemon):
self.delay_reaping = int(conf.get('delay_reaping') or 0) self.delay_reaping = int(conf.get('delay_reaping') or 0)
reap_warn_after = float(conf.get('reap_warn_after') or 86400 * 30) reap_warn_after = float(conf.get('reap_warn_after') or 86400 * 30)
self.reap_not_done_after = reap_warn_after + self.delay_reaping self.reap_not_done_after = reap_warn_after + self.delay_reaping
self.start_time = time()
def get_account_ring(self): def get_account_ring(self):
"""The account :class:`swift.common.ring.Ring` for the cluster.""" """The account :class:`swift.common.ring.Ring` for the cluster."""
@ -161,9 +165,16 @@ class AccountReaper(Daemon):
if not partition.isdigit(): if not partition.isdigit():
continue continue
nodes = self.get_account_ring().get_part_nodes(int(partition)) nodes = self.get_account_ring().get_part_nodes(int(partition))
if (not is_local_device(self.myips, None, nodes[0]['ip'], None) if not os.path.isdir(partition_path):
or not os.path.isdir(partition_path)):
continue continue
container_shard = None
for container_shard, node in enumerate(nodes):
if is_local_device(self.myips, None, node['ip'], None) and \
(not self.bind_port or self.bind_port == node['port']):
break
else:
continue
for suffix in os.listdir(partition_path): for suffix in os.listdir(partition_path):
suffix_path = os.path.join(partition_path, suffix) suffix_path = os.path.join(partition_path, suffix)
if not os.path.isdir(suffix_path): if not os.path.isdir(suffix_path):
@ -181,7 +192,9 @@ class AccountReaper(Daemon):
AccountBroker(os.path.join(hsh_path, fname)) AccountBroker(os.path.join(hsh_path, fname))
if broker.is_status_deleted() and \ if broker.is_status_deleted() and \
not broker.empty(): not broker.empty():
self.reap_account(broker, partition, nodes) self.reap_account(
broker, partition, nodes,
container_shard=container_shard)
def reset_stats(self): def reset_stats(self):
self.stats_return_codes = {} self.stats_return_codes = {}
@ -192,7 +205,7 @@ class AccountReaper(Daemon):
self.stats_containers_possibly_remaining = 0 self.stats_containers_possibly_remaining = 0
self.stats_objects_possibly_remaining = 0 self.stats_objects_possibly_remaining = 0
def reap_account(self, broker, partition, nodes): def reap_account(self, broker, partition, nodes, container_shard=None):
""" """
Called once per pass for each account this server is the primary for Called once per pass for each account this server is the primary for
and attempts to delete the data for the given account. The reaper will and attempts to delete the data for the given account. The reaper will
@ -219,6 +232,8 @@ class AccountReaper(Daemon):
:param broker: The AccountBroker for the account to delete. :param broker: The AccountBroker for the account to delete.
:param partition: The partition in the account ring the account is on. :param partition: The partition in the account ring the account is on.
:param nodes: The primary node dicts for the account to delete. :param nodes: The primary node dicts for the account to delete.
:param container_shard: int used to shard containers reaped. If None,
will reap all containers.
.. seealso:: .. seealso::
@ -237,16 +252,24 @@ class AccountReaper(Daemon):
account = info['account'] account = info['account']
self.logger.info(_('Beginning pass on account %s'), account) self.logger.info(_('Beginning pass on account %s'), account)
self.reset_stats() self.reset_stats()
container_limit = 1000
if container_shard is not None:
container_limit *= len(nodes)
try: try:
marker = '' marker = ''
while True: while True:
containers = \ containers = \
list(broker.list_containers_iter(1000, marker, None, None, list(broker.list_containers_iter(container_limit, marker,
None)) None, None, None))
if not containers: if not containers:
break break
try: try:
for (container, _junk, _junk, _junk) in containers: for (container, _junk, _junk, _junk) in containers:
this_shard = int(md5(container).hexdigest(), 16) % \
len(nodes)
if container_shard not in (this_shard, None):
continue
self.container_pool.spawn(self.reap_container, account, self.container_pool.spawn(self.reap_container, account,
partition, nodes, container) partition, nodes, container)
self.container_pool.waitall() self.container_pool.waitall()
@ -351,6 +374,10 @@ class AccountReaper(Daemon):
self.stats_return_codes.get(err.http_status / 100, 0) + 1 self.stats_return_codes.get(err.http_status / 100, 0) + 1
self.logger.increment( self.logger.increment(
'return_codes.%d' % (err.http_status / 100,)) 'return_codes.%d' % (err.http_status / 100,))
except (Timeout, socket.error) as err:
self.logger.error(
_('Timeout Exception with %(ip)s:%(port)s/%(device)s'),
node)
if not objects: if not objects:
break break
try: try:
@ -403,6 +430,12 @@ class AccountReaper(Daemon):
self.stats_return_codes.get(err.http_status / 100, 0) + 1 self.stats_return_codes.get(err.http_status / 100, 0) + 1
self.logger.increment( self.logger.increment(
'return_codes.%d' % (err.http_status / 100,)) 'return_codes.%d' % (err.http_status / 100,))
except (Timeout, socket.error) as err:
self.logger.error(
_('Timeout Exception with %(ip)s:%(port)s/%(device)s'),
node)
failures += 1
self.logger.increment('containers_failures')
if successes > failures: if successes > failures:
self.stats_containers_deleted += 1 self.stats_containers_deleted += 1
self.logger.increment('containers_deleted') self.logger.increment('containers_deleted')
@ -473,6 +506,12 @@ class AccountReaper(Daemon):
self.stats_return_codes.get(err.http_status / 100, 0) + 1 self.stats_return_codes.get(err.http_status / 100, 0) + 1
self.logger.increment( self.logger.increment(
'return_codes.%d' % (err.http_status / 100,)) 'return_codes.%d' % (err.http_status / 100,))
except (Timeout, socket.error) as err:
failures += 1
self.logger.increment('objects_failures')
self.logger.error(
_('Timeout Exception with %(ip)s:%(port)s/%(device)s'),
node)
if successes > failures: if successes > failures:
self.stats_objects_deleted += 1 self.stats_objects_deleted += 1
self.logger.increment('objects_deleted') self.logger.increment('objects_deleted')

169
swift/cli/recon.py Executable file → Normal file
View File

@ -100,11 +100,14 @@ class Scout(object):
Obtain telemetry from a host running the swift recon middleware. Obtain telemetry from a host running the swift recon middleware.
:param host: host to check :param host: host to check
:returns: tuple of (recon url used, response body, and status) :returns: tuple of (recon url used, response body, status, time start
and time end)
""" """
base_url = "http://%s:%s/recon/" % (host[0], host[1]) base_url = "http://%s:%s/recon/" % (host[0], host[1])
ts_start = time.time()
url, content, status = self.scout_host(base_url, self.recon_type) url, content, status = self.scout_host(base_url, self.recon_type)
return url, content, status ts_end = time.time()
return url, content, status, ts_start, ts_end
def scout_server_type(self, host): def scout_server_type(self, host):
""" """
@ -253,7 +256,8 @@ class SwiftRecon(object):
if self.verbose: if self.verbose:
for ring_file, ring_sum in rings.items(): for ring_file, ring_sum in rings.items():
print("-> On disk %s md5sum: %s" % (ring_file, ring_sum)) print("-> On disk %s md5sum: %s" % (ring_file, ring_sum))
for url, response, status in self.pool.imap(recon.scout, hosts): for url, response, status, ts_start, ts_end in self.pool.imap(
recon.scout, hosts):
if status != 200: if status != 200:
errors = errors + 1 errors = errors + 1
continue continue
@ -291,7 +295,8 @@ class SwiftRecon(object):
printfn("[%s] Checking swift.conf md5sum" % self._ptime()) printfn("[%s] Checking swift.conf md5sum" % self._ptime())
if self.verbose: if self.verbose:
printfn("-> On disk swift.conf md5sum: %s" % (conf_sum,)) printfn("-> On disk swift.conf md5sum: %s" % (conf_sum,))
for url, response, status in self.pool.imap(recon.scout, hosts): for url, response, status, ts_start, ts_end in self.pool.imap(
recon.scout, hosts):
if status == 200: if status == 200:
if response[SWIFT_CONF_FILE] != conf_sum: if response[SWIFT_CONF_FILE] != conf_sum:
printfn("!! %s (%s) doesn't match on disk md5sum" % printfn("!! %s (%s) doesn't match on disk md5sum" %
@ -317,7 +322,8 @@ class SwiftRecon(object):
recon = Scout("async", self.verbose, self.suppress_errors, recon = Scout("async", self.verbose, self.suppress_errors,
self.timeout) self.timeout)
print("[%s] Checking async pendings" % self._ptime()) print("[%s] Checking async pendings" % self._ptime())
for url, response, status in self.pool.imap(recon.scout, hosts): for url, response, status, ts_start, ts_end in self.pool.imap(
recon.scout, hosts):
if status == 200: if status == 200:
scan[url] = response['async_pending'] scan[url] = response['async_pending']
stats = self._gen_stats(scan.values(), 'async_pending') stats = self._gen_stats(scan.values(), 'async_pending')
@ -338,7 +344,8 @@ class SwiftRecon(object):
recon = Scout("driveaudit", self.verbose, self.suppress_errors, recon = Scout("driveaudit", self.verbose, self.suppress_errors,
self.timeout) self.timeout)
print("[%s] Checking drive-audit errors" % self._ptime()) print("[%s] Checking drive-audit errors" % self._ptime())
for url, response, status in self.pool.imap(recon.scout, hosts): for url, response, status, ts_start, ts_end in self.pool.imap(
recon.scout, hosts):
if status == 200: if status == 200:
scan[url] = response['drive_audit_errors'] scan[url] = response['drive_audit_errors']
stats = self._gen_stats(scan.values(), 'drive_audit_errors') stats = self._gen_stats(scan.values(), 'drive_audit_errors')
@ -361,7 +368,8 @@ class SwiftRecon(object):
self.timeout) self.timeout)
print("[%s] Getting unmounted drives from %s hosts..." % print("[%s] Getting unmounted drives from %s hosts..." %
(self._ptime(), len(hosts))) (self._ptime(), len(hosts)))
for url, response, status in self.pool.imap(recon.scout, hosts): for url, response, status, ts_start, ts_end in self.pool.imap(
recon.scout, hosts):
if status == 200: if status == 200:
unmounted[url] = [] unmounted[url] = []
errors[url] = [] errors[url] = []
@ -414,7 +422,8 @@ class SwiftRecon(object):
recon = Scout("expirer/%s" % self.server_type, self.verbose, recon = Scout("expirer/%s" % self.server_type, self.verbose,
self.suppress_errors, self.timeout) self.suppress_errors, self.timeout)
print("[%s] Checking on expirers" % self._ptime()) print("[%s] Checking on expirers" % self._ptime())
for url, response, status in self.pool.imap(recon.scout, hosts): for url, response, status, ts_start, ts_end in self.pool.imap(
recon.scout, hosts):
if status == 200: if status == 200:
stats['object_expiration_pass'].append( stats['object_expiration_pass'].append(
response.get('object_expiration_pass')) response.get('object_expiration_pass'))
@ -447,15 +456,18 @@ class SwiftRecon(object):
least_recent_url = None least_recent_url = None
most_recent_time = 0 most_recent_time = 0
most_recent_url = None most_recent_url = None
for url, response, status in self.pool.imap(recon.scout, hosts): for url, response, status, ts_start, ts_end in self.pool.imap(
recon.scout, hosts):
if status == 200: if status == 200:
stats['replication_time'].append( stats['replication_time'].append(
response.get('replication_time')) response.get('replication_time',
repl_stats = response['replication_stats'] response.get('object_replication_time', 0)))
repl_stats = response.get('replication_stats')
if repl_stats: if repl_stats:
for stat_key in ['attempted', 'failure', 'success']: for stat_key in ['attempted', 'failure', 'success']:
stats[stat_key].append(repl_stats.get(stat_key)) stats[stat_key].append(repl_stats.get(stat_key))
last = response.get('replication_last', 0) last = response.get('replication_last',
response.get('object_replication_last', 0))
if last < least_recent_time: if last < least_recent_time:
least_recent_time = last least_recent_time = last
least_recent_url = url least_recent_url = url
@ -496,61 +508,6 @@ class SwiftRecon(object):
elapsed, elapsed_unit, host)) elapsed, elapsed_unit, host))
print("=" * 79) print("=" * 79)
def object_replication_check(self, hosts):
"""
Obtain and print replication statistics from object servers
:param hosts: set of hosts to check. in the format of:
set([('127.0.0.1', 6020), ('127.0.0.2', 6030)])
"""
stats = {}
recon = Scout("replication", self.verbose, self.suppress_errors,
self.timeout)
print("[%s] Checking on replication" % self._ptime())
least_recent_time = 9999999999
least_recent_url = None
most_recent_time = 0
most_recent_url = None
for url, response, status in self.pool.imap(recon.scout, hosts):
if status == 200:
stats[url] = response['object_replication_time']
last = response.get('object_replication_last', 0)
if last < least_recent_time:
least_recent_time = last
least_recent_url = url
if last > most_recent_time:
most_recent_time = last
most_recent_url = url
times = [x for x in stats.values() if x is not None]
if len(stats) > 0 and len(times) > 0:
computed = self._gen_stats(times, 'replication_time')
if computed['reported'] > 0:
self._print_stats(computed)
else:
print("[replication_time] - No hosts returned valid data.")
else:
print("[replication_time] - No hosts returned valid data.")
if least_recent_url is not None:
host = urlparse(least_recent_url).netloc
if not least_recent_time:
print('Oldest completion was NEVER by %s.' % host)
else:
elapsed = time.time() - least_recent_time
elapsed, elapsed_unit = seconds2timeunit(elapsed)
print('Oldest completion was %s (%d %s ago) by %s.' % (
time.strftime('%Y-%m-%d %H:%M:%S',
time.gmtime(least_recent_time)),
elapsed, elapsed_unit, host))
if most_recent_url is not None:
host = urlparse(most_recent_url).netloc
elapsed = time.time() - most_recent_time
elapsed, elapsed_unit = seconds2timeunit(elapsed)
print('Most recent completion was %s (%d %s ago) by %s.' % (
time.strftime('%Y-%m-%d %H:%M:%S',
time.gmtime(most_recent_time)),
elapsed, elapsed_unit, host))
print("=" * 79)
def updater_check(self, hosts): def updater_check(self, hosts):
""" """
Obtain and print updater statistics Obtain and print updater statistics
@ -562,7 +519,8 @@ class SwiftRecon(object):
recon = Scout("updater/%s" % self.server_type, self.verbose, recon = Scout("updater/%s" % self.server_type, self.verbose,
self.suppress_errors, self.timeout) self.suppress_errors, self.timeout)
print("[%s] Checking updater times" % self._ptime()) print("[%s] Checking updater times" % self._ptime())
for url, response, status in self.pool.imap(recon.scout, hosts): for url, response, status, ts_start, ts_end in self.pool.imap(
recon.scout, hosts):
if status == 200: if status == 200:
if response['%s_updater_sweep' % self.server_type]: if response['%s_updater_sweep' % self.server_type]:
stats.append(response['%s_updater_sweep' % stats.append(response['%s_updater_sweep' %
@ -592,7 +550,8 @@ class SwiftRecon(object):
recon = Scout("auditor/%s" % self.server_type, self.verbose, recon = Scout("auditor/%s" % self.server_type, self.verbose,
self.suppress_errors, self.timeout) self.suppress_errors, self.timeout)
print("[%s] Checking auditor stats" % self._ptime()) print("[%s] Checking auditor stats" % self._ptime())
for url, response, status in self.pool.imap(recon.scout, hosts): for url, response, status, ts_start, ts_end in self.pool.imap(
recon.scout, hosts):
if status == 200: if status == 200:
scan[url] = response scan[url] = response
if len(scan) < 1: if len(scan) < 1:
@ -665,7 +624,8 @@ class SwiftRecon(object):
recon = Scout("auditor/object", self.verbose, self.suppress_errors, recon = Scout("auditor/object", self.verbose, self.suppress_errors,
self.timeout) self.timeout)
print("[%s] Checking auditor stats " % self._ptime()) print("[%s] Checking auditor stats " % self._ptime())
for url, response, status in self.pool.imap(recon.scout, hosts): for url, response, status, ts_start, ts_end in self.pool.imap(
recon.scout, hosts):
if status == 200: if status == 200:
if response['object_auditor_stats_ALL']: if response['object_auditor_stats_ALL']:
all_scan[url] = response['object_auditor_stats_ALL'] all_scan[url] = response['object_auditor_stats_ALL']
@ -736,7 +696,8 @@ class SwiftRecon(object):
recon = Scout("load", self.verbose, self.suppress_errors, recon = Scout("load", self.verbose, self.suppress_errors,
self.timeout) self.timeout)
print("[%s] Checking load averages" % self._ptime()) print("[%s] Checking load averages" % self._ptime())
for url, response, status in self.pool.imap(recon.scout, hosts): for url, response, status, ts_start, ts_end in self.pool.imap(
recon.scout, hosts):
if status == 200: if status == 200:
load1[url] = response['1m'] load1[url] = response['1m']
load5[url] = response['5m'] load5[url] = response['5m']
@ -765,7 +726,8 @@ class SwiftRecon(object):
recon = Scout("quarantined", self.verbose, self.suppress_errors, recon = Scout("quarantined", self.verbose, self.suppress_errors,
self.timeout) self.timeout)
print("[%s] Checking quarantine" % self._ptime()) print("[%s] Checking quarantine" % self._ptime())
for url, response, status in self.pool.imap(recon.scout, hosts): for url, response, status, ts_start, ts_end in self.pool.imap(
recon.scout, hosts):
if status == 200: if status == 200:
objq[url] = response['objects'] objq[url] = response['objects']
conq[url] = response['containers'] conq[url] = response['containers']
@ -799,7 +761,8 @@ class SwiftRecon(object):
recon = Scout("sockstat", self.verbose, self.suppress_errors, recon = Scout("sockstat", self.verbose, self.suppress_errors,
self.timeout) self.timeout)
print("[%s] Checking socket usage" % self._ptime()) print("[%s] Checking socket usage" % self._ptime())
for url, response, status in self.pool.imap(recon.scout, hosts): for url, response, status, ts_start, ts_end in self.pool.imap(
recon.scout, hosts):
if status == 200: if status == 200:
inuse4[url] = response['tcp_in_use'] inuse4[url] = response['tcp_in_use']
mem[url] = response['tcp_mem_allocated_bytes'] mem[url] = response['tcp_mem_allocated_bytes']
@ -835,7 +798,8 @@ class SwiftRecon(object):
recon = Scout("diskusage", self.verbose, self.suppress_errors, recon = Scout("diskusage", self.verbose, self.suppress_errors,
self.timeout) self.timeout)
print("[%s] Checking disk usage now" % self._ptime()) print("[%s] Checking disk usage now" % self._ptime())
for url, response, status in self.pool.imap(recon.scout, hosts): for url, response, status, ts_start, ts_end in self.pool.imap(
recon.scout, hosts):
if status == 200: if status == 200:
hostusage = [] hostusage = []
for entry in response: for entry in response:
@ -915,6 +879,47 @@ class SwiftRecon(object):
host = urlparse(url).netloc.split(':')[0] host = urlparse(url).netloc.split(':')[0]
print('%.02f%% %s' % (used, '%-15s %s' % (host, device))) print('%.02f%% %s' % (used, '%-15s %s' % (host, device)))
def time_check(self, hosts):
"""
Check a time synchronization of hosts with current time
:param hosts: set of hosts to check. in the format of:
set([('127.0.0.1', 6020), ('127.0.0.2', 6030)])
"""
matches = 0
errors = 0
recon = Scout("time", self.verbose, self.suppress_errors,
self.timeout)
print("[%s] Checking time-sync" % self._ptime())
for url, ts_remote, status, ts_start, ts_end in self.pool.imap(
recon.scout, hosts):
if status != 200:
errors = errors + 1
continue
if (ts_remote < ts_start or ts_remote > ts_end):
diff = abs(ts_end - ts_remote)
ts_end_f = time.strftime(
"%Y-%m-%d %H:%M:%S",
time.localtime(ts_end))
ts_remote_f = time.strftime(
"%Y-%m-%d %H:%M:%S",
time.localtime(ts_remote))
print("!! %s current time is %s, but remote is %s, "
"differs by %.2f sec" % (
url,
ts_end_f,
ts_remote_f,
diff))
continue
matches += 1
if self.verbose:
print("-> %s matches." % url)
print("%s/%s hosts matched, %s error[s] while checking hosts." % (
matches, len(hosts), errors))
print("=" * 79)
def main(self): def main(self):
""" """
Retrieve and report cluster info from hosts running recon middleware. Retrieve and report cluster info from hosts running recon middleware.
@ -922,7 +927,7 @@ class SwiftRecon(object):
print("=" * 79) print("=" * 79)
usage = ''' usage = '''
usage: %prog <server_type> [-v] [--suppress] [-a] [-r] [-u] [-d] usage: %prog <server_type> [-v] [--suppress] [-a] [-r] [-u] [-d]
[-l] [--md5] [--auditor] [--updater] [--expirer] [--sockstat] [-l] [-T] [--md5] [--auditor] [--updater] [--expirer] [--sockstat]
[--human-readable] [--human-readable]
<server_type>\taccount|container|object <server_type>\taccount|container|object
@ -964,13 +969,15 @@ class SwiftRecon(object):
help="Get cluster socket usage stats") help="Get cluster socket usage stats")
args.add_option('--driveaudit', action="store_true", args.add_option('--driveaudit', action="store_true",
help="Get drive audit error stats") help="Get drive audit error stats")
args.add_option('--time', '-T', action="store_true",
help="Check time synchronization")
args.add_option('--top', type='int', metavar='COUNT', default=0, args.add_option('--top', type='int', metavar='COUNT', default=0,
help='Also show the top COUNT entries in rank order.') help='Also show the top COUNT entries in rank order.')
args.add_option('--lowest', type='int', metavar='COUNT', default=0, args.add_option('--lowest', type='int', metavar='COUNT', default=0,
help='Also show the lowest COUNT entries in rank \ help='Also show the lowest COUNT entries in rank \
order.') order.')
args.add_option('--all', action="store_true", args.add_option('--all', action="store_true",
help="Perform all checks. Equal to \t\t\t-arudlq " help="Perform all checks. Equal to \t\t\t-arudlqT "
"--md5 --sockstat --auditor --updater --expirer") "--md5 --sockstat --auditor --updater --expirer")
args.add_option('--region', type="int", args.add_option('--region', type="int",
help="Only query servers in specified region") help="Only query servers in specified region")
@ -1011,7 +1018,7 @@ class SwiftRecon(object):
if options.all: if options.all:
if self.server_type == 'object': if self.server_type == 'object':
self.async_check(hosts) self.async_check(hosts)
self.object_replication_check(hosts) self.replication_check(hosts)
self.object_auditor_check(hosts) self.object_auditor_check(hosts)
self.updater_check(hosts) self.updater_check(hosts)
self.expirer_check(hosts) self.expirer_check(hosts)
@ -1031,6 +1038,7 @@ class SwiftRecon(object):
self.socket_usage(hosts) self.socket_usage(hosts)
self.server_type_check(hosts) self.server_type_check(hosts)
self.driveaudit_check(hosts) self.driveaudit_check(hosts)
self.time_check(hosts)
else: else:
if options.async: if options.async:
if self.server_type == 'object': if self.server_type == 'object':
@ -1040,10 +1048,7 @@ class SwiftRecon(object):
if options.unmounted: if options.unmounted:
self.umount_check(hosts) self.umount_check(hosts)
if options.replication: if options.replication:
if self.server_type == 'object': self.replication_check(hosts)
self.object_replication_check(hosts)
else:
self.replication_check(hosts)
if options.auditor: if options.auditor:
if self.server_type == 'object': if self.server_type == 'object':
self.object_auditor_check(hosts) self.object_auditor_check(hosts)
@ -1075,6 +1080,8 @@ class SwiftRecon(object):
self.socket_usage(hosts) self.socket_usage(hosts)
if options.driveaudit: if options.driveaudit:
self.driveaudit_check(hosts) self.driveaudit_check(hosts)
if options.time:
self.time_check(hosts)
def main(): def main():

View File

@ -403,14 +403,15 @@ swift-ring-builder <builder_file> create <part_power> <replicas>
print(Commands.create.__doc__.strip()) print(Commands.create.__doc__.strip())
exit(EXIT_ERROR) exit(EXIT_ERROR)
builder = RingBuilder(int(argv[3]), float(argv[4]), int(argv[5])) builder = RingBuilder(int(argv[3]), float(argv[4]), int(argv[5]))
backup_dir = pathjoin(dirname(argv[1]), 'backups') backup_dir = pathjoin(dirname(builder_file), 'backups')
try: try:
mkdir(backup_dir) mkdir(backup_dir)
except OSError as err: except OSError as err:
if err.errno != EEXIST: if err.errno != EEXIST:
raise raise
builder.save(pathjoin(backup_dir, '%d.' % time() + basename(argv[1]))) builder.save(pathjoin(backup_dir,
builder.save(argv[1]) '%d.' % time() + basename(builder_file)))
builder.save(builder_file)
exit(EXIT_SUCCESS) exit(EXIT_SUCCESS)
def default(): def default():
@ -418,7 +419,7 @@ swift-ring-builder <builder_file> create <part_power> <replicas>
swift-ring-builder <builder_file> swift-ring-builder <builder_file>
Shows information about the ring and the devices within. Shows information about the ring and the devices within.
""" """
print('%s, build version %d' % (argv[1], builder.version)) print('%s, build version %d' % (builder_file, builder.version))
regions = 0 regions = 0
zones = 0 zones = 0
balance = 0 balance = 0
@ -546,7 +547,7 @@ swift-ring-builder <builder_file> list_parts
if not builder._replica2part2dev: if not builder._replica2part2dev:
print('Specified builder file \"%s\" is not rebalanced yet. ' print('Specified builder file \"%s\" is not rebalanced yet. '
'Please rebalance first.' % argv[1]) 'Please rebalance first.' % builder_file)
exit(EXIT_ERROR) exit(EXIT_ERROR)
devs = _parse_list_parts_values(argv[3:]) devs = _parse_list_parts_values(argv[3:])
@ -612,7 +613,7 @@ swift-ring-builder <builder_file> add
print('The on-disk ring builder is unchanged.') print('The on-disk ring builder is unchanged.')
exit(EXIT_ERROR) exit(EXIT_ERROR)
builder.save(argv[1]) builder.save(builder_file)
exit(EXIT_SUCCESS) exit(EXIT_SUCCESS)
def set_weight(): def set_weight():
@ -644,7 +645,7 @@ swift-ring-builder <builder_file> set_weight
_parse_set_weight_values(argv[3:]) _parse_set_weight_values(argv[3:])
builder.save(argv[1]) builder.save(builder_file)
exit(EXIT_SUCCESS) exit(EXIT_SUCCESS)
def set_info(): def set_info():
@ -689,7 +690,7 @@ swift-ring-builder <builder_file> set_info
print(err) print(err)
exit(EXIT_ERROR) exit(EXIT_ERROR)
builder.save(argv[1]) builder.save(builder_file)
exit(EXIT_SUCCESS) exit(EXIT_SUCCESS)
def remove(): def remove():
@ -754,7 +755,7 @@ swift-ring-builder <builder_file> search
print('%s marked for removal and will ' print('%s marked for removal and will '
'be removed next rebalance.' % format_device(dev)) 'be removed next rebalance.' % format_device(dev))
builder.save(argv[1]) builder.save(builder_file)
exit(EXIT_SUCCESS) exit(EXIT_SUCCESS)
def rebalance(): def rebalance():
@ -856,9 +857,9 @@ swift-ring-builder <builder_file> rebalance [options]
ts = time() ts = time()
builder.get_ring().save( builder.get_ring().save(
pathjoin(backup_dir, '%d.' % ts + basename(ring_file))) pathjoin(backup_dir, '%d.' % ts + basename(ring_file)))
builder.save(pathjoin(backup_dir, '%d.' % ts + basename(argv[1]))) builder.save(pathjoin(backup_dir, '%d.' % ts + basename(builder_file)))
builder.get_ring().save(ring_file) builder.get_ring().save(ring_file)
builder.save(argv[1]) builder.save(builder_file)
exit(status) exit(status)
def dispersion(): def dispersion():
@ -893,7 +894,7 @@ swift-ring-builder <builder_file> dispersion <search_filter> [options]
status = EXIT_SUCCESS status = EXIT_SUCCESS
if not builder._replica2part2dev: if not builder._replica2part2dev:
print('Specified builder file \"%s\" is not rebalanced yet. ' print('Specified builder file \"%s\" is not rebalanced yet. '
'Please rebalance first.' % argv[1]) 'Please rebalance first.' % builder_file)
exit(EXIT_ERROR) exit(EXIT_ERROR)
usage = Commands.dispersion.__doc__.strip() usage = Commands.dispersion.__doc__.strip()
parser = optparse.OptionParser(usage) parser = optparse.OptionParser(usage)
@ -1021,7 +1022,7 @@ swift-ring-builder <ring_file> write_builder [min_part_hours]
def pretend_min_part_hours_passed(): def pretend_min_part_hours_passed():
builder.pretend_min_part_hours_passed() builder.pretend_min_part_hours_passed()
builder.save(argv[1]) builder.save(builder_file)
exit(EXIT_SUCCESS) exit(EXIT_SUCCESS)
def set_min_part_hours(): def set_min_part_hours():
@ -1037,7 +1038,7 @@ swift-ring-builder <builder_file> set_min_part_hours <hours>
builder.change_min_part_hours(int(argv[3])) builder.change_min_part_hours(int(argv[3]))
print('The minimum number of hours before a partition can be ' print('The minimum number of hours before a partition can be '
'reassigned is now set to %s' % argv[3]) 'reassigned is now set to %s' % argv[3])
builder.save(argv[1]) builder.save(builder_file)
exit(EXIT_SUCCESS) exit(EXIT_SUCCESS)
def set_replicas(): def set_replicas():
@ -1069,7 +1070,7 @@ swift-ring-builder <builder_file> set_replicas <replicas>
builder.set_replicas(new_replicas) builder.set_replicas(new_replicas)
print('The replica count is now %.6f.' % builder.replicas) print('The replica count is now %.6f.' % builder.replicas)
print('The change will take effect after the next rebalance.') print('The change will take effect after the next rebalance.')
builder.save(argv[1]) builder.save(builder_file)
exit(EXIT_SUCCESS) exit(EXIT_SUCCESS)
def set_overload(): def set_overload():
@ -1112,7 +1113,7 @@ swift-ring-builder <builder_file> set_overload <overload>[%]
print('The overload factor is now %0.2f%% (%.6f)' % ( print('The overload factor is now %0.2f%% (%.6f)' % (
builder.overload * 100, builder.overload)) builder.overload * 100, builder.overload))
print('The change will take effect after the next rebalance.') print('The change will take effect after the next rebalance.')
builder.save(argv[1]) builder.save(builder_file)
exit(status) exit(status)
@ -1145,6 +1146,9 @@ def main(arguments=None):
exit(EXIT_SUCCESS) exit(EXIT_SUCCESS)
builder_file, ring_file = parse_builder_ring_filename_args(argv) builder_file, ring_file = parse_builder_ring_filename_args(argv)
if builder_file != argv[1]:
print('Note: using %s instead of %s as builder file' % (
builder_file, argv[1]))
try: try:
builder = RingBuilder.load(builder_file) builder = RingBuilder.load(builder_file)
@ -1157,10 +1161,10 @@ def main(arguments=None):
exit(EXIT_ERROR) exit(EXIT_ERROR)
except Exception as e: except Exception as e:
print('Problem occurred while reading builder file: %s. %s' % print('Problem occurred while reading builder file: %s. %s' %
(argv[1], e)) (builder_file, e))
exit(EXIT_ERROR) exit(EXIT_ERROR)
backup_dir = pathjoin(dirname(argv[1]), 'backups') backup_dir = pathjoin(dirname(builder_file), 'backups')
try: try:
mkdir(backup_dir) mkdir(backup_dir)
except OSError as err: except OSError as err:
@ -1173,7 +1177,7 @@ def main(arguments=None):
command = argv[2] command = argv[2]
if argv[0].endswith('-safe'): if argv[0].endswith('-safe'):
try: try:
with lock_parent_directory(abspath(argv[1]), 15): with lock_parent_directory(abspath(builder_file), 15):
Commands.__dict__.get(command, Commands.unknown.im_func)() Commands.__dict__.get(command, Commands.unknown.im_func)()
except exceptions.LockTimeout: except exceptions.LockTimeout:
print("Ring/builder dir currently locked.") print("Ring/builder dir currently locked.")

View File

@ -13,6 +13,7 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
import functools
import os import os
import urllib import urllib
import time import time
@ -420,28 +421,33 @@ def check_destination_header(req):
'<container name>/<object name>') '<container name>/<object name>')
def check_account_format(req, account): def check_name_format(req, name, target_type):
""" """
Validate that the header contains valid account name. Validate that the header contains valid account or container name.
We assume the caller ensures that
destination header is present in req.headers.
:param req: HTTP request object :param req: HTTP request object
:returns: A properly encoded account name :param name: header value to validate
:param target_type: which header is being validated (Account or Container)
:returns: A properly encoded account name or container name
:raise: HTTPPreconditionFailed if account header :raise: HTTPPreconditionFailed if account header
is not well formatted. is not well formatted.
""" """
if not account: if not name:
raise HTTPPreconditionFailed( raise HTTPPreconditionFailed(
request=req, request=req,
body='Account name cannot be empty') body='%s name cannot be empty' % target_type)
if isinstance(account, unicode): if isinstance(name, unicode):
account = account.encode('utf-8') name = name.encode('utf-8')
if '/' in account: if '/' in name:
raise HTTPPreconditionFailed( raise HTTPPreconditionFailed(
request=req, request=req,
body='Account name cannot contain slashes') body='%s name cannot contain slashes' % target_type)
return account return name
check_account_format = functools.partial(check_name_format,
target_type='Account')
check_container_format = functools.partial(check_name_format,
target_type='Container')
def valid_api_version(version): def valid_api_version(version):

View File

@ -187,7 +187,8 @@ class Replicator(Daemon):
self.stats = {'attempted': 0, 'success': 0, 'failure': 0, 'ts_repl': 0, self.stats = {'attempted': 0, 'success': 0, 'failure': 0, 'ts_repl': 0,
'no_change': 0, 'hashmatch': 0, 'rsync': 0, 'diff': 0, 'no_change': 0, 'hashmatch': 0, 'rsync': 0, 'diff': 0,
'remove': 0, 'empty': 0, 'remote_merge': 0, 'remove': 0, 'empty': 0, 'remote_merge': 0,
'start': time.time(), 'diff_capped': 0} 'start': time.time(), 'diff_capped': 0,
'failure_nodes': {}}
def _report_stats(self): def _report_stats(self):
"""Report the current stats to the logs.""" """Report the current stats to the logs."""
@ -212,6 +213,13 @@ class Replicator(Daemon):
('no_change', 'hashmatch', 'rsync', 'diff', 'ts_repl', ('no_change', 'hashmatch', 'rsync', 'diff', 'ts_repl',
'empty', 'diff_capped')])) 'empty', 'diff_capped')]))
def _add_failure_stats(self, failure_devs_info):
for node, dev in failure_devs_info:
self.stats['failure'] += 1
failure_devs = self.stats['failure_nodes'].setdefault(node, {})
failure_devs.setdefault(dev, 0)
failure_devs[dev] += 1
def _rsync_file(self, db_file, remote_file, whole_file=True, def _rsync_file(self, db_file, remote_file, whole_file=True,
different_region=False): different_region=False):
""" """
@ -479,7 +487,10 @@ class Replicator(Daemon):
quarantine_db(broker.db_file, broker.db_type) quarantine_db(broker.db_file, broker.db_type)
else: else:
self.logger.exception(_('ERROR reading db %s'), object_file) self.logger.exception(_('ERROR reading db %s'), object_file)
self.stats['failure'] += 1 nodes = self.ring.get_part_nodes(int(partition))
self._add_failure_stats([(failure_dev['replication_ip'],
failure_dev['device'])
for failure_dev in nodes])
self.logger.increment('failures') self.logger.increment('failures')
return return
# The db is considered deleted if the delete_timestamp value is greater # The db is considered deleted if the delete_timestamp value is greater
@ -494,6 +505,7 @@ class Replicator(Daemon):
self.logger.timing_since('timing', start_time) self.logger.timing_since('timing', start_time)
return return
responses = [] responses = []
failure_devs_info = set()
nodes = self.ring.get_part_nodes(int(partition)) nodes = self.ring.get_part_nodes(int(partition))
local_dev = None local_dev = None
for node in nodes: for node in nodes:
@ -532,7 +544,8 @@ class Replicator(Daemon):
self.logger.exception(_('ERROR syncing %(file)s with node' self.logger.exception(_('ERROR syncing %(file)s with node'
' %(node)s'), ' %(node)s'),
{'file': object_file, 'node': node}) {'file': object_file, 'node': node})
self.stats['success' if success else 'failure'] += 1 if not success:
failure_devs_info.add((node['replication_ip'], node['device']))
self.logger.increment('successes' if success else 'failures') self.logger.increment('successes' if success else 'failures')
responses.append(success) responses.append(success)
try: try:
@ -543,7 +556,17 @@ class Replicator(Daemon):
if not shouldbehere and all(responses): if not shouldbehere and all(responses):
# If the db shouldn't be on this node and has been successfully # If the db shouldn't be on this node and has been successfully
# synced to all of its peers, it can be removed. # synced to all of its peers, it can be removed.
self.delete_db(broker) if not self.delete_db(broker):
failure_devs_info.update(
[(failure_dev['replication_ip'], failure_dev['device'])
for failure_dev in repl_nodes])
target_devs_info = set([(target_dev['replication_ip'],
target_dev['device'])
for target_dev in repl_nodes])
self.stats['success'] += len(target_devs_info - failure_devs_info)
self._add_failure_stats(failure_devs_info)
self.logger.timing_since('timing', start_time) self.logger.timing_since('timing', start_time)
def delete_db(self, broker): def delete_db(self, broker):
@ -558,9 +581,11 @@ class Replicator(Daemon):
if err.errno not in (errno.ENOENT, errno.ENOTEMPTY): if err.errno not in (errno.ENOENT, errno.ENOTEMPTY):
self.logger.exception( self.logger.exception(
_('ERROR while trying to clean up %s') % suf_dir) _('ERROR while trying to clean up %s') % suf_dir)
return False
self.stats['remove'] += 1 self.stats['remove'] += 1
device_name = self.extract_device(object_file) device_name = self.extract_device(object_file)
self.logger.increment('removes.' + device_name) self.logger.increment('removes.' + device_name)
return True
def extract_device(self, object_file): def extract_device(self, object_file):
""" """
@ -592,6 +617,10 @@ class Replicator(Daemon):
node['replication_port']): node['replication_port']):
if self.mount_check and not ismount( if self.mount_check and not ismount(
os.path.join(self.root, node['device'])): os.path.join(self.root, node['device'])):
self._add_failure_stats(
[(failure_dev['replication_ip'],
failure_dev['device'])
for failure_dev in self.ring.devs if failure_dev])
self.logger.warn( self.logger.warn(
_('Skipping %(device)s as it is not mounted') % node) _('Skipping %(device)s as it is not mounted') % node)
continue continue

View File

@ -462,10 +462,10 @@ class Server(object):
# maybe there's a config file(s) out there, but I couldn't find it! # maybe there's a config file(s) out there, but I couldn't find it!
if not kwargs.get('quiet'): if not kwargs.get('quiet'):
if number: if number:
print(_('Unable to locate config number %s for %s' % ( print(_('Unable to locate config number %s for %s')
number, self.server))) % (number, self.server))
else: else:
print(_('Unable to locate config for %s' % (self.server))) print(_('Unable to locate config for %s') % self.server)
if kwargs.get('verbose') and not kwargs.get('quiet'): if kwargs.get('verbose') and not kwargs.get('quiet'):
if found_conf_files: if found_conf_files:
print(_('Found configs:')) print(_('Found configs:'))

View File

@ -443,7 +443,7 @@ class MemcacheRing(object):
with Timeout(self._io_timeout): with Timeout(self._io_timeout):
sock.sendall(msg) sock.sendall(msg)
# Wait for the set to complete # Wait for the set to complete
for _ in range(len(mapping)): for line in range(len(mapping)):
fp.readline() fp.readline()
self._return_conn(server, fp, sock) self._return_conn(server, fp, sock)
return return

View File

@ -15,6 +15,7 @@
import errno import errno
import os import os
import time
from swift import gettext_ as _ from swift import gettext_ as _
from swift import __version__ as swiftver from swift import __version__ as swiftver
@ -133,19 +134,19 @@ class ReconMiddleware(object):
def get_replication_info(self, recon_type): def get_replication_info(self, recon_type):
"""get replication info""" """get replication info"""
replication_list = ['replication_time',
'replication_stats',
'replication_last']
if recon_type == 'account': if recon_type == 'account':
return self._from_recon_cache(['replication_time', return self._from_recon_cache(replication_list,
'replication_stats',
'replication_last'],
self.account_recon_cache) self.account_recon_cache)
elif recon_type == 'container': elif recon_type == 'container':
return self._from_recon_cache(['replication_time', return self._from_recon_cache(replication_list,
'replication_stats',
'replication_last'],
self.container_recon_cache) self.container_recon_cache)
elif recon_type == 'object': elif recon_type == 'object':
return self._from_recon_cache(['object_replication_time', replication_list += ['object_replication_time',
'object_replication_last'], 'object_replication_last']
return self._from_recon_cache(replication_list,
self.object_recon_cache) self.object_recon_cache)
else: else:
return None return None
@ -328,6 +329,11 @@ class ReconMiddleware(object):
raise raise
return sockstat return sockstat
def get_time(self):
"""get current time"""
return time.time()
def GET(self, req): def GET(self, req):
root, rcheck, rtype = req.split_path(1, 3, True) root, rcheck, rtype = req.split_path(1, 3, True)
all_rtypes = ['account', 'container', 'object'] all_rtypes = ['account', 'container', 'object']
@ -368,6 +374,8 @@ class ReconMiddleware(object):
content = self.get_version() content = self.get_version()
elif rcheck == "driveaudit": elif rcheck == "driveaudit":
content = self.get_driveaudit_error() content = self.get_driveaudit_error()
elif rcheck == "time":
content = self.get_time()
else: else:
content = "Invalid path: %s" % req.path content = "Invalid path: %s" % req.path
return Response(request=req, status="404 Not Found", return Response(request=req, status="404 Not Found",

View File

@ -122,11 +122,13 @@ from urllib import urlencode
from urlparse import parse_qs from urlparse import parse_qs
from swift.proxy.controllers.base import get_account_info, get_container_info from swift.proxy.controllers.base import get_account_info, get_container_info
from swift.common.swob import HeaderKeyDict, HTTPUnauthorized from swift.common.swob import HeaderKeyDict, HTTPUnauthorized, HTTPBadRequest
from swift.common.utils import split_path, get_valid_utf8_str, \ from swift.common.utils import split_path, get_valid_utf8_str, \
register_swift_info, get_hmac, streq_const_time, quote register_swift_info, get_hmac, streq_const_time, quote
DISALLOWED_INCOMING_HEADERS = 'x-object-manifest'
#: Default headers to remove from incoming requests. Simply a whitespace #: Default headers to remove from incoming requests. Simply a whitespace
#: delimited list of header names and names can optionally end with '*' to #: delimited list of header names and names can optionally end with '*' to
#: indicate a prefix match. DEFAULT_INCOMING_ALLOW_HEADERS is a list of #: indicate a prefix match. DEFAULT_INCOMING_ALLOW_HEADERS is a list of
@ -150,6 +152,10 @@ DEFAULT_OUTGOING_REMOVE_HEADERS = 'x-object-meta-*'
DEFAULT_OUTGOING_ALLOW_HEADERS = 'x-object-meta-public-*' DEFAULT_OUTGOING_ALLOW_HEADERS = 'x-object-meta-public-*'
CONTAINER_SCOPE = 'container'
ACCOUNT_SCOPE = 'account'
def get_tempurl_keys_from_metadata(meta): def get_tempurl_keys_from_metadata(meta):
""" """
Extracts the tempurl keys from metadata. Extracts the tempurl keys from metadata.
@ -170,6 +176,38 @@ def disposition_format(filename):
quote(filename, safe=' /'), quote(filename)) quote(filename, safe=' /'), quote(filename))
def authorize_same_account(account_to_match):
def auth_callback_same_account(req):
try:
_ver, acc, _rest = req.split_path(2, 3, True)
except ValueError:
return HTTPUnauthorized(request=req)
if acc == account_to_match:
return None
else:
return HTTPUnauthorized(request=req)
return auth_callback_same_account
def authorize_same_container(account_to_match, container_to_match):
def auth_callback_same_container(req):
try:
_ver, acc, con, _rest = req.split_path(3, 4, True)
except ValueError:
return HTTPUnauthorized(request=req)
if acc == account_to_match and con == container_to_match:
return None
else:
return HTTPUnauthorized(request=req)
return auth_callback_same_container
class TempURL(object): class TempURL(object):
""" """
WSGI Middleware to grant temporary URLs specific access to Swift WSGI Middleware to grant temporary URLs specific access to Swift
@ -230,6 +268,10 @@ class TempURL(object):
#: The methods allowed with Temp URLs. #: The methods allowed with Temp URLs.
self.methods = methods self.methods = methods
self.disallowed_headers = set(
'HTTP_' + h.upper().replace('-', '_')
for h in DISALLOWED_INCOMING_HEADERS.split())
headers = DEFAULT_INCOMING_REMOVE_HEADERS headers = DEFAULT_INCOMING_REMOVE_HEADERS
if 'incoming_remove_headers' in conf: if 'incoming_remove_headers' in conf:
headers = conf['incoming_remove_headers'] headers = conf['incoming_remove_headers']
@ -298,10 +340,10 @@ class TempURL(object):
return self.app(env, start_response) return self.app(env, start_response)
if not temp_url_sig or not temp_url_expires: if not temp_url_sig or not temp_url_expires:
return self._invalid(env, start_response) return self._invalid(env, start_response)
account = self._get_account(env) account, container = self._get_account_and_container(env)
if not account: if not account:
return self._invalid(env, start_response) return self._invalid(env, start_response)
keys = self._get_keys(env, account) keys = self._get_keys(env)
if not keys: if not keys:
return self._invalid(env, start_response) return self._invalid(env, start_response)
if env['REQUEST_METHOD'] == 'HEAD': if env['REQUEST_METHOD'] == 'HEAD':
@ -316,15 +358,32 @@ class TempURL(object):
else: else:
hmac_vals = self._get_hmacs(env, temp_url_expires, keys) hmac_vals = self._get_hmacs(env, temp_url_expires, keys)
# While it's true that any() will short-circuit, this doesn't affect is_valid_hmac = False
# the timing-attack resistance since the only way this will hmac_scope = None
# short-circuit is when a valid signature is passed in. for hmac, scope in hmac_vals:
is_valid_hmac = any(streq_const_time(temp_url_sig, hmac) # While it's true that we short-circuit, this doesn't affect the
for hmac in hmac_vals) # timing-attack resistance since the only way this will
# short-circuit is when a valid signature is passed in.
if streq_const_time(temp_url_sig, hmac):
is_valid_hmac = True
hmac_scope = scope
break
if not is_valid_hmac: if not is_valid_hmac:
return self._invalid(env, start_response) return self._invalid(env, start_response)
# disallowed headers prevent accidently allowing upload of a pointer
# to data that the PUT tempurl would not otherwise allow access for.
# It should be safe to provide a GET tempurl for data that an
# untrusted client just uploaded with a PUT tempurl.
resp = self._clean_disallowed_headers(env, start_response)
if resp:
return resp
self._clean_incoming_headers(env) self._clean_incoming_headers(env)
env['swift.authorize'] = lambda req: None
if hmac_scope == ACCOUNT_SCOPE:
env['swift.authorize'] = authorize_same_account(account)
else:
env['swift.authorize'] = authorize_same_container(account,
container)
env['swift.authorize_override'] = True env['swift.authorize_override'] = True
env['REMOTE_USER'] = '.wsgi.tempurl' env['REMOTE_USER'] = '.wsgi.tempurl'
qs = {'temp_url_sig': temp_url_sig, qs = {'temp_url_sig': temp_url_sig,
@ -365,22 +424,23 @@ class TempURL(object):
return self.app(env, _start_response) return self.app(env, _start_response)
def _get_account(self, env): def _get_account_and_container(self, env):
""" """
Returns just the account for the request, if it's an object Returns just the account and container for the request, if it's an
request and one of the configured methods; otherwise, None is object request and one of the configured methods; otherwise, None is
returned. returned.
:param env: The WSGI environment for the request. :param env: The WSGI environment for the request.
:returns: Account str or None. :returns: (Account str, container str) or (None, None).
""" """
if env['REQUEST_METHOD'] in self.methods: if env['REQUEST_METHOD'] in self.methods:
try: try:
ver, acc, cont, obj = split_path(env['PATH_INFO'], 4, 4, True) ver, acc, cont, obj = split_path(env['PATH_INFO'], 4, 4, True)
except ValueError: except ValueError:
return None return (None, None)
if ver == 'v1' and obj.strip('/'): if ver == 'v1' and obj.strip('/'):
return acc return (acc, cont)
return (None, None)
def _get_temp_url_info(self, env): def _get_temp_url_info(self, env):
""" """
@ -410,18 +470,23 @@ class TempURL(object):
inline = True inline = True
return temp_url_sig, temp_url_expires, filename, inline return temp_url_sig, temp_url_expires, filename, inline
def _get_keys(self, env, account): def _get_keys(self, env):
""" """
Returns the X-[Account|Container]-Meta-Temp-URL-Key[-2] header values Returns the X-[Account|Container]-Meta-Temp-URL-Key[-2] header values
for the account or container, or an empty list if none are set. for the account or container, or an empty list if none are set. Each
value comes as a 2-tuple (key, scope), where scope is either
CONTAINER_SCOPE or ACCOUNT_SCOPE.
Returns 0-4 elements depending on how many keys are set in the Returns 0-4 elements depending on how many keys are set in the
account's or container's metadata. account's or container's metadata.
:param env: The WSGI environment for the request. :param env: The WSGI environment for the request.
:param account: Account str. :returns: [
:returns: [X-Account-Meta-Temp-URL-Key str value if set, (X-Account-Meta-Temp-URL-Key str value, ACCOUNT_SCOPE) if set,
X-Account-Meta-Temp-URL-Key-2 str value if set] (X-Account-Meta-Temp-URL-Key-2 str value, ACCOUNT_SCOPE if set,
(X-Container-Meta-Temp-URL-Key str value, CONTAINER_SCOPE) if set,
(X-Container-Meta-Temp-URL-Key-2 str value, CONTAINER_SCOPE if set,
]
""" """
account_info = get_account_info(env, self.app, swift_source='TU') account_info = get_account_info(env, self.app, swift_source='TU')
account_keys = get_tempurl_keys_from_metadata(account_info['meta']) account_keys = get_tempurl_keys_from_metadata(account_info['meta'])
@ -430,25 +495,28 @@ class TempURL(object):
container_keys = get_tempurl_keys_from_metadata( container_keys = get_tempurl_keys_from_metadata(
container_info.get('meta', [])) container_info.get('meta', []))
return account_keys + container_keys return ([(ak, ACCOUNT_SCOPE) for ak in account_keys] +
[(ck, CONTAINER_SCOPE) for ck in container_keys])
def _get_hmacs(self, env, expires, keys, request_method=None): def _get_hmacs(self, env, expires, scoped_keys, request_method=None):
""" """
:param env: The WSGI environment for the request. :param env: The WSGI environment for the request.
:param expires: Unix timestamp as an int for when the URL :param expires: Unix timestamp as an int for when the URL
expires. expires.
:param keys: Key strings, from the X-Account-Meta-Temp-URL-Key[-2] of :param scoped_keys: (key, scope) tuples like _get_keys() returns
the account.
:param request_method: Optional override of the request in :param request_method: Optional override of the request in
the WSGI env. For example, if a HEAD the WSGI env. For example, if a HEAD
does not match, you may wish to does not match, you may wish to
override with GET to still allow the override with GET to still allow the
HEAD. HEAD.
:returns: a list of (hmac, scope) 2-tuples
""" """
if not request_method: if not request_method:
request_method = env['REQUEST_METHOD'] request_method = env['REQUEST_METHOD']
return [get_hmac( return [
request_method, env['PATH_INFO'], expires, key) for key in keys] (get_hmac(request_method, env['PATH_INFO'], expires, key), scope)
for (key, scope) in scoped_keys]
def _invalid(self, env, start_response): def _invalid(self, env, start_response):
""" """
@ -465,6 +533,22 @@ class TempURL(object):
body = '401 Unauthorized: Temp URL invalid\n' body = '401 Unauthorized: Temp URL invalid\n'
return HTTPUnauthorized(body=body)(env, start_response) return HTTPUnauthorized(body=body)(env, start_response)
def _clean_disallowed_headers(self, env, start_response):
"""
Validate the absense of disallowed headers for "unsafe" operations.
:returns: None for safe operations or swob.HTTPBadResponse if the
request includes disallowed headers.
"""
if env['REQUEST_METHOD'] in ('GET', 'HEAD', 'OPTIONS'):
return
for h in env:
if h in self.disallowed_headers:
return HTTPBadRequest(
body='The header %r is not allowed in this tempurl' %
h[len('HTTP_'):].title().replace('_', '-'))(
env, start_response)
def _clean_incoming_headers(self, env): def _clean_incoming_headers(self, env):
""" """
Removes any headers from the WSGI environment as per the Removes any headers from the WSGI environment as per the

View File

@ -0,0 +1,496 @@
# Copyright (c) 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Object versioning in swift is implemented by setting a flag on the container
to tell swift to version all objects in the container. The flag is the
``X-Versions-Location`` header on the container, and its value is the
container where the versions are stored. It is recommended to use a different
``X-Versions-Location`` container for each container that is being versioned.
When data is ``PUT`` into a versioned container (a container with the
versioning flag turned on), the existing data in the file is redirected to a
new object and the data in the ``PUT`` request is saved as the data for the
versioned object. The new object name (for the previous version) is
``<versions_container>/<length><object_name>/<timestamp>``, where ``length``
is the 3-character zero-padded hexadecimal length of the ``<object_name>`` and
``<timestamp>`` is the timestamp of when the previous version was created.
A ``GET`` to a versioned object will return the current version of the object
without having to do any request redirects or metadata lookups.
A ``POST`` to a versioned object will update the object metadata as normal,
but will not create a new version of the object. In other words, new versions
are only created when the content of the object changes.
A ``DELETE`` to a versioned object will only remove the current version of the
object. If you have 5 total versions of the object, you must delete the
object 5 times to completely remove the object.
--------------------------------------------------
How to Enable Object Versioning in a Swift Cluster
--------------------------------------------------
This middleware was written as an effort to refactor parts of the proxy server,
so this functionality was already available in previous releases and every
attempt was made to maintain backwards compatibility. To allow operators to
perform a seamless upgrade, it is not required to add the middleware to the
proxy pipeline and the flag ``allow_versions`` in the container server
configuration files is still valid. In future releases, ``allow_versions``
will be deprecated in favor of adding this middleware to the pipeline to enable
or disable the feature.
In case the middleware is added to the proxy pipeline, you must also
set ``allow_versioned_writes`` to ``True`` in the middleware options
to enable the information about this middleware to be returned in a /info
request.
Upgrade considerations: If ``allow_versioned_writes`` is set in the filter
configuration, you can leave the ``allow_versions`` flag in the container
server configuration files untouched. If you decide to disable or remove the
``allow_versions`` flag, you must re-set any existing containers that had
the 'X-Versions-Location' flag configured so that it can now be tracked by the
versioned_writes middleware.
-----------------------
Examples Using ``curl``
-----------------------
First, create a container with the ``X-Versions-Location`` header or add the
header to an existing container. Also make sure the container referenced by
the ``X-Versions-Location`` exists. In this example, the name of that
container is "versions"::
curl -i -XPUT -H "X-Auth-Token: <token>" \
-H "X-Versions-Location: versions" http://<storage_url>/container
curl -i -XPUT -H "X-Auth-Token: <token>" http://<storage_url>/versions
Create an object (the first version)::
curl -i -XPUT --data-binary 1 -H "X-Auth-Token: <token>" \
http://<storage_url>/container/myobject
Now create a new version of that object::
curl -i -XPUT --data-binary 2 -H "X-Auth-Token: <token>" \
http://<storage_url>/container/myobject
See a listing of the older versions of the object::
curl -i -H "X-Auth-Token: <token>" \
http://<storage_url>/versions?prefix=008myobject/
Now delete the current version of the object and see that the older version is
gone::
curl -i -XDELETE -H "X-Auth-Token: <token>" \
http://<storage_url>/container/myobject
curl -i -H "X-Auth-Token: <token>" \
http://<storage_url>/versions?prefix=008myobject/
---------------------------------------------------
How to Disable Object Versioning in a Swift Cluster
---------------------------------------------------
If you want to disable all functionality, set ``allow_versioned_writes`` to
``False`` in the middleware options.
Disable versioning from a container (x is any value except empty)::
curl -i -XPOST -H "X-Auth-Token: <token>" \
-H "X-Remove-Versions-Location: x" http://<storage_url>/container
"""
import time
from urllib import quote, unquote
from swift.common.utils import get_logger, Timestamp, json, \
register_swift_info, config_true_value
from swift.common.request_helpers import get_sys_meta_prefix
from swift.common.wsgi import WSGIContext, make_pre_authed_request
from swift.common.swob import Request, HTTPException
from swift.common.constraints import (
check_account_format, check_container_format, check_destination_header)
from swift.proxy.controllers.base import get_container_info
from swift.common.http import (
is_success, is_client_error, HTTP_NOT_FOUND)
from swift.common.swob import HTTPPreconditionFailed, HTTPServiceUnavailable, \
HTTPServerError
from swift.common.exceptions import (
ListingIterNotFound, ListingIterError)
class VersionedWritesContext(WSGIContext):
    """
    WSGI context that performs the actual versioning work for a single
    request: archiving the current object on PUT, restoring the previous
    version on DELETE, and translating the container sysmeta header back
    to ``X-Versions-Location`` on container responses.
    """

    def __init__(self, wsgi_app, logger):
        WSGIContext.__init__(self, wsgi_app)
        self.logger = logger

    def _listing_iter(self, account_name, lcontainer, lprefix, env):
        # Flatten the paginated listing into a single iterator of items.
        for page in self._listing_pages_iter(account_name,
                                             lcontainer, lprefix, env):
            for item in page:
                yield item

    def _listing_pages_iter(self, account_name, lcontainer, lprefix, env):
        """
        Yield JSON listing pages of the versions container filtered by
        ``lprefix``, advancing the marker after each page.

        :raises ListingIterNotFound: if the versions container is missing
        :raises HTTPPreconditionFailed: on a client-error listing response
        :raises ListingIterError: on any other listing failure
        """
        marker = ''
        while True:
            # Pre-authed GET: the user may have write access to the
            # original container but no read access to the listing.
            lreq = make_pre_authed_request(
                env, method='GET', swift_source='VW',
                path='/v1/%s/%s' % (account_name, lcontainer))
            lreq.environ['QUERY_STRING'] = \
                'format=json&prefix=%s&marker=%s' % (quote(lprefix),
                                                     quote(marker))
            lresp = lreq.get_response(self.app)
            if not is_success(lresp.status_int):
                if lresp.status_int == HTTP_NOT_FOUND:
                    raise ListingIterNotFound()
                elif is_client_error(lresp.status_int):
                    raise HTTPPreconditionFailed()
                else:
                    raise ListingIterError()
            if not lresp.body:
                break
            sublisting = json.loads(lresp.body)
            if not sublisting:
                break
            # Next page starts after the last item of this one.
            marker = sublisting[-1]['name'].encode('utf-8')
            yield sublisting

    def handle_obj_versions_put(self, req, object_versions,
                                object_name, policy_index):
        """
        Archive the existing object (if any) into the versions container
        before the client's PUT proceeds.

        :param req: original PUT request
        :param object_versions: value of the container's versions location
        :param object_name: unquoted object name
        :param policy_index: storage policy index to use for the HEAD
        :returns: None when the original request should proceed, or an
                  error response (HTTPPreconditionFailed /
                  HTTPServiceUnavailable / the HEAD response) to return
                  immediately.
        """
        ret = None

        # do a HEAD request to check object versions
        _headers = {'X-Newest': 'True',
                    'X-Backend-Storage-Policy-Index': policy_index,
                    'x-auth-token': req.headers.get('x-auth-token')}

        # make a pre_auth request in case the user has write access
        # to container, but not READ. This was allowed in previous version
        # (i.e., before middleware) so keeping the same behavior here
        head_req = make_pre_authed_request(
            req.environ, path=req.path_info,
            headers=_headers, method='HEAD', swift_source='VW')
        hresp = head_req.get_response(self.app)

        # DLO manifests are never archived; only the manifest pointer
        # would be copied, not the segments it references.
        is_dlo_manifest = 'X-Object-Manifest' in req.headers or \
                          'X-Object-Manifest' in hresp.headers

        # if there's an existing object, then copy it to
        # X-Versions-Location
        if is_success(hresp.status_int) and not is_dlo_manifest:
            lcontainer = object_versions.split('/')[0]
            # Version object name: <3-hex-digit len><object_name>/<timestamp>
            prefix_len = '%03x' % len(object_name)
            lprefix = prefix_len + object_name + '/'
            ts_source = hresp.environ.get('swift_x_timestamp')
            if ts_source is None:
                # Fall back to Last-Modified when no internal timestamp
                # was recorded on the HEAD response.
                ts_source = time.mktime(time.strptime(
                    hresp.headers['last-modified'],
                    '%a, %d %b %Y %H:%M:%S GMT'))
            new_ts = Timestamp(ts_source).internal
            vers_obj_name = lprefix + new_ts
            copy_headers = {
                'Destination': '%s/%s' % (lcontainer, vers_obj_name),
                'x-auth-token': req.headers.get('x-auth-token')}

            # COPY implementation sets X-Newest to True when it internally
            # does a GET on source object. So, we don't have to explicitly
            # set it in request headers here.
            copy_req = make_pre_authed_request(
                req.environ, path=req.path_info,
                headers=copy_headers, method='COPY', swift_source='VW')
            copy_resp = copy_req.get_response(self.app)

            if is_success(copy_resp.status_int):
                # success versioning previous existing object
                # return None and handle original request
                ret = None
            else:
                if is_client_error(copy_resp.status_int):
                    # missing container or bad permissions
                    ret = HTTPPreconditionFailed(request=req)
                else:
                    # could not copy the data, bail
                    ret = HTTPServiceUnavailable(request=req)

        else:
            if hresp.status_int == HTTP_NOT_FOUND or is_dlo_manifest:
                # nothing to version
                # return None and handle original request
                ret = None
            else:
                # if not HTTP_NOT_FOUND, return error immediately
                ret = hresp

        return ret

    def handle_obj_versions_delete(self, req, object_versions,
                                   account_name, container_name, object_name):
        """
        Handle DELETE of a versioned object: restore the most recent
        archived version (if one exists) over the current object, delete
        that archived copy, and return the response.

        :returns: a swob response — either an error, or the response to
                  the (possibly rewritten) DELETE request.
        """
        lcontainer = object_versions.split('/')[0]
        prefix_len = '%03x' % len(object_name)
        lprefix = prefix_len + object_name + '/'
        item_list = []
        try:
            for _item in self._listing_iter(account_name, lcontainer, lprefix,
                                            req.environ):
                item_list.append(_item)
        except ListingIterNotFound:
            # No versions container: fall through and just DELETE.
            pass
        except HTTPPreconditionFailed:
            return HTTPPreconditionFailed(request=req)
        except ListingIterError:
            return HTTPServerError(request=req)

        if item_list:
            # we're about to start making COPY requests - need to validate the
            # write access to the versioned container
            if 'swift.authorize' in req.environ:
                container_info = get_container_info(
                    req.environ, self.app)
                req.acl = container_info.get('write_acl')
                aresp = req.environ['swift.authorize'](req)
                if aresp:
                    return aresp

        while len(item_list) > 0:
            # Newest archived version is last in the sorted listing.
            previous_version = item_list.pop()

            # there are older versions so copy the previous version to the
            # current object and delete the previous version
            prev_obj_name = previous_version['name'].encode('utf-8')

            copy_path = '/v1/' + account_name + '/' + \
                        lcontainer + '/' + prev_obj_name

            copy_headers = {'X-Newest': 'True',
                            'Destination': container_name + '/' + object_name,
                            'x-auth-token': req.headers.get('x-auth-token')}

            copy_req = make_pre_authed_request(
                req.environ, path=copy_path,
                headers=copy_headers, method='COPY', swift_source='VW')
            copy_resp = copy_req.get_response(self.app)

            # if the version isn't there, keep trying with previous version
            if copy_resp.status_int == HTTP_NOT_FOUND:
                continue

            if not is_success(copy_resp.status_int):
                if is_client_error(copy_resp.status_int):
                    # some user error, maybe permissions
                    return HTTPPreconditionFailed(request=req)
                else:
                    # could not copy the data, bail
                    return HTTPServiceUnavailable(request=req)

            # reset these because the COPY changed them
            # (req now deletes the restored archive copy, not the object)
            new_del_req = make_pre_authed_request(
                req.environ, path=copy_path, method='DELETE',
                swift_source='VW')
            req = new_del_req

            # remove 'X-If-Delete-At', since it is not for the older copy
            if 'X-If-Delete-At' in req.headers:
                del req.headers['X-If-Delete-At']
            break

        # handle DELETE request here in case it was modified
        return req.get_response(self.app)

    def handle_container_request(self, env, start_response):
        """
        Pass the container request through and, if the response carries
        the versions-location sysmeta header, surface it to the client as
        ``X-Versions-Location``.
        """
        app_resp = self._app_call(env)
        if self._response_headers is None:
            self._response_headers = []
        sysmeta_version_hdr = get_sys_meta_prefix('container') + \
            'versions-location'
        location = ''
        for key, val in self._response_headers:
            if key.lower() == sysmeta_version_hdr:
                location = val

        if location:
            self._response_headers.extend([('X-Versions-Location', location)])

        start_response(self._response_status,
                       self._response_headers,
                       self._response_exc_info)
        return app_resp
class VersionedWritesMiddleware(object):
def __init__(self, app, conf):
    """Hold on to the wrapped WSGI app and config, and build a logger."""
    self.app, self.conf = app, conf
    self.logger = get_logger(conf, log_route='versioned_writes')
def container_request(self, req, start_response, enabled):
    """
    Handle a container request: translate the client-facing
    ``X-Versions-Location`` / ``X-Remove-Versions-Location`` headers into
    the container sysmeta header, then dispatch through the context.

    :param req: the swob Request
    :param start_response: WSGI start_response callable
    :param enabled: the middleware's ``allow_versioned_writes`` setting
    :raises HTTPPreconditionFailed: when a client tries to set a versions
        location while the feature is explicitly disabled
    """
    sysmeta_version_hdr = get_sys_meta_prefix('container') + \
        'versions-location'

    # set version location header as sysmeta
    if 'X-Versions-Location' in req.headers:
        val = req.headers.get('X-Versions-Location')
        if val:
            # differently from previous version, we are actually
            # returning an error if user tries to set versions location
            # while feature is explicitly disabled.
            if not config_true_value(enabled) and \
                    req.method in ('PUT', 'POST'):
                raise HTTPPreconditionFailed(
                    request=req, content_type='text/plain',
                    body='Versioned Writes is disabled')

            location = check_container_format(req, val)
            req.headers[sysmeta_version_hdr] = location

            # reset original header to maintain sanity
            # now only sysmeta is source of Versions Location
            req.headers['X-Versions-Location'] = ''

            # if both headers are in the same request
            # adding location takes precedence over removing
            if 'X-Remove-Versions-Location' in req.headers:
                del req.headers['X-Remove-Versions-Location']
        else:
            # empty value is the same as X-Remove-Versions-Location
            req.headers['X-Remove-Versions-Location'] = 'x'

    # handle removing versions container
    val = req.headers.get('X-Remove-Versions-Location')
    if val:
        # blank out both the sysmeta and the user-visible header
        req.headers.update({sysmeta_version_hdr: ''})
        req.headers.update({'X-Versions-Location': ''})
        del req.headers['X-Remove-Versions-Location']

    # send request and translate sysmeta headers from response
    vw_ctx = VersionedWritesContext(self.app, self.logger)
    return vw_ctx.handle_container_request(req.environ, start_response)
def object_request(self, req, version, account, container, obj,
allow_versioned_writes):
account_name = unquote(account)
container_name = unquote(container)
object_name = unquote(obj)
container_info = None
resp = None
is_enabled = config_true_value(allow_versioned_writes)
if req.method in ('PUT', 'DELETE'):
container_info = get_container_info(
req.environ, self.app)
elif req.method == 'COPY' and 'Destination' in req.headers:
if 'Destination-Account' in req.headers:
account_name = req.headers.get('Destination-Account')
account_name = check_account_format(req, account_name)
container_name, object_name = check_destination_header(req)
req.environ['PATH_INFO'] = "/%s/%s/%s/%s" % (
version, account_name, container_name, object_name)
container_info = get_container_info(
req.environ, self.app)
if not container_info:
return self.app
# To maintain backwards compatibility, container version
# location could be stored as sysmeta or not, need to check both.
# If stored as sysmeta, check if middleware is enabled. If sysmeta
# is not set, but versions property is set in container_info, then
# for backwards compatibility feature is enabled.
object_versions = container_info.get(
'sysmeta', {}).get('versions-location')
if object_versions and isinstance(object_versions, unicode):
object_versions = object_versions.encode('utf-8')
elif not object_versions:
object_versions = container_info.get('versions')
# if allow_versioned_writes is not set in the configuration files
# but 'versions' is configured, enable feature to maintain
# backwards compatibility
if not allow_versioned_writes and object_versions:
is_enabled = True
if is_enabled and object_versions:
object_versions = unquote(object_versions)
vw_ctx = VersionedWritesContext(self.app, self.logger)
if req.method in ('PUT', 'COPY'):
policy_idx = req.headers.get(
'X-Backend-Storage-Policy-Index',
container_info['storage_policy'])
resp = vw_ctx.handle_obj_versions_put(
req, object_versions, object_name, policy_idx)
else: # handle DELETE
resp = vw_ctx.handle_obj_versions_delete(
req, object_versions, account_name,
container_name, object_name)
if resp:
return resp
else:
return self.app
def __call__(self, env, start_response):
# making a duplicate, because if this is a COPY request, we will
# modify the PATH_INFO to find out if the 'Destination' is in a
# versioned container
req = Request(env.copy())
try:
(version, account, container, obj) = req.split_path(3, 4, True)
except ValueError:
return self.app(env, start_response)
# In case allow_versioned_writes is set in the filter configuration,
# the middleware becomes the authority on whether object
# versioning is enabled or not. In case it is not set, then
# the option in the container configuration is still checked
# for backwards compatibility
# For a container request, first just check if option is set,
# can be either true or false.
# If set, check if enabled when actually trying to set container
# header. If not set, let request be handled by container server
# for backwards compatibility.
# For an object request, also check if option is set (either T or F).
# If set, check if enabled when checking versions container in
# sysmeta property. If it is not set check 'versions' property in
# container_info
allow_versioned_writes = self.conf.get('allow_versioned_writes')
if allow_versioned_writes and container and not obj:
try:
return self.container_request(req, start_response,
allow_versioned_writes)
except HTTPException as error_response:
return error_response(env, start_response)
elif obj and req.method in ('PUT', 'COPY', 'DELETE'):
try:
return self.object_request(
req, version, account, container, obj,
allow_versioned_writes)(env, start_response)
except HTTPException as error_response:
return error_response(env, start_response)
else:
return self.app(env, start_response)
def filter_factory(global_conf, **local_conf):
    """paste.deploy entry point for the versioned_writes filter.

    Merges the filter-local options over the global paste-deploy options
    and, when allow_versioned_writes is truthy, registers the feature for
    discovery via the /info endpoint.

    :param global_conf: global paste-deploy configuration dict
    :param local_conf: filter-section overrides
    :returns: a callable that wraps a WSGI app in the middleware
    """
    conf = dict(global_conf, **local_conf)
    if config_true_value(conf.get('allow_versioned_writes')):
        register_swift_info('versioned_writes')
    return lambda app: VersionedWritesMiddleware(app, conf)

View File

@ -480,8 +480,8 @@ class Range(object):
After initialization, "range.ranges" is populated with a list After initialization, "range.ranges" is populated with a list
of (start, end) tuples denoting the requested ranges. of (start, end) tuples denoting the requested ranges.
If there were any syntactically-invalid byte-range-spec values, If there were any syntactically-invalid byte-range-spec values, the
"range.ranges" will be an empty list, per the relevant RFC: constructor will raise a ValueError, per the relevant RFC:
"The recipient of a byte-range-set that includes one or more syntactically "The recipient of a byte-range-set that includes one or more syntactically
invalid byte-range-spec values MUST ignore the header field that includes invalid byte-range-spec values MUST ignore the header field that includes

View File

@ -2268,6 +2268,7 @@ class GreenAsyncPile(object):
size = size_or_pool size = size_or_pool
self._responses = eventlet.queue.LightQueue(size) self._responses = eventlet.queue.LightQueue(size)
self._inflight = 0 self._inflight = 0
self._pending = 0
def _run_func(self, func, args, kwargs): def _run_func(self, func, args, kwargs):
try: try:
@ -2279,6 +2280,7 @@ class GreenAsyncPile(object):
""" """
Spawn a job in a green thread on the pile. Spawn a job in a green thread on the pile.
""" """
self._pending += 1
self._inflight += 1 self._inflight += 1
self._pool.spawn(self._run_func, func, args, kwargs) self._pool.spawn(self._run_func, func, args, kwargs)
@ -2303,12 +2305,13 @@ class GreenAsyncPile(object):
def next(self): def next(self):
try: try:
return self._responses.get_nowait() rv = self._responses.get_nowait()
except Empty: except Empty:
if self._inflight == 0: if self._inflight == 0:
raise StopIteration() raise StopIteration()
else: rv = self._responses.get()
return self._responses.get() self._pending -= 1
return rv
class ModifiedParseResult(ParseResult): class ModifiedParseResult(ParseResult):

View File

@ -1084,13 +1084,14 @@ def make_env(env, method=None, path=None, agent='Swift', query_string=None,
:returns: Fresh WSGI environment. :returns: Fresh WSGI environment.
""" """
newenv = {} newenv = {}
for name in ('eventlet.posthooks', 'HTTP_USER_AGENT', 'HTTP_HOST', for name in ('HTTP_USER_AGENT', 'HTTP_HOST', 'PATH_INFO',
'PATH_INFO', 'QUERY_STRING', 'REMOTE_USER', 'REQUEST_METHOD', 'QUERY_STRING', 'REMOTE_USER', 'REQUEST_METHOD',
'SCRIPT_NAME', 'SERVER_NAME', 'SERVER_PORT', 'SCRIPT_NAME', 'SERVER_NAME', 'SERVER_PORT',
'HTTP_ORIGIN', 'HTTP_ACCESS_CONTROL_REQUEST_METHOD', 'HTTP_ORIGIN', 'HTTP_ACCESS_CONTROL_REQUEST_METHOD',
'SERVER_PROTOCOL', 'swift.cache', 'swift.source', 'SERVER_PROTOCOL', 'swift.cache', 'swift.source',
'swift.trans_id', 'swift.authorize_override', 'swift.trans_id', 'swift.authorize_override',
'swift.authorize', 'swift.metadata.checked'): 'swift.authorize', 'HTTP_X_USER_ID', 'HTTP_X_PROJECT_ID',
'swift.metadata.checked'):
if name in env: if name in env:
newenv[name] = env[name] newenv[name] = env[name]
if method: if method:

View File

@ -6,9 +6,9 @@
#, fuzzy #, fuzzy
msgid "" msgid ""
msgstr "" msgstr ""
"Project-Id-Version: swift 2.3.1.dev213\n" "Project-Id-Version: swift 2.3.1.dev243\n"
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
"POT-Creation-Date: 2015-07-29 06:35+0000\n" "POT-Creation-Date: 2015-08-04 06:29+0000\n"
"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n" "Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
"Language-Team: LANGUAGE <LL@li.org>\n" "Language-Team: LANGUAGE <LL@li.org>\n"
@ -53,18 +53,18 @@ msgid ""
" %(key)s across policies (%(sum)s)" " %(key)s across policies (%(sum)s)"
msgstr "" msgstr ""
#: swift/account/auditor.py:149 #: swift/account/auditor.py:148
#, python-format #, python-format
msgid "Audit Failed for %s: %s" msgid "Audit Failed for %s: %s"
msgstr "" msgstr ""
#: swift/account/auditor.py:153 #: swift/account/auditor.py:152
#, python-format #, python-format
msgid "ERROR Could not get account info %s" msgid "ERROR Could not get account info %s"
msgstr "" msgstr ""
#: swift/account/reaper.py:134 swift/common/utils.py:2147 #: swift/account/reaper.py:134 swift/common/utils.py:2147
#: swift/obj/diskfile.py:480 swift/obj/updater.py:88 swift/obj/updater.py:131 #: swift/obj/diskfile.py:296 swift/obj/updater.py:88 swift/obj/updater.py:131
#, python-format #, python-format
msgid "Skipping %s as it is not mounted" msgid "Skipping %s as it is not mounted"
msgstr "" msgstr ""
@ -154,7 +154,7 @@ msgid "Exception with objects for container %(container)s for account %(account)
msgstr "" msgstr ""
#: swift/account/server.py:275 swift/container/server.py:586 #: swift/account/server.py:275 swift/container/server.py:586
#: swift/obj/server.py:914 #: swift/obj/server.py:911
#, python-format #, python-format
msgid "ERROR __call__ error with %(method)s %(path)s " msgid "ERROR __call__ error with %(method)s %(path)s "
msgstr "" msgstr ""
@ -732,8 +732,8 @@ msgstr ""
msgid "ERROR: Failed to get paths to drive partitions: %s" msgid "ERROR: Failed to get paths to drive partitions: %s"
msgstr "" msgstr ""
#: swift/container/updater.py:92 swift/obj/reconstructor.py:815 #: swift/container/updater.py:92 swift/obj/reconstructor.py:812
#: swift/obj/replicator.py:498 swift/obj/replicator.py:586 #: swift/obj/replicator.py:497 swift/obj/replicator.py:585
#, python-format #, python-format
msgid "%s is not mounted" msgid "%s is not mounted"
msgstr "" msgstr ""
@ -845,53 +845,53 @@ msgstr ""
msgid "ERROR auditing: %s" msgid "ERROR auditing: %s"
msgstr "" msgstr ""
#: swift/obj/diskfile.py:327 swift/obj/diskfile.py:2320 #: swift/obj/diskfile.py:306 swift/obj/updater.py:162
#, python-format
msgid "Quarantined %(hsh_path)s to %(quar_path)s because it is not a directory"
msgstr ""
#: swift/obj/diskfile.py:418 swift/obj/diskfile.py:2388
msgid "Error hashing suffix"
msgstr ""
#: swift/obj/diskfile.py:490 swift/obj/updater.py:162
#, python-format #, python-format
msgid "Directory %r does not map to a valid policy (%s)" msgid "Directory %r does not map to a valid policy (%s)"
msgstr "" msgstr ""
#: swift/obj/diskfile.py:741 #: swift/obj/diskfile.py:619
#, python-format
msgid "Quarantined %(hsh_path)s to %(quar_path)s because it is not a directory"
msgstr ""
#: swift/obj/diskfile.py:700
msgid "Error hashing suffix"
msgstr ""
#: swift/obj/diskfile.py:821
#, python-format #, python-format
msgid "Quarantined %(object_path)s to %(quar_path)s because it is not a directory" msgid "Quarantined %(object_path)s to %(quar_path)s because it is not a directory"
msgstr "" msgstr ""
#: swift/obj/diskfile.py:941 #: swift/obj/diskfile.py:1035
#, python-format #, python-format
msgid "Problem cleaning up %s" msgid "Problem cleaning up %s"
msgstr "" msgstr ""
#: swift/obj/diskfile.py:1259 #: swift/obj/diskfile.py:1342
#, python-format #, python-format
msgid "ERROR DiskFile %(data_file)s close failure: %(exc)s : %(stack)s" msgid "ERROR DiskFile %(data_file)s close failure: %(exc)s : %(stack)s"
msgstr "" msgstr ""
#: swift/obj/diskfile.py:1549 #: swift/obj/diskfile.py:1612
#, python-format #, python-format
msgid "" msgid ""
"Client path %(client)s does not match path stored in object metadata " "Client path %(client)s does not match path stored in object metadata "
"%(meta)s" "%(meta)s"
msgstr "" msgstr ""
#: swift/obj/diskfile.py:1805 #: swift/obj/diskfile.py:2048
#, python-format #, python-format
msgid "No space left on device for %s (%s)" msgid "No space left on device for %s (%s)"
msgstr "" msgstr ""
#: swift/obj/diskfile.py:1814 #: swift/obj/diskfile.py:2057
#, python-format #, python-format
msgid "Problem cleaning up %s (%s)" msgid "Problem cleaning up %s (%s)"
msgstr "" msgstr ""
#: swift/obj/diskfile.py:1817 #: swift/obj/diskfile.py:2060
#, python-format #, python-format
msgid "Problem writing durable state file %s (%s)" msgid "Problem writing durable state file %s (%s)"
msgstr "" msgstr ""
@ -925,7 +925,7 @@ msgstr ""
msgid "Exception while deleting object %s %s %s" msgid "Exception while deleting object %s %s %s"
msgstr "" msgstr ""
#: swift/obj/reconstructor.py:208 swift/obj/reconstructor.py:490 #: swift/obj/reconstructor.py:208 swift/obj/reconstructor.py:492
#, python-format #, python-format
msgid "Invalid response %(resp)s from %(full_path)s" msgid "Invalid response %(resp)s from %(full_path)s"
msgstr "" msgstr ""
@ -948,14 +948,14 @@ msgid ""
"%(time).2fs (%(rate).2f/sec, %(remaining)s remaining)" "%(time).2fs (%(rate).2f/sec, %(remaining)s remaining)"
msgstr "" msgstr ""
#: swift/obj/reconstructor.py:369 swift/obj/replicator.py:430 #: swift/obj/reconstructor.py:369 swift/obj/replicator.py:429
#, python-format #, python-format
msgid "" msgid ""
"%(checked)d suffixes checked - %(hashed).2f%% hashed, %(synced).2f%% " "%(checked)d suffixes checked - %(hashed).2f%% hashed, %(synced).2f%% "
"synced" "synced"
msgstr "" msgstr ""
#: swift/obj/reconstructor.py:376 swift/obj/replicator.py:437 #: swift/obj/reconstructor.py:376 swift/obj/replicator.py:436
#, python-format #, python-format
msgid "Partition times: max %(max).4fs, min %(min).4fs, med %(med).4fs" msgid "Partition times: max %(max).4fs, min %(min).4fs, med %(med).4fs"
msgstr "" msgstr ""
@ -965,7 +965,7 @@ msgstr ""
msgid "Nothing reconstructed for %s seconds." msgid "Nothing reconstructed for %s seconds."
msgstr "" msgstr ""
#: swift/obj/reconstructor.py:413 swift/obj/replicator.py:474 #: swift/obj/reconstructor.py:413 swift/obj/replicator.py:473
msgid "Lockup detected.. killing live coros." msgid "Lockup detected.. killing live coros."
msgstr "" msgstr ""
@ -979,122 +979,122 @@ msgstr ""
msgid "%s responded as unmounted" msgid "%s responded as unmounted"
msgstr "" msgstr ""
#: swift/obj/reconstructor.py:886 swift/obj/replicator.py:306 #: swift/obj/reconstructor.py:883 swift/obj/replicator.py:305
#, python-format #, python-format
msgid "Removing partition: %s" msgid "Removing partition: %s"
msgstr "" msgstr ""
#: swift/obj/reconstructor.py:902 #: swift/obj/reconstructor.py:899
msgid "Ring change detected. Aborting current reconstruction pass." msgid "Ring change detected. Aborting current reconstruction pass."
msgstr "" msgstr ""
#: swift/obj/reconstructor.py:921 #: swift/obj/reconstructor.py:918
msgid "Exception in top-levelreconstruction loop" msgid "Exception in top-levelreconstruction loop"
msgstr "" msgstr ""
#: swift/obj/reconstructor.py:931 #: swift/obj/reconstructor.py:928
msgid "Running object reconstructor in script mode." msgid "Running object reconstructor in script mode."
msgstr "" msgstr ""
#: swift/obj/reconstructor.py:940 #: swift/obj/reconstructor.py:937
#, python-format #, python-format
msgid "Object reconstruction complete (once). (%.02f minutes)" msgid "Object reconstruction complete (once). (%.02f minutes)"
msgstr "" msgstr ""
#: swift/obj/reconstructor.py:947 #: swift/obj/reconstructor.py:944
msgid "Starting object reconstructor in daemon mode." msgid "Starting object reconstructor in daemon mode."
msgstr "" msgstr ""
#: swift/obj/reconstructor.py:951 #: swift/obj/reconstructor.py:948
msgid "Starting object reconstruction pass." msgid "Starting object reconstruction pass."
msgstr "" msgstr ""
#: swift/obj/reconstructor.py:956 #: swift/obj/reconstructor.py:953
#, python-format #, python-format
msgid "Object reconstruction complete. (%.02f minutes)" msgid "Object reconstruction complete. (%.02f minutes)"
msgstr "" msgstr ""
#: swift/obj/replicator.py:145 #: swift/obj/replicator.py:144
#, python-format #, python-format
msgid "Killing long-running rsync: %s" msgid "Killing long-running rsync: %s"
msgstr "" msgstr ""
#: swift/obj/replicator.py:159 #: swift/obj/replicator.py:158
#, python-format #, python-format
msgid "Bad rsync return code: %(ret)d <- %(args)s" msgid "Bad rsync return code: %(ret)d <- %(args)s"
msgstr "" msgstr ""
#: swift/obj/replicator.py:166 swift/obj/replicator.py:170 #: swift/obj/replicator.py:165 swift/obj/replicator.py:169
#, python-format #, python-format
msgid "Successful rsync of %(src)s at %(dst)s (%(time).03f)" msgid "Successful rsync of %(src)s at %(dst)s (%(time).03f)"
msgstr "" msgstr ""
#: swift/obj/replicator.py:292 #: swift/obj/replicator.py:291
#, python-format #, python-format
msgid "Removing %s objects" msgid "Removing %s objects"
msgstr "" msgstr ""
#: swift/obj/replicator.py:300 #: swift/obj/replicator.py:299
msgid "Error syncing handoff partition" msgid "Error syncing handoff partition"
msgstr "" msgstr ""
#: swift/obj/replicator.py:362 #: swift/obj/replicator.py:361
#, python-format #, python-format
msgid "%(ip)s/%(device)s responded as unmounted" msgid "%(ip)s/%(device)s responded as unmounted"
msgstr "" msgstr ""
#: swift/obj/replicator.py:367 #: swift/obj/replicator.py:366
#, python-format #, python-format
msgid "Invalid response %(resp)s from %(ip)s" msgid "Invalid response %(resp)s from %(ip)s"
msgstr "" msgstr ""
#: swift/obj/replicator.py:402 #: swift/obj/replicator.py:401
#, python-format #, python-format
msgid "Error syncing with node: %s" msgid "Error syncing with node: %s"
msgstr "" msgstr ""
#: swift/obj/replicator.py:406 #: swift/obj/replicator.py:405
msgid "Error syncing partition" msgid "Error syncing partition"
msgstr "" msgstr ""
#: swift/obj/replicator.py:419 #: swift/obj/replicator.py:418
#, python-format #, python-format
msgid "" msgid ""
"%(replicated)d/%(total)d (%(percentage).2f%%) partitions replicated in " "%(replicated)d/%(total)d (%(percentage).2f%%) partitions replicated in "
"%(time).2fs (%(rate).2f/sec, %(remaining)s remaining)" "%(time).2fs (%(rate).2f/sec, %(remaining)s remaining)"
msgstr "" msgstr ""
#: swift/obj/replicator.py:445 #: swift/obj/replicator.py:444
#, python-format #, python-format
msgid "Nothing replicated for %s seconds." msgid "Nothing replicated for %s seconds."
msgstr "" msgstr ""
#: swift/obj/replicator.py:589 #: swift/obj/replicator.py:588
msgid "Ring change detected. Aborting current replication pass." msgid "Ring change detected. Aborting current replication pass."
msgstr "" msgstr ""
#: swift/obj/replicator.py:610 #: swift/obj/replicator.py:609
msgid "Exception in top-level replication loop" msgid "Exception in top-level replication loop"
msgstr "" msgstr ""
#: swift/obj/replicator.py:619 #: swift/obj/replicator.py:618
msgid "Running object replicator in script mode." msgid "Running object replicator in script mode."
msgstr "" msgstr ""
#: swift/obj/replicator.py:637 #: swift/obj/replicator.py:636
#, python-format #, python-format
msgid "Object replication complete (once). (%.02f minutes)" msgid "Object replication complete (once). (%.02f minutes)"
msgstr "" msgstr ""
#: swift/obj/replicator.py:644 #: swift/obj/replicator.py:643
msgid "Starting object replicator in daemon mode." msgid "Starting object replicator in daemon mode."
msgstr "" msgstr ""
#: swift/obj/replicator.py:648 #: swift/obj/replicator.py:647
msgid "Starting object replication pass." msgid "Starting object replication pass."
msgstr "" msgstr ""
#: swift/obj/replicator.py:653 #: swift/obj/replicator.py:652
#, python-format #, python-format
msgid "Object replication complete. (%.02f minutes)" msgid "Object replication complete. (%.02f minutes)"
msgstr "" msgstr ""

View File

@ -8,10 +8,10 @@ msgid ""
msgstr "" msgstr ""
"Project-Id-Version: Swift\n" "Project-Id-Version: Swift\n"
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
"POT-Creation-Date: 2015-07-29 06:35+0000\n" "POT-Creation-Date: 2015-08-04 06:29+0000\n"
"PO-Revision-Date: 2015-07-28 00:33+0000\n" "PO-Revision-Date: 2015-07-28 00:33+0000\n"
"Last-Translator: openstackjenkins <jenkins@openstack.org>\n" "Last-Translator: openstackjenkins <jenkins@openstack.org>\n"
"Language-Team: Chinese (China) (http://www.transifex.com/projects/p/swift/" "Language-Team: Chinese (China) (http://www.transifex.com/openstack/swift/"
"language/zh_CN/)\n" "language/zh_CN/)\n"
"Plural-Forms: nplurals=1; plural=0\n" "Plural-Forms: nplurals=1; plural=0\n"
"MIME-Version: 1.0\n" "MIME-Version: 1.0\n"

View File

@ -331,7 +331,7 @@ class ObjectAuditor(Daemon):
try: try:
self.audit_loop(parent, zbo_fps, **kwargs) self.audit_loop(parent, zbo_fps, **kwargs)
except (Exception, Timeout) as err: except (Exception, Timeout) as err:
self.logger.exception(_('ERROR auditing: %s' % err)) self.logger.exception(_('ERROR auditing: %s'), err)
self._sleep() self._sleep()
def run_once(self, *args, **kwargs): def run_once(self, *args, **kwargs):
@ -352,4 +352,4 @@ class ObjectAuditor(Daemon):
self.audit_loop(parent, zbo_fps, override_devices=override_devices, self.audit_loop(parent, zbo_fps, override_devices=override_devices,
**kwargs) **kwargs)
except (Exception, Timeout) as err: except (Exception, Timeout) as err:
self.logger.exception(_('ERROR auditing: %s' % err)) self.logger.exception(_('ERROR auditing: %s'), err)

View File

@ -626,16 +626,18 @@ class BaseDiskFileManager(object):
os.rmdir(hsh_path) os.rmdir(hsh_path)
except OSError: except OSError:
pass pass
# we just deleted this hsh_path, why are we waiting
# until the next suffix hash to raise PathNotDir so that
# this suffix will get del'd from the suffix hashes?
for filename in files: for filename in files:
key, value = mapper(filename) key, value = mapper(filename)
hashes[key].update(value) hashes[key].update(value)
try: try:
os.rmdir(path) os.rmdir(path)
except OSError: except OSError as e:
pass if e.errno == errno.ENOENT:
raise PathNotDir()
else:
# if we remove it, pretend like it wasn't there to begin with so
# that the suffix key gets removed
raise PathNotDir()
return hashes return hashes
def _hash_suffix(self, path, reclaim_age): def _hash_suffix(self, path, reclaim_age):
@ -1362,6 +1364,10 @@ class BaseDiskFile(object):
The arguments to the constructor are considered implementation The arguments to the constructor are considered implementation
specific. The API does not define the constructor arguments. specific. The API does not define the constructor arguments.
The following path format is used for data file locations:
<devices_path/<device_dir>/<datadir>/<partdir>/<suffixdir>/<hashdir>/
<datafile>.<ext>
:param mgr: associated DiskFileManager instance :param mgr: associated DiskFileManager instance
:param device_path: path to the target device or drive :param device_path: path to the target device or drive
:param threadpool: thread pool to use for blocking operations :param threadpool: thread pool to use for blocking operations

View File

@ -53,13 +53,13 @@ class ObjectReplicator(Daemon):
caller to do this in a loop. caller to do this in a loop.
""" """
def __init__(self, conf): def __init__(self, conf, logger=None):
""" """
:param conf: configuration object obtained from ConfigParser :param conf: configuration object obtained from ConfigParser
:param logger: logging object :param logger: logging object
""" """
self.conf = conf self.conf = conf
self.logger = get_logger(conf, log_route='object-replicator') self.logger = logger or get_logger(conf, log_route='object-replicator')
self.devices_dir = conf.get('devices', '/srv/node') self.devices_dir = conf.get('devices', '/srv/node')
self.mount_check = config_true_value(conf.get('mount_check', 'true')) self.mount_check = config_true_value(conf.get('mount_check', 'true'))
self.vm_test_mode = config_true_value(conf.get('vm_test_mode', 'no')) self.vm_test_mode = config_true_value(conf.get('vm_test_mode', 'no'))
@ -90,7 +90,7 @@ class ObjectReplicator(Daemon):
self.node_timeout = float(conf.get('node_timeout', 10)) self.node_timeout = float(conf.get('node_timeout', 10))
self.sync_method = getattr(self, conf.get('sync_method') or 'rsync') self.sync_method = getattr(self, conf.get('sync_method') or 'rsync')
self.network_chunk_size = int(conf.get('network_chunk_size', 65536)) self.network_chunk_size = int(conf.get('network_chunk_size', 65536))
self.headers = { self.default_headers = {
'Content-Length': '0', 'Content-Length': '0',
'user-agent': 'object-replicator %s' % os.getpid()} 'user-agent': 'object-replicator %s' % os.getpid()}
self.rsync_error_log_line_length = \ self.rsync_error_log_line_length = \
@ -99,8 +99,37 @@ class ObjectReplicator(Daemon):
False)) False))
self.handoff_delete = config_auto_int_value( self.handoff_delete = config_auto_int_value(
conf.get('handoff_delete', 'auto'), 0) conf.get('handoff_delete', 'auto'), 0)
if any((self.handoff_delete, self.handoffs_first)):
self.logger.warn('Handoff only mode is not intended for normal '
'operation, please disable handoffs_first and '
'handoff_delete before the next '
'normal rebalance')
self._diskfile_mgr = DiskFileManager(conf, self.logger) self._diskfile_mgr = DiskFileManager(conf, self.logger)
def _zero_stats(self):
"""Zero out the stats."""
self.stats = {'attempted': 0, 'success': 0, 'failure': 0,
'hashmatch': 0, 'rsync': 0, 'remove': 0,
'start': time.time(), 'failure_nodes': {}}
def _add_failure_stats(self, failure_devs_info):
for node, dev in failure_devs_info:
self.stats['failure'] += 1
failure_devs = self.stats['failure_nodes'].setdefault(node, {})
failure_devs.setdefault(dev, 0)
failure_devs[dev] += 1
def _get_my_replication_ips(self):
my_replication_ips = set()
ips = whataremyips()
for policy in POLICIES:
self.load_object_ring(policy)
for local_dev in [dev for dev in policy.object_ring.devs
if dev and dev['replication_ip'] in ips and
dev['replication_port'] == self.port]:
my_replication_ips.add(local_dev['replication_ip'])
return list(my_replication_ips)
# Just exists for doc anchor point # Just exists for doc anchor point
def sync(self, node, job, suffixes, *args, **kwargs): def sync(self, node, job, suffixes, *args, **kwargs):
""" """
@ -242,7 +271,9 @@ class ObjectReplicator(Daemon):
if len(suff) == 3 and isdir(join(path, suff))] if len(suff) == 3 and isdir(join(path, suff))]
self.replication_count += 1 self.replication_count += 1
self.logger.increment('partition.delete.count.%s' % (job['device'],)) self.logger.increment('partition.delete.count.%s' % (job['device'],))
self.headers['X-Backend-Storage-Policy-Index'] = int(job['policy']) headers = dict(self.default_headers)
headers['X-Backend-Storage-Policy-Index'] = int(job['policy'])
failure_devs_info = set()
begin = time.time() begin = time.time()
try: try:
responses = [] responses = []
@ -251,6 +282,7 @@ class ObjectReplicator(Daemon):
delete_objs = None delete_objs = None
if suffixes: if suffixes:
for node in job['nodes']: for node in job['nodes']:
self.stats['rsync'] += 1
kwargs = {} kwargs = {}
if node['region'] in synced_remote_regions and \ if node['region'] in synced_remote_regions and \
self.conf.get('sync_method', 'rsync') == 'ssync': self.conf.get('sync_method', 'rsync') == 'ssync':
@ -266,11 +298,14 @@ class ObjectReplicator(Daemon):
node['replication_ip'], node['replication_ip'],
node['replication_port'], node['replication_port'],
node['device'], job['partition'], 'REPLICATE', node['device'], job['partition'], 'REPLICATE',
'/' + '-'.join(suffixes), headers=self.headers) '/' + '-'.join(suffixes), headers=headers)
conn.getresponse().read() conn.getresponse().read()
if node['region'] != job['region']: if node['region'] != job['region']:
synced_remote_regions[node['region']] = \ synced_remote_regions[node['region']] = \
candidates.keys() candidates.keys()
else:
failure_devs_info.add((node['replication_ip'],
node['device']))
responses.append(success) responses.append(success)
for region, cand_objs in synced_remote_regions.items(): for region, cand_objs in synced_remote_regions.items():
if delete_objs is None: if delete_objs is None:
@ -286,11 +321,23 @@ class ObjectReplicator(Daemon):
delete_handoff = len(responses) == len(job['nodes']) and \ delete_handoff = len(responses) == len(job['nodes']) and \
all(responses) all(responses)
if delete_handoff: if delete_handoff:
self.stats['remove'] += 1
if (self.conf.get('sync_method', 'rsync') == 'ssync' and if (self.conf.get('sync_method', 'rsync') == 'ssync' and
delete_objs is not None): delete_objs is not None):
self.logger.info(_("Removing %s objects"), self.logger.info(_("Removing %s objects"),
len(delete_objs)) len(delete_objs))
self.delete_handoff_objs(job, delete_objs) _junk, error_paths = self.delete_handoff_objs(
job, delete_objs)
# if replication works for a hand-off device and it failed,
# the remote devices which are target of the replication
# from the hand-off device will be marked. Because cleanup
# after replication failed means replicator needs to
# replicate again with the same info.
if error_paths:
failure_devs_info.update(
[(failure_dev['replication_ip'],
failure_dev['device'])
for failure_dev in job['nodes']])
else: else:
self.delete_partition(job['path']) self.delete_partition(job['path'])
elif not suffixes: elif not suffixes:
@ -298,14 +345,21 @@ class ObjectReplicator(Daemon):
except (Exception, Timeout): except (Exception, Timeout):
self.logger.exception(_("Error syncing handoff partition")) self.logger.exception(_("Error syncing handoff partition"))
finally: finally:
target_devs_info = set([(target_dev['replication_ip'],
target_dev['device'])
for target_dev in job['nodes']])
self.stats['success'] += len(target_devs_info - failure_devs_info)
self._add_failure_stats(failure_devs_info)
self.partition_times.append(time.time() - begin) self.partition_times.append(time.time() - begin)
self.logger.timing_since('partition.delete.timing', begin) self.logger.timing_since('partition.delete.timing', begin)
def delete_partition(self, path): def delete_partition(self, path):
self.logger.info(_("Removing partition: %s"), path) self.logger.info(_("Removing partition: %s"), path)
tpool.execute(shutil.rmtree, path, ignore_errors=True) tpool.execute(shutil.rmtree, path)
def delete_handoff_objs(self, job, delete_objs): def delete_handoff_objs(self, job, delete_objs):
success_paths = []
error_paths = []
for object_hash in delete_objs: for object_hash in delete_objs:
object_path = storage_directory(job['obj_path'], job['partition'], object_path = storage_directory(job['obj_path'], job['partition'],
object_hash) object_hash)
@ -313,11 +367,14 @@ class ObjectReplicator(Daemon):
suffix_dir = dirname(object_path) suffix_dir = dirname(object_path)
try: try:
os.rmdir(suffix_dir) os.rmdir(suffix_dir)
success_paths.append(object_path)
except OSError as e: except OSError as e:
if e.errno not in (errno.ENOENT, errno.ENOTEMPTY): if e.errno not in (errno.ENOENT, errno.ENOTEMPTY):
error_paths.append(object_path)
self.logger.exception( self.logger.exception(
"Unexpected error trying to cleanup suffix dir:%r", "Unexpected error trying to cleanup suffix dir:%r",
suffix_dir) suffix_dir)
return success_paths, error_paths
def update(self, job): def update(self, job):
""" """
@ -327,7 +384,10 @@ class ObjectReplicator(Daemon):
""" """
self.replication_count += 1 self.replication_count += 1
self.logger.increment('partition.update.count.%s' % (job['device'],)) self.logger.increment('partition.update.count.%s' % (job['device'],))
self.headers['X-Backend-Storage-Policy-Index'] = int(job['policy']) headers = dict(self.default_headers)
headers['X-Backend-Storage-Policy-Index'] = int(job['policy'])
target_devs_info = set()
failure_devs_info = set()
begin = time.time() begin = time.time()
try: try:
hashed, local_hash = tpool_reraise( hashed, local_hash = tpool_reraise(
@ -346,6 +406,7 @@ class ObjectReplicator(Daemon):
while attempts_left > 0: while attempts_left > 0:
# If this throws StopIteration it will be caught way below # If this throws StopIteration it will be caught way below
node = next(nodes) node = next(nodes)
target_devs_info.add((node['replication_ip'], node['device']))
attempts_left -= 1 attempts_left -= 1
# if we have already synced to this remote region, # if we have already synced to this remote region,
# don't sync again on this replication pass # don't sync again on this replication pass
@ -356,17 +417,21 @@ class ObjectReplicator(Daemon):
resp = http_connect( resp = http_connect(
node['replication_ip'], node['replication_port'], node['replication_ip'], node['replication_port'],
node['device'], job['partition'], 'REPLICATE', node['device'], job['partition'], 'REPLICATE',
'', headers=self.headers).getresponse() '', headers=headers).getresponse()
if resp.status == HTTP_INSUFFICIENT_STORAGE: if resp.status == HTTP_INSUFFICIENT_STORAGE:
self.logger.error(_('%(ip)s/%(device)s responded' self.logger.error(_('%(ip)s/%(device)s responded'
' as unmounted'), node) ' as unmounted'), node)
attempts_left += 1 attempts_left += 1
failure_devs_info.add((node['replication_ip'],
node['device']))
continue continue
if resp.status != HTTP_OK: if resp.status != HTTP_OK:
self.logger.error(_("Invalid response %(resp)s " self.logger.error(_("Invalid response %(resp)s "
"from %(ip)s"), "from %(ip)s"),
{'resp': resp.status, {'resp': resp.status,
'ip': node['replication_ip']}) 'ip': node['replication_ip']})
failure_devs_info.add((node['replication_ip'],
node['device']))
continue continue
remote_hash = pickle.loads(resp.read()) remote_hash = pickle.loads(resp.read())
del resp del resp
@ -374,6 +439,7 @@ class ObjectReplicator(Daemon):
local_hash[suffix] != local_hash[suffix] !=
remote_hash.get(suffix, -1)] remote_hash.get(suffix, -1)]
if not suffixes: if not suffixes:
self.stats['hashmatch'] += 1
continue continue
hashed, recalc_hash = tpool_reraise( hashed, recalc_hash = tpool_reraise(
self._diskfile_mgr._get_hashes, self._diskfile_mgr._get_hashes,
@ -384,26 +450,35 @@ class ObjectReplicator(Daemon):
suffixes = [suffix for suffix in local_hash if suffixes = [suffix for suffix in local_hash if
local_hash[suffix] != local_hash[suffix] !=
remote_hash.get(suffix, -1)] remote_hash.get(suffix, -1)]
self.stats['rsync'] += 1
success, _junk = self.sync(node, job, suffixes) success, _junk = self.sync(node, job, suffixes)
with Timeout(self.http_timeout): with Timeout(self.http_timeout):
conn = http_connect( conn = http_connect(
node['replication_ip'], node['replication_port'], node['replication_ip'], node['replication_port'],
node['device'], job['partition'], 'REPLICATE', node['device'], job['partition'], 'REPLICATE',
'/' + '-'.join(suffixes), '/' + '-'.join(suffixes),
headers=self.headers) headers=headers)
conn.getresponse().read() conn.getresponse().read()
if not success:
failure_devs_info.add((node['replication_ip'],
node['device']))
# add only remote region when replicate succeeded # add only remote region when replicate succeeded
if success and node['region'] != job['region']: if success and node['region'] != job['region']:
synced_remote_regions.add(node['region']) synced_remote_regions.add(node['region'])
self.suffix_sync += len(suffixes) self.suffix_sync += len(suffixes)
self.logger.update_stats('suffix.syncs', len(suffixes)) self.logger.update_stats('suffix.syncs', len(suffixes))
except (Exception, Timeout): except (Exception, Timeout):
failure_devs_info.add((node['replication_ip'],
node['device']))
self.logger.exception(_("Error syncing with node: %s") % self.logger.exception(_("Error syncing with node: %s") %
node) node)
self.suffix_count += len(local_hash) self.suffix_count += len(local_hash)
except (Exception, Timeout): except (Exception, Timeout):
failure_devs_info.update(target_devs_info)
self.logger.exception(_("Error syncing partition")) self.logger.exception(_("Error syncing partition"))
finally: finally:
self.stats['success'] += len(target_devs_info - failure_devs_info)
self._add_failure_stats(failure_devs_info)
self.partition_times.append(time.time() - begin) self.partition_times.append(time.time() - begin)
self.logger.timing_since('partition.update.timing', begin) self.logger.timing_since('partition.update.timing', begin)
@ -481,6 +556,9 @@ class ObjectReplicator(Daemon):
using replication style storage policy using replication style storage policy
""" """
jobs = [] jobs = []
self.all_devs_info.update(
[(dev['replication_ip'], dev['device'])
for dev in policy.object_ring.devs if dev])
data_dir = get_data_dir(policy) data_dir = get_data_dir(policy)
for local_dev in [dev for dev in policy.object_ring.devs for local_dev in [dev for dev in policy.object_ring.devs
if (dev if (dev
@ -494,6 +572,11 @@ class ObjectReplicator(Daemon):
obj_path = join(dev_path, data_dir) obj_path = join(dev_path, data_dir)
tmp_path = join(dev_path, get_tmp_dir(policy)) tmp_path = join(dev_path, get_tmp_dir(policy))
if self.mount_check and not ismount(dev_path): if self.mount_check and not ismount(dev_path):
self._add_failure_stats(
[(failure_dev['replication_ip'],
failure_dev['device'])
for failure_dev in policy.object_ring.devs
if failure_dev])
self.logger.warn(_('%s is not mounted'), local_dev['device']) self.logger.warn(_('%s is not mounted'), local_dev['device'])
continue continue
unlink_older_than(tmp_path, time.time() - self.reclaim_age) unlink_older_than(tmp_path, time.time() - self.reclaim_age)
@ -508,6 +591,7 @@ class ObjectReplicator(Daemon):
and partition not in override_partitions): and partition not in override_partitions):
continue continue
part_nodes = None
try: try:
job_path = join(obj_path, partition) job_path = join(obj_path, partition)
part_nodes = policy.object_ring.get_part_nodes( part_nodes = policy.object_ring.get_part_nodes(
@ -524,6 +608,17 @@ class ObjectReplicator(Daemon):
partition=partition, partition=partition,
region=local_dev['region'])) region=local_dev['region']))
except ValueError: except ValueError:
if part_nodes:
self._add_failure_stats(
[(failure_dev['replication_ip'],
failure_dev['device'])
for failure_dev in nodes])
else:
self._add_failure_stats(
[(failure_dev['replication_ip'],
failure_dev['device'])
for failure_dev in policy.object_ring.devs
if failure_dev])
continue continue
return jobs return jobs
@ -569,19 +664,31 @@ class ObjectReplicator(Daemon):
self.replication_count = 0 self.replication_count = 0
self.last_replication_count = -1 self.last_replication_count = -1
self.partition_times = [] self.partition_times = []
self.my_replication_ips = self._get_my_replication_ips()
self.all_devs_info = set()
stats = eventlet.spawn(self.heartbeat) stats = eventlet.spawn(self.heartbeat)
lockup_detector = eventlet.spawn(self.detect_lockups) lockup_detector = eventlet.spawn(self.detect_lockups)
eventlet.sleep() # Give spawns a cycle eventlet.sleep() # Give spawns a cycle
current_nodes = None
try: try:
self.run_pool = GreenPool(size=self.concurrency) self.run_pool = GreenPool(size=self.concurrency)
jobs = self.collect_jobs(override_devices=override_devices, jobs = self.collect_jobs(override_devices=override_devices,
override_partitions=override_partitions, override_partitions=override_partitions,
override_policies=override_policies) override_policies=override_policies)
for job in jobs: for job in jobs:
current_nodes = job['nodes']
if override_devices and job['device'] not in override_devices:
continue
if override_partitions and \
job['partition'] not in override_partitions:
continue
dev_path = join(self.devices_dir, job['device']) dev_path = join(self.devices_dir, job['device'])
if self.mount_check and not ismount(dev_path): if self.mount_check and not ismount(dev_path):
self._add_failure_stats([(failure_dev['replication_ip'],
failure_dev['device'])
for failure_dev in job['nodes']])
self.logger.warn(_('%s is not mounted'), job['device']) self.logger.warn(_('%s is not mounted'), job['device'])
continue continue
if not self.check_ring(job['policy'].object_ring): if not self.check_ring(job['policy'].object_ring):
@ -603,18 +710,26 @@ class ObjectReplicator(Daemon):
self.run_pool.spawn(self.update_deleted, job) self.run_pool.spawn(self.update_deleted, job)
else: else:
self.run_pool.spawn(self.update, job) self.run_pool.spawn(self.update, job)
current_nodes = None
with Timeout(self.lockup_timeout): with Timeout(self.lockup_timeout):
self.run_pool.waitall() self.run_pool.waitall()
except (Exception, Timeout): except (Exception, Timeout):
if current_nodes:
self._add_failure_stats([(failure_dev['replication_ip'],
failure_dev['device'])
for failure_dev in current_nodes])
else:
self._add_failure_stats(self.all_devs_info)
self.logger.exception(_("Exception in top-level replication loop")) self.logger.exception(_("Exception in top-level replication loop"))
self.kill_coros() self.kill_coros()
finally: finally:
stats.kill() stats.kill()
lockup_detector.kill() lockup_detector.kill()
self.stats_line() self.stats_line()
self.stats['attempted'] = self.replication_count
def run_once(self, *args, **kwargs): def run_once(self, *args, **kwargs):
start = time.time() self._zero_stats()
self.logger.info(_("Running object replicator in script mode.")) self.logger.info(_("Running object replicator in script mode."))
override_devices = list_from_csv(kwargs.get('devices')) override_devices = list_from_csv(kwargs.get('devices'))
@ -631,27 +746,35 @@ class ObjectReplicator(Daemon):
override_devices=override_devices, override_devices=override_devices,
override_partitions=override_partitions, override_partitions=override_partitions,
override_policies=override_policies) override_policies=override_policies)
total = (time.time() - start) / 60 total = (time.time() - self.stats['start']) / 60
self.logger.info( self.logger.info(
_("Object replication complete (once). (%.02f minutes)"), total) _("Object replication complete (once). (%.02f minutes)"), total)
if not (override_partitions or override_devices): if not (override_partitions or override_devices):
dump_recon_cache({'object_replication_time': total, replication_last = time.time()
'object_replication_last': time.time()}, dump_recon_cache({'replication_stats': self.stats,
'replication_time': total,
'replication_last': replication_last,
'object_replication_time': total,
'object_replication_last': replication_last},
self.rcache, self.logger) self.rcache, self.logger)
def run_forever(self, *args, **kwargs): def run_forever(self, *args, **kwargs):
self.logger.info(_("Starting object replicator in daemon mode.")) self.logger.info(_("Starting object replicator in daemon mode."))
# Run the replicator continually # Run the replicator continually
while True: while True:
start = time.time() self._zero_stats()
self.logger.info(_("Starting object replication pass.")) self.logger.info(_("Starting object replication pass."))
# Run the replicator # Run the replicator
self.replicate() self.replicate()
total = (time.time() - start) / 60 total = (time.time() - self.stats['start']) / 60
self.logger.info( self.logger.info(
_("Object replication complete. (%.02f minutes)"), total) _("Object replication complete. (%.02f minutes)"), total)
dump_recon_cache({'object_replication_time': total, replication_last = time.time()
'object_replication_last': time.time()}, dump_recon_cache({'replication_stats': self.stats,
'replication_time': total,
'replication_last': replication_last,
'object_replication_time': total,
'object_replication_last': replication_last},
self.rcache, self.logger) self.rcache, self.logger)
self.logger.debug('Replication sleeping for %s seconds.', self.logger.debug('Replication sleeping for %s seconds.',
self.interval) self.interval)

View File

@ -28,6 +28,7 @@ from swift import gettext_ as _
from hashlib import md5 from hashlib import md5
from eventlet import sleep, wsgi, Timeout from eventlet import sleep, wsgi, Timeout
from eventlet.greenthread import spawn
from swift.common.utils import public, get_logger, \ from swift.common.utils import public, get_logger, \
config_true_value, timing_stats, replication, \ config_true_value, timing_stats, replication, \
@ -108,7 +109,9 @@ class ObjectController(BaseStorageServer):
""" """
super(ObjectController, self).__init__(conf) super(ObjectController, self).__init__(conf)
self.logger = logger or get_logger(conf, log_route='object-server') self.logger = logger or get_logger(conf, log_route='object-server')
self.node_timeout = int(conf.get('node_timeout', 3)) self.node_timeout = float(conf.get('node_timeout', 3))
self.container_update_timeout = float(
conf.get('container_update_timeout', 1))
self.conn_timeout = float(conf.get('conn_timeout', 0.5)) self.conn_timeout = float(conf.get('conn_timeout', 0.5))
self.client_timeout = int(conf.get('client_timeout', 60)) self.client_timeout = int(conf.get('client_timeout', 60))
self.disk_chunk_size = int(conf.get('disk_chunk_size', 65536)) self.disk_chunk_size = int(conf.get('disk_chunk_size', 65536))
@ -198,7 +201,8 @@ class ObjectController(BaseStorageServer):
device, partition, account, container, obj, policy, **kwargs) device, partition, account, container, obj, policy, **kwargs)
def async_update(self, op, account, container, obj, host, partition, def async_update(self, op, account, container, obj, host, partition,
contdevice, headers_out, objdevice, policy): contdevice, headers_out, objdevice, policy,
logger_thread_locals=None):
""" """
Sends or saves an async update. Sends or saves an async update.
@ -213,7 +217,12 @@ class ObjectController(BaseStorageServer):
request request
:param objdevice: device name that the object is in :param objdevice: device name that the object is in
:param policy: the associated BaseStoragePolicy instance :param policy: the associated BaseStoragePolicy instance
:param logger_thread_locals: The thread local values to be set on the
self.logger to retain transaction
logging information.
""" """
if logger_thread_locals:
self.logger.thread_locals = logger_thread_locals
headers_out['user-agent'] = 'object-server %s' % os.getpid() headers_out['user-agent'] = 'object-server %s' % os.getpid()
full_path = '/%s/%s/%s' % (account, container, obj) full_path = '/%s/%s/%s' % (account, container, obj)
if all([host, partition, contdevice]): if all([host, partition, contdevice]):
@ -285,10 +294,28 @@ class ObjectController(BaseStorageServer):
headers_out['x-trans-id'] = headers_in.get('x-trans-id', '-') headers_out['x-trans-id'] = headers_in.get('x-trans-id', '-')
headers_out['referer'] = request.as_referer() headers_out['referer'] = request.as_referer()
headers_out['X-Backend-Storage-Policy-Index'] = int(policy) headers_out['X-Backend-Storage-Policy-Index'] = int(policy)
update_greenthreads = []
for conthost, contdevice in updates: for conthost, contdevice in updates:
self.async_update(op, account, container, obj, conthost, gt = spawn(self.async_update, op, account, container, obj,
contpartition, contdevice, headers_out, conthost, contpartition, contdevice, headers_out,
objdevice, policy) objdevice, policy,
logger_thread_locals=self.logger.thread_locals)
update_greenthreads.append(gt)
# Wait a little bit to see if the container updates are successful.
# If we immediately return after firing off the greenthread above, then
# we're more likely to confuse the end-user who does a listing right
# after getting a successful response to the object create. The
# `container_update_timeout` bounds the length of time we wait so that
# one slow container server doesn't make the entire request lag.
try:
with Timeout(self.container_update_timeout):
for gt in update_greenthreads:
gt.wait()
except Timeout:
# updates didn't go through, log it and return
self.logger.debug(
'Container update timeout (%.4fs) waiting for %s',
self.container_update_timeout, updates)
def delete_at_update(self, op, delete_at, account, container, obj, def delete_at_update(self, op, delete_at, account, container, obj,
request, objdevice, policy): request, objdevice, policy):
@ -417,6 +444,11 @@ class ObjectController(BaseStorageServer):
override = key.lower().replace(override_prefix, 'x-') override = key.lower().replace(override_prefix, 'x-')
update_headers[override] = val update_headers[override] = val
def _preserve_slo_manifest(self, update_metadata, orig_metadata):
if 'X-Static-Large-Object' in orig_metadata:
update_metadata['X-Static-Large-Object'] = \
orig_metadata['X-Static-Large-Object']
@public @public
@timing_stats() @timing_stats()
def POST(self, request): def POST(self, request):
@ -446,6 +478,7 @@ class ObjectController(BaseStorageServer):
request=request, request=request,
headers={'X-Backend-Timestamp': orig_timestamp.internal}) headers={'X-Backend-Timestamp': orig_timestamp.internal})
metadata = {'X-Timestamp': req_timestamp.internal} metadata = {'X-Timestamp': req_timestamp.internal}
self._preserve_slo_manifest(metadata, orig_metadata)
metadata.update(val for val in request.headers.items() metadata.update(val for val in request.headers.items()
if is_user_meta('object', val[0])) if is_user_meta('object', val[0]))
for header_key in self.allowed_headers: for header_key in self.allowed_headers:

View File

@ -164,7 +164,7 @@ class Receiver(object):
self.node_index = int( self.node_index = int(
self.request.headers['X-Backend-Ssync-Node-Index']) self.request.headers['X-Backend-Ssync-Node-Index'])
if self.node_index != self.frag_index: if self.node_index != self.frag_index:
# a primary node should only recieve it's own fragments # a primary node should only receive it's own fragments
raise swob.HTTPBadRequest( raise swob.HTTPBadRequest(
'Frag-Index (%s) != Node-Index (%s)' % ( 'Frag-Index (%s) != Node-Index (%s)' % (
self.frag_index, self.node_index)) self.frag_index, self.node_index))

View File

@ -82,7 +82,6 @@ class Sender(object):
set(self.send_list)) set(self.send_list))
can_delete_obj = dict((hash_, self.available_map[hash_]) can_delete_obj = dict((hash_, self.available_map[hash_])
for hash_ in in_sync_hashes) for hash_ in in_sync_hashes)
self.disconnect()
if not self.failures: if not self.failures:
return True, can_delete_obj return True, can_delete_obj
else: else:
@ -103,6 +102,8 @@ class Sender(object):
self.node.get('replication_ip'), self.node.get('replication_ip'),
self.node.get('replication_port'), self.node.get('replication_port'),
self.node.get('device'), self.job.get('partition')) self.node.get('device'), self.job.get('partition'))
finally:
self.disconnect()
except Exception: except Exception:
# We don't want any exceptions to escape our code and possibly # We don't want any exceptions to escape our code and possibly
# mess up the original replicator code that called us since it # mess up the original replicator code that called us since it
@ -351,6 +352,8 @@ class Sender(object):
Closes down the connection to the object server once done Closes down the connection to the object server once done
with the SSYNC request. with the SSYNC request.
""" """
if not self.connection:
return
try: try:
with exceptions.MessageTimeout( with exceptions.MessageTimeout(
self.daemon.node_timeout, 'disconnect'): self.daemon.node_timeout, 'disconnect'):

View File

@ -256,7 +256,7 @@ class ObjectUpdater(Daemon):
:param node: node dictionary from the container ring :param node: node dictionary from the container ring
:param part: partition that holds the container :param part: partition that holds the container
:param op: operation performed (ex: 'POST' or 'DELETE') :param op: operation performed (ex: 'PUT' or 'DELETE')
:param obj: object name being updated :param obj: object name being updated
:param headers_out: headers to send with the update :param headers_out: headers to send with the update
""" """

View File

@ -28,6 +28,7 @@ import os
import time import time
import functools import functools
import inspect import inspect
import itertools
import operator import operator
from sys import exc_info from sys import exc_info
from swift import gettext_ as _ from swift import gettext_ as _
@ -696,7 +697,12 @@ class ResumingGetter(object):
If we have no Range header, this is a no-op. If we have no Range header, this is a no-op.
""" """
if 'Range' in self.backend_headers: if 'Range' in self.backend_headers:
req_range = Range(self.backend_headers['Range']) try:
req_range = Range(self.backend_headers['Range'])
except ValueError:
# there's a Range header, but it's garbage, so get rid of it
self.backend_headers.pop('Range')
return
begin, end = req_range.ranges.pop(0) begin, end = req_range.ranges.pop(0)
if len(req_range.ranges) > 0: if len(req_range.ranges) > 0:
self.backend_headers['Range'] = str(req_range) self.backend_headers['Range'] = str(req_range)
@ -922,6 +928,7 @@ class ResumingGetter(object):
'part_iter': part_iter} 'part_iter': part_iter}
self.pop_range() self.pop_range()
except StopIteration: except StopIteration:
req.environ['swift.non_client_disconnect'] = True
return return
except ChunkReadTimeout: except ChunkReadTimeout:
@ -1016,7 +1023,7 @@ class ResumingGetter(object):
self.statuses.append(possible_source.status) self.statuses.append(possible_source.status)
self.reasons.append(possible_source.reason) self.reasons.append(possible_source.reason)
self.bodies.append('') self.bodies.append(None)
self.source_headers.append(possible_source.getheaders()) self.source_headers.append(possible_source.getheaders())
sources.append((possible_source, node)) sources.append((possible_source, node))
if not self.newest: # one good source is enough if not self.newest: # one good source is enough
@ -1120,6 +1127,99 @@ class GetOrHeadHandler(ResumingGetter):
return res return res
class NodeIter(object):
"""
Yields nodes for a ring partition, skipping over error
limited nodes and stopping at the configurable number of nodes. If a
node yielded subsequently gets error limited, an extra node will be
yielded to take its place.
Note that if you're going to iterate over this concurrently from
multiple greenthreads, you'll want to use a
swift.common.utils.GreenthreadSafeIterator to serialize access.
Otherwise, you may get ValueErrors from concurrent access. (You also
may not, depending on how logging is configured, the vagaries of
socket IO and eventlet, and the phase of the moon.)
:param app: a proxy app
:param ring: ring to get yield nodes from
:param partition: ring partition to yield nodes for
:param node_iter: optional iterable of nodes to try. Useful if you
want to filter or reorder the nodes.
"""
def __init__(self, app, ring, partition, node_iter=None):
self.app = app
self.ring = ring
self.partition = partition
part_nodes = ring.get_part_nodes(partition)
if node_iter is None:
node_iter = itertools.chain(
part_nodes, ring.get_more_nodes(partition))
num_primary_nodes = len(part_nodes)
self.nodes_left = self.app.request_node_count(num_primary_nodes)
self.expected_handoffs = self.nodes_left - num_primary_nodes
# Use of list() here forcibly yanks the first N nodes (the primary
# nodes) from node_iter, so the rest of its values are handoffs.
self.primary_nodes = self.app.sort_nodes(
list(itertools.islice(node_iter, num_primary_nodes)))
self.handoff_iter = node_iter
def __iter__(self):
self._node_iter = self._node_gen()
return self
def log_handoffs(self, handoffs):
"""
Log handoff requests if handoff logging is enabled and the
handoff was not expected.
We only log handoffs when we've pushed the handoff count further
than we would normally have expected under normal circumstances,
that is (request_node_count - num_primaries), when handoffs goes
higher than that it means one of the primaries must have been
skipped because of error limiting before we consumed all of our
nodes_left.
"""
if not self.app.log_handoffs:
return
extra_handoffs = handoffs - self.expected_handoffs
if extra_handoffs > 0:
self.app.logger.increment('handoff_count')
self.app.logger.warning(
'Handoff requested (%d)' % handoffs)
if (extra_handoffs == len(self.primary_nodes)):
# all the primaries were skipped, and handoffs didn't help
self.app.logger.increment('handoff_all_count')
def _node_gen(self):
for node in self.primary_nodes:
if not self.app.error_limited(node):
yield node
if not self.app.error_limited(node):
self.nodes_left -= 1
if self.nodes_left <= 0:
return
handoffs = 0
for node in self.handoff_iter:
if not self.app.error_limited(node):
handoffs += 1
self.log_handoffs(handoffs)
yield node
if not self.app.error_limited(node):
self.nodes_left -= 1
if self.nodes_left <= 0:
return
def next(self):
return next(self._node_iter)
def __next__(self):
return self.next()
class Controller(object): class Controller(object):
"""Base WSGI controller class for the proxy""" """Base WSGI controller class for the proxy"""
server_type = 'Base' server_type = 'Base'

View File

@ -51,13 +51,12 @@ from swift.common.constraints import check_metadata, check_object_creation, \
check_account_format check_account_format
from swift.common import constraints from swift.common import constraints
from swift.common.exceptions import ChunkReadTimeout, \ from swift.common.exceptions import ChunkReadTimeout, \
ChunkWriteTimeout, ConnectionTimeout, ListingIterNotFound, \ ChunkWriteTimeout, ConnectionTimeout, ResponseTimeout, \
ListingIterNotAuthorized, ListingIterError, ResponseTimeout, \
InsufficientStorage, FooterNotSupported, MultiphasePUTNotSupported, \ InsufficientStorage, FooterNotSupported, MultiphasePUTNotSupported, \
PutterConnectError PutterConnectError
from swift.common.http import ( from swift.common.http import (
is_success, is_client_error, is_server_error, HTTP_CONTINUE, HTTP_CREATED, is_success, is_server_error, HTTP_CONTINUE, HTTP_CREATED,
HTTP_MULTIPLE_CHOICES, HTTP_NOT_FOUND, HTTP_INTERNAL_SERVER_ERROR, HTTP_MULTIPLE_CHOICES, HTTP_INTERNAL_SERVER_ERROR,
HTTP_SERVICE_UNAVAILABLE, HTTP_INSUFFICIENT_STORAGE, HTTP_SERVICE_UNAVAILABLE, HTTP_INSUFFICIENT_STORAGE,
HTTP_PRECONDITION_FAILED, HTTP_CONFLICT, is_informational) HTTP_PRECONDITION_FAILED, HTTP_CONFLICT, is_informational)
from swift.common.storage_policy import (POLICIES, REPL_POLICY, EC_POLICY, from swift.common.storage_policy import (POLICIES, REPL_POLICY, EC_POLICY,
@ -139,46 +138,6 @@ class BaseObjectController(Controller):
self.container_name = unquote(container_name) self.container_name = unquote(container_name)
self.object_name = unquote(object_name) self.object_name = unquote(object_name)
def _listing_iter(self, lcontainer, lprefix, env):
for page in self._listing_pages_iter(lcontainer, lprefix, env):
for item in page:
yield item
def _listing_pages_iter(self, lcontainer, lprefix, env):
lpartition = self.app.container_ring.get_part(
self.account_name, lcontainer)
marker = ''
while True:
lreq = Request.blank('i will be overridden by env', environ=env)
# Don't quote PATH_INFO, by WSGI spec
lreq.environ['PATH_INFO'] = \
'/v1/%s/%s' % (self.account_name, lcontainer)
lreq.environ['REQUEST_METHOD'] = 'GET'
lreq.environ['QUERY_STRING'] = \
'format=json&prefix=%s&marker=%s' % (quote(lprefix),
quote(marker))
container_node_iter = self.app.iter_nodes(self.app.container_ring,
lpartition)
lresp = self.GETorHEAD_base(
lreq, _('Container'), container_node_iter, lpartition,
lreq.swift_entity_path)
if 'swift.authorize' in env:
lreq.acl = lresp.headers.get('x-container-read')
aresp = env['swift.authorize'](lreq)
if aresp:
raise ListingIterNotAuthorized(aresp)
if lresp.status_int == HTTP_NOT_FOUND:
raise ListingIterNotFound()
elif not is_success(lresp.status_int):
raise ListingIterError()
if not lresp.body:
break
sublisting = json.loads(lresp.body)
if not sublisting:
break
marker = sublisting[-1]['name'].encode('utf-8')
yield sublisting
def iter_nodes_local_first(self, ring, partition): def iter_nodes_local_first(self, ring, partition):
""" """
Yields nodes for a ring partition. Yields nodes for a ring partition.
@ -548,71 +507,6 @@ class BaseObjectController(Controller):
# until copy request handling moves to middleware # until copy request handling moves to middleware
return None, req, data_source, update_response return None, req, data_source, update_response
def _handle_object_versions(self, req):
"""
This method handles versionining of objects in containers that
have the feature enabled.
When a new PUT request is sent, the proxy checks for previous versions
of that same object name. If found, it is copied to a different
container and the new version is stored in its place.
This method was added as part of the PUT method refactoring and the
functionality is expected to be moved to middleware
"""
container_info = self.container_info(
self.account_name, self.container_name, req)
policy_index = req.headers.get('X-Backend-Storage-Policy-Index',
container_info['storage_policy'])
obj_ring = self.app.get_object_ring(policy_index)
partition, nodes = obj_ring.get_nodes(
self.account_name, self.container_name, self.object_name)
object_versions = container_info['versions']
# do a HEAD request for checking object versions
if object_versions and not req.environ.get('swift_versioned_copy'):
# make sure proxy-server uses the right policy index
_headers = {'X-Backend-Storage-Policy-Index': policy_index,
'X-Newest': 'True'}
hreq = Request.blank(req.path_info, headers=_headers,
environ={'REQUEST_METHOD': 'HEAD'})
hnode_iter = self.app.iter_nodes(obj_ring, partition)
hresp = self.GETorHEAD_base(
hreq, _('Object'), hnode_iter, partition,
hreq.swift_entity_path)
is_manifest = 'X-Object-Manifest' in req.headers or \
'X-Object-Manifest' in hresp.headers
if hresp.status_int != HTTP_NOT_FOUND and not is_manifest:
# This is a version manifest and needs to be handled
# differently. First copy the existing data to a new object,
# then write the data from this request to the version manifest
# object.
lcontainer = object_versions.split('/')[0]
prefix_len = '%03x' % len(self.object_name)
lprefix = prefix_len + self.object_name + '/'
ts_source = hresp.environ.get('swift_x_timestamp')
if ts_source is None:
ts_source = time.mktime(time.strptime(
hresp.headers['last-modified'],
'%a, %d %b %Y %H:%M:%S GMT'))
new_ts = Timestamp(ts_source).internal
vers_obj_name = lprefix + new_ts
copy_headers = {
'Destination': '%s/%s' % (lcontainer, vers_obj_name)}
copy_environ = {'REQUEST_METHOD': 'COPY',
'swift_versioned_copy': True
}
copy_req = Request.blank(req.path_info, headers=copy_headers,
environ=copy_environ)
copy_resp = self.COPY(copy_req)
if is_client_error(copy_resp.status_int):
# missing container or bad permissions
raise HTTPPreconditionFailed(request=req)
elif not is_success(copy_resp.status_int):
# could not copy the data, bail
raise HTTPServiceUnavailable(request=req)
def _update_content_type(self, req): def _update_content_type(self, req):
# Sometimes the 'content-type' header exists, but is set to None. # Sometimes the 'content-type' header exists, but is set to None.
req.content_type_manually_set = True req.content_type_manually_set = True
@ -657,13 +551,17 @@ class BaseObjectController(Controller):
if any(conn for conn in conns if conn.resp and if any(conn for conn in conns if conn.resp and
conn.resp.status == HTTP_CONFLICT): conn.resp.status == HTTP_CONFLICT):
timestamps = [HeaderKeyDict(conn.resp.getheaders()).get( status_times = ['%(status)s (%(timestamp)s)' % {
'X-Backend-Timestamp') for conn in conns if conn.resp] 'status': conn.resp.status,
'timestamp': HeaderKeyDict(
conn.resp.getheaders()).get(
'X-Backend-Timestamp', 'unknown')
} for conn in conns if conn.resp]
self.app.logger.debug( self.app.logger.debug(
_('Object PUT returning 202 for 409: ' _('Object PUT returning 202 for 409: '
'%(req_timestamp)s <= %(timestamps)r'), '%(req_timestamp)s <= %(timestamps)r'),
{'req_timestamp': req.timestamp.internal, {'req_timestamp': req.timestamp.internal,
'timestamps': ', '.join(timestamps)}) 'timestamps': ', '.join(status_times)})
raise HTTPAccepted(request=req) raise HTTPAccepted(request=req)
self._check_min_conn(req, conns, min_conns) self._check_min_conn(req, conns, min_conns)
@ -725,7 +623,7 @@ class BaseObjectController(Controller):
""" """
This method is responsible for establishing connection This method is responsible for establishing connection
with storage nodes and sending the data to each one of those with storage nodes and sending the data to each one of those
nodes. The process of transfering data is specific to each nodes. The process of transferring data is specific to each
Storage Policy, thus it is required for each policy specific Storage Policy, thus it is required for each policy specific
ObjectController to provide their own implementation of this method. ObjectController to provide their own implementation of this method.
@ -815,9 +713,6 @@ class BaseObjectController(Controller):
self._update_x_timestamp(req) self._update_x_timestamp(req)
# check if versioning is enabled and handle copying previous version
self._handle_object_versions(req)
# check if request is a COPY of an existing object # check if request is a COPY of an existing object
source_header = req.headers.get('X-Copy-From') source_header = req.headers.get('X-Copy-From')
if source_header: if source_header:
@ -830,7 +725,7 @@ class BaseObjectController(Controller):
data_source = iter(lambda: reader(self.app.client_chunk_size), '') data_source = iter(lambda: reader(self.app.client_chunk_size), '')
update_response = lambda req, resp: resp update_response = lambda req, resp: resp
# check if object is set to be automaticaly deleted (i.e. expired) # check if object is set to be automatically deleted (i.e. expired)
req, delete_at_container, delete_at_part, \ req, delete_at_container, delete_at_part, \
delete_at_nodes = self._config_obj_expiration(req) delete_at_nodes = self._config_obj_expiration(req)
@ -861,86 +756,10 @@ class BaseObjectController(Controller):
containers = container_info['nodes'] containers = container_info['nodes']
req.acl = container_info['write_acl'] req.acl = container_info['write_acl']
req.environ['swift_sync_key'] = container_info['sync_key'] req.environ['swift_sync_key'] = container_info['sync_key']
object_versions = container_info['versions']
if 'swift.authorize' in req.environ: if 'swift.authorize' in req.environ:
aresp = req.environ['swift.authorize'](req) aresp = req.environ['swift.authorize'](req)
if aresp: if aresp:
return aresp return aresp
if object_versions:
# this is a version manifest and needs to be handled differently
object_versions = unquote(object_versions)
lcontainer = object_versions.split('/')[0]
prefix_len = '%03x' % len(self.object_name)
lprefix = prefix_len + self.object_name + '/'
item_list = []
try:
for _item in self._listing_iter(lcontainer, lprefix,
req.environ):
item_list.append(_item)
except ListingIterNotFound:
# no worries, last_item is None
pass
except ListingIterNotAuthorized as err:
return err.aresp
except ListingIterError:
return HTTPServerError(request=req)
while len(item_list) > 0:
previous_version = item_list.pop()
# there are older versions so copy the previous version to the
# current object and delete the previous version
orig_container = self.container_name
orig_obj = self.object_name
self.container_name = lcontainer
self.object_name = previous_version['name'].encode('utf-8')
copy_path = '/v1/' + self.account_name + '/' + \
self.container_name + '/' + self.object_name
copy_headers = {'X-Newest': 'True',
'Destination': orig_container + '/' + orig_obj
}
copy_environ = {'REQUEST_METHOD': 'COPY',
'swift_versioned_copy': True
}
creq = Request.blank(copy_path, headers=copy_headers,
environ=copy_environ)
copy_resp = self.COPY(creq)
if copy_resp.status_int == HTTP_NOT_FOUND:
# the version isn't there so we'll try with previous
self.container_name = orig_container
self.object_name = orig_obj
continue
if is_client_error(copy_resp.status_int):
# some user error, maybe permissions
return HTTPPreconditionFailed(request=req)
elif not is_success(copy_resp.status_int):
# could not copy the data, bail
return HTTPServiceUnavailable(request=req)
# reset these because the COPY changed them
self.container_name = lcontainer
self.object_name = previous_version['name'].encode('utf-8')
new_del_req = Request.blank(copy_path, environ=req.environ)
container_info = self.container_info(
self.account_name, self.container_name, req)
policy_idx = container_info['storage_policy']
obj_ring = self.app.get_object_ring(policy_idx)
# pass the policy index to storage nodes via req header
new_del_req.headers['X-Backend-Storage-Policy-Index'] = \
policy_idx
container_partition = container_info['partition']
containers = container_info['nodes']
new_del_req.acl = container_info['write_acl']
new_del_req.path_info = copy_path
req = new_del_req
# remove 'X-If-Delete-At', since it is not for the older copy
if 'X-If-Delete-At' in req.headers:
del req.headers['X-If-Delete-At']
if 'swift.authorize' in req.environ:
aresp = req.environ['swift.authorize'](req)
if aresp:
return aresp
break
if not containers: if not containers:
return HTTPNotFound(request=req) return HTTPNotFound(request=req)
partition, nodes = obj_ring.get_nodes( partition, nodes = obj_ring.get_nodes(
@ -999,6 +818,13 @@ class BaseObjectController(Controller):
self.object_name = dest_object self.object_name = dest_object
# re-write the existing request as a PUT instead of creating a new one # re-write the existing request as a PUT instead of creating a new one
# since this one is already attached to the posthooklogger # since this one is already attached to the posthooklogger
# TODO: Swift now has proxy-logging middleware instead of
# posthooklogger used in before. i.e. we don't have to
# keep the code depends on evnetlet.posthooks sequence, IMHO.
# However, creating a new sub request might
# cause the possibility to hide some bugs behindes the request
# so that we should discuss whichi is suitable (new-sub-request
# vs re-write-existing-request) for Swift. [kota_]
req.method = 'PUT' req.method = 'PUT'
req.path_info = '/v1/%s/%s/%s' % \ req.path_info = '/v1/%s/%s/%s' % \
(dest_account, dest_container, dest_object) (dest_account, dest_container, dest_object)
@ -1471,6 +1297,8 @@ class ECAppIter(object):
# 100-byte object with 1024-byte segments. That's not # 100-byte object with 1024-byte segments. That's not
# what we're dealing with here, though. # what we're dealing with here, though.
if client_asked_for_range and not satisfiable: if client_asked_for_range and not satisfiable:
req.environ[
'swift.non_client_disconnect'] = True
raise HTTPRequestedRangeNotSatisfiable( raise HTTPRequestedRangeNotSatisfiable(
request=req, headers=resp_headers) request=req, headers=resp_headers)
self.learned_content_type = content_type self.learned_content_type = content_type
@ -2132,44 +1960,43 @@ class ECObjectController(BaseObjectController):
orig_range = req.range orig_range = req.range
range_specs = self._convert_range(req, policy) range_specs = self._convert_range(req, policy)
node_iter = GreenthreadSafeIterator(node_iter) safe_iter = GreenthreadSafeIterator(node_iter)
num_gets = policy.ec_ndata with ContextPool(policy.ec_ndata) as pool:
with ContextPool(num_gets) as pool:
pile = GreenAsyncPile(pool) pile = GreenAsyncPile(pool)
for _junk in range(num_gets): for _junk in range(policy.ec_ndata):
pile.spawn(self._fragment_GET_request, pile.spawn(self._fragment_GET_request,
req, node_iter, partition, req, safe_iter, partition,
policy) policy)
gets = list(pile)
good_gets = []
bad_gets = [] bad_gets = []
for get, parts_iter in gets: etag_buckets = collections.defaultdict(list)
best_etag = None
for get, parts_iter in pile:
if is_success(get.last_status): if is_success(get.last_status):
good_gets.append((get, parts_iter)) etag = HeaderKeyDict(
get.last_headers)['X-Object-Sysmeta-Ec-Etag']
etag_buckets[etag].append((get, parts_iter))
if etag != best_etag and (
len(etag_buckets[etag]) >
len(etag_buckets[best_etag])):
best_etag = etag
else: else:
bad_gets.append((get, parts_iter)) bad_gets.append((get, parts_iter))
matching_response_count = max(
len(etag_buckets[best_etag]), len(bad_gets))
if (policy.ec_ndata - matching_response_count >
pile._pending) and node_iter.nodes_left > 0:
# we need more matching responses to reach ec_ndata
# than we have pending gets, as long as we still have
# nodes in node_iter we can spawn another
pile.spawn(self._fragment_GET_request, req,
safe_iter, partition, policy)
req.range = orig_range req.range = orig_range
if len(good_gets) == num_gets: if len(etag_buckets[best_etag]) >= policy.ec_ndata:
# If these aren't all for the same object, then error out so # headers can come from any of the getters
# at least the client doesn't get garbage. We can do a lot
# better here with more work, but this'll work for now.
found_obj_etags = set(
HeaderKeyDict(
getter.last_headers)['X-Object-Sysmeta-Ec-Etag']
for getter, _junk in good_gets)
if len(found_obj_etags) > 1:
self.app.logger.debug(
"Returning 503 for %s; found too many etags (%s)",
req.path,
", ".join(found_obj_etags))
return HTTPServiceUnavailable(request=req)
# we found enough pieces to decode the object, so now let's
# decode the object
resp_headers = HeaderKeyDict( resp_headers = HeaderKeyDict(
good_gets[0][0].source_headers[-1]) etag_buckets[best_etag][0][0].source_headers[-1])
resp_headers.pop('Content-Range', None) resp_headers.pop('Content-Range', None)
eccl = resp_headers.get('X-Object-Sysmeta-Ec-Content-Length') eccl = resp_headers.get('X-Object-Sysmeta-Ec-Content-Length')
obj_length = int(eccl) if eccl is not None else None obj_length = int(eccl) if eccl is not None else None
@ -2177,11 +2004,10 @@ class ECObjectController(BaseObjectController):
# This is only true if we didn't get a 206 response, but # This is only true if we didn't get a 206 response, but
# that's the only time this is used anyway. # that's the only time this is used anyway.
fa_length = int(resp_headers['Content-Length']) fa_length = int(resp_headers['Content-Length'])
app_iter = ECAppIter( app_iter = ECAppIter(
req.swift_entity_path, req.swift_entity_path,
policy, policy,
[iterator for getter, iterator in good_gets], [iterator for getter, iterator in etag_buckets[best_etag]],
range_specs, fa_length, obj_length, range_specs, fa_length, obj_length,
self.app.logger) self.app.logger)
resp = Response( resp = Response(
@ -2210,13 +2036,12 @@ class ECObjectController(BaseObjectController):
# EC fragment archives each have different bytes, hence different # EC fragment archives each have different bytes, hence different
# etags. However, they all have the original object's etag stored in # etags. However, they all have the original object's etag stored in
# sysmeta, so we copy that here so the client gets it. # sysmeta, so we copy that here so the client gets it.
resp.headers['Etag'] = resp.headers.get( if is_success(resp.status_int):
'X-Object-Sysmeta-Ec-Etag') resp.headers['Etag'] = resp.headers.get(
resp.headers['Content-Length'] = resp.headers.get( 'X-Object-Sysmeta-Ec-Etag')
'X-Object-Sysmeta-Ec-Content-Length') resp.headers['Content-Length'] = resp.headers.get(
resp.fix_conditional_response() 'X-Object-Sysmeta-Ec-Content-Length')
resp.fix_conditional_response()
return resp
def _connect_put_node(self, node_iter, part, path, headers, def _connect_put_node(self, node_iter, part, path, headers,
logger_thread_locals): logger_thread_locals):
@ -2551,10 +2376,9 @@ class ECObjectController(BaseObjectController):
need_quorum = False need_quorum = False
# The .durable file will propagate in a replicated fashion; if # The .durable file will propagate in a replicated fashion; if
# one exists, the reconstructor will spread it around. Thus, we # one exists, the reconstructor will spread it around. Thus, we
# don't require as many .durable files to be successfully # require "parity + 1" .durable files to be successfully written
# written as we do fragment archives in order to call the PUT a # as we do fragment archives in order to call the PUT a success.
# success. min_conns = policy.ec_nparity + 1
min_conns = 2
putters = [p for p in putters if not p.failed] putters = [p for p in putters if not p.failed]
# ignore response etags, and quorum boolean # ignore response etags, and quorum boolean
statuses, reasons, bodies, _etags, _quorum = \ statuses, reasons, bodies, _etags, _quorum = \

View File

@ -19,7 +19,6 @@ import socket
from swift import gettext_ as _ from swift import gettext_ as _
from random import shuffle from random import shuffle
from time import time from time import time
import itertools
import functools import functools
import sys import sys
@ -36,7 +35,7 @@ from swift.common.utils import cache_from_env, get_logger, \
from swift.common.constraints import check_utf8, valid_api_version from swift.common.constraints import check_utf8, valid_api_version
from swift.proxy.controllers import AccountController, ContainerController, \ from swift.proxy.controllers import AccountController, ContainerController, \
ObjectControllerRouter, InfoController ObjectControllerRouter, InfoController
from swift.proxy.controllers.base import get_container_info from swift.proxy.controllers.base import get_container_info, NodeIter
from swift.common.swob import HTTPBadRequest, HTTPForbidden, \ from swift.common.swob import HTTPBadRequest, HTTPForbidden, \
HTTPMethodNotAllowed, HTTPNotFound, HTTPPreconditionFailed, \ HTTPMethodNotAllowed, HTTPNotFound, HTTPPreconditionFailed, \
HTTPServerError, HTTPException, Request, HTTPServiceUnavailable HTTPServerError, HTTPException, Request, HTTPServiceUnavailable
@ -64,6 +63,9 @@ required_filters = [
if pipe.startswith('catch_errors') if pipe.startswith('catch_errors')
else [])}, else [])},
{'name': 'dlo', 'after_fn': lambda _junk: [ {'name': 'dlo', 'after_fn': lambda _junk: [
'staticweb', 'tempauth', 'keystoneauth',
'catch_errors', 'gatekeeper', 'proxy_logging']},
{'name': 'versioned_writes', 'after_fn': lambda _junk: [
'staticweb', 'tempauth', 'keystoneauth', 'staticweb', 'tempauth', 'keystoneauth',
'catch_errors', 'gatekeeper', 'proxy_logging']}] 'catch_errors', 'gatekeeper', 'proxy_logging']}]
@ -378,6 +380,7 @@ class Application(object):
allowed_methods = getattr(controller, 'allowed_methods', set()) allowed_methods = getattr(controller, 'allowed_methods', set())
return HTTPMethodNotAllowed( return HTTPMethodNotAllowed(
request=req, headers={'Allow': ', '.join(allowed_methods)}) request=req, headers={'Allow': ', '.join(allowed_methods)})
old_authorize = None
if 'swift.authorize' in req.environ: if 'swift.authorize' in req.environ:
# We call authorize before the handler, always. If authorized, # We call authorize before the handler, always. If authorized,
# we remove the swift.authorize hook so isn't ever called # we remove the swift.authorize hook so isn't ever called
@ -388,7 +391,7 @@ class Application(object):
if not resp and not req.headers.get('X-Copy-From-Account') \ if not resp and not req.headers.get('X-Copy-From-Account') \
and not req.headers.get('Destination-Account'): and not req.headers.get('Destination-Account'):
# No resp means authorized, no delayed recheck required. # No resp means authorized, no delayed recheck required.
del req.environ['swift.authorize'] old_authorize = req.environ['swift.authorize']
else: else:
# Response indicates denial, but we might delay the denial # Response indicates denial, but we might delay the denial
# and recheck later. If not delayed, return the error now. # and recheck later. If not delayed, return the error now.
@ -398,7 +401,13 @@ class Application(object):
# gets mutated during handling. This way logging can display the # gets mutated during handling. This way logging can display the
# method the client actually sent. # method the client actually sent.
req.environ['swift.orig_req_method'] = req.method req.environ['swift.orig_req_method'] = req.method
return handler(req) try:
if old_authorize:
req.environ.pop('swift.authorize', None)
return handler(req)
finally:
if old_authorize:
req.environ['swift.authorize'] = old_authorize
except HTTPException as error_response: except HTTPException as error_response:
return error_response return error_response
except (Exception, Timeout): except (Exception, Timeout):
@ -497,60 +506,7 @@ class Application(object):
'port': node['port'], 'device': node['device']}) 'port': node['port'], 'device': node['device']})
def iter_nodes(self, ring, partition, node_iter=None): def iter_nodes(self, ring, partition, node_iter=None):
""" return NodeIter(self, ring, partition, node_iter=node_iter)
Yields nodes for a ring partition, skipping over error
limited nodes and stopping at the configurable number of nodes. If a
node yielded subsequently gets error limited, an extra node will be
yielded to take its place.
Note that if you're going to iterate over this concurrently from
multiple greenthreads, you'll want to use a
swift.common.utils.GreenthreadSafeIterator to serialize access.
Otherwise, you may get ValueErrors from concurrent access. (You also
may not, depending on how logging is configured, the vagaries of
socket IO and eventlet, and the phase of the moon.)
:param ring: ring to get yield nodes from
:param partition: ring partition to yield nodes for
:param node_iter: optional iterable of nodes to try. Useful if you
want to filter or reorder the nodes.
"""
part_nodes = ring.get_part_nodes(partition)
if node_iter is None:
node_iter = itertools.chain(part_nodes,
ring.get_more_nodes(partition))
num_primary_nodes = len(part_nodes)
# Use of list() here forcibly yanks the first N nodes (the primary
# nodes) from node_iter, so the rest of its values are handoffs.
primary_nodes = self.sort_nodes(
list(itertools.islice(node_iter, num_primary_nodes)))
handoff_nodes = node_iter
nodes_left = self.request_node_count(len(primary_nodes))
log_handoffs_threshold = nodes_left - len(primary_nodes)
for node in primary_nodes:
if not self.error_limited(node):
yield node
if not self.error_limited(node):
nodes_left -= 1
if nodes_left <= 0:
return
handoffs = 0
for node in handoff_nodes:
if not self.error_limited(node):
handoffs += 1
if self.log_handoffs and handoffs > log_handoffs_threshold:
self.logger.increment('handoff_count')
self.logger.warning(
'Handoff requested (%d)' % handoffs)
if handoffs - log_handoffs_threshold == len(primary_nodes):
self.logger.increment('handoff_all_count')
yield node
if not self.error_limited(node):
nodes_left -= 1
if nodes_left <= 0:
return
def exception_occurred(self, node, typ, additional_info, def exception_occurred(self, node, typ, additional_info,
**kwargs): **kwargs):

View File

@ -13,3 +13,6 @@ sphinx>=1.1.2,<1.2
mock>=1.0 mock>=1.0
python-swiftclient python-swiftclient
python-keystoneclient>=1.3.0 python-keystoneclient>=1.3.0
# Security checks
bandit>=0.10.1

View File

@ -15,7 +15,7 @@
# See http://code.google.com/p/python-nose/issues/detail?id=373 # See http://code.google.com/p/python-nose/issues/detail?id=373
# The code below enables nosetests to work with i18n _() blocks # The code below enables nosetests to work with i18n _() blocks
from __future__ import print_function
import sys import sys
import os import os
try: try:
@ -63,15 +63,12 @@ def get_config(section_name=None, defaults=None):
config = readconf(config_file, section_name) config = readconf(config_file, section_name)
except SystemExit: except SystemExit:
if not os.path.exists(config_file): if not os.path.exists(config_file):
print >>sys.stderr, \ print('Unable to read test config %s - file not found'
'Unable to read test config %s - file not found' \ % config_file, file=sys.stderr)
% config_file
elif not os.access(config_file, os.R_OK): elif not os.access(config_file, os.R_OK):
print >>sys.stderr, \ print('Unable to read test config %s - permission denied'
'Unable to read test config %s - permission denied' \ % config_file, file=sys.stderr)
% config_file
else: else:
print >>sys.stderr, \ print('Unable to read test config %s - section %s not found'
'Unable to read test config %s - section %s not found' \ % (config_file, section_name), file=sys.stderr)
% (config_file, section_name)
return config return config

View File

@ -13,6 +13,7 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
from __future__ import print_function
import mock import mock
import os import os
import sys import sys
@ -128,7 +129,7 @@ class InProcessException(BaseException):
def _info(msg): def _info(msg):
print >> sys.stderr, msg print(msg, file=sys.stderr)
def _debug(msg): def _debug(msg):
@ -501,7 +502,7 @@ def get_cluster_info():
# Most likely the swift cluster has "expose_info = false" set # Most likely the swift cluster has "expose_info = false" set
# in its proxy-server.conf file, so we'll just do the best we # in its proxy-server.conf file, so we'll just do the best we
# can. # can.
print >>sys.stderr, "** Swift Cluster not exposing /info **" print("** Swift Cluster not exposing /info **", file=sys.stderr)
# Finally, we'll allow any constraint present in the swift-constraints # Finally, we'll allow any constraint present in the swift-constraints
# section of test.conf to override everything. Note that only those # section of test.conf to override everything. Note that only those
@ -513,8 +514,8 @@ def get_cluster_info():
except KeyError: except KeyError:
pass pass
except ValueError: except ValueError:
print >>sys.stderr, "Invalid constraint value: %s = %s" % ( print("Invalid constraint value: %s = %s" % (
k, test_constraints[k]) k, test_constraints[k]), file=sys.stderr)
eff_constraints.update(test_constraints) eff_constraints.update(test_constraints)
# Just make it look like these constraints were loaded from a /info call, # Just make it look like these constraints were loaded from a /info call,
@ -564,8 +565,8 @@ def setup_package():
in_process_setup(the_object_server=( in_process_setup(the_object_server=(
mem_object_server if in_mem_obj else object_server)) mem_object_server if in_mem_obj else object_server))
except InProcessException as exc: except InProcessException as exc:
print >> sys.stderr, ('Exception during in-process setup: %s' print(('Exception during in-process setup: %s'
% str(exc)) % str(exc)), file=sys.stderr)
raise raise
global web_front_end global web_front_end
@ -674,20 +675,19 @@ def setup_package():
global skip global skip
skip = not all([swift_test_auth, swift_test_user[0], swift_test_key[0]]) skip = not all([swift_test_auth, swift_test_user[0], swift_test_key[0]])
if skip: if skip:
print >>sys.stderr, 'SKIPPING FUNCTIONAL TESTS DUE TO NO CONFIG' print('SKIPPING FUNCTIONAL TESTS DUE TO NO CONFIG', file=sys.stderr)
global skip2 global skip2
skip2 = not all([not skip, swift_test_user[1], swift_test_key[1]]) skip2 = not all([not skip, swift_test_user[1], swift_test_key[1]])
if not skip and skip2: if not skip and skip2:
print >>sys.stderr, \ print('SKIPPING SECOND ACCOUNT FUNCTIONAL TESTS '
'SKIPPING SECOND ACCOUNT FUNCTIONAL TESTS' \ 'DUE TO NO CONFIG FOR THEM', file=sys.stderr)
' DUE TO NO CONFIG FOR THEM'
global skip3 global skip3
skip3 = not all([not skip, swift_test_user[2], swift_test_key[2]]) skip3 = not all([not skip, swift_test_user[2], swift_test_key[2]])
if not skip and skip3: if not skip and skip3:
print >>sys.stderr, \ print('SKIPPING THIRD ACCOUNT FUNCTIONAL TESTS'
'SKIPPING THIRD ACCOUNT FUNCTIONAL TESTS DUE TO NO CONFIG FOR THEM' 'DUE TO NO CONFIG FOR THEM', file=sys.stderr)
global skip_if_not_v3 global skip_if_not_v3
skip_if_not_v3 = (swift_test_auth_version != '3' skip_if_not_v3 = (swift_test_auth_version != '3'
@ -695,16 +695,17 @@ def setup_package():
swift_test_user[3], swift_test_user[3],
swift_test_key[3]])) swift_test_key[3]]))
if not skip and skip_if_not_v3: if not skip and skip_if_not_v3:
print >>sys.stderr, \ print('SKIPPING FUNCTIONAL TESTS SPECIFIC TO AUTH VERSION 3',
'SKIPPING FUNCTIONAL TESTS SPECIFIC TO AUTH VERSION 3' file=sys.stderr)
global skip_service_tokens global skip_service_tokens
skip_service_tokens = not all([not skip, swift_test_user[4], skip_service_tokens = not all([not skip, swift_test_user[4],
swift_test_key[4], swift_test_tenant[4], swift_test_key[4], swift_test_tenant[4],
swift_test_service_prefix]) swift_test_service_prefix])
if not skip and skip_service_tokens: if not skip and skip_service_tokens:
print >>sys.stderr, \ print(
'SKIPPING FUNCTIONAL TESTS SPECIFIC TO SERVICE TOKENS' 'SKIPPING FUNCTIONAL TESTS SPECIFIC TO SERVICE TOKENS',
file=sys.stderr)
if policy_specified: if policy_specified:
policies = FunctionalStoragePolicyCollection.from_info() policies = FunctionalStoragePolicyCollection.from_info()

View File

@ -236,6 +236,9 @@ class Connection(object):
if not cfg.get('no_auth_token'): if not cfg.get('no_auth_token'):
headers['X-Auth-Token'] = self.storage_token headers['X-Auth-Token'] = self.storage_token
if cfg.get('use_token'):
headers['X-Auth-Token'] = cfg.get('use_token')
if isinstance(hdrs, dict): if isinstance(hdrs, dict):
headers.update(hdrs) headers.update(hdrs)
return headers return headers
@ -507,6 +510,18 @@ class Container(Base):
return self.conn.make_request('PUT', self.path, hdrs=hdrs, return self.conn.make_request('PUT', self.path, hdrs=hdrs,
parms=parms, cfg=cfg) in (201, 202) parms=parms, cfg=cfg) in (201, 202)
def update_metadata(self, hdrs=None, cfg=None):
if hdrs is None:
hdrs = {}
if cfg is None:
cfg = {}
self.conn.make_request('POST', self.path, hdrs=hdrs, cfg=cfg)
if not 200 <= self.conn.response.status <= 299:
raise ResponseError(self.conn.response, 'POST',
self.conn.make_path(self.path))
return True
def delete(self, hdrs=None, parms=None): def delete(self, hdrs=None, parms=None):
if hdrs is None: if hdrs is None:
hdrs = {} hdrs = {}
@ -637,6 +652,9 @@ class File(Base):
else: else:
headers['Content-Length'] = 0 headers['Content-Length'] = 0
if cfg.get('use_token'):
headers['X-Auth-Token'] = cfg.get('use_token')
if cfg.get('no_content_type'): if cfg.get('no_content_type'):
pass pass
elif self.content_type: elif self.content_type:
@ -711,13 +729,13 @@ class File(Base):
return self.conn.make_request('COPY', self.path, hdrs=headers, return self.conn.make_request('COPY', self.path, hdrs=headers,
parms=parms) == 201 parms=parms) == 201
def delete(self, hdrs=None, parms=None): def delete(self, hdrs=None, parms=None, cfg=None):
if hdrs is None: if hdrs is None:
hdrs = {} hdrs = {}
if parms is None: if parms is None:
parms = {} parms = {}
if self.conn.make_request('DELETE', self.path, hdrs=hdrs, if self.conn.make_request('DELETE', self.path, hdrs=hdrs,
parms=parms) != 204: cfg=cfg, parms=parms) != 204:
raise ResponseError(self.conn.response, 'DELETE', raise ResponseError(self.conn.response, 'DELETE',
self.conn.make_path(self.path)) self.conn.make_path(self.path))

View File

@ -190,7 +190,7 @@ class TestAccount(unittest.TestCase):
# cannot read account # cannot read account
resp = retry(get, use_account=3) resp = retry(get, use_account=3)
resp.read() resp.read()
self.assertEquals(resp.status, 403) self.assertEqual(resp.status, 403)
# grant read access # grant read access
acl_user = tf.swift_test_user[2] acl_user = tf.swift_test_user[2]
@ -241,7 +241,7 @@ class TestAccount(unittest.TestCase):
# cannot read account # cannot read account
resp = retry(get, use_account=3) resp = retry(get, use_account=3)
resp.read() resp.read()
self.assertEquals(resp.status, 403) self.assertEqual(resp.status, 403)
# grant read-write access # grant read-write access
acl_user = tf.swift_test_user[2] acl_user = tf.swift_test_user[2]
@ -282,7 +282,7 @@ class TestAccount(unittest.TestCase):
# cannot read account # cannot read account
resp = retry(get, use_account=3) resp = retry(get, use_account=3)
resp.read() resp.read()
self.assertEquals(resp.status, 403) self.assertEqual(resp.status, 403)
# grant admin access # grant admin access
acl_user = tf.swift_test_user[2] acl_user = tf.swift_test_user[2]
@ -321,7 +321,7 @@ class TestAccount(unittest.TestCase):
# and again, cannot read account # and again, cannot read account
resp = retry(get, use_account=3) resp = retry(get, use_account=3)
resp.read() resp.read()
self.assertEquals(resp.status, 403) self.assertEqual(resp.status, 403)
@requires_acls @requires_acls
def test_protected_tempurl(self): def test_protected_tempurl(self):

View File

@ -729,7 +729,7 @@ class TestContainer(unittest.TestCase):
# cannot list containers # cannot list containers
resp = retry(get, use_account=3) resp = retry(get, use_account=3)
resp.read() resp.read()
self.assertEquals(resp.status, 403) self.assertEqual(resp.status, 403)
# grant read-only access # grant read-only access
acl_user = tf.swift_test_user[2] acl_user = tf.swift_test_user[2]
@ -742,22 +742,22 @@ class TestContainer(unittest.TestCase):
# read-only can list containers # read-only can list containers
resp = retry(get, use_account=3) resp = retry(get, use_account=3)
listing = resp.read() listing = resp.read()
self.assertEquals(resp.status, 200) self.assertEqual(resp.status, 200)
self.assertIn(self.name, listing) self.assertIn(self.name, listing)
# read-only can not create containers # read-only can not create containers
new_container_name = str(uuid4()) new_container_name = str(uuid4())
resp = retry(put, new_container_name, use_account=3) resp = retry(put, new_container_name, use_account=3)
resp.read() resp.read()
self.assertEquals(resp.status, 403) self.assertEqual(resp.status, 403)
# but it can see newly created ones # but it can see newly created ones
resp = retry(put, new_container_name, use_account=1) resp = retry(put, new_container_name, use_account=1)
resp.read() resp.read()
self.assertEquals(resp.status, 201) self.assertEqual(resp.status, 201)
resp = retry(get, use_account=3) resp = retry(get, use_account=3)
listing = resp.read() listing = resp.read()
self.assertEquals(resp.status, 200) self.assertEqual(resp.status, 200)
self.assertIn(new_container_name, listing) self.assertIn(new_container_name, listing)
@requires_acls @requires_acls
@ -788,13 +788,13 @@ class TestContainer(unittest.TestCase):
self.assertEqual(resp.status, 204) self.assertEqual(resp.status, 204)
resp = retry(get, self.name, use_account=1) resp = retry(get, self.name, use_account=1)
resp.read() resp.read()
self.assertEquals(resp.status, 204) self.assertEqual(resp.status, 204)
self.assertEqual(resp.getheader('X-Container-Meta-Test'), value) self.assertEqual(resp.getheader('X-Container-Meta-Test'), value)
# cannot see metadata # cannot see metadata
resp = retry(get, self.name, use_account=3) resp = retry(get, self.name, use_account=3)
resp.read() resp.read()
self.assertEquals(resp.status, 403) self.assertEqual(resp.status, 403)
# grant read-only access # grant read-only access
acl_user = tf.swift_test_user[2] acl_user = tf.swift_test_user[2]
@ -814,7 +814,7 @@ class TestContainer(unittest.TestCase):
# read-only can read container metadata # read-only can read container metadata
resp = retry(get, self.name, use_account=3) resp = retry(get, self.name, use_account=3)
resp.read() resp.read()
self.assertEquals(resp.status, 204) self.assertEqual(resp.status, 204)
self.assertEqual(resp.getheader('X-Container-Meta-Test'), value) self.assertEqual(resp.getheader('X-Container-Meta-Test'), value)
@requires_acls @requires_acls
@ -844,7 +844,7 @@ class TestContainer(unittest.TestCase):
# cannot list containers # cannot list containers
resp = retry(get, use_account=3) resp = retry(get, use_account=3)
resp.read() resp.read()
self.assertEquals(resp.status, 403) self.assertEqual(resp.status, 403)
# grant read-write access # grant read-write access
acl_user = tf.swift_test_user[2] acl_user = tf.swift_test_user[2]
@ -857,36 +857,36 @@ class TestContainer(unittest.TestCase):
# can list containers # can list containers
resp = retry(get, use_account=3) resp = retry(get, use_account=3)
listing = resp.read() listing = resp.read()
self.assertEquals(resp.status, 200) self.assertEqual(resp.status, 200)
self.assertIn(self.name, listing) self.assertIn(self.name, listing)
# can create new containers # can create new containers
new_container_name = str(uuid4()) new_container_name = str(uuid4())
resp = retry(put, new_container_name, use_account=3) resp = retry(put, new_container_name, use_account=3)
resp.read() resp.read()
self.assertEquals(resp.status, 201) self.assertEqual(resp.status, 201)
resp = retry(get, use_account=3) resp = retry(get, use_account=3)
listing = resp.read() listing = resp.read()
self.assertEquals(resp.status, 200) self.assertEqual(resp.status, 200)
self.assertIn(new_container_name, listing) self.assertIn(new_container_name, listing)
# can also delete them # can also delete them
resp = retry(delete, new_container_name, use_account=3) resp = retry(delete, new_container_name, use_account=3)
resp.read() resp.read()
self.assertEquals(resp.status, 204) self.assertEqual(resp.status, 204)
resp = retry(get, use_account=3) resp = retry(get, use_account=3)
listing = resp.read() listing = resp.read()
self.assertEquals(resp.status, 200) self.assertEqual(resp.status, 200)
self.assertNotIn(new_container_name, listing) self.assertNotIn(new_container_name, listing)
# even if they didn't create them # even if they didn't create them
empty_container_name = str(uuid4()) empty_container_name = str(uuid4())
resp = retry(put, empty_container_name, use_account=1) resp = retry(put, empty_container_name, use_account=1)
resp.read() resp.read()
self.assertEquals(resp.status, 201) self.assertEqual(resp.status, 201)
resp = retry(delete, empty_container_name, use_account=3) resp = retry(delete, empty_container_name, use_account=3)
resp.read() resp.read()
self.assertEquals(resp.status, 204) self.assertEqual(resp.status, 204)
@requires_acls @requires_acls
def test_read_write_acl_metadata(self): def test_read_write_acl_metadata(self):
@ -916,13 +916,13 @@ class TestContainer(unittest.TestCase):
self.assertEqual(resp.status, 204) self.assertEqual(resp.status, 204)
resp = retry(get, self.name, use_account=1) resp = retry(get, self.name, use_account=1)
resp.read() resp.read()
self.assertEquals(resp.status, 204) self.assertEqual(resp.status, 204)
self.assertEqual(resp.getheader('X-Container-Meta-Test'), value) self.assertEqual(resp.getheader('X-Container-Meta-Test'), value)
# cannot see metadata # cannot see metadata
resp = retry(get, self.name, use_account=3) resp = retry(get, self.name, use_account=3)
resp.read() resp.read()
self.assertEquals(resp.status, 403) self.assertEqual(resp.status, 403)
# grant read-write access # grant read-write access
acl_user = tf.swift_test_user[2] acl_user = tf.swift_test_user[2]
@ -935,7 +935,7 @@ class TestContainer(unittest.TestCase):
# read-write can read container metadata # read-write can read container metadata
resp = retry(get, self.name, use_account=3) resp = retry(get, self.name, use_account=3)
resp.read() resp.read()
self.assertEquals(resp.status, 204) self.assertEqual(resp.status, 204)
self.assertEqual(resp.getheader('X-Container-Meta-Test'), value) self.assertEqual(resp.getheader('X-Container-Meta-Test'), value)
# read-write can also write container metadata # read-write can also write container metadata
@ -943,20 +943,20 @@ class TestContainer(unittest.TestCase):
headers = {'x-container-meta-test': new_value} headers = {'x-container-meta-test': new_value}
resp = retry(post, self.name, headers=headers, use_account=3) resp = retry(post, self.name, headers=headers, use_account=3)
resp.read() resp.read()
self.assertEquals(resp.status, 204) self.assertEqual(resp.status, 204)
resp = retry(get, self.name, use_account=3) resp = retry(get, self.name, use_account=3)
resp.read() resp.read()
self.assertEquals(resp.status, 204) self.assertEqual(resp.status, 204)
self.assertEqual(resp.getheader('X-Container-Meta-Test'), new_value) self.assertEqual(resp.getheader('X-Container-Meta-Test'), new_value)
# and remove it # and remove it
headers = {'x-remove-container-meta-test': 'true'} headers = {'x-remove-container-meta-test': 'true'}
resp = retry(post, self.name, headers=headers, use_account=3) resp = retry(post, self.name, headers=headers, use_account=3)
resp.read() resp.read()
self.assertEquals(resp.status, 204) self.assertEqual(resp.status, 204)
resp = retry(get, self.name, use_account=3) resp = retry(get, self.name, use_account=3)
resp.read() resp.read()
self.assertEquals(resp.status, 204) self.assertEqual(resp.status, 204)
self.assertEqual(resp.getheader('X-Container-Meta-Test'), None) self.assertEqual(resp.getheader('X-Container-Meta-Test'), None)
@requires_acls @requires_acls
@ -986,7 +986,7 @@ class TestContainer(unittest.TestCase):
# cannot list containers # cannot list containers
resp = retry(get, use_account=3) resp = retry(get, use_account=3)
resp.read() resp.read()
self.assertEquals(resp.status, 403) self.assertEqual(resp.status, 403)
# grant admin access # grant admin access
acl_user = tf.swift_test_user[2] acl_user = tf.swift_test_user[2]
@ -999,36 +999,36 @@ class TestContainer(unittest.TestCase):
# can list containers # can list containers
resp = retry(get, use_account=3) resp = retry(get, use_account=3)
listing = resp.read() listing = resp.read()
self.assertEquals(resp.status, 200) self.assertEqual(resp.status, 200)
self.assertIn(self.name, listing) self.assertIn(self.name, listing)
# can create new containers # can create new containers
new_container_name = str(uuid4()) new_container_name = str(uuid4())
resp = retry(put, new_container_name, use_account=3) resp = retry(put, new_container_name, use_account=3)
resp.read() resp.read()
self.assertEquals(resp.status, 201) self.assertEqual(resp.status, 201)
resp = retry(get, use_account=3) resp = retry(get, use_account=3)
listing = resp.read() listing = resp.read()
self.assertEquals(resp.status, 200) self.assertEqual(resp.status, 200)
self.assertIn(new_container_name, listing) self.assertIn(new_container_name, listing)
# can also delete them # can also delete them
resp = retry(delete, new_container_name, use_account=3) resp = retry(delete, new_container_name, use_account=3)
resp.read() resp.read()
self.assertEquals(resp.status, 204) self.assertEqual(resp.status, 204)
resp = retry(get, use_account=3) resp = retry(get, use_account=3)
listing = resp.read() listing = resp.read()
self.assertEquals(resp.status, 200) self.assertEqual(resp.status, 200)
self.assertNotIn(new_container_name, listing) self.assertNotIn(new_container_name, listing)
# even if they didn't create them # even if they didn't create them
empty_container_name = str(uuid4()) empty_container_name = str(uuid4())
resp = retry(put, empty_container_name, use_account=1) resp = retry(put, empty_container_name, use_account=1)
resp.read() resp.read()
self.assertEquals(resp.status, 201) self.assertEqual(resp.status, 201)
resp = retry(delete, empty_container_name, use_account=3) resp = retry(delete, empty_container_name, use_account=3)
resp.read() resp.read()
self.assertEquals(resp.status, 204) self.assertEqual(resp.status, 204)
@requires_acls @requires_acls
def test_admin_acl_metadata(self): def test_admin_acl_metadata(self):
@ -1058,13 +1058,13 @@ class TestContainer(unittest.TestCase):
self.assertEqual(resp.status, 204) self.assertEqual(resp.status, 204)
resp = retry(get, self.name, use_account=1) resp = retry(get, self.name, use_account=1)
resp.read() resp.read()
self.assertEquals(resp.status, 204) self.assertEqual(resp.status, 204)
self.assertEqual(resp.getheader('X-Container-Meta-Test'), value) self.assertEqual(resp.getheader('X-Container-Meta-Test'), value)
# cannot see metadata # cannot see metadata
resp = retry(get, self.name, use_account=3) resp = retry(get, self.name, use_account=3)
resp.read() resp.read()
self.assertEquals(resp.status, 403) self.assertEqual(resp.status, 403)
# grant access # grant access
acl_user = tf.swift_test_user[2] acl_user = tf.swift_test_user[2]
@ -1077,7 +1077,7 @@ class TestContainer(unittest.TestCase):
# can read container metadata # can read container metadata
resp = retry(get, self.name, use_account=3) resp = retry(get, self.name, use_account=3)
resp.read() resp.read()
self.assertEquals(resp.status, 204) self.assertEqual(resp.status, 204)
self.assertEqual(resp.getheader('X-Container-Meta-Test'), value) self.assertEqual(resp.getheader('X-Container-Meta-Test'), value)
# can also write container metadata # can also write container metadata
@ -1085,20 +1085,20 @@ class TestContainer(unittest.TestCase):
headers = {'x-container-meta-test': new_value} headers = {'x-container-meta-test': new_value}
resp = retry(post, self.name, headers=headers, use_account=3) resp = retry(post, self.name, headers=headers, use_account=3)
resp.read() resp.read()
self.assertEquals(resp.status, 204) self.assertEqual(resp.status, 204)
resp = retry(get, self.name, use_account=3) resp = retry(get, self.name, use_account=3)
resp.read() resp.read()
self.assertEquals(resp.status, 204) self.assertEqual(resp.status, 204)
self.assertEqual(resp.getheader('X-Container-Meta-Test'), new_value) self.assertEqual(resp.getheader('X-Container-Meta-Test'), new_value)
# and remove it # and remove it
headers = {'x-remove-container-meta-test': 'true'} headers = {'x-remove-container-meta-test': 'true'}
resp = retry(post, self.name, headers=headers, use_account=3) resp = retry(post, self.name, headers=headers, use_account=3)
resp.read() resp.read()
self.assertEquals(resp.status, 204) self.assertEqual(resp.status, 204)
resp = retry(get, self.name, use_account=3) resp = retry(get, self.name, use_account=3)
resp.read() resp.read()
self.assertEquals(resp.status, 204) self.assertEqual(resp.status, 204)
self.assertEqual(resp.getheader('X-Container-Meta-Test'), None) self.assertEqual(resp.getheader('X-Container-Meta-Test'), None)
@requires_acls @requires_acls
@ -1132,7 +1132,7 @@ class TestContainer(unittest.TestCase):
self.assertEqual(resp.status, 204) self.assertEqual(resp.status, 204)
resp = retry(get, self.name, use_account=1) resp = retry(get, self.name, use_account=1)
resp.read() resp.read()
self.assertEquals(resp.status, 204) self.assertEqual(resp.status, 204)
self.assertEqual(resp.getheader('X-Container-Sync-Key'), 'secret') self.assertEqual(resp.getheader('X-Container-Sync-Key'), 'secret')
self.assertEqual(resp.getheader('X-Container-Meta-Test'), value) self.assertEqual(resp.getheader('X-Container-Meta-Test'), value)
@ -1147,7 +1147,7 @@ class TestContainer(unittest.TestCase):
# can read container metadata # can read container metadata
resp = retry(get, self.name, use_account=3) resp = retry(get, self.name, use_account=3)
resp.read() resp.read()
self.assertEquals(resp.status, 204) self.assertEqual(resp.status, 204)
self.assertEqual(resp.getheader('X-Container-Meta-Test'), value) self.assertEqual(resp.getheader('X-Container-Meta-Test'), value)
# but not sync-key # but not sync-key
self.assertEqual(resp.getheader('X-Container-Sync-Key'), None) self.assertEqual(resp.getheader('X-Container-Sync-Key'), None)
@ -1169,7 +1169,7 @@ class TestContainer(unittest.TestCase):
# can read container metadata # can read container metadata
resp = retry(get, self.name, use_account=3) resp = retry(get, self.name, use_account=3)
resp.read() resp.read()
self.assertEquals(resp.status, 204) self.assertEqual(resp.status, 204)
self.assertEqual(resp.getheader('X-Container-Meta-Test'), value) self.assertEqual(resp.getheader('X-Container-Meta-Test'), value)
# but not sync-key # but not sync-key
self.assertEqual(resp.getheader('X-Container-Sync-Key'), None) self.assertEqual(resp.getheader('X-Container-Sync-Key'), None)
@ -1177,7 +1177,7 @@ class TestContainer(unittest.TestCase):
# sanity check sync-key w/ account1 # sanity check sync-key w/ account1
resp = retry(get, self.name, use_account=1) resp = retry(get, self.name, use_account=1)
resp.read() resp.read()
self.assertEquals(resp.status, 204) self.assertEqual(resp.status, 204)
self.assertEqual(resp.getheader('X-Container-Sync-Key'), 'secret') self.assertEqual(resp.getheader('X-Container-Sync-Key'), 'secret')
# and can write # and can write
@ -1191,7 +1191,7 @@ class TestContainer(unittest.TestCase):
self.assertEqual(resp.status, 204) self.assertEqual(resp.status, 204)
resp = retry(get, self.name, use_account=1) # validate w/ account1 resp = retry(get, self.name, use_account=1) # validate w/ account1
resp.read() resp.read()
self.assertEquals(resp.status, 204) self.assertEqual(resp.status, 204)
self.assertEqual(resp.getheader('X-Container-Meta-Test'), new_value) self.assertEqual(resp.getheader('X-Container-Meta-Test'), new_value)
# but can not write sync-key # but can not write sync-key
self.assertEqual(resp.getheader('X-Container-Sync-Key'), 'secret') self.assertEqual(resp.getheader('X-Container-Sync-Key'), 'secret')
@ -1207,7 +1207,7 @@ class TestContainer(unittest.TestCase):
# admin can read container metadata # admin can read container metadata
resp = retry(get, self.name, use_account=3) resp = retry(get, self.name, use_account=3)
resp.read() resp.read()
self.assertEquals(resp.status, 204) self.assertEqual(resp.status, 204)
self.assertEqual(resp.getheader('X-Container-Meta-Test'), new_value) self.assertEqual(resp.getheader('X-Container-Meta-Test'), new_value)
# and ALSO sync-key # and ALSO sync-key
self.assertEqual(resp.getheader('X-Container-Sync-Key'), 'secret') self.assertEqual(resp.getheader('X-Container-Sync-Key'), 'secret')
@ -1220,7 +1220,7 @@ class TestContainer(unittest.TestCase):
self.assertEqual(resp.status, 204) self.assertEqual(resp.status, 204)
resp = retry(get, self.name, use_account=3) resp = retry(get, self.name, use_account=3)
resp.read() resp.read()
self.assertEquals(resp.status, 204) self.assertEqual(resp.status, 204)
self.assertEqual(resp.getheader('X-Container-Sync-Key'), new_secret) self.assertEqual(resp.getheader('X-Container-Sync-Key'), new_secret)
@requires_acls @requires_acls
@ -1255,7 +1255,7 @@ class TestContainer(unittest.TestCase):
self.assertEqual(resp.status, 204) self.assertEqual(resp.status, 204)
resp = retry(get, self.name, use_account=1) resp = retry(get, self.name, use_account=1)
resp.read() resp.read()
self.assertEquals(resp.status, 204) self.assertEqual(resp.status, 204)
self.assertEqual(resp.getheader('X-Container-Read'), 'jdoe') self.assertEqual(resp.getheader('X-Container-Read'), 'jdoe')
self.assertEqual(resp.getheader('X-Container-Write'), 'jdoe') self.assertEqual(resp.getheader('X-Container-Write'), 'jdoe')
self.assertEqual(resp.getheader('X-Container-Meta-Test'), value) self.assertEqual(resp.getheader('X-Container-Meta-Test'), value)
@ -1271,7 +1271,7 @@ class TestContainer(unittest.TestCase):
# can read container metadata # can read container metadata
resp = retry(get, self.name, use_account=3) resp = retry(get, self.name, use_account=3)
resp.read() resp.read()
self.assertEquals(resp.status, 204) self.assertEqual(resp.status, 204)
self.assertEqual(resp.getheader('X-Container-Meta-Test'), value) self.assertEqual(resp.getheader('X-Container-Meta-Test'), value)
# but not container acl # but not container acl
self.assertEqual(resp.getheader('X-Container-Read'), None) self.assertEqual(resp.getheader('X-Container-Read'), None)
@ -1297,7 +1297,7 @@ class TestContainer(unittest.TestCase):
# can read container metadata # can read container metadata
resp = retry(get, self.name, use_account=3) resp = retry(get, self.name, use_account=3)
resp.read() resp.read()
self.assertEquals(resp.status, 204) self.assertEqual(resp.status, 204)
self.assertEqual(resp.getheader('X-Container-Meta-Test'), value) self.assertEqual(resp.getheader('X-Container-Meta-Test'), value)
# but not container acl # but not container acl
self.assertEqual(resp.getheader('X-Container-Read'), None) self.assertEqual(resp.getheader('X-Container-Read'), None)
@ -1306,7 +1306,7 @@ class TestContainer(unittest.TestCase):
# sanity check container acls with account1 # sanity check container acls with account1
resp = retry(get, self.name, use_account=1) resp = retry(get, self.name, use_account=1)
resp.read() resp.read()
self.assertEquals(resp.status, 204) self.assertEqual(resp.status, 204)
self.assertEqual(resp.getheader('X-Container-Read'), 'jdoe') self.assertEqual(resp.getheader('X-Container-Read'), 'jdoe')
self.assertEqual(resp.getheader('X-Container-Write'), 'jdoe') self.assertEqual(resp.getheader('X-Container-Write'), 'jdoe')
@ -1322,7 +1322,7 @@ class TestContainer(unittest.TestCase):
self.assertEqual(resp.status, 204) self.assertEqual(resp.status, 204)
resp = retry(get, self.name, use_account=1) # validate w/ account1 resp = retry(get, self.name, use_account=1) # validate w/ account1
resp.read() resp.read()
self.assertEquals(resp.status, 204) self.assertEqual(resp.status, 204)
self.assertEqual(resp.getheader('X-Container-Meta-Test'), new_value) self.assertEqual(resp.getheader('X-Container-Meta-Test'), new_value)
# but can not write container acls # but can not write container acls
self.assertEqual(resp.getheader('X-Container-Read'), 'jdoe') self.assertEqual(resp.getheader('X-Container-Read'), 'jdoe')
@ -1339,7 +1339,7 @@ class TestContainer(unittest.TestCase):
# admin can read container metadata # admin can read container metadata
resp = retry(get, self.name, use_account=3) resp = retry(get, self.name, use_account=3)
resp.read() resp.read()
self.assertEquals(resp.status, 204) self.assertEqual(resp.status, 204)
self.assertEqual(resp.getheader('X-Container-Meta-Test'), new_value) self.assertEqual(resp.getheader('X-Container-Meta-Test'), new_value)
# and ALSO container acls # and ALSO container acls
self.assertEqual(resp.getheader('X-Container-Read'), 'jdoe') self.assertEqual(resp.getheader('X-Container-Read'), 'jdoe')
@ -1355,7 +1355,7 @@ class TestContainer(unittest.TestCase):
self.assertEqual(resp.status, 204) self.assertEqual(resp.status, 204)
resp = retry(get, self.name, use_account=3) resp = retry(get, self.name, use_account=3)
resp.read() resp.read()
self.assertEquals(resp.status, 204) self.assertEqual(resp.status, 204)
self.assertEqual(resp.getheader('X-Container-Read'), '.r:*') self.assertEqual(resp.getheader('X-Container-Read'), '.r:*')
def test_long_name_content_type(self): def test_long_name_content_type(self):
@ -1415,8 +1415,8 @@ class TestContainer(unittest.TestCase):
resp = retry(head) resp = retry(head)
resp.read() resp.read()
headers = dict((k.lower(), v) for k, v in resp.getheaders()) headers = dict((k.lower(), v) for k, v in resp.getheaders())
self.assertEquals(headers.get('x-storage-policy'), self.assertEqual(headers.get('x-storage-policy'),
default_policy['name']) default_policy['name'])
def test_error_invalid_storage_policy_name(self): def test_error_invalid_storage_policy_name(self):
def put(url, token, parsed, conn, headers): def put(url, token, parsed, conn, headers):
@ -1453,8 +1453,8 @@ class TestContainer(unittest.TestCase):
resp = retry(head) resp = retry(head)
resp.read() resp.read()
headers = dict((k.lower(), v) for k, v in resp.getheaders()) headers = dict((k.lower(), v) for k, v in resp.getheaders())
self.assertEquals(headers.get('x-storage-policy'), self.assertEqual(headers.get('x-storage-policy'),
policy['name']) policy['name'])
# and test recreate with-out specifying Storage Policy # and test recreate with-out specifying Storage Policy
resp = retry(put) resp = retry(put)
@ -1464,8 +1464,8 @@ class TestContainer(unittest.TestCase):
resp = retry(head) resp = retry(head)
resp.read() resp.read()
headers = dict((k.lower(), v) for k, v in resp.getheaders()) headers = dict((k.lower(), v) for k, v in resp.getheaders())
self.assertEquals(headers.get('x-storage-policy'), self.assertEqual(headers.get('x-storage-policy'),
policy['name']) policy['name'])
# delete it # delete it
def delete(url, token, parsed, conn): def delete(url, token, parsed, conn):
@ -1480,7 +1480,7 @@ class TestContainer(unittest.TestCase):
resp = retry(head) resp = retry(head)
resp.read() resp.read()
headers = dict((k.lower(), v) for k, v in resp.getheaders()) headers = dict((k.lower(), v) for k, v in resp.getheaders())
self.assertEquals(headers.get('x-storage-policy'), None) self.assertEqual(headers.get('x-storage-policy'), None)
@requires_policies @requires_policies
def test_conflict_change_storage_policy_with_put(self): def test_conflict_change_storage_policy_with_put(self):
@ -1510,8 +1510,8 @@ class TestContainer(unittest.TestCase):
resp = retry(head) resp = retry(head)
resp.read() resp.read()
headers = dict((k.lower(), v) for k, v in resp.getheaders()) headers = dict((k.lower(), v) for k, v in resp.getheaders())
self.assertEquals(headers.get('x-storage-policy'), self.assertEqual(headers.get('x-storage-policy'),
policy['name']) policy['name'])
@requires_policies @requires_policies
def test_noop_change_storage_policy_with_post(self): def test_noop_change_storage_policy_with_post(self):
@ -1547,8 +1547,8 @@ class TestContainer(unittest.TestCase):
resp = retry(head) resp = retry(head)
resp.read() resp.read()
headers = dict((k.lower(), v) for k, v in resp.getheaders()) headers = dict((k.lower(), v) for k, v in resp.getheaders())
self.assertEquals(headers.get('x-storage-policy'), self.assertEqual(headers.get('x-storage-policy'),
policy['name']) policy['name'])
class BaseTestContainerACLs(unittest.TestCase): class BaseTestContainerACLs(unittest.TestCase):

View File

@ -119,10 +119,10 @@ class TestObject(unittest.TestCase):
return check_response(conn) return check_response(conn)
resp = retry(put) resp = retry(put)
resp.read() resp.read()
self.assertEquals(resp.status, 201) self.assertEqual(resp.status, 201)
resp = retry(put) resp = retry(put)
resp.read() resp.read()
self.assertEquals(resp.status, 412) self.assertEqual(resp.status, 412)
def put(url, token, parsed, conn): def put(url, token, parsed, conn):
conn.request('PUT', '%s/%s/%s' % ( conn.request('PUT', '%s/%s/%s' % (
@ -133,7 +133,7 @@ class TestObject(unittest.TestCase):
return check_response(conn) return check_response(conn)
resp = retry(put) resp = retry(put)
resp.read() resp.read()
self.assertEquals(resp.status, 400) self.assertEqual(resp.status, 400)
def test_non_integer_x_delete_after(self): def test_non_integer_x_delete_after(self):
def put(url, token, parsed, conn): def put(url, token, parsed, conn):
@ -145,7 +145,7 @@ class TestObject(unittest.TestCase):
return check_response(conn) return check_response(conn)
resp = retry(put) resp = retry(put)
body = resp.read() body = resp.read()
self.assertEquals(resp.status, 400) self.assertEqual(resp.status, 400)
self.assertEqual(body, 'Non-integer X-Delete-After') self.assertEqual(body, 'Non-integer X-Delete-After')
def test_non_integer_x_delete_at(self): def test_non_integer_x_delete_at(self):
@ -158,7 +158,7 @@ class TestObject(unittest.TestCase):
return check_response(conn) return check_response(conn)
resp = retry(put) resp = retry(put)
body = resp.read() body = resp.read()
self.assertEquals(resp.status, 400) self.assertEqual(resp.status, 400)
self.assertEqual(body, 'Non-integer X-Delete-At') self.assertEqual(body, 'Non-integer X-Delete-At')
def test_x_delete_at_in_the_past(self): def test_x_delete_at_in_the_past(self):
@ -171,7 +171,7 @@ class TestObject(unittest.TestCase):
return check_response(conn) return check_response(conn)
resp = retry(put) resp = retry(put)
body = resp.read() body = resp.read()
self.assertEquals(resp.status, 400) self.assertEqual(resp.status, 400)
self.assertEqual(body, 'X-Delete-At in past') self.assertEqual(body, 'X-Delete-At in past')
def test_copy_object(self): def test_copy_object(self):
@ -543,12 +543,12 @@ class TestObject(unittest.TestCase):
# cannot list objects # cannot list objects
resp = retry(get_listing, use_account=3) resp = retry(get_listing, use_account=3)
resp.read() resp.read()
self.assertEquals(resp.status, 403) self.assertEqual(resp.status, 403)
# cannot get object # cannot get object
resp = retry(get, self.obj, use_account=3) resp = retry(get, self.obj, use_account=3)
resp.read() resp.read()
self.assertEquals(resp.status, 403) self.assertEqual(resp.status, 403)
# grant read-only access # grant read-only access
acl_user = tf.swift_test_user[2] acl_user = tf.swift_test_user[2]
@ -561,30 +561,30 @@ class TestObject(unittest.TestCase):
# can list objects # can list objects
resp = retry(get_listing, use_account=3) resp = retry(get_listing, use_account=3)
listing = resp.read() listing = resp.read()
self.assertEquals(resp.status, 200) self.assertEqual(resp.status, 200)
self.assertIn(self.obj, listing) self.assertIn(self.obj, listing)
# can get object # can get object
resp = retry(get, self.obj, use_account=3) resp = retry(get, self.obj, use_account=3)
body = resp.read() body = resp.read()
self.assertEquals(resp.status, 200) self.assertEqual(resp.status, 200)
self.assertEquals(body, 'test') self.assertEqual(body, 'test')
# can not put an object # can not put an object
obj_name = str(uuid4()) obj_name = str(uuid4())
resp = retry(put, obj_name, use_account=3) resp = retry(put, obj_name, use_account=3)
body = resp.read() body = resp.read()
self.assertEquals(resp.status, 403) self.assertEqual(resp.status, 403)
# can not delete an object # can not delete an object
resp = retry(delete, self.obj, use_account=3) resp = retry(delete, self.obj, use_account=3)
body = resp.read() body = resp.read()
self.assertEquals(resp.status, 403) self.assertEqual(resp.status, 403)
# sanity with account1 # sanity with account1
resp = retry(get_listing, use_account=3) resp = retry(get_listing, use_account=3)
listing = resp.read() listing = resp.read()
self.assertEquals(resp.status, 200) self.assertEqual(resp.status, 200)
self.assertNotIn(obj_name, listing) self.assertNotIn(obj_name, listing)
self.assertIn(self.obj, listing) self.assertIn(self.obj, listing)
@ -624,12 +624,12 @@ class TestObject(unittest.TestCase):
# cannot list objects # cannot list objects
resp = retry(get_listing, use_account=3) resp = retry(get_listing, use_account=3)
resp.read() resp.read()
self.assertEquals(resp.status, 403) self.assertEqual(resp.status, 403)
# cannot get object # cannot get object
resp = retry(get, self.obj, use_account=3) resp = retry(get, self.obj, use_account=3)
resp.read() resp.read()
self.assertEquals(resp.status, 403) self.assertEqual(resp.status, 403)
# grant read-write access # grant read-write access
acl_user = tf.swift_test_user[2] acl_user = tf.swift_test_user[2]
@ -642,30 +642,30 @@ class TestObject(unittest.TestCase):
# can list objects # can list objects
resp = retry(get_listing, use_account=3) resp = retry(get_listing, use_account=3)
listing = resp.read() listing = resp.read()
self.assertEquals(resp.status, 200) self.assertEqual(resp.status, 200)
self.assertIn(self.obj, listing) self.assertIn(self.obj, listing)
# can get object # can get object
resp = retry(get, self.obj, use_account=3) resp = retry(get, self.obj, use_account=3)
body = resp.read() body = resp.read()
self.assertEquals(resp.status, 200) self.assertEqual(resp.status, 200)
self.assertEquals(body, 'test') self.assertEqual(body, 'test')
# can put an object # can put an object
obj_name = str(uuid4()) obj_name = str(uuid4())
resp = retry(put, obj_name, use_account=3) resp = retry(put, obj_name, use_account=3)
body = resp.read() body = resp.read()
self.assertEquals(resp.status, 201) self.assertEqual(resp.status, 201)
# can delete an object # can delete an object
resp = retry(delete, self.obj, use_account=3) resp = retry(delete, self.obj, use_account=3)
body = resp.read() body = resp.read()
self.assertEquals(resp.status, 204) self.assertEqual(resp.status, 204)
# sanity with account1 # sanity with account1
resp = retry(get_listing, use_account=3) resp = retry(get_listing, use_account=3)
listing = resp.read() listing = resp.read()
self.assertEquals(resp.status, 200) self.assertEqual(resp.status, 200)
self.assertIn(obj_name, listing) self.assertIn(obj_name, listing)
self.assertNotIn(self.obj, listing) self.assertNotIn(self.obj, listing)
@ -705,12 +705,12 @@ class TestObject(unittest.TestCase):
# cannot list objects # cannot list objects
resp = retry(get_listing, use_account=3) resp = retry(get_listing, use_account=3)
resp.read() resp.read()
self.assertEquals(resp.status, 403) self.assertEqual(resp.status, 403)
# cannot get object # cannot get object
resp = retry(get, self.obj, use_account=3) resp = retry(get, self.obj, use_account=3)
resp.read() resp.read()
self.assertEquals(resp.status, 403) self.assertEqual(resp.status, 403)
# grant admin access # grant admin access
acl_user = tf.swift_test_user[2] acl_user = tf.swift_test_user[2]
@ -723,30 +723,30 @@ class TestObject(unittest.TestCase):
# can list objects # can list objects
resp = retry(get_listing, use_account=3) resp = retry(get_listing, use_account=3)
listing = resp.read() listing = resp.read()
self.assertEquals(resp.status, 200) self.assertEqual(resp.status, 200)
self.assertIn(self.obj, listing) self.assertIn(self.obj, listing)
# can get object # can get object
resp = retry(get, self.obj, use_account=3) resp = retry(get, self.obj, use_account=3)
body = resp.read() body = resp.read()
self.assertEquals(resp.status, 200) self.assertEqual(resp.status, 200)
self.assertEquals(body, 'test') self.assertEqual(body, 'test')
# can put an object # can put an object
obj_name = str(uuid4()) obj_name = str(uuid4())
resp = retry(put, obj_name, use_account=3) resp = retry(put, obj_name, use_account=3)
body = resp.read() body = resp.read()
self.assertEquals(resp.status, 201) self.assertEqual(resp.status, 201)
# can delete an object # can delete an object
resp = retry(delete, self.obj, use_account=3) resp = retry(delete, self.obj, use_account=3)
body = resp.read() body = resp.read()
self.assertEquals(resp.status, 204) self.assertEqual(resp.status, 204)
# sanity with account1 # sanity with account1
resp = retry(get_listing, use_account=3) resp = retry(get_listing, use_account=3)
listing = resp.read() listing = resp.read()
self.assertEquals(resp.status, 200) self.assertEqual(resp.status, 200)
self.assertIn(obj_name, listing) self.assertIn(obj_name, listing)
self.assertNotIn(self.obj, listing) self.assertNotIn(self.obj, listing)
@ -1113,78 +1113,78 @@ class TestObject(unittest.TestCase):
resp = retry(put_cors_cont, '*') resp = retry(put_cors_cont, '*')
resp.read() resp.read()
self.assertEquals(resp.status // 100, 2) self.assertEqual(resp.status // 100, 2)
resp = retry(put_obj, 'cat') resp = retry(put_obj, 'cat')
resp.read() resp.read()
self.assertEquals(resp.status // 100, 2) self.assertEqual(resp.status // 100, 2)
resp = retry(check_cors, resp = retry(check_cors,
'OPTIONS', 'cat', {'Origin': 'http://m.com'}) 'OPTIONS', 'cat', {'Origin': 'http://m.com'})
self.assertEquals(resp.status, 401) self.assertEqual(resp.status, 401)
resp = retry(check_cors, resp = retry(check_cors,
'OPTIONS', 'cat', 'OPTIONS', 'cat',
{'Origin': 'http://m.com', {'Origin': 'http://m.com',
'Access-Control-Request-Method': 'GET'}) 'Access-Control-Request-Method': 'GET'})
self.assertEquals(resp.status, 200) self.assertEqual(resp.status, 200)
resp.read() resp.read()
headers = dict((k.lower(), v) for k, v in resp.getheaders()) headers = dict((k.lower(), v) for k, v in resp.getheaders())
self.assertEquals(headers.get('access-control-allow-origin'), self.assertEqual(headers.get('access-control-allow-origin'),
'*') '*')
resp = retry(check_cors, resp = retry(check_cors,
'GET', 'cat', {'Origin': 'http://m.com'}) 'GET', 'cat', {'Origin': 'http://m.com'})
self.assertEquals(resp.status, 200) self.assertEqual(resp.status, 200)
headers = dict((k.lower(), v) for k, v in resp.getheaders()) headers = dict((k.lower(), v) for k, v in resp.getheaders())
self.assertEquals(headers.get('access-control-allow-origin'), self.assertEqual(headers.get('access-control-allow-origin'),
'*') '*')
resp = retry(check_cors, resp = retry(check_cors,
'GET', 'cat', {'Origin': 'http://m.com', 'GET', 'cat', {'Origin': 'http://m.com',
'X-Web-Mode': 'True'}) 'X-Web-Mode': 'True'})
self.assertEquals(resp.status, 200) self.assertEqual(resp.status, 200)
headers = dict((k.lower(), v) for k, v in resp.getheaders()) headers = dict((k.lower(), v) for k, v in resp.getheaders())
self.assertEquals(headers.get('access-control-allow-origin'), self.assertEqual(headers.get('access-control-allow-origin'),
'*') '*')
#################### ####################
resp = retry(put_cors_cont, 'http://secret.com') resp = retry(put_cors_cont, 'http://secret.com')
resp.read() resp.read()
self.assertEquals(resp.status // 100, 2) self.assertEqual(resp.status // 100, 2)
resp = retry(check_cors, resp = retry(check_cors,
'OPTIONS', 'cat', 'OPTIONS', 'cat',
{'Origin': 'http://m.com', {'Origin': 'http://m.com',
'Access-Control-Request-Method': 'GET'}) 'Access-Control-Request-Method': 'GET'})
resp.read() resp.read()
self.assertEquals(resp.status, 401) self.assertEqual(resp.status, 401)
if strict_cors: if strict_cors:
resp = retry(check_cors, resp = retry(check_cors,
'GET', 'cat', {'Origin': 'http://m.com'}) 'GET', 'cat', {'Origin': 'http://m.com'})
resp.read() resp.read()
self.assertEquals(resp.status, 200) self.assertEqual(resp.status, 200)
headers = dict((k.lower(), v) for k, v in resp.getheaders()) headers = dict((k.lower(), v) for k, v in resp.getheaders())
self.assertNotIn('access-control-allow-origin', headers) self.assertNotIn('access-control-allow-origin', headers)
resp = retry(check_cors, resp = retry(check_cors,
'GET', 'cat', {'Origin': 'http://secret.com'}) 'GET', 'cat', {'Origin': 'http://secret.com'})
resp.read() resp.read()
self.assertEquals(resp.status, 200) self.assertEqual(resp.status, 200)
headers = dict((k.lower(), v) for k, v in resp.getheaders()) headers = dict((k.lower(), v) for k, v in resp.getheaders())
self.assertEquals(headers.get('access-control-allow-origin'), self.assertEqual(headers.get('access-control-allow-origin'),
'http://secret.com') 'http://secret.com')
else: else:
resp = retry(check_cors, resp = retry(check_cors,
'GET', 'cat', {'Origin': 'http://m.com'}) 'GET', 'cat', {'Origin': 'http://m.com'})
resp.read() resp.read()
self.assertEquals(resp.status, 200) self.assertEqual(resp.status, 200)
headers = dict((k.lower(), v) for k, v in resp.getheaders()) headers = dict((k.lower(), v) for k, v in resp.getheaders())
self.assertEquals(headers.get('access-control-allow-origin'), self.assertEqual(headers.get('access-control-allow-origin'),
'http://m.com') 'http://m.com')
@requires_policies @requires_policies
def test_cross_policy_copy(self): def test_cross_policy_copy(self):

View File

@ -133,7 +133,7 @@ class TestAccount(Base):
def testInvalidUTF8Path(self): def testInvalidUTF8Path(self):
invalid_utf8 = Utils.create_utf8_name()[::-1] invalid_utf8 = Utils.create_utf8_name()[::-1]
container = self.env.account.container(invalid_utf8) container = self.env.account.container(invalid_utf8)
self.assertTrue(not container.create(cfg={'no_path_quote': True})) self.assertFalse(container.create(cfg={'no_path_quote': True}))
self.assert_status(412) self.assert_status(412)
self.assert_body('Invalid UTF8 or contains NULL') self.assert_body('Invalid UTF8 or contains NULL')
@ -313,7 +313,7 @@ class TestAccountNoContainers(Base):
def testGetRequest(self): def testGetRequest(self):
for format_type in [None, 'json', 'xml']: for format_type in [None, 'json', 'xml']:
self.assertTrue(not self.env.account.containers( self.assertFalse(self.env.account.containers(
parms={'format': format_type})) parms={'format': format_type}))
if format_type is None: if format_type is None:
@ -371,7 +371,7 @@ class TestContainer(Base):
self.assertTrue(cont.create()) self.assertTrue(cont.create())
self.assert_status(201) self.assert_status(201)
else: else:
self.assertTrue(not cont.create()) self.assertFalse(cont.create())
self.assert_status(400) self.assert_status(400)
def testFileThenContainerDelete(self): def testFileThenContainerDelete(self):
@ -441,14 +441,14 @@ class TestContainer(Base):
def testListDelimiter(self): def testListDelimiter(self):
cont = self.env.account.container(Utils.create_name()) cont = self.env.account.container(Utils.create_name())
self.assert_(cont.create()) self.assertTrue(cont.create())
delimiter = '-' delimiter = '-'
files = ['test', delimiter.join(['test', 'bar']), files = ['test', delimiter.join(['test', 'bar']),
delimiter.join(['test', 'foo'])] delimiter.join(['test', 'foo'])]
for f in files: for f in files:
file_item = cont.file(f) file_item = cont.file(f)
self.assert_(file_item.write_random()) self.assertTrue(file_item.write_random())
results = cont.files() results = cont.files()
results = cont.files(parms={'delimiter': delimiter}) results = cont.files(parms={'delimiter': delimiter})
@ -456,13 +456,13 @@ class TestContainer(Base):
def testListDelimiterAndPrefix(self): def testListDelimiterAndPrefix(self):
cont = self.env.account.container(Utils.create_name()) cont = self.env.account.container(Utils.create_name())
self.assert_(cont.create()) self.assertTrue(cont.create())
delimiter = 'a' delimiter = 'a'
files = ['bar', 'bazar'] files = ['bar', 'bazar']
for f in files: for f in files:
file_item = cont.file(f) file_item = cont.file(f)
self.assert_(file_item.write_random()) self.assertTrue(file_item.write_random())
results = cont.files(parms={'delimiter': delimiter, 'prefix': 'ba'}) results = cont.files(parms={'delimiter': delimiter, 'prefix': 'ba'})
self.assertEqual(results, ['bar', 'baza']) self.assertEqual(results, ['bar', 'baza'])
@ -490,7 +490,7 @@ class TestContainer(Base):
self.assertTrue(container.delete()) self.assertTrue(container.delete())
container = self.env.account.container(invalid_utf8) container = self.env.account.container(invalid_utf8)
self.assertTrue(not container.create(cfg={'no_path_quote': True})) self.assertFalse(container.create(cfg={'no_path_quote': True}))
self.assert_status(412) self.assert_status(412)
self.assertRaises(ResponseError, container.files, self.assertRaises(ResponseError, container.files,
cfg={'no_path_quote': True}) cfg={'no_path_quote': True})
@ -516,8 +516,8 @@ class TestContainer(Base):
cont_name = cont_name.encode('utf-8') cont_name = cont_name.encode('utf-8')
cont = self.env.account.container(cont_name) cont = self.env.account.container(cont_name)
self.assertTrue(not cont.create(cfg={'no_path_quote': True}), self.assertFalse(cont.create(cfg={'no_path_quote': True}),
'created container with name %s' % (cont_name)) 'created container with name %s' % (cont_name))
self.assert_status(404) self.assert_status(404)
self.assertNotIn(cont.name, self.env.account.containers()) self.assertNotIn(cont.name, self.env.account.containers())
@ -531,7 +531,7 @@ class TestContainer(Base):
def testDeleteOnContainerThatDoesNotExist(self): def testDeleteOnContainerThatDoesNotExist(self):
cont = self.env.account.container(Utils.create_name()) cont = self.env.account.container(Utils.create_name())
self.assertTrue(not cont.delete()) self.assertFalse(cont.delete())
self.assert_status(404) self.assert_status(404)
def testDeleteOnContainerWithFiles(self): def testDeleteOnContainerWithFiles(self):
@ -540,7 +540,7 @@ class TestContainer(Base):
file_item = cont.file(Utils.create_name()) file_item = cont.file(Utils.create_name())
file_item.write_random(self.env.file_size) file_item.write_random(self.env.file_size)
self.assertIn(file_item.name, cont.files()) self.assertIn(file_item.name, cont.files())
self.assertTrue(not cont.delete()) self.assertFalse(cont.delete())
self.assert_status(409) self.assert_status(409)
def testFileCreateInContainerThatDoesNotExist(self): def testFileCreateInContainerThatDoesNotExist(self):
@ -625,8 +625,8 @@ class TestContainer(Base):
def testTooLongName(self): def testTooLongName(self):
cont = self.env.account.container('x' * 257) cont = self.env.account.container('x' * 257)
self.assertTrue(not cont.create(), self.assertFalse(cont.create(),
'created container with name %s' % (cont.name)) 'created container with name %s' % (cont.name))
self.assert_status(400) self.assert_status(400)
def testContainerExistenceCachingProblem(self): def testContainerExistenceCachingProblem(self):
@ -967,24 +967,24 @@ class TestFile(Base):
# invalid source container # invalid source container
source_cont = self.env.account.container(Utils.create_name()) source_cont = self.env.account.container(Utils.create_name())
file_item = source_cont.file(source_filename) file_item = source_cont.file(source_filename)
self.assertTrue(not file_item.copy( self.assertFalse(file_item.copy(
'%s%s' % (prefix, self.env.container), '%s%s' % (prefix, self.env.container),
Utils.create_name())) Utils.create_name()))
self.assert_status(404) self.assert_status(404)
self.assertTrue(not file_item.copy('%s%s' % (prefix, dest_cont), self.assertFalse(file_item.copy('%s%s' % (prefix, dest_cont),
Utils.create_name())) Utils.create_name()))
self.assert_status(404) self.assert_status(404)
# invalid source object # invalid source object
file_item = self.env.container.file(Utils.create_name()) file_item = self.env.container.file(Utils.create_name())
self.assertTrue(not file_item.copy( self.assertFalse(file_item.copy(
'%s%s' % (prefix, self.env.container), '%s%s' % (prefix, self.env.container),
Utils.create_name())) Utils.create_name()))
self.assert_status(404) self.assert_status(404)
self.assertTrue(not file_item.copy('%s%s' % (prefix, dest_cont), self.assertFalse(file_item.copy('%s%s' % (prefix, dest_cont),
Utils.create_name())) Utils.create_name()))
self.assert_status(404) self.assert_status(404)
# invalid destination container # invalid destination container
@ -1016,7 +1016,7 @@ class TestFile(Base):
# invalid source container # invalid source container
source_cont = self.env.account.container(Utils.create_name()) source_cont = self.env.account.container(Utils.create_name())
file_item = source_cont.file(source_filename) file_item = source_cont.file(source_filename)
self.assertTrue(not file_item.copy_account( self.assertFalse(file_item.copy_account(
acct, acct,
'%s%s' % (prefix, self.env.container), '%s%s' % (prefix, self.env.container),
Utils.create_name())) Utils.create_name()))
@ -1027,7 +1027,7 @@ class TestFile(Base):
else: else:
self.assert_status(404) self.assert_status(404)
self.assertTrue(not file_item.copy_account( self.assertFalse(file_item.copy_account(
acct, acct,
'%s%s' % (prefix, cont), '%s%s' % (prefix, cont),
Utils.create_name())) Utils.create_name()))
@ -1035,7 +1035,7 @@ class TestFile(Base):
# invalid source object # invalid source object
file_item = self.env.container.file(Utils.create_name()) file_item = self.env.container.file(Utils.create_name())
self.assertTrue(not file_item.copy_account( self.assertFalse(file_item.copy_account(
acct, acct,
'%s%s' % (prefix, self.env.container), '%s%s' % (prefix, self.env.container),
Utils.create_name())) Utils.create_name()))
@ -1046,7 +1046,7 @@ class TestFile(Base):
else: else:
self.assert_status(404) self.assert_status(404)
self.assertTrue(not file_item.copy_account( self.assertFalse(file_item.copy_account(
acct, acct,
'%s%s' % (prefix, cont), '%s%s' % (prefix, cont),
Utils.create_name())) Utils.create_name()))
@ -1054,7 +1054,7 @@ class TestFile(Base):
# invalid destination container # invalid destination container
file_item = self.env.container.file(source_filename) file_item = self.env.container.file(source_filename)
self.assertTrue(not file_item.copy_account( self.assertFalse(file_item.copy_account(
acct, acct,
'%s%s' % (prefix, Utils.create_name()), '%s%s' % (prefix, Utils.create_name()),
Utils.create_name())) Utils.create_name()))
@ -1071,9 +1071,9 @@ class TestFile(Base):
file_item.write_random() file_item.write_random()
file_item = self.env.container.file(source_filename) file_item = self.env.container.file(source_filename)
self.assertTrue(not file_item.copy(Utils.create_name(), self.assertFalse(file_item.copy(Utils.create_name(),
Utils.create_name(), Utils.create_name(),
cfg={'no_destination': True})) cfg={'no_destination': True}))
self.assert_status(412) self.assert_status(412)
def testCopyDestinationSlashProblems(self): def testCopyDestinationSlashProblems(self):
@ -1082,9 +1082,9 @@ class TestFile(Base):
file_item.write_random() file_item.write_random()
# no slash # no slash
self.assertTrue(not file_item.copy(Utils.create_name(), self.assertFalse(file_item.copy(Utils.create_name(),
Utils.create_name(), Utils.create_name(),
cfg={'destination': Utils.create_name()})) cfg={'destination': Utils.create_name()}))
self.assert_status(412) self.assert_status(412)
def testCopyFromHeader(self): def testCopyFromHeader(self):
@ -2450,8 +2450,6 @@ class TestSlo(Base):
self.fail("COPY didn't copy the manifest (invalid json on GET)") self.fail("COPY didn't copy the manifest (invalid json on GET)")
def _make_manifest(self): def _make_manifest(self):
# To avoid the bug 1453807 on fast-post, make a new manifest
# for post test.
file_item = self.env.container.file("manifest-post") file_item = self.env.container.file("manifest-post")
seg_info = self.env.seg_info seg_info = self.env.seg_info
file_item.write( file_item.write(
@ -2473,6 +2471,7 @@ class TestSlo(Base):
updated = self.env.container.file("manifest-post") updated = self.env.container.file("manifest-post")
updated.info() updated.info()
updated.header_fields([('user-meta', 'x-object-meta-post')]) # sanity updated.header_fields([('user-meta', 'x-object-meta-post')]) # sanity
updated.header_fields([('slo', 'x-static-large-object')])
updated_contents = updated.read(parms={'multipart-manifest': 'get'}) updated_contents = updated.read(parms={'multipart-manifest': 'get'})
try: try:
json.loads(updated_contents) json.loads(updated_contents)
@ -2493,6 +2492,7 @@ class TestSlo(Base):
updated.info() updated.info()
updated.header_fields( updated.header_fields(
[('user-meta', 'x-object-meta-post')]) # sanity [('user-meta', 'x-object-meta-post')]) # sanity
updated.header_fields([('slo', 'x-static-large-object')])
updated_contents = updated.read( updated_contents = updated.read(
parms={'multipart-manifest': 'get'}) parms={'multipart-manifest': 'get'})
try: try:
@ -2598,7 +2598,7 @@ class TestObjectVersioningEnv(object):
@classmethod @classmethod
def setUp(cls): def setUp(cls):
cls.conn = Connection(tf.config) cls.conn = Connection(tf.config)
cls.conn.authenticate() cls.storage_url, cls.storage_token = cls.conn.authenticate()
cls.account = Account(cls.conn, tf.config.get('account', cls.account = Account(cls.conn, tf.config.get('account',
tf.config['username'])) tf.config['username']))
@ -2628,6 +2628,30 @@ class TestObjectVersioningEnv(object):
# if versioning is off, then X-Versions-Location won't persist # if versioning is off, then X-Versions-Location won't persist
cls.versioning_enabled = 'versions' in container_info cls.versioning_enabled = 'versions' in container_info
# setup another account to test ACLs
config2 = deepcopy(tf.config)
config2['account'] = tf.config['account2']
config2['username'] = tf.config['username2']
config2['password'] = tf.config['password2']
cls.conn2 = Connection(config2)
cls.storage_url2, cls.storage_token2 = cls.conn2.authenticate()
cls.account2 = cls.conn2.get_account()
cls.account2.delete_containers()
# setup another account with no access to anything to test ACLs
config3 = deepcopy(tf.config)
config3['account'] = tf.config['account']
config3['username'] = tf.config['username3']
config3['password'] = tf.config['password3']
cls.conn3 = Connection(config3)
cls.storage_url3, cls.storage_token3 = cls.conn3.authenticate()
cls.account3 = cls.conn3.get_account()
@classmethod
def tearDown(cls):
cls.account.delete_containers()
cls.account2.delete_containers()
class TestCrossPolicyObjectVersioningEnv(object): class TestCrossPolicyObjectVersioningEnv(object):
# tri-state: None initially, then True/False # tri-state: None initially, then True/False
@ -2650,14 +2674,14 @@ class TestCrossPolicyObjectVersioningEnv(object):
cls.multiple_policies_enabled = True cls.multiple_policies_enabled = True
else: else:
cls.multiple_policies_enabled = False cls.multiple_policies_enabled = False
# We have to lie here that versioning is enabled. We actually cls.versioning_enabled = False
# don't know, but it does not matter. We know these tests cannot
# run without multiple policies present. If multiple policies are
# present, we won't be setting this field to any value, so it
# should all still work.
cls.versioning_enabled = True
return return
if cls.versioning_enabled is None:
cls.versioning_enabled = 'versioned_writes' in cluster_info
if not cls.versioning_enabled:
return
policy = cls.policies.select() policy = cls.policies.select()
version_policy = cls.policies.exclude(name=policy['name']).select() version_policy = cls.policies.exclude(name=policy['name']).select()
@ -2691,6 +2715,25 @@ class TestCrossPolicyObjectVersioningEnv(object):
# if versioning is off, then X-Versions-Location won't persist # if versioning is off, then X-Versions-Location won't persist
cls.versioning_enabled = 'versions' in container_info cls.versioning_enabled = 'versions' in container_info
# setup another account to test ACLs
config2 = deepcopy(tf.config)
config2['account'] = tf.config['account2']
config2['username'] = tf.config['username2']
config2['password'] = tf.config['password2']
cls.conn2 = Connection(config2)
cls.storage_url2, cls.storage_token2 = cls.conn2.authenticate()
cls.account2 = cls.conn2.get_account()
cls.account2.delete_containers()
# setup another account with no access to anything to test ACLs
config3 = deepcopy(tf.config)
config3['account'] = tf.config['account']
config3['username'] = tf.config['username3']
config3['password'] = tf.config['password3']
cls.conn3 = Connection(config3)
cls.storage_url3, cls.storage_token3 = cls.conn3.authenticate()
cls.account3 = cls.conn3.get_account()
class TestObjectVersioning(Base): class TestObjectVersioning(Base):
env = TestObjectVersioningEnv env = TestObjectVersioningEnv
@ -2709,40 +2752,103 @@ class TestObjectVersioning(Base):
def tearDown(self): def tearDown(self):
super(TestObjectVersioning, self).tearDown() super(TestObjectVersioning, self).tearDown()
try: try:
# delete versions first! # only delete files and not container
# as they were configured in self.env
self.env.versions_container.delete_files() self.env.versions_container.delete_files()
self.env.container.delete_files() self.env.container.delete_files()
except ResponseError: except ResponseError:
pass pass
def test_clear_version_option(self):
# sanity
self.assertEqual(self.env.container.info()['versions'],
self.env.versions_container.name)
self.env.container.update_metadata(
hdrs={'X-Versions-Location': ''})
self.assertEqual(self.env.container.info().get('versions'), None)
# set location back to the way it was
self.env.container.update_metadata(
hdrs={'X-Versions-Location': self.env.versions_container.name})
self.assertEqual(self.env.container.info()['versions'],
self.env.versions_container.name)
def test_overwriting(self): def test_overwriting(self):
container = self.env.container container = self.env.container
versions_container = self.env.versions_container versions_container = self.env.versions_container
cont_info = container.info()
self.assertEquals(cont_info['versions'], versions_container.name)
obj_name = Utils.create_name() obj_name = Utils.create_name()
versioned_obj = container.file(obj_name) versioned_obj = container.file(obj_name)
versioned_obj.write("aaaaa") versioned_obj.write("aaaaa", hdrs={'Content-Type': 'text/jibberish01'})
obj_info = versioned_obj.info()
self.assertEqual('text/jibberish01', obj_info['content_type'])
self.assertEqual(0, versions_container.info()['object_count']) self.assertEqual(0, versions_container.info()['object_count'])
versioned_obj.write("bbbbb", hdrs={'Content-Type': 'text/jibberish02',
versioned_obj.write("bbbbb") 'X-Object-Meta-Foo': 'Bar'})
versioned_obj.initialize()
self.assertEqual(versioned_obj.content_type, 'text/jibberish02')
self.assertEqual(versioned_obj.metadata['foo'], 'Bar')
# the old version got saved off # the old version got saved off
self.assertEqual(1, versions_container.info()['object_count']) self.assertEqual(1, versions_container.info()['object_count'])
versioned_obj_name = versions_container.files()[0] versioned_obj_name = versions_container.files()[0]
self.assertEqual( prev_version = versions_container.file(versioned_obj_name)
"aaaaa", versions_container.file(versioned_obj_name).read()) prev_version.initialize()
self.assertEqual("aaaaa", prev_version.read())
self.assertEqual(prev_version.content_type, 'text/jibberish01')
# make sure the new obj metadata did not leak to the prev. version
self.assertTrue('foo' not in prev_version.metadata)
# check that POST does not create a new version
versioned_obj.sync_metadata(metadata={'fu': 'baz'})
self.assertEqual(1, versions_container.info()['object_count'])
# if we overwrite it again, there are two versions # if we overwrite it again, there are two versions
versioned_obj.write("ccccc") versioned_obj.write("ccccc")
self.assertEqual(2, versions_container.info()['object_count']) self.assertEqual(2, versions_container.info()['object_count'])
versioned_obj_name = versions_container.files()[1]
prev_version = versions_container.file(versioned_obj_name)
prev_version.initialize()
self.assertEqual("bbbbb", prev_version.read())
self.assertEqual(prev_version.content_type, 'text/jibberish02')
self.assertTrue('foo' in prev_version.metadata)
self.assertTrue('fu' in prev_version.metadata)
# as we delete things, the old contents return # as we delete things, the old contents return
self.assertEqual("ccccc", versioned_obj.read()) self.assertEqual("ccccc", versioned_obj.read())
# test copy from a different container
src_container = self.env.account.container(Utils.create_name())
self.assertTrue(src_container.create())
src_name = Utils.create_name()
src_obj = src_container.file(src_name)
src_obj.write("ddddd", hdrs={'Content-Type': 'text/jibberish04'})
src_obj.copy(container.name, obj_name)
self.assertEqual("ddddd", versioned_obj.read())
versioned_obj.initialize()
self.assertEqual(versioned_obj.content_type, 'text/jibberish04')
# make sure versions container has the previous version
self.assertEqual(3, versions_container.info()['object_count'])
versioned_obj_name = versions_container.files()[2]
prev_version = versions_container.file(versioned_obj_name)
prev_version.initialize()
self.assertEqual("ccccc", prev_version.read())
# test delete
versioned_obj.delete()
self.assertEqual("ccccc", versioned_obj.read())
versioned_obj.delete() versioned_obj.delete()
self.assertEqual("bbbbb", versioned_obj.read()) self.assertEqual("bbbbb", versioned_obj.read())
versioned_obj.delete() versioned_obj.delete()
self.assertEqual("aaaaa", versioned_obj.read()) self.assertEqual("aaaaa", versioned_obj.read())
self.assertEqual(0, versions_container.info()['object_count'])
versioned_obj.delete() versioned_obj.delete()
self.assertRaises(ResponseError, versioned_obj.read) self.assertRaises(ResponseError, versioned_obj.read)
@ -2774,6 +2880,87 @@ class TestObjectVersioning(Base):
self.assertEqual(3, versions_container.info()['object_count']) self.assertEqual(3, versions_container.info()['object_count'])
self.assertEqual("112233", man_file.read()) self.assertEqual("112233", man_file.read())
def test_versioning_container_acl(self):
# create versions container and DO NOT give write access to account2
versions_container = self.env.account.container(Utils.create_name())
self.assertTrue(versions_container.create(hdrs={
'X-Container-Write': ''
}))
# check account2 cannot write to versions container
fail_obj_name = Utils.create_name()
fail_obj = versions_container.file(fail_obj_name)
self.assertRaises(ResponseError, fail_obj.write, "should fail",
cfg={'use_token': self.env.storage_token2})
# create container and give write access to account2
# don't set X-Versions-Location just yet
container = self.env.account.container(Utils.create_name())
self.assertTrue(container.create(hdrs={
'X-Container-Write': self.env.conn2.user_acl}))
# check account2 cannot set X-Versions-Location on container
self.assertRaises(ResponseError, container.update_metadata, hdrs={
'X-Versions-Location': versions_container},
cfg={'use_token': self.env.storage_token2})
# good! now let admin set the X-Versions-Location
# p.s.: sticking a 'x-remove' header here to test precedence
# of both headers. Setting the location should succeed.
self.assertTrue(container.update_metadata(hdrs={
'X-Remove-Versions-Location': versions_container,
'X-Versions-Location': versions_container}))
# write object twice to container and check version
obj_name = Utils.create_name()
versioned_obj = container.file(obj_name)
self.assertTrue(versioned_obj.write("never argue with the data",
cfg={'use_token': self.env.storage_token2}))
self.assertEqual(versioned_obj.read(), "never argue with the data")
self.assertTrue(
versioned_obj.write("we don't have no beer, just tequila",
cfg={'use_token': self.env.storage_token2}))
self.assertEqual(versioned_obj.read(),
"we don't have no beer, just tequila")
self.assertEqual(1, versions_container.info()['object_count'])
# read the original uploaded object
for filename in versions_container.files():
backup_file = versions_container.file(filename)
break
self.assertEqual(backup_file.read(), "never argue with the data")
# user3 (some random user with no access to anything)
# tries to read from versioned container
self.assertRaises(ResponseError, backup_file.read,
cfg={'use_token': self.env.storage_token3})
# user3 cannot write or delete from source container either
self.assertRaises(ResponseError, versioned_obj.write,
"some random user trying to write data",
cfg={'use_token': self.env.storage_token3})
self.assertRaises(ResponseError, versioned_obj.delete,
cfg={'use_token': self.env.storage_token3})
# user2 can't read or delete from versions-location
self.assertRaises(ResponseError, backup_file.read,
cfg={'use_token': self.env.storage_token2})
self.assertRaises(ResponseError, backup_file.delete,
cfg={'use_token': self.env.storage_token2})
# but is able to delete from the source container
# this could be a helpful scenario for dev ops that want to setup
# just one container to hold object versions of multiple containers
# and each one of those containers are owned by different users
self.assertTrue(versioned_obj.delete(
cfg={'use_token': self.env.storage_token2}))
# tear-down since we create these containers here
# and not in self.env
versions_container.delete_recursive()
container.delete_recursive()
def test_versioning_check_acl(self): def test_versioning_check_acl(self):
container = self.env.container container = self.env.container
versions_container = self.env.versions_container versions_container = self.env.versions_container
@ -2903,6 +3090,59 @@ class TestTempurl(Base):
contents = self.env.obj.read(parms=parms, cfg={'no_auth_token': True}) contents = self.env.obj.read(parms=parms, cfg={'no_auth_token': True})
self.assertEqual(contents, "obj contents") self.assertEqual(contents, "obj contents")
def test_GET_DLO_inside_container(self):
seg1 = self.env.container.file(
"get-dlo-inside-seg1" + Utils.create_name())
seg2 = self.env.container.file(
"get-dlo-inside-seg2" + Utils.create_name())
seg1.write("one fish two fish ")
seg2.write("red fish blue fish")
manifest = self.env.container.file("manifest" + Utils.create_name())
manifest.write(
'',
hdrs={"X-Object-Manifest": "%s/get-dlo-inside-seg" %
(self.env.container.name,)})
expires = int(time.time()) + 86400
sig = self.tempurl_sig(
'GET', expires, self.env.conn.make_path(manifest.path),
self.env.tempurl_key)
parms = {'temp_url_sig': sig,
'temp_url_expires': str(expires)}
contents = manifest.read(parms=parms, cfg={'no_auth_token': True})
self.assertEqual(contents, "one fish two fish red fish blue fish")
def test_GET_DLO_outside_container(self):
seg1 = self.env.container.file(
"get-dlo-outside-seg1" + Utils.create_name())
seg2 = self.env.container.file(
"get-dlo-outside-seg2" + Utils.create_name())
seg1.write("one fish two fish ")
seg2.write("red fish blue fish")
container2 = self.env.account.container(Utils.create_name())
container2.create()
manifest = container2.file("manifest" + Utils.create_name())
manifest.write(
'',
hdrs={"X-Object-Manifest": "%s/get-dlo-outside-seg" %
(self.env.container.name,)})
expires = int(time.time()) + 86400
sig = self.tempurl_sig(
'GET', expires, self.env.conn.make_path(manifest.path),
self.env.tempurl_key)
parms = {'temp_url_sig': sig,
'temp_url_expires': str(expires)}
# cross container tempurl works fine for account tempurl key
contents = manifest.read(parms=parms, cfg={'no_auth_token': True})
self.assertEqual(contents, "one fish two fish red fish blue fish")
self.assert_status([200])
def test_PUT(self): def test_PUT(self):
new_obj = self.env.container.file(Utils.create_name()) new_obj = self.env.container.file(Utils.create_name())
@ -2921,6 +3161,42 @@ class TestTempurl(Base):
self.assertTrue(new_obj.info(parms=put_parms, self.assertTrue(new_obj.info(parms=put_parms,
cfg={'no_auth_token': True})) cfg={'no_auth_token': True}))
def test_PUT_manifest_access(self):
new_obj = self.env.container.file(Utils.create_name())
# give out a signature which allows a PUT to new_obj
expires = int(time.time()) + 86400
sig = self.tempurl_sig(
'PUT', expires, self.env.conn.make_path(new_obj.path),
self.env.tempurl_key)
put_parms = {'temp_url_sig': sig,
'temp_url_expires': str(expires)}
# try to create manifest pointing to some random container
try:
new_obj.write('', {
'x-object-manifest': '%s/foo' % 'some_random_container'
}, parms=put_parms, cfg={'no_auth_token': True})
except ResponseError as e:
self.assertEqual(e.status, 400)
else:
self.fail('request did not error')
# create some other container
other_container = self.env.account.container(Utils.create_name())
if not other_container.create():
raise ResponseError(self.conn.response)
# try to create manifest pointing to new container
try:
new_obj.write('', {
'x-object-manifest': '%s/foo' % other_container
}, parms=put_parms, cfg={'no_auth_token': True})
except ResponseError as e:
self.assertEqual(e.status, 400)
else:
self.fail('request did not error')
def test_HEAD(self): def test_HEAD(self):
expires = int(time.time()) + 86400 expires = int(time.time()) + 86400
sig = self.tempurl_sig( sig = self.tempurl_sig(
@ -3199,6 +3475,67 @@ class TestContainerTempurl(Base):
'Container TempURL key-2 found, should not be visible ' 'Container TempURL key-2 found, should not be visible '
'to readonly ACLs') 'to readonly ACLs')
def test_GET_DLO_inside_container(self):
seg1 = self.env.container.file(
"get-dlo-inside-seg1" + Utils.create_name())
seg2 = self.env.container.file(
"get-dlo-inside-seg2" + Utils.create_name())
seg1.write("one fish two fish ")
seg2.write("red fish blue fish")
manifest = self.env.container.file("manifest" + Utils.create_name())
manifest.write(
'',
hdrs={"X-Object-Manifest": "%s/get-dlo-inside-seg" %
(self.env.container.name,)})
expires = int(time.time()) + 86400
sig = self.tempurl_sig(
'GET', expires, self.env.conn.make_path(manifest.path),
self.env.tempurl_key)
parms = {'temp_url_sig': sig,
'temp_url_expires': str(expires)}
contents = manifest.read(parms=parms, cfg={'no_auth_token': True})
self.assertEqual(contents, "one fish two fish red fish blue fish")
def test_GET_DLO_outside_container(self):
container2 = self.env.account.container(Utils.create_name())
container2.create()
seg1 = container2.file(
"get-dlo-outside-seg1" + Utils.create_name())
seg2 = container2.file(
"get-dlo-outside-seg2" + Utils.create_name())
seg1.write("one fish two fish ")
seg2.write("red fish blue fish")
manifest = self.env.container.file("manifest" + Utils.create_name())
manifest.write(
'',
hdrs={"X-Object-Manifest": "%s/get-dlo-outside-seg" %
(container2.name,)})
expires = int(time.time()) + 86400
sig = self.tempurl_sig(
'GET', expires, self.env.conn.make_path(manifest.path),
self.env.tempurl_key)
parms = {'temp_url_sig': sig,
'temp_url_expires': str(expires)}
# cross container tempurl does not work for container tempurl key
try:
manifest.read(parms=parms, cfg={'no_auth_token': True})
except ResponseError as e:
self.assertEqual(e.status, 401)
else:
self.fail('request did not error')
try:
manifest.info(parms=parms, cfg={'no_auth_token': True})
except ResponseError as e:
self.assertEqual(e.status, 401)
else:
self.fail('request did not error')
class TestContainerTempurlUTF8(Base2, TestContainerTempurl): class TestContainerTempurlUTF8(Base2, TestContainerTempurl):
set_up = False set_up = False

View File

@ -11,7 +11,7 @@
# implied. # implied.
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
from __future__ import print_function
import sys import sys
import itertools import itertools
import uuid import uuid
@ -226,8 +226,8 @@ def main():
try: try:
brain.run(command, *args) brain.run(command, *args)
except ClientException as e: except ClientException as e:
print '**WARNING**: %s raised %s' % (command, e) print('**WARNING**: %s raised %s' % (command, e))
print 'STATUS'.join(['*' * 25] * 2) print('STATUS'.join(['*' * 25] * 2))
brain.servers.status() brain.servers.status()
sys.exit() sys.exit()

View File

@ -13,13 +13,15 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
from __future__ import print_function
import os import os
from subprocess import Popen, PIPE from subprocess import Popen, PIPE
import sys import sys
from time import sleep, time from time import sleep, time
from collections import defaultdict from collections import defaultdict
import unittest import unittest
from hashlib import md5
from uuid import uuid4
from nose import SkipTest from nose import SkipTest
from six.moves.http_client import HTTPConnection from six.moves.http_client import HTTPConnection
@ -86,9 +88,9 @@ def check_server(ipport, ipport2server, pids, timeout=CHECK_SERVER_TIMEOUT):
break break
except Exception as err: except Exception as err:
if time() > try_until: if time() > try_until:
print err print(err)
print 'Giving up on %s:%s after %s seconds.' % ( print('Giving up on %s:%s after %s seconds.' % (
server, ipport, timeout) server, ipport, timeout))
raise err raise err
sleep(0.1) sleep(0.1)
else: else:
@ -102,8 +104,8 @@ def check_server(ipport, ipport2server, pids, timeout=CHECK_SERVER_TIMEOUT):
return url, token, account return url, token, account
except Exception as err: except Exception as err:
if time() > try_until: if time() > try_until:
print err print(err)
print 'Giving up on proxy:8080 after 30 seconds.' print('Giving up on proxy:8080 after 30 seconds.')
raise err raise err
sleep(0.1) sleep(0.1)
return None return None
@ -255,16 +257,63 @@ def get_policy(**kwargs):
raise SkipTest('No policy matching %s' % kwargs) raise SkipTest('No policy matching %s' % kwargs)
def resetswift():
p = Popen("resetswift 2>&1", shell=True, stdout=PIPE)
stdout, _stderr = p.communicate()
print(stdout)
Manager(['all']).stop()
class Body(object):
def __init__(self, total=3.5 * 2 ** 20):
self.length = total
self.hasher = md5()
self.read_amount = 0
self.chunk = uuid4().hex * 2 ** 10
self.buff = ''
@property
def etag(self):
return self.hasher.hexdigest()
def __len__(self):
return self.length
def read(self, amount):
if len(self.buff) < amount:
try:
self.buff += next(self)
except StopIteration:
pass
rv, self.buff = self.buff[:amount], self.buff[amount:]
return rv
def __iter__(self):
return self
def next(self):
if self.buff:
rv, self.buff = self.buff, ''
return rv
if self.read_amount >= self.length:
raise StopIteration()
rv = self.chunk[:int(self.length - self.read_amount)]
self.read_amount += len(rv)
self.hasher.update(rv)
return rv
def __next__(self):
return next(self)
class ProbeTest(unittest.TestCase): class ProbeTest(unittest.TestCase):
""" """
Don't instantiate this directly, use a child class instead. Don't instantiate this directly, use a child class instead.
""" """
def setUp(self): def setUp(self):
p = Popen("resetswift 2>&1", shell=True, stdout=PIPE) resetswift()
stdout, _stderr = p.communicate()
print stdout
Manager(['all']).stop()
self.pids = {} self.pids = {}
try: try:
self.ipport2server = {} self.ipport2server = {}
@ -403,11 +452,11 @@ if __name__ == "__main__":
force_validate=True) force_validate=True)
except SkipTest as err: except SkipTest as err:
sys.exit('%s ERROR: %s' % (server, err)) sys.exit('%s ERROR: %s' % (server, err))
print '%s OK' % server print('%s OK' % server)
for policy in POLICIES: for policy in POLICIES:
try: try:
get_ring(policy.ring_name, 3, 4, get_ring(policy.ring_name, 3, 4,
server='object', force_validate=True) server='object', force_validate=True)
except SkipTest as err: except SkipTest as err:
sys.exit('object ERROR (%s): %s' % (policy.name, err)) sys.exit('object ERROR (%s): %s' % (policy.name, err))
print 'object OK (%s)' % policy.name print('object OK (%s)' % policy.name)

View File

@ -35,20 +35,20 @@ class TestAccountFailures(ReplProbeTest):
# Assert account level sees them # Assert account level sees them
headers, containers = client.get_account(self.url, self.token) headers, containers = client.get_account(self.url, self.token)
self.assertEquals(headers['x-account-container-count'], '2') self.assertEqual(headers['x-account-container-count'], '2')
self.assertEquals(headers['x-account-object-count'], '0') self.assertEqual(headers['x-account-object-count'], '0')
self.assertEquals(headers['x-account-bytes-used'], '0') self.assertEqual(headers['x-account-bytes-used'], '0')
found1 = False found1 = False
found2 = False found2 = False
for container in containers: for container in containers:
if container['name'] == container1: if container['name'] == container1:
found1 = True found1 = True
self.assertEquals(container['count'], 0) self.assertEqual(container['count'], 0)
self.assertEquals(container['bytes'], 0) self.assertEqual(container['bytes'], 0)
elif container['name'] == container2: elif container['name'] == container2:
found2 = True found2 = True
self.assertEquals(container['count'], 0) self.assertEqual(container['count'], 0)
self.assertEquals(container['bytes'], 0) self.assertEqual(container['bytes'], 0)
self.assertTrue(found1) self.assertTrue(found1)
self.assertTrue(found2) self.assertTrue(found2)
@ -57,20 +57,20 @@ class TestAccountFailures(ReplProbeTest):
# Assert account level doesn't see it yet # Assert account level doesn't see it yet
headers, containers = client.get_account(self.url, self.token) headers, containers = client.get_account(self.url, self.token)
self.assertEquals(headers['x-account-container-count'], '2') self.assertEqual(headers['x-account-container-count'], '2')
self.assertEquals(headers['x-account-object-count'], '0') self.assertEqual(headers['x-account-object-count'], '0')
self.assertEquals(headers['x-account-bytes-used'], '0') self.assertEqual(headers['x-account-bytes-used'], '0')
found1 = False found1 = False
found2 = False found2 = False
for container in containers: for container in containers:
if container['name'] == container1: if container['name'] == container1:
found1 = True found1 = True
self.assertEquals(container['count'], 0) self.assertEqual(container['count'], 0)
self.assertEquals(container['bytes'], 0) self.assertEqual(container['bytes'], 0)
elif container['name'] == container2: elif container['name'] == container2:
found2 = True found2 = True
self.assertEquals(container['count'], 0) self.assertEqual(container['count'], 0)
self.assertEquals(container['bytes'], 0) self.assertEqual(container['bytes'], 0)
self.assertTrue(found1) self.assertTrue(found1)
self.assertTrue(found2) self.assertTrue(found2)
@ -79,20 +79,20 @@ class TestAccountFailures(ReplProbeTest):
# Assert account level now sees the container2/object1 # Assert account level now sees the container2/object1
headers, containers = client.get_account(self.url, self.token) headers, containers = client.get_account(self.url, self.token)
self.assertEquals(headers['x-account-container-count'], '2') self.assertEqual(headers['x-account-container-count'], '2')
self.assertEquals(headers['x-account-object-count'], '1') self.assertEqual(headers['x-account-object-count'], '1')
self.assertEquals(headers['x-account-bytes-used'], '4') self.assertEqual(headers['x-account-bytes-used'], '4')
found1 = False found1 = False
found2 = False found2 = False
for container in containers: for container in containers:
if container['name'] == container1: if container['name'] == container1:
found1 = True found1 = True
self.assertEquals(container['count'], 0) self.assertEqual(container['count'], 0)
self.assertEquals(container['bytes'], 0) self.assertEqual(container['bytes'], 0)
elif container['name'] == container2: elif container['name'] == container2:
found2 = True found2 = True
self.assertEquals(container['count'], 1) self.assertEqual(container['count'], 1)
self.assertEquals(container['bytes'], 4) self.assertEqual(container['bytes'], 4)
self.assertTrue(found1) self.assertTrue(found1)
self.assertTrue(found2) self.assertTrue(found2)
@ -111,9 +111,9 @@ class TestAccountFailures(ReplProbeTest):
# Assert account level knows container1 is gone but doesn't know about # Assert account level knows container1 is gone but doesn't know about
# container2/object2 yet # container2/object2 yet
headers, containers = client.get_account(self.url, self.token) headers, containers = client.get_account(self.url, self.token)
self.assertEquals(headers['x-account-container-count'], '1') self.assertEqual(headers['x-account-container-count'], '1')
self.assertEquals(headers['x-account-object-count'], '1') self.assertEqual(headers['x-account-object-count'], '1')
self.assertEquals(headers['x-account-bytes-used'], '4') self.assertEqual(headers['x-account-bytes-used'], '4')
found1 = False found1 = False
found2 = False found2 = False
for container in containers: for container in containers:
@ -121,9 +121,9 @@ class TestAccountFailures(ReplProbeTest):
found1 = True found1 = True
elif container['name'] == container2: elif container['name'] == container2:
found2 = True found2 = True
self.assertEquals(container['count'], 1) self.assertEqual(container['count'], 1)
self.assertEquals(container['bytes'], 4) self.assertEqual(container['bytes'], 4)
self.assertTrue(not found1) self.assertFalse(found1)
self.assertTrue(found2) self.assertTrue(found2)
# Run container updaters # Run container updaters
@ -131,9 +131,9 @@ class TestAccountFailures(ReplProbeTest):
# Assert account level now knows about container2/object2 # Assert account level now knows about container2/object2
headers, containers = client.get_account(self.url, self.token) headers, containers = client.get_account(self.url, self.token)
self.assertEquals(headers['x-account-container-count'], '1') self.assertEqual(headers['x-account-container-count'], '1')
self.assertEquals(headers['x-account-object-count'], '2') self.assertEqual(headers['x-account-object-count'], '2')
self.assertEquals(headers['x-account-bytes-used'], '9') self.assertEqual(headers['x-account-bytes-used'], '9')
found1 = False found1 = False
found2 = False found2 = False
for container in containers: for container in containers:
@ -141,9 +141,9 @@ class TestAccountFailures(ReplProbeTest):
found1 = True found1 = True
elif container['name'] == container2: elif container['name'] == container2:
found2 = True found2 = True
self.assertEquals(container['count'], 2) self.assertEqual(container['count'], 2)
self.assertEquals(container['bytes'], 9) self.assertEqual(container['bytes'], 9)
self.assertTrue(not found1) self.assertFalse(found1)
self.assertTrue(found2) self.assertTrue(found2)
# Restart other primary account server # Restart other primary account server
@ -154,9 +154,9 @@ class TestAccountFailures(ReplProbeTest):
# new container2/object2 yet # new container2/object2 yet
headers, containers = \ headers, containers = \
direct_client.direct_get_account(anodes[0], apart, self.account) direct_client.direct_get_account(anodes[0], apart, self.account)
self.assertEquals(headers['x-account-container-count'], '2') self.assertEqual(headers['x-account-container-count'], '2')
self.assertEquals(headers['x-account-object-count'], '1') self.assertEqual(headers['x-account-object-count'], '1')
self.assertEquals(headers['x-account-bytes-used'], '4') self.assertEqual(headers['x-account-bytes-used'], '4')
found1 = False found1 = False
found2 = False found2 = False
for container in containers: for container in containers:
@ -164,8 +164,8 @@ class TestAccountFailures(ReplProbeTest):
found1 = True found1 = True
elif container['name'] == container2: elif container['name'] == container2:
found2 = True found2 = True
self.assertEquals(container['count'], 1) self.assertEqual(container['count'], 1)
self.assertEquals(container['bytes'], 4) self.assertEqual(container['bytes'], 4)
self.assertTrue(found1) self.assertTrue(found1)
self.assertTrue(found2) self.assertTrue(found2)
@ -175,9 +175,9 @@ class TestAccountFailures(ReplProbeTest):
# Assert that server is now up to date # Assert that server is now up to date
headers, containers = \ headers, containers = \
direct_client.direct_get_account(anodes[0], apart, self.account) direct_client.direct_get_account(anodes[0], apart, self.account)
self.assertEquals(headers['x-account-container-count'], '1') self.assertEqual(headers['x-account-container-count'], '1')
self.assertEquals(headers['x-account-object-count'], '2') self.assertEqual(headers['x-account-object-count'], '2')
self.assertEquals(headers['x-account-bytes-used'], '9') self.assertEqual(headers['x-account-bytes-used'], '9')
found1 = False found1 = False
found2 = False found2 = False
for container in containers: for container in containers:
@ -185,9 +185,10 @@ class TestAccountFailures(ReplProbeTest):
found1 = True found1 = True
elif container['name'] == container2: elif container['name'] == container2:
found2 = True found2 = True
self.assertEquals(container['count'], 2) self.assertEqual(container['count'], 2)
self.assertEqual(container['bytes'], 9)
self.assertEquals(container['bytes'], 9) self.assertEquals(container['bytes'], 9)
self.assertTrue(not found1) self.assertFalse(found1)
self.assertTrue(found2) self.assertTrue(found2)

View File

@ -66,7 +66,7 @@ class TestAccountReaper(ReplProbeTest):
direct_head_container(cnode, cpart, self.account, direct_head_container(cnode, cpart, self.account,
container) container)
except ClientException as err: except ClientException as err:
self.assertEquals(err.http_status, 404) self.assertEqual(err.http_status, 404)
delete_time = err.http_headers.get( delete_time = err.http_headers.get(
'X-Backend-DELETE-Timestamp') 'X-Backend-DELETE-Timestamp')
# 'X-Backend-DELETE-Timestamp' confirms it was deleted # 'X-Backend-DELETE-Timestamp' confirms it was deleted
@ -91,7 +91,7 @@ class TestAccountReaper(ReplProbeTest):
direct_get_object(node, part, self.account, direct_get_object(node, part, self.account,
container, obj, headers=headers) container, obj, headers=headers)
except ClientException as err: except ClientException as err:
self.assertEquals(err.http_status, 404) self.assertEqual(err.http_status, 404)
delete_time = err.http_headers.get('X-Backend-Timestamp') delete_time = err.http_headers.get('X-Backend-Timestamp')
# 'X-Backend-Timestamp' confirms obj was deleted # 'X-Backend-Timestamp' confirms obj was deleted
self.assertTrue(delete_time) self.assertTrue(delete_time)
@ -114,7 +114,7 @@ class TestAccountReaper(ReplProbeTest):
direct_head_container(cnode, cpart, self.account, direct_head_container(cnode, cpart, self.account,
container) container)
except ClientException as err: except ClientException as err:
self.assertEquals(err.http_status, 404) self.assertEqual(err.http_status, 404)
delete_time = err.http_headers.get( delete_time = err.http_headers.get(
'X-Backend-DELETE-Timestamp') 'X-Backend-DELETE-Timestamp')
# 'X-Backend-DELETE-Timestamp' confirms it was deleted # 'X-Backend-DELETE-Timestamp' confirms it was deleted
@ -134,7 +134,7 @@ class TestAccountReaper(ReplProbeTest):
direct_get_object(node, part, self.account, direct_get_object(node, part, self.account,
container, obj, headers=headers) container, obj, headers=headers)
except ClientException as err: except ClientException as err:
self.assertEquals(err.http_status, 404) self.assertEqual(err.http_status, 404)
delete_time = err.http_headers.get('X-Backend-Timestamp') delete_time = err.http_headers.get('X-Backend-Timestamp')
# 'X-Backend-Timestamp' confirms obj was deleted # 'X-Backend-Timestamp' confirms obj was deleted
self.assertTrue(delete_time) self.assertTrue(delete_time)

View File

@ -70,7 +70,7 @@ class TestContainerFailures(ReplProbeTest):
# Assert all container1 servers indicate container1 is alive and # Assert all container1 servers indicate container1 is alive and
# well with object1 # well with object1
for cnode in cnodes: for cnode in cnodes:
self.assertEquals( self.assertEqual(
[o['name'] for o in direct_client.direct_get_container( [o['name'] for o in direct_client.direct_get_container(
cnode, cpart, self.account, container1)[1]], cnode, cpart, self.account, container1)[1]],
['object1']) ['object1'])
@ -78,9 +78,9 @@ class TestContainerFailures(ReplProbeTest):
# Assert account level also indicates container1 is alive and # Assert account level also indicates container1 is alive and
# well with object1 # well with object1
headers, containers = client.get_account(self.url, self.token) headers, containers = client.get_account(self.url, self.token)
self.assertEquals(headers['x-account-container-count'], '1') self.assertEqual(headers['x-account-container-count'], '1')
self.assertEquals(headers['x-account-object-count'], '1') self.assertEqual(headers['x-account-object-count'], '1')
self.assertEquals(headers['x-account-bytes-used'], '3') self.assertEqual(headers['x-account-bytes-used'], '3')
def test_two_nodes_fail(self): def test_two_nodes_fail(self):
# Create container1 # Create container1
@ -118,15 +118,15 @@ class TestContainerFailures(ReplProbeTest):
direct_client.direct_get_container(cnode, cpart, self.account, direct_client.direct_get_container(cnode, cpart, self.account,
container1) container1)
except ClientException as err: except ClientException as err:
self.assertEquals(err.http_status, 404) self.assertEqual(err.http_status, 404)
else: else:
self.fail("Expected ClientException but didn't get it") self.fail("Expected ClientException but didn't get it")
# Assert account level also indicates container1 is gone # Assert account level also indicates container1 is gone
headers, containers = client.get_account(self.url, self.token) headers, containers = client.get_account(self.url, self.token)
self.assertEquals(headers['x-account-container-count'], '0') self.assertEqual(headers['x-account-container-count'], '0')
self.assertEquals(headers['x-account-object-count'], '0') self.assertEqual(headers['x-account-object-count'], '0')
self.assertEquals(headers['x-account-bytes-used'], '0') self.assertEqual(headers['x-account-bytes-used'], '0')
def _get_container_db_files(self, container): def _get_container_db_files(self, container):
opart, onodes = self.container_ring.get_nodes(self.account, container) opart, onodes = self.container_ring.get_nodes(self.account, container)
@ -160,7 +160,7 @@ class TestContainerFailures(ReplProbeTest):
try: try:
client.delete_container(self.url, self.token, container) client.delete_container(self.url, self.token, container)
except client.ClientException as err: except client.ClientException as err:
self.assertEquals(err.http_status, 503) self.assertEqual(err.http_status, 503)
else: else:
self.fail("Expected ClientException but didn't get it") self.fail("Expected ClientException but didn't get it")
else: else:

View File

@ -336,7 +336,7 @@ class TestContainerMergePolicyIndex(ReplProbeTest):
self.assertEqual(metadata['x-static-large-object'].lower(), 'true') self.assertEqual(metadata['x-static-large-object'].lower(), 'true')
for i, entry in enumerate(utils.json.loads(body)): for i, entry in enumerate(utils.json.loads(body)):
for key in ('hash', 'bytes', 'name'): for key in ('hash', 'bytes', 'name'):
self.assertEquals(entry[key], direct_manifest_data[i][key]) self.assertEqual(entry[key], direct_manifest_data[i][key])
metadata, body = client.get_object( metadata, body = client.get_object(
self.url, self.token, self.container_name, direct_manifest_name) self.url, self.token, self.container_name, direct_manifest_name)
self.assertEqual(metadata['x-static-large-object'].lower(), 'true') self.assertEqual(metadata['x-static-large-object'].lower(), 'true')

View File

@ -137,7 +137,7 @@ class TestEmptyDevice(ReplProbeTest):
onode, opart, self.account, container, obj, headers={ onode, opart, self.account, container, obj, headers={
'X-Backend-Storage-Policy-Index': self.policy.idx}) 'X-Backend-Storage-Policy-Index': self.policy.idx})
except ClientException as err: except ClientException as err:
self.assertEquals(err.http_status, 404) self.assertEqual(err.http_status, 404)
self.assertFalse(os.path.exists(obj_dir)) self.assertFalse(os.path.exists(obj_dir))
else: else:
self.fail("Expected ClientException but didn't get it") self.fail("Expected ClientException but didn't get it")
@ -169,7 +169,7 @@ class TestEmptyDevice(ReplProbeTest):
another_onode, opart, self.account, container, obj, headers={ another_onode, opart, self.account, container, obj, headers={
'X-Backend-Storage-Policy-Index': self.policy.idx}) 'X-Backend-Storage-Policy-Index': self.policy.idx})
except ClientException as err: except ClientException as err:
self.assertEquals(err.http_status, 404) self.assertEqual(err.http_status, 404)
else: else:
self.fail("Expected ClientException but didn't get it") self.fail("Expected ClientException but didn't get it")

View File

@ -54,7 +54,7 @@ class TestObjectAsyncUpdate(ReplProbeTest):
self.ipport2server, self.pids) self.ipport2server, self.pids)
# Assert it does not know about container/obj # Assert it does not know about container/obj
self.assertTrue(not direct_client.direct_get_container( self.assertFalse(direct_client.direct_get_container(
cnode, cpart, self.account, container)[1]) cnode, cpart, self.account, container)[1])
# Run the object-updaters # Run the object-updaters

View File

@ -57,7 +57,7 @@ class TestObjectFailures(ReplProbeTest):
self.policy.name}) self.policy.name})
client.put_object(self.url, self.token, container, obj, data) client.put_object(self.url, self.token, container, obj, data)
odata = client.get_object(self.url, self.token, container, obj)[-1] odata = client.get_object(self.url, self.token, container, obj)[-1]
self.assertEquals(odata, data) self.assertEqual(odata, data)
opart, onodes = self.object_ring.get_nodes( opart, onodes = self.object_ring.get_nodes(
self.account, container, obj) self.account, container, obj)
onode = onodes[0] onode = onodes[0]
@ -84,14 +84,14 @@ class TestObjectFailures(ReplProbeTest):
odata = direct_client.direct_get_object( odata = direct_client.direct_get_object(
onode, opart, self.account, container, obj, headers={ onode, opart, self.account, container, obj, headers={
'X-Backend-Storage-Policy-Index': self.policy.idx})[-1] 'X-Backend-Storage-Policy-Index': self.policy.idx})[-1]
self.assertEquals(odata, 'VERIFY') self.assertEqual(odata, 'VERIFY')
try: try:
direct_client.direct_get_object( direct_client.direct_get_object(
onode, opart, self.account, container, obj, headers={ onode, opart, self.account, container, obj, headers={
'X-Backend-Storage-Policy-Index': self.policy.idx}) 'X-Backend-Storage-Policy-Index': self.policy.idx})
raise Exception("Did not quarantine object") raise Exception("Did not quarantine object")
except ClientException as err: except ClientException as err:
self.assertEquals(err.http_status, 404) self.assertEqual(err.http_status, 404)
def run_quarantine_range_etag(self): def run_quarantine_range_etag(self):
container = 'container-range-%s' % uuid4() container = 'container-range-%s' % uuid4()
@ -111,7 +111,7 @@ class TestObjectFailures(ReplProbeTest):
odata = direct_client.direct_get_object( odata = direct_client.direct_get_object(
onode, opart, self.account, container, obj, onode, opart, self.account, container, obj,
headers=req_headers)[-1] headers=req_headers)[-1]
self.assertEquals(odata, result) self.assertEqual(odata, result)
try: try:
direct_client.direct_get_object( direct_client.direct_get_object(
@ -119,7 +119,7 @@ class TestObjectFailures(ReplProbeTest):
'X-Backend-Storage-Policy-Index': self.policy.idx}) 'X-Backend-Storage-Policy-Index': self.policy.idx})
raise Exception("Did not quarantine object") raise Exception("Did not quarantine object")
except ClientException as err: except ClientException as err:
self.assertEquals(err.http_status, 404) self.assertEqual(err.http_status, 404)
def run_quarantine_zero_byte_get(self): def run_quarantine_zero_byte_get(self):
container = 'container-zbyte-%s' % uuid4() container = 'container-zbyte-%s' % uuid4()
@ -137,7 +137,7 @@ class TestObjectFailures(ReplProbeTest):
self.policy.idx}) self.policy.idx})
raise Exception("Did not quarantine object") raise Exception("Did not quarantine object")
except ClientException as err: except ClientException as err:
self.assertEquals(err.http_status, 404) self.assertEqual(err.http_status, 404)
def run_quarantine_zero_byte_head(self): def run_quarantine_zero_byte_head(self):
container = 'container-zbyte-%s' % uuid4() container = 'container-zbyte-%s' % uuid4()
@ -155,7 +155,7 @@ class TestObjectFailures(ReplProbeTest):
self.policy.idx}) self.policy.idx})
raise Exception("Did not quarantine object") raise Exception("Did not quarantine object")
except ClientException as err: except ClientException as err:
self.assertEquals(err.http_status, 404) self.assertEqual(err.http_status, 404)
def run_quarantine_zero_byte_post(self): def run_quarantine_zero_byte_post(self):
container = 'container-zbyte-%s' % uuid4() container = 'container-zbyte-%s' % uuid4()
@ -177,7 +177,7 @@ class TestObjectFailures(ReplProbeTest):
response_timeout=1) response_timeout=1)
raise Exception("Did not quarantine object") raise Exception("Did not quarantine object")
except ClientException as err: except ClientException as err:
self.assertEquals(err.http_status, 404) self.assertEqual(err.http_status, 404)
def test_runner(self): def test_runner(self):
self.run_quarantine() self.run_quarantine()

View File

@ -16,13 +16,17 @@
from unittest import main from unittest import main
from uuid import uuid4 from uuid import uuid4
import random
from hashlib import md5
from collections import defaultdict
from swiftclient import client from swiftclient import client
from swift.common import direct_client from swift.common import direct_client
from swift.common.exceptions import ClientException from swift.common.exceptions import ClientException
from swift.common.manager import Manager from swift.common.manager import Manager
from test.probe.common import kill_server, ReplProbeTest, start_server from test.probe.common import (kill_server, start_server, ReplProbeTest,
ECProbeTest, Body)
class TestObjectHandoff(ReplProbeTest): class TestObjectHandoff(ReplProbeTest):
@ -102,7 +106,7 @@ class TestObjectHandoff(ReplProbeTest):
onode, opart, self.account, container, obj, headers={ onode, opart, self.account, container, obj, headers={
'X-Backend-Storage-Policy-Index': self.policy.idx}) 'X-Backend-Storage-Policy-Index': self.policy.idx})
except ClientException as err: except ClientException as err:
self.assertEquals(err.http_status, 404) self.assertEqual(err.http_status, 404)
else: else:
self.fail("Expected ClientException but didn't get it") self.fail("Expected ClientException but didn't get it")
@ -136,7 +140,7 @@ class TestObjectHandoff(ReplProbeTest):
another_onode, opart, self.account, container, obj, headers={ another_onode, opart, self.account, container, obj, headers={
'X-Backend-Storage-Policy-Index': self.policy.idx}) 'X-Backend-Storage-Policy-Index': self.policy.idx})
except ClientException as err: except ClientException as err:
self.assertEquals(err.http_status, 404) self.assertEqual(err.http_status, 404)
else: else:
self.fail("Expected ClientException but didn't get it") self.fail("Expected ClientException but didn't get it")
@ -160,7 +164,7 @@ class TestObjectHandoff(ReplProbeTest):
try: try:
client.head_object(self.url, self.token, container, obj) client.head_object(self.url, self.token, container, obj)
except client.ClientException as err: except client.ClientException as err:
self.assertEquals(err.http_status, 404) self.assertEqual(err.http_status, 404)
else: else:
self.fail("Expected ClientException but didn't get it") self.fail("Expected ClientException but didn't get it")
@ -206,10 +210,94 @@ class TestObjectHandoff(ReplProbeTest):
another_onode, opart, self.account, container, obj, headers={ another_onode, opart, self.account, container, obj, headers={
'X-Backend-Storage-Policy-Index': self.policy.idx}) 'X-Backend-Storage-Policy-Index': self.policy.idx})
except ClientException as err: except ClientException as err:
self.assertEquals(err.http_status, 404) self.assertEqual(err.http_status, 404)
else: else:
self.fail("Expected ClientException but didn't get it") self.fail("Expected ClientException but didn't get it")
class TestECObjectHandoffOverwrite(ECProbeTest):
def get_object(self, container_name, object_name):
headers, body = client.get_object(self.url, self.token,
container_name,
object_name,
resp_chunk_size=64 * 2 ** 10)
resp_checksum = md5()
for chunk in body:
resp_checksum.update(chunk)
return resp_checksum.hexdigest()
def test_ec_handoff_overwrite(self):
container_name = 'container-%s' % uuid4()
object_name = 'object-%s' % uuid4()
# create EC container
headers = {'X-Storage-Policy': self.policy.name}
client.put_container(self.url, self.token, container_name,
headers=headers)
# PUT object
old_contents = Body()
client.put_object(self.url, self.token, container_name,
object_name, contents=old_contents)
# get our node lists
opart, onodes = self.object_ring.get_nodes(
self.account, container_name, object_name)
# shutdown one of the primary data nodes
failed_primary = random.choice(onodes)
failed_primary_device_path = self.device_dir('object', failed_primary)
self.kill_drive(failed_primary_device_path)
# overwrite our object with some new data
new_contents = Body()
client.put_object(self.url, self.token, container_name,
object_name, contents=new_contents)
self.assertNotEqual(new_contents.etag, old_contents.etag)
# restore failed primary device
self.revive_drive(failed_primary_device_path)
# sanity - failed node has old contents
req_headers = {'X-Backend-Storage-Policy-Index': int(self.policy)}
headers = direct_client.direct_head_object(
failed_primary, opart, self.account, container_name,
object_name, headers=req_headers)
self.assertEqual(headers['X-Object-Sysmeta-EC-Etag'],
old_contents.etag)
# we have 1 primary with wrong old etag, and we should have 5 with
# new etag plus a handoff with the new etag, so killing 2 other
# primaries forces proxy to try to GET from all primaries plus handoff.
other_nodes = [n for n in onodes if n != failed_primary]
random.shuffle(other_nodes)
for node in other_nodes[:2]:
self.kill_drive(self.device_dir('object', node))
# sanity, after taking out two primaries we should be down to
# only four primaries, one of which has the old etag - but we
# also have a handoff with the new etag out there
found_frags = defaultdict(int)
req_headers = {'X-Backend-Storage-Policy-Index': int(self.policy)}
for node in onodes + list(self.object_ring.get_more_nodes(opart)):
try:
headers = direct_client.direct_head_object(
node, opart, self.account, container_name,
object_name, headers=req_headers)
except Exception:
continue
found_frags[headers['X-Object-Sysmeta-EC-Etag']] += 1
self.assertEqual(found_frags, {
new_contents.etag: 4, # this should be enough to rebuild!
old_contents.etag: 1,
})
# clear node error limiting
Manager(['proxy']).restart()
resp_etag = self.get_object(container_name, object_name)
self.assertEqual(resp_etag, new_contents.etag)
if __name__ == '__main__': if __name__ == '__main__':
main() main()

View File

@ -95,7 +95,7 @@ class TestReconstructorPropDurable(ECProbeTest):
if e.errno != errno.ENOENT: if e.errno != errno.ENOENT:
raise raise
# fire up reconstructor to propogate the .durable # fire up reconstructor to propagate the .durable
self.reconstructor.once() self.reconstructor.once()
# fragment is still exactly as it was before! # fragment is still exactly as it was before!

View File

@ -21,7 +21,7 @@ import random
import shutil import shutil
from collections import defaultdict from collections import defaultdict
from test.probe.common import ECProbeTest from test.probe.common import ECProbeTest, Body
from swift.common import direct_client from swift.common import direct_client
from swift.common.storage_policy import EC_POLICY from swift.common.storage_policy import EC_POLICY
@ -31,32 +31,6 @@ from swift.obj import reconstructor
from swiftclient import client from swiftclient import client
class Body(object):
def __init__(self, total=3.5 * 2 ** 20):
self.total = total
self.hasher = md5()
self.size = 0
self.chunk = 'test' * 16 * 2 ** 10
@property
def etag(self):
return self.hasher.hexdigest()
def __iter__(self):
return self
def next(self):
if self.size > self.total:
raise StopIteration()
self.size += len(self.chunk)
self.hasher.update(self.chunk)
return self.chunk
def __next__(self):
return next(self)
class TestReconstructorRevert(ECProbeTest): class TestReconstructorRevert(ECProbeTest):
def setUp(self): def setUp(self):
@ -159,7 +133,7 @@ class TestReconstructorRevert(ECProbeTest):
hnode_id = (hnode['port'] - 6000) / 10 hnode_id = (hnode['port'] - 6000) / 10
self.reconstructor.once(number=hnode_id) self.reconstructor.once(number=hnode_id)
# first threee primaries have data again # first three primaries have data again
for onode in (onodes[0], onodes[2]): for onode in (onodes[0], onodes[2]):
self.direct_get(onode, opart) self.direct_get(onode, opart)
@ -218,7 +192,7 @@ class TestReconstructorRevert(ECProbeTest):
# enable the first node again # enable the first node again
self.revive_drive(p_dev2) self.revive_drive(p_dev2)
# propogate the delete... # propagate the delete...
# fire up reconstructor on handoff nodes only # fire up reconstructor on handoff nodes only
for hnode in hnodes: for hnode in hnodes:
hnode_id = (hnode['port'] - 6000) / 10 hnode_id = (hnode['port'] - 6000) / 10

View File

@ -0,0 +1,103 @@
#!/usr/bin/python -u
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import httplib
import random
from swift.common.storage_policy import POLICIES
from swift.common.ring import Ring
from swift.common.manager import Manager
from test.probe.common import resetswift
def putrequest(conn, method, path, headers):
conn.putrequest(method, path, skip_host=(headers and 'Host' in headers))
if headers:
for header, value in headers.items():
conn.putheader(header, str(value))
conn.endheaders()
class TestWSGIServerProcessHandling(unittest.TestCase):
def setUp(self):
resetswift()
def _check_reload(self, server_name, ip, port):
manager = Manager([server_name])
manager.start()
starting_pids = set(pid for server in manager.servers
for (_, pid) in server.iter_pid_files())
body = 'test' * 10
conn = httplib.HTTPConnection('%s:%s' % (ip, port))
# sanity request
putrequest(conn, 'PUT', 'blah',
headers={'Content-Length': len(body)})
conn.send(body)
resp = conn.getresponse()
self.assertEqual(resp.status // 100, 4)
resp.read()
manager.reload()
post_reload_pids = set(pid for server in manager.servers
for (_, pid) in server.iter_pid_files())
# none of the pids we started with are being tracked after reload
msg = 'expected all pids from %r to have died, but found %r' % (
starting_pids, post_reload_pids)
self.assertFalse(starting_pids & post_reload_pids, msg)
# ... and yet we can keep using the same connection!
putrequest(conn, 'PUT', 'blah',
headers={'Content-Length': len(body)})
conn.send(body)
resp = conn.getresponse()
self.assertEqual(resp.status // 100, 4)
resp.read()
# close our connection
conn.close()
# sanity
post_close_pids = set(pid for server in manager.servers
for (_, pid) in server.iter_pid_files())
self.assertEqual(post_reload_pids, post_close_pids)
def test_proxy_reload(self):
self._check_reload('proxy-server', 'localhost', 8080)
def test_object_reload(self):
policy = random.choice(list(POLICIES))
policy.load_ring('/etc/swift')
node = random.choice(policy.object_ring.get_part_nodes(1))
self._check_reload('object', node['ip'], node['port'])
def test_account_container_reload(self):
for server in ('account', 'container'):
ring = Ring('/etc/swift', ring_name=server)
node = random.choice(ring.get_part_nodes(1))
self._check_reload(server, node['ip'], node['port'])
if __name__ == '__main__':
unittest.main()

View File

@ -93,6 +93,7 @@ fake_syslog = False
#max_meta_count = 90 #max_meta_count = 90
#max_meta_overall_size = 4096 #max_meta_overall_size = 4096
#max_header_size = 8192 #max_header_size = 8192
#extra_header_count = 0
#max_object_name_length = 1024 #max_object_name_length = 1024
#container_listing_limit = 10000 #container_listing_limit = 10000
#account_listing_limit = 10000 #account_listing_limit = 10000

View File

@ -15,6 +15,7 @@
""" Swift tests """ """ Swift tests """
from __future__ import print_function
import os import os
import copy import copy
import logging import logging
@ -509,6 +510,8 @@ class FakeLogger(logging.Logger, object):
self.lines_dict = {'critical': [], 'error': [], 'info': [], self.lines_dict = {'critical': [], 'error': [], 'info': [],
'warning': [], 'debug': [], 'notice': []} 'warning': [], 'debug': [], 'notice': []}
clear = _clear # this is a public interface
def get_lines_for_level(self, level): def get_lines_for_level(self, level):
if level not in self.lines_dict: if level not in self.lines_dict:
raise KeyError( raise KeyError(
@ -572,8 +575,8 @@ class FakeLogger(logging.Logger, object):
try: try:
line = record.getMessage() line = record.getMessage()
except TypeError: except TypeError:
print 'WARNING: unable to format log message %r %% %r' % ( print('WARNING: unable to format log message %r %% %r' % (
record.msg, record.args) record.msg, record.args))
raise raise
self.lines_dict[record.levelname.lower()].append(line) self.lines_dict[record.levelname.lower()].append(line)
@ -597,7 +600,7 @@ class DebugLogger(FakeLogger):
def handle(self, record): def handle(self, record):
self._handle(record) self._handle(record)
print self.formatter.format(record) print(self.formatter.format(record))
class DebugLogAdapter(utils.LogAdapter): class DebugLogAdapter(utils.LogAdapter):
@ -860,7 +863,9 @@ def fake_http_connect(*code_iter, **kwargs):
headers = dict(self.expect_headers) headers = dict(self.expect_headers)
if expect_status == 409: if expect_status == 409:
headers['X-Backend-Timestamp'] = self.timestamp headers['X-Backend-Timestamp'] = self.timestamp
response = FakeConn(expect_status, headers=headers) response = FakeConn(expect_status,
timestamp=self.timestamp,
headers=headers)
response.status = expect_status response.status = expect_status
return response return response

View File

@ -52,9 +52,9 @@ class TestAccountBroker(unittest.TestCase):
pass pass
except DatabaseConnectionError as e: except DatabaseConnectionError as e:
self.assertTrue(hasattr(e, 'path')) self.assertTrue(hasattr(e, 'path'))
self.assertEquals(e.path, ':memory:') self.assertEqual(e.path, ':memory:')
self.assertTrue(hasattr(e, 'msg')) self.assertTrue(hasattr(e, 'msg'))
self.assertEquals(e.msg, "DB doesn't exist") self.assertEqual(e.msg, "DB doesn't exist")
except Exception as e: except Exception as e:
self.fail("Unexpected exception raised: %r" % e) self.fail("Unexpected exception raised: %r" % e)
else: else:
@ -486,6 +486,11 @@ class TestAccountBroker(unittest.TestCase):
POLICIES.default.idx) POLICIES.default.idx)
broker.put_container('a-b', Timestamp(time()).internal, 0, 0, 0, broker.put_container('a-b', Timestamp(time()).internal, 0, 0, 0,
POLICIES.default.idx) POLICIES.default.idx)
# NB: ord(".") == ord("-") + 1
broker.put_container('a.', Timestamp(time()).internal, 0, 0, 0,
POLICIES.default.idx)
broker.put_container('a.b', Timestamp(time()).internal, 0, 0, 0,
POLICIES.default.idx)
broker.put_container('b', Timestamp(time()).internal, 0, 0, 0, broker.put_container('b', Timestamp(time()).internal, 0, 0, 0,
POLICIES.default.idx) POLICIES.default.idx)
broker.put_container('b-a', Timestamp(time()).internal, 0, 0, 0, broker.put_container('b-a', Timestamp(time()).internal, 0, 0, 0,
@ -495,20 +500,16 @@ class TestAccountBroker(unittest.TestCase):
broker.put_container('c', Timestamp(time()).internal, 0, 0, 0, broker.put_container('c', Timestamp(time()).internal, 0, 0, 0,
POLICIES.default.idx) POLICIES.default.idx)
listing = broker.list_containers_iter(15, None, None, None, None) listing = broker.list_containers_iter(15, None, None, None, None)
self.assertEqual(len(listing), 10)
self.assertEqual([row[0] for row in listing], self.assertEqual([row[0] for row in listing],
['a', 'a-', 'a-a', 'a-a-a', 'a-a-b', 'a-b', 'b', ['a', 'a-', 'a-a', 'a-a-a', 'a-a-b', 'a-b', 'a.',
'b-a', 'b-b', 'c']) 'a.b', 'b', 'b-a', 'b-b', 'c'])
listing = broker.list_containers_iter(15, None, None, '', '-') listing = broker.list_containers_iter(15, None, None, '', '-')
self.assertEqual(len(listing), 5)
self.assertEqual([row[0] for row in listing], self.assertEqual([row[0] for row in listing],
['a', 'a-', 'b', 'b-', 'c']) ['a', 'a-', 'a.', 'a.b', 'b', 'b-', 'c'])
listing = broker.list_containers_iter(15, None, None, 'a-', '-') listing = broker.list_containers_iter(15, None, None, 'a-', '-')
self.assertEqual(len(listing), 4)
self.assertEqual([row[0] for row in listing], self.assertEqual([row[0] for row in listing],
['a-', 'a-a', 'a-a-', 'a-b']) ['a-', 'a-a', 'a-a-', 'a-b'])
listing = broker.list_containers_iter(15, None, None, 'b-', '-') listing = broker.list_containers_iter(15, None, None, 'b-', '-')
self.assertEqual(len(listing), 2)
self.assertEqual([row[0] for row in listing], ['b-a', 'b-b']) self.assertEqual([row[0] for row in listing], ['b-a', 'b-b'])
def test_chexor(self): def test_chexor(self):
@ -584,8 +585,8 @@ class TestAccountBroker(unittest.TestCase):
broker2.merge_items(json.loads(json.dumps(broker1.get_items_since( broker2.merge_items(json.loads(json.dumps(broker1.get_items_since(
broker2.get_sync(id1), 1000))), id1) broker2.get_sync(id1), 1000))), id1)
items = broker2.get_items_since(-1, 1000) items = broker2.get_items_since(-1, 1000)
self.assertEquals(['b', snowman], self.assertEqual(['b', snowman],
sorted([rec['name'] for rec in items])) sorted([rec['name'] for rec in items]))
items_by_name = dict((rec['name'], rec) for rec in items) items_by_name = dict((rec['name'], rec) for rec in items)
self.assertEqual(items_by_name[snowman]['object_count'], 2) self.assertEqual(items_by_name[snowman]['object_count'], 2)

View File

@ -77,6 +77,7 @@ class FakeBroker(object):
class FakeAccountBroker(object): class FakeAccountBroker(object):
def __init__(self, containers): def __init__(self, containers):
self.containers = containers self.containers = containers
self.containers_yielded = []
def get_info(self): def get_info(self):
info = {'account': 'a', info = {'account': 'a',
@ -101,11 +102,11 @@ class FakeRing(object):
'port': 6002, 'port': 6002,
'device': None}, 'device': None},
{'id': '2', {'id': '2',
'ip': '10.10.10.1', 'ip': '10.10.10.2',
'port': 6002, 'port': 6002,
'device': None}, 'device': None},
{'id': '3', {'id': '3',
'ip': '10.10.10.1', 'ip': '10.10.10.3',
'port': 6002, 'port': 6002,
'device': None}, 'device': None},
] ]
@ -387,7 +388,7 @@ class TestReaper(unittest.TestCase):
'1429117638.86767') '1429117638.86767')
# verify calls to direct_delete_container # verify calls to direct_delete_container
self.assertEquals(mocks['direct_delete_container'].call_count, 3) self.assertEqual(mocks['direct_delete_container'].call_count, 3)
for i, call_args in enumerate( for i, call_args in enumerate(
mocks['direct_delete_container'].call_args_list): mocks['direct_delete_container'].call_args_list):
anode = acc_nodes[i % len(acc_nodes)] anode = acc_nodes[i % len(acc_nodes)]
@ -504,24 +505,26 @@ class TestReaper(unittest.TestCase):
self.called_amount = 0 self.called_amount = 0
self.r = r = self.init_reaper({}, fakelogger=True) self.r = r = self.init_reaper({}, fakelogger=True)
r.start_time = time.time() r.start_time = time.time()
ctx = [patch('swift.account.reaper.AccountReaper.reap_container', with patch('swift.account.reaper.AccountReaper.reap_container',
self.fake_reap_container), self.fake_reap_container), \
patch('swift.account.reaper.AccountReaper.get_account_ring', patch('swift.account.reaper.AccountReaper.get_account_ring',
self.fake_account_ring)] self.fake_account_ring):
with nested(*ctx):
nodes = r.get_account_ring().get_part_nodes() nodes = r.get_account_ring().get_part_nodes()
self.assertTrue(r.reap_account(broker, 'partition', nodes)) for container_shard, node in enumerate(nodes):
self.assertTrue(
r.reap_account(broker, 'partition', nodes,
container_shard=container_shard))
self.assertEqual(self.called_amount, 4) self.assertEqual(self.called_amount, 4)
info_lines = r.logger.get_lines_for_level('info') info_lines = r.logger.get_lines_for_level('info')
self.assertEqual(len(info_lines), 2) self.assertEqual(len(info_lines), 6)
start_line, stat_line = info_lines for start_line, stat_line in zip(*[iter(info_lines)] * 2):
self.assertEqual(start_line, 'Beginning pass on account a') self.assertEqual(start_line, 'Beginning pass on account a')
self.assertTrue(stat_line.find('1 containers deleted')) self.assertTrue(stat_line.find('1 containers deleted'))
self.assertTrue(stat_line.find('1 objects deleted')) self.assertTrue(stat_line.find('1 objects deleted'))
self.assertTrue(stat_line.find('1 containers remaining')) self.assertTrue(stat_line.find('1 containers remaining'))
self.assertTrue(stat_line.find('1 objects remaining')) self.assertTrue(stat_line.find('1 objects remaining'))
self.assertTrue(stat_line.find('1 containers possibly remaining')) self.assertTrue(stat_line.find('1 containers possibly remaining'))
self.assertTrue(stat_line.find('1 objects possibly remaining')) self.assertTrue(stat_line.find('1 objects possibly remaining'))
def test_reap_account_no_container(self): def test_reap_account_no_container(self):
broker = FakeAccountBroker(tuple()) broker = FakeAccountBroker(tuple())
@ -584,6 +587,67 @@ class TestReaper(unittest.TestCase):
r.reap_device('sda1') r.reap_device('sda1')
self.assertEqual(self.called_amount, 0) self.assertEqual(self.called_amount, 0)
def test_reap_device_with_sharding(self):
devices = self.prepare_data_dir()
conf = {'devices': devices}
r = self.init_reaper(conf, myips=['10.10.10.2'])
container_shard_used = [-1]
def fake_reap_account(*args, **kwargs):
container_shard_used[0] = kwargs.get('container_shard')
with patch('swift.account.reaper.AccountBroker',
FakeAccountBroker), \
patch('swift.account.reaper.AccountReaper.get_account_ring',
self.fake_account_ring), \
patch('swift.account.reaper.AccountReaper.reap_account',
fake_reap_account):
r.reap_device('sda1')
# 10.10.10.2 is second node from ring
self.assertEqual(container_shard_used[0], 1)
def test_reap_account_with_sharding(self):
devices = self.prepare_data_dir()
self.called_amount = 0
conf = {'devices': devices}
r = self.init_reaper(conf, myips=['10.10.10.2'])
container_reaped = [0]
def fake_list_containers_iter(self, *args):
for container in self.containers:
if container in self.containers_yielded:
continue
yield container, None, None, None
self.containers_yielded.append(container)
def fake_reap_container(self, account, account_partition,
account_nodes, container):
container_reaped[0] += 1
ctx = [patch('swift.account.reaper.AccountBroker',
FakeAccountBroker),
patch('swift.account.reaper.AccountBroker.list_containers_iter',
fake_list_containers_iter),
patch('swift.account.reaper.AccountReaper.reap_container',
fake_reap_container), ]
fake_ring = FakeRing()
with nested(*ctx):
fake_broker = FakeAccountBroker(['c', 'd', 'e'])
r.reap_account(fake_broker, 10, fake_ring.nodes, 0)
self.assertEqual(container_reaped[0], 1)
fake_broker = FakeAccountBroker(['c', 'd', 'e'])
container_reaped[0] = 0
r.reap_account(fake_broker, 10, fake_ring.nodes, 1)
self.assertEqual(container_reaped[0], 2)
container_reaped[0] = 0
fake_broker = FakeAccountBroker(['c', 'd', 'e'])
r.reap_account(fake_broker, 10, fake_ring.nodes, 2)
self.assertEqual(container_reaped[0], 0)
def test_run_once(self): def test_run_once(self):
def prepare_data_dir(): def prepare_data_dir():
devices_path = tempfile.mkdtemp() devices_path = tempfile.mkdtemp()

View File

@ -63,13 +63,13 @@ class TestAccountController(unittest.TestCase):
req = Request.blank('/sda1/p/a/c/o', {'REQUEST_METHOD': 'OPTIONS'}) req = Request.blank('/sda1/p/a/c/o', {'REQUEST_METHOD': 'OPTIONS'})
req.content_length = 0 req.content_length = 0
resp = server_handler.OPTIONS(req) resp = server_handler.OPTIONS(req)
self.assertEquals(200, resp.status_int) self.assertEqual(200, resp.status_int)
for verb in 'OPTIONS GET POST PUT DELETE HEAD REPLICATE'.split(): for verb in 'OPTIONS GET POST PUT DELETE HEAD REPLICATE'.split():
self.assertTrue( self.assertTrue(
verb in resp.headers['Allow'].split(', ')) verb in resp.headers['Allow'].split(', '))
self.assertEquals(len(resp.headers['Allow'].split(', ')), 7) self.assertEqual(len(resp.headers['Allow'].split(', ')), 7)
self.assertEquals(resp.headers['Server'], self.assertEqual(resp.headers['Server'],
(server_handler.server_type + '/' + swift_version)) (server_handler.server_type + '/' + swift_version))
def test_DELETE_not_found(self): def test_DELETE_not_found(self):
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'DELETE', req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'DELETE',
@ -599,11 +599,11 @@ class TestAccountController(unittest.TestCase):
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT', req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'}) 'HTTP_X_TIMESTAMP': '0'})
resp = req.get_response(self.controller) resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 201) self.assertEqual(resp.status_int, 201)
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'DELETE', req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'DELETE',
'HTTP_X_TIMESTAMP': '1'}) 'HTTP_X_TIMESTAMP': '1'})
resp = req.get_response(self.controller) resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 204) self.assertEqual(resp.status_int, 204)
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'POST', req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'POST',
'HTTP_X_TIMESTAMP': '2'}) 'HTTP_X_TIMESTAMP': '2'})
resp = req.get_response(self.controller) resp = req.get_response(self.controller)
@ -1095,7 +1095,7 @@ class TestAccountController(unittest.TestCase):
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT', req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'}) 'HTTP_X_TIMESTAMP': '0'})
resp = req.get_response(self.controller) resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 201) self.assertEqual(resp.status_int, 201)
req = Request.blank('/sda1/p/a/c1', environ={'REQUEST_METHOD': 'PUT'}, req = Request.blank('/sda1/p/a/c1', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Put-Timestamp': '1', headers={'X-Put-Timestamp': '1',
'X-Delete-Timestamp': '0', 'X-Delete-Timestamp': '0',
@ -1103,7 +1103,7 @@ class TestAccountController(unittest.TestCase):
'X-Bytes-Used': '0', 'X-Bytes-Used': '0',
'X-Timestamp': normalize_timestamp(0)}) 'X-Timestamp': normalize_timestamp(0)})
resp = req.get_response(self.controller) resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 201) self.assertEqual(resp.status_int, 201)
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'GET'}) req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'GET'})
req.accept = 'application/*' req.accept = 'application/*'
resp = req.get_response(self.controller) resp = req.get_response(self.controller)
@ -1687,8 +1687,8 @@ class TestAccountController(unittest.TestCase):
'wsgi.multiprocess': False, 'wsgi.multiprocess': False,
'wsgi.run_once': False} 'wsgi.run_once': False}
self.controller(env, start_response) self.controller(env, start_response)
self.assertEquals(errbuf.getvalue(), '') self.assertEqual(errbuf.getvalue(), '')
self.assertEquals(outbuf.getvalue()[:4], '405 ') self.assertEqual(outbuf.getvalue()[:4], '405 ')
def test_GET_log_requests_true(self): def test_GET_log_requests_true(self):
self.controller.logger = FakeLogger() self.controller.logger = FakeLogger()
@ -1748,15 +1748,15 @@ class TestAccountController(unittest.TestCase):
req = Request.blank('/sda1/p/a', method=method) req = Request.blank('/sda1/p/a', method=method)
resp = req.get_response(self.controller) resp = req.get_response(self.controller)
self.assertEqual(resp.status_int // 100, 2) self.assertEqual(resp.status_int // 100, 2)
self.assertEquals(resp.headers['X-Account-Object-Count'], '2') self.assertEqual(resp.headers['X-Account-Object-Count'], '2')
self.assertEquals(resp.headers['X-Account-Bytes-Used'], '4') self.assertEqual(resp.headers['X-Account-Bytes-Used'], '4')
self.assertEquals( self.assertEqual(
resp.headers['X-Account-Storage-Policy-%s-Object-Count' % resp.headers['X-Account-Storage-Policy-%s-Object-Count' %
POLICIES[0].name], '2') POLICIES[0].name], '2')
self.assertEquals( self.assertEqual(
resp.headers['X-Account-Storage-Policy-%s-Bytes-Used' % resp.headers['X-Account-Storage-Policy-%s-Bytes-Used' %
POLICIES[0].name], '4') POLICIES[0].name], '4')
self.assertEquals( self.assertEqual(
resp.headers['X-Account-Storage-Policy-%s-Container-Count' % resp.headers['X-Account-Storage-Policy-%s-Container-Count' %
POLICIES[0].name], '1') POLICIES[0].name], '1')
@ -1786,15 +1786,15 @@ class TestAccountController(unittest.TestCase):
req = Request.blank('/sda1/p/a', method=method) req = Request.blank('/sda1/p/a', method=method)
resp = req.get_response(self.controller) resp = req.get_response(self.controller)
self.assertEqual(resp.status_int // 100, 2) self.assertEqual(resp.status_int // 100, 2)
self.assertEquals(resp.headers['X-Account-Object-Count'], '2') self.assertEqual(resp.headers['X-Account-Object-Count'], '2')
self.assertEquals(resp.headers['X-Account-Bytes-Used'], '4') self.assertEqual(resp.headers['X-Account-Bytes-Used'], '4')
self.assertEquals( self.assertEqual(
resp.headers['X-Account-Storage-Policy-%s-Object-Count' % resp.headers['X-Account-Storage-Policy-%s-Object-Count' %
policy.name], '2') policy.name], '2')
self.assertEquals( self.assertEqual(
resp.headers['X-Account-Storage-Policy-%s-Bytes-Used' % resp.headers['X-Account-Storage-Policy-%s-Bytes-Used' %
policy.name], '4') policy.name], '4')
self.assertEquals( self.assertEqual(
resp.headers['X-Account-Storage-Policy-%s-Container-Count' % resp.headers['X-Account-Storage-Policy-%s-Container-Count' %
policy.name], '1') policy.name], '1')

View File

@ -128,8 +128,8 @@ Metadata:
No system metadata found in db file No system metadata found in db file
User Metadata: {'mydata': 'swift'}''' User Metadata: {'mydata': 'swift'}'''
self.assertEquals(sorted(out.getvalue().strip().split('\n')), self.assertEqual(sorted(out.getvalue().strip().split('\n')),
sorted(exp_out.split('\n'))) sorted(exp_out.split('\n')))
info = dict( info = dict(
account='acct', account='acct',
@ -175,8 +175,8 @@ Metadata:
X-Container-Foo: bar X-Container-Foo: bar
System Metadata: {'mydata': 'swift'} System Metadata: {'mydata': 'swift'}
No user metadata found in db file''' % POLICIES[0].name No user metadata found in db file''' % POLICIES[0].name
self.assertEquals(sorted(out.getvalue().strip().split('\n')), self.assertEqual(sorted(out.getvalue().strip().split('\n')),
sorted(exp_out.split('\n'))) sorted(exp_out.split('\n')))
def test_print_ring_locations_invalid_args(self): def test_print_ring_locations_invalid_args(self):
self.assertRaises(ValueError, print_ring_locations, self.assertRaises(ValueError, print_ring_locations,
@ -306,7 +306,7 @@ No user metadata found in db file''' % POLICIES[0].name
if exp_raised: if exp_raised:
exp_out = 'Does not appear to be a DB of type "account":' \ exp_out = 'Does not appear to be a DB of type "account":' \
' ./d49d0ecbb53be1fcc49624f2f7c7ccae.db' ' ./d49d0ecbb53be1fcc49624f2f7c7ccae.db'
self.assertEquals(out.getvalue().strip(), exp_out) self.assertEqual(out.getvalue().strip(), exp_out)
else: else:
self.fail("Expected an InfoSystemExit exception to be raised") self.fail("Expected an InfoSystemExit exception to be raised")
@ -334,8 +334,8 @@ class TestPrintObj(TestCliInfoBase):
out = StringIO() out = StringIO()
with mock.patch('sys.stdout', out): with mock.patch('sys.stdout', out):
self.assertRaises(InfoSystemExit, print_obj, datafile) self.assertRaises(InfoSystemExit, print_obj, datafile)
self.assertEquals(out.getvalue().strip(), self.assertEqual(out.getvalue().strip(),
'Invalid metadata') 'Invalid metadata')
def test_print_obj_valid(self): def test_print_obj_valid(self):
out = StringIO() out = StringIO()
@ -489,7 +489,7 @@ Other Metadata:
No metadata found''' % ( No metadata found''' % (
utils.Timestamp(106.3).internal) utils.Timestamp(106.3).internal)
self.assertEquals(out.getvalue().strip(), exp_out) self.assertEqual(out.getvalue().strip(), exp_out)
metadata = get_metadata({ metadata = get_metadata({
'X-Object-Sysmeta-Mtime': '107.3', 'X-Object-Sysmeta-Mtime': '107.3',
@ -514,7 +514,7 @@ Other Metadata:
No metadata found''' % ( No metadata found''' % (
utils.Timestamp(106.3).internal) utils.Timestamp(106.3).internal)
self.assertEquals(out.getvalue().strip(), exp_out) self.assertEqual(out.getvalue().strip(), exp_out)
metadata = get_metadata({ metadata = get_metadata({
'X-Object-Meta-Mtime': '107.3', 'X-Object-Meta-Mtime': '107.3',
@ -539,7 +539,7 @@ Other Metadata:
X-Object-Mtime: 107.3''' % ( X-Object-Mtime: 107.3''' % (
utils.Timestamp(106.3).internal) utils.Timestamp(106.3).internal)
self.assertEquals(out.getvalue().strip(), exp_out) self.assertEqual(out.getvalue().strip(), exp_out)
metadata = get_metadata({}) metadata = get_metadata({})
out = StringIO() out = StringIO()
@ -560,7 +560,7 @@ Other Metadata:
No metadata found''' % ( No metadata found''' % (
utils.Timestamp(106.3).internal) utils.Timestamp(106.3).internal)
self.assertEquals(out.getvalue().strip(), exp_out) self.assertEqual(out.getvalue().strip(), exp_out)
metadata = get_metadata({'X-Object-Meta-Mtime': '107.3'}) metadata = get_metadata({'X-Object-Meta-Mtime': '107.3'})
metadata['name'] = '/a-s' metadata['name'] = '/a-s'
@ -583,7 +583,7 @@ Other Metadata:
No metadata found''' % ( No metadata found''' % (
utils.Timestamp(106.3).internal) utils.Timestamp(106.3).internal)
self.assertEquals(out.getvalue().strip(), exp_out) self.assertEqual(out.getvalue().strip(), exp_out)
metadata = get_metadata({'X-Object-Meta-Mtime': '107.3'}) metadata = get_metadata({'X-Object-Meta-Mtime': '107.3'})
del metadata['Content-Type'] del metadata['Content-Type']
@ -605,7 +605,7 @@ Other Metadata:
No metadata found''' % ( No metadata found''' % (
utils.Timestamp(106.3).internal) utils.Timestamp(106.3).internal)
self.assertEquals(out.getvalue().strip(), exp_out) self.assertEqual(out.getvalue().strip(), exp_out)
metadata = get_metadata({'X-Object-Meta-Mtime': '107.3'}) metadata = get_metadata({'X-Object-Meta-Mtime': '107.3'})
del metadata['X-Timestamp'] del metadata['X-Timestamp']
@ -626,7 +626,7 @@ User Metadata:
Other Metadata: Other Metadata:
No metadata found''' No metadata found'''
self.assertEquals(out.getvalue().strip(), exp_out) self.assertEqual(out.getvalue().strip(), exp_out)
class TestPrintObjWeirdPath(TestPrintObjFullMeta): class TestPrintObjWeirdPath(TestPrintObjFullMeta):

View File

@ -61,7 +61,7 @@ class TestScout(unittest.TestCase):
@mock.patch('eventlet.green.urllib2.urlopen') @mock.patch('eventlet.green.urllib2.urlopen')
def test_scout_ok(self, mock_urlopen): def test_scout_ok(self, mock_urlopen):
mock_urlopen.return_value.read = lambda: json.dumps([]) mock_urlopen.return_value.read = lambda: json.dumps([])
url, content, status = self.scout_instance.scout( url, content, status, ts_start, ts_end = self.scout_instance.scout(
("127.0.0.1", "8080")) ("127.0.0.1", "8080"))
self.assertEqual(url, self.url) self.assertEqual(url, self.url)
self.assertEqual(content, []) self.assertEqual(content, [])
@ -70,7 +70,7 @@ class TestScout(unittest.TestCase):
@mock.patch('eventlet.green.urllib2.urlopen') @mock.patch('eventlet.green.urllib2.urlopen')
def test_scout_url_error(self, mock_urlopen): def test_scout_url_error(self, mock_urlopen):
mock_urlopen.side_effect = urllib2.URLError("") mock_urlopen.side_effect = urllib2.URLError("")
url, content, status = self.scout_instance.scout( url, content, status, ts_start, ts_end = self.scout_instance.scout(
("127.0.0.1", "8080")) ("127.0.0.1", "8080"))
self.assertTrue(isinstance(content, urllib2.URLError)) self.assertTrue(isinstance(content, urllib2.URLError))
self.assertEqual(url, self.url) self.assertEqual(url, self.url)
@ -80,7 +80,7 @@ class TestScout(unittest.TestCase):
def test_scout_http_error(self, mock_urlopen): def test_scout_http_error(self, mock_urlopen):
mock_urlopen.side_effect = urllib2.HTTPError( mock_urlopen.side_effect = urllib2.HTTPError(
self.url, 404, "Internal error", None, None) self.url, 404, "Internal error", None, None)
url, content, status = self.scout_instance.scout( url, content, status, ts_start, ts_end = self.scout_instance.scout(
("127.0.0.1", "8080")) ("127.0.0.1", "8080"))
self.assertEqual(url, self.url) self.assertEqual(url, self.url)
self.assertTrue(isinstance(content, urllib2.HTTPError)) self.assertTrue(isinstance(content, urllib2.HTTPError))
@ -218,7 +218,7 @@ class TestRecon(unittest.TestCase):
'/etc/swift/object-1.ring.gz': empty_file_hash, '/etc/swift/object-1.ring.gz': empty_file_hash,
} }
status = 200 status = 200
scout_instance.scout.return_value = (url, response, status) scout_instance.scout.return_value = (url, response, status, 0, 0)
mock_scout.return_value = scout_instance mock_scout.return_value = scout_instance
stdout = StringIO() stdout = StringIO()
mock_hash = mock.MagicMock() mock_hash = mock.MagicMock()
@ -274,7 +274,7 @@ class TestRecon(unittest.TestCase):
url = 'http://%s:%s/recon/quarantined' % host url = 'http://%s:%s/recon/quarantined' % host
response = responses[host[1]] response = responses[host[1]]
status = 200 status = 200
return url, response, status return url, response, status, 0, 0
stdout = StringIO() stdout = StringIO()
patches = [ patches = [
@ -290,10 +290,10 @@ class TestRecon(unittest.TestCase):
m = r.match(line) m = r.match(line)
if m: if m:
ex = expected.pop(m.group(1)) ex = expected.pop(m.group(1))
self.assertEquals(m.group(2), self.assertEqual(m.group(2),
" low: %s, high: %s, avg: %s, total: %s," " low: %s, high: %s, avg: %s, total: %s,"
" Failed: %s%%, no_result: %s, reported: %s" " Failed: %s%%, no_result: %s, reported: %s"
% ex) % ex)
self.assertFalse(expected) self.assertFalse(expected)
def test_drive_audit_check(self): def test_drive_audit_check(self):
@ -311,7 +311,7 @@ class TestRecon(unittest.TestCase):
url = 'http://%s:%s/recon/driveaudit' % host url = 'http://%s:%s/recon/driveaudit' % host
response = responses[host[1]] response = responses[host[1]]
status = 200 status = 200
return url, response, status return url, response, status, 0, 0
stdout = StringIO() stdout = StringIO()
patches = [ patches = [
@ -328,10 +328,10 @@ class TestRecon(unittest.TestCase):
for line in lines: for line in lines:
m = r.match(line) m = r.match(line)
if m: if m:
self.assertEquals(m.group(2), self.assertEqual(m.group(2),
" low: %s, high: %s, avg: %s, total: %s," " low: %s, high: %s, avg: %s, total: %s,"
" Failed: %s%%, no_result: %s, reported: %s" " Failed: %s%%, no_result: %s, reported: %s"
% expected) % expected)
class TestReconCommands(unittest.TestCase): class TestReconCommands(unittest.TestCase):
@ -491,7 +491,7 @@ class TestReconCommands(unittest.TestCase):
return [('http://127.0.0.1:6010/recon/auditor/object', { return [('http://127.0.0.1:6010/recon/auditor/object', {
'object_auditor_stats_ALL': values, 'object_auditor_stats_ALL': values,
'object_auditor_stats_ZBF': values, 'object_auditor_stats_ZBF': values,
}, 200)] }, 200, 0, 0)]
response = {} response = {}
@ -535,7 +535,9 @@ class TestReconCommands(unittest.TestCase):
"avail": 15, "used": 85, "size": 100}, "avail": 15, "used": 85, "size": 100},
{"device": "sdd1", "mounted": True, {"device": "sdd1", "mounted": True,
"avail": 15, "used": 85, "size": 100}], "avail": 15, "used": 85, "size": 100}],
200)] 200,
0,
0)]
cli = recon.SwiftRecon() cli = recon.SwiftRecon()
cli.pool.imap = dummy_request cli.pool.imap = dummy_request
@ -576,40 +578,6 @@ class TestReconCommands(unittest.TestCase):
cli.disk_usage([('127.0.0.1', 6010)], 5, 0) cli.disk_usage([('127.0.0.1', 6010)], 5, 0)
mock_print.assert_has_calls(expected_calls) mock_print.assert_has_calls(expected_calls)
@mock.patch('__builtin__.print')
@mock.patch('time.time')
def test_object_replication_check(self, mock_now, mock_print):
now = 1430000000.0
def dummy_request(*args, **kwargs):
return [
('http://127.0.0.1:6010/recon/replication/object',
{"object_replication_time": 61,
"object_replication_last": now},
200),
('http://127.0.0.1:6020/recon/replication/object',
{"object_replication_time": 23,
"object_replication_last": now},
200),
]
cli = recon.SwiftRecon()
cli.pool.imap = dummy_request
default_calls = [
mock.call('[replication_time] low: 23, high: 61, avg: 42.0, ' +
'total: 84, Failed: 0.0%, no_result: 0, reported: 2'),
mock.call('Oldest completion was 2015-04-25 22:13:20 ' +
'(42 seconds ago) by 127.0.0.1:6010.'),
mock.call('Most recent completion was 2015-04-25 22:13:20 ' +
'(42 seconds ago) by 127.0.0.1:6010.'),
]
mock_now.return_value = now + 42
cli.object_replication_check([('127.0.0.1', 6010),
('127.0.0.1', 6020)])
mock_print.assert_has_calls(default_calls)
@mock.patch('__builtin__.print') @mock.patch('__builtin__.print')
@mock.patch('time.time') @mock.patch('time.time')
def test_replication_check(self, mock_now, mock_print): def test_replication_check(self, mock_now, mock_print):
@ -625,7 +593,9 @@ class TestReconCommands(unittest.TestCase):
"remote_merge": 0, "diff_capped": 0, "start": now, "remote_merge": 0, "diff_capped": 0, "start": now,
"hashmatch": 0, "diff": 0, "empty": 0}, "hashmatch": 0, "diff": 0, "empty": 0},
"replication_time": 42}, "replication_time": 42},
200), 200,
0,
0),
('http://127.0.0.1:6021/recon/replication/container', ('http://127.0.0.1:6021/recon/replication/container',
{"replication_last": now, {"replication_last": now,
"replication_stats": { "replication_stats": {
@ -634,7 +604,9 @@ class TestReconCommands(unittest.TestCase):
"remote_merge": 0, "diff_capped": 0, "start": now, "remote_merge": 0, "diff_capped": 0, "start": now,
"hashmatch": 0, "diff": 0, "empty": 0}, "hashmatch": 0, "diff": 0, "empty": 0},
"replication_time": 23}, "replication_time": 23},
200), 200,
0,
0),
] ]
cli = recon.SwiftRecon() cli = recon.SwiftRecon()
@ -671,11 +643,15 @@ class TestReconCommands(unittest.TestCase):
('http://127.0.0.1:6010/recon/load', ('http://127.0.0.1:6010/recon/load',
{"1m": 0.2, "5m": 0.4, "15m": 0.25, {"1m": 0.2, "5m": 0.4, "15m": 0.25,
"processes": 10000, "tasks": "1/128"}, "processes": 10000, "tasks": "1/128"},
200), 200,
0,
0),
('http://127.0.0.1:6020/recon/load', ('http://127.0.0.1:6020/recon/load',
{"1m": 0.4, "5m": 0.8, "15m": 0.75, {"1m": 0.4, "5m": 0.8, "15m": 0.75,
"processes": 9000, "tasks": "1/200"}, "processes": 9000, "tasks": "1/200"},
200), 200,
0,
0),
] ]
cli = recon.SwiftRecon() cli = recon.SwiftRecon()
@ -695,3 +671,75 @@ class TestReconCommands(unittest.TestCase):
# We need any_order=True because the order of calls depends on the dict # We need any_order=True because the order of calls depends on the dict
# that is returned from the recon middleware, thus can't rely on it # that is returned from the recon middleware, thus can't rely on it
mock_print.assert_has_calls(default_calls, any_order=True) mock_print.assert_has_calls(default_calls, any_order=True)
@mock.patch('__builtin__.print')
@mock.patch('time.time')
def test_time_check(self, mock_now, mock_print):
now = 1430000000.0
mock_now.return_value = now
def dummy_request(*args, **kwargs):
return [
('http://127.0.0.1:6010/recon/load',
now,
200,
now - 0.5,
now + 0.5),
('http://127.0.0.1:6020/recon/load',
now,
200,
now,
now),
]
cli = recon.SwiftRecon()
cli.pool.imap = dummy_request
default_calls = [
mock.call('2/2 hosts matched, 0 error[s] while checking hosts.')
]
cli.time_check([('127.0.0.1', 6010), ('127.0.0.1', 6020)])
# We need any_order=True because the order of calls depends on the dict
# that is returned from the recon middleware, thus can't rely on it
mock_print.assert_has_calls(default_calls, any_order=True)
@mock.patch('__builtin__.print')
@mock.patch('time.time')
def test_time_check_mismatch(self, mock_now, mock_print):
now = 1430000000.0
mock_now.return_value = now
def dummy_request(*args, **kwargs):
return [
('http://127.0.0.1:6010/recon/time',
now,
200,
now + 0.5,
now + 1.3),
('http://127.0.0.1:6020/recon/time',
now,
200,
now,
now),
]
cli = recon.SwiftRecon()
cli.pool.imap = dummy_request
default_calls = [
mock.call("!! http://127.0.0.1:6010/recon/time current time is "
"2015-04-25 22:13:21, but remote is "
"2015-04-25 22:13:20, differs by 1.30 sec"),
mock.call('1/2 hosts matched, 0 error[s] while checking hosts.'),
]
def mock_localtime(*args, **kwargs):
return time.gmtime(*args, **kwargs)
with mock.patch("time.localtime", mock_localtime):
cli.time_check([('127.0.0.1', 6010), ('127.0.0.1', 6020)])
# We need any_order=True because the order of calls depends on the dict
# that is returned from the recon middleware, thus can't rely on it
mock_print.assert_has_calls(default_calls, any_order=True)

View File

@ -147,7 +147,7 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin):
ringbuilder._parse_search_values(argv) ringbuilder._parse_search_values(argv)
except SystemExit as e: except SystemExit as e:
err = e err = e
self.assertEquals(err.code, 2) self.assertEqual(err.code, 2)
def test_find_parts(self): def test_find_parts(self):
rb = RingBuilder(8, 3, 0) rb = RingBuilder(8, 3, 0)
@ -185,7 +185,7 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin):
ringbuilder._parse_list_parts_values(argv) ringbuilder._parse_list_parts_values(argv)
except SystemExit as e: except SystemExit as e:
err = e err = e
self.assertEquals(err.code, 2) self.assertEqual(err.code, 2)
def test_parse_add_values_number_of_arguments(self): def test_parse_add_values_number_of_arguments(self):
# Test Number of arguments abnormal # Test Number of arguments abnormal
@ -195,7 +195,7 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin):
ringbuilder._parse_add_values(argv) ringbuilder._parse_add_values(argv)
except SystemExit as e: except SystemExit as e:
err = e err = e
self.assertEquals(err.code, 2) self.assertEqual(err.code, 2)
def test_set_weight_values_no_devices(self): def test_set_weight_values_no_devices(self):
# Test no devices # Test no devices
@ -204,7 +204,7 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin):
ringbuilder._set_weight_values([], 100) ringbuilder._set_weight_values([], 100)
except SystemExit as e: except SystemExit as e:
err = e err = e
self.assertEquals(err.code, 2) self.assertEqual(err.code, 2)
def test_parse_set_weight_values_number_of_arguments(self): def test_parse_set_weight_values_number_of_arguments(self):
# Test Number of arguments abnormal # Test Number of arguments abnormal
@ -214,7 +214,7 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin):
ringbuilder._parse_set_weight_values(argv) ringbuilder._parse_set_weight_values(argv)
except SystemExit as e: except SystemExit as e:
err = e err = e
self.assertEquals(err.code, 2) self.assertEqual(err.code, 2)
argv = ["--region", "2"] argv = ["--region", "2"]
err = None err = None
@ -222,7 +222,7 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin):
ringbuilder._parse_set_weight_values(argv) ringbuilder._parse_set_weight_values(argv)
except SystemExit as e: except SystemExit as e:
err = e err = e
self.assertEquals(err.code, 2) self.assertEqual(err.code, 2)
def test_set_info_values_no_devices(self): def test_set_info_values_no_devices(self):
# Test no devices # Test no devices
@ -231,7 +231,7 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin):
ringbuilder._set_info_values([], 100) ringbuilder._set_info_values([], 100)
except SystemExit as e: except SystemExit as e:
err = e err = e
self.assertEquals(err.code, 2) self.assertEqual(err.code, 2)
def test_parse_set_info_values_number_of_arguments(self): def test_parse_set_info_values_number_of_arguments(self):
# Test Number of arguments abnormal # Test Number of arguments abnormal
@ -241,7 +241,7 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin):
ringbuilder._parse_set_info_values(argv) ringbuilder._parse_set_info_values(argv)
except SystemExit as e: except SystemExit as e:
err = e err = e
self.assertEquals(err.code, 2) self.assertEqual(err.code, 2)
def test_parse_remove_values_number_of_arguments(self): def test_parse_remove_values_number_of_arguments(self):
# Test Number of arguments abnormal # Test Number of arguments abnormal
@ -251,7 +251,7 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin):
ringbuilder._parse_remove_values(argv) ringbuilder._parse_remove_values(argv)
except SystemExit as e: except SystemExit as e:
err = e err = e
self.assertEquals(err.code, 2) self.assertEqual(err.code, 2)
def test_create_ring(self): def test_create_ring(self):
argv = ["", self.tmpfile, "create", "6", "3.14159265359", "1"] argv = ["", self.tmpfile, "create", "6", "3.14159265359", "1"]
@ -405,7 +405,7 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin):
ringbuilder.main(argv) ringbuilder.main(argv)
except SystemExit as e: except SystemExit as e:
err = e err = e
self.assertEquals(err.code, 2) self.assertEqual(err.code, 2)
def test_add_device_already_exists(self): def test_add_device_already_exists(self):
# Test Add a device that already exists # Test Add a device that already exists
@ -416,7 +416,7 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin):
ringbuilder.main(argv) ringbuilder.main(argv)
except SystemExit as e: except SystemExit as e:
err = e err = e
self.assertEquals(err.code, 2) self.assertEqual(err.code, 2)
def test_remove_device(self): def test_remove_device(self):
for search_value in self.search_values: for search_value in self.search_values:
@ -692,7 +692,7 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin):
ringbuilder.main(argv) ringbuilder.main(argv)
except SystemExit as e: except SystemExit as e:
err = e err = e
self.assertEquals(err.code, 2) self.assertEqual(err.code, 2)
def test_remove_device_no_matching(self): def test_remove_device_no_matching(self):
self.create_sample_ring() self.create_sample_ring()
@ -704,7 +704,7 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin):
ringbuilder.main(argv) ringbuilder.main(argv)
except SystemExit as e: except SystemExit as e:
err = e err = e
self.assertEquals(err.code, 2) self.assertEqual(err.code, 2)
def test_set_weight(self): def test_set_weight(self):
for search_value in self.search_values: for search_value in self.search_values:
@ -903,7 +903,7 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin):
ringbuilder.main(argv) ringbuilder.main(argv)
except SystemExit as e: except SystemExit as e:
err = e err = e
self.assertEquals(err.code, 2) self.assertEqual(err.code, 2)
def test_set_weight_no_matching(self): def test_set_weight_no_matching(self):
self.create_sample_ring() self.create_sample_ring()
@ -915,7 +915,7 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin):
ringbuilder.main(argv) ringbuilder.main(argv)
except SystemExit as e: except SystemExit as e:
err = e err = e
self.assertEquals(err.code, 2) self.assertEqual(err.code, 2)
def test_set_info(self): def test_set_info(self):
for search_value in self.search_values: for search_value in self.search_values:
@ -1195,7 +1195,7 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin):
ringbuilder.main(argv) ringbuilder.main(argv)
except SystemExit as e: except SystemExit as e:
err = e err = e
self.assertEquals(err.code, 2) self.assertEqual(err.code, 2)
def test_set_info_no_matching(self): def test_set_info_no_matching(self):
self.create_sample_ring() self.create_sample_ring()
@ -1207,7 +1207,7 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin):
ringbuilder.main(argv) ringbuilder.main(argv)
except SystemExit as e: except SystemExit as e:
err = e err = e
self.assertEquals(err.code, 2) self.assertEqual(err.code, 2)
def test_set_info_already_exists(self): def test_set_info_already_exists(self):
self.create_sample_ring() self.create_sample_ring()
@ -1230,7 +1230,7 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin):
ringbuilder.main(argv) ringbuilder.main(argv)
except SystemExit as e: except SystemExit as e:
err = e err = e
self.assertEquals(err.code, 2) self.assertEqual(err.code, 2)
def test_set_min_part_hours(self): def test_set_min_part_hours(self):
self.create_sample_ring() self.create_sample_ring()
@ -1247,7 +1247,7 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin):
ringbuilder.main(argv) ringbuilder.main(argv)
except SystemExit as e: except SystemExit as e:
err = e err = e
self.assertEquals(err.code, 2) self.assertEqual(err.code, 2)
def test_set_replicas(self): def test_set_replicas(self):
self.create_sample_ring() self.create_sample_ring()
@ -1321,7 +1321,7 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin):
ringbuilder.main(argv) ringbuilder.main(argv)
except SystemExit as e: except SystemExit as e:
err = e err = e
self.assertEquals(err.code, 2) self.assertEqual(err.code, 2)
def test_set_replicas_invalid_value(self): def test_set_replicas_invalid_value(self):
# Test not a valid number # Test not a valid number
@ -1331,7 +1331,7 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin):
ringbuilder.main(argv) ringbuilder.main(argv)
except SystemExit as e: except SystemExit as e:
err = e err = e
self.assertEquals(err.code, 2) self.assertEqual(err.code, 2)
# Test new replicas is 0 # Test new replicas is 0
argv = ["", self.tmpfile, "set_replicas", "0"] argv = ["", self.tmpfile, "set_replicas", "0"]
@ -1340,7 +1340,7 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin):
ringbuilder.main(argv) ringbuilder.main(argv)
except SystemExit as e: except SystemExit as e:
err = e err = e
self.assertEquals(err.code, 2) self.assertEqual(err.code, 2)
def test_validate(self): def test_validate(self):
self.create_sample_ring() self.create_sample_ring()
@ -1358,7 +1358,7 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin):
ringbuilder.main(argv) ringbuilder.main(argv)
except SystemExit as e: except SystemExit as e:
err = e err = e
self.assertEquals(err.code, 2) self.assertEqual(err.code, 2)
def test_validate_corrupted_file(self): def test_validate_corrupted_file(self):
self.create_sample_ring() self.create_sample_ring()
@ -1376,7 +1376,7 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin):
ringbuilder.main(argv) ringbuilder.main(argv)
except SystemExit as e: except SystemExit as e:
err = e err = e
self.assertEquals(err.code, 2) self.assertEqual(err.code, 2)
def test_validate_non_existent_file(self): def test_validate_non_existent_file(self):
rand_file = '%s/%s' % ('/tmp', str(uuid.uuid4())) rand_file = '%s/%s' % ('/tmp', str(uuid.uuid4()))
@ -1386,7 +1386,7 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin):
ringbuilder.main(argv) ringbuilder.main(argv)
except SystemExit as e: except SystemExit as e:
err = e err = e
self.assertEquals(err.code, 2) self.assertEqual(err.code, 2)
def test_validate_non_accessible_file(self): def test_validate_non_accessible_file(self):
with mock.patch.object( with mock.patch.object(
@ -1398,7 +1398,7 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin):
ringbuilder.main(argv) ringbuilder.main(argv)
except SystemExit as e: except SystemExit as e:
err = e err = e
self.assertEquals(err.code, 2) self.assertEqual(err.code, 2)
def test_validate_generic_error(self): def test_validate_generic_error(self):
with mock.patch.object( with mock.patch.object(
@ -1410,7 +1410,7 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin):
ringbuilder.main(argv) ringbuilder.main(argv)
except SystemExit as e: except SystemExit as e:
err = e err = e
self.assertEquals(err.code, 2) self.assertEqual(err.code, 2)
def test_search_device_ipv4_old_format(self): def test_search_device_ipv4_old_format(self):
self.create_sample_ring() self.create_sample_ring()
@ -1510,7 +1510,7 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin):
ringbuilder.main(argv) ringbuilder.main(argv)
except SystemExit as e: except SystemExit as e:
err = e err = e
self.assertEquals(err.code, 2) self.assertEqual(err.code, 2)
def test_search_device_no_matching(self): def test_search_device_no_matching(self):
self.create_sample_ring() self.create_sample_ring()
@ -1522,7 +1522,7 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin):
ringbuilder.main(argv) ringbuilder.main(argv)
except SystemExit as e: except SystemExit as e:
err = e err = e
self.assertEquals(err.code, 2) self.assertEqual(err.code, 2)
def test_list_parts_ipv4_old_format(self): def test_list_parts_ipv4_old_format(self):
self.create_sample_ring() self.create_sample_ring()
@ -1622,7 +1622,7 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin):
ringbuilder.main(argv) ringbuilder.main(argv)
except SystemExit as e: except SystemExit as e:
err = e err = e
self.assertEquals(err.code, 2) self.assertEqual(err.code, 2)
def test_list_parts_no_matching(self): def test_list_parts_no_matching(self):
self.create_sample_ring() self.create_sample_ring()
@ -1634,7 +1634,7 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin):
ringbuilder.main(argv) ringbuilder.main(argv)
except SystemExit as e: except SystemExit as e:
err = e err = e
self.assertEquals(err.code, 2) self.assertEqual(err.code, 2)
def test_unknown(self): def test_unknown(self):
argv = ["", self.tmpfile, "unknown"] argv = ["", self.tmpfile, "unknown"]
@ -1643,7 +1643,7 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin):
ringbuilder.main(argv) ringbuilder.main(argv)
except SystemExit as e: except SystemExit as e:
err = e err = e
self.assertEquals(err.code, 2) self.assertEqual(err.code, 2)
def test_default(self): def test_default(self):
self.create_sample_ring() self.create_sample_ring()
@ -1669,7 +1669,7 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin):
ringbuilder.main(argv) ringbuilder.main(argv)
except SystemExit as e: except SystemExit as e:
err = e err = e
self.assertEquals(err.code, 1) self.assertEqual(err.code, 1)
def test_rebalance_no_devices(self): def test_rebalance_no_devices(self):
# Test no devices # Test no devices
@ -1681,7 +1681,7 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin):
ringbuilder.main(argv) ringbuilder.main(argv)
except SystemExit as e: except SystemExit as e:
err = e err = e
self.assertEquals(err.code, 2) self.assertEqual(err.code, 2)
def test_write_ring(self): def test_write_ring(self):
self.create_sample_ring() self.create_sample_ring()
@ -1702,7 +1702,7 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin):
ringbuilder.main(argv) ringbuilder.main(argv)
except SystemExit as e: except SystemExit as e:
err = e err = e
self.assertEquals(err.code, 2) self.assertEqual(err.code, 2)
def test_warn_at_risk(self): def test_warn_at_risk(self):
self.create_sample_ring() self.create_sample_ring()
@ -1715,7 +1715,7 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin):
ringbuilder.main(argv) ringbuilder.main(argv)
except SystemExit as e: except SystemExit as e:
err = e err = e
self.assertEquals(err.code, 1) self.assertEqual(err.code, 1)
def test_invalid_device_name(self): def test_invalid_device_name(self):
self.create_sample_ring() self.create_sample_ring()
@ -1731,7 +1731,7 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin):
ringbuilder.main(argv) ringbuilder.main(argv)
except SystemExit as exc: except SystemExit as exc:
err = exc err = exc
self.assertEquals(err.code, 2) self.assertEqual(err.code, 2)
argv = ["", argv = ["",
self.tmpfile, self.tmpfile,
@ -1746,7 +1746,7 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin):
ringbuilder.main(argv) ringbuilder.main(argv)
except SystemExit as exc: except SystemExit as exc:
err = exc err = exc
self.assertEquals(err.code, 2) self.assertEqual(err.code, 2)
def test_dispersion_command(self): def test_dispersion_command(self):
self.create_sample_ring() self.create_sample_ring()
@ -1755,6 +1755,23 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin):
self.assertIn('dispersion', out.lower()) self.assertIn('dispersion', out.lower())
self.assertFalse(err) self.assertFalse(err)
def test_use_ringfile_as_builderfile(self):
mock_stdout = six.StringIO()
mock_stderr = six.StringIO()
argv = ["", "object.ring.gz"]
try:
with mock.patch("sys.stdout", mock_stdout):
with mock.patch("sys.stderr", mock_stderr):
ringbuilder.main(argv)
except SystemExit:
pass
expected = "Note: using object.builder instead of object.ring.gz " \
"as builder file\n" \
"Ring Builder file does not exist: object.builder\n"
self.assertEqual(expected, mock_stdout.getvalue())
class TestRebalanceCommand(unittest.TestCase, RunSwiftRingBuilderMixin): class TestRebalanceCommand(unittest.TestCase, RunSwiftRingBuilderMixin):

View File

@ -76,7 +76,7 @@ class FakeSwift(object):
path += '?' + env['QUERY_STRING'] path += '?' + env['QUERY_STRING']
if 'swift.authorize' in env: if 'swift.authorize' in env:
resp = env['swift.authorize']() resp = env['swift.authorize'](swob.Request(env))
if resp: if resp:
return resp(env, start_response) return resp(env, start_response)

View File

@ -793,7 +793,7 @@ class TestDloGetManifest(DloTestCase):
def test_get_with_auth_overridden(self): def test_get_with_auth_overridden(self):
auth_got_called = [0] auth_got_called = [0]
def my_auth(): def my_auth(req):
auth_got_called[0] += 1 auth_got_called[0] += 1
return None return None

View File

@ -175,6 +175,9 @@ class FakeRecon(object):
def fake_driveaudit(self): def fake_driveaudit(self):
return {'driveaudittest': "1"} return {'driveaudittest': "1"}
def fake_time(self):
return {'timetest': "1"}
def nocontent(self): def nocontent(self):
return None return None
@ -503,6 +506,9 @@ class TestReconSuccess(TestCase):
"attempted": 1, "diff": 0, "attempted": 1, "diff": 0,
"diff_capped": 0, "empty": 0, "diff_capped": 0, "empty": 0,
"failure": 0, "hashmatch": 0, "failure": 0, "hashmatch": 0,
"failure_nodes": {
"192.168.0.1": 0,
"192.168.0.2": 0},
"no_change": 2, "remote_merge": 0, "no_change": 2, "remote_merge": 0,
"remove": 0, "rsync": 0, "remove": 0, "rsync": 0,
"start": 1333044050.855202, "start": 1333044050.855202,
@ -520,6 +526,9 @@ class TestReconSuccess(TestCase):
"attempted": 1, "diff": 0, "attempted": 1, "diff": 0,
"diff_capped": 0, "empty": 0, "diff_capped": 0, "empty": 0,
"failure": 0, "hashmatch": 0, "failure": 0, "hashmatch": 0,
"failure_nodes": {
"192.168.0.1": 0,
"192.168.0.2": 0},
"no_change": 2, "remote_merge": 0, "no_change": 2, "remote_merge": 0,
"remove": 0, "rsync": 0, "remove": 0, "rsync": 0,
"start": 1333044050.855202, "start": 1333044050.855202,
@ -534,6 +543,9 @@ class TestReconSuccess(TestCase):
"attempted": 179, "diff": 0, "attempted": 179, "diff": 0,
"diff_capped": 0, "empty": 0, "diff_capped": 0, "empty": 0,
"failure": 0, "hashmatch": 0, "failure": 0, "hashmatch": 0,
"failure_nodes": {
"192.168.0.1": 0,
"192.168.0.2": 0},
"no_change": 358, "remote_merge": 0, "no_change": 358, "remote_merge": 0,
"remove": 0, "rsync": 0, "remove": 0, "rsync": 0,
"start": 5.5, "success": 358, "start": 5.5, "success": 358,
@ -552,6 +564,9 @@ class TestReconSuccess(TestCase):
"attempted": 179, "diff": 0, "attempted": 179, "diff": 0,
"diff_capped": 0, "empty": 0, "diff_capped": 0, "empty": 0,
"failure": 0, "hashmatch": 0, "failure": 0, "hashmatch": 0,
"failure_nodes": {
"192.168.0.1": 0,
"192.168.0.2": 0},
"no_change": 358, "remote_merge": 0, "no_change": 358, "remote_merge": 0,
"remove": 0, "rsync": 0, "remove": 0, "rsync": 0,
"start": 5.5, "success": 358, "start": 5.5, "success": 358,
@ -559,17 +574,40 @@ class TestReconSuccess(TestCase):
"replication_last": 1357969645.25}) "replication_last": 1357969645.25})
def test_get_replication_object(self): def test_get_replication_object(self):
from_cache_response = {"object_replication_time": 200.0, from_cache_response = {
"object_replication_last": 1357962809.15} "replication_time": 0.2615511417388916,
"replication_stats": {
"attempted": 179,
"failure": 0, "hashmatch": 0,
"failure_nodes": {
"192.168.0.1": 0,
"192.168.0.2": 0},
"remove": 0, "rsync": 0,
"start": 1333044050.855202, "success": 358},
"replication_last": 1357969645.25,
"object_replication_time": 0.2615511417388916,
"object_replication_last": 1357969645.25}
self.fakecache.fakeout_calls = [] self.fakecache.fakeout_calls = []
self.fakecache.fakeout = from_cache_response self.fakecache.fakeout = from_cache_response
rv = self.app.get_replication_info('object') rv = self.app.get_replication_info('object')
self.assertEquals(self.fakecache.fakeout_calls, self.assertEquals(self.fakecache.fakeout_calls,
[((['object_replication_time', [((['replication_time', 'replication_stats',
'replication_last', 'object_replication_time',
'object_replication_last'], 'object_replication_last'],
'/var/cache/swift/object.recon'), {})]) '/var/cache/swift/object.recon'), {})])
self.assertEquals(rv, {'object_replication_time': 200.0, self.assertEquals(rv, {
'object_replication_last': 1357962809.15}) "replication_time": 0.2615511417388916,
"replication_stats": {
"attempted": 179,
"failure": 0, "hashmatch": 0,
"failure_nodes": {
"192.168.0.1": 0,
"192.168.0.2": 0},
"remove": 0, "rsync": 0,
"start": 1333044050.855202, "success": 358},
"replication_last": 1357969645.25,
"object_replication_time": 0.2615511417388916,
"object_replication_last": 1357969645.25})
def test_get_updater_info_container(self): def test_get_updater_info_container(self):
from_cache_response = {"container_updater_sweep": 18.476239919662476} from_cache_response = {"container_updater_sweep": 18.476239919662476}
@ -855,6 +893,15 @@ class TestReconSuccess(TestCase):
'/var/cache/swift/drive.recon'), {})]) '/var/cache/swift/drive.recon'), {})])
self.assertEquals(rv, {'drive_audit_errors': 7}) self.assertEquals(rv, {'drive_audit_errors': 7})
def test_get_time(self):
def fake_time():
return 1430000000.0
with mock.patch("time.time", fake_time):
now = fake_time()
rv = self.app.get_time()
self.assertEquals(rv, now)
class TestReconMiddleware(unittest.TestCase): class TestReconMiddleware(unittest.TestCase):
@ -884,6 +931,7 @@ class TestReconMiddleware(unittest.TestCase):
self.app.get_quarantine_count = self.frecon.fake_quarantined self.app.get_quarantine_count = self.frecon.fake_quarantined
self.app.get_socket_info = self.frecon.fake_sockstat self.app.get_socket_info = self.frecon.fake_sockstat
self.app.get_driveaudit_error = self.frecon.fake_driveaudit self.app.get_driveaudit_error = self.frecon.fake_driveaudit
self.app.get_time = self.frecon.fake_time
def test_recon_get_mem(self): def test_recon_get_mem(self):
get_mem_resp = ['{"memtest": "1"}'] get_mem_resp = ['{"memtest": "1"}']
@ -1118,5 +1166,12 @@ class TestReconMiddleware(unittest.TestCase):
resp = self.app(req.environ, start_response) resp = self.app(req.environ, start_response)
self.assertEquals(resp, get_driveaudit_resp) self.assertEquals(resp, get_driveaudit_resp)
def test_recon_get_time(self):
get_time_resp = ['{"timetest": "1"}']
req = Request.blank('/recon/time',
environ={'REQUEST_METHOD': 'GET'})
resp = self.app(req.environ, start_response)
self.assertEquals(resp, get_time_resp)
if __name__ == '__main__': if __name__ == '__main__':
unittest.main() unittest.main()

View File

@ -29,6 +29,7 @@
# limitations under the License. # limitations under the License.
import hmac import hmac
import itertools
import unittest import unittest
from hashlib import sha1 from hashlib import sha1
from time import time from time import time
@ -44,10 +45,13 @@ class FakeApp(object):
self.calls = 0 self.calls = 0
self.status_headers_body_iter = status_headers_body_iter self.status_headers_body_iter = status_headers_body_iter
if not self.status_headers_body_iter: if not self.status_headers_body_iter:
self.status_headers_body_iter = iter([('404 Not Found', { self.status_headers_body_iter = iter(
'x-test-header-one-a': 'value1', itertools.repeat((
'x-test-header-two-a': 'value2', '404 Not Found', {
'x-test-header-two-b': 'value3'}, '')]) 'x-test-header-one-a': 'value1',
'x-test-header-two-a': 'value2',
'x-test-header-two-b': 'value3'},
'')))
self.request = None self.request = None
def __call__(self, env, start_response): def __call__(self, env, start_response):
@ -69,16 +73,18 @@ class TestTempURL(unittest.TestCase):
self.auth = tempauth.filter_factory({'reseller_prefix': ''})(self.app) self.auth = tempauth.filter_factory({'reseller_prefix': ''})(self.app)
self.tempurl = tempurl.filter_factory({})(self.auth) self.tempurl = tempurl.filter_factory({})(self.auth)
def _make_request(self, path, environ=None, keys=(), **kwargs): def _make_request(self, path, environ=None, keys=(), container_keys=None,
**kwargs):
if environ is None: if environ is None:
environ = {} environ = {}
_junk, account, _junk, _junk = utils.split_path(path, 2, 4) _junk, account, _junk, _junk = utils.split_path(path, 2, 4)
self._fake_cache_environ(environ, account, keys) self._fake_cache_environ(environ, account, keys,
container_keys=container_keys)
req = Request.blank(path, environ=environ, **kwargs) req = Request.blank(path, environ=environ, **kwargs)
return req return req
def _fake_cache_environ(self, environ, account, keys): def _fake_cache_environ(self, environ, account, keys, container_keys=None):
""" """
Fake out the caching layer for get_account_info(). Injects account data Fake out the caching layer for get_account_info(). Injects account data
into environ such that keys are the tempurl keys, if set. into environ such that keys are the tempurl keys, if set.
@ -96,8 +102,13 @@ class TestTempURL(unittest.TestCase):
'bytes': '0', 'bytes': '0',
'meta': meta} 'meta': meta}
meta = {}
for i, key in enumerate(container_keys or []):
meta_name = 'Temp-URL-key' + (("-%d" % (i + 1) if i else ""))
meta[meta_name] = key
container_cache_key = 'swift.container/' + account + '/c' container_cache_key = 'swift.container/' + account + '/c'
environ.setdefault(container_cache_key, {'meta': {}}) environ.setdefault(container_cache_key, {'meta': meta})
def test_passthrough(self): def test_passthrough(self):
resp = self._make_request('/v1/a/c/o').get_response(self.tempurl) resp = self._make_request('/v1/a/c/o').get_response(self.tempurl)
@ -581,6 +592,81 @@ class TestTempURL(unittest.TestCase):
self.assertTrue('Temp URL invalid' in resp.body) self.assertTrue('Temp URL invalid' in resp.body)
self.assertTrue('Www-Authenticate' in resp.headers) self.assertTrue('Www-Authenticate' in resp.headers)
def test_authorize_limits_scope(self):
req_other_object = Request.blank("/v1/a/c/o2")
req_other_container = Request.blank("/v1/a/c2/o2")
req_other_account = Request.blank("/v1/a2/c2/o2")
key_kwargs = {
'keys': ['account-key', 'shared-key'],
'container_keys': ['container-key', 'shared-key'],
}
# A request with the account key limits the pre-authed scope to the
# account level.
method = 'GET'
expires = int(time() + 86400)
path = '/v1/a/c/o'
hmac_body = '%s\n%s\n%s' % (method, expires, path)
sig = hmac.new('account-key', hmac_body, sha1).hexdigest()
qs = '?temp_url_sig=%s&temp_url_expires=%s' % (sig, expires)
# make request will setup the environ cache for us
req = self._make_request(path + qs, **key_kwargs)
resp = req.get_response(self.tempurl)
self.assertEquals(resp.status_int, 404) # sanity check
authorize = req.environ['swift.authorize']
# Requests for other objects happen if, for example, you're
# downloading a large object or creating a large-object manifest.
oo_resp = authorize(req_other_object)
self.assertEqual(oo_resp, None)
oc_resp = authorize(req_other_container)
self.assertEqual(oc_resp, None)
oa_resp = authorize(req_other_account)
self.assertEqual(oa_resp.status_int, 401)
# A request with the container key limits the pre-authed scope to
# the container level; a different container in the same account is
# out of scope and thus forbidden.
hmac_body = '%s\n%s\n%s' % (method, expires, path)
sig = hmac.new('container-key', hmac_body, sha1).hexdigest()
qs = '?temp_url_sig=%s&temp_url_expires=%s' % (sig, expires)
req = self._make_request(path + qs, **key_kwargs)
resp = req.get_response(self.tempurl)
self.assertEquals(resp.status_int, 404) # sanity check
authorize = req.environ['swift.authorize']
oo_resp = authorize(req_other_object)
self.assertEqual(oo_resp, None)
oc_resp = authorize(req_other_container)
self.assertEqual(oc_resp.status_int, 401)
oa_resp = authorize(req_other_account)
self.assertEqual(oa_resp.status_int, 401)
# If account and container share a key (users set these, so this can
# happen by accident, stupidity, *or* malice!), limit the scope to
# account level. This prevents someone from shrinking the scope of
# account-level tempurls by reusing one of the account's keys on a
# container.
hmac_body = '%s\n%s\n%s' % (method, expires, path)
sig = hmac.new('shared-key', hmac_body, sha1).hexdigest()
qs = '?temp_url_sig=%s&temp_url_expires=%s' % (sig, expires)
req = self._make_request(path + qs, **key_kwargs)
resp = req.get_response(self.tempurl)
self.assertEquals(resp.status_int, 404) # sanity check
authorize = req.environ['swift.authorize']
oo_resp = authorize(req_other_object)
self.assertEqual(oo_resp, None)
oc_resp = authorize(req_other_container)
self.assertEqual(oc_resp, None)
oa_resp = authorize(req_other_account)
self.assertEqual(oa_resp.status_int, 401)
def test_changed_path_invalid(self): def test_changed_path_invalid(self):
method = 'GET' method = 'GET'
expires = int(time() + 86400) expires = int(time() + 86400)
@ -649,6 +735,25 @@ class TestTempURL(unittest.TestCase):
self.assertTrue('Temp URL invalid' in resp.body) self.assertTrue('Temp URL invalid' in resp.body)
self.assertTrue('Www-Authenticate' in resp.headers) self.assertTrue('Www-Authenticate' in resp.headers)
def test_disallowed_header_object_manifest(self):
    """A tempurl PUT carrying X-Object-Manifest must be rejected (400),
    even with a valid signature, since the header could repoint a DLO
    manifest at private data."""
    self.tempurl = tempurl.filter_factory({})(self.auth)
    method = 'PUT'
    expires = int(time() + 86400)
    path = '/v1/a/c/o'
    key = 'abc'
    hmac_body = '%s\n%s\n%s' % (method, expires, path)
    sig = hmac.new(key, hmac_body, sha1).hexdigest()
    req = self._make_request(
        path, method='PUT', keys=[key],
        headers={'x-object-manifest': 'private/secret'},
        environ={'QUERY_STRING': 'temp_url_sig=%s&temp_url_expires=%s' % (
            sig, expires)})
    resp = req.get_response(self.tempurl)
    # assertEqual / assertIn instead of the deprecated assertEquals and
    # the weaker assertTrue(x in y) idiom (better failure messages).
    self.assertEqual(resp.status_int, 400)
    self.assertIn('header', resp.body)
    self.assertIn('not allowed', resp.body)
    self.assertIn('X-Object-Manifest', resp.body)
def test_removed_incoming_header(self): def test_removed_incoming_header(self):
self.tempurl = tempurl.filter_factory({ self.tempurl = tempurl.filter_factory({
'incoming_remove_headers': 'x-remove-this'})(self.auth) 'incoming_remove_headers': 'x-remove-this'})(self.auth)
@ -809,35 +914,38 @@ class TestTempURL(unittest.TestCase):
self.assertTrue('x-conflict-header-test' in resp.headers) self.assertTrue('x-conflict-header-test' in resp.headers)
self.assertEqual(resp.headers['x-conflict-header-test'], 'value') self.assertEqual(resp.headers['x-conflict-header-test'], 'value')
def test_get_account(self): def test_get_account_and_container(self):
self.assertEquals(self.tempurl._get_account({ self.assertEquals(self.tempurl._get_account_and_container({
'REQUEST_METHOD': 'HEAD', 'PATH_INFO': '/v1/a/c/o'}), 'a') 'REQUEST_METHOD': 'HEAD', 'PATH_INFO': '/v1/a/c/o'}), ('a', 'c'))
self.assertEquals(self.tempurl._get_account({ self.assertEquals(self.tempurl._get_account_and_container({
'REQUEST_METHOD': 'GET', 'PATH_INFO': '/v1/a/c/o'}), 'a') 'REQUEST_METHOD': 'GET', 'PATH_INFO': '/v1/a/c/o'}), ('a', 'c'))
self.assertEquals(self.tempurl._get_account({ self.assertEquals(self.tempurl._get_account_and_container({
'REQUEST_METHOD': 'PUT', 'PATH_INFO': '/v1/a/c/o'}), 'a') 'REQUEST_METHOD': 'PUT', 'PATH_INFO': '/v1/a/c/o'}), ('a', 'c'))
self.assertEquals(self.tempurl._get_account({ self.assertEquals(self.tempurl._get_account_and_container({
'REQUEST_METHOD': 'POST', 'PATH_INFO': '/v1/a/c/o'}), 'a') 'REQUEST_METHOD': 'POST', 'PATH_INFO': '/v1/a/c/o'}), ('a', 'c'))
self.assertEquals(self.tempurl._get_account({ self.assertEquals(self.tempurl._get_account_and_container({
'REQUEST_METHOD': 'DELETE', 'PATH_INFO': '/v1/a/c/o'}), 'a') 'REQUEST_METHOD': 'DELETE', 'PATH_INFO': '/v1/a/c/o'}), ('a', 'c'))
self.assertEquals(self.tempurl._get_account({ self.assertEquals(self.tempurl._get_account_and_container({
'REQUEST_METHOD': 'UNKNOWN', 'PATH_INFO': '/v1/a/c/o'}), None) 'REQUEST_METHOD': 'UNKNOWN', 'PATH_INFO': '/v1/a/c/o'}),
self.assertEquals(self.tempurl._get_account({ (None, None))
'REQUEST_METHOD': 'GET', 'PATH_INFO': '/v1/a/c/'}), None) self.assertEquals(self.tempurl._get_account_and_container({
self.assertEquals(self.tempurl._get_account({ 'REQUEST_METHOD': 'GET', 'PATH_INFO': '/v1/a/c/'}), (None, None))
'REQUEST_METHOD': 'GET', 'PATH_INFO': '/v1/a/c//////'}), None) self.assertEquals(self.tempurl._get_account_and_container({
self.assertEquals(self.tempurl._get_account({ 'REQUEST_METHOD': 'GET', 'PATH_INFO': '/v1/a/c//////'}),
'REQUEST_METHOD': 'GET', 'PATH_INFO': '/v1/a/c///o///'}), 'a') (None, None))
self.assertEquals(self.tempurl._get_account({ self.assertEquals(self.tempurl._get_account_and_container({
'REQUEST_METHOD': 'GET', 'PATH_INFO': '/v1/a/c'}), None) 'REQUEST_METHOD': 'GET', 'PATH_INFO': '/v1/a/c///o///'}),
self.assertEquals(self.tempurl._get_account({ ('a', 'c'))
'REQUEST_METHOD': 'GET', 'PATH_INFO': '/v1/a//o'}), None) self.assertEquals(self.tempurl._get_account_and_container({
self.assertEquals(self.tempurl._get_account({ 'REQUEST_METHOD': 'GET', 'PATH_INFO': '/v1/a/c'}), (None, None))
'REQUEST_METHOD': 'GET', 'PATH_INFO': '/v1//c/o'}), None) self.assertEquals(self.tempurl._get_account_and_container({
self.assertEquals(self.tempurl._get_account({ 'REQUEST_METHOD': 'GET', 'PATH_INFO': '/v1/a//o'}), (None, None))
'REQUEST_METHOD': 'GET', 'PATH_INFO': '//a/c/o'}), None) self.assertEquals(self.tempurl._get_account_and_container({
self.assertEquals(self.tempurl._get_account({ 'REQUEST_METHOD': 'GET', 'PATH_INFO': '/v1//c/o'}), (None, None))
'REQUEST_METHOD': 'GET', 'PATH_INFO': '/v2/a/c/o'}), None) self.assertEquals(self.tempurl._get_account_and_container({
'REQUEST_METHOD': 'GET', 'PATH_INFO': '//a/c/o'}), (None, None))
self.assertEquals(self.tempurl._get_account_and_container({
'REQUEST_METHOD': 'GET', 'PATH_INFO': '/v2/a/c/o'}), (None, None))
def test_get_temp_url_info(self): def test_get_temp_url_info(self):
s = 'f5d5051bddf5df7e27c628818738334f' s = 'f5d5051bddf5df7e27c628818738334f'
@ -889,13 +997,13 @@ class TestTempURL(unittest.TestCase):
self.assertEquals( self.assertEquals(
self.tempurl._get_hmacs( self.tempurl._get_hmacs(
{'REQUEST_METHOD': 'GET', 'PATH_INFO': '/v1/a/c/o'}, {'REQUEST_METHOD': 'GET', 'PATH_INFO': '/v1/a/c/o'},
1, ['abc']), 1, [('abc', 'account')]),
['026d7f7cc25256450423c7ad03fc9f5ffc1dab6d']) [('026d7f7cc25256450423c7ad03fc9f5ffc1dab6d', 'account')])
self.assertEquals( self.assertEquals(
self.tempurl._get_hmacs( self.tempurl._get_hmacs(
{'REQUEST_METHOD': 'HEAD', 'PATH_INFO': '/v1/a/c/o'}, {'REQUEST_METHOD': 'HEAD', 'PATH_INFO': '/v1/a/c/o'},
1, ['abc'], request_method='GET'), 1, [('abc', 'account')], request_method='GET'),
['026d7f7cc25256450423c7ad03fc9f5ffc1dab6d']) [('026d7f7cc25256450423c7ad03fc9f5ffc1dab6d', 'account')])
def test_invalid(self): def test_invalid(self):

View File

@ -0,0 +1,566 @@
# Copyright (c) 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from swift.common import swob
from swift.common.middleware import versioned_writes
from swift.common.swob import Request
from test.unit.common.middleware.helpers import FakeSwift
class FakeCache(object):
    """Minimal stand-in for a memcache client.

    Stores one container-info dict at construction time and hands it back
    for every ``get()`` call, regardless of the key(s) requested.  If the
    supplied dict has no 'status' entry, a 200 is filled in (mutating the
    caller's dict in place, just as real cached info would carry a status).
    """

    def __init__(self, val):
        # setdefault mutates the caller's dict exactly like the explicit
        # membership-check-and-assign would.
        val.setdefault('status', 200)
        self.val = val

    def get(self, *args):
        # Ignore all arguments; always return the canned info dict.
        return self.val
class VersionedWritesTestCase(unittest.TestCase):
    """Tests for the versioned_writes middleware.

    Each test registers canned backend responses on a FakeSwift app,
    drives the middleware with a swob Request, then inspects the
    response status/headers and the backend calls that were made.

    Uses assertEqual/assertIn/assertNotIn throughout instead of the
    deprecated assertEquals and the weaker assertTrue(x in y) idiom.
    """

    def setUp(self):
        self.app = FakeSwift()
        conf = {'allow_versioned_writes': 'true'}
        self.vw = versioned_writes.filter_factory(conf)(self.app)

    def call_app(self, req, app=None, expect_exception=False):
        """Run ``req`` through ``app`` (default: the bare FakeSwift).

        Records every request passed to swift.authorize in
        ``self.authorized``.  Returns (status, headers, body) or, when
        expect_exception is true, (status, headers, body, caught_exc).
        """
        if app is None:
            app = self.app

        self.authorized = []

        def authorize(req):
            self.authorized.append(req)

        if 'swift.authorize' not in req.environ:
            req.environ['swift.authorize'] = authorize

        req.headers.setdefault("User-Agent", "Marula Kruger")

        status = [None]
        headers = [None]

        def start_response(s, h, ei=None):
            status[0] = s
            headers[0] = h

        body_iter = app(req.environ, start_response)
        body = ''
        caught_exc = None
        try:
            for chunk in body_iter:
                body += chunk
        except Exception as exc:
            if expect_exception:
                caught_exc = exc
            else:
                raise

        if expect_exception:
            return status[0], headers[0], body, caught_exc
        else:
            return status[0], headers[0], body

    def call_vw(self, req, **kwargs):
        """Run ``req`` through the versioned_writes-wrapped app."""
        return self.call_app(req, app=self.vw, **kwargs)

    def assertRequestEqual(self, req, other):
        # Two requests are "equal" for these tests if method and path match.
        self.assertEqual(req.method, other.method)
        self.assertEqual(req.path, other.path)

    def test_put_container(self):
        self.app.register('PUT', '/v1/a/c', swob.HTTPOk, {}, 'passed')
        req = Request.blank('/v1/a/c',
                            headers={'X-Versions-Location': 'ver_cont'},
                            environ={'REQUEST_METHOD': 'PUT'})
        status, headers, body = self.call_vw(req)
        self.assertEqual(status, '200 OK')

        # check for sysmeta header
        calls = self.app.calls_with_headers
        method, path, req_headers = calls[0]
        self.assertEqual('PUT', method)
        self.assertEqual('/v1/a/c', path)
        self.assertIn('x-container-sysmeta-versions-location', req_headers)
        self.assertEqual(len(self.authorized), 1)
        self.assertRequestEqual(req, self.authorized[0])

    def test_container_allow_versioned_writes_false(self):
        self.vw.conf = {'allow_versioned_writes': 'false'}

        # PUT/POST container must fail as 412 when allow_versioned_writes
        # set to false
        for method in ('PUT', 'POST'):
            req = Request.blank('/v1/a/c',
                                headers={'X-Versions-Location': 'ver_cont'},
                                environ={'REQUEST_METHOD': method})
            status, headers, body = self.call_vw(req)
            self.assertEqual(status, "412 Precondition Failed")

        # GET/HEAD performs as normal
        self.app.register('GET', '/v1/a/c', swob.HTTPOk, {}, 'passed')
        self.app.register('HEAD', '/v1/a/c', swob.HTTPOk, {}, 'passed')

        for method in ('GET', 'HEAD'):
            req = Request.blank('/v1/a/c',
                                headers={'X-Versions-Location': 'ver_cont'},
                                environ={'REQUEST_METHOD': method})
            status, headers, body = self.call_vw(req)
            self.assertEqual(status, '200 OK')

    def test_remove_versions_location(self):
        self.app.register('POST', '/v1/a/c', swob.HTTPOk, {}, 'passed')
        req = Request.blank('/v1/a/c',
                            headers={'X-Remove-Versions-Location': 'x'},
                            environ={'REQUEST_METHOD': 'POST'})
        status, headers, body = self.call_vw(req)
        self.assertEqual(status, '200 OK')

        # check for sysmeta header
        calls = self.app.calls_with_headers
        method, path, req_headers = calls[0]
        self.assertEqual('POST', method)
        self.assertEqual('/v1/a/c', path)
        self.assertIn('x-container-sysmeta-versions-location', req_headers)
        self.assertIn('x-versions-location', req_headers)
        self.assertEqual(len(self.authorized), 1)
        self.assertRequestEqual(req, self.authorized[0])

    def test_remove_add_versions_precedence(self):
        # When both X-Remove-Versions-Location and X-Versions-Location are
        # sent, the add wins and the remove header is dropped.
        self.app.register(
            'POST', '/v1/a/c', swob.HTTPOk,
            {'x-container-sysmeta-versions-location': 'ver_cont'},
            'passed')
        req = Request.blank('/v1/a/c',
                            headers={'X-Remove-Versions-Location': 'x',
                                     'X-Versions-Location': 'ver_cont'},
                            environ={'REQUEST_METHOD': 'POST'})

        status, headers, body = self.call_vw(req)
        self.assertEqual(status, '200 OK')
        self.assertIn(('X-Versions-Location', 'ver_cont'), headers)

        # check for sysmeta header
        calls = self.app.calls_with_headers
        method, path, req_headers = calls[0]
        self.assertEqual('POST', method)
        self.assertEqual('/v1/a/c', path)
        self.assertIn('x-container-sysmeta-versions-location', req_headers)
        self.assertNotIn('x-remove-versions-location', req_headers)
        self.assertEqual(len(self.authorized), 1)
        self.assertRequestEqual(req, self.authorized[0])

    def test_get_container(self):
        # Sysmeta on the backend response is translated back to the
        # user-visible X-Versions-Location header.
        self.app.register(
            'GET', '/v1/a/c', swob.HTTPOk,
            {'x-container-sysmeta-versions-location': 'ver_cont'}, None)
        req = Request.blank(
            '/v1/a/c',
            environ={'REQUEST_METHOD': 'GET'})
        status, headers, body = self.call_vw(req)
        self.assertEqual(status, '200 OK')
        self.assertIn(('X-Versions-Location', 'ver_cont'), headers)
        self.assertEqual(len(self.authorized), 1)
        self.assertRequestEqual(req, self.authorized[0])

    def test_get_head(self):
        self.app.register('GET', '/v1/a/c/o', swob.HTTPOk, {}, None)
        req = Request.blank(
            '/v1/a/c/o',
            environ={'REQUEST_METHOD': 'GET'})
        status, headers, body = self.call_vw(req)
        self.assertEqual(status, '200 OK')
        self.assertEqual(len(self.authorized), 1)
        self.assertRequestEqual(req, self.authorized[0])

        self.app.register('HEAD', '/v1/a/c/o', swob.HTTPOk, {}, None)
        req = Request.blank(
            '/v1/a/c/o',
            environ={'REQUEST_METHOD': 'HEAD'})
        status, headers, body = self.call_vw(req)
        self.assertEqual(status, '200 OK')
        self.assertEqual(len(self.authorized), 1)
        self.assertRequestEqual(req, self.authorized[0])

    def test_put_object_no_versioning(self):
        # No versions location in container info: PUT passes straight through.
        self.app.register(
            'PUT', '/v1/a/c/o', swob.HTTPOk, {}, 'passed')

        cache = FakeCache({})
        req = Request.blank(
            '/v1/a/c/o',
            environ={'REQUEST_METHOD': 'PUT', 'swift.cache': cache,
                     'CONTENT_LENGTH': '100'})
        status, headers, body = self.call_vw(req)
        self.assertEqual(status, '200 OK')
        self.assertEqual(len(self.authorized), 1)
        self.assertRequestEqual(req, self.authorized[0])

    def test_put_first_object_success(self):
        # HEAD 404 means there is no current version to copy aside.
        self.app.register(
            'PUT', '/v1/a/c/o', swob.HTTPOk, {}, 'passed')
        self.app.register(
            'HEAD', '/v1/a/c/o', swob.HTTPNotFound, {}, None)

        cache = FakeCache({'sysmeta': {'versions-location': 'ver_cont'}})
        req = Request.blank(
            '/v1/a/c/o',
            environ={'REQUEST_METHOD': 'PUT', 'swift.cache': cache,
                     'CONTENT_LENGTH': '100'})
        status, headers, body = self.call_vw(req)
        self.assertEqual(status, '200 OK')
        self.assertEqual(len(self.authorized), 1)
        self.assertRequestEqual(req, self.authorized[0])

    def test_PUT_versioning_with_nonzero_default_policy(self):
        self.app.register(
            'PUT', '/v1/a/c/o', swob.HTTPOk, {}, 'passed')
        self.app.register(
            'HEAD', '/v1/a/c/o', swob.HTTPNotFound, {}, None)

        cache = FakeCache({'versions': 'ver_cont', 'storage_policy': '2'})
        req = Request.blank(
            '/v1/a/c/o',
            environ={'REQUEST_METHOD': 'PUT', 'swift.cache': cache,
                     'CONTENT_LENGTH': '100'})
        status, headers, body = self.call_vw(req)
        self.assertEqual(status, '200 OK')

        # check for 'X-Backend-Storage-Policy-Index' in HEAD request
        calls = self.app.calls_with_headers
        method, path, req_headers = calls[0]
        self.assertEqual('HEAD', method)
        self.assertEqual('/v1/a/c/o', path)
        self.assertIn('X-Backend-Storage-Policy-Index', req_headers)
        self.assertEqual('2',
                         req_headers.get('X-Backend-Storage-Policy-Index'))
        self.assertEqual(len(self.authorized), 1)
        self.assertRequestEqual(req, self.authorized[0])

    def test_put_object_no_versioning_with_container_config_true(self):
        # set False to versions_write obviously and expect no COPY occurred
        self.vw.conf = {'allow_versioned_writes': 'false'}
        self.app.register(
            'PUT', '/v1/a/c/o', swob.HTTPCreated, {}, 'passed')
        self.app.register(
            'HEAD', '/v1/a/c/o', swob.HTTPOk,
            {'last-modified': 'Wed, 19 Nov 2014 18:19:02 GMT'}, 'passed')
        cache = FakeCache({'versions': 'ver_cont'})
        req = Request.blank(
            '/v1/a/c/o',
            environ={'REQUEST_METHOD': 'PUT', 'swift.cache': cache,
                     'CONTENT_LENGTH': '100'})
        status, headers, body = self.call_vw(req)
        self.assertEqual(status, '201 Created')
        self.assertEqual(len(self.authorized), 1)
        self.assertRequestEqual(req, self.authorized[0])
        called_method = [method for (method, path, hdrs) in self.app._calls]
        self.assertNotIn('COPY', called_method)

    def test_delete_object_no_versioning_with_container_config_true(self):
        # set False to versions_write obviously and expect no GET versioning
        # container and COPY called (just delete object as normal)
        self.vw.conf = {'allow_versioned_writes': 'false'}
        self.app.register(
            'DELETE', '/v1/a/c/o', swob.HTTPNoContent, {}, 'passed')
        cache = FakeCache({'versions': 'ver_cont'})
        req = Request.blank(
            '/v1/a/c/o',
            environ={'REQUEST_METHOD': 'DELETE', 'swift.cache': cache})
        status, headers, body = self.call_vw(req)
        self.assertEqual(status, '204 No Content')
        self.assertEqual(len(self.authorized), 1)
        self.assertRequestEqual(req, self.authorized[0])
        called_method = \
            [method for (method, path, rheaders) in self.app._calls]
        self.assertNotIn('COPY', called_method)
        self.assertNotIn('GET', called_method)

    def test_copy_object_no_versioning_with_container_config_true(self):
        # set False to versions_write obviously and expect no extra
        # COPY called (just copy object as normal)
        self.vw.conf = {'allow_versioned_writes': 'false'}
        self.app.register(
            'COPY', '/v1/a/c/o', swob.HTTPCreated, {}, None)
        cache = FakeCache({'versions': 'ver_cont'})
        req = Request.blank(
            '/v1/a/c/o',
            environ={'REQUEST_METHOD': 'COPY', 'swift.cache': cache})
        status, headers, body = self.call_vw(req)
        self.assertEqual(status, '201 Created')
        self.assertEqual(len(self.authorized), 1)
        self.assertRequestEqual(req, self.authorized[0])
        called_method = \
            [method for (method, path, rheaders) in self.app._calls]
        self.assertIn('COPY', called_method)
        self.assertEqual(called_method.count('COPY'), 1)

    def test_new_version_success(self):
        # Existing object (HEAD 200): middleware COPYs it aside before PUT.
        self.app.register(
            'PUT', '/v1/a/c/o', swob.HTTPOk, {}, 'passed')
        self.app.register(
            'HEAD', '/v1/a/c/o', swob.HTTPOk,
            {'last-modified': 'Wed, 19 Nov 2014 18:19:02 GMT'}, 'passed')
        self.app.register(
            'COPY', '/v1/a/c/o', swob.HTTPCreated, {}, None)
        cache = FakeCache({'sysmeta': {'versions-location': 'ver_cont'}})
        req = Request.blank(
            '/v1/a/c/o',
            environ={'REQUEST_METHOD': 'PUT', 'swift.cache': cache,
                     'CONTENT_LENGTH': '100'})
        status, headers, body = self.call_vw(req)
        self.assertEqual(status, '200 OK')
        self.assertEqual(len(self.authorized), 1)
        self.assertRequestEqual(req, self.authorized[0])

    def test_new_version_sysmeta_precedence(self):
        self.app.register(
            'PUT', '/v1/a/c/o', swob.HTTPOk, {}, 'passed')
        self.app.register(
            'HEAD', '/v1/a/c/o', swob.HTTPOk,
            {'last-modified': 'Wed, 19 Nov 2014 18:19:02 GMT'}, 'passed')
        self.app.register(
            'COPY', '/v1/a/c/o', swob.HTTPCreated, {}, None)

        # fill cache with two different values for versions location
        # new middleware should use sysmeta first
        cache = FakeCache({'versions': 'old_ver_cont',
                           'sysmeta': {'versions-location': 'ver_cont'}})
        req = Request.blank(
            '/v1/a/c/o',
            environ={'REQUEST_METHOD': 'PUT', 'swift.cache': cache,
                     'CONTENT_LENGTH': '100'})
        status, headers, body = self.call_vw(req)
        self.assertEqual(status, '200 OK')
        self.assertEqual(len(self.authorized), 1)
        self.assertRequestEqual(req, self.authorized[0])

        # check that sysmeta header was used
        calls = self.app.calls_with_headers
        method, path, req_headers = calls[1]
        self.assertEqual('COPY', method)
        self.assertEqual('/v1/a/c/o', path)
        self.assertTrue(req_headers['Destination'].startswith('ver_cont/'))

    def test_copy_first_version(self):
        # COPY to a target with no current version: no archival COPY needed.
        self.app.register(
            'COPY', '/v1/a/src_cont/src_obj', swob.HTTPOk, {}, 'passed')
        self.app.register(
            'HEAD', '/v1/a/tgt_cont/tgt_obj', swob.HTTPNotFound, {}, None)
        cache = FakeCache({'sysmeta': {'versions-location': 'ver_cont'}})
        req = Request.blank(
            '/v1/a/src_cont/src_obj',
            environ={'REQUEST_METHOD': 'COPY', 'swift.cache': cache,
                     'CONTENT_LENGTH': '100'},
            headers={'Destination': 'tgt_cont/tgt_obj'})
        status, headers, body = self.call_vw(req)
        self.assertEqual(status, '200 OK')
        self.assertEqual(len(self.authorized), 1)
        self.assertRequestEqual(req, self.authorized[0])

    def test_copy_new_version(self):
        # COPY over an existing target: the old version is archived first.
        self.app.register(
            'COPY', '/v1/a/src_cont/src_obj', swob.HTTPOk, {}, 'passed')
        self.app.register(
            'HEAD', '/v1/a/tgt_cont/tgt_obj', swob.HTTPOk,
            {'last-modified': 'Wed, 19 Nov 2014 18:19:02 GMT'}, 'passed')
        self.app.register(
            'COPY', '/v1/a/tgt_cont/tgt_obj', swob.HTTPCreated, {}, None)
        cache = FakeCache({'sysmeta': {'versions-location': 'ver_cont'}})
        req = Request.blank(
            '/v1/a/src_cont/src_obj',
            environ={'REQUEST_METHOD': 'COPY', 'swift.cache': cache,
                     'CONTENT_LENGTH': '100'},
            headers={'Destination': 'tgt_cont/tgt_obj'})
        status, headers, body = self.call_vw(req)
        self.assertEqual(status, '200 OK')
        self.assertEqual(len(self.authorized), 1)
        self.assertRequestEqual(req, self.authorized[0])

    def test_copy_new_version_different_account(self):
        self.app.register(
            'COPY', '/v1/src_a/src_cont/src_obj', swob.HTTPOk, {}, 'passed')
        self.app.register(
            'HEAD', '/v1/tgt_a/tgt_cont/tgt_obj', swob.HTTPOk,
            {'last-modified': 'Wed, 19 Nov 2014 18:19:02 GMT'}, 'passed')
        self.app.register(
            'COPY', '/v1/tgt_a/tgt_cont/tgt_obj', swob.HTTPCreated, {}, None)
        cache = FakeCache({'sysmeta': {'versions-location': 'ver_cont'}})
        req = Request.blank(
            '/v1/src_a/src_cont/src_obj',
            environ={'REQUEST_METHOD': 'COPY', 'swift.cache': cache,
                     'CONTENT_LENGTH': '100'},
            headers={'Destination': 'tgt_cont/tgt_obj',
                     'Destination-Account': 'tgt_a'})
        status, headers, body = self.call_vw(req)
        self.assertEqual(status, '200 OK')
        self.assertEqual(len(self.authorized), 1)
        self.assertRequestEqual(req, self.authorized[0])

    def test_copy_new_version_bogus_account(self):
        # A Destination-Account that fails validation is rejected with 412.
        cache = FakeCache({'sysmeta': {'versions-location': 'ver_cont'}})
        req = Request.blank(
            '/v1/src_a/src_cont/src_obj',
            environ={'REQUEST_METHOD': 'COPY', 'swift.cache': cache,
                     'CONTENT_LENGTH': '100'},
            headers={'Destination': 'tgt_cont/tgt_obj',
                     'Destination-Account': '/im/on/a/boat'})
        status, headers, body = self.call_vw(req)
        self.assertEqual(status, '412 Precondition Failed')

    def test_delete_first_object_success(self):
        # Empty versions listing (404): just delete the object itself.
        self.app.register(
            'DELETE', '/v1/a/c/o', swob.HTTPOk, {}, 'passed')
        self.app.register(
            'GET', '/v1/a/ver_cont?format=json&prefix=001o/&marker=',
            swob.HTTPNotFound, {}, None)
        cache = FakeCache({'sysmeta': {'versions-location': 'ver_cont'}})
        req = Request.blank(
            '/v1/a/c/o',
            environ={'REQUEST_METHOD': 'DELETE', 'swift.cache': cache,
                     'CONTENT_LENGTH': '0'})
        status, headers, body = self.call_vw(req)
        self.assertEqual(status, '200 OK')
        self.assertEqual(len(self.authorized), 1)
        self.assertRequestEqual(req, self.authorized[0])

    def test_delete_latest_version_success(self):
        # The newest archived version (001o/2) is restored via COPY and
        # then deleted from the versions container.
        self.app.register(
            'DELETE', '/v1/a/c/o', swob.HTTPOk, {}, 'passed')
        self.app.register(
            'GET', '/v1/a/ver_cont?format=json&prefix=001o/&marker=',
            swob.HTTPOk, {},
            '[{"hash": "x", '
            '"last_modified": "2014-11-21T14:14:27.409100", '
            '"bytes": 3, '
            '"name": "001o/1", '
            '"content_type": "text/plain"}, '
            '{"hash": "y", '
            '"last_modified": "2014-11-21T14:23:02.206740", '
            '"bytes": 3, '
            '"name": "001o/2", '
            '"content_type": "text/plain"}]')
        self.app.register(
            'GET', '/v1/a/ver_cont?format=json&prefix=001o/'
            '&marker=001o/2',
            swob.HTTPNotFound, {}, None)
        self.app.register(
            'COPY', '/v1/a/ver_cont/001o/2', swob.HTTPCreated,
            {}, None)
        self.app.register(
            'DELETE', '/v1/a/ver_cont/001o/2', swob.HTTPOk,
            {}, None)

        cache = FakeCache({'sysmeta': {'versions-location': 'ver_cont'}})
        req = Request.blank(
            '/v1/a/c/o',
            headers={'X-If-Delete-At': 1},
            environ={'REQUEST_METHOD': 'DELETE', 'swift.cache': cache,
                     'CONTENT_LENGTH': '0'})
        status, headers, body = self.call_vw(req)
        self.assertEqual(status, '200 OK')
        self.assertEqual(len(self.authorized), 1)
        self.assertRequestEqual(req, self.authorized[0])

        # check that X-If-Delete-At was removed from DELETE request
        calls = self.app.calls_with_headers
        method, path, req_headers = calls.pop()
        self.assertEqual('DELETE', method)
        self.assertTrue(path.startswith('/v1/a/ver_cont/001o/2'))
        self.assertFalse('x-if-delete-at' in req_headers or
                         'X-If-Delete-At' in req_headers)

    def test_DELETE_on_expired_versioned_object(self):
        # If the newest archived version has expired (COPY 404), the
        # middleware falls back to the next-newest one.
        self.app.register(
            'GET', '/v1/a/ver_cont?format=json&prefix=001o/&marker=',
            swob.HTTPOk, {},
            '[{"hash": "x", '
            '"last_modified": "2014-11-21T14:14:27.409100", '
            '"bytes": 3, '
            '"name": "001o/1", '
            '"content_type": "text/plain"}, '
            '{"hash": "y", '
            '"last_modified": "2014-11-21T14:23:02.206740", '
            '"bytes": 3, '
            '"name": "001o/2", '
            '"content_type": "text/plain"}]')
        self.app.register(
            'GET', '/v1/a/ver_cont?format=json&prefix=001o/'
            '&marker=001o/2',
            swob.HTTPNotFound, {}, None)

        # expired object
        self.app.register(
            'COPY', '/v1/a/ver_cont/001o/2', swob.HTTPNotFound,
            {}, None)
        self.app.register(
            'COPY', '/v1/a/ver_cont/001o/1', swob.HTTPCreated,
            {}, None)
        self.app.register(
            'DELETE', '/v1/a/ver_cont/001o/1', swob.HTTPOk,
            {}, None)

        cache = FakeCache({'sysmeta': {'versions-location': 'ver_cont'}})
        req = Request.blank(
            '/v1/a/c/o',
            environ={'REQUEST_METHOD': 'DELETE', 'swift.cache': cache,
                     'CONTENT_LENGTH': '0'})
        status, headers, body = self.call_vw(req)
        self.assertEqual(status, '200 OK')
        self.assertEqual(len(self.authorized), 1)
        self.assertRequestEqual(req, self.authorized[0])

    def test_denied_DELETE_of_versioned_object(self):
        # authorize denies the DELETE: the 403 propagates and exactly one
        # authorize call was made.
        authorize_call = []
        self.app.register(
            'DELETE', '/v1/a/c/o', swob.HTTPOk, {}, 'passed')
        self.app.register(
            'GET', '/v1/a/ver_cont?format=json&prefix=001o/&marker=',
            swob.HTTPOk, {},
            '[{"hash": "x", '
            '"last_modified": "2014-11-21T14:14:27.409100", '
            '"bytes": 3, '
            '"name": "001o/1", '
            '"content_type": "text/plain"}, '
            '{"hash": "y", '
            '"last_modified": "2014-11-21T14:23:02.206740", '
            '"bytes": 3, '
            '"name": "001o/2", '
            '"content_type": "text/plain"}]')
        self.app.register(
            'GET', '/v1/a/ver_cont?format=json&prefix=001o/'
            '&marker=001o/2',
            swob.HTTPNotFound, {}, None)
        self.app.register(
            'DELETE', '/v1/a/c/o', swob.HTTPForbidden,
            {}, None)

        def fake_authorize(req):
            authorize_call.append(req)
            return swob.HTTPForbidden()

        cache = FakeCache({'sysmeta': {'versions-location': 'ver_cont'}})
        req = Request.blank(
            '/v1/a/c/o',
            environ={'REQUEST_METHOD': 'DELETE', 'swift.cache': cache,
                     'swift.authorize': fake_authorize,
                     'CONTENT_LENGTH': '0'})
        status, headers, body = self.call_vw(req)
        self.assertEqual(status, '403 Forbidden')
        self.assertEqual(len(authorize_call), 1)
        self.assertRequestEqual(req, authorize_call[0])

View File

@ -576,6 +576,24 @@ class TestConstraints(unittest.TestCase):
constraints.check_account_format, constraints.check_account_format,
req, req.headers['X-Copy-From-Account']) req, req.headers['X-Copy-From-Account'])
def test_check_container_format(self):
    """check_container_format must reject malformed X-Versions-Location
    values: names containing slashes and the empty string."""
    bad_locations = ('container/with/slashes', '')  # slashes / empty
    for bad in bad_locations:
        req = Request.blank(
            '/v/a/c/o', headers={'X-Versions-Location': bad})
        try:
            constraints.check_container_format(req, bad)
        except HTTPException as err:
            # reject message should identify the container-name problem
            self.assertTrue(err.body.startswith('Container name cannot'))
        else:
            self.fail('check_container_format did not raise error for %r'
                      % bad)
class TestConstraintsConfig(unittest.TestCase): class TestConstraintsConfig(unittest.TestCase):

View File

@ -13,6 +13,7 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
from __future__ import print_function
import unittest import unittest
from contextlib import contextmanager from contextlib import contextmanager
import os import os
@ -1304,7 +1305,7 @@ def attach_fake_replication_rpc(rpc, replicate_hook=None):
self.host = node['replication_ip'] self.host = node['replication_ip']
def replicate(self, op, *sync_args): def replicate(self, op, *sync_args):
print 'REPLICATE: %s, %s, %r' % (self.path, op, sync_args) print('REPLICATE: %s, %s, %r' % (self.path, op, sync_args))
replicate_args = self.path.lstrip('/').split('/') replicate_args = self.path.lstrip('/').split('/')
args = [op] + list(sync_args) args = [op] + list(sync_args)
swob_response = rpc.dispatch(replicate_args, args) swob_response = rpc.dispatch(replicate_args, args)

View File

@ -25,7 +25,6 @@ import six
from six import StringIO from six import StringIO
from six.moves import range from six.moves import range
from test.unit import FakeLogger from test.unit import FakeLogger
import eventlet
from eventlet.green import urllib2 from eventlet.green import urllib2
from swift.common import internal_client from swift.common import internal_client
from swift.common import swob from swift.common import swob
@ -1265,48 +1264,109 @@ class TestSimpleClient(unittest.TestCase):
self.assertEqual(mock_urlopen.call_count, 2) self.assertEqual(mock_urlopen.call_count, 2)
self.assertEqual([None, None], retval) self.assertEqual([None, None], retval)
@mock.patch('eventlet.green.urllib2.urlopen')
def test_request_with_retries_with_HTTPError(self, mock_urlopen):
    """With retries=1 and urlopen always raising HTTPError, retry_request
    should sleep once, attempt twice, then re-raise, for every verb.

    (Removed an unused mock_response local that was never wired up --
    side_effect makes it dead code.)
    """
    c = internal_client.SimpleClient(url='http://127.0.0.1', token='token')
    self.assertEqual(c.retries, 5)

    for request_method in 'GET PUT POST DELETE HEAD COPY'.split():
        mock_urlopen.reset_mock()
        mock_urlopen.side_effect = urllib2.HTTPError(*[None] * 5)
        with mock.patch('swift.common.internal_client.sleep') \
                as mock_sleep:
            self.assertRaises(urllib2.HTTPError,
                              c.retry_request, request_method, retries=1)
        self.assertEqual(mock_sleep.call_count, 1)
        self.assertEqual(mock_urlopen.call_count, 2)
@mock.patch('eventlet.green.urllib2.urlopen')
def test_request_container_with_retries_with_HTTPError(self,
                                                       mock_urlopen):
    """Same retry/sleep/re-raise behavior as the account-level test, but
    for container-scoped requests (container='con').

    (Removed an unused mock_response local that was never wired up --
    side_effect makes it dead code.)
    """
    c = internal_client.SimpleClient(url='http://127.0.0.1', token='token')
    self.assertEqual(c.retries, 5)

    for request_method in 'GET PUT POST DELETE HEAD COPY'.split():
        mock_urlopen.reset_mock()
        mock_urlopen.side_effect = urllib2.HTTPError(*[None] * 5)
        with mock.patch('swift.common.internal_client.sleep') \
                as mock_sleep:
            self.assertRaises(urllib2.HTTPError,
                              c.retry_request, request_method,
                              container='con', retries=1)
        self.assertEqual(mock_sleep.call_count, 1)
        self.assertEqual(mock_urlopen.call_count, 2)
@mock.patch('eventlet.green.urllib2.urlopen')
def test_request_object_with_retries_with_HTTPError(self,
                                                    mock_urlopen):
    """Same retry/sleep/re-raise behavior as the account-level test, but
    for object-scoped requests (container='con', name='obj').

    (Removed an unused mock_response local that was never wired up --
    side_effect makes it dead code.)
    """
    c = internal_client.SimpleClient(url='http://127.0.0.1', token='token')
    self.assertEqual(c.retries, 5)

    for request_method in 'GET PUT POST DELETE HEAD COPY'.split():
        mock_urlopen.reset_mock()
        mock_urlopen.side_effect = urllib2.HTTPError(*[None] * 5)
        with mock.patch('swift.common.internal_client.sleep') \
                as mock_sleep:
            self.assertRaises(urllib2.HTTPError,
                              c.retry_request, request_method,
                              container='con', name='obj', retries=1)
        self.assertEqual(mock_sleep.call_count, 1)
        self.assertEqual(mock_urlopen.call_count, 2)
def test_proxy(self): def test_proxy(self):
running = True # check that proxy arg is passed through to the urllib Request
scheme = 'http'
def handle(sock): proxy_host = '127.0.0.1:80'
while running: proxy = '%s://%s' % (scheme, proxy_host)
try:
with eventlet.Timeout(0.1):
(conn, addr) = sock.accept()
except eventlet.Timeout:
continue
else:
conn.send('HTTP/1.1 503 Server Error')
conn.close()
sock.close()
sock = eventlet.listen(('', 0))
port = sock.getsockname()[1]
proxy = 'http://127.0.0.1:%s' % port
url = 'https://127.0.0.1:1/a' url = 'https://127.0.0.1:1/a'
server = eventlet.spawn(handle, sock)
try:
headers = {'Content-Length': '0'}
with mock.patch('swift.common.internal_client.sleep'):
try:
internal_client.put_object(
url, container='c', name='o1', headers=headers,
contents='', proxy=proxy, timeout=0.1, retries=0)
except urllib2.HTTPError as e:
self.assertEqual(e.code, 503)
except urllib2.URLError as e:
if 'ECONNREFUSED' in str(e):
self.fail(
"Got %s which probably means the http proxy "
"settings were not used" % e)
else:
raise e
else:
self.fail('Unexpected successful response')
finally:
running = False
server.wait()
class FakeConn(object):
def read(self):
return 'irrelevant'
mocked = 'swift.common.internal_client.urllib2.urlopen'
# module level methods
for func in (internal_client.put_object,
internal_client.delete_object):
with mock.patch(mocked) as mock_urlopen:
mock_urlopen.return_value = FakeConn()
func(url, container='c', name='o1', contents='', proxy=proxy,
timeout=0.1, retries=0)
self.assertEqual(1, mock_urlopen.call_count)
args, kwargs = mock_urlopen.call_args
self.assertEqual(1, len(args))
self.assertEqual(1, len(kwargs))
self.assertEqual(0.1, kwargs['timeout'])
self.assertTrue(isinstance(args[0], urllib2.Request))
self.assertEqual(proxy_host, args[0].host)
self.assertEqual(scheme, args[0].type)
# class methods
content = mock.MagicMock()
cl = internal_client.SimpleClient(url)
scenarios = ((cl.get_account, []),
(cl.get_container, ['c']),
(cl.put_container, ['c']),
(cl.put_object, ['c', 'o', content]))
for scenario in scenarios:
with mock.patch(mocked) as mock_urlopen:
mock_urlopen.return_value = FakeConn()
scenario[0](*scenario[1], proxy=proxy, timeout=0.1)
self.assertEqual(1, mock_urlopen.call_count)
args, kwargs = mock_urlopen.call_args
self.assertEqual(1, len(args))
self.assertEqual(1, len(kwargs))
self.assertEqual(0.1, kwargs['timeout'])
self.assertTrue(isinstance(args[0], urllib2.Request))
self.assertEqual(proxy_host, args[0].host)
self.assertEqual(scheme, args[0].type)
if __name__ == '__main__': if __name__ == '__main__':
unittest.main() unittest.main()

View File

@ -12,7 +12,7 @@
# implied. # implied.
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
from __future__ import print_function
import unittest import unittest
from test.unit import temptree from test.unit import temptree
@ -1188,9 +1188,9 @@ class TestServer(unittest.TestCase):
pass pass
def fail(self): def fail(self):
print >>self._stdout, 'mock process started' print('mock process started', file=self._stdout)
sleep(self.delay) # perform setup processing sleep(self.delay) # perform setup processing
print >>self._stdout, 'mock process failed to start' print('mock process failed to start', file=self._stdout)
self.close_stdout() self.close_stdout()
def poll(self): def poll(self):
@ -1198,12 +1198,12 @@ class TestServer(unittest.TestCase):
return self.returncode or None return self.returncode or None
def run(self): def run(self):
print >>self._stdout, 'mock process started' print('mock process started', file=self._stdout)
sleep(self.delay) # perform setup processing sleep(self.delay) # perform setup processing
print >>self._stdout, 'setup complete!' print('setup complete!', file=self._stdout)
self.close_stdout() self.close_stdout()
sleep(self.delay) # do some more processing sleep(self.delay) # do some more processing
print >>self._stdout, 'mock process finished' print('mock process finished', file=self._stdout)
self.finished = True self.finished = True
class MockTime(object): class MockTime(object):

View File

@ -14,7 +14,7 @@
# limitations under the License. # limitations under the License.
"""Tests for swift.common.utils""" """Tests for swift.common.utils"""
from __future__ import print_function
from test.unit import temptree from test.unit import temptree
import ctypes import ctypes
@ -1047,22 +1047,22 @@ class TestUtils(unittest.TestCase):
lfo_stdout = utils.LoggerFileObject(logger) lfo_stdout = utils.LoggerFileObject(logger)
lfo_stderr = utils.LoggerFileObject(logger) lfo_stderr = utils.LoggerFileObject(logger)
lfo_stderr = utils.LoggerFileObject(logger, 'STDERR') lfo_stderr = utils.LoggerFileObject(logger, 'STDERR')
print 'test1' print('test1')
self.assertEquals(sio.getvalue(), '') self.assertEquals(sio.getvalue(), '')
sys.stdout = lfo_stdout sys.stdout = lfo_stdout
print 'test2' print('test2')
self.assertEquals(sio.getvalue(), 'STDOUT: test2\n') self.assertEquals(sio.getvalue(), 'STDOUT: test2\n')
sys.stderr = lfo_stderr sys.stderr = lfo_stderr
print >> sys.stderr, 'test4' print('test4', file=sys.stderr)
self.assertEquals(sio.getvalue(), 'STDOUT: test2\nSTDERR: test4\n') self.assertEquals(sio.getvalue(), 'STDOUT: test2\nSTDERR: test4\n')
sys.stdout = orig_stdout sys.stdout = orig_stdout
print 'test5' print('test5')
self.assertEquals(sio.getvalue(), 'STDOUT: test2\nSTDERR: test4\n') self.assertEquals(sio.getvalue(), 'STDOUT: test2\nSTDERR: test4\n')
print >> sys.stderr, 'test6' print('test6', file=sys.stderr)
self.assertEquals(sio.getvalue(), 'STDOUT: test2\nSTDERR: test4\n' self.assertEquals(sio.getvalue(), 'STDOUT: test2\nSTDERR: test4\n'
'STDERR: test6\n') 'STDERR: test6\n')
sys.stderr = orig_stderr sys.stderr = orig_stderr
print 'test8' print('test8')
self.assertEquals(sio.getvalue(), 'STDOUT: test2\nSTDERR: test4\n' self.assertEquals(sio.getvalue(), 'STDOUT: test2\nSTDERR: test4\n'
'STDERR: test6\n') 'STDERR: test6\n')
lfo_stdout.writelines(['a', 'b', 'c']) lfo_stdout.writelines(['a', 'b', 'c'])
@ -4232,7 +4232,7 @@ class TestThreadPool(unittest.TestCase):
except ZeroDivisionError: except ZeroDivisionError:
# NB: format is (filename, line number, function name, text) # NB: format is (filename, line number, function name, text)
tb_func = [elem[2] for elem tb_func = [elem[2] for elem
in traceback.extract_tb(sys.exc_traceback)] in traceback.extract_tb(sys.exc_info()[2])]
else: else:
self.fail("Expected ZeroDivisionError") self.fail("Expected ZeroDivisionError")
@ -4530,6 +4530,22 @@ class TestGreenAsyncPile(unittest.TestCase):
self.assertEqual(pile.waitall(0.5), [0.1, 0.1]) self.assertEqual(pile.waitall(0.5), [0.1, 0.1])
self.assertEqual(completed[0], 2) self.assertEqual(completed[0], 2)
def test_pending(self):
pile = utils.GreenAsyncPile(3)
self.assertEqual(0, pile._pending)
for repeats in range(2):
# repeat to verify that pending will go again up after going down
for i in range(4):
pile.spawn(lambda: i)
self.assertEqual(4, pile._pending)
for i in range(3, -1, -1):
pile.next()
self.assertEqual(i, pile._pending)
# sanity check - the pile is empty
self.assertRaises(StopIteration, pile.next)
# pending remains 0
self.assertEqual(0, pile._pending)
class TestLRUCache(unittest.TestCase): class TestLRUCache(unittest.TestCase):

View File

@ -141,6 +141,11 @@ class TestWSGI(unittest.TestCase):
expected = swift.common.middleware.dlo.DynamicLargeObject expected = swift.common.middleware.dlo.DynamicLargeObject
self.assertTrue(isinstance(app, expected)) self.assertTrue(isinstance(app, expected))
app = app.app
expected = \
swift.common.middleware.versioned_writes.VersionedWritesMiddleware
self.assertIsInstance(app, expected)
app = app.app app = app.app
expected = swift.proxy.server.Application expected = swift.proxy.server.Application
self.assertTrue(isinstance(app, expected)) self.assertTrue(isinstance(app, expected))
@ -811,6 +816,16 @@ class TestWSGI(unittest.TestCase):
self.assertEquals(r.environ['SCRIPT_NAME'], '') self.assertEquals(r.environ['SCRIPT_NAME'], '')
self.assertEquals(r.environ['PATH_INFO'], '/override') self.assertEquals(r.environ['PATH_INFO'], '/override')
def test_make_env_keep_user_project_id(self):
oldenv = {'HTTP_X_USER_ID': '1234', 'HTTP_X_PROJECT_ID': '5678'}
newenv = wsgi.make_env(oldenv)
self.assertTrue('HTTP_X_USER_ID' in newenv)
self.assertEquals(newenv['HTTP_X_USER_ID'], '1234')
self.assertTrue('HTTP_X_PROJECT_ID' in newenv)
self.assertEquals(newenv['HTTP_X_PROJECT_ID'], '5678')
class TestServersPerPortStrategy(unittest.TestCase): class TestServersPerPortStrategy(unittest.TestCase):
def setUp(self): def setUp(self):
@ -1414,6 +1429,7 @@ class TestPipelineModification(unittest.TestCase):
['swift.common.middleware.catch_errors', ['swift.common.middleware.catch_errors',
'swift.common.middleware.gatekeeper', 'swift.common.middleware.gatekeeper',
'swift.common.middleware.dlo', 'swift.common.middleware.dlo',
'swift.common.middleware.versioned_writes',
'swift.proxy.server']) 'swift.proxy.server'])
def test_proxy_modify_wsgi_pipeline(self): def test_proxy_modify_wsgi_pipeline(self):
@ -1444,6 +1460,7 @@ class TestPipelineModification(unittest.TestCase):
['swift.common.middleware.catch_errors', ['swift.common.middleware.catch_errors',
'swift.common.middleware.gatekeeper', 'swift.common.middleware.gatekeeper',
'swift.common.middleware.dlo', 'swift.common.middleware.dlo',
'swift.common.middleware.versioned_writes',
'swift.common.middleware.healthcheck', 'swift.common.middleware.healthcheck',
'swift.proxy.server']) 'swift.proxy.server'])
@ -1541,6 +1558,7 @@ class TestPipelineModification(unittest.TestCase):
'swift.common.middleware.catch_errors', 'swift.common.middleware.catch_errors',
'swift.common.middleware.gatekeeper', 'swift.common.middleware.gatekeeper',
'swift.common.middleware.dlo', 'swift.common.middleware.dlo',
'swift.common.middleware.versioned_writes',
'swift.common.middleware.healthcheck', 'swift.common.middleware.healthcheck',
'swift.proxy.server']) 'swift.proxy.server'])
@ -1554,6 +1572,7 @@ class TestPipelineModification(unittest.TestCase):
'swift.common.middleware.healthcheck', 'swift.common.middleware.healthcheck',
'swift.common.middleware.catch_errors', 'swift.common.middleware.catch_errors',
'swift.common.middleware.dlo', 'swift.common.middleware.dlo',
'swift.common.middleware.versioned_writes',
'swift.proxy.server']) 'swift.proxy.server'])
def test_catch_errors_gatekeeper_configured_not_at_start(self): def test_catch_errors_gatekeeper_configured_not_at_start(self):
@ -1566,6 +1585,7 @@ class TestPipelineModification(unittest.TestCase):
'swift.common.middleware.catch_errors', 'swift.common.middleware.catch_errors',
'swift.common.middleware.gatekeeper', 'swift.common.middleware.gatekeeper',
'swift.common.middleware.dlo', 'swift.common.middleware.dlo',
'swift.common.middleware.versioned_writes',
'swift.proxy.server']) 'swift.proxy.server'])
@with_tempdir @with_tempdir
@ -1598,7 +1618,7 @@ class TestPipelineModification(unittest.TestCase):
tempdir, policy.ring_name + '.ring.gz') tempdir, policy.ring_name + '.ring.gz')
app = wsgi.loadapp(conf_path) app = wsgi.loadapp(conf_path)
proxy_app = app.app.app.app.app proxy_app = app.app.app.app.app.app
self.assertEqual(proxy_app.account_ring.serialized_path, self.assertEqual(proxy_app.account_ring.serialized_path,
account_ring_path) account_ring_path)
self.assertEqual(proxy_app.container_ring.serialized_path, self.assertEqual(proxy_app.container_ring.serialized_path,

Some files were not shown because too many files have changed in this diff Show More