
Merge branch 'master' into feature/crypto

Change-Id: I6e601dafa31850ccaf031cedc656238c6fda9c62
changes/75/219775/1
Alistair Coles, 6 years ago
commit 257e468e9b
121 changed files with 9163 additions and 5578 deletions
1. +3 -0  .mailmap
2. +19 -0  AUTHORS
3. +154 -11  CHANGELOG
4. +149 -0  bandit.yaml
5. +1 -1  bin/swift-account-info
6. +1 -1  bin/swift-container-info
7. +1 -1  bin/swift-container-sync
8. +4 -2  bin/swift-dispersion-report
9. +1 -1  bin/swift-recon
10. +1 -1  bin/swift-ring-builder
11. +1 -1  bin/swift-ring-builder-analyzer
12. +2 -0  doc/manpages/object-server.conf.5
13. +4 -2  doc/manpages/swift-recon.1
14. +0 -1  doc/saio/swift/container-server/1.conf
15. +0 -1  doc/saio/swift/container-server/2.conf
16. +0 -1  doc/saio/swift/container-server/3.conf
17. +0 -1  doc/saio/swift/container-server/4.conf
18. +5 -0  doc/saio/swift/container-sync-realms.conf
19. +1 -1  doc/saio/swift/object-expirer.conf
20. +9 -1  doc/saio/swift/proxy-server.conf
21. +12 -3  doc/source/admin_guide.rst
22. +91 -70  doc/source/deployment_guide.rst
23. +23 -0  doc/source/development_saio.rst
24. +58 -6  doc/source/first_contribution_swift.rst
25. +7 -0  doc/source/howto_installmultinode.rst
26. +1 -0  doc/source/logs.rst
27. +9 -0  doc/source/middleware.rst
28. +3 -86  doc/source/overview_object_versioning.rst
29. +1 -1  etc/object-expirer.conf-sample
30. +2 -0  etc/object-server.conf-sample
31. +12 -1  etc/proxy-server.conf-sample
32. +1 -1  etc/swift.conf-sample
33. +1 -1  requirements.txt
34. +1 -1  setup.cfg
35. +10 -2  swift/account/backend.py
36. +45 -6  swift/account/reaper.py
37. +88 -81  swift/cli/recon.py
38. +23 -19  swift/cli/ringbuilder.py
39. +18 -12  swift/common/constraints.py
40. +33 -4  swift/common/db_replicator.py
41. +3 -3  swift/common/manager.py
42. +1 -1  swift/common/memcached.py
43. +16 -8  swift/common/middleware/recon.py
44. +110 -26  swift/common/middleware/tempurl.py
45. +496 -0  swift/common/middleware/versioned_writes.py
46. +2 -2  swift/common/swob.py
47. +6 -3  swift/common/utils.py
48. +4 -3  swift/common/wsgi.py
49. +52 -52  swift/locale/swift.pot
50. +2 -2  swift/locale/zh_CN/LC_MESSAGES/swift.po
51. +2 -2  swift/obj/auditor.py
52. +11 -5  swift/obj/diskfile.py
53. +141 -18  swift/obj/replicator.py
54. +38 -5  swift/obj/server.py
55. +1 -1  swift/obj/ssync_receiver.py
56. +4 -1  swift/obj/ssync_sender.py
57. +1 -1  swift/obj/updater.py
58. +102 -2  swift/proxy/controllers/base.py
59. +57 -233  swift/proxy/controllers/obj.py
60. +14 -58  swift/proxy/server.py
61. +3 -0  test-requirements.txt
62. +7 -10  test/__init__.py
63. +17 -16  test/functional/__init__.py
64. +20 -2  test/functional/swift_test_client.py
65. +4 -4  test/functional/test_account.py
66. +63 -63  test/functional/test_container.py
67. +51 -51  test/functional/test_object.py
68. +383 -46  test/functional/tests.py
69. +3 -3  test/probe/brain.py
70. +61 -12  test/probe/common.py
71. +44 -43  test/probe/test_account_failures.py
72. +4 -4  test/probe/test_account_reaper.py
73. +9 -9  test/probe/test_container_failures.py
74. +1 -1  test/probe/test_container_merge_policy_index.py
75. +2 -2  test/probe/test_empty_device_handoff.py
76. +1 -1  test/probe/test_object_async_update.py
77. +8 -8  test/probe/test_object_failures.py
78. +93 -5  test/probe/test_object_handoff.py
79. +1 -1  test/probe/test_reconstructor_durable.py
80. +3 -29  test/probe/test_reconstructor_revert.py
81. +103 -0  test/probe/test_wsgi_servers.py
82. +1 -0  test/sample.conf
83. +9 -4  test/unit/__init__.py
84. +12 -11  test/unit/account/test_backend.py
85. +82 -18  test/unit/account/test_reaper.py
86. +20 -20  test/unit/account/test_server.py
87. +14 -14  test/unit/cli/test_info.py
88. +102 -54  test/unit/cli/test_recon.py
89. +55 -38  test/unit/cli/test_ringbuilder.py
90. +1 -1  test/unit/common/middleware/helpers.py
91. +1 -1  test/unit/common/middleware/test_dlo.py
92. +60 -5  test/unit/common/middleware/test_recon.py
93. +149 -41  test/unit/common/middleware/test_tempurl.py
94. +566 -0  test/unit/common/middleware/test_versioned_writes.py
95. +18 -0  test/unit/common/test_constraints.py
96. +2 -1  test/unit/common/test_db_replicator.py
97. +100 -40  test/unit/common/test_internal_client.py
98. +6 -6  test/unit/common/test_manager.py
99. +24 -8  test/unit/common/test_utils.py
100. +21 -1  test/unit/common/test_wsgi.py
(The remaining 21 of the 121 changed files are not listed in this excerpt.)

+3 -0  .mailmap

@ -78,3 +78,6 @@ Jaivish Kothari <jaivish.kothari@nectechnologies.in> <janonymous.codevulture@gma
Michael Matur <michael.matur@gmail.com>
Kazuhiro Miyahara <miyahara.kazuhiro@lab.ntt.co.jp>
Alexandra Settle <alexandra.settle@rackspace.com>
Kenichiro Matsuda <matsuda_kenichi@jp.fujitsu.com>
Atsushi Sakai <sakaia@jp.fujitsu.com>
Takashi Natsume <natsume.takashi@lab.ntt.co.jp>

+19 -0  AUTHORS

@ -26,6 +26,7 @@ Chuck Thier (cthier@gmail.com)
Contributors
------------
Mehdi Abaakouk (mehdi.abaakouk@enovance.com)
Timur Alperovich (timur.alperovich@gmail.com)
Jesse Andrews (anotherjesse@gmail.com)
Joe Arnold (joe@swiftstack.com)
Ionuț Arțăriși (iartarisi@suse.cz)
@ -47,6 +48,7 @@ Tim Burke (tim.burke@gmail.com)
Brian D. Burns (iosctr@gmail.com)
Devin Carlen (devin.carlen@gmail.com)
Thierry Carrez (thierry@openstack.org)
Carlos Cavanna (ccavanna@ca.ibm.com)
Emmanuel Cazenave (contact@emcaz.fr)
Mahati Chamarthy (mahati.chamarthy@gmail.com)
Zap Chang (zapchang@gmail.com)
@ -55,6 +57,7 @@ Ray Chen (oldsharp@163.com)
Harshit Chitalia (harshit@acelio.com)
Brian Cline (bcline@softlayer.com)
Alistair Coles (alistair.coles@hp.com)
Clément Contini (ccontini@cloudops.com)
Brian Curtin (brian.curtin@rackspace.com)
Thiago da Silva (thiago@redhat.com)
Julien Danjou (julien@danjou.info)
@ -64,6 +67,7 @@ Cedric Dos Santos (cedric.dos.sant@gmail.com)
Gerry Drudy (gerry.drudy@hp.com)
Morgan Fainberg (morgan.fainberg@gmail.com)
ZhiQiang Fan (aji.zqfan@gmail.com)
Oshrit Feder (oshritf@il.ibm.com)
Mike Fedosin (mfedosin@mirantis.com)
Ricardo Ferreira (ricardo.sff@gmail.com)
Flaper Fesp (flaper87@gmail.com)
@ -91,8 +95,10 @@ Dan Hersam (dan.hersam@hp.com)
Derek Higgins (derekh@redhat.com)
Alex Holden (alex@alexjonasholden.com)
Edward Hope-Morley (opentastic@gmail.com)
Charles Hsu (charles0126@gmail.com)
Joanna H. Huang (joanna.huitzu.huang@gmail.com)
Kun Huang (gareth@unitedstack.com)
Bill Huber (wbhuber@us.ibm.com)
Matthieu Huin (mhu@enovance.com)
Hodong Hwang (hodong.hwang@kt.com)
Motonobu Ichimura (motonobu@gmail.com)
@ -126,6 +132,7 @@ John Leach (john@johnleach.co.uk)
Ed Leafe (ed.leafe@rackspace.com)
Thomas Leaman (thomas.leaman@hp.com)
Eohyung Lee (liquidnuker@gmail.com)
Zhao Lei (zhaolei@cn.fujitsu.com)
Jamie Lennox (jlennox@redhat.com)
Tong Li (litong01@us.ibm.com)
Changbin Liu (changbin.liu@gmail.com)
@ -136,10 +143,12 @@ Zhongyue Luo (zhongyue.nah@intel.com)
Paul Luse (paul.e.luse@intel.com)
Christopher MacGown (chris@pistoncloud.com)
Dragos Manolescu (dragosm@hp.com)
Ben Martin (blmartin@us.ibm.com)
Steve Martinelli (stevemar@ca.ibm.com)
Juan J. Martinez (juan@memset.com)
Marcelo Martins (btorch@gmail.com)
Dolph Mathews (dolph.mathews@gmail.com)
Kenichiro Matsuda (matsuda_kenichi@jp.fujitsu.com)
Michael Matur (michael.matur@gmail.com)
Donagh McCabe (donagh.mccabe@hp.com)
Andy McCrae (andy.mccrae@gmail.com)
@ -151,11 +160,13 @@ Jola Mirecka (jola.mirecka@hp.com)
Kazuhiro Miyahara (miyahara.kazuhiro@lab.ntt.co.jp)
Daisuke Morita (morita.daisuke@lab.ntt.co.jp)
Dirk Mueller (dirk@dmllr.de)
Takashi Natsume (natsume.takashi@lab.ntt.co.jp)
Russ Nelson (russ@crynwr.com)
Maru Newby (mnewby@internap.com)
Newptone (xingchao@unitedstack.com)
Colin Nicholson (colin.nicholson@iomart.com)
Zhenguo Niu (zhenguo@unitedstack.com)
Ondrej Novy (ondrej.novy@firma.seznam.cz)
Timothy Okwii (tokwii@cisco.com)
Matthew Oliver (matt@oliver.net.au)
Hisashi Osanai (osanai.hisashi@jp.fujitsu.com)
@ -169,18 +180,24 @@ Constantine Peresypkin (constantine.peresypk@rackspace.com)
Dieter Plaetinck (dieter@vimeo.com)
Dan Prince (dprince@redhat.com)
Sarvesh Ranjan (saranjan@cisco.com)
Falk Reimann (falk.reimann@sap.com)
Brian Reitz (brian.reitz@oracle.com)
Felipe Reyes (freyes@tty.cl)
Janie Richling (jrichli@us.ibm.com)
Matt Riedemann (mriedem@us.ibm.com)
Li Riqiang (lrqrun@gmail.com)
Rafael Rivero (rafael@cloudscaling.com)
Victor Rodionov (victor.rodionov@nexenta.com)
Eran Rom (eranr@il.ibm.com)
Aaron Rosen (arosen@nicira.com)
Brent Roskos (broskos@internap.com)
Hamdi Roumani (roumani@ca.ibm.com)
Shilla Saebi (shilla.saebi@gmail.com)
Atsushi Sakai (sakaia@jp.fujitsu.com)
Cristian A Sanchez (cristian.a.sanchez@intel.com)
Christian Schwede (cschwede@redhat.com)
Mark Seger (Mark.Seger@hp.com)
Azhagu Selvan SP (tamizhgeek@gmail.com)
Alexandra Settle (alexandra.settle@rackspace.com)
Andrew Clay Shafer (acs@parvuscaptus.com)
Mitsuhiro SHIGEMATSU (shigematsu.mitsuhiro@lab.ntt.co.jp)
@ -198,6 +215,7 @@ Jeremy Stanley (fungi@yuggoth.org)
Mauro Stettler (mauro.stettler@gmail.com)
Tobias Stevenson (tstevenson@vbridges.com)
Victor Stinner (vstinner@redhat.com)
Akihito Takai (takaiak@nttdata.co.jp)
Pearl Yajing Tan (pearl.y.tan@seagate.com)
Yuriy Taraday (yorik.sar@gmail.com)
Monty Taylor (mordred@inaugust.com)
@ -231,5 +249,6 @@ Guang Yee (guang.yee@hp.com)
Pete Zaitcev (zaitcev@kotori.zaitcev.us)
Hua Zhang (zhuadl@cn.ibm.com)
Jian Zhang (jian.zhang@intel.com)
Kai Zhang (zakir.exe@gmail.com)
Ning Zhang (ning@zmanda.com)
Yuan Zhou (yuan.zhou@intel.com)

+154 -11  CHANGELOG

@ -1,4 +1,133 @@
swift (2.3.0)
swift (2.4.0)
* Dependency changes
- Added six requirement. This is part of an ongoing effort to add
support for Python 3.
- Dropped support for Python 2.6.
* Config changes
- Recent versions of Python restrict the number of headers allowed in a
request to 100. This number may be too low for custom middleware. The
new "extra_header_count" config value in swift.conf can be used to
increase the number of headers allowed.
- Renamed "run_pause" setting to "interval" (current configs with
run_pause still work). Future versions of Swift may remove the
run_pause setting.
* Versioned writes middleware
The versioned writes feature has been refactored and reimplemented as
middleware. You should explicitly add the versioned_writes middleware to
your proxy pipeline, but do not remove or disable the existing container
server config setting ("allow_versions"), if it is currently enabled.
The existing container server config setting enables existing
containers to continue being versioned. Please see
http://swift.openstack.org/middleware.html#how-to-enable-object-versioning-in-a-swift-cluster
for further upgrade notes.
* Allow 1+ object-servers-per-disk deployment
Enabled by a new > 0 integer config value, "servers_per_port" in the
[DEFAULT] config section for object-server and/or replication server
configs. The setting's integer value determines how many different
object-server workers handle requests for any single unique local port
in the ring. In this mode, the parent swift-object-server process
continues to run as the original user (i.e. root if low-port binding
is required), binds to all ports as defined in the ring, and forks off
the specified number of workers per listen socket. The child, per-port
servers drop privileges and behave pretty much how object-server workers
always have, except that because the ring has unique ports per disk, the
object-servers will only be handling requests for a single disk. The
parent process detects dead servers and restarts them (with the correct
listen socket), starts missing servers when an updated ring file is
found with a device on the server with a new port, and kills extraneous
servers when their port is found to no longer be in the ring. The ring
files are stat'ed at most every "ring_check_interval" seconds, as
configured in the object-server config (same default of 15s).
In testing, this deployment configuration (with a value of 3) lowers
request latency, improves requests per second, and isolates slow disk
IO as compared to the existing "workers" setting. To use this, each
device must be added to the ring using a different port.
* Do container listing updates in another (green)thread
The object server has learned the "container_update_timeout" setting
(with a default of 1 second). This value is the number of seconds that
the object server will wait for the container server to update the
listing before returning the status of the object PUT operation.
Previously, the object server would wait up to 3 seconds for the
container server response. The new behavior dramatically lowers object
PUT latency when container servers in the cluster are busy (e.g. when
the container is very large). Setting the value too low may result in a
client PUT'ing an object and not being able to immediately find it in
listings. Setting it too high will increase latency for clients when
container servers are busy.
* TempURL fixes (closes CVE-2015-5223)
Do not allow PUT tempurls to create pointers to other data.
Specifically, disallow the creation of DLO object manifests via a PUT
tempurl. This prevents discoverability attacks which can use any PUT
tempurl to probe for private data by creating a DLO object manifest and
then using the PUT tempurl to head the object.
* Ring changes
- Partition placement no longer uses the port number to place
partitions. This improves dispersion in small clusters running one
object server per drive, and it does not affect dispersion in
clusters running one object server per server.
- Added ring-builder-analyzer tool to more easily test and analyze a
series of ring management operations.
- Stop moving partitions unnecessarily when overload is on.
* Significant improvements and bug fixes have been made to erasure code
support. This feature is suitable for beta testing, but it is not yet
ready for broad production usage.
* Bulk upload now treats user xattrs on files in the given archive as
object metadata on the resulting created objects.
* Emit warning log in object replicator if "handoffs_first" or
"handoff_delete" is set.
* Enable object replicator's failure count in swift-recon.
* Added storage policy support to dispersion tools.
* Support keystone v3 domains in swift-dispersion.
* Added domain_remap information to the /info endpoint.
* Added support for a "default_reseller_prefix" in domain_remap
middleware config.
* Allow SLO PUTs to forgo per-segment integrity checks. Previously, each
segment referenced in the manifest also needed the correct etag and
bytes setting. These fields now allow the "null" value to skip those
particular checks on the given segment.
* Allow rsync to use compression via a "rsync_compress" config. If set to
true, compression is only enabled for an rsync to a device in a
different region. In some cases, this can speed up cross-region
replication data transfer.
* Added time synchronization check in swift-recon (the --time option).
* The account reaper now runs faster on large accounts.
* Various other minor bug fixes and improvements.
swift (2.3.0, OpenStack Kilo)
* Erasure Code support (beta)
@ -58,6 +187,7 @@ swift (2.3.0)
* Various other minor bug fixes and improvements.
swift (2.2.2)
* Data placement changes
@ -117,6 +247,7 @@ swift (2.2.2)
* Various other minor bug fixes and improvements.
swift (2.2.1)
* Swift now rejects object names with Unicode surrogates.
@ -164,7 +295,7 @@ swift (2.2.1)
* Various other minor bug fixes and improvements.
swift (2.2.0)
swift (2.2.0, OpenStack Juno)
* Added support for Keystone v3 auth.
@ -338,7 +469,7 @@ swift (2.0.0)
* Various other minor bug fixes and improvements
swift (1.13.1)
swift (1.13.1, OpenStack Icehouse)
* Change the behavior of CORS responses to better match the spec
@ -605,7 +736,7 @@ swift (1.11.0)
* Various other bug fixes and improvements
swift (1.10.0)
swift (1.10.0, OpenStack Havana)
* Added support for pooling memcache connections
@ -776,7 +907,7 @@ swift (1.9.0)
* Various other minor bug fixes and improvements
swift (1.8.0)
swift (1.8.0, OpenStack Grizzly)
* Make rings' replica count adjustable
@ -947,7 +1078,7 @@ swift (1.7.5)
* Various other minor bug fixes and improvements
swift (1.7.4)
swift (1.7.4, OpenStack Folsom)
* Fix issue where early client disconnects may have caused a memory leak
@ -962,14 +1093,14 @@ swift (1.7.0)
Serialize RingData in a versioned, custom format which is a combination
of a JSON-encoded header and .tostring() dumps of the
replica2part2dev_id arrays. This format deserializes hundreds of times
faster than rings serialized with Python 2.7's pickle (a significant
performance regression for ring loading between Python 2.6 and Python
2.7). Fixes bug 1031954.
The new implementation is backward-compatible; if a ring
does not begin with a new-style magic string, it is assumed to be an
old-style pickle-dumped ring and is handled as before. So new Swift
code can read old rings, but old Swift code will not be able to read
newly-serialized rings.
@ -1153,7 +1284,7 @@ swift (1.5.0)
* Various other minor bug fixes and improvements
swift (1.4.8)
swift (1.4.8, OpenStack Essex)
* Added optional max_containers_per_account restriction
@ -1296,7 +1427,7 @@ swift (1.4.4)
* Query only specific zone via swift-recon.
swift (1.4.3)
swift (1.4.3, OpenStack Diablo)
* Additional quarantine catching code.
@ -1421,3 +1552,15 @@ swift (1.4.0)
* Stats uploaders now allow overrides for source_filename_pattern and
new_log_cutoff values.
---
Changelog entries for previous versions are incomplete
swift (1.3.0, OpenStack Cactus)
swift (1.2.0, OpenStack Bexar)
swift (1.1.0, OpenStack Austin)
swift (1.0.0, Initial Release)
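
The TempURL fix called out above (CVE-2015-5223) applies to signed URLs whose signature is an HMAC-SHA1 over the request method, expiry, and object path. The following is a minimal sketch of that computation, not the middleware code itself; it targets Python 2 (matching the codebase), and the key, path, and expiry are made-up values (a real key comes from the account's X-Account-Meta-Temp-URL-Key metadata):

import hmac
from hashlib import sha1
from time import time

key = 'mysecretkey'                       # hypothetical account key
method = 'GET'
expires = int(time() + 3600)              # valid for one hour
path = '/v1/AUTH_test/container/object'   # hypothetical object path

# Sign "<method>\n<expires>\n<path>" with the account key.
hmac_body = '%s\n%s\n%s' % (method, expires, path)
sig = hmac.new(key, hmac_body, sha1).hexdigest()
print('%s?temp_url_sig=%s&temp_url_expires=%s' % (path, sig, expires))

The CVE fix constrains what a PUT made through such a URL may create (no DLO manifests); the signature scheme itself is unchanged.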

+149 -0  bandit.yaml

@ -0,0 +1,149 @@
# optional: after how many files to update progress
#show_progress_every: 100
# optional: plugins directory name
#plugins_dir: 'plugins'
# optional: plugins discovery name pattern
plugin_name_pattern: '*.py'
# optional: terminal escape sequences to display colors
#output_colors:
# DEFAULT: '\033[0m'
# HEADER: '\033[95m'
# LOW: '\033[94m'
# MEDIUM: '\033[93m'
# HIGH: '\033[91m'
# optional: log format string
#log_format: "[%(module)s]\t%(levelname)s\t%(message)s"
# globs of files which should be analyzed
include:
- '*.py'
# a list of strings, which if found in the path will cause files to be
# excluded
# for example /tests/ - to remove all files in the tests directory
#exclude_dirs:
# - '/tests/'
# configured for swift
profiles:
gate:
include:
- blacklist_calls
- blacklist_imports
- exec_used
- linux_commands_wildcard_injection
- request_with_no_cert_validation
- set_bad_file_permissions
- subprocess_popen_with_shell_equals_true
- ssl_with_bad_version
- password_config_option_not_marked_secret
# - any_other_function_with_shell_equals_true
# - ssl_with_bad_defaults
# - jinja2_autoescape_false
# - use_of_mako_templates
# - subprocess_without_shell_equals_true
# - any_other_function_with_shell_equals_true
# - start_process_with_a_shell
# - start_process_with_no_shell
# - hardcoded_sql_expressions
# - hardcoded_tmp_directory
# - linux_commands_wildcard_injection
# For now, some items are commented out; they can be enabled later as needed.
blacklist_calls:
bad_name_sets:
# - pickle:
# qualnames: [pickle.loads, pickle.load, pickle.Unpickler,
# cPickle.loads, cPickle.load, cPickle.Unpickler]
# level: LOW
# message: "Pickle library appears to be in use, possible security
#issue."
- marshal:
qualnames: [marshal.load, marshal.loads]
message: "Deserialization with the marshal module is possibly
dangerous."
# - md5:
# qualnames: [hashlib.md5]
# level: LOW
# message: "Use of insecure MD5 hash function."
- mktemp_q:
qualnames: [tempfile.mktemp]
message: "Use of insecure and deprecated function (mktemp)."
# - eval:
# qualnames: [eval]
# level: LOW
# message: "Use of possibly insecure function - consider using safer
#ast.literal_eval."
- mark_safe:
names: [mark_safe]
message: "Use of mark_safe() may expose cross-site scripting
vulnerabilities and should be reviewed."
- httpsconnection:
qualnames: [httplib.HTTPSConnection]
message: "Use of HTTPSConnection does not provide security, see
https://wiki.openstack.org/wiki/OSSN/OSSN-0033"
- yaml_load:
qualnames: [yaml.load]
message: "Use of unsafe yaml load. Allows instantiation of
arbitrary objects. Consider yaml.safe_load()."
- urllib_urlopen:
qualnames: [urllib.urlopen, urllib.urlretrieve, urllib.URLopener,
urllib.FancyURLopener, urllib2.urlopen, urllib2.Request]
message: "Audit url open for permitted schemes. Allowing use of
file:/ or custom schemes is often unexpected."
- paramiko_injection:
qualnames: [paramiko.exec_command, paramiko.invoke_shell]
message: "Paramiko exec_command() and invoke_shell() usage may
expose command injection vulnerabilities and should be reviewed."
shell_injection:
# Start a process using the subprocess module, or one of its wrappers.
subprocess: [subprocess.Popen, subprocess.call, subprocess.check_call,
subprocess.check_output, utils.execute,
utils.execute_with_timeout]
# Start a process with a function vulnerable to shell injection.
shell: [os.system, os.popen, os.popen2, os.popen3, os.popen4,
popen2.popen2, popen2.popen3, popen2.popen4, popen2.Popen3,
popen2.Popen4, commands.getoutput, commands.getstatusoutput]
# Start a process with a function that is not vulnerable to shell
# injection.
no_shell: [os.execl, os.execle, os.execlp, os.execlpe, os.execv,os.execve,
os.execvp, os.execvpe, os.spawnl, os.spawnle, os.spawnlp,
os.spawnlpe, os.spawnv, os.spawnve, os.spawnvp, os.spawnvpe,
os.startfile]
blacklist_imports:
bad_import_sets:
- telnet:
imports: [telnetlib]
level: HIGH
message: "Telnet is considered insecure. Use SSH or some other
encrypted protocol."
- info_libs:
imports: [Crypto]
level: LOW
message: "Consider possible security implications associated with
#{module} module."
hardcoded_password:
word_list: "wordlist/default-passwords"
ssl_with_bad_version:
bad_protocol_versions:
- 'PROTOCOL_SSLv2'
- 'SSLv2_METHOD'
- 'SSLv23_METHOD'
- 'PROTOCOL_SSLv3' # strict option
- 'PROTOCOL_TLSv1' # strict option
- 'SSLv3_METHOD' # strict option
- 'TLSv1_METHOD' # strict option
password_config_option_not_marked_secret:
function_names:
- oslo.config.cfg.StrOpt
- oslo_config.cfg.StrOpt

+1 -1  bin/swift-account-info

@ -1,4 +1,4 @@
#!/usr/bin/python
#!/usr/bin/env python
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at


+1 -1  bin/swift-container-info

@ -1,4 +1,4 @@
#!/usr/bin/python
#!/usr/bin/env python
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at


+1 -1  bin/swift-container-sync

@ -1,4 +1,4 @@
#!/usr/bin/python
#!/usr/bin/env python
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");


+4 -2  bin/swift-dispersion-report

@ -26,6 +26,7 @@ except ImportError:
from eventlet import GreenPool, hubs, patcher, Timeout
from eventlet.pools import Pool
from eventlet.green import urllib2
from swift.common import direct_client
try:
@ -176,9 +177,10 @@ def object_dispersion_report(coropool, connpool, account, object_ring,
try:
objects = [o['name'] for o in conn.get_container(
container, prefix='dispersion_', full_listing=True)[1]]
except ClientException as err:
if err.http_status != 404:
except urllib2.HTTPError as err:
if err.getcode() != 404:
raise
print >>stderr, 'No objects to query. Has ' \
'swift-dispersion-populate been run?'
stderr.flush()


+1 -1  bin/swift-recon

@ -1,4 +1,4 @@
#!/usr/bin/python
#!/usr/bin/env python
# Copyright (c) 2014 Christian Schwede <christian.schwede@enovance.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");


+1 -1  bin/swift-ring-builder

@ -1,4 +1,4 @@
#!/usr/bin/python
#!/usr/bin/env python
# Copyright (c) 2014 Christian Schwede <christian.schwede@enovance.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");


+1 -1  bin/swift-ring-builder-analyzer

@ -1,4 +1,4 @@
#!/usr/bin/python
#!/usr/bin/env python
# Copyright (c) 2015 Samuel Merritt <sam@swiftstack.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");


+2 -0  doc/manpages/object-server.conf.5

@ -129,6 +129,8 @@ Logging address. The default is /dev/log.
Request timeout to external services. The default is 3 seconds.
.IP \fBconn_timeout\fR
Connection timeout to external services. The default is 0.5 seconds.
.IP \fBcontainer_update_timeout\fR
Time to wait while sending a container update on object update. The default is 1 second.
.RE
.PD


+4 -2  doc/manpages/swift-recon.1

@ -25,7 +25,7 @@
.SH SYNOPSIS
.LP
.B swift-recon
\ <server_type> [-v] [--suppress] [-a] [-r] [-u] [-d] [-l] [--md5] [--auditor] [--updater] [--expirer] [--sockstat]
\ <server_type> [-v] [--suppress] [-a] [-r] [-u] [-d] [-l] [-T] [--md5] [--auditor] [--updater] [--expirer] [--sockstat]
.SH DESCRIPTION
.PP
@ -80,8 +80,10 @@ Get md5sum of servers ring and compare to local copy
Get cluster socket usage stats
.IP "\fB--driveaudit\fR"
Get drive audit error stats
.IP "\fB-T, --time\fR"
Check time synchronization
.IP "\fB--all\fR"
Perform all checks. Equivalent to \-arudlq \-\-md5
Perform all checks. Equivalent to \-arudlqT \-\-md5
.IP "\fB--region=REGION\fR"
Only query servers in specified region
.IP "\fB-z ZONE, --zone=ZONE\fR"


+0 -1  doc/saio/swift/container-server/1.conf

@ -9,7 +9,6 @@ user = <your-user-name>
log_facility = LOG_LOCAL2
recon_cache_path = /var/cache/swift
eventlet_debug = true
allow_versions = true
[pipeline:main]
pipeline = recon container-server


+0 -1  doc/saio/swift/container-server/2.conf

@ -9,7 +9,6 @@ user = <your-user-name>
log_facility = LOG_LOCAL3
recon_cache_path = /var/cache/swift2
eventlet_debug = true
allow_versions = true
[pipeline:main]
pipeline = recon container-server


+0 -1  doc/saio/swift/container-server/3.conf

@ -9,7 +9,6 @@ user = <your-user-name>
log_facility = LOG_LOCAL4
recon_cache_path = /var/cache/swift3
eventlet_debug = true
allow_versions = true
[pipeline:main]
pipeline = recon container-server


+0 -1  doc/saio/swift/container-server/4.conf

@ -9,7 +9,6 @@ user = <your-user-name>
log_facility = LOG_LOCAL5
recon_cache_path = /var/cache/swift4
eventlet_debug = true
allow_versions = true
[pipeline:main]
pipeline = recon container-server


+5 -0  doc/saio/swift/container-sync-realms.conf

@ -0,0 +1,5 @@
[saio]
key = changeme
key2 = changeme
cluster_saio_endpoint = http://127.0.0.1:8080/v1/

+1 -1  doc/saio/swift/object-expirer.conf

@ -37,7 +37,7 @@ interval = 300
# config value
# processes = 0
# process is which of the parts a particular process will work on
# process can also be specified on the command line and will overide the config
# process can also be specified on the command line and will override the config
# value
# process is "zero based", if you want to use 3 processes, you should run
# processes with process set to 0, 1, and 2


+9 -1  doc/saio/swift/proxy-server.conf

@ -9,7 +9,7 @@ eventlet_debug = true
[pipeline:main]
# Yes, proxy-logging appears twice. This is so that
# middleware-originated requests get logged too.
pipeline = catch_errors gatekeeper healthcheck proxy-logging cache bulk tempurl ratelimit crossdomain tempauth staticweb container-quotas account-quotas slo dlo proxy-logging proxy-server
pipeline = catch_errors gatekeeper healthcheck proxy-logging cache bulk tempurl ratelimit crossdomain container_sync tempauth staticweb container-quotas account-quotas slo dlo versioned_writes proxy-logging proxy-server
[filter:catch_errors]
use = egg:swift#catch_errors
@ -35,6 +35,10 @@ use = egg:swift#dlo
[filter:slo]
use = egg:swift#slo
[filter:container_sync]
use = egg:swift#container_sync
current = //saio/saio_endpoint
[filter:tempurl]
use = egg:swift#tempurl
@ -60,6 +64,10 @@ use = egg:swift#memcache
[filter:gatekeeper]
use = egg:swift#gatekeeper
[filter:versioned_writes]
use = egg:swift#versioned_writes
allow_versioned_writes = true
[app:proxy-server]
use = egg:swift#proxy
allow_account_management = true


+12 -3  doc/source/admin_guide.rst

@ -154,6 +154,10 @@ until it has been resolved. If the drive is going to be replaced immediately,
then it is just best to replace the drive, format it, remount it, and let
replication fill it up.
After the drive is unmounted, make sure the mount point is owned by root
(root:root 755). This ensures that rsync will not try to replicate into the
root drive once the failed drive is unmounted.
If the drive can't be replaced immediately, then it is best to leave it
unmounted, and set the device weight to 0. This will allow all the
replicas that were on that drive to be replicated elsewhere until the drive
@ -545,18 +549,22 @@ Request URI Description
/recon/sockstat returns consumable info from /proc/net/sockstat|6
/recon/devices returns list of devices and devices dir i.e. /srv/node
/recon/async returns count of async pending
/recon/replication returns object replication times (for backward compatibility)
/recon/replication returns object replication info (for backward compatibility)
/recon/replication/<type> returns replication info for given type (account, container, object)
/recon/auditor/<type> returns auditor stats on last reported scan for given type (account, container, object)
/recon/updater/<type> returns last updater sweep times for given type (container, object)
========================= ========================================================================================
Note that 'object_replication_last' and 'object_replication_time' in object
replication info are considered to be transitional and will be removed in
the subsequent releases. Use 'replication_last' and 'replication_time' instead.
This information can also be queried via the swift-recon command line utility::
fhines@ubuntu:~$ swift-recon -h
Usage:
usage: swift-recon <server_type> [-v] [--suppress] [-a] [-r] [-u] [-d]
[-l] [--md5] [--auditor] [--updater] [--expirer] [--sockstat]
[-l] [-T] [--md5] [--auditor] [--updater] [--expirer] [--sockstat]
<server_type> account|container|object
Defaults to object server.
@ -579,7 +587,8 @@ This information can also be queried via the swift-recon command line utility::
-q, --quarantined Get cluster quarantine stats
--md5 Get md5sum of servers ring and compare to local copy
--sockstat Get cluster socket usage stats
--all Perform all checks. Equal to -arudlq --md5 --sockstat
-T, --time Check time synchronization
--all Perform all checks. Equal to -arudlqT --md5 --sockstat
-z ZONE, --zone=ZONE Only query servers in specified zone
-t SECONDS, --timeout=SECONDS
Time to wait for a response from a server
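
The recon endpoints listed above return JSON and can be queried directly as well as through swift-recon. A hedged sketch of doing so by hand (Python 2, matching the codebase; the node address is a made-up example):

import json
import urllib2

# Hypothetical storage node; any server running the recon middleware
# exposes these paths on its bind port.
url = 'http://10.0.0.1:6000/recon/replication/object'
data = json.loads(urllib2.urlopen(url, timeout=5).read())
# Prefer the newer keys; the 'object_replication_*' ones are transitional.
print('%s %s' % (data.get('replication_last'), data.get('replication_time')))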


+91 -70  doc/source/deployment_guide.rst

@ -390,6 +390,13 @@ max_header_size 8192 max_header_size is the max number of bytes in
See also include_service_catalog in
proxy-server.conf-sample (documented in
overview_auth.rst).
extra_header_count 0 By default the maximum number of allowed
headers depends on the number of max
allowed metadata settings plus a default
value of 32 for regular http headers.
If for some reason this is not enough (custom
middleware for example) it can be increased
with the extra_header_count constraint.
=================== ========== =============================================
---------------------------
@ -405,76 +412,86 @@ The following configuration options are available:
[DEFAULT]
=================== ========== =============================================
Option Default Description
------------------- ---------- ---------------------------------------------
swift_dir /etc/swift Swift configuration directory
devices /srv/node Parent directory of where devices are mounted
mount_check true Whether or not check if the devices are
mounted to prevent accidentally writing
to the root device
bind_ip 0.0.0.0 IP Address for server to bind to
bind_port 6000 Port for server to bind to
bind_timeout 30 Seconds to attempt bind before giving up
workers auto Override the number of pre-forked workers
that will accept connections. If set it
should be an integer, zero means no fork. If
unset, it will try to default to the number
of effective cpu cores and fallback to one.
Increasing the number of workers helps slow
filesystem operations in one request from
negatively impacting other requests, but only
the :ref:`servers_per_port
<server-per-port-configuration>`
option provides complete I/O isolation with
no measurable overhead.
servers_per_port 0 If each disk in each storage policy ring has
unique port numbers for its "ip" value, you
can use this setting to have each
object-server worker only service requests
for the single disk matching the port in the
ring. The value of this setting determines
how many worker processes run for each port
(disk) in the ring. If you have 24 disks
per server, and this setting is 4, then
each storage node will have 1 + (24 * 4) =
97 total object-server processes running.
This gives complete I/O isolation, drastically
reducing the impact of slow disks on storage
node performance. The object-replicator and
object-reconstructor need to see this setting
too, so it must be in the [DEFAULT] section.
See :ref:`server-per-port-configuration`.
max_clients 1024 Maximum number of clients one worker can
process simultaneously (it will actually
accept(2) N + 1). Setting this to one (1)
will only handle one request at a time,
without accepting another request
concurrently.
disable_fallocate false Disable "fast fail" fallocate checks if the
underlying filesystem does not support it.
log_max_line_length 0 Caps the length of log lines to the
value given; no limit if set to 0, the
default.
log_custom_handlers None Comma-separated list of functions to call
to setup custom log handlers.
eventlet_debug false If true, turn on debug logging for eventlet
fallocate_reserve 0 You can set fallocate_reserve to the number of
bytes you'd like fallocate to reserve, whether
there is space for the given file size or not.
This is useful for systems that behave badly
when they completely run out of space; you can
make the services pretend they're out of space
early.
conn_timeout 0.5 Time to wait while attempting to connect to
another backend node.
node_timeout 3 Time to wait while sending each chunk of data
to another backend node.
client_timeout 60 Time to wait while receiving each chunk of
data from a client or another backend node.
network_chunk_size 65536 Size of chunks to read/write over the network
disk_chunk_size 65536 Size of chunks to read/write to disk
=================== ========== =============================================
======================== ========== ==========================================
Option Default Description
------------------------ ---------- ------------------------------------------
swift_dir /etc/swift Swift configuration directory
devices /srv/node Parent directory of where devices are
mounted
mount_check true Whether or not check if the devices are
mounted to prevent accidentally writing
to the root device
bind_ip 0.0.0.0 IP Address for server to bind to
bind_port 6000 Port for server to bind to
bind_timeout 30 Seconds to attempt bind before giving up
workers auto Override the number of pre-forked workers
that will accept connections. If set it
should be an integer, zero means no fork.
If unset, it will try to default to the
number of effective cpu cores and fallback
to one. Increasing the number of workers
helps slow filesystem operations in one
request from negatively impacting other
requests, but only the
:ref:`servers_per_port
<server-per-port-configuration>` option
provides complete I/O isolation with no
measurable overhead.
servers_per_port 0 If each disk in each storage policy ring
has unique port numbers for its "ip"
value, you can use this setting to have
each object-server worker only service
requests for the single disk matching the
port in the ring. The value of this
setting determines how many worker
processes run for each port (disk) in the
ring. If you have 24 disks per server, and
this setting is 4, then each storage node
will have 1 + (24 * 4) = 97 total
object-server processes running. This
gives complete I/O isolation, drastically
reducing the impact of slow disks on
storage node performance. The
object-replicator and object-reconstructor
need to see this setting too, so it must
be in the [DEFAULT] section.
See :ref:`server-per-port-configuration`.
max_clients 1024 Maximum number of clients one worker can
process simultaneously (it will actually
accept(2) N + 1). Setting this to one (1)
will only handle one request at a time,
without accepting another request
concurrently.
disable_fallocate false Disable "fast fail" fallocate checks if
the underlying filesystem does not support
it.
log_max_line_length 0 Caps the length of log lines to the
value given; no limit if set to 0, the
default.
log_custom_handlers None Comma-separated list of functions to call
to setup custom log handlers.
eventlet_debug false If true, turn on debug logging for
eventlet
fallocate_reserve 0 You can set fallocate_reserve to the
number of bytes you'd like fallocate to
reserve, whether there is space for the
given file size or not. This is useful for
systems that behave badly when they
completely run out of space; you can
make the services pretend they're out of
space early.
conn_timeout 0.5 Time to wait while attempting to connect
to another backend node.
node_timeout 3 Time to wait while sending each chunk of
data to another backend node.
client_timeout 60 Time to wait while receiving each chunk of
data from a client or another backend node
network_chunk_size 65536 Size of chunks to read/write over the
network
disk_chunk_size 65536 Size of chunks to read/write to disk
container_update_timeout 1 Time to wait while sending a container
update on object update.
======================== ========== ==========================================
.. _object-server-options:
@ -1229,6 +1246,10 @@ For a standard swift install, all data drives are mounted directly under
be sure to set the `devices` config option in all of the server configs to
point to the correct directory.
The mount points for each drive in /srv/node/ should be owned by the root user
almost exclusively (root:root 755). This is required to prevent rsync from
syncing files into the root drive in the event a drive is unmounted.
Swift uses system calls to reserve space for new objects being written into
the system. If your filesystem does not support `fallocate()` or
`posix_fallocate()`, be sure to set the `disable_fallocate = true` config
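
To make the servers_per_port arithmetic in the table above concrete, here is a tiny worked sketch (the 24-disk, servers_per_port = 4 figures are just the example values from the table):

def object_server_process_count(disks_per_server, servers_per_port):
    # One parent process that binds the listen sockets, plus
    # servers_per_port workers for every disk (i.e. every ring port).
    return 1 + disks_per_server * servers_per_port

print(object_server_process_count(24, 4))  # -> 97, as in the table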


+23 -0  doc/source/development_saio.rst

@ -95,6 +95,16 @@ another device when creating the VM, and follow these instructions:
# **Make sure to include the trailing slash after /srv/$x/**
for x in {1..4}; do sudo chown -R ${USER}:${USER} /srv/$x/; done
Note: We create the mount points and mount the storage disk under
/mnt/sdb1. This disk will contain one directory per simulated swift node,
each owned by the current swift user.
We then create symlinks to these directories under /srv.
If the disk sdb is unmounted, files will not be written under
/srv/\*, because the symbolic link destination /mnt/sdb1/* will not
exist. This prevents disk sync operations from writing to the root
partition in the event a drive is unmounted.
#. Next, skip to :ref:`common-dev-section`.
@ -135,6 +145,15 @@ these instructions:
# **Make sure to include the trailing slash after /srv/$x/**
for x in {1..4}; do sudo chown -R ${USER}:${USER} /srv/$x/; done
Note: We create the mount points and mount the loopback file under
/mnt/sdb1. This file will contain one directory per simulated swift node,
each owned by the current swift user.
We then create symlinks to these directories under /srv.
If the loopback file is unmounted, files will not be written under
/srv/\*, because the symbolic link destination /mnt/sdb1/* will not
exist. This prevents disk sync operations from writing to the root
partition in the event a drive is unmounted.
.. _common-dev-section:
@ -352,6 +371,10 @@ commands are as follows:
.. literalinclude:: /../saio/swift/container-reconciler.conf
#. ``/etc/swift/container-sync-realms.conf``
.. literalinclude:: /../saio/swift/container-sync-realms.conf
#. ``/etc/swift/account-server/1.conf``
.. literalinclude:: /../saio/swift/account-server/1.conf


+58 -6  doc/source/first_contribution_swift.rst

@ -118,11 +118,41 @@ After you proposed your changes to Swift, you can track the review in:
* `<https://review.openstack.org>`_
.. _post-rebase-instructions:
------------------------
Post rebase instructions
------------------------
After rebasing, the following steps should be performed to rebuild the swift
installation. Note that these commands should be performed from the root of the
swift repo directory (e.g. $HOME/swift/):
``sudo python setup.py develop``
``sudo pip install -r test-requirements.txt``
If using TOX, depending on the changes made during the rebase, you may need to
rebuild the TOX environment (generally this will be the case if
test-requirements.txt was updated such that a new version of a package is
required). This can be accomplished using the '-r' argument to the TOX cli:
``tox -r``
You can include any of the other TOX arguments as well, for example, to run the
pep8 suite and rebuild the TOX environment the following can be used:
``tox -r -e pep8``
The rebuild option only needs to be specified once for a particular build (e.g.
pep8); further invocations of the same build will not require this until the
next rebase.
---------------
Troubleshooting
---------------
You may run into the following error when starting Swift if you rebase
You may run into the following errors when starting Swift if you rebase
your commit using:
``git rebase``
@ -143,10 +173,32 @@ your commit using:
pkg_resources.DistributionNotFound: swift==2.3.1.devXXX
(where XXX represents a dev version of Swift).
This happens because `git rebase` will retrieve code for a different version of
Swift in the development stream, but the start scripts under `/usr/local/bin` have
not been updated. The solution is to execute the following command under the swift
directory (which contains `setup.py`):
.. code-block:: python
``sudo python setup.py develop``
Traceback (most recent call last):
File "/usr/local/bin/swift-proxy-server", line 10, in <module>
execfile(__file__)
File "/home/swift/swift/bin/swift-proxy-server", line 23, in <module>
sys.exit(run_wsgi(conf_file, 'proxy-server', **options))
File "/home/swift/swift/swift/common/wsgi.py", line 888, in run_wsgi
loadapp(conf_path, global_conf=global_conf)
File "/home/swift/swift/swift/common/wsgi.py", line 390, in loadapp
func(PipelineWrapper(ctx))
File "/home/swift/swift/swift/proxy/server.py", line 602, in modify_wsgi_pipeline
ctx = pipe.create_filter(filter_name)
File "/home/swift/swift/swift/common/wsgi.py", line 329, in create_filter
global_conf=self.context.global_conf)
File "/usr/lib/python2.7/dist-packages/paste/deploy/loadwsgi.py", line 296, in loadcontext
global_conf=global_conf)
File "/usr/lib/python2.7/dist-packages/paste/deploy/loadwsgi.py", line 328, in _loadegg
return loader.get_context(object_type, name, global_conf)
File "/usr/lib/python2.7/dist-packages/paste/deploy/loadwsgi.py", line 620, in get_context
object_type, name=name)
File "/usr/lib/python2.7/dist-packages/paste/deploy/loadwsgi.py", line 659, in find_egg_entry_point
for prot in protocol_options] or '(no entry points)'))))
LookupError: Entry point 'versioned_writes' not found in egg 'swift' (dir: /home/swift/swift; protocols: paste.filter_factory, paste.filter_app_factory; entry_points: )
This happens because `git rebase` will retrieve code for a different version of
Swift in the development stream, but the start scripts under `/usr/local/bin` have
not been updated. The solution is to follow the steps described in the
:ref:`post-rebase-instructions` section.

+7 -0  doc/source/howto_installmultinode.rst

@ -6,6 +6,13 @@ Please refer to the latest official
`Openstack Installation Guides <http://docs.openstack.org/#install-guides>`_
for the most up-to-date documentation.
Object Storage installation guide for Openstack Kilo
----------------------------------------------------
* `openSUSE 13.2 and SUSE Linux Enterprise Server 12 <http://docs.openstack.org/kilo/install-guide/install/zypper/content/ch_swift.html>`_
* `RHEL 7, CentOS 7, and Fedora 21 <http://docs.openstack.org/kilo/install-guide/install/yum/content/ch_swift.html>`_
* `Ubuntu 14.04 <http://docs.openstack.org/kilo/install-guide/install/apt/content/ch_swift.html>`_
Object Storage installation guide for Openstack Juno
----------------------------------------------------


+1 -0  doc/source/logs.rst

@ -102,6 +102,7 @@ DLO :ref:`dynamic-large-objects`
LE :ref:`list_endpoints`
KS :ref:`keystoneauth`
RL :ref:`ratelimit`
VW :ref:`versioned_writes`
======================= =============================


+9 -0  doc/source/middleware.rst

@ -155,6 +155,15 @@ Name Check (Forbidden Character Filter)
:members:
:show-inheritance:
.. _versioned_writes:
Object Versioning
=================
.. automodule:: swift.common.middleware.versioned_writes
:members:
:show-inheritance:
Proxy Logging
=============


+3 -86  doc/source/overview_object_versioning.rst

@ -1,89 +1,6 @@
=================
Object Versioning
=================
--------
Overview
--------
Object versioning in swift is implemented by setting a flag on the container
to tell swift to version all objects in the container. The flag is the
``X-Versions-Location`` header on the container, and its value is the
container where the versions are stored. It is recommended to use a different
``X-Versions-Location`` container for each container that is being versioned.
When data is ``PUT`` into a versioned container (a container with the
versioning flag turned on), the existing data in the file is redirected to a
new object and the data in the ``PUT`` request is saved as the data for the
versioned object. The new object name (for the previous version) is
``<versions_container>/<length><object_name>/<timestamp>``, where ``length``
is the 3-character zero-padded hexadecimal length of the ``<object_name>`` and
``<timestamp>`` is the timestamp of when the previous version was created.
A ``GET`` to a versioned object will return the current version of the object
without having to do any request redirects or metadata lookups.
A ``POST`` to a versioned object will update the object metadata as normal,
but will not create a new version of the object. In other words, new versions
are only created when the content of the object changes.
A ``DELETE`` to a versioned object will only remove the current version of the
object. If you have 5 total versions of the object, you must delete the
object 5 times to completely remove the object.
Note: A large object manifest file cannot be versioned, but a large object
manifest may point to versioned segments.
--------------------------------------------------
How to Enable Object Versioning in a Swift Cluster
--------------------------------------------------
Set ``allow_versions`` to ``True`` in the container server config.
-----------------------
Examples Using ``curl``
-----------------------
First, create a container with the ``X-Versions-Location`` header or add the
header to an existing container. Also make sure the container referenced by
the ``X-Versions-Location`` exists. In this example, the name of that
container is "versions"::
curl -i -XPUT -H "X-Auth-Token: <token>" \
-H "X-Versions-Location: versions" http://<storage_url>/container
curl -i -XPUT -H "X-Auth-Token: <token>" http://<storage_url>/versions
Create an object (the first version)::
curl -i -XPUT --data-binary 1 -H "X-Auth-Token: <token>" \
http://<storage_url>/container/myobject
Now create a new version of that object::
curl -i -XPUT --data-binary 2 -H "X-Auth-Token: <token>" \
http://<storage_url>/container/myobject
See a listing of the older versions of the object::
curl -i -H "X-Auth-Token: <token>" \
http://<storage_url>/versions?prefix=008myobject/
Now delete the current version of the object and see that the older version is
gone::
curl -i -XDELETE -H "X-Auth-Token: <token>" \
http://<storage_url>/container/myobject
curl -i -H "X-Auth-Token: <token>" \
http://<storage_url>/versions?prefix=008myobject/
---------------------------------------------------
How to Disable Object Versioning in a Swift Cluster
---------------------------------------------------
If you want to disable all functionality, set ``allow_versions`` back to
``False`` in the container server config.
Disable versioning a versioned container (x is any value except empty)::
curl -i -XPOST -H "X-Auth-Token: <token>" \
-H "X-Remove-Versions-Location: x" http://<storage_url>/container
.. automodule:: swift.common.middleware.versioned_writes
:members:
:show-inheritance:
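
The removed overview above describes archived versions being stored as <versions_container>/<length><object_name>/<timestamp>. A minimal sketch of that naming rule (the timestamp is a made-up value):

def version_object_name(object_name, timestamp):
    # <length> is the 3-character zero-padded hex length of the object
    # name, followed by the name and the timestamp of the old version.
    return '%03x%s/%s' % (len(object_name), object_name, timestamp)

print(version_object_name('myobject', '1440619861.00000'))
# -> 008myobject/1440619861.00000, which is why the curl examples above
#    list versions with prefix=008myobject/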

+1 -1  etc/object-expirer.conf-sample

@ -41,7 +41,7 @@
# config value
# processes = 0
# process is which of the parts a particular process will work on
# process can also be specified on the command line and will overide the config
# process can also be specified on the command line and will override the config
# value
# process is "zero based", if you want to use 3 processes, you should run
# processes with process set to 0, 1, and 2


+2 -0  etc/object-server.conf-sample

@ -60,6 +60,8 @@ bind_port = 6000
# conn_timeout = 0.5
# Time to wait while sending each chunk of data to another backend node.
# node_timeout = 3
# Time to wait while sending a container update on object update.
# container_update_timeout = 1.0
# Time to wait while receiving each chunk of data from a client or another
# backend node.
# client_timeout = 60


+12 -1  etc/proxy-server.conf-sample

@ -77,7 +77,7 @@ bind_port = 8080
# eventlet_debug = false
[pipeline:main]
pipeline = catch_errors gatekeeper healthcheck proxy-logging cache container_sync bulk tempurl ratelimit tempauth container-quotas account-quotas slo dlo proxy-logging proxy-server
pipeline = catch_errors gatekeeper healthcheck proxy-logging cache container_sync bulk tempurl ratelimit tempauth container-quotas account-quotas slo dlo versioned_writes proxy-logging proxy-server
[app:proxy-server]
use = egg:swift#proxy
@ -703,3 +703,14 @@ use = egg:swift#xprofile
#
# unwind the iterator of applications
# unwind = false
# Note: Put after slo, dlo in the pipeline.
# If you don't put it in the pipeline, it will be inserted automatically.
[filter:versioned_writes]
use = egg:swift#versioned_writes
# Enables using versioned writes middleware and exposing configuration
# settings via HTTP GET /info.
# WARNING: Setting this option bypasses the "allow_versions" option
# in the container configuration file, which will be eventually
# deprecated. See documentation for more details.
# allow_versioned_writes = false

+1 -1  etc/swift.conf-sample

@ -134,7 +134,7 @@ default = yes
# headers. If for some reason this is not enough (custom middleware for
# example) it can be increased with the extra_header_count constraint.
#extra_header_count = 32
#extra_header_count = 0
# max_object_name_length is the max number of bytes in the utf8 encoding


+1 -1  requirements.txt

@ -10,4 +10,4 @@ pastedeploy>=1.3.3
simplejson>=2.0.9
six>=1.9.0
xattr>=0.4
PyECLib>=1.0.7
PyECLib==1.0.7 # BSD

+1 -1  setup.cfg

@ -95,8 +95,8 @@ paste.filter_factory =
gatekeeper = swift.common.middleware.gatekeeper:filter_factory
container_sync = swift.common.middleware.container_sync:filter_factory
xprofile = swift.common.middleware.xprofile:filter_factory
versioned_writes = swift.common.middleware.versioned_writes:filter_factory
fake_footers = swift.common.middleware.fake_footers:filter_factory
test_fake_footers = swift.common.middleware.test_fake_footers:filter_factory
[build_sphinx]
all_files = 1


+10 -2  swift/account/backend.py

@ -380,6 +380,7 @@ class AccountBroker(DatabaseBroker):
:returns: list of tuples of (name, object_count, bytes_used, 0)
"""
delim_force_gte = False
(marker, end_marker, prefix, delimiter) = utf8encode(
marker, end_marker, prefix, delimiter)
self._commit_puts_stale_ok()
@ -392,12 +393,17 @@ class AccountBroker(DatabaseBroker):
query = """
SELECT name, object_count, bytes_used, 0
FROM container
WHERE deleted = 0 AND """
WHERE """
query_args = []
if end_marker:
query += ' name < ? AND'
query_args.append(end_marker)
if marker and marker >= prefix:
if delim_force_gte:
query += ' name >= ? AND'
query_args.append(marker)
# Always set back to False
delim_force_gte = False
elif marker and marker >= prefix:
query += ' name > ? AND'
query_args.append(marker)
elif prefix:
@ -437,6 +443,8 @@ class AccountBroker(DatabaseBroker):
end = name.find(delimiter, len(prefix))
if end > 0:
marker = name[:end] + chr(ord(delimiter) + 1)
# we want result to be inclusive of delim+1
delim_force_gte = True
dir_name = name[:end + 1]
if dir_name != orig_marker:
results.append([dir_name, 0, 0, 1])
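
The hunk above rolls a whole subdirectory up into one listing entry and then jumps the marker just past it (marker = name[:end] + chr(ord(delimiter) + 1)), with delim_force_gte making the next query inclusive of that new marker. A rough, self-contained sketch of the idea (plain Python rather than the broker's SQL; the names are illustrative):

def list_with_delimiter(names, prefix='', delimiter='/'):
    # Roll names sharing "<prefix><subdir><delimiter>" into one entry,
    # then advance the marker past the whole subdir, as the broker does.
    results = []
    marker = ''
    for name in sorted(names):
        if not name.startswith(prefix) or (marker and name <= marker):
            continue
        end = name.find(delimiter, len(prefix))
        if end > 0:
            results.append(name[:end + 1])
            marker = name[:end] + chr(ord(delimiter) + 1)
        else:
            results.append(name)
            marker = name
    return results

print(list_with_delimiter(
    ['photos/2015/a.jpg', 'photos/2016/b.jpg', 'readme.txt']))
# -> ['photos/', 'readme.txt']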


+45 -6  swift/account/reaper.py

@ -15,10 +15,12 @@
import os
import random
import socket
from swift import gettext_ as _
from logging import DEBUG
from math import sqrt
from time import time
from hashlib import md5
import itertools
from eventlet import GreenPool, sleep, Timeout
@ -70,6 +72,7 @@ class AccountReaper(Daemon):
self.node_timeout = int(conf.get('node_timeout', 10))
self.conn_timeout = float(conf.get('conn_timeout', 0.5))
self.myips = whataremyips(conf.get('bind_ip', '0.0.0.0'))
self.bind_port = int(conf.get('bind_port', 0))
self.concurrency = int(conf.get('concurrency', 25))
self.container_concurrency = self.object_concurrency = \
sqrt(self.concurrency)
@ -79,6 +82,7 @@ class AccountReaper(Daemon):
self.delay_reaping = int(conf.get('delay_reaping') or 0)
reap_warn_after = float(conf.get('reap_warn_after') or 86400 * 30)
self.reap_not_done_after = reap_warn_after + self.delay_reaping
self.start_time = time()
def get_account_ring(self):
"""The account :class:`swift.common.ring.Ring` for the cluster."""
@ -161,9 +165,16 @@ class AccountReaper(Daemon):
if not partition.isdigit():
continue
nodes = self.get_account_ring().get_part_nodes(int(partition))
if (not is_local_device(self.myips, None, nodes[0]['ip'], None)
or not os.path.isdir(partition_path)):
if not os.path.isdir(partition_path):
continue
container_shard = None
for container_shard, node in enumerate(nodes):
if is_local_device(self.myips, None, node['ip'], None) and \
(not self.bind_port or self.bind_port == node['port']):
break
else:
continue
for suffix in os.listdir(partition_path):
suffix_path = os.path.join(partition_path, suffix)
if not os.path.isdir(suffix_path):
@ -181,7 +192,9 @@ class AccountReaper(Daemon):
AccountBroker(os.path.join(hsh_path, fname))
if broker.is_status_deleted() and \
not broker.empty():
self.reap_account(broker, partition, nodes)
self.reap_account(
broker, partition, nodes,
container_shard=container_shard)
def reset_stats(self):
self.stats_return_codes = {}
@ -192,7 +205,7 @@ class AccountReaper(Daemon):
self.stats_containers_possibly_remaining = 0
self.stats_objects_possibly_remaining = 0
def reap_account(self, broker, partition, nodes):
def reap_account(self, broker, partition, nodes, container_shard=None):
"""
Called once per pass for each account this server is the primary for
and attempts to delete the data for the given account. The reaper will
@ -219,6 +232,8 @@ class AccountReaper(Daemon):
:param broker: The AccountBroker for the account to delete.
:param partition: The partition in the account ring the account is on.
:param nodes: The primary node dicts for the account to delete.
:param container_shard: int used to shard containers reaped. If None,
will reap all containers.
.. seealso::
@ -237,16 +252,24 @@ class AccountReaper(Daemon):
account = info['account']
self.logger.info(_('Beginning pass on account %s'), account)
self.reset_stats()
container_limit = 1000
if container_shard is not None:
container_limit *= len(nodes)
try:
marker = ''
while True:
containers = \
list(broker.list_containers_iter(1000, marker, None, None,
None))
list(broker.list_containers_iter(container_limit, marker,
None, None, None))
if not containers:
break
try:
for (container, _junk, _junk, _junk) in containers:
this_shard = int(md5(container).hexdigest(), 16) % \
len(nodes)
if container_shard not in (this_shard, None):
continue
self.container_pool.spawn(self.reap_container, account,
partition, nodes, container)
self.container_pool.waitall()
@ -351,6 +374,10 @@ class AccountReaper(Daemon):
self.stats_return_codes.get(err.http_status / 100, 0) + 1
self.logger.increment(
'return_codes.%d' % (err.http_status / 100,))
except (Timeout, socket.error) as err:
self.logger.error(
_('Timeout Exception with %(ip)s:%(port)s/%(device)s'),
node)
if not objects:
break
try:
@ -403,6 +430,12 @@ class AccountReaper(Daemon):
self.stats_return_codes.get(err.http_status / 100, 0) + 1
self.logger.increment(
'return_codes.%d' % (err.http_status / 100,))
except (Timeout, socket.error) as err:
self.logger.error(
_('Timeout Exception with %(ip)s:%(port)s/%(device)s'),
node)
failures += 1
self.logger.increment('containers_failures')
if successes > failures:
self.stats_containers_deleted += 1
self.logger.increment('containers_deleted')
@ -473,6 +506,12 @@ class AccountReaper(Daemon):
self.stats_return_codes.get(err.http_status / 100, 0) + 1
self.logger.increment(
'return_codes.%d' % (err.http_status / 100,))
except (Timeout, socket.error) as err:
failures += 1
self.logger.increment('objects_failures')
self.logger.error(
_('Timeout Exception with %(ip)s:%(port)s/%(device)s'),
node)
if successes > failures:
self.stats_objects_deleted += 1
self.logger.increment('objects_deleted')
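
The reaper change above spreads containers across an account's primary nodes by hashing the container name, so each node's reaper only deletes its own shard. A small sketch of that mapping (the container names and node count are made up):

from hashlib import md5

def container_shard(container, node_count):
    # Same expression as in reap_account() above:
    # int(md5(container).hexdigest(), 16) % len(nodes)
    return int(md5(container.encode('utf-8')).hexdigest(), 16) % node_count

for name in ['logs', 'images', 'backups', 'tmp']:
    print('%s -> shard %d of 3' % (name, container_shard(name, 3)))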


+88 -81  swift/cli/recon.py

@ -100,11 +100,14 @@ class Scout(object):
Obtain telemetry from a host running the swift recon middleware.
:param host: host to check
:returns: tuple of (recon url used, response body, and status)
:returns: tuple of (recon url used, response body, status, time start
and time end)
"""
base_url = "http://%s:%s/recon/" % (host[0], host[1])
ts_start = time.time()
url, content, status = self.scout_host(base_url, self.recon_type)
return url, content, status
ts_end = time.time()
return url, content, status, ts_start, ts_end
def scout_server_type(self, host):
"""
@ -253,7 +256,8 @@ class SwiftRecon(object):
if self.verbose:
for ring_file, ring_sum in rings.items():
print("-> On disk %s md5sum: %s" % (ring_file, ring_sum))
for url, response, status in self.pool.imap(recon.scout, hosts):
for url, response, status, ts_start, ts_end in self.pool.imap(
recon.scout, hosts):
if status != 200:
errors = errors + 1
continue
@ -291,7 +295,8 @@ class SwiftRecon(object):
printfn("[%s] Checking swift.conf md5sum" % self._ptime())
if self.verbose:
printfn("-> On disk swift.conf md5sum: %s" % (conf_sum,))
for url, response, status in self.pool.imap(recon.scout, hosts):
for url, response, status, ts_start, ts_end in self.pool.imap(
recon.scout, hosts):
if status == 200:
if response[SWIFT_CONF_FILE] != conf_sum:
printfn("!! %s (%s) doesn't match on disk md5sum" %