Merge master into feature/hummingbird

Change-Id: I449596b4f167a88aa7ed999a6a657c762e9a4597
Commit: 0f7f1de233

.mailmap (6 changes)
@@ -87,3 +87,9 @@ Donagh McCabe <donagh.mccabe@hpe.com> <donagh.mccabe@hp.com>
 Eamonn O'Toole <eamonn.otoole@hpe.com> <eamonn.otoole@hp.com>
 Gerry Drudy <gerry.drudy@hpe.com> <gerry.drudy@hp.com>
 Mark Seger <mark.seger@hpe.com> <mark.seger@hp.com>
+Timur Alperovich <timur.alperovich@gmail.com> <timuralp@swiftstack.com>
+Mehdi Abaakouk <sileht@redhat.com> <mehdi.abaakouk@enovance.com>
+Richard Hawkins <richard.hawkins@rackspace.com> <hurricanerix@gmail.com>
+Ondrej Novy <ondrej.novy@firma.seznam.cz>
+Peter Lisak <peter.lisak@firma.seznam.cz>
+Ke Liang <ke.liang@easystack.cn>
@@ -0,0 +1,18 @@
+#!/bin/sh
+
+RET=0
+for MAN in doc/manpages/* ; do
+    OUTPUT=$(LC_ALL=en_US.UTF-8 MANROFFSEQ='' MANWIDTH=80 man --warnings -E UTF-8 -l \
+        -Tutf8 -Z "$MAN" 2>&1 >/dev/null)
+    if [ -n "$OUTPUT" ] ; then
+        RET=1
+        echo "$MAN:"
+        echo "$OUTPUT"
+    fi
+done
+
+if [ "$RET" -eq "0" ] ; then
+    echo "All manpages are fine"
+fi
+
+exit "$RET"
AUTHORS (33 changes)

@@ -18,6 +18,7 @@ CORE Emeritus
 Chmouel Boudjnah (chmouel@enovance.com)
 Florian Hines (syn@ronin.io)
 Greg Holt (gholt@rackspace.com)
+Paul Luse (paul.e.luse@intel.com)
 Jay Payne (letterj@gmail.com)
 Peter Portante (peter.portante@redhat.com)
 Will Reese (wreese@gmail.com)

@@ -25,7 +26,7 @@ Chuck Thier (cthier@gmail.com)
 
 Contributors
 ------------
-Mehdi Abaakouk (mehdi.abaakouk@enovance.com)
+Mehdi Abaakouk (sileht@redhat.com)
 Timur Alperovich (timur.alperovich@gmail.com)
 Jesse Andrews (anotherjesse@gmail.com)
 Joe Arnold (joe@swiftstack.com)

@@ -41,7 +42,7 @@ James E. Blair (jeblair@openstack.org)
 Fabien Boucher (fabien.boucher@enovance.com)
 Clark Boylan (clark.boylan@gmail.com)
 Pádraig Brady (pbrady@redhat.com)
-Lorcan Browne (lorcan.browne@hp.com)
+Lorcan Browne (lorcan.browne@hpe.com)
 Russell Bryant (rbryant@redhat.com)
 Jay S. Bryant (jsbryant@us.ibm.com)
 Tim Burke (tim.burke@gmail.com)

@@ -56,15 +57,17 @@ François Charlier (francois.charlier@enovance.com)
 Ray Chen (oldsharp@163.com)
 Harshit Chitalia (harshit@acelio.com)
 Brian Cline (bcline@softlayer.com)
-Alistair Coles (alistair.coles@hp.com)
+Alistair Coles (alistair.coles@hpe.com)
 Clément Contini (ccontini@cloudops.com)
 Brian Curtin (brian.curtin@rackspace.com)
 Thiago da Silva (thiago@redhat.com)
 Julien Danjou (julien@danjou.info)
+Paul Dardeau (paul.dardeau@intel.com)
+Zack M. Davis (zdavis@swiftstack.com)
 Ksenia Demina (kdemina@mirantis.com)
 Dan Dillinger (dan.dillinger@sonian.net)
 Cedric Dos Santos (cedric.dos.sant@gmail.com)
-Gerry Drudy (gerry.drudy@hp.com)
+Gerry Drudy (gerry.drudy@hpe.com)
 Morgan Fainberg (morgan.fainberg@gmail.com)
 ZhiQiang Fan (aji.zqfan@gmail.com)
 Oshrit Feder (oshritf@il.ibm.com)

@@ -85,6 +88,7 @@ David Goetz (david.goetz@rackspace.com)
 Tushar Gohad (tushar.gohad@intel.com)
 Jonathan Gonzalez V (jonathan.abdiel@gmail.com)
 Joe Gordon (jogo@cloudscaling.com)
+ChangBo Guo(gcb) (eric.guo@easystack.cn)
 David Hadas (davidh@il.ibm.com)
 Andrew Hale (andy@wwwdata.eu)
 Soren Hansen (soren@linux2go.dk)

@@ -92,9 +96,12 @@ Richard Hawkins (richard.hawkins@rackspace.com)
 Gregory Haynes (greg@greghaynes.net)
 Doug Hellmann (doug.hellmann@dreamhost.com)
 Dan Hersam (dan.hersam@hp.com)
+hgangwx (hgangwx@cn.ibm.com)
 Derek Higgins (derekh@redhat.com)
+Jonathan Hinson (jlhinson@us.ibm.com)
 Alex Holden (alex@alexjonasholden.com)
 Edward Hope-Morley (opentastic@gmail.com)
+Ferenc Horváth (hferenc@inf.u-szeged.hu)
 Charles Hsu (charles0126@gmail.com)
 Joanna H. Huang (joanna.huitzu.huang@gmail.com)
 Kun Huang (gareth@unitedstack.com)

@@ -111,6 +118,7 @@ Jason Johnson (jajohnson@softlayer.com)
 Brian K. Jones (bkjones@gmail.com)
 Arnaud JOST (arnaud.jost@ovh.net)
 Kiyoung Jung (kiyoung.jung@kt.com)
+Harshada Mangesh Kakad (harshadak@metsi.co.uk)
 Takashi Kajinami (kajinamit@nttdata.co.jp)
 Matt Kassawara (mkassawara@gmail.com)
 Morita Kazutaka (morita.kazutaka@gmail.com)

@@ -136,6 +144,8 @@ Eohyung Lee (liquidnuker@gmail.com)
 Zhao Lei (zhaolei@cn.fujitsu.com)
 Jamie Lennox (jlennox@redhat.com)
 Tong Li (litong01@us.ibm.com)
+Ke Liang (ke.liang@easystack.cn)
+Peter Lisak (peter.lisak@firma.seznam.cz)
 Changbin Liu (changbin.liu@gmail.com)
 Jing Liuqing (jing.liuqing@99cloud.net)
 Victor Lowther (victor.lowther@gmail.com)

@@ -143,6 +153,7 @@ Sergey Lukjanov (slukjanov@mirantis.com)
 Zhongyue Luo (zhongyue.nah@intel.com)
 Paul Luse (paul.e.luse@intel.com)
 Christopher MacGown (chris@pistoncloud.com)
+Ganesh Maharaj Mahalingam (ganesh.mahalingam@intel.com)
 Dragos Manolescu (dragosm@hp.com)
 Ben Martin (blmartin@us.ibm.com)
 Steve Martinelli (stevemar@ca.ibm.com)

@@ -152,7 +163,7 @@ Nakagawa Masaaki (nakagawamsa@nttdata.co.jp)
 Dolph Mathews (dolph.mathews@gmail.com)
 Kenichiro Matsuda (matsuda_kenichi@jp.fujitsu.com)
 Michael Matur (michael.matur@gmail.com)
-Donagh McCabe (donagh.mccabe@hp.com)
+Donagh McCabe (donagh.mccabe@hpe.com)
 Andy McCrae (andy.mccrae@gmail.com)
 Paul McMillan (paul.mcmillan@nebula.com)
 Ewan Mellor (ewan.mellor@citrix.com)

@@ -168,19 +179,22 @@ Maru Newby (mnewby@internap.com)
 Newptone (xingchao@unitedstack.com)
 Colin Nicholson (colin.nicholson@iomart.com)
 Zhenguo Niu (zhenguo@unitedstack.com)
+Catherine Northcott (catherine@northcott.nz)
 Ondrej Novy (ondrej.novy@firma.seznam.cz)
 Timothy Okwii (tokwii@cisco.com)
 Matthew Oliver (matt@oliver.net.au)
 Hisashi Osanai (osanai.hisashi@jp.fujitsu.com)
-Eamonn O'Toole (eamonn.otoole@hp.com)
+Eamonn O'Toole (eamonn.otoole@hpe.com)
 James Page (james.page@ubuntu.com)
 Prashanth Pai (ppai@redhat.com)
+Venkateswarlu Pallamala (p.venkatesh551@gmail.com)
 Pawel Palucki (pawel.palucki@gmail.com)
 Alex Pecoraro (alex.pecoraro@emc.com)
 Sascha Peilicke (saschpe@gmx.de)
 Constantine Peresypkin (constantine.peresypk@rackspace.com)
 Dieter Plaetinck (dieter@vimeo.com)
 Dan Prince (dprince@redhat.com)
+Sivasathurappan Radhakrishnan (siva.radhakrishnan@intel.com)
 Sarvesh Ranjan (saranjan@cisco.com)
 Falk Reimann (falk.reimann@sap.com)
 Brian Reitz (brian.reitz@oracle.com)

@@ -198,7 +212,7 @@ Shilla Saebi (shilla.saebi@gmail.com)
 Atsushi Sakai (sakaia@jp.fujitsu.com)
 Cristian A Sanchez (cristian.a.sanchez@intel.com)
 Christian Schwede (cschwede@redhat.com)
-Mark Seger (Mark.Seger@hp.com)
+Mark Seger (mark.seger@hpe.com)
 Azhagu Selvan SP (tamizhgeek@gmail.com)
 Alexandra Settle (alexandra.settle@rackspace.com)
 Andrew Clay Shafer (acs@parvuscaptus.com)

@@ -212,6 +226,7 @@ Pradeep Kumar Singh (pradeep.singh@nectechnologies.in)
 Liu Siqi (meizu647@gmail.com)
 Adrian Smith (adrian_f_smith@dell.com)
 Jon Snitow (otherjon@swiftstack.com)
+Emile Snyder (emile.snyder@gmail.com)
 Emett Speer (speer.emett@gmail.com)
 TheSriram (sriram@klusterkloud.com)
 Jeremy Stanley (fungi@yuggoth.org)

@@ -234,7 +249,9 @@ Dmitry Ukov (dukov@mirantis.com)
 Vincent Untz (vuntz@suse.com)
 Daniele Valeriani (daniele@dvaleriani.net)
 Koert van der Veer (koert@cloudvps.com)
+Béla Vancsics (vancsics@inf.u-szeged.hu)
 Vladimir Vechkanov (vvechkanov@mirantis.com)
+venkatamahesh (venkatamaheshkotha@gmail.com)
 Gil Vernik (gilv@il.ibm.com)
 Hou Ming Wang (houming.wang@easystack.cn)
 Shane Wang (shane.wang@intel.com)

@@ -248,7 +265,7 @@ Ye Jia Xu (xyj.asmy@gmail.com)
 Alex Yang (alex890714@gmail.com)
 Lin Yang (lin.a.yang@intel.com)
 Yee (mail.zhang.yee@gmail.com)
-Guang Yee (guang.yee@hp.com)
+Guang Yee (guang.yee@hpe.com)
 Pete Zaitcev (zaitcev@kotori.zaitcev.us)
 Hua Zhang (zhuadl@cn.ibm.com)
 Jian Zhang (jian.zhang@intel.com)
CHANGELOG (89 changes)

@@ -1,3 +1,92 @@
+swift (2.6.0)
+
+    * Dependency changes
+
+      - Updated minimum version of eventlet to 0.17.4 to support IPv6.
+
+      - Updated the minimum version of PyECLib to 1.0.7.
+
+    * The ring rebalancing algorithm was updated to better handle edge cases
+      and to give better (more balanced) rings in the general case. New rings
+      will have better initial placement, capacity adjustments will move less
+      data for better balance, and existing rings that were imbalanced should
+      start to become better balanced as they go through rebalance cycles.
+
+    * Added container and account reverse listings.
+
+      A GET request to an account or container resource with a "reverse=true"
+      query parameter will return the listing in reverse order. When
+      iterating over pages of reverse listings, the relative order of marker
+      and end_marker are swapped.
+
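A quick illustration of the new parameter (the account and container names
here are placeholders):

    GET /v1/AUTH_test/mycontainer?reverse=true

The same query parameter on an account resource reverses the container
listing.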
+    * Storage policies now support having more than one name.
+
+      This allows operators to fix a typo without breaking existing clients,
+      or, alternatively, have "short names" for policies. This is implemented
+      with the "aliases" config key in the storage policy config in
+      swift.conf. The aliases value is a list of names that the storage
+      policy may also be identified by. The storage policy "name" is used to
+      report the policy to users (eg in container headers). The aliases have
+      the same naming restrictions as the policy's primary name.
+
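A minimal sketch of a policy section using the new key (the policy name and
alias values are invented for illustration):

    [storage-policy:1]
    name = silver
    aliases = ag, shiny

Clients could then reference this policy as "silver", "ag", or "shiny".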
+    * The object auditor learned the "interval" config value to control the
+      time between each audit pass.
+
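Assuming the value lives in the [object-auditor] section of
object-server.conf (the section placement is an assumption here), the
setting would look like:

    [object-auditor]
    interval = 30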
+    * `swift-recon --all` now includes the config checksum check.
+
+    * `swift-init` learned the --kill-after-timeout option to force a service
+      to quit (SIGKILL) after a designated time.
+
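A sketch of the invocation (the server name is chosen for illustration):

    swift-init object-server stop --kill-after-timeout

With the flag set, swift-init sends SIGKILL once the kill-wait period
expires instead of waiting indefinitely.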
+    * `swift-recon` now correctly shows timestamps in UTC instead of local
+      time.
+
+    * Fixed bug where `swift-ring-builder` couldn't select device id 0.
+
+    * Documented the previously undocumented
+      `swift-ring-builder pretend_min_part_hours_passed` command.
+
+    * The "node_timeout" config value now accepts decimal values.
+
+    * `swift-ring-builder` now properly removes devices with zero weight.
+
+    * `swift-init` return codes are updated via "--strict" and "--non-strict"
+      options. Please see the usage string for more information.
+
+    * `swift-ring-builder` now reports the min_part_hours lockout time
+      remaining.
+
+    * Container sync has been improved to more quickly find and iterate over
+      the containers to be synced. This reduces server load and lowers the
+      time required to see data propagate between two clusters. Please see
+      http://swift.openstack.org/overview_container_sync.html for more details
+      about the new on-disk structure for tracking synchronized containers.
+
+    * A container POST will now update that container's put-timestamp value.
+
+    * TempURL header restrictions are now exposed in /info.
+
+    * Error messages on static large object manifest responses have been
+      greatly improved.
+
+    * Closed a bug where an unfinished read of a large object would leak a
+      socket file descriptor and a small amount of memory. (CVE-2016-0738)
+
+    * Fixed an issue where a zero-byte object PUT with an incorrect Etag
+      would return a 503.
+
+    * Fixed an error when a static large object manifest references the same
+      object more than once.
+
+    * Improved performance of finding handoff nodes if a zone is empty.
+
+    * Fixed duplication of headers in Access-Control-Expose-Headers on CORS
+      requests.
+
+    * Fixed handling of IPv6 connections to memcache pools.
+
+    * Continued work towards python 3 compatibility.
+
+    * Various other minor bug fixes and improvements.
+
 swift (2.5.0, OpenStack Liberty)
 
 * Added the ability to specify ranges for Static Large Object (SLO)
@@ -89,8 +89,8 @@ Specs
 The [``swift-specs``](https://github.com/openstack/swift-specs) repo
 can be used for collaborative design work before a feature is implemented.
 
-Openstack's gerrit system is used to collaborate on the design spec. Once
-approved Openstack provides a doc site to easily read these [specs](http://specs.openstack.org/openstack/swift-specs/)
+OpenStack's gerrit system is used to collaborate on the design spec. Once
+approved OpenStack provides a doc site to easily read these [specs](http://specs.openstack.org/openstack/swift-specs/)
 
 A spec is needed for more impactful features. Coordinating a feature between
 many devs (especially across companies) is a great example of when a spec is
@@ -23,7 +23,6 @@ from time import time
 
 from eventlet import GreenPool, hubs, patcher, Timeout
 from eventlet.pools import Pool
-from eventlet.green import urllib2
 
 from swift.common import direct_client
 try:

@@ -174,8 +173,8 @@ def object_dispersion_report(coropool, connpool, account, object_ring,
     try:
         objects = [o['name'] for o in conn.get_container(
             container, prefix='dispersion_', full_listing=True)[1]]
-    except urllib2.HTTPError as err:
-        if err.getcode() != 404:
+    except ClientException as err:
+        if err.http_status != 404:
             raise
 
         print >>stderr, 'No objects to query. Has ' \
@@ -200,6 +200,10 @@ if __name__ == '__main__':
                             (mount_point))
                 comment_fstab(mount_point)
                 unmounts += 1
+            else:
+                logger.info("Detected %s with %d errors "
+                            "(Device not unmounted)" %
+                            (mount_point, count))
             recon_errors[mount_point] = count
             total_errors += count
     recon_file = recon_cache_path + "/drive.recon"
@@ -74,6 +74,11 @@ def main():
                       help="Return zero status code even if some config is "
                            "missing. Default mode if any server is a glob or "
                            "one of aliases `all`, `main` or `rest`.")
+    # SIGKILL daemon after kill_wait period
+    parser.add_option('--kill-after-timeout', dest='kill_after_timeout',
+                      action='store_true',
+                      help="Kill daemon and all childs after kill-wait "
+                           "period.")
 
     options, args = parser.parse_args()
 
@@ -59,7 +59,7 @@ Lists old Swift processes.
             listing.append((str(hours), pid, args))
 
     if not listing:
-        exit()
+        sys.exit()
 
     hours_len = len('Hours')
     pid_len = len('PID')

@@ -93,7 +93,7 @@ Example (sends SIGTERM to all orphaned Swift processes older than two hours):
             listing.append((str(hours), pid, args))
 
     if not listing:
-        exit()
+        sys.exit()
 
     hours_len = len('Hours')
     pid_len = len('PID')
@@ -102,8 +102,10 @@ adapted_logger. The default is empty.
 If set, log_udp_host will override log_address.
 .IP "\fBlog_udp_port\fR
 UDP log port, the default is 514.
-.IP \fBlog_statsd_host\fR = localhost
-log_statsd_* enable StatsD logging.
+.IP \fBlog_statsd_host\fR
+StatsD server. IPv4/IPv6 addresses and hostnames are
+supported. If a hostname resolves to an IPv4 and IPv6 address, the IPv4
+address will be used.
 .IP \fBlog_statsd_port\fR
 The default is 8125.
 .IP \fBlog_statsd_default_sample_rate\fR

@@ -108,8 +108,10 @@ adapted_logger. The default is empty.
 If set, log_udp_host will override log_address.
 .IP "\fBlog_udp_port\fR
 UDP log port, the default is 514.
-.IP \fBlog_statsd_host\fR = localhost
-log_statsd_* enable StatsD logging.
+.IP \fBlog_statsd_host\fR
+StatsD server. IPv4/IPv6 addresses and hostnames are
+supported. If a hostname resolves to an IPv4 and IPv6 address, the IPv4
+address will be used.
 .IP \fBlog_statsd_port\fR
 The default is 8125.
 .IP \fBlog_statsd_default_sample_rate\fR

@@ -76,8 +76,10 @@ adapted_logger. The default is empty.
 If set, log_udp_host will override log_address.
 .IP "\fBlog_udp_port\fR
 UDP log port, the default is 514.
-.IP \fBlog_statsd_host\fR = localhost
-log_statsd_* enable StatsD logging.
+.IP \fBlog_statsd_host\fR
+StatsD server. IPv4/IPv6 addresses and hostnames are
+supported. If a hostname resolves to an IPv4 and IPv6 address, the IPv4
+address will be used.
 .IP \fBlog_statsd_port\fR
 The default is 8125.
 .IP \fBlog_statsd_default_sample_rate\fR

@@ -111,8 +111,10 @@ adapted_logger. The default is empty.
 If set, log_udp_host will override log_address.
 .IP "\fBlog_udp_port\fR
 UDP log port, the default is 514.
-.IP \fBlog_statsd_host\fR = localhost
-log_statsd_* enable StatsD logging.
+.IP \fBlog_statsd_host\fR
+StatsD server. IPv4/IPv6 addresses and hostnames are
+supported. If a hostname resolves to an IPv4 and IPv6 address, the IPv4
+address will be used.
 .IP \fBlog_statsd_port\fR
 The default is 8125.
 .IP \fBlog_statsd_default_sample_rate\fR
@@ -365,7 +367,7 @@ Depending on the method of deployment you may need to create this directory manually
 and ensure that swift has read/write.The default is /var/cache/swift.
 .IP "\fBhandoffs_first\fR"
 The flag to replicate handoffs prior to canonical partitions.
-It allows to force syncing and deleting handoffs quickly.
+It allows one to force syncing and deleting handoffs quickly.
 If set to a True value(e.g. "True" or "1"), partitions
 that are not supposed to be on the node will be replicated first.
 The default is false.

@@ -425,7 +427,7 @@ Depending on the method of deployment you may need to create this directory manually
 and ensure that swift has read/write.The default is /var/cache/swift.
 .IP "\fBhandoffs_first\fR"
 The flag to replicate handoffs prior to canonical partitions.
-It allows to force syncing and deleting handoffs quickly.
+It allows one to force syncing and deleting handoffs quickly.
 If set to a True value(e.g. "True" or "1"), partitions
 that are not supposed to be on the node will be replicated first.
 The default is false.
@@ -118,8 +118,10 @@ adapted_logger. The default is empty.
 If set, log_udp_host will override log_address.
 .IP "\fBlog_udp_port\fR
 UDP log port, the default is 514.
-.IP \fBlog_statsd_host\fR = localhost
-log_statsd_* enable StatsD logging.
+.IP \fBlog_statsd_host\fR
+StatsD server. IPv4/IPv6 addresses and hostnames are
+supported. If a hostname resolves to an IPv4 and IPv6 address, the IPv4
+address will be used.
 .IP \fBlog_statsd_port\fR
 The default is 8125.
 .IP \fBlog_statsd_default_sample_rate\fR
@@ -328,8 +330,8 @@ This allows middleware higher in the WSGI pipeline to override auth
 processing, useful for middleware such as tempurl and formpost. If you know
 you're not going to use such middleware and you want a bit of extra security,
 you can set this to false.
-.IP \fBis_admin [DEPRECATED]\fR
-If is_admin is true, a user whose username is the same as the project name
+.IP \fBis_admin\fR
+[DEPRECATED] If is_admin is true, a user whose username is the same as the project name
 and who has any role on the project will have access rights elevated to be
 the same as if the user had an operator role. Note that the condition
 compares names rather than UUIDs. This option is deprecated.
@@ -384,7 +386,8 @@ Sets the maximum number of connections to each memcached server per worker.
 If not set in the configuration file, the value for memcache_servers will be
 read from /etc/swift/memcache.conf (see memcache.conf-sample) or lacking that
 file, it will default to 127.0.0.1:11211. You can specify multiple servers
-separated with commas, as in: 10.1.2.3:11211,10.1.2.4:11211.
+separated with commas, as in: 10.1.2.3:11211,10.1.2.4:11211. (IPv6
+addresses must follow rfc3986 section-3.2.2, i.e. [::1]:11211)
 .IP \fBmemcache_serialization_support\fR
 This sets how memcache values are serialized and deserialized:
 .RE
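For example, a pool mixing IPv4 and IPv6 endpoints in the format described
above would be written as:

    memcache_servers = 10.1.2.3:11211,[::1]:11211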
@@ -665,7 +668,9 @@ unset.
 Default is 514.
 .IP \fBaccess_log_statsd_host\fR
 You can use log_statsd_* from [DEFAULT], or override them here.
-Default is localhost.
+StatsD server. IPv4/IPv6 addresses and hostnames are
+supported. If a hostname resolves to an IPv4 and IPv6 address, the IPv4
+address will be used.
 .IP \fBaccess_log_statsd_port\fR
 Default is 8125.
 .IP \fBaccess_log_statsd_default_sample_rate\fR
@@ -949,7 +954,7 @@ chunk of data from the object servers while serving GET / HEAD requests.
 Timeouts from these requests can be recovered from so setting this to
 something lower than node_timeout would provide quicker error recovery
 while allowing for a longer timeout for non-recoverable requests (PUTs).
-Defaults to node_timeout, should be overriden if node_timeout is set to a
+Defaults to node_timeout, should be overridden if node_timeout is set to a
 high number to prevent client timeouts from firing before the proxy server
 has a chance to retry.
 .IP \fBconn_timeout\fR
@@ -997,11 +1002,9 @@ The valid values for sorting_method are "affinity", "shuffle", and "timing".
 .IP \fBtiming_expiry\fR
 If the "timing" sorting_method is used, the timings will only be valid for
 the number of seconds configured by timing_expiry. The default is 300.
-.IP \fBmax_large_object_get_time\fR
-The maximum time (seconds) that a large object connection is allowed to last. The default is 86400.
 .IP \fBrequest_node_count\fR
-Set to the number of nodes to contact for a normal request. You can use
-'* replicas' at the end to have it use the number given times the number of
+Set to the number of nodes to contact for a normal request. You can use '* replicas'
+at the end to have it use the number given times the number of
 replicas for the ring being used for the request. The default is '2 * replicas'.
 .IP \fBread_affinity\fR
 Which backend servers to prefer on reads. Format is r<N> for region
@@ -111,6 +111,7 @@ allows one to use the keywords such as "all", "main" and "rest" for the <server>
 .IP "-r RUN_DIR, --run-dir=RUN_DIR directory where the pids will be stored (default /var/run/swift)
 .IP "--strict return non-zero status code if some config is missing. Default mode if server is explicitly named."
 .IP "--non-strict return zero status code even if some config is missing. Default mode if server is one of aliases `all`, `main` or `rest`."
+.IP "--kill-after-timeout kill daemon and all children after kill-wait period."
 .PD
 .RE
 
@@ -17,7 +17,7 @@ user = <your-user-name>
 # log_udp_port = 514
 #
 # You can enable StatsD logging here:
-# log_statsd_host = localhost
+# log_statsd_host =
 # log_statsd_port = 8125
 # log_statsd_default_sample_rate = 1.0
 # log_statsd_sample_rate_factor = 1.0

@@ -17,7 +17,7 @@ log_level = INFO
 # log_udp_port = 514
 #
 # You can enable StatsD logging here:
-# log_statsd_host = localhost
+# log_statsd_host =
 # log_statsd_port = 8125
 # log_statsd_default_sample_rate = 1.0
 # log_statsd_sample_rate_factor = 1.0
@@ -1,5 +1,6 @@
 [swift-hash]
 # random unique strings that can never change (DO NOT LOSE)
+# Use only printable chars (python -c "import string; print(string.printable)")
 swift_hash_path_prefix = changeme
 swift_hash_path_suffix = changeme
 

@@ -15,6 +16,6 @@ policy_type = replication
 [storage-policy:2]
 name = ec42
 policy_type = erasure_coding
-ec_type = jerasure_rs_vand
+ec_type = liberasurecode_rs_vand
 ec_num_data_fragments = 4
 ec_num_parity_fragments = 2
@@ -463,7 +463,12 @@ Example::
 
 Assuming 3 replicas, this configuration will make object PUTs try
 storing the object's replicas on up to 6 disks ("2 * replicas") in
-region 1 ("r1").
+region 1 ("r1"). The proxy server tries to find 3 devices for storing the
+object. If a device is unavailable, it queries the ring for the 4th
+device and so on up to the 6th device. If the 6th disk is still unavailable,
+the last replica will be sent to another region. This does not mean there
+will be 6 replicas in region 1.
+
 
 You should be aware that, if you have data coming into SF faster than
 your link to NY can transfer it, then your cluster's data distribution
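The proxy-server.conf entries this example refers to would look roughly like
the following sketch ("r1" and the node count are taken from the text above;
the option names are the proxy server's write-affinity settings):

    write_affinity = r1
    write_affinity_node_count = 2 * replicas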
@@ -624,7 +629,11 @@ configuration entries (see the sample configuration files)::
     log_statsd_metric_prefix = [empty-string]
 
 If `log_statsd_host` is not set, this feature is disabled. The default values
-for the other settings are given above.
+for the other settings are given above. The `log_statsd_host` can be a
+hostname, an IPv4 address, or an IPv6 address (not surrounded with brackets, as
+this is unnecessary since the port is specified separately). If a hostname
+resolves to an IPv4 address, an IPv4 socket will be used to send StatsD UDP
+packets, even if the hostname would also resolve to an IPv6 address.
 
 .. _StatsD: http://codeascraft.etsy.com/2011/02/15/measure-anything-measure-everything/
 .. _Graphite: http://graphite.wikidot.com/
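Putting the pieces together, a minimal StatsD configuration (the host value
is a placeholder) would be:

    log_statsd_host = statsd.example.com
    log_statsd_port = 8125
    log_statsd_default_sample_rate = 1.0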
@@ -675,8 +684,7 @@ of async_pendings in real-time, but will not tell you the current number of
 async_pending container updates on disk at any point in time.
 
 Note also that the set of metrics collected, their names, and their semantics
-are not locked down and will change over time. StatsD logging is currently in
-a "beta" stage and will continue to evolve.
+are not locked down and will change over time.
 
 Metrics for `account-auditor`:
 
@@ -27,7 +27,7 @@ request.
 
 The format of the form **POST** request is:
 
 **Example 1.14. Form POST format**
 
 .. code::
 

@@ -140,7 +140,7 @@ Form **POST** middleware uses an HMAC-SHA1 cryptographic signature. This
 signature includes these elements from the form:
 
 - The path. Starting with ``/v1/`` onwards and including a container
   name and, optionally, an object prefix. In `Example 1.15`, “HMAC-SHA1
   signature for form
   POST” the path is
   ``/v1/my_account/container/object_prefix``. Do not URL-encode the

@@ -148,15 +148,15 @@ signature includes these elements from the form:
 
 - A redirect URL. If there is no redirect URL, use the empty string.
 
 - Maximum file size. In `Example 1.15`, “HMAC-SHA1 signature for form
   POST” the
   ``max_file_size`` is ``104857600`` bytes.
 
 - The maximum number of objects to upload. In `Example 1.15`, “HMAC-SHA1
   signature for form
   POST” ``max_file_count`` is ``10``.
 
 - Expiry time. In `Example 1.15`, “HMAC-SHA1 signature for form
   POST” the expiry time
   is set to ``600`` seconds into the future.
 

@@ -167,7 +167,7 @@ signature includes these elements from the form:
 The following example code generates a signature for use with form
 **POST**:
 
 **Example 1.15. HMAC-SHA1 signature for form POST**
 
 .. code::
 
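A sketch of the signature generation the example refers to, using the
placeholder values from the list above (Python 2, matching the codebase of
this era; the key and redirect URL are assumptions):

.. code::

    import hmac
    from hashlib import sha1
    from time import time

    path = '/v1/my_account/container/object_prefix'
    redirect = 'https://example.com/some-page'
    max_file_size = 104857600
    max_file_count = 10
    expires = int(time() + 600)
    key = 'mykey'

    # The signed string is the newline-joined form elements, in this order.
    hmac_body = '%s\n%s\n%s\n%s\n%s' % (path, redirect, max_file_size,
                                        max_file_count, expires)
    signature = hmac.new(key, hmac_body, sha1).hexdigest()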
@@ -2,7 +2,7 @@
 Large objects
 =============
 
 By default, the content of an object cannot be greater than 5 GB.
 However, you can use a number of smaller objects to construct a large
 object. The large object is comprised of two types of objects:
 

@@ -40,9 +40,9 @@ Note
 
 If you make a **COPY** request by using a manifest object as the source,
 the new object is a normal, and not a segment, object. If the total size
 of the source segment objects exceeds 5 GB, the **COPY** request fails.
 However, you can make a duplicate of the manifest object and this new
 object can be larger than 5 GB.
 
 Static large objects
 ~~~~~~~~~~~~~~~~~~~~

@@ -58,7 +58,7 @@ header. This ensures that the upload cannot corrupt your data.
 List the name of each segment object along with its size and MD5
 checksum in order.
 
-Create a manifest object. Include the *``?multipart-manifest=put``*
+Create a manifest object. Include the ``multipart-manifest=put``
 query string at the end of the manifest object name to indicate that
 this is a manifest object.
 

@@ -74,7 +74,7 @@ list, where each element contains the following attributes:
 - ``size_bytes``. The size of the segment object. This value must match
   the ``Content-Length`` of that object.
 
 **Example Static large object manifest list**
 
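A sketch of such a manifest list (paths and etags are placeholders;
``size_bytes`` is the attribute described above):

.. code::

    [
        {
            "path": "/source_container1/object_one",
            "etag": "etagoftheobjectsegment_one",
            "size_bytes": 2097152
        },
        {
            "path": "/source_container2/object_two",
            "etag": "etagoftheobjectsegment_two",
            "size_bytes": 2097152
        },
        {
            "path": "/source_container3/object_three",
            "etag": "etagoftheobjectsegment_three",
            "size_bytes": 2097152
        }
    ]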
 This example shows three segment objects. You can use several containers
 and the object names do not have to conform to a specific pattern, in

@@ -112,8 +112,8 @@ set to be the MD5 checksum of the concatenated ``ETag`` values of the
 object segments. You can also set the ``Content-Type`` request header
 and custom object metadata.
 
-When the **PUT** operation sees the *``?multipart-manifest=put``* query
-parameter, it reads the request body and verifies that each segment
+When the **PUT** operation sees the ``multipart-manifest=put`` query
+string, it reads the request body and verifies that each segment
 object exists and that the sizes and ETags match. If there is a
 mismatch, the **PUT**\ operation fails.
 

@@ -124,25 +124,25 @@ this is a static object manifest.
 Normally when you perform a **GET** operation on the manifest object,
 the response body contains the concatenated content of the segment
 objects. To download the manifest list, use the
-*``?multipart-manifest=get``* query parameter. The resulting list is not
+``multipart-manifest=get`` query string. The resulting list is not
 formatted the same as the manifest you originally used in the **PUT**
 operation.
 
 If you use the **DELETE** operation on a manifest object, the manifest
 object is deleted. The segment objects are not affected. However, if you
-add the *``?multipart-manifest=delete``* query parameter, the segment
+add the ``multipart-manifest=delete`` query string, the segment
 objects are deleted and if all are successfully deleted, the manifest
 object is also deleted.
 
 To change the manifest, use a **PUT** operation with the
-*``?multipart-manifest=put``* query parameter. This request creates a
+``multipart-manifest=put`` query string. This request creates a
 manifest object. You can also update the object metadata in the usual
 way.
 
 Dynamic large objects
 ~~~~~~~~~~~~~~~~~~~~~
 
 You must segment objects that are larger than 5 GB before you can upload
 them. You then upload the segment objects like you would any other
 object and create a dynamic large manifest object. The manifest object
 tells Object Storage how to find the segment objects that comprise the

@@ -168,7 +168,7 @@ of segments to a second location and update the manifest to point to
 this new location. During the upload of the new segments, the original
 manifest is still available to download the first set of segments.
 
 **Example Upload segment of large object request: HTTP**
 
 .. code::
 

@@ -190,7 +190,7 @@ Unprocessable Entity response is returned.
 You can continue uploading segments like this example shows, prior to
 uploading the manifest.
 
 **Example Upload next segment of large object request: HTTP**
 
 .. code::
 

@@ -220,7 +220,7 @@ subsequent additional segments.
     X-Object-Manifest: {container}/{prefix}
 
 
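A manifest **PUT** along these lines (the account, container, and prefix
values are placeholders) would be:

.. code::

    PUT /v1/AUTH_test/container/my-large-object HTTP/1.1
    Host: storage.example.com
    X-Auth-Token: <token>
    X-Object-Manifest: segment_container/my-large-object/
    Content-Length: 0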
**Example Upload manifest response: HTTP**
|
**Example Upload manifest response: HTTP**
|
||||||
|
|
||||||
.. code::
|
.. code::
|
||||||
|
|
||||||
|
@ -238,67 +238,88 @@ Comparison of static and dynamic large objects
|
||||||
While static and dynamic objects have similar behavior, here are
|
While static and dynamic objects have similar behavior, here are
|
||||||
their differences:
|
their differences:
|
||||||
|
|
||||||
**Comparing static and dynamic large objects**
|
End-to-end integrity
|
||||||
|
--------------------
|
||||||
|
|
||||||
Static large object: Assured end-to-end integrity. The list of segments
|
With static large objects, integrity can be assured.
|
||||||
includes the MD5 checksum (``ETag``) of each segment. You cannot upload the
|
The list of segments may include the MD5 checksum (``ETag``) of each segment.
|
||||||
manifest object if the ``ETag`` in the list differs from the uploaded segment
|
You cannot upload the manifest object if the ``ETag`` in the list differs
|
||||||
object. If a segment is somehow lost, an attempt to download the manifest
|
from the uploaded segment object. If a segment is somehow lost, an attempt
|
||||||
object results in an error. You must upload the segment objects before you
|
to download the manifest object results in an error.
|
||||||
upload the manifest object. You cannot add or remove segment objects from the
|
|
||||||
manifest. However, you can create a completely new manifest object of the same
|
|
||||||
name with a different manifest list.
|
|
||||||
|
|
||||||
With static large objects, you can upload new segment objects or remove
|
With dynamic large objects, integrity is not guaranteed. The eventual
|
||||||
existing segments. The names must simply match the ``{prefix}`` supplied
|
|
||||||
in ``X-Object-Manifest``. The segment objects must be at least 1 MB in size
|
|
||||||
(by default). The final segment object can be any size. At most, 1000 segments
|
|
||||||
are supported (by default). The manifest list includes the container name of
|
|
||||||
each object. Segment objects can be in different containers.
|
|
||||||
|
|
||||||
Dynamic large object: End-to-end integrity is not guaranteed. The eventual
|
|
||||||
consistency model means that although you have uploaded a segment object, it
|
consistency model means that although you have uploaded a segment object, it
|
||||||
might not appear in the container listing until later. If you download the
|
might not appear in the container listing until later. If you download the
|
||||||
manifest before it appears in the container, it does not form part of the
|
manifest before it appears in the container, it does not form part of the
|
||||||
content returned in response to a **GET** request.
|
content returned in response to a **GET** request.
|
||||||
|
|
||||||
|
Upload Order
|
||||||
|
------------
|
||||||
|
|
||||||
|
With static large objects, you must upload the
|
||||||
|
segment objects before you upload the manifest object.
|
||||||
|
|
||||||
With dynamic large objects, you can upload manifest and segment objects
|
With dynamic large objects, you can upload manifest and segment objects
|
||||||
in any order. In case a premature download of the manifest occurs, we
|
in any order. In case a premature download of the manifest occurs, we
|
||||||
recommend users upload the manifest object after the segments. However,
|
recommend users upload the manifest object after the segments. However,
|
||||||
the system does not enforce the order. Segment objects can be any size. All
|
the system does not enforce the order.
|
||||||
segment objects must be in the same container.
|
|
||||||
|
Removal or addition of segment objects
|
||||||
|
--------------------------------------
|
||||||
|
|
||||||
|
With static large objects, you cannot add or
|
||||||
|
remove segment objects from the manifest. However, you can create a
|
||||||
|
completely new manifest object of the same name with a different manifest
|
||||||
|
list.
|
||||||
|
|
||||||
|
With dynamic large objects, you can upload new segment objects or remove
|
||||||
|
existing segments. The names must simply match the ``{prefix}`` supplied
|
||||||
|
in ``X-Object-Manifest``.
|
||||||
|
|
||||||
|
Segment object size and number
|
||||||
|
------------------------------
|
||||||
|
|
||||||
|
With static large objects, the segment objects must be at least 1 byte in size.
|
||||||
|
However, if the segment objects are less than 1MB (by default),
|
||||||
|
the SLO download is (by default) rate limited. At most,
|
||||||
|
1000 segments are supported (by default) and the manifest has a limit
|
||||||
|
(by default) of 2MB in size.
|
||||||
|
|
||||||
|
With dynamic large objects, segment objects can be any size.
|
||||||
|
|
||||||
|
Segment object container name
|
||||||
|
-----------------------------
|
||||||
|
|
||||||
|
With static large objects, the manifest list includes the container name of each object.
|
||||||
|
Segment objects can be in different containers.
|
||||||
|
|
||||||
|
With dynamic large objects, all segment objects must be in the same container.
|
||||||
|
|
||||||
Manifest object metadata
|
Manifest object metadata
|
||||||
------------------------
|
------------------------
|
||||||
|
|
||||||
For static large objects, the object has ``X-Static-Large-Object`` set to
|
With static large objects, the manifest object has ``X-Static-Large-Object``
|
||||||
``true``. You do not set this metadata directly. Instead the system sets
|
set to ``true``. You do not set this
|
||||||
it when you **PUT** a static manifest object.
|
metadata directly. Instead the system sets it when you **PUT** a static
|
||||||
|
manifest object.
|
||||||
|
|
||||||
For dynamic object,s the ``X-Object-Manifest`` value is the
|
With dynamic large objects, the ``X-Object-Manifest`` value is the
|
||||||
``{container}/{prefix}``, which indicates where the segment objects are
|
``{container}/{prefix}``, which indicates
|
||||||
located. You supply this request header in the **PUT** operation.
|
where the segment objects are located. You supply this request header in the
|
||||||
|
**PUT** operation.
|
||||||

Copying the manifest object
---------------------------

The semantics are the same for both static and dynamic large objects.
When copying large objects, the **COPY** operation does not create
a manifest object but a normal object with the same content as what you
would get on a **GET** request to the original manifest object.

To copy the manifest object, you include the ``multipart-manifest=get``
query string in the **COPY** request. The new object contains the same
manifest as the original. The segment objects are not copied. Instead,
both the original and new manifest objects share the same set of segment
objects.
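
A sketch of such a copy, with placeholder token, storage URL, and names:

.. code::

   # Without ?multipart-manifest=get, COPY would produce a normal object
   # holding the large object's full content rather than the manifest.
   curl -i -X COPY -H "X-Auth-Token: $TOKEN" \
       -H "Destination: videos/manifest-copy" \
       "$STORAGE_URL/videos/manifest?multipart-manifest=get"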

The Object Storage system organizes data in a hierarchy, as follows:
object versioning, at the container level.

You can bulk-delete up to 10,000 containers in a single request.

You can set a storage policy on a container with predefined names
and definitions from your cloud provider.
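
As a sketch, assuming the cluster enables the common ``bulk`` middleware
(the token, storage URL, and container names are placeholders):

.. code::

   # The body lists one URL-encoded /container (or /container/object) per
   # line; containers must be empty at the time of the delete.
   curl -i -X POST -H "X-Auth-Token: $TOKEN" \
       -H "Content-Type: text/plain" \
       --data-binary $'/old-logs-1\n/old-logs-2' \
       "$STORAGE_URL?bulk-delete"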

With the Object Storage API, you can:

- Store an unlimited number of objects. Each object can be as large
  as 5 GB, which is the default. You can configure the maximum
  object size.

- Upload and store objects of any size with large object creation.

- Compress files using content-encoding metadata.

- Override browser behavior for an object using content-disposition metadata.

- Schedule objects for deletion.

- Bulk-delete up to 10,000 objects in a single request.

Your service provider might use different default values.

============================ ============= =====
Item                         Maximum value Notes
============================ ============= =====
Number of HTTP headers       90
Length of HTTP headers       4096 bytes
Length per HTTP request line 8192 bytes
Length of HTTP request       5 GB
Length of container names    256 bytes     Cannot contain the ``/`` character.
Length of object names       1024 bytes    By default, there are no character restrictions.
============================ ============= =====

You must UTF-8-encode and then URL-encode container and object names
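
As a sketch of the encoding rule, a container named ``café`` is addressed
as follows (token and storage URL are placeholders):

.. code::

   # UTF-8 encodes "é" as the bytes C3 A9, which URL-encode to %C3%A9.
   curl -i -X PUT -H "X-Auth-Token: $TOKEN" "$STORAGE_URL/caf%C3%A9"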

the ``Content-Encoding`` metadata. This metadata enables you to indicate
that the object content is compressed without losing the identity of the
underlying media type (``Content-Type``) of the file, such as a video.

**Example Content-Encoding header request: HTTP**

This example assigns an attachment type to the ``Content-Encoding``
header that indicates how the file is downloaded:
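
A sketch of such a request, with placeholder host, token, account, and
object names:

.. code::

   PUT /v1/AUTH_account/videos/lecture.mp4 HTTP/1.1
   Host: storage.example.com
   X-Auth-Token: <token>
   Content-Type: video/mp4
   Content-Encoding: gzip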

Application Bindings
--------------------

* `java-openstack-swift <https://github.com/dkocher/java-openstack-swift>`_ - Java bindings for OpenStack Swift
* `swift_client <https://github.com/mrkamel/swift_client>`_ - Small but powerful Ruby client to interact with OpenStack Swift
* `nightcrawler_swift <https://github.com/tulios/nightcrawler_swift>`_ - This Ruby gem teleports your assets to an OpenStack Swift bucket/container
* `swift storage <https://rubygems.org/gems/swift-storage>`_ - Simple OpenStack Swift storage client.

Authentication
--------------

Alternative API
---------------

* `Swift3 <https://github.com/openstack/swift3>`_ - Amazon S3 API emulation.
* `CDMI <https://github.com/osaddon/cdmi>`_ - CDMI support
* `SwiftHLM <https://github.com/ibm-research/SwiftHLM>`_ - a middleware for using OpenStack Swift with tape and other high latency media storage backends

Benchmarking/Load Generators
----------------------------

Other
-----

* `Glance <https://github.com/openstack/glance>`_ - Provides services for discovering, registering, and retrieving virtual machine images (for OpenStack Compute [Nova], for example).
* `Better Staticweb <https://github.com/CloudVPS/better-staticweb>`_ - Makes swift containers accessible by default.
* `Swiftsync <https://github.com/stackforge/swiftsync>`_ - A massive syncer between two swift clusters.
* `Django Swiftbrowser <https://github.com/cschwede/django-swiftbrowser>`_ - Simple Django web app to access OpenStack Swift.
* `Swift-account-stats <https://github.com/enovance/swift-account-stats>`_ - Swift-account-stats is a tool to report statistics on Swift usage at tenant and global levels.
* `PyECLib <https://bitbucket.org/kmgreen2/pyeclib>`_ - High Level Erasure Code library used by Swift
* `liberasurecode <http://www.bytebucket.org/tsg-/liberasurecode>`_ - Low Level Erasure Code library used by PyECLib

# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Copyright (c) 2010-2012 OpenStack Foundation.
#
# Swift documentation build configuration file, created by

# All configuration values have a default; values that are commented out
# serve to show the default.

import datetime
import os
from swift import __version__
import subprocess
import sys

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc',
              'sphinx.ext.todo', 'sphinx.ext.coverage',
              'sphinx.ext.ifconfig', 'oslosphinx']
todo_include_todos = True

# Changing the path so that the Hudson build output contains GA code and the
# source docs do not contain the code so local, offline sphinx builds are
# "clean."
# templates_path = []
# if os.getenv('HUDSON_PUBLISH_DOCS'):
#     templates_path = ['_ga', '_templates']
# else:
#     templates_path = ['_templates']

# The suffix of source filenames.
source_suffix = '.rst'

# The encoding of source files.
# source_encoding = 'utf-8'

# The master toctree document.
master_doc = 'index'

copyright = u'%d, OpenStack Foundation' % datetime.datetime.now().year

# built documents.
#
# The short X.Y version.
version = __version__.rsplit('.', 1)[0]
# The full version, including alpha/beta/rc tags.
release = __version__

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'

# List of documents that shouldn't be included in the build.
# unused_docs = []

# List of directories, relative to source directory, that shouldn't be searched
# for source files.

exclude_trees = []

# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.

modindex_common_prefix = ['swift.']

# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
# html_theme = 'default'
# html_theme_path = ["."]
# html_theme = '_theme'

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}

# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []

# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None

# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None

# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ['_static']

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
git_cmd = ["git", "log", "--pretty=format:'%ad, commit %h'", "--date=local",
           "-n1"]
html_last_updated_fmt = subprocess.Popen(
    git_cmd, stdout=subprocess.PIPE).communicate()[0]

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}

# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}

# If false, no module index is generated.
# html_use_modindex = True

# If false, no index is generated.
# html_use_index = True

# If true, the index is split into individual pages for each letter.
# html_split_index = False

# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''

# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = ''

# Output file base name for HTML help builder.
htmlhelp_basename = 'swiftdoc'
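
# The git_cmd list above corresponds to this shell invocation, which can be
# run in a Swift checkout to preview the value html_last_updated_fmt gets:
#
#     git log --pretty=format:'%ad, commit %h' --date=local -n1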

# -- Options for LaTeX output -------------------------------------------------

# The paper size ('letter' or 'a4').
# latex_paper_size = 'letter'

# The font size ('10pt', '11pt' or '12pt').
# latex_font_size = '10pt'

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass

# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False

# Additional stuff for the LaTeX preamble.
# latex_preamble = ''

# Documents to append as an appendix to all manuals.
# latex_appendices = []

# If false, no module index is generated.
# latex_use_modindex = True

log_custom_handlers            None      Comma-separated list of functions
                                         to setup custom log handlers.
log_udp_host                             Override log_address
log_udp_port                   514       UDP log port
log_statsd_host                None      Enables StatsD logging; IPv4/IPv6
                                         address or a hostname. If a
                                         hostname resolves to an IPv4 and IPv6
                                         address, the IPv4 address will be
                                         used.
log_statsd_port                8125
log_statsd_default_sample_rate 1.0
log_statsd_sample_rate_factor  1.0
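
For example, a hedged sketch of enabling StatsD metrics in a server's
``[DEFAULT]`` section, assuming a StatsD daemon at a placeholder hostname::

    [DEFAULT]
    log_statsd_host = statsd.example.com
    log_statsd_port = 8125
    log_statsd_default_sample_rate = 1.0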

set log_address /dev/log  Logging directory
user            swift     User to run as
max_upload_time 86400     Maximum time allowed to upload an
                          object
slow            0         If > 0, Minimum time in seconds for a PUT or
                          DELETE request to complete. This is only
                          useful to simulate slow devices during testing
                          and development.
mb_per_sync     512       On PUT requests, sync file every
                          n MB
keep_cache_size 5242880   Largest object size to keep in

log_facility     LOG_LOCAL0 Syslog log facility
log_level        INFO       Logging level
log_address      /dev/log   Logging directory
log_time         3600       Frequency of status logs in seconds.
interval         30         Time in seconds to wait between
                            auditor passes
disk_chunk_size  65536      Size of chunks read during auditing
files_per_second 20         Maximum files audited per second per
                            auditor process. Should be tuned according

log_custom_handlers            None      Comma-separated list of functions
                                         to setup custom log handlers.
log_udp_host                             Override log_address
log_udp_port                   514       UDP log port
log_statsd_host                None      Enables StatsD logging; IPv4/IPv6
                                         address or a hostname. If a
                                         hostname resolves to an IPv4 and IPv6
                                         address, the IPv4 address will be
                                         used.
log_statsd_port                8125
log_statsd_default_sample_rate 1.0
log_statsd_sample_rate_factor  1.0

log_custom_handlers            None      Comma-separated list of functions
                                         to setup custom log handlers.
log_udp_host                             Override log_address
log_udp_port                   514       UDP log port
log_statsd_host                None      Enables StatsD logging; IPv4/IPv6
                                         address or a hostname. If a
                                         hostname resolves to an IPv4 and IPv6
                                         address, the IPv4 address will be
                                         used.
log_statsd_port                8125
log_statsd_default_sample_rate 1.0
log_statsd_sample_rate_factor  1.0

handlers.
log_udp_host                             Override log_address
log_udp_port                   514       UDP log port
log_statsd_host                None      Enables StatsD logging; IPv4/IPv6
                                         address or a hostname. If a
                                         hostname resolves to an IPv4 and IPv6
                                         address, the IPv4 address will be
                                         used.
log_statsd_port                8125
log_statsd_default_sample_rate 1.0
log_statsd_sample_rate_factor  1.0

object_chunk_size        65536           Chunk size to read from
client_chunk_size        65536           Chunk size to read from
                                         clients
memcache_servers         127.0.0.1:11211 Comma separated list of
                                         memcached servers
                                         ip:port or [ipv6addr]:port
memcache_max_connections 2               Max number of connections to
                                         each memcached server per
                                         worker
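
For example, a proxy-server.conf entry mixing IPv4 and IPv6 endpoints
(the addresses here are placeholders) could read::

    memcache_servers = 127.0.0.1:11211,[::1]:11211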

At Rackspace, our Proxy servers have dual quad core processors, giving us 8
cores. Our testing has shown 16 workers to be a pretty good balance when
saturating a 10g network and gives good CPU utilization.

Our Storage server processes all run together on the same servers. These
servers have dual quad core processors, for 8 cores total. We run the
Account, Container, and Object servers with 8 workers each. Most of the
background jobs are run at a concurrency of 1, with the exception of the
replicators which are run at a

For example, this command would run the functional tests using policy
'silver'::

    SWIFT_TEST_POLICY=silver tox -e func

In-process functional testing
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

If the ``test.conf`` file is not found then the functional test framework will
instantiate a set of Swift servers in the same process that executes the
functional tests. This 'in-process test' mode may also be enabled (or disabled)
by setting the environment variable ``SWIFT_TEST_IN_PROCESS`` to a true (or
false) value prior to executing `tox -e func`.

When using the 'in-process test' mode some server configuration options may be
set using environment variables:

- the optional in-memory object server may be selected by setting the
  environment variable ``SWIFT_TEST_IN_MEMORY_OBJ`` to a true value.

- the proxy-server ``object_post_as_copy`` option may be set using the
  environment variable ``SWIFT_TEST_IN_PROCESS_OBJECT_POST_AS_COPY``.

For example, this command would run the in-process mode functional tests with
the proxy-server using object_post_as_copy=False (the 'fast-POST' mode)::

    SWIFT_TEST_IN_PROCESS=1 SWIFT_TEST_IN_PROCESS_OBJECT_POST_AS_COPY=False \
        tox -e func

This particular example may also be run using the ``func-in-process-fast-post``
tox environment::

    tox -e func-in-process-fast-post

The 'in-process test' mode searches for ``proxy-server.conf`` and
``swift.conf`` config files from which it copies config options and overrides
|
||||||
Coding Style
|
Coding Style
|
||||||
------------
|
------------
|
||||||
|
|
||||||
Swift use flake8 with the OpenStack `hacking`_ module to enforce
|
Swift uses flake8 with the OpenStack `hacking`_ module to enforce
|
||||||
coding style.
|
coding style.
|
||||||
|
|
||||||
Install flake8 and hacking with pip or by the packages of your
|
Install flake8 and hacking with pip or by the packages of your
|
||||||
|
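
A sketch of running the style checks locally, assuming the repository's
usual OpenStack tox setup::

    pip install tox
    tox -e pep8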

Installing Sphinx:

#. Install sphinx (On Ubuntu: `sudo apt-get install python-sphinx`)
#. `python setup.py build_sphinx`

--------
Manpages
--------

To sanity-check a manpage change, use this command in the root
of your Swift repo::

    ./.manpages

---------------------
License and Copyright
---------------------

Installing dependencies

  sudo apt-get update
  sudo apt-get install curl gcc memcached rsync sqlite3 xfsprogs \
                       git-core libffi-dev python-setuptools \
                       liberasurecode-dev
  sudo apt-get install python-coverage python-dev python-nose \
                       python-xattr python-eventlet \
                       python-greenlet python-pastedeploy \

  sudo yum update
  sudo yum install curl gcc memcached rsync sqlite xfsprogs git-core \
                   libffi-devel xinetd liberasurecode-devel \
                   python-setuptools \
                   python-coverage python-devel python-nose \
                   pyxattr python-eventlet \
                   python-greenlet python-paste-deploy \

doesn't work, here are some good starting places to look for issues:

   cannot rate limit (unit tests generate a lot of logs very quickly).
   Open the file ``SWIFT_TEST_CONFIG_FILE`` points to, and change the
   value of ``fake_syslog`` to ``True``.

#. If you encounter a ``401 Unauthorized`` when following Step 12 where
   you check that you can ``GET`` account, use ``sudo service memcached status``
   and check if memcache is running. If memcache is not running, start it
   using ``sudo service memcached start``. Once memcache is running, rerun
   ``GET`` account.

Instructions for a Multiple Server Swift Installation
=====================================================

Please refer to the latest official
`OpenStack Installation Guides <http://docs.openstack.org/#install-guides>`_
for the most up-to-date documentation.

Object Storage installation guide for OpenStack Liberty
--------------------------------------------------------

* `openSUSE 13.2 and SUSE Linux Enterprise Server 12 <http://docs.openstack.org/liberty/install-guide-obs/swift.html>`_
* `RHEL 7, CentOS 7 <http://docs.openstack.org/liberty/install-guide-rdo/swift.html>`_
* `Ubuntu 14.04 <http://docs.openstack.org/liberty/install-guide-ubuntu/swift.html>`_

Object Storage installation guide for OpenStack Kilo
----------------------------------------------------

* `openSUSE 13.2 and SUSE Linux Enterprise Server 12 <http://docs.openstack.org/kilo/install-guide/install/zypper/content/ch_swift.html>`_
* `RHEL 7, CentOS 7, and Fedora 21 <http://docs.openstack.org/kilo/install-guide/install/yum/content/ch_swift.html>`_
* `Ubuntu 14.04 <http://docs.openstack.org/kilo/install-guide/install/apt/content/ch_swift.html>`_

Object Storage installation guide for OpenStack Juno
----------------------------------------------------

* `openSUSE 13.1 and SUSE Linux Enterprise Server 11 <http://docs.openstack.org/juno/install-guide/install/zypper/content/ch_swift.html>`_
* `RHEL 7, CentOS 7, and Fedora 20 <http://docs.openstack.org/juno/install-guide/install/yum/content/ch_swift.html>`_
* `Ubuntu 14.04 <http://docs.openstack.org/juno/install-guide/install/apt/content/ch_swift.html>`_

Object Storage installation guide for OpenStack Icehouse
---------------------------------------------------------

* `openSUSE and SUSE Linux Enterprise Server <http://docs.openstack.org/icehouse/install-guide/install/zypper/content/ch_swift.html>`_

Administrator Documentation
===========================

   admin_guide
   replication_network
   logs
   ops_runbook/index

Object Storage v1 REST API Documentation
========================================

See `Complete Reference for the Object Storage REST API <http://developer.openstack.org/api-ref-objectstorage-v1.html>`_

The following provides supporting information for the REST API:

.. toctree::
   :maxdepth: 1

   api/use_content-encoding_metadata.rst
   api/use_the_content-disposition_metadata.rst

OpenStack End User Guide
========================

The `OpenStack End User Guide <http://docs.openstack.org/user-guide>`_
has additional information on using Swift.
See the `Manage objects and containers <http://docs.openstack.org/user-guide/managing-openstack-object-storage-with-swift-cli.html>`_
section.

Source Documentation
====================

==================
General Procedures
==================

Getting a swift account stats
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. note::

   ``swift-direct`` is specific to the HPE Helion Public Cloud. Go look at
   ``swifty`` for an alternative; this is an example.

This procedure describes how you determine the swift usage for a given
swift account, that is, the number of containers, number of objects, and
total bytes used. To do this you will need the project ID.

Log onto one of the swift proxy servers.

Use swift-direct to show this account's usage:

.. code::

   $ sudo -u swift /opt/hp/swift/bin/swift-direct show AUTH_redacted-9a11-45f8-aa1c-9e7b1c7904c8
   Status: 200
   Content-Length: 0
   Accept-Ranges: bytes
   X-Timestamp: 1379698586.88364
   X-Account-Bytes-Used: 67440225625994
   X-Account-Container-Count: 1
   Content-Type: text/plain; charset=utf-8
   X-Account-Object-Count: 8436776
   Status: 200
   name: my_container count: 8436776 bytes: 67440225625994

This account has 1 container. That container has 8436776 objects. The
total bytes used is 67440225625994.

=================
Swift Ops Runbook
=================

This document contains operational procedures that Hewlett Packard
Enterprise (HPE) uses to operate and monitor the Swift system within the
HPE Helion Public Cloud. This document is an excerpt of a larger
product-specific handbook. As such, the material may appear incomplete.
The suggestions and recommendations made in this document are for our
particular environment, and may not be suitable for your environment or
situation. We make no representations concerning the accuracy, adequacy,
completeness or suitability of the information, suggestions or
recommendations. This document is provided for reference only. We are not
responsible for your use of any information, suggestions or
recommendations contained herein.

This document also contains references to certain tools that we use to
operate the Swift system within the HPE Helion Public Cloud.
Descriptions of these tools are provided for reference only, as the tools
themselves are not publicly available at this time.

- ``swift-direct``: This is similar to the ``swiftly`` tool.

.. toctree::
   :maxdepth: 2

   general.rst
   diagnose.rst
   procedures.rst
   maintenance.rst
   troubleshooting.rst

Is the system up?
~~~~~~~~~~~~~~~~~

If you have a report that Swift is down, perform the following basic checks:

#. Run swift functional tests.

#. From a server in your data center, use ``curl`` to check
   ``/healthcheck`` (see below).

#. If you have a monitoring system, check your monitoring system.

#. Check on your hardware load balancers infrastructure.

#. Run swift-recon on a proxy node.
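
As a sketch, the ``curl`` check against a placeholder proxy endpoint looks
like this; the healthcheck middleware answers ``200 OK`` with a body of
``OK``:

.. code::

   curl -i http://proxy01.example.com:8080/healthcheck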

Run swift functional tests
--------------------------

We would recommend that you set up your functional tests against your
production system.

A script for running the functional tests is located in ``swift/.functests``.

External monitoring
-------------------

- We use pingdom.com to monitor the external Swift API. We suggest the
  following:

  - Do a GET on ``/healthcheck``

  - Create a container, make it public (x-container-read:
    .r\*,.rlistings), create a small file in the container; do a GET
    on the object (a sketch of these steps follows this list)
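
A sketch of those monitoring steps with the ``swift`` CLI (container and
object names are placeholders; ``.r:*,.rlistings`` is the referrer ACL
form):

.. code::

   swift post -r '.r:*,.rlistings' monitoring
   swift upload monitoring canary.txt
   curl -i "$STORAGE_URL/monitoring/canary.txt"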

Reference information
~~~~~~~~~~~~~~~~~~~~~

Reference: Swift startup/shutdown
---------------------------------

- Use reload - not stop/start/restart.

- Try to roll sets of servers (especially proxy) in groups of less
  than 20% of your servers.

==================
Server maintenance
==================

General assumptions
~~~~~~~~~~~~~~~~~~~

- It is assumed that anyone attempting to replace hardware components
  will have already read and understood the appropriate maintenance and
  service guides.

- It is assumed that where servers need to be taken off-line for
  hardware replacement, this will be done in series, bringing the
  server back on-line before taking the next off-line.

- It is assumed that the operations directed procedure will be used for
  identifying hardware for replacement.

Assessing the health of swift
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

You can run the swift-recon tool on a Swift proxy node to get a quick
check of how Swift is doing. Please note that the numbers below are
necessarily somewhat subjective. Sometimes parameters for which we
say 'low values are good' will have pretty high values for a time. Often
if you wait a while things get better.

For example:

.. code::

   sudo swift-recon -rla
   ===============================================================================
   [2012-03-10 12:57:21] Checking async pendings on 384 hosts...
   Async stats: low: 0, high: 1, avg: 0, total: 1
   ===============================================================================

   [2012-03-10 12:57:22] Checking replication times on 384 hosts...
   [Replication Times] shortest: 1.4113877813, longest: 36.8293570836, avg: 4.86278064749
   ===============================================================================

   [2012-03-10 12:57:22] Checking load avg's on 384 hosts...
   [5m load average] lowest: 2.22, highest: 9.5, avg: 4.59578125
   [15m load average] lowest: 2.36, highest: 9.45, avg: 4.62622395833
   [1m load average] lowest: 1.84, highest: 9.57, avg: 4.5696875
   ===============================================================================

In the example above we ask for information on replication times (-r),
load averages (-l) and async pendings (-a). This is a healthy Swift
system. Rules-of-thumb for 'good' recon output are:

- Nodes that respond are up and running Swift. If all nodes respond,
  that is a good sign. But some nodes may time out. For example:

  .. code::

     -> [http://<redacted>.29:6000/recon/load:] <urlopen error [Errno 111] ECONNREFUSED>
     -> [http://<redacted>.31:6000/recon/load:] <urlopen error timed out>

- That could be okay or could require investigation.

- Low values (say < 10 for high and average) for async pendings are
  good. Higher values occur when disks are down and/or when the system
  is heavily loaded. Many simultaneous PUTs to the same container can
  drive async pendings up. This may be normal, and may resolve itself
  after a while. If it persists, one way to track down the problem is
  to find a node with high async pendings (with ``swift-recon -av | sort
  -n -k4``), then check its Swift logs. Often async pendings are high
  because a node cannot write to a container on another node. Often
  this is because the node or disk is offline or bad. This may be okay
  if we know about it.

- Low values for replication times are good. These values rise when new
  rings are pushed, and when nodes and devices are brought back on
  line.

- Our 'high' load average values are typically in the 9-15 range. If
  they are a lot bigger it is worth having a look at the systems
  pushing the average up. Run ``swift-recon -av`` to get the individual
  averages. To sort the entries with the highest at the end,
  run ``swift-recon -av | sort -n -k4``.

For comparison here is the recon output for the same system above when
two entire racks of Swift are down:

.. code::

   [2012-03-10 16:56:33] Checking async pendings on 384 hosts...
   -> http://<redacted>.22:6000/recon/async: <urlopen error timed out>
   -> http://<redacted>.18:6000/recon/async: <urlopen error timed out>
   -> http://<redacted>.16:6000/recon/async: <urlopen error timed out>
   -> http://<redacted>.13:6000/recon/async: <urlopen error timed out>
   -> http://<redacted>.30:6000/recon/async: <urlopen error timed out>
   -> http://<redacted>.6:6000/recon/async: <urlopen error timed out>
   .........
   -> http://<redacted>.5:6000/recon/async: <urlopen error timed out>
   -> http://<redacted>.15:6000/recon/async: <urlopen error timed out>
   -> http://<redacted>.9:6000/recon/async: <urlopen error timed out>
   -> http://<redacted>.27:6000/recon/async: <urlopen error timed out>
   -> http://<redacted>.4:6000/recon/async: <urlopen error timed out>
   -> http://<redacted>.8:6000/recon/async: <urlopen error timed out>
   Async stats: low: 243, high: 659, avg: 413, total: 132275
   ===============================================================================
   [2012-03-10 16:57:48] Checking replication times on 384 hosts...
   -> http://<redacted>.22:6000/recon/replication: <urlopen error timed out>
   -> http://<redacted>.18:6000/recon/replication: <urlopen error timed out>
   -> http://<redacted>.16:6000/recon/replication: <urlopen error timed out>
   -> http://<redacted>.13:6000/recon/replication: <urlopen error timed out>
   -> http://<redacted>.30:6000/recon/replication: <urlopen error timed out>
   -> http://<redacted>.6:6000/recon/replication: <urlopen error timed out>
   ............
   -> http://<redacted>.5:6000/recon/replication: <urlopen error timed out>
   -> http://<redacted>.15:6000/recon/replication: <urlopen error timed out>
   -> http://<redacted>.9:6000/recon/replication: <urlopen error timed out>
   -> http://<redacted>.27:6000/recon/replication: <urlopen error timed out>
   -> http://<redacted>.4:6000/recon/replication: <urlopen error timed out>
   -> http://<redacted>.8:6000/recon/replication: <urlopen error timed out>
   [Replication Times] shortest: 1.38144306739, longest: 112.620954418, avg: 10.2859475361
   ===============================================================================
   [2012-03-10 16:59:03] Checking load avg's on 384 hosts...
   -> http://<redacted>.22:6000/recon/load: <urlopen error timed out>
   -> http://<redacted>.18:6000/recon/load: <urlopen error timed out>
   -> http://<redacted>.16:6000/recon/load: <urlopen error timed out>
   -> http://<redacted>.13:6000/recon/load: <urlopen error timed out>
   -> http://<redacted>.30:6000/recon/load: <urlopen error timed out>
   -> http://<redacted>.6:6000/recon/load: <urlopen error timed out>
   ............
   -> http://<redacted>.15:6000/recon/load: <urlopen error timed out>
   -> http://<redacted>.9:6000/recon/load: <urlopen error timed out>
   -> http://<redacted>.27:6000/recon/load: <urlopen error timed out>
   -> http://<redacted>.4:6000/recon/load: <urlopen error timed out>
   -> http://<redacted>.8:6000/recon/load: <urlopen error timed out>
   [5m load average] lowest: 1.71, highest: 4.91, avg: 2.486375
   [15m load average] lowest: 1.79, highest: 5.04, avg: 2.506125
   [1m load average] lowest: 1.46, highest: 4.55, avg: 2.4929375
   ===============================================================================

.. note::

   The replication times and load averages are within reasonable
   parameters, even with 80 object stores down. Async pendings, however, is
   quite high. This is due to the fact that the containers on the servers
   which are down cannot be updated. When those servers come back up, async
   pendings should drop. If async pendings were at this level without an
   explanation, we have a problem.

Recon examples
~~~~~~~~~~~~~~

Here is an example of noting and tracking down a problem with recon.

Running recon shows some async pendings:

.. code::

   bob@notso:~/swift-1.4.4/swift$ ssh -q <redacted>.132.7 sudo swift-recon -alr
   ===============================================================================
   [2012-03-14 17:25:55] Checking async pendings on 384 hosts...
   Async stats: low: 0, high: 23, avg: 8, total: 3356
   ===============================================================================
   [2012-03-14 17:25:55] Checking replication times on 384 hosts...
   [Replication Times] shortest: 1.49303831657, longest: 39.6982825994, avg: 4.2418222066
   ===============================================================================
   [2012-03-14 17:25:56] Checking load avg's on 384 hosts...
   [5m load average] lowest: 2.35, highest: 8.88, avg: 4.45911458333
   [15m load average] lowest: 2.41, highest: 9.11, avg: 4.504765625
   [1m load average] lowest: 1.95, highest: 8.56, avg: 4.40588541667
   ===============================================================================
Why? Running recon again with -av swift (not shown here) tells us that
|
||||||
|
the node with the highest (23) is <redacted>.72.61. Looking at the log
|
||||||
|
files on <redacted>.72.61 we see:
|
||||||
|
|
||||||
|
.. code::
|
||||||
|
|
||||||
|
   souzab@<redacted>:~$ sudo tail -f /var/log/swift/background.log | grep -i ERROR
   Mar 14 17:28:06 <redacted> container-replicator ERROR Remote drive not mounted
   {'zone': 5, 'weight': 1952.0, 'ip': '<redacted>.204.119', 'id': 5481, 'meta': '', 'device': 'disk6', 'port': 6001}
   Mar 14 17:28:06 <redacted> container-replicator ERROR Remote drive not mounted
   {'zone': 5, 'weight': 1952.0, 'ip': '<redacted>.204.119', 'id': 5481, 'meta': '', 'device': 'disk6', 'port': 6001}
   Mar 14 17:28:09 <redacted> container-replicator ERROR Remote drive not mounted
   {'zone': 5, 'weight': 1952.0, 'ip': '<redacted>.204.20', 'id': 2311, 'meta': '', 'device': 'disk5', 'port': 6001}
   Mar 14 17:28:11 <redacted> container-replicator ERROR Remote drive not mounted
   {'zone': 5, 'weight': 1952.0, 'ip': '<redacted>.204.20', 'id': 2311, 'meta': '', 'device': 'disk5', 'port': 6001}
   Mar 14 17:28:13 <redacted> container-replicator ERROR Remote drive not mounted
   {'zone': 5, 'weight': 1952.0, 'ip': '<redacted>.204.119', 'id': 5481, 'meta': '', 'device': 'disk6', 'port': 6001}
   Mar 14 17:28:13 <redacted> container-replicator ERROR Remote drive not mounted
   {'zone': 5, 'weight': 1952.0, 'ip': '<redacted>.204.119', 'id': 5481, 'meta': '', 'device': 'disk6', 'port': 6001}
   Mar 14 17:28:15 <redacted> container-replicator ERROR Remote drive not mounted
   {'zone': 5, 'weight': 1952.0, 'ip': '<redacted>.204.20', 'id': 2311, 'meta': '', 'device': 'disk5', 'port': 6001}
   Mar 14 17:28:15 <redacted> container-replicator ERROR Remote drive not mounted
   {'zone': 5, 'weight': 1952.0, 'ip': '<redacted>.204.20', 'id': 2311, 'meta': '', 'device': 'disk5', 'port': 6001}
   Mar 14 17:28:19 <redacted> container-replicator ERROR Remote drive not mounted
   {'zone': 5, 'weight': 1952.0, 'ip': '<redacted>.204.20', 'id': 2311, 'meta': '', 'device': 'disk5', 'port': 6001}
   Mar 14 17:28:19 <redacted> container-replicator ERROR Remote drive not mounted
   {'zone': 5, 'weight': 1952.0, 'ip': '<redacted>.204.20', 'id': 2311, 'meta': '', 'device': 'disk5', 'port': 6001}
   Mar 14 17:28:20 <redacted> container-replicator ERROR Remote drive not mounted
   {'zone': 5, 'weight': 1952.0, 'ip': '<redacted>.204.119', 'id': 5481, 'meta': '', 'device': 'disk6', 'port': 6001}
   Mar 14 17:28:21 <redacted> container-replicator ERROR Remote drive not mounted
   {'zone': 5, 'weight': 1952.0, 'ip': '<redacted>.204.20', 'id': 2311, 'meta': '', 'device': 'disk5', 'port': 6001}
   Mar 14 17:28:21 <redacted> container-replicator ERROR Remote drive not mounted
   {'zone': 5, 'weight': 1952.0, 'ip': '<redacted>.204.20', 'id': 2311, 'meta': '', 'device': 'disk5', 'port': 6001}
   Mar 14 17:28:22 <redacted> container-replicator ERROR Remote drive not mounted
   {'zone': 5, 'weight': 1952.0, 'ip': '<redacted>.204.20', 'id': 2311, 'meta': '', 'device': 'disk5', 'port': 6001}

That is why this node has a lot of async pendings: a bunch of disks that
are not mounted on <redacted> and <redacted>. There may be other issues,
but clearing this up will likely drop the async pendings a fair bit, as
other nodes will be having the same problem.

Assessing the availability risk when multiple storage servers are down
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. note::

   This procedure will tell you if you have a problem; in practice,
   you will find that you rarely need to use it.

If three storage nodes (or, more precisely, three disks on three
different storage nodes) are down, there is a small but nonzero
probability that user objects, containers, or accounts will not be
available.

Procedure
---------

.. note::

   Swift has three rings: one each for objects, containers and accounts.
   This procedure should be run three times, each time specifying the
   appropriate ``*.builder`` file.

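For example, to cover all three rings you would run the builder once per
ring (assuming the default ``/etc/swift`` layout):

.. code::

   % sudo swift-ring-builder /etc/swift/object.builder
   % sudo swift-ring-builder /etc/swift/container.builder
   % sudo swift-ring-builder /etc/swift/account.builder
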
#. Determine whether all three nodes are in different Swift zones by
   running the ring builder on a proxy node to determine which zones
   the storage nodes are in. For example:

   .. code::

      % sudo swift-ring-builder /etc/swift/object.builder
      /etc/swift/object.builder, build version 1467
      2097152 partitions, 3 replicas, 5 zones, 1320 devices, 0.02 balance
      The minimum number of hours before a partition can be reassigned is 24
      Devices:  id  zone  ip address      port  name   weight   partitions  balance  meta
                 0     1  <redacted>.4    6000  disk0  1708.00        4259    -0.00
                 1     1  <redacted>.4    6000  disk1  1708.00        4260     0.02
                 2     1  <redacted>.4    6000  disk2  1952.00        4868     0.01
                 3     1  <redacted>.4    6000  disk3  1952.00        4868     0.01
                 4     1  <redacted>.4    6000  disk4  1952.00        4867    -0.01

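   If you only want the rows for the storage nodes under consideration, a
   simple filter such as the following should work (a sketch; adjust the
   address pattern to match your nodes):

   .. code::

      % sudo swift-ring-builder /etc/swift/object.builder | grep "<redacted>.4 "
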
#. Here, node <redacted>.4 is in zone 1. If two or more of the three
   nodes under consideration are in the same Swift zone, they do not
   have any ring partitions in common; there is little/no data
   availability risk if all three nodes are down.

#. If the nodes are in three distinct Swift zones, it is necessary to
   determine whether the nodes have ring partitions in common. Run
   ``swift-ring-builder`` again, this time with the ``list_parts`` option,
   and specify the nodes under consideration. For example (all on one line):

   .. code::

      % sudo swift-ring-builder /etc/swift/object.builder list_parts <redacted>.8 <redacted>.15 <redacted>.72.2
      Partition   Matches
      91          2
      729         2
      3754        2
      3769        2
      3947        2
      5818        2
      7918        2
      8733        2
      9509        2
      10233       2

#. The ``list_parts`` option to the ring builder indicates how many ring
   partitions the nodes have in common. If, as in this case, the
   first entry in the list has a ‘Matches’ column of 2 or less, there
   is no data availability risk if all three nodes are down.

#. If the ‘Matches’ column has entries equal to 3, there is some data
   availability risk if all three nodes are down. The risk is generally
   small, and is proportional to the number of entries that have a 3 in
   the Matches column. For example:

   .. code::

      Partition   Matches
      26865       3
      362367      3
      745940      3
      778715      3
      797559      3
      820295      3
      822118      3
      839603      3
      852332      3
      855965      3
      858016      3

#. A quick way to count the number of rows with 3 matches is:

   .. code::

      % sudo swift-ring-builder /etc/swift/object.builder list_parts <redacted>.8 <redacted>.15 <redacted>.72.2 | grep "3$" | wc -l

      30

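   To turn that count into a percentage of the total partition space, a
   quick sketch (2097152 is the partition count reported by the ring
   builder above):

   .. code::

      % python -c "print 100.0 * 30 / 2097152"
      0.00143051147461
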
#. In this case the nodes have 30 out of a total of 2097152 partitions
   in common; about 0.001%. The risk here is small but nonzero.
   Recall that a partition is simply a portion of the ring mapping
   space, not actual data. So having partitions in common is a necessary
   but not sufficient condition for data unavailability.

   .. note::

      We should not bring down a node for repair if it shows
      Matches entries of 3 with other nodes that are also down.

   If three nodes that have 3 partitions in common are all down, there is
   a nonzero probability that data are unavailable and we should work to
   bring some or all of the nodes up ASAP.

@ -0,0 +1,367 @@

=================================
Software configuration procedures
=================================

Fix broken GPT table (broken disk partition)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

- If a GPT table is broken, a message like the following is observed
  when the following command is run:

  .. code::

     $ sudo parted -l

  .. code::

     ...
     Error: The backup GPT table is corrupt, but the primary appears OK, so that will
     be used.
     OK/Cancel?

#. To fix this, first install the ``gdisk`` program:

   .. code::

      $ sudo aptitude install gdisk

#. Run ``gdisk`` for the particular drive with the damaged partition:

   .. code::

      $ sudo gdisk /dev/sd[a-l]
      GPT fdisk (gdisk) version 0.6.14

      Caution: invalid backup GPT header, but valid main header; regenerating
      backup header from main header.

      Warning! One or more CRCs don't match. You should repair the disk!

      Partition table scan:
      MBR: protective
      BSD: not present
      APM: not present
      GPT: damaged
      /dev/sd
      *****************************************************************************
      Caution: Found protective or hybrid MBR and corrupt GPT. Using GPT, but disk
      verification and recovery are STRONGLY recommended.
      *****************************************************************************

#. On the command prompt, type ``r`` (recovery and transformation
   options), followed by ``d`` (use main GPT header), ``v`` (verify disk)
   and finally ``w`` (write table to disk and exit). You will also need to
   enter ``Y`` when prompted in order to confirm actions.

   .. code::

      Command (? for help): r

      Recovery/transformation command (? for help): d

      Recovery/transformation command (? for help): v

      Caution: The CRC for the backup partition table is invalid. This table may
      be corrupt. This program will automatically create a new backup partition
      table when you save your partitions.

      Caution: Partition 1 doesn't begin on a 8-sector boundary. This may
      result in degraded performance on some modern (2009 and later) hard disks.

      Caution: Partition 2 doesn't begin on a 8-sector boundary. This may
      result in degraded performance on some modern (2009 and later) hard disks.

      Caution: Partition 3 doesn't begin on a 8-sector boundary. This may
      result in degraded performance on some modern (2009 and later) hard disks.

      Identified 1 problems!

      Recovery/transformation command (? for help): w

      Final checks complete. About to write GPT data. THIS WILL OVERWRITE EXISTING
      PARTITIONS!!

      Do you want to proceed, possibly destroying your data? (Y/N): Y

      OK; writing new GUID partition table (GPT).
      The operation has completed successfully.

#. Running the command:

   .. code::

      $ sudo parted /dev/sd#

   should now show that the partition is recovered and healthy again.

#. Finally, uninstall ``gdisk`` from the node:

   .. code::

      $ sudo aptitude remove gdisk

Procedure: Fix broken XFS filesystem
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

#. A filesystem may be corrupt or broken if the following output is
   observed when checking its label:

   .. code::

      $ sudo xfs_admin -l /dev/sd#
      cache_node_purge: refcount was 1, not zero (node=0x25d5ee0)
      xfs_admin: cannot read root inode (117)
      cache_node_purge: refcount was 1, not zero (node=0x25d92b0)
      xfs_admin: cannot read realtime bitmap inode (117)
      bad sb magic # 0 in AG 1
      failed to read label in AG 1

#. Run the following commands to remove the broken/corrupt filesystem and replace it.
   (This example uses the filesystem ``/dev/sdb2``.) First, replace the partition:

   .. code::

      $ sudo parted
      GNU Parted 2.3
      Using /dev/sda
      Welcome to GNU Parted! Type 'help' to view a list of commands.
      (parted) select /dev/sdb
      Using /dev/sdb
      (parted) p
      Model: HP LOGICAL VOLUME (scsi)
      Disk /dev/sdb: 2000GB
      Sector size (logical/physical): 512B/512B
      Partition Table: gpt

      Number  Start   End     Size    File system  Name                       Flags
       1      17.4kB  1024MB  1024MB  ext3                                    boot
       2      1024MB  1751GB  1750GB  xfs          sw-aw2az1-object045-disk1
       3      1751GB  2000GB  249GB                                           lvm

      (parted) rm 2
      (parted) mkpart primary 2 -1
      Warning: You requested a partition from 2000kB to 2000GB.
      The closest location we can manage is 1024MB to 1751GB.
      Is this still acceptable to you?
      Yes/No? Yes
      Warning: The resulting partition is not properly aligned for best performance.
      Ignore/Cancel? Ignore
      (parted) p
      Model: HP LOGICAL VOLUME (scsi)
      Disk /dev/sdb: 2000GB
      Sector size (logical/physical): 512B/512B
      Partition Table: gpt

      Number  Start   End     Size    File system  Name     Flags
       1      17.4kB  1024MB  1024MB  ext3                  boot
       2      1024MB  1751GB  1750GB  xfs          primary
       3      1751GB  2000GB  249GB                         lvm

      (parted) quit

#. The next step is to scrub the filesystem and format:

   .. code::

      $ sudo dd if=/dev/zero of=/dev/sdb2 bs=$((1024*1024)) count=1
      1+0 records in
      1+0 records out
      1048576 bytes (1.0 MB) copied, 0.00480617 s, 218 MB/s
      $ sudo /sbin/mkfs.xfs -f -i size=1024 /dev/sdb2
      meta-data=/dev/sdb2        isize=1024   agcount=4, agsize=106811524 blks
               =                 sectsz=512   attr=2, projid32bit=0
      data     =                 bsize=4096   blocks=427246093, imaxpct=5
               =                 sunit=0      swidth=0 blks
      naming   =version 2        bsize=4096   ascii-ci=0
      log      =internal log     bsize=4096   blocks=208616, version=2
               =                 sectsz=512   sunit=0 blks, lazy-count=1
      realtime =none             extsz=4096   blocks=0, rtextents=0

#. You should now label and mount your filesystem (see the sketch after
   this list).

#. You can now check to see if the filesystem is mounted using the command:

   .. code::

      $ mount

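As a sketch of the labeling and mounting step above, assuming the device is
``/dev/sdb2``, the label matches the ring device name (``disk2`` here), and
the conventional ``/srv/node`` mount point:

.. code::

   $ sudo xfs_admin -L disk2 /dev/sdb2
   $ sudo mount -t xfs -o noatime,nodiratime,logbufs=8 -L disk2 /srv/node/disk2
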
Procedure: Checking if an account is okay
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. note::

   ``swift-direct`` is only available in the HPE Helion Public Cloud.
   Use ``swiftly`` as an alternative.

If you have a tenant ID you can check that the account is okay as follows from a proxy:

.. code::

   $ sudo -u swift /opt/hp/swift/bin/swift-direct show <Api-Auth-Hash-or-TenantId>

The response will either be similar to a swift list of the account
containers, or an error indicating that the resource could not be found.

In the latter case you can establish if a backend database exists for
the tenantId by running the following on a proxy:

.. code::

   $ sudo -u swift swift-get-nodes /etc/swift/account.ring.gz <Api-Auth-Hash-or-TenantId>

The response will list ssh commands that will list the replicated
account databases, if they exist.

Procedure: Revive a deleted account
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Swift accounts are normally not recreated. If a tenant unsubscribes from
Swift, the account is deleted. To re-subscribe to Swift, you can create
a new tenant (new tenant ID), and subscribe to Swift. This creates a
new Swift account with the new tenant ID.

However, until the unsubscribe/new tenant process is supported, you may
hit a situation where a Swift account is deleted and the user is locked
out of Swift.

Deleting the account database files
-----------------------------------

Here is one possible solution. The containers and objects may be lost
forever. The solution is to delete the account database files and
re-create the account. This may only be done once the containers and
objects are completely deleted. This process is untested, but could
work as follows:

#. Use ``swift-get-nodes`` to locate the account's database files (on three
   servers).

#. Rename the database files (on three servers); see the sketch at the end
   of this section.

#. Use ``swiftly`` to create the account (use the original name).

Renaming account database so it can be revived
----------------------------------------------

Get the locations of the database files that hold the account data.

.. code::

   $ sudo swift-get-nodes /etc/swift/account.ring.gz AUTH_redacted-1856-44ae-97db-31242f7ad7a1

   Account    AUTH_redacted-1856-44ae-97db-31242f7ad7a1
   Container  None
   Object     None

   Partition  18914
   Hash       93c41ef56dd69173a9524193ab813e78

   Server:Port Device  15.184.9.126:6002 disk7
   Server:Port Device  15.184.9.94:6002 disk11
   Server:Port Device  15.184.9.103:6002 disk10
   Server:Port Device  15.184.9.80:6002 disk2 [Handoff]
   Server:Port Device  15.184.9.120:6002 disk2 [Handoff]
   Server:Port Device  15.184.9.98:6002 disk2 [Handoff]

   curl -I -XHEAD "http://15.184.9.126:6002/disk7/18914/AUTH_redacted-1856-44ae-97db-31242f7ad7a1"
   curl -I -XHEAD "http://15.184.9.94:6002/disk11/18914/AUTH_redacted-1856-44ae-97db-31242f7ad7a1"
   curl -I -XHEAD "http://15.184.9.103:6002/disk10/18914/AUTH_redacted-1856-44ae-97db-31242f7ad7a1"
   curl -I -XHEAD "http://15.184.9.80:6002/disk2/18914/AUTH_redacted-1856-44ae-97db-31242f7ad7a1"   # [Handoff]
   curl -I -XHEAD "http://15.184.9.120:6002/disk2/18914/AUTH_redacted-1856-44ae-97db-31242f7ad7a1"  # [Handoff]
   curl -I -XHEAD "http://15.184.9.98:6002/disk2/18914/AUTH_redacted-1856-44ae-97db-31242f7ad7a1"   # [Handoff]

   ssh 15.184.9.126 "ls -lah /srv/node/disk7/accounts/18914/e78/93c41ef56dd69173a9524193ab813e78/"
   ssh 15.184.9.94 "ls -lah /srv/node/disk11/accounts/18914/e78/93c41ef56dd69173a9524193ab813e78/"
   ssh 15.184.9.103 "ls -lah /srv/node/disk10/accounts/18914/e78/93c41ef56dd69173a9524193ab813e78/"
   ssh 15.184.9.80 "ls -lah /srv/node/disk2/accounts/18914/e78/93c41ef56dd69173a9524193ab813e78/"   # [Handoff]
   ssh 15.184.9.120 "ls -lah /srv/node/disk2/accounts/18914/e78/93c41ef56dd69173a9524193ab813e78/"  # [Handoff]
   ssh 15.184.9.98 "ls -lah /srv/node/disk2/accounts/18914/e78/93c41ef56dd69173a9524193ab813e78/"   # [Handoff]

Check that the handoff nodes do not have account databases:

.. code::

   $ ssh 15.184.9.80 "ls -lah /srv/node/disk2/accounts/18914/e78/93c41ef56dd69173a9524193ab813e78/"
   ls: cannot access /srv/node/disk2/accounts/18914/e78/93c41ef56dd69173a9524193ab813e78/: No such file or directory

If the handoff node has a database, wait for rebalancing to occur.

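A minimal sketch of the rename step mentioned earlier, assuming the paths
reported by ``swift-get-nodes`` above and an arbitrary ``.bak`` suffix
(repeat on each of the three primary servers):

.. code::

   $ ssh 15.184.9.126 "sudo mv /srv/node/disk7/accounts/18914/e78/93c41ef56dd69173a9524193ab813e78/93c41ef56dd69173a9524193ab813e78.db /srv/node/disk7/accounts/18914/e78/93c41ef56dd69173a9524193ab813e78/93c41ef56dd69173a9524193ab813e78.db.bak"
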
Procedure: Temporarily stop load balancers from directing traffic to a proxy server
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

You can stop the load balancers sending requests to a proxy server as
follows. This can be useful when a proxy is misbehaving but you need
Swift running to help diagnose the problem. By removing it from the load
balancers, customers are not impacted by the misbehaving proxy.

#. Ensure that in ``proxy-server.conf`` the ``disable_path`` variable is set to
   ``/etc/swift/disabled-by-file``.

#. Log onto the proxy node.

#. Shut down Swift as follows:

   .. code::

      sudo swift-init proxy shutdown

   .. note::

      Shutdown, not stop.

#. Create the ``/etc/swift/disabled-by-file`` file. For example:

   .. code::

      sudo touch /etc/swift/disabled-by-file

#. Optionally, restart Swift:

   .. code::

      sudo swift-init proxy start

This works because the healthcheck middleware looks for this file. If it
finds it, it returns a 503 error instead of 200/OK. This means the load balancer
should stop sending traffic to the proxy.

``/healthcheck`` will report
``FAIL: disabled by file`` if the ``disabled-by-file`` file exists.

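To confirm the proxy is now reporting itself unhealthy, you can query the
healthcheck endpoint directly (illustrative output; the exact headers will
vary):

.. code::

   $ curl -i http://<proxy-ip>/healthcheck
   HTTP/1.1 503 Service Unavailable
   ...
   FAIL: disabled by file
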
Procedure: Ad-Hoc disk performance test
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

You can get an idea of whether a disk drive is performing well as follows:

.. code::

   sudo dd bs=1M count=256 if=/dev/zero conv=fdatasync of=/srv/node/disk11/remember-to-delete-this-later

You can expect ~600MB/sec. If you get a low number, repeat the test many
times, as Swift itself may also be reading or writing to the disk, giving
a lower number.

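As a sketch, you can also check read performance using the file just
written, and then remove it (``iflag=direct`` bypasses the page cache so
you measure the disk rather than memory):

.. code::

   sudo dd bs=1M iflag=direct if=/srv/node/disk11/remember-to-delete-this-later of=/dev/null
   sudo rm /srv/node/disk11/remember-to-delete-this-later
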
@ -0,0 +1,177 @@

==============================
Further issues and resolutions
==============================

.. note::

   The urgency level in each **Action** column indicates whether or
   not it is required to take immediate action, or if the problem can be worked
   on during business hours.

.. list-table::
   :widths: 33 33 33
   :header-rows: 1

   * - **Scenario**
     - **Description**
     - **Action**
   * - ``/healthcheck`` latency is high.
     - The ``/healthcheck`` test does not tax the proxy very much so any drop in value is probably related to
       network issues, rather than the proxies being very busy. A very slow proxy might impact the average
       number, but it would need to be very slow to shift the number that much.
     - Check networks. Do a ``curl https://<ip-address>/healthcheck`` where ``<ip-address>`` is an individual
       proxy IP address to see if you can pinpoint a problem in the network.

       Urgency: If there are other indications that your system is slow, you should treat
       this as an urgent problem.
   * - A Swift process is not running.
     - You can use ``swift-init status`` to check if Swift processes are running on any
       given server.
     - Run this command:

       .. code::

          sudo swift-init all start

       Examine messages in the Swift log files to see if there are any
       error messages related to any of the Swift processes since the time you
       ran the ``swift-init`` command.

       Take any corrective actions that seem necessary.

       Urgency: If this only affects one server, and you have more than one,
       identifying and fixing the problem can wait until business hours.
       If this same problem affects many servers, then you need to take corrective
       action immediately.
   * - ntpd is not running.
     - NTP is not running.
     - Configure and start NTP.

       Urgency: For proxy servers, this is vital.
   * - Host clock is not synced to an NTP server.
     - The node's time settings do not match the NTP server's time.
       This may take some time to sync after a reboot.
     - Assuming NTP is configured and running, you have to wait until the
       times sync (see the example after this table).
   * - A Swift process has hundreds to thousands of open file descriptors.
     - May happen to any of the Swift processes.
       Known to have happened with an ``rsyslogd`` restart and where ``/tmp`` was hanging.
     - Restart the Swift processes on the affected node:

       .. code::

          % sudo swift-init all reload

       Urgency:
       If known performance problem: Immediate

       If system seems fine: Medium
   * - A Swift process is not owned by the swift user.
     - If the UID of the swift user has changed, then the processes might not be
       owned by that UID.
     - Urgency: If this only affects one server, and you have more than one,
       identifying and fixing the problem can wait until business hours.
       If this same problem affects many servers, then you need to take corrective
       action immediately.
   * - Object, account or container files are not owned by swift.
     - This typically happens if, during a reinstall or a re-image of a server, the UID
       of the swift user was changed. The data files in the object, account and container
       directories are owned by the original swift UID. As a result, the current swift
       user does not own these files.
     - Correct the UID of the swift user to reflect that of the original UID. An alternate
       action is to change the ownership of every file on all file systems. This alternate
       action is often impractical and will take considerable time.

       Urgency: If this only affects one server, and you have more than one,
       identifying and fixing the problem can wait until business hours.
       If this same problem affects many servers, then you need to take corrective
       action immediately.
   * - A disk drive has a high IO wait or service time.
     - If high IO wait times are seen for a single disk, then the disk drive is the problem.
       If most/all devices are slow, the controller is probably the source of the problem.
       The controller cache may also be misconfigured, which will cause similar long
       wait or service times.
     - As a first step, if your controllers have a cache, check that it is enabled and
       that its battery/capacitor is working.

       Second, reboot the server.
       If the problem persists, file a DC ticket to have the drive or controller replaced.
       See `Diagnose: Slow disk devices` for how to check the drive wait or service times.

       Urgency: Medium
   * - The network interface is not up.
     - Use the ``ifconfig`` and ``ethtool`` commands to determine the network state.
     - You can try restarting the interface. However, generally the interface
       (or cable) is probably broken, especially if the interface is flapping.

       Urgency: If this only affects one server, and you have more than one,
       identifying and fixing the problem can wait until business hours.
       If this same problem affects many servers, then you need to take corrective
       action immediately.
   * - Network interface card (NIC) is not operating at the expected speed.
     - The NIC is running at a slower speed than its nominal rated speed.
       For example, it is running at 100 Mb/s but the NIC is a 1 GbE NIC.
     - 1. Try resetting the interface with:

          .. code::

             sudo ethtool -s eth0 speed 1000

          ... and then run:

          .. code::

             sudo lshw -class network

          See if the speed goes to the expected value. Failing
          that, check the hardware (NIC cable/switch port).

       2. If persistent, consider shutting down the server (especially if a proxy)
          until the problem is identified and resolved. If you leave this server
          running it can have a large impact on overall performance.

       Urgency: High
   * - The interface RX/TX error count is non-zero.
     - A value of 0 is typical, but counts of 1 or 2 do not indicate a problem.
     - 1. For low numbers (for example, 1 or 2), you can simply ignore them. Numbers in the range
          3-30 probably indicate that the error count has crept up slowly over a long time.
          Consider rebooting the server to remove the report from the noise.

          Typically, when a cable or interface is bad, the error count goes to 400+; that is,
          it stands out. There may be other symptoms such as the interface going up and down or
          not running at the correct speed. A server with a high error count should be watched.

       2. If the error count continues to climb, consider taking the server down until
          it can be properly investigated. In any case, a reboot should be done to clear
          the error count.

       Urgency: High, if the error count is increasing.

   * - In a Swift log you see a message that a process has not replicated in over 24 hours.
     - The replicator has not successfully completed a run in the last 24 hours.
       This indicates that the replicator has probably hung.
     - Use ``swift-init`` to stop and then restart the replicator process.

       Urgency: Low; however, if you recently added or replaced disk drives,
       then you should treat this urgently.
   * - Container Updater has not run in 4 hour(s).
     - The service may appear to be running; however, it may be hung. Examine the Swift
       logs to see if there are any error messages relating to the container updater. This
       may potentially explain why the container updater is not running.
     - Urgency: Medium

       This may have been triggered by a recent restart of the rsyslog daemon.
       Restart the service with:

       .. code::

          sudo swift-init <service> reload
   * - Object replicator: Reports the remaining time and that time is more than 100 hours.
     - Each replication cycle the object replicator writes a log message to its log
       reporting statistics about the current cycle. This includes an estimate for the
       remaining time needed to replicate all objects. If this time is longer than
       100 hours, there is a problem with the replication process.
     - Urgency: Medium

       Restart the service with:

       .. code::

          sudo swift-init object-replicator reload

       Check that the remaining replication time is going down.

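For the NTP rows above, a quick way to confirm whether a host's clock is
actually syncing (standard NTP tooling; assumes ``ntpq`` is installed):

.. code::

   $ ntpq -p
   # A '*' in the first column marks the peer the host is currently synced to.
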
@ -0,0 +1,264 @@

====================
Troubleshooting tips
====================

Diagnose: Customer complains they receive an HTTP status 500 when trying to browse containers
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

This entry is prompted by a real customer issue and is exclusively focused on how
that problem was identified.
There are many reasons why an HTTP status of 500 could be returned. If
there are no obvious problems with the Swift object store, then it may
be necessary to take a closer look at the user's transactions.
After finding the user's Swift account, you can
search the Swift proxy logs on each Swift proxy server for
transactions from this user. The Linux ``bzgrep`` command can be used to
search all the proxy log files on a node, including the ``.bz2`` compressed
files. For example:

.. code::

   $ PDSH_SSH_ARGS_APPEND="-o StrictHostKeyChecking=no" pdsh -l <yourusername> -R ssh \
     -w <redacted>.68.[4-11,132-139],<redacted>.132.[4-11,132-139] \
     'sudo bzgrep -w AUTH_redacted-4962-4692-98fb-52ddda82a5af /var/log/swift/proxy.log*' | dshbak -c
   .
   .
   ----------------
   <redacted>.132.6
   ----------------
   Feb 29 08:51:57 sw-aw2az2-proxy011 proxy-server <redacted>.16.132
   <redacted>.66.8 29/Feb/2012/08/51/57 GET /v1.0/AUTH_redacted-4962-4692-98fb-52ddda82a5af
   /%3Fformat%3Djson HTTP/1.0 404 - - <REDACTED>_4f4d50c5e4b064d88bd7ab82 - - -
   tx429fc3be354f434ab7f9c6c4206c1dc3 - 0.0130

This shows a ``GET`` operation on the user's account.

.. note::

   The HTTP status returned is 404, Not Found, rather than 500 as reported by the user.

Using the transaction ID, ``tx429fc3be354f434ab7f9c6c4206c1dc3``, you can
search the Swift object servers' log files for this transaction ID:

.. code::

   $ PDSH_SSH_ARGS_APPEND="-o StrictHostKeyChecking=no" pdsh -l <yourusername> -R ssh \
     -w <redacted>.72.[4-67],<redacted>.[4-67],<redacted>.[4-67],<redacted>.204.[4-131] \
     'sudo bzgrep tx429fc3be354f434ab7f9c6c4206c1dc3 /var/log/swift/server.log*' | dshbak -c
   .
   .
   ----------------
   <redacted>.72.16
   ----------------
   Feb 29 08:51:57 sw-aw2az1-object013 account-server <redacted>.132.6 - -
   [29/Feb/2012:08:51:57 +0000] "GET /disk9/198875/AUTH_redacted-4962-4692-98fb-52ddda82a5af"
   404 - "tx429fc3be354f434ab7f9c6c4206c1dc3" "-" "-"
   0.0016 ""
   ----------------
   <redacted>.31
   ----------------
   Feb 29 08:51:57 node-az2-object060 account-server <redacted>.132.6 - -
   [29/Feb/2012:08:51:57 +0000] "GET /disk6/198875/AUTH_redacted-4962-
   4692-98fb-52ddda82a5af" 404 - "tx429fc3be354f434ab7f9c6c4206c1dc3" "-" "-" 0.0011 ""
   ----------------
   <redacted>.204.70
   ----------------
   Feb 29 08:51:57 sw-aw2az3-object0067 account-server <redacted>.132.6 - -
   [29/Feb/2012:08:51:57 +0000] "GET /disk6/198875/AUTH_redacted-4962-
   4692-98fb-52ddda82a5af" 404 - "tx429fc3be354f434ab7f9c6c4206c1dc3" "-" "-" 0.0014 ""

.. note::

   There are 3 ``GET`` operations to 3 different object servers that hold the 3
   replicas of this user's account. Each ``GET`` returns an HTTP status of 404,
   Not Found.

Next, use the ``swift-get-nodes`` command to determine exactly where the
user's account data is stored:

.. code::

   $ sudo swift-get-nodes /etc/swift/account.ring.gz AUTH_redacted-4962-4692-98fb-52ddda82a5af
   Account    AUTH_redacted-4962-4692-98fb-52ddda82a5af
   Container  None
   Object     None

   Partition  198875
   Hash       1846d99185f8a0edaf65cfbf37439696

   Server:Port Device  <redacted>.31:6002 disk6
   Server:Port Device  <redacted>.204.70:6002 disk6
   Server:Port Device  <redacted>.72.16:6002 disk9
   Server:Port Device  <redacted>.204.64:6002 disk11 [Handoff]
   Server:Port Device  <redacted>.26:6002 disk11 [Handoff]
   Server:Port Device  <redacted>.72.27:6002 disk11 [Handoff]

   curl -I -XHEAD "http://<redacted>.31:6002/disk6/198875/AUTH_redacted-4962-4692-98fb-52ddda82a5af"
   curl -I -XHEAD "http://<redacted>.204.70:6002/disk6/198875/AUTH_redacted-4962-4692-98fb-52ddda82a5af"
   curl -I -XHEAD "http://<redacted>.72.16:6002/disk9/198875/AUTH_redacted-4962-4692-98fb-52ddda82a5af"
   curl -I -XHEAD "http://<redacted>.204.64:6002/disk11/198875/AUTH_redacted-4962-4692-98fb-52ddda82a5af"  # [Handoff]
   curl -I -XHEAD "http://<redacted>.26:6002/disk11/198875/AUTH_redacted-4962-4692-98fb-52ddda82a5af"      # [Handoff]
   curl -I -XHEAD "http://<redacted>.72.27:6002/disk11/198875/AUTH_redacted-4962-4692-98fb-52ddda82a5af"   # [Handoff]

   ssh <redacted>.31 "ls -lah /srv/node/disk6/accounts/198875/696/1846d99185f8a0edaf65cfbf37439696/"
   ssh <redacted>.204.70 "ls -lah /srv/node/disk6/accounts/198875/696/1846d99185f8a0edaf65cfbf37439696/"
   ssh <redacted>.72.16 "ls -lah /srv/node/disk9/accounts/198875/696/1846d99185f8a0edaf65cfbf37439696/"
   ssh <redacted>.204.64 "ls -lah /srv/node/disk11/accounts/198875/696/1846d99185f8a0edaf65cfbf37439696/"  # [Handoff]
   ssh <redacted>.26 "ls -lah /srv/node/disk11/accounts/198875/696/1846d99185f8a0edaf65cfbf37439696/"      # [Handoff]
   ssh <redacted>.72.27 "ls -lah /srv/node/disk11/accounts/198875/696/1846d99185f8a0edaf65cfbf37439696/"   # [Handoff]

Check each of the primary servers, <redacted>.31, <redacted>.204.70 and <redacted>.72.16, for
this user's account. For example, on <redacted>.72.16:

.. code::

   $ ls -lah /srv/node/disk9/accounts/198875/696/1846d99185f8a0edaf65cfbf37439696/
   total 1.0M
   drwxrwxrwx 2 swift swift  98 2012-02-23 14:49 .
   drwxrwxrwx 3 swift swift  45 2012-02-03 23:28 ..
   -rw------- 1 swift swift 15K 2012-02-23 14:49 1846d99185f8a0edaf65cfbf37439696.db
   -rw-rw-rw- 1 swift swift   0 2012-02-23 14:49 1846d99185f8a0edaf65cfbf37439696.db.pending

So this user's account db, an SQLite db, is present. Use ``sqlite3`` to
check out the account:

.. code::

   $ sudo cp /srv/node/disk9/accounts/198875/696/1846d99185f8a0edaf65cfbf37439696/1846d99185f8a0edaf65cfbf37439696.db /tmp
   $ sudo sqlite3 /tmp/1846d99185f8a0edaf65cfbf37439696.db
   sqlite> .mode line
   sqlite> select * from account_stat;
              account = AUTH_redacted-4962-4692-98fb-52ddda82a5af
           created_at = 1328311738.42190
        put_timestamp = 1330000873.61411
     delete_timestamp = 1330001026.00514
      container_count = 0
         object_count = 0
           bytes_used = 0
                 hash = eb7e5d0ea3544d9def940b19114e8b43
                   id = 2de8c8a8-cef9-4a94-a421-2f845802fe90
               status = DELETED
    status_changed_at = 1330001026.00514
             metadata =

.. note::

   The status is ``DELETED``, so this account was deleted. This explains
   why the ``GET`` operations are returning 404, Not Found. Check the account
   delete date/time:

.. code::

   $ python
   >>> import time
   >>> time.ctime(1330001026.00514)
   'Thu Feb 23 12:43:46 2012'

Next, try to find the ``DELETE`` operation for this account in the proxy
server logs:

.. code::

   $ PDSH_SSH_ARGS_APPEND="-o StrictHostKeyChecking=no" pdsh -l <yourusername> -R ssh \
     -w <redacted>.68.[4-11,132-139],<redacted>.132.[4-11,132-139] \
     'sudo bzgrep AUTH_redacted-4962-4692-98fb-52ddda82a5af /var/log/swift/proxy.log* | grep -w DELETE | awk "{print \$3,\$10,\$12}"' | dshbak -c
   .
   .
   Feb 23 12:43:46 sw-aw2az2-proxy001 proxy-server 15.203.233.76 <redacted>.66.7 23/Feb/2012/12/43/46 DELETE /v1.0/AUTH_redacted-4962-4692-98fb-
   52ddda82a5af/ HTTP/1.0 204 - Apache-HttpClient/4.1.2%20%28java%201.5%29 <REDACTED>_4f458ee4e4b02a869c3aad02 - - -
   tx4471188b0b87406899973d297c55ab53 - 0.0086

From this you can see the operation that resulted in the account being deleted.

Procedure: Deleting objects
~~~~~~~~~~~~~~~~~~~~~~~~~~~

Simple case - deleting a small number of objects and containers
---------------------------------------------------------------

.. note::

   ``swift-direct`` is specific to the Hewlett Packard Enterprise Helion Public Cloud.
   Use ``swiftly`` as an alternative.

.. note::

   Object and container names are in UTF8. ``swift-direct`` accepts UTF8
   directly, not URL-encoded UTF8 (the REST API expects UTF8 and then
   URL-encoded). In practice, cut and paste of foreign language strings to
   a terminal window will produce the right result.

Hint: Use the ``head`` command before any destructive commands.

To delete a small number of objects, log into any proxy node and proceed
as follows:

Examine the object in question:

.. code::

   $ sudo -u swift /opt/hp/swift/bin/swift-direct head 132345678912345 container_name obj_name

If ``X-Object-Manifest`` or ``X-Static-Large-Object`` is set,
then this is the manifest object and the segment objects may be in another
container.

If the ``X-Object-Manifest`` attribute is set, the object is a DLO and you
need to find the names of the segment objects. For example,
if ``X-Object-Manifest`` is ``container2/seg-blah``, list the contents
of the container container2 as follows:

.. code::

   $ sudo -u swift /opt/hp/swift/bin/swift-direct show 132345678912345 container2

Pick out the objects whose names start with ``seg-blah``.
Delete the segment objects as follows:

.. code::

   $ sudo -u swift /opt/hp/swift/bin/swift-direct delete 132345678912345 container2 seg-blah01
   $ sudo -u swift /opt/hp/swift/bin/swift-direct delete 132345678912345 container2 seg-blah02
   etc

If ``X-Static-Large-Object`` is set, you need to read the contents of the
manifest. Do this by:

- Using ``swift-get-nodes`` to get the details of the object's location.
- Changing the ``-X HEAD`` to ``-X GET`` and running ``curl`` against one copy.
- This returns a JSON body listing the containers and object names of the segments.
- Deleting the objects as described above for DLO segments.

Once the segments are deleted, you can delete the object using
``swift-direct`` as described above.

Finally, use ``swift-direct`` to delete the container. A sketch of these
final deletions follows.

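A sketch of those final deletions, assuming the account and names used
above and the ``swift-direct`` syntax shown earlier:

.. code::

   $ sudo -u swift /opt/hp/swift/bin/swift-direct delete 132345678912345 container_name obj_name
   $ sudo -u swift /opt/hp/swift/bin/swift-direct delete 132345678912345 container_name
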
Procedure: Decommissioning swift nodes
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Should Swift nodes need to be decommissioned (for example, where they are
being re-purposed), it is very important to follow these steps:

#. In the case of object servers, follow the procedure for removing
   the node from the rings.
#. In the case of swift proxy servers, have the network team remove
   the node from the load balancers.
#. Open a network ticket to have the node removed from network
   firewalls.
#. Make sure that you remove the ``/etc/swift`` directory and everything in it.

@ -207,7 +207,7 @@ that the user is allowed to operate on project resources.

OpenStack Service Using Composite Tokens
----------------------------------------

Some OpenStack services such as Cinder and Glance may use
a "service account". In this mode, you configure a separate account where
the service stores project data that it manages. This account is not used
directly by the end-user. Instead, all access is done through the service.

@ -234,19 +234,19 @@ situation as follows:

  (see ``/etc/keystone/default_catalog.templates`` above). Normally
  this is ``AUTH``.
* The second item in the reseller_prefix list is the prefix used by the
  OpenStack service(s). You must configure this value (``SERVICE`` in the
  example) with whatever the other OpenStack service(s) use.
* Set the operator_roles option to contain a role or roles that end-users
  have on projects they use.
* Set the SERVICE_service_roles value to a role or roles that only the
  OpenStack service user has. Do not use a role that is assigned to
  "normal" end users. In this example, the role ``service`` is used.
  The service user is granted this role to a *single* project only. You do
  not need to make the service user a member of every project.

This configuration works as follows:

* The end-user presents a user token to an OpenStack service. The service
  then makes a Swift request to the account with the ``SERVICE`` prefix.
* The service forwards the original user token with the request. It also
  adds its own service token.

@ -15,7 +15,7 @@ Glance writes the image to a Swift container as a set of objects.

Throughout this section, the following terminology and concepts are used:

* User or end-user. This is a person making a request that will result in
  an OpenStack Service making a request to Swift.

* Project (also known as Tenant). This is the unit of resource ownership.
  While data such as snapshot images or block volume backups may be

@ -182,7 +182,7 @@ Using the HTTP_X_SERVICE_CATALOG to get Swift Account Name

The auth_token middleware populates the wsgi environment with information when
it validates the user's token. The HTTP_X_SERVICE_CATALOG item is a JSON
string containing details of the OpenStack endpoints. For Swift, this also
contains the project's Swift account name. Here is an example of a catalog
entry for Swift::

@ -236,7 +236,7 @@ requirement is that your Service User has the appropriate role. In practice:

   reseller_prefix = AUTH_, SERVICE_
   SERVICE_service_role = service

The ``service`` role should only be granted to OpenStack Services. It should
not be granted to users.

Single or multiple Service Prefixes?

@ -244,7 +244,7 @@ Single or multiple Service Prefixes?

Most of the examples used in this document used a single prefix. The
prefix, ``SERVICE`` was used. By using a single prefix, an operator is
allowing all OpenStack Services to share the same account for data
associated with a given project. For test systems or deployments well protected
on private firewalled networks, this is appropriate.

@ -270,4 +270,4 @@ Container Naming

Since a single Service Prefix is possible, container names should be prefixed
with a unique string to prevent name clashes. We suggest you use the service
type field (as used in the service catalog). For example, the Glance Service
would use "image" as a prefix.

@ -29,7 +29,7 @@ synchronization key.

Configuring Container Sync
--------------------------

Create a ``container-sync-realms.conf`` file specifying the allowable clusters
and their information::

   [realm1]

@@ -50,18 +50,18 @@ clusters that have agreed to allow container syncing with each other. Realm

names will be considered case insensitive.

The key is the overall cluster-to-cluster key used in combination with the
external users' key that they set on their containers'
``X-Container-Sync-Key`` metadata header values. These keys will be used to
sign each request the container sync daemon makes and to validate each
incoming container sync request.

The key2 is optional and is an additional key against which incoming requests
will be checked. This allows you to rotate keys if you wish; you move the
existing key to key2 and make a new key value.

Any values in the realm section whose names begin with ``cluster_`` will
indicate the name and endpoint of a cluster and will be used by external users
in their containers' ``X-Container-Sync-To`` metadata header values with the
format "//realm_name/cluster_name/account_name/container_name". Realm and
cluster names are considered case insensitive.
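Putting those pieces together, a minimal two-cluster realm might look like the
following sketch (the realm and cluster names, endpoints, and key values are
illustrative only)::

    [realm1]
    key = realm1key
    key2 = realm1key2
    cluster_clustername1 = https://host1/v1/
    cluster_clustername2 = https://host2/v1/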
@@ -71,7 +71,7 @@ container servers, since that is where the container sync daemon runs. Note

that the endpoint ends with /v1/ and that the container sync daemon will then
add the account/container/obj name after that.

Distribute this ``container-sync-realms.conf`` file to all your proxy servers
and container servers.

You also need to add the container_sync middleware to your proxy pipeline. It
@@ -95,7 +95,7 @@ section, Configuring Container Sync, for the new-style.

With the old-style, the Swift cluster operator must allow synchronization with
a set of hosts before the user can enable container synchronization. First, the
backend container server needs to be given this list of hosts in the
``container-server.conf`` file::

    [DEFAULT]
    # This is a comma separated list of hosts allowed in the
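The comment above is truncated by the hunk boundary; in the sample config the
option being set is ``allowed_sync_hosts``, which would be filled in roughly
like this sketch (hostnames are illustrative only)::

    allowed_sync_hosts = host1,host2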
@@ -170,8 +170,8 @@ we'll make next::

The ``-t`` indicates the cluster to sync to, which is the realm name of the
section from ``container-sync-realms.conf``, followed by the cluster name from
that section (without the ``cluster_`` prefix), followed by the account and
container names we want to sync to. The ``-k`` specifies the secret key the
two containers will share for synchronization; this is the user key, and the
cluster key in ``container-sync-realms.conf`` will also be used behind the
scenes.
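As a concrete shape for such a command (the endpoints, account hash, and key
below are illustrative only)::

    $ swift -A http://cluster1/auth/v1.0 -U test:tester -K testing post \
      -t '//realm1/clustername2/AUTH_<account>/container2' \
      -k 'secret' container1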
@@ -195,8 +195,18 @@ as it gets synchronized over to the second::

        list container2

    [Nothing there yet, so we wait a bit...]

.. note::

    If you're an operator running SAIO and just testing, each time you
    configure a container for synchronization and place objects in the
    source container you will need to ensure that container-sync runs
    before attempting to retrieve objects from the target container.
    That is, you need to run::

        swift-init container-sync once

Now expect to see objects copied from the first container to the second::

    $ swift -A http://cluster2/auth/v1.0 -U test2:tester2 -K testing2 \
        list container2
@@ -340,13 +350,34 @@ synchronize to the second, we could have used this curl command::

What's going on behind the scenes, in the cluster?
--------------------------------------------------

Container ring devices have a directory called ``containers``, where container
databases reside. In addition to ``containers``, each container ring device
also has a directory called ``sync-containers``. ``sync-containers`` holds
symlinks to container databases that were configured for container sync using
``x-container-sync-to`` and ``x-container-sync-key`` metadata keys.

The swift-container-sync process does the job of sending updates to the remote
container. This is done by scanning ``sync-containers`` for container
databases. For each container db found, newer rows since the last sync will
trigger PUTs or DELETEs to the other container.

``sync-containers`` is maintained as follows: whenever the container-server
processes a PUT or a POST request that carries ``x-container-sync-to`` and
``x-container-sync-key`` metadata keys, the server creates a symlink to the
container database in ``sync-containers``. Whenever the container server
deletes a synced container, the appropriate symlink is deleted from
``sync-containers``.

In addition to the container-server, the container-replicator process does the
job of identifying containers that should be synchronized. This is done by
scanning the local devices for container databases and checking for
``x-container-sync-to`` and ``x-container-sync-key`` metadata values. If they
exist then a symlink to the container database is created in a
``sync-containers`` sub-directory on the same device.

Similarly, when the container sync metadata keys are deleted, the container
server and container-replicator take care of deleting the symlinks from
``sync-containers``.
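In rough pseudocode (the names are illustrative, not Swift's actual
internals), the per-database scan described above looks like::

    def sync_container(broker, remote, sync_point):
        # Replay rows added since the last sync point as PUTs/DELETEs
        # against the remote container.
        for row in broker.get_items_since(sync_point, limit=1000):
            if row['deleted']:
                remote.delete_object(row['name'])
            else:
                remote.put_object(row['name'])
            sync_point = row['ROWID']
        return sync_point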

.. note::
@@ -182,17 +182,13 @@ similar to that of replication with a few notable exceptions:

Performance Considerations
--------------------------

In general, EC has different performance characteristics than replicated data.
EC requires substantially more CPU to read and write data, and is more suited
for larger objects that are not frequently accessed (e.g. backups).

Operators are encouraged to characterize the performance of various EC schemes
and share their observations with the developer community.

----------------------------
Using an Erasure Code Policy
----------------------------
@@ -204,7 +200,7 @@ an EC policy can be setup is shown below::

    [storage-policy:2]
    name = ec104
    policy_type = erasure_coding
    ec_type = liberasurecode_rs_vand
    ec_num_data_fragments = 10
    ec_num_parity_fragments = 4
    ec_object_segment_size = 1048576
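To make the space overhead of such a 10+4 scheme concrete, a quick
back-of-the-envelope calculation (plain Python, illustrative values only)::

    num_data, num_parity = 10, 4
    object_size = 1 * 1024 ** 3  # 1 GiB

    # An EC policy stores (data + parity) / data times the object size;
    # compare with 3x replication.
    ec_stored = object_size * (num_data + num_parity) / float(num_data)
    replica_stored = object_size * 3.0

    print('EC stores      %.2f GiB' % (ec_stored / 1024 ** 3))       # 1.40 GiB
    print('3x replication %.2f GiB' % (replica_stored / 1024 ** 3))  # 3.00 GiB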
@@ -45,8 +45,8 @@ Direct API

SLO support centers around the user generated manifest file. After the user
has uploaded the segments into their account a manifest file needs to be
built and uploaded. All object segments must be at least 1 byte
in size. Please see the SLO docs (:ref:`slo-doc`) for further
details.
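For orientation, a manifest is a JSON list of segment descriptors; a minimal
sketch (the paths and etags below are illustrative only) looks like::

    [{"path": "/cont/object_seg_1",
      "etag": "etagoftheobjectsegment1",
      "size_bytes": 1048576},
     {"path": "/cont/object_seg_2",
      "etag": "etagoftheobjectsegment2",
      "size_bytes": 1}]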

----------------
@@ -37,8 +37,7 @@ There are many reasons why this might be desirable:

.. note::

    Today, Swift supports two different policy types: Replication and Erasure
    Code. See :doc:`overview_erasure_code` for details.

    Also note that Diskfile refers to backend object storage plug-in
    architecture. See :doc:`development_ondisk_backends` for details.
@@ -286,6 +285,7 @@ example configuration.::

    [swift-hash]
    # random unique strings that can never change (DO NOT LOSE)
    # Use only printable chars (python -c "import string; print(string.printable)")
    swift_hash_path_prefix = changeme
    swift_hash_path_suffix = changeme
@@ -4,9 +4,9 @@ The Rings

The rings determine where data should reside in the cluster. There is a
separate ring for account databases, for container databases, and for each
object storage policy, but each ring works in the same way. These rings are
externally managed, in that the server processes themselves do not modify the
rings, they are instead given new rings modified by other tools.

The ring uses a configurable number of bits from a path's MD5 hash as a
partition index that designates a device. The number of bits kept from the hash
@@ -18,10 +18,25 @@ cluster all at once.

Another configurable value is the replica count, which indicates how many of
the partition->device assignments comprise a single ring. For a given partition
number, each replica will be assigned to a different device in the ring.

Devices are added to the ring to describe the capacity available for
part-replica assignment. Devices are placed into failure domains consisting
of region, zone, and server. Regions can be used to describe geographically
distributed systems characterized by lower bandwidth or higher latency between
machines in different regions. Many rings will consist of only a single
region. Zones can be used to group devices based on physical locations, power
separations, network separations, or any other attribute that would lessen
multiple replicas being unavailable at the same time.

Devices are given a weight which describes the relative weight of the device
in comparison to other devices.

When building a ring, all of each part's replicas will be assigned to devices
according to their weight. Additionally, each replica of a part will attempt
to be assigned to a device whose failure domain does not already have a
replica for the part. Only a single replica of a part may be assigned to each
device - you must have as many devices as replicas.

------------
Ring Builder
@@ -91,8 +106,7 @@ Note: The list of devices may contain holes, or indexes set to None, for

devices that have been removed from the cluster. Generally, device ids are not
reused. Also, some devices may be temporarily disabled by setting their weight
to 0.0. To obtain a list of active devices (for uptime polling, for example)
the Python code would look like: ``devices = list(self._iter_devs())``

*************************
Partition Assignment List
@@ -108,14 +122,24 @@ So, to create a list of device dictionaries assigned to a partition, the Python

code would look like: ``devices = [self.devs[part2dev_id[partition]] for
part2dev_id in self._replica2part2dev_id]``

array('H') is used for memory conservation as there may be millions of
partitions.

*********************
Partition Shift Value
*********************

The partition shift value is known internally to the Ring class as _part_shift.
This value is used to shift an MD5 hash to calculate the partition on which the
data for that hash should reside. Only the top four bytes of the hash are used
in this process. For example, to compute the partition for the path
/account/container/object the Python code might look like: ``partition =
unpack_from('>I', md5('/account/container/object').digest())[0] >>
self._part_shift``

For a ring generated with part_power P, the partition shift value is
32 - P.
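A self-contained version of that computation (standalone Python; the part
power and path are illustrative, and real deployments also mix the cluster's
hash path prefix and suffix into the hashed path)::

    from hashlib import md5
    from struct import unpack_from

    part_power = 10                # illustrative; real rings often use more
    part_shift = 32 - part_power

    path = '/account/container/object'
    partition = unpack_from('>I', md5(path.encode()).digest())[0] >> part_shift
    print(partition)               # an integer in [0, 2 ** part_power)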

*******************
Fractional Replicas
*******************
@@ -130,6 +154,21 @@ for the ring. This means that some partitions will have more replicas than

others. For example, if a ring has 3.25 replicas, then 25% of its partitions
will have four replicas, while the remaining 75% will have just three.

**********
Dispersion
**********

With each rebalance, the ring builder calculates a dispersion metric. This is
the percentage of partitions in the ring that have too many replicas within a
particular failure domain.

For example, if you have three servers in a cluster but two replicas for a
partition get placed onto the same server, that partition will count towards
the dispersion metric.

A lower dispersion value is better, and the value can be used to find the
proper value for "overload".
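As a toy illustration of the metric (plain Python; the placements are
invented, not produced by the ring builder)::

    # partition -> servers holding its replicas
    assignments = {
        0: ['server-a', 'server-a'],   # two replicas share a failure domain
        1: ['server-a', 'server-b'],
        2: ['server-b', 'server-c'],
    }
    bad = sum(1 for replicas in assignments.values()
              if len(replicas) != len(set(replicas)))
    print('dispersion: %.2f%%' % (100.0 * bad / len(assignments)))  # 33.33%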

********
Overload
********
@@ -168,74 +207,118 @@ on them than the disks in nodes A and B. If 80% full is the warning

threshold for the cluster, node C's disks will reach 80% full while A
and B's disks are only 72.7% full.

-------------------------------
Partition & Replica Terminology
-------------------------------

All descriptions of consistent hashing describe the process of breaking the
keyspace up into multiple ranges (vnodes, buckets, etc.) - many more than the
number of "nodes" to which keys in the keyspace must be assigned. Swift calls
these ranges `partitions` - they are partitions of the total keyspace.

Each partition will have multiple replicas. Every replica of each partition
must be assigned to a device in the ring. When describing a specific replica
of a partition (like when it's assigned a device) it is described as a
`part-replica` in that it is a specific `replica` of the specific `partition`.
A single device may be assigned different replicas from many parts, but it may
not be assigned multiple replicas of a single part.

The total number of partitions in a ring is calculated as ``2 **
<part-power>``. The total number of part-replicas in a ring is calculated as
``<replica-count> * 2 ** <part-power>``.

When considering a device's `weight` it is useful to describe the number of
part-replicas it would like to be assigned. A single device, regardless of
weight, will never hold more than ``2 ** <part-power>`` part-replicas because
it can not have more than one replica of any part assigned. The number of
part-replicas a device can take by weight is calculated as its
`parts_wanted`. The true number of part-replicas assigned to a device can be
compared to its parts wanted similarly to a calculation of percentage error -
this deviation in the observed result from the idealized target is called a
device's `balance`.

When considering a device's `failure domain` it is useful to describe the
number of part-replicas it would like to be assigned. The number of
part-replicas wanted in a failure domain of a tier is the sum of the
part-replicas wanted in the failure domains of its sub-tier. However,
collectively when the total number of part-replicas in a failure domain
exceeds or is equal to ``2 ** <part-power>`` it is most obvious that it's no
longer sufficient to consider only the number of total part-replicas, but
rather the fraction of each replica's partitions. Consider for example a ring
with ``3`` replicas and ``3`` servers: while it's necessary for dispersion
that each server hold only ``1/3`` of the total part-replicas, it is
additionally constrained to require ``1.0`` replica of *each* partition. It
would not be sufficient to satisfy dispersion if two devices on one of the
servers each held a replica of a single partition, while another server held
none. By considering a decimal fraction of one replica's worth of parts in a
failure domain we can derive the total part-replicas wanted in a failure
domain (``1.0 * 2 ** <part-power>``). Additionally we infer more about
`which` part-replicas must go in the failure domain. Consider a ring with
three replicas, and two zones, each with two servers (four servers total).
The three replicas worth of partitions will be assigned into two failure
domains at the zone tier. Each zone must hold more than one replica of some
parts. We represent this improper fraction of a replica's worth of partitions
in decimal form as ``1.5`` (``3.0 / 2``). This tells us not only the *number*
of total parts (``1.5 * 2 ** <part-power>``) but also that *each* partition
must have `at least` one replica in this failure domain (in fact ``0.5`` of
the partitions will have ``2`` replicas). Within each zone the two servers
will hold ``0.75`` of a replica's worth of partitions - this is equal both to
"the fraction of a replica's worth of partitions assigned to each zone
(``1.5``) divided evenly among the number of failure domains in its sub-tier
(``2`` servers in each zone, i.e. ``1.5 / 2``)" but *also* "the total number
of replicas (``3.0``) divided evenly among the total number of failure domains
in the server tier (``2`` servers x ``2`` zones = ``4``, i.e. ``3.0 / 4``)".
It is useful to consider that each server in this ring will hold only ``0.75``
of a replica's worth of partitions, which tells us that any server should have
`at most` one replica of a given part assigned. In the interests of brevity,
some variable names will often refer to the concept representing the fraction
of a replica's worth of partitions in decimal form as *replicanths* - this is
meant to invoke connotations similar to ordinal numbers as applied to
fractions, but generalized to a replica instead of a four*th* or a fif*th*.
The 'n' was probably thrown in because of Blade Runner.
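The zone/server example above reduces to a few lines of arithmetic (plain
Python, with the values from the text; the part power is illustrative)::

    replicas = 3.0
    zones = 2
    servers_per_zone = 2

    zone_replicanths = replicas / zones                        # 1.5
    server_replicanths = zone_replicanths / servers_per_zone   # 0.75
    # equivalently: total replicas over total failure domains in the tier
    assert server_replicanths == replicas / (zones * servers_per_zone)

    part_power = 10
    parts = 2 ** part_power
    print(zone_replicanths * parts)     # part-replicas wanted per zone: 1536.0
    print(server_replicanths * parts)   # part-replicas wanted per server: 768.0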
-----------------
Building the Ring
-----------------

First the ring builder calculates the replicanths wanted at each tier in the
ring's topology based on weight.

Then the ring builder calculates the replicanths wanted at each tier in the
ring's topology based on dispersion.

Then the ring builder calculates the maximum deviation on a single device
between its weighted replicanths and wanted replicanths.

Next we interpolate between the two replicanth values (weighted & wanted) at
each tier using the specified overload (up to the maximum required overload).
It's a linear interpolation, similar to solving for a point on a line between
two points - we calculate the slope across the max required overload and then
calculate the intersection of the line with the desired overload. This
becomes the target.
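The interpolation step can be pictured with a short sketch (the names below
are illustrative, not the builder's actual internals)::

    def target_replicanths(weighted, wanted, overload, max_overload):
        # Linear interpolation between the weight-derived and the
        # dispersion-derived replicanths, clamped at max_overload.
        if max_overload <= 0:
            return weighted
        factor = min(overload, max_overload) / max_overload
        return weighted + (wanted - weighted) * factor

    # A tier wanting 0.75 replicanths by weight but 1.0 for dispersion:
    print(target_replicanths(0.75, 1.0, overload=0.1, max_overload=0.25))  # 0.85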
From the target we calculate the minimum and maximum number of replicas any
part may have in a tier. This becomes the replica_plan.

Finally, we calculate the number of partitions that should ideally be assigned
to each device based on the replica_plan.

On initial balance, the first time partitions are placed to generate a ring,
we must assign each replica of each partition to the device that desires the
most partitions, excluding any devices that already have their maximum number
of replicas of that part assigned to some parent tier of that device's failure
domain.

When building a new ring based on an old ring, the desired number of
partitions each device wants is recalculated from the current replica_plan.
Next the partitions to be reassigned are gathered up. Any removed devices have
all their assigned partitions unassigned and added to the gathered list. Any
partition replicas that (due to the addition of new devices) can be spread out
for better durability are unassigned and added to the gathered list. Any
devices that have more partitions than they now desire have random partitions
unassigned from them and added to the gathered list. Lastly, the gathered
partitions are then reassigned to devices using a similar method as in the
initial assignment described above.

Whenever a partition has a replica reassigned, the time of the reassignment is
recorded. This is taken into account when gathering partitions to reassign so
@@ -247,10 +330,9 @@ failure and there's no choice but to make a reassignment.

The above processes don't always perfectly rebalance a ring due to the random
nature of gathering partitions for reassignment. To help reach a more balanced
ring, the rebalance process is repeated a fixed number of times until the
replica_plan is fulfilled or unable to be fulfilled (indicating we probably
can't get perfect balance due to too many partitions recently moved).
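A sketch of that outer loop (the helpers are hypothetical stand-ins, not the
builder's exact code)::

    def gather_and_reassign():
        # Hypothetical stand-in: returns number of part-replicas moved.
        return 0

    def replica_plan_fulfilled():
        # Hypothetical stand-in: True once min/max replica bounds are met.
        return False

    MAX_REBALANCES = 5  # illustrative bound
    for attempt in range(MAX_REBALANCES):
        moved = gather_and_reassign()
        if replica_plan_fulfilled() or not moved:
            break  # fulfilled, or nothing else may move (moved too recently)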
---------------------
Ring Builder Analyzer
@@ -263,8 +345,8 @@ History

-------

The ring code went through many iterations before arriving at what it is now
and while it has largely been stable, the algorithm has seen occasional tweaks
and could change, perhaps even fundamentally, as new ideas emerge. This
section will try to describe the previous ideas attempted and attempt to
explain why they were discarded.
@@ -329,15 +411,14 @@ be maintaining the rings themselves anyway and only doing hash lookups, MD5 was

chosen for its general availability, good distribution, and adequate speed.

The placement algorithm has seen a number of behavioral changes for
unbalanceable rings. The ring builder wants to keep replicas as far apart as
possible while still respecting device weights. In most cases, the ring
builder can achieve both, but sometimes they conflict. At first, the behavior
was to keep the replicas far apart and ignore device weight, but that made it
impossible to gradually go from one region to two, or from two to three. Then
it was changed to favor device weight over dispersion, but that wasn't so good
for rings that were close to balanceable, like 3 machines with 60TB, 60TB, and
57TB of disk space; operators were expecting one replica per machine, but
didn't always get it. After that, overload was added to the ring builder so
that operators could choose a balance between dispersion and device weights.
In time the overload concept was improved and made more accurate.
@@ -35,7 +35,7 @@ bind_port = 6002

# log_udp_port = 514
#
# You can enable StatsD logging here:
# log_statsd_host =
# log_statsd_port = 8125
# log_statsd_default_sample_rate = 1.0
# log_statsd_sample_rate_factor = 1.0
@@ -17,7 +17,7 @@

# log_udp_port = 514
#
# You can enable StatsD logging here:
# log_statsd_host =
# log_statsd_port = 8125
# log_statsd_default_sample_rate = 1.0
# log_statsd_sample_rate_factor = 1.0
@@ -41,7 +41,7 @@ bind_port = 6001

# log_udp_port = 514
#
# You can enable StatsD logging here:
# log_statsd_host =
# log_statsd_port = 8125
# log_statsd_default_sample_rate = 1.0
# log_statsd_sample_rate_factor = 1.0
@@ -17,7 +17,7 @@

# log_udp_port = 514
#
# You can enable StatsD logging here:
# log_statsd_host =
# log_statsd_port = 8125
# log_statsd_default_sample_rate = 1.0
# log_statsd_sample_rate_factor = 1.0
@@ -2,6 +2,7 @@

# You can use this single conf file instead of having memcache_servers set in
# several other conf files under [filter:cache] for example. You can specify
# multiple servers separated with commas, as in: 10.1.2.3:11211,10.1.2.4:11211
# (IPv6 addresses must follow rfc3986 section-3.2.2, i.e. [::1]:11211)
# memcache_servers = 127.0.0.1:11211
#
# Sets how memcache values are serialized and deserialized:
@@ -20,7 +20,7 @@

# log_udp_port = 514
#
# You can enable StatsD logging here:
# log_statsd_host =
# log_statsd_port = 8125
# log_statsd_default_sample_rate = 1.0
# log_statsd_sample_rate_factor = 1.0
@@ -80,7 +80,7 @@ use = egg:swift#proxy_logging

# access_log_udp_port = 514
#
# You can use log_statsd_* from [DEFAULT] or override them here:
# access_log_statsd_host =
# access_log_statsd_port = 8125
# access_log_statsd_default_sample_rate = 1.0
# access_log_statsd_sample_rate_factor = 1.0
@@ -44,7 +44,7 @@ bind_port = 6000

# log_udp_port = 514
#
# You can enable StatsD logging here:
# log_statsd_host =
# log_statsd_port = 8125
# log_statsd_default_sample_rate = 1.0
# log_statsd_sample_rate_factor = 1.0
@@ -82,6 +82,11 @@ use = egg:swift#object

# set log_address = /dev/log
#
# max_upload_time = 86400
#
# slow is the total amount of seconds an object PUT/DELETE request takes at
# least. If it is faster, the object server will sleep this amount of time minus
# the already passed transaction time. This is only useful for simulating slow
# devices on storage nodes during testing and development.
# slow = 0
#
# Objects smaller than this are not evicted from the buffercache once read
@@ -282,6 +287,9 @@ use = egg:swift#recon

# log_level = INFO
# log_address = /dev/log
#
# Time in seconds to wait between auditor passes
# interval = 30
#
# You can set the disk chunk size that the auditor uses making it larger if
# you like for more efficient local auditing of larger objects
# disk_chunk_size = 65536
@@ -63,7 +63,7 @@ bind_port = 8080

# log_udp_port = 514
#
# You can enable StatsD logging here:
# log_statsd_host =
# log_statsd_port = 8125
# log_statsd_default_sample_rate = 1.0
# log_statsd_sample_rate_factor = 1.0
@@ -171,9 +171,6 @@ use = egg:swift#proxy

# the number of seconds configured by timing_expiry.
# timing_expiry = 300
#
# Set to the number of nodes to contact for a normal request. You can use
# '* replicas' at the end to have it use the number given times the number of
# replicas for the ring being used for the request.
@@ -287,13 +284,21 @@ user_test5_tester5 = testing5 service

# You'll also need to have the keystoneauth middleware enabled and have it in
# your main pipeline, as shown in the sample pipeline at the top of this file.
#
# Following parameters are known to work with keystonemiddleware v2.3.0
# (above v2.0.0), but checking the latest information in the wiki page[1]
# is recommended.
# 1. http://docs.openstack.org/developer/keystonemiddleware/middlewarearchitecture.html#configuration
#
# [filter:authtoken]
# paste.filter_factory = keystonemiddleware.auth_token:filter_factory
# auth_uri = http://keystonehost:5000
# auth_url = http://keystonehost:35357
# auth_plugin = password
# project_domain_id = default
# user_domain_id = default
# project_name = service
# username = swift
# password = password
#
# delay_auth_decision defaults to False, but leaving it as false will
# prevent other auth systems, staticweb, tempurl, formpost, and ACLs from
@@ -388,7 +393,8 @@ use = egg:swift#memcache

# If not set here, the value for memcache_servers will be read from
# memcache.conf (see memcache.conf-sample) or lacking that file, it will
# default to the value below. You can specify multiple servers separated with
# commas, as in: 10.1.2.3:11211,10.1.2.4:11211 (IPv6 addresses must
# follow rfc3986 section-3.2.2, i.e. [::1]:11211)
# memcache_servers = 127.0.0.1:11211
#
# Sets how memcache values are serialized and deserialized:
@@ -568,7 +574,7 @@ use = egg:swift#proxy_logging

# access_log_udp_port = 514
#
# You can use log_statsd_* from [DEFAULT] or override them here:
# access_log_statsd_host =
# access_log_statsd_port = 8125
# access_log_statsd_default_sample_rate = 1.0
# access_log_statsd_sample_rate_factor = 1.0
@@ -628,14 +634,17 @@ use = egg:swift#bulk

use = egg:swift#slo
# max_manifest_segments = 1000
# max_manifest_size = 2097152
#
# Rate limiting applies only to segments smaller than this size (bytes).
# rate_limit_under_size = 1048576
#
# Start rate-limiting SLO segment serving after the Nth small segment of a
# segmented object.
# rate_limit_after_segment = 10
#
# Once segment rate-limiting kicks in for an object, limit segments served
# to N per second. 0 means no rate-limiting.
# rate_limit_segments_per_sec = 1
#
# Time limit on GET requests (seconds)
# max_get_time = 86400
@@ -4,6 +4,7 @@

# the hashing algorithm when determining data placement in the cluster.
# These values should remain secret and MUST NOT change
# once a cluster has been deployed.
# Use only printable chars (python -c "import string; print(string.printable)")

swift_hash_path_suffix = changeme
swift_hash_path_prefix = changeme
@@ -50,8 +51,7 @@ aliases = yellow, orange

#policy_type = replication

# The following declares a storage policy of type 'erasure_coding' which uses
# Erasure Coding for data reliability. Please refer to Swift documentation for
# details on how the 'erasure_coding' storage policy is implemented.
#
# Swift uses PyECLib, a Python Erasure coding API library, for encode/decode
@@ -73,13 +73,14 @@ aliases = yellow, orange

# The example 'deepfreeze10-4' policy defined below is a _sample_
# configuration with an alias of 'df10-4' as well as 10 'data' and 4 'parity'
# fragments. 'ec_type' defines the Erasure Coding scheme.
# 'liberasurecode_rs_vand' (Reed-Solomon Vandermonde) is used as an example
# below.
#
#[storage-policy:2]
#name = deepfreeze10-4
#aliases = df10-4
#policy_type = erasure_coding
#ec_type = liberasurecode_rs_vand
#ec_num_data_fragments = 10
#ec_num_parity_fragments = 4
#ec_object_segment_size = 1048576
|
@ -4,10 +4,10 @@
|
||||||
|
|
||||||
dnspython>=1.12.0;python_version<'3.0'
|
dnspython>=1.12.0;python_version<'3.0'
|
||||||
dnspython3>=1.12.0;python_version>='3.0'
|
dnspython3>=1.12.0;python_version>='3.0'
|
||||||
eventlet>=0.16.1,!=0.17.0
|
eventlet>=0.17.4 # MIT
|
||||||
greenlet>=0.3.1
|
greenlet>=0.3.1
|
||||||
netifaces>=0.5,!=0.10.0,!=0.10.1
|
netifaces>=0.5,!=0.10.0,!=0.10.1
|
||||||
pastedeploy>=1.3.3
|
pastedeploy>=1.3.3
|
||||||
six>=1.9.0
|
six>=1.9.0
|
||||||
xattr>=0.4
|
xattr>=0.4
|
||||||
PyECLib>=1.0.7 # BSD
|
PyECLib>=1.2.0 # BSD
|
||||||
|
|
|
@@ -1,4 +1,3 @@

# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
@@ -22,6 +21,7 @@ from eventlet.green import urllib2, socket

from six.moves.urllib.parse import urlparse
from swift.common.utils import SWIFT_CONF_FILE
from swift.common.ring import Ring
from swift.common.storage_policy import POLICIES
from hashlib import md5
import eventlet
import json
@@ -181,12 +181,12 @@ class SwiftRecon(object):

    def _ptime(self, timev=None):
        """
        :param timev: a unix timestamp or None
        :returns: a pretty string of the current time or provided time in UTC
        """
        if timev:
            return time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime(timev))
        else:
            return time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime())

    def _md5_file(self, path):
        """
@@ -203,18 +203,19 @@ class SwiftRecon(object):

            block = f.read(4096)
        return md5sum.hexdigest()

    def get_hosts(self, region_filter, zone_filter, swift_dir, ring_names):
        """
        Get a list of hosts in the rings.

        :param region_filter: Only list regions matching given filter
        :param zone_filter: Only list zones matching given filter
        :param swift_dir: Directory of swift config, usually /etc/swift
        :param ring_names: Collection of ring names, such as
                           ['object', 'object-2']
        :returns: a set of tuples containing the ip and port of hosts
        """
        rings = [Ring(swift_dir, ring_name=n) for n in ring_names]
        devs = [d for r in rings for d in r.devs if d]
        if region_filter is not None:
            devs = [d for d in devs if d['region'] == region_filter]
        if zone_filter is not None:
@@ -495,16 +496,14 @@ class SwiftRecon(object):

            elapsed = time.time() - least_recent_time
            elapsed, elapsed_unit = seconds2timeunit(elapsed)
            print('Oldest completion was %s (%d %s ago) by %s.' % (
                self._ptime(least_recent_time),
                elapsed, elapsed_unit, host))
        if most_recent_url is not None:
            host = urlparse(most_recent_url).netloc
            elapsed = time.time() - most_recent_time
            elapsed, elapsed_unit = seconds2timeunit(elapsed)
            print('Most recent completion was %s (%d %s ago) by %s.' % (
                self._ptime(most_recent_time),
                elapsed, elapsed_unit, host))
        print("=" * 79)
@@ -899,12 +898,8 @@ class SwiftRecon(object):

                continue
            if (ts_remote < ts_start or ts_remote > ts_end):
                diff = abs(ts_end - ts_remote)
                ts_end_f = self._ptime(ts_end)
                ts_remote_f = self._ptime(ts_remote)

                print("!! %s current time is %s, but remote is %s, "
                      "differs by %.2f sec" % (
@@ -920,6 +915,26 @@ class SwiftRecon(object):

                matches, len(hosts), errors))
        print("=" * 79)

    def _get_ring_names(self, policy=None):
        '''
        Retrieve name of ring files.

        If no policy is passed and the server type is object,
        the ring names of all storage-policies are retrieved.

        :param policy: name or index of storage policy, only applicable
                       with server_type==object.
        :returns: list of ring names.
        '''
        if self.server_type == 'object':
            ring_names = [p.ring_name for p in POLICIES if (
                p.name == policy or not policy or (
                    policy.isdigit() and int(policy) == int(p)))]
        else:
            ring_names = [self.server_type]

        return ring_names

    def main(self):
        """
        Retrieve and report cluster info from hosts running recon middleware.
@@ -989,6 +1004,9 @@ class SwiftRecon(object):

                        default=5)
        args.add_option('--swiftdir', default="/etc/swift",
                        help="Default = /etc/swift")
        args.add_option('--policy', '-p',
                        help='Only query object servers in specified '
                             'storage policy (specified as name or index).')
        options, arguments = args.parse_args()

        if len(sys.argv) <= 1 or len(arguments) > 1:
@ -1010,8 +1028,14 @@ class SwiftRecon(object):
|
||||||
self.suppress_errors = options.suppress
|
self.suppress_errors = options.suppress
|
||||||
self.timeout = options.timeout
|
self.timeout = options.timeout
|
||||||
|
|
||||||
hosts = self.get_devices(options.region, options.zone,
|
ring_names = self._get_ring_names(options.policy)
|
||||||
swift_dir, self.server_type)
|
if not ring_names:
|
||||||
|
print('Invalid Storage Policy')
|
||||||
|
args.print_help()
|
||||||
|
sys.exit(0)
|
||||||
|
|
||||||
|
hosts = self.get_hosts(options.region, options.zone,
|
||||||
|
swift_dir, ring_names)
|
||||||
|
|
||||||
print("--> Starting reconnaissance on %s hosts" % len(hosts))
|
print("--> Starting reconnaissance on %s hosts" % len(hosts))
|
||||||
print("=" * 79)
|
print("=" * 79)
|
||||||
|
@ -1090,7 +1114,3 @@ def main():
|
||||||
reconnoiter.main()
|
reconnoiter.main()
|
||||||
except KeyboardInterrupt:
|
except KeyboardInterrupt:
|
||||||
print('\n')
|
print('\n')
|
||||||
|
|
||||||
|
|
||||||
if __name__ == '__main__':
|
|
||||||
main()
|
|
||||||
|
|
|
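The `_get_ring_names()` helper added above is what lets `swift-recon --policy` narrow object-server queries to a single storage policy. A minimal, runnable sketch of the same filtering rule, with a hypothetical `Policy` class standing in for the entries of `swift.common.storage_policy.POLICIES` (this is illustrative, not Swift code)::

    class Policy(object):
        def __init__(self, idx, name):
            self.idx = idx
            self.name = name
            # policy 0 keeps the legacy ring name; others get an index suffix
            self.ring_name = 'object' if idx == 0 else 'object-%d' % idx

        def __int__(self):
            return self.idx

    POLICIES = [Policy(0, 'gold'), Policy(1, 'silver')]

    def get_ring_names(policy=None):
        """Return the ring names matching a policy name or index."""
        return [p.ring_name for p in POLICIES if (
            p.name == policy or not policy or (
                policy.isdigit() and int(policy) == int(p)))]

    print(get_ring_names())          # ['object', 'object-1']
    print(get_ring_names('silver'))  # ['object-1']
    print(get_ring_names('0'))       # ['object']
    print(get_ring_names('bogus'))   # [] -> caller prints 'Invalid Storage Policy'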
@@ -1,4 +1,3 @@
-#! /usr/bin/env python
 # Copyright (c) 2015 Samuel Merritt <sam@swiftstack.com>
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
@@ -1,4 +1,3 @@
-#! /usr/bin/env python
 # Copyright (c) 2010-2012 OpenStack Foundation
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
@@ -25,6 +24,7 @@ from os.path import basename, abspath, dirname, exists, join as pathjoin
 from sys import argv as sys_argv, exit, stderr, stdout
 from textwrap import wrap
 from time import time
+from datetime import timedelta
 import optparse
 import math

@@ -32,7 +32,7 @@ from six.moves import zip as izip
 from six.moves import input

 from swift.common import exceptions
-from swift.common.ring import RingBuilder, Ring
+from swift.common.ring import RingBuilder, Ring, RingData
 from swift.common.ring.builder import MAX_BALANCE
 from swift.common.ring.utils import validate_args, \
     validate_and_normalize_ip, build_dev_from_opts, \
@@ -389,11 +389,12 @@ def _parse_remove_values(argvish):


 class Commands(object):
+    @staticmethod
     def unknown():
         print('Unknown command: %s' % argv[2])
         exit(EXIT_ERROR)

+    @staticmethod
     def create():
         """
 swift-ring-builder <builder_file> create <part_power> <replicas>
@@ -417,6 +418,7 @@ swift-ring-builder <builder_file> create <part_power> <replicas>
         builder.save(builder_file)
         exit(EXIT_SUCCESS)

+    @staticmethod
     def default():
         """
 swift-ring-builder <builder_file>
@@ -444,9 +446,28 @@ swift-ring-builder <builder_file>
             builder.parts, builder.replicas, regions, zones, dev_count,
             balance, dispersion_trailer))
         print('The minimum number of hours before a partition can be '
-              'reassigned is %s' % builder.min_part_hours)
+              'reassigned is %s (%s remaining)' % (
+                  builder.min_part_hours,
+                  timedelta(seconds=builder.min_part_seconds_left)))
         print('The overload factor is %0.2f%% (%.6f)' % (
             builder.overload * 100, builder.overload))

+        # compare ring file against builder file
+        if not exists(ring_file):
+            print('Ring file %s not found, '
+                  'probably it hasn\'t been written yet' % ring_file)
+        else:
+            builder_dict = builder.get_ring().to_dict()
+            try:
+                ring_dict = RingData.load(ring_file).to_dict()
+            except Exception as exc:
+                print('Ring file %s is invalid: %r' % (ring_file, exc))
+            else:
+                if builder_dict == ring_dict:
+                    print('Ring file %s is up-to-date' % ring_file)
+                else:
+                    print('Ring file %s is obsolete' % ring_file)
+
         if builder.devs:
             balance_per_dev = builder._build_balance_per_dev()
             print('Devices:    id  region  zone      ip address  port '
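The new ring-file freshness report in `default()` boils down to: serialize both the builder's ring and the on-disk ring file and compare the resulting dicts. A self-contained sketch of that pattern — the `load()`/`to_dict()` names mirror Swift's `RingData` API, but the pickle-based stand-in below is illustrative and is not Swift's actual on-disk ring format::

    import os
    import pickle


    class RingData(object):
        """Illustrative stand-in for swift.common.ring.RingData."""

        def __init__(self, devs, replica2part2dev_id, part_shift):
            self.devs = devs
            self.replica2part2dev_id = replica2part2dev_id
            self.part_shift = part_shift

        def to_dict(self):
            return {'devs': self.devs,
                    'replica2part2dev_id': self.replica2part2dev_id,
                    'part_shift': self.part_shift}

        def save(self, path):
            with open(path, 'wb') as fp:
                pickle.dump(self.to_dict(), fp)

        @classmethod
        def load(cls, path):
            with open(path, 'rb') as fp:
                return cls(**pickle.load(fp))


    def ring_status(builder_ring, ring_file):
        """Report whether ring_file matches what the builder would write."""
        if not os.path.exists(ring_file):
            return 'not found'
        try:
            ring_dict = RingData.load(ring_file).to_dict()
        except Exception:
            return 'invalid'
        return ('up-to-date' if builder_ring.to_dict() == ring_dict
                else 'obsolete')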
@@ -463,6 +484,7 @@ swift-ring-builder <builder_file>
                 dev['meta']))
         exit(EXIT_SUCCESS)

+    @staticmethod
     def search():
         """
 swift-ring-builder <builder_file> search <search-value>
@@ -513,6 +535,7 @@ swift-ring-builder <builder_file> search
                 dev['meta']))
         exit(EXIT_SUCCESS)

+    @staticmethod
     def list_parts():
         """
 swift-ring-builder <builder_file> list_parts <search-value> [<search-value>] ..
@@ -562,6 +585,7 @@ swift-ring-builder <builder_file> list_parts
             print('%9d   %7d' % (partition, count))
         exit(EXIT_SUCCESS)

+    @staticmethod
     def add():
         """
 swift-ring-builder <builder_file> add
@@ -612,6 +636,7 @@ swift-ring-builder <builder_file> add
         builder.save(builder_file)
         exit(EXIT_SUCCESS)

+    @staticmethod
     def set_weight():
         """
 swift-ring-builder <builder_file> set_weight <search-value> <weight>
@@ -644,6 +669,7 @@ swift-ring-builder <builder_file> set_weight
         builder.save(builder_file)
         exit(EXIT_SUCCESS)

+    @staticmethod
     def set_info():
         """
 swift-ring-builder <builder_file> set_info
@@ -689,6 +715,7 @@ swift-ring-builder <builder_file> set_info
         builder.save(builder_file)
         exit(EXIT_SUCCESS)

+    @staticmethod
     def remove():
         """
 swift-ring-builder <builder_file> remove <search-value> [search-value ...]
@@ -754,6 +781,7 @@ swift-ring-builder <builder_file> search
         builder.save(builder_file)
         exit(EXIT_SUCCESS)

+    @staticmethod
     def rebalance():
         """
 swift-ring-builder <builder_file> rebalance [options]
@@ -787,6 +815,14 @@ swift-ring-builder <builder_file> rebalance [options]
         handler.setFormatter(formatter)
         logger.addHandler(handler)

+        if builder.min_part_seconds_left > 0 and not options.force:
+            print('No partitions could be reassigned.')
+            print('The time between rebalances must be at least '
+                  'min_part_hours: %s hours (%s remaining)' % (
+                      builder.min_part_hours,
+                      timedelta(seconds=builder.min_part_seconds_left)))
+            exit(EXIT_WARNING)
+
         devs_changed = builder.devs_changed
         try:
             last_balance = builder.get_balance()
@@ -802,8 +838,7 @@ swift-ring-builder <builder_file> rebalance [options]
             exit(EXIT_ERROR)
         if not (parts or options.force or removed_devs):
             print('No partitions could be reassigned.')
-            print('Either none need to be or none can be due to '
-                  'min_part_hours [%s].' % builder.min_part_hours)
+            print('There is no need to do so at this time')
             exit(EXIT_WARNING)
         # If we set device's weight to zero, currently balance will be set
         # special value(MAX_BALANCE) until zero weighted device return all
@@ -859,6 +894,7 @@ swift-ring-builder <builder_file> rebalance [options]
         builder.save(builder_file)
         exit(status)

+    @staticmethod
     def dispersion():
         """
 swift-ring-builder <builder_file> dispersion <search_filter> [options]
@@ -953,6 +989,7 @@ swift-ring-builder <builder_file> dispersion <search_filter> [options]
             print(template % args)
         exit(status)

+    @staticmethod
     def validate():
         """
 swift-ring-builder <builder_file> validate
@@ -961,6 +998,7 @@ swift-ring-builder <builder_file> validate
         builder.validate()
         exit(EXIT_SUCCESS)

+    @staticmethod
     def write_ring():
         """
 swift-ring-builder <builder_file> write_ring
@@ -982,6 +1020,7 @@ swift-ring-builder <builder_file> write_ring
         ring_data.save(ring_file)
         exit(EXIT_SUCCESS)

+    @staticmethod
     def write_builder():
         """
 swift-ring-builder <ring_file> write_builder [min_part_hours]
@@ -1028,6 +1067,7 @@ swift-ring-builder <ring_file> write_builder [min_part_hours]
             builder.devs[dev_id]['parts'] += 1
         builder.save(builder_file)

+    @staticmethod
     def pretend_min_part_hours_passed():
         """
 swift-ring-builder <builder_file> pretend_min_part_hours_passed
@@ -1046,6 +1086,7 @@ swift-ring-builder <builder_file> pretend_min_part_hours_passed
         builder.save(builder_file)
         exit(EXIT_SUCCESS)

+    @staticmethod
     def set_min_part_hours():
         """
 swift-ring-builder <builder_file> set_min_part_hours <hours>
@@ -1062,6 +1103,7 @@ swift-ring-builder <builder_file> set_min_part_hours <hours>
         builder.save(builder_file)
         exit(EXIT_SUCCESS)

+    @staticmethod
     def set_replicas():
         """
 swift-ring-builder <builder_file> set_replicas <replicas>
@@ -1094,6 +1136,7 @@ swift-ring-builder <builder_file> set_replicas <replicas>
         builder.save(builder_file)
         exit(EXIT_SUCCESS)

+    @staticmethod
     def set_overload():
         """
 swift-ring-builder <builder_file> set_overload <overload>[%]
@@ -1150,11 +1193,12 @@ def main(arguments=None):
             globals())
         print(Commands.default.__doc__.strip())
         print()
-        cmds = [c for c, f in Commands.__dict__.items()
-                if f.__doc__ and not c.startswith('_') and c != 'default']
+        cmds = [c for c in dir(Commands)
+                if getattr(Commands, c).__doc__ and not c.startswith('_') and
+                c != 'default']
         cmds.sort()
         for cmd in cmds:
-            print(Commands.__dict__[cmd].__doc__.strip())
+            print(getattr(Commands, cmd).__doc__.strip())
         print()
         print(parse_search_value.__doc__.strip())
         print()
@@ -1199,13 +1243,9 @@ def main(arguments=None):
     if argv[0].endswith('-safe'):
         try:
             with lock_parent_directory(abspath(builder_file), 15):
-                Commands.__dict__.get(command, Commands.unknown.__func__)()
+                getattr(Commands, command, Commands.unknown)()
         except exceptions.LockTimeout:
             print("Ring/builder dir currently locked.")
             exit(2)
     else:
-        Commands.__dict__.get(command, Commands.unknown.__func__)()
+        getattr(Commands, command, Commands.unknown)()
-
-
-if __name__ == '__main__':
-    main()
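The dispatch change from `Commands.__dict__` to `getattr()` goes hand in hand with the new `@staticmethod` decorators: looking a method up through the class goes through the descriptor protocol and yields something directly callable, whereas `__dict__` hands back the raw `staticmethod` object, which is not callable on Python 2 (hence the old `.__func__` workaround). A small illustration, not Swift code::

    class Commands(object):
        @staticmethod
        def unknown():
            print('unknown command')

        @staticmethod
        def create():
            """usage text for create"""
            print('creating')


    # getattr() unwraps the staticmethod descriptor, so dispatch just works:
    getattr(Commands, 'create', Commands.unknown)()   # -> creating
    getattr(Commands, 'nope', Commands.unknown)()     # -> unknown command

    # The class __dict__ holds the raw descriptor object instead; on
    # Python 2 it is not callable, which is what forced the old
    # Commands.__dict__.get(...).__func__ pattern.
    print(type(Commands.__dict__['create']))          # <... 'staticmethod'>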
@@ -25,6 +25,7 @@ from time import time

 from eventlet import sleep, Timeout
 import six
+import six.moves.cPickle as pickle
 from six.moves.http_client import HTTPException

 from swift.common.bufferedhttp import http_connect
@@ -49,6 +50,30 @@ class DirectClientException(ClientException):
             http_reason=resp.reason, http_headers=headers)


+def _make_req(node, part, method, path, _headers, stype,
+              conn_timeout=5, response_timeout=15):
+    """
+    Make request to backend storage node.
+    (i.e. 'Account', 'Container', 'Object')
+
+    :param node: a node dict from a ring
+    :param part: an integer, the partition number
+    :param method: a string, the HTTP method (e.g. 'PUT', 'DELETE', etc)
+    :param path: a string, the request path
+    :param headers: a dict, header name => value
+    :param stype: a string, describing the type of service
+    :returns: an HTTPResponse object
+    """
+    with Timeout(conn_timeout):
+        conn = http_connect(node['ip'], node['port'], node['device'], part,
+                            method, path, headers=_headers)
+    with Timeout(response_timeout):
+        resp = conn.getresponse()
+        resp.read()
+        if not is_success(resp.status):
+            raise DirectClientException(stype, method, node, part, path, resp)
+    return resp
+
+
 def _get_direct_account_container(path, stype, node, part,
                                   marker=None, limit=None,
                                   prefix=None, delimiter=None, conn_timeout=5,
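`_make_req()` centralizes the two-phase timeout pattern that each `direct_*` helper below used to repeat inline: one `Timeout` guards connection establishment, a second (usually longer) one guards reading the response. A toy, runnable illustration of that pattern, with sleeps standing in for network I/O and made-up function names (requires eventlet)::

    from eventlet import sleep, Timeout

    def connect():          # stands in for http_connect()
        sleep(0.1)

    def get_response():     # stands in for conn.getresponse() + read()
        sleep(0.2)
        return 204

    try:
        with Timeout(5):        # conn_timeout
            connect()
        with Timeout(15):       # response_timeout
            status = get_response()
        print('got %d' % status)
    except Timeout:
        print('node too slow; caller can move on to the next replica')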
@@ -76,6 +101,7 @@ def _get_direct_account_container(path, stype, node, part,
     if not is_success(resp.status):
         resp.read()
         raise DirectClientException(stype, 'GET', node, part, path, resp)
+
     resp_headers = HeaderKeyDict()
     for header, value in resp.getheaders():
         resp_headers[header] = value
@@ -126,16 +152,8 @@ def direct_delete_account(node, part, account, conn_timeout=5,
         headers = {}

     path = '/%s' % account
-    with Timeout(conn_timeout):
-        conn = http_connect(node['ip'], node['port'], node['device'], part,
-                            'DELETE', path,
-                            headers=gen_headers(headers, True))
-    with Timeout(response_timeout):
-        resp = conn.getresponse()
-        resp.read()
-        if not is_success(resp.status):
-            raise DirectClientException('Account', 'DELETE',
-                                        node, part, path, resp)
+    _make_req(node, part, 'DELETE', path, gen_headers(headers, True),
+              'Account', conn_timeout, response_timeout)


 def direct_head_container(node, part, account, container, conn_timeout=5,
@@ -153,15 +171,9 @@ def direct_head_container(node, part, account, container, conn_timeout=5,
     :raises ClientException: HTTP HEAD request failed
     """
     path = '/%s/%s' % (account, container)
-    with Timeout(conn_timeout):
-        conn = http_connect(node['ip'], node['port'], node['device'], part,
-                            'HEAD', path, headers=gen_headers())
-    with Timeout(response_timeout):
-        resp = conn.getresponse()
-        resp.read()
-        if not is_success(resp.status):
-            raise DirectClientException('Container', 'HEAD',
-                                        node, part, path, resp)
+    resp = _make_req(node, part, 'HEAD', path, gen_headers(),
+                     'Container', conn_timeout, response_timeout)
     resp_headers = HeaderKeyDict()
     for header, value in resp.getheaders():
         resp_headers[header] = value
@@ -215,16 +227,8 @@ def direct_delete_container(node, part, account, container, conn_timeout=5,

     path = '/%s/%s' % (account, container)
     add_timestamp = 'x-timestamp' not in (k.lower() for k in headers)
-    with Timeout(conn_timeout):
-        conn = http_connect(node['ip'], node['port'], node['device'], part,
-                            'DELETE', path,
-                            headers=gen_headers(headers, add_timestamp))
-    with Timeout(response_timeout):
-        resp = conn.getresponse()
-        resp.read()
-        if not is_success(resp.status):
-            raise DirectClientException('Container', 'DELETE',
-                                        node, part, path, resp)
+    _make_req(node, part, 'DELETE', path, gen_headers(headers, add_timestamp),
+              'Container', conn_timeout, response_timeout)


 def direct_put_container_object(node, part, account, container, obj,
@@ -236,17 +240,9 @@ def direct_put_container_object(node, part, account, container, obj,
     have_x_timestamp = 'x-timestamp' in (k.lower() for k in headers)

     path = '/%s/%s/%s' % (account, container, obj)
-    with Timeout(conn_timeout):
-        conn = http_connect(node['ip'], node['port'], node['device'], part,
-                            'PUT', path,
-                            headers=gen_headers(headers,
-                                                add_ts=(not have_x_timestamp)))
-    with Timeout(response_timeout):
-        resp = conn.getresponse()
-        resp.read()
-        if not is_success(resp.status):
-            raise DirectClientException('Container', 'PUT',
-                                        node, part, path, resp)
+    _make_req(node, part, 'PUT', path,
+              gen_headers(headers, add_ts=(not have_x_timestamp)),
+              'Container', conn_timeout, response_timeout)


 def direct_delete_container_object(node, part, account, container, obj,
@@ -259,16 +255,8 @@ def direct_delete_container_object(node, part, account, container, obj,
         k.lower() for k in headers))

     path = '/%s/%s/%s' % (account, container, obj)
-    with Timeout(conn_timeout):
-        conn = http_connect(node['ip'], node['port'], node['device'], part,
-                            'DELETE', path, headers=headers)
-
-    with Timeout(response_timeout):
-        resp = conn.getresponse()
-        resp.read()
-        if not is_success(resp.status):
-            raise DirectClientException('Container', 'DELETE',
-                                        node, part, path, resp)
+    _make_req(node, part, 'DELETE', path, headers,
+              'Container', conn_timeout, response_timeout)


 def direct_head_object(node, part, account, container, obj, conn_timeout=5,
@@ -293,15 +281,9 @@ def direct_head_object(node, part, account, container, obj, conn_timeout=5,
     headers = gen_headers(headers)

     path = '/%s/%s/%s' % (account, container, obj)
-    with Timeout(conn_timeout):
-        conn = http_connect(node['ip'], node['port'], node['device'], part,
-                            'HEAD', path, headers=headers)
-    with Timeout(response_timeout):
-        resp = conn.getresponse()
-        resp.read()
-        if not is_success(resp.status):
-            raise DirectClientException('Object', 'HEAD',
-                                        node, part, path, resp)
+    resp = _make_req(node, part, 'HEAD', path, headers,
+                     'Object', conn_timeout, response_timeout)
     resp_headers = HeaderKeyDict()
     for header, value in resp.getheaders():
         resp_headers[header] = value
@@ -337,8 +319,8 @@ def direct_get_object(node, part, account, container, obj, conn_timeout=5,
         resp = conn.getresponse()
     if not is_success(resp.status):
         resp.read()
-        raise DirectClientException('Object', 'GET',
-                                    node, part, path, resp)
+        raise DirectClientException('Object', 'GET', node, part, path, resp)
     if resp_chunk_size:

         def _object_body():
@@ -453,15 +435,8 @@ def direct_post_object(node, part, account, container, name, headers,
     :raises ClientException: HTTP POST request failed
     """
     path = '/%s/%s/%s' % (account, container, name)
-    with Timeout(conn_timeout):
-        conn = http_connect(node['ip'], node['port'], node['device'], part,
-                            'POST', path, headers=gen_headers(headers, True))
-    with Timeout(response_timeout):
-        resp = conn.getresponse()
-        resp.read()
-        if not is_success(resp.status):
-            raise DirectClientException('Object', 'POST',
-                                        node, part, path, resp)
+    _make_req(node, part, 'POST', path, gen_headers(headers, True),
+              'Object', conn_timeout, response_timeout)


 def direct_delete_object(node, part, account, container, obj,
@@ -485,15 +460,36 @@ def direct_delete_object(node, part, account, container, obj,
         k.lower() for k in headers))

     path = '/%s/%s/%s' % (account, container, obj)
+    _make_req(node, part, 'DELETE', path, headers,
+              'Object', conn_timeout, response_timeout)
+
+
+def direct_get_suffix_hashes(node, part, suffixes, conn_timeout=5,
+                             response_timeout=15, headers=None):
+    """
+    Get suffix hashes directly from the object server.
+
+    :param node: node dictionary from the ring
+    :param part: partition the container is on
+    :param conn_timeout: timeout in seconds for establishing the connection
+    :param response_timeout: timeout in seconds for getting the response
+    :param headers: dict to be passed into HTTPConnection headers
+    :returns: dict of suffix hashes
+    :raises ClientException: HTTP REPLICATE request failed
+    """
+    if headers is None:
+        headers = {}
+
+    path = '/%s' % '-'.join(suffixes)
     with Timeout(conn_timeout):
         conn = http_connect(node['ip'], node['port'], node['device'], part,
-                            'DELETE', path, headers=headers)
+                            'REPLICATE', path, headers=gen_headers(headers))
     with Timeout(response_timeout):
         resp = conn.getresponse()
-        resp.read()
         if not is_success(resp.status):
-            raise DirectClientException('Object', 'DELETE',
+            raise DirectClientException('Object', 'REPLICATE',
                                         node, part, path, resp)
+    return pickle.loads(resp.read())


 def retry(func, *args, **kwargs):
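A REPLICATE response body is a pickled mapping of suffix directory to the MD5 of its hashes, which is why the new helper ends with `pickle.loads(resp.read())` instead of discarding the body. A toy round-trip of the same serialization — the suffix and hash values below are made up::

    import six.moves.cPickle as pickle

    suffix_hashes = {'a83': 'd41d8cd98f00b204e9800998ecf8427e',
                     '0f2': '9e107d9d372bb6826bd81d3542a419d6'}

    wire_bytes = pickle.dumps(suffix_hashes, protocol=2)   # server side
    print(pickle.loads(wire_bytes) == suffix_hashes)       # client side -> True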
@@ -20,15 +20,16 @@ import six
 from six.moves import range
 from six.moves import urllib
 import struct
-from sys import exc_info
+from sys import exc_info, exit
 import zlib
 from swift import gettext_ as _
 from time import gmtime, strftime, time
 from zlib import compressobj

-from swift.common.utils import quote
+from swift.common.exceptions import ClientException
 from swift.common.http import HTTP_NOT_FOUND, HTTP_MULTIPLE_CHOICES
 from swift.common.swob import Request
+from swift.common.utils import quote
 from swift.common.wsgi import loadapp, pipeline_property

@@ -807,9 +808,14 @@ class SimpleClient(object):
             self.attempts += 1
             try:
                 return self.base_request(method, **kwargs)
-            except (socket.error, httplib.HTTPException, urllib2.URLError):
+            except (socket.error, httplib.HTTPException, urllib2.URLError) \
+                    as err:
                 if self.attempts > retries:
-                    raise
+                    if isinstance(err, urllib2.HTTPError):
+                        raise ClientException('Raise too many retries',
+                                              http_status=err.getcode())
+                    else:
+                        raise
             sleep(backoff)
             backoff = min(backoff * 2, self.max_backoff)
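The retry loop doubles its sleep after each failed attempt, capped at `max_backoff`. The schedule in isolation (parameter names here are illustrative, not `SimpleClient`'s exact attributes)::

    def backoff_schedule(start=1, max_backoff=64, retries=8):
        backoff = start
        for _ in range(retries):
            yield backoff
            backoff = min(backoff * 2, max_backoff)

    print(list(backoff_schedule()))  # [1, 2, 4, 8, 16, 32, 64, 64]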
@@ -162,6 +162,16 @@ def safe_kill(pid, sig, name):
     os.kill(pid, sig)


+def kill_group(pid, sig):
+    """Send signal to process group
+
+    :param pid: process id
+    :param sig: signal to send
+    """
+    # Negative PID means process group
+    os.kill(-pid, sig)
+
+
 class UnknownCommandError(Exception):
     pass

@@ -285,11 +295,27 @@ class Manager(object):
                 return 0

         # reached interval n watch_pids w/o killing all servers
+        kill_after_timeout = kwargs.get('kill_after_timeout', False)
         for server, pids in server_pids.items():
             if not killed_pids.issuperset(pids):
                 # some pids of this server were not killed
-                print(_('Waited %s seconds for %s to die; giving up') % (
-                    kill_wait, server))
+                if kill_after_timeout:
+                    print(_('Waited %s seconds for %s to die; killing') % (
+                        kill_wait, server))
+                    # Send SIGKILL to all remaining pids
+                    for pid in set(pids.keys()) - killed_pids:
+                        print(_('Signal %s  pid: %s  signal: %s') % (
+                            server, pid, signal.SIGKILL))
+                        # Send SIGKILL to process group
+                        try:
+                            kill_group(pid, signal.SIGKILL)
+                        except OSError as e:
+                            # PID died before kill_group can take action?
+                            if e.errno != errno.ESRCH:
+                                raise e
+                else:
+                    print(_('Waited %s seconds for %s to die; giving up') % (
+                        kill_wait, server))
         return 1

     @command
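`kill_group()` relies on the POSIX convention that `os.kill()` with a negative PID signals the whole process group, which assumes the servers were started as group leaders. A runnable, POSIX-only demonstration (not Swift code)::

    import errno
    import os
    import signal
    import subprocess
    import time

    # Make the child a session (and thus process-group) leader.
    child = subprocess.Popen(['sleep', '60'], preexec_fn=os.setsid)
    time.sleep(0.1)

    try:
        os.kill(-child.pid, signal.SIGKILL)   # note the negative pid
    except OSError as e:
        if e.errno != errno.ESRCH:            # group already gone is fine
            raise

    print(child.wait())  # -9: terminated by SIGKILL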
@@ -56,7 +56,7 @@ from eventlet.green import socket
 from eventlet.pools import Pool
 from eventlet import Timeout
 from six.moves import range
+from swift.common import utils

 DEFAULT_MEMCACHED_PORT = 11211

@@ -101,23 +101,28 @@ class MemcachePoolTimeout(Timeout):


 class MemcacheConnPool(Pool):
-    """Connection pool for Memcache Connections"""
+    """
+    Connection pool for Memcache Connections
+
+    The *server* parameter can be a hostname, an IPv4 address, or an IPv6
+    address with an optional port. See
+    :func:`swift.common.utils.parse_socket_string` for details.
+    """
+
     def __init__(self, server, size, connect_timeout):
         Pool.__init__(self, max_size=size)
-        self.server = server
+        self.host, self.port = utils.parse_socket_string(
+            server, DEFAULT_MEMCACHED_PORT)
         self._connect_timeout = connect_timeout

     def create(self):
-        if ':' in self.server:
-            host, port = self.server.split(':')
-        else:
-            host = self.server
-            port = DEFAULT_MEMCACHED_PORT
-        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+        addrs = socket.getaddrinfo(self.host, self.port, socket.AF_UNSPEC,
+                                   socket.SOCK_STREAM)
+        family, socktype, proto, canonname, sockaddr = addrs[0]
+        sock = socket.socket(family, socket.SOCK_STREAM)
         sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
         with Timeout(self._connect_timeout):
-            sock.connect((host, int(port)))
+            sock.connect(sockaddr)
         return (sock.makefile(), sock)

     def get(self):
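The old `server.split(':')` parsing broke for IPv6 addresses, which themselves contain colons, and it hard-coded `AF_INET` regardless of what the name resolved to. `socket.getaddrinfo()` handles hostnames, IPv4, and IPv6 alike and hands back the matching address family. A standalone sketch with the stdlib socket module (11211 is just the memcached default used above)::

    import socket

    for server in ('127.0.0.1', 'localhost', '::1'):
        addrs = socket.getaddrinfo(server, 11211, socket.AF_UNSPEC,
                                   socket.SOCK_STREAM)
        family, socktype, proto, canonname, sockaddr = addrs[0]
        # AF_INET yields a 2-tuple (host, port); AF_INET6 a 4-tuple
        print(family, sockaddr)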
@@ -13,6 +13,183 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

+"""
+Middleware that will perform many operations on a single request.
+
+---------------
+Extract Archive
+---------------
+
+Expand tar files into a Swift account. Request must be a PUT with the
+query parameter ``?extract-archive=format`` specifying the format of archive
+file. Accepted formats are tar, tar.gz, and tar.bz2.
+
+For a PUT to the following url::
+
+    /v1/AUTH_Account/$UPLOAD_PATH?extract-archive=tar.gz
+
+UPLOAD_PATH is where the files will be expanded to. UPLOAD_PATH can be a
+container, a pseudo-directory within a container, or an empty string. The
+destination of a file in the archive will be built as follows::
+
+    /v1/AUTH_Account/$UPLOAD_PATH/$FILE_PATH
+
+Where FILE_PATH is the file name from the listing in the tar file.
+
+If the UPLOAD_PATH is an empty string, containers will be auto created
+accordingly and files in the tar that would not map to any container (files
+in the base directory) will be ignored.
+
+Only regular files will be uploaded. Empty directories, symlinks, etc will
+not be uploaded.
+
+------------
+Content Type
+------------
+
+If the content-type header is set in the extract-archive call, Swift will
+assign that content-type to all the underlying files. The bulk middleware
+will extract the archive file and send the internal files using PUT
+operations using the same headers from the original request
+(e.g. auth-tokens, content-Type, etc.). Notice that any middleware call
+that follows the bulk middleware does not know if this was a bulk request
+or if these were individual requests sent by the user.
+
+In order to make Swift detect the content-type for the files based on the
+file extension, the content-type in the extract-archive call should not be
+set. Alternatively, it is possible to explicitly tell Swift to detect the
+content type using this header::
+
+    X-Detect-Content-Type: true
+
+For example::
+
+    curl -X PUT http://127.0.0.1/v1/AUTH_acc/cont/$?extract-archive=tar
+     -T backup.tar
+     -H "Content-Type: application/x-tar"
+     -H "X-Auth-Token: xxx"
+     -H "X-Detect-Content-Type: true"
+
+------------------
+Assigning Metadata
+------------------
+
+The tar file format (1) allows for UTF-8 key/value pairs to be associated
+with each file in an archive. If a file has extended attributes, then tar
+will store those as key/value pairs. The bulk middleware can read those
+extended attributes and convert them to Swift object metadata. Attributes
+starting with "user.meta" are converted to object metadata, and
+"user.mime_type" is converted to Content-Type.
+
+For example::
+
+    setfattr -n user.mime_type -v "application/python-setup" setup.py
+    setfattr -n user.meta.lunch -v "burger and fries" setup.py
+    setfattr -n user.meta.dinner -v "baked ziti" setup.py
+    setfattr -n user.stuff -v "whee" setup.py
+
+Will get translated to headers::
+
+    Content-Type: application/python-setup
+    X-Object-Meta-Lunch: burger and fries
+    X-Object-Meta-Dinner: baked ziti
+
+The bulk middleware will handle xattrs stored by both GNU and BSD tar (2).
+Only xattrs ``user.mime_type`` and ``user.meta.*`` are processed. Other
+attributes are ignored.
+
+Notes:
+
+(1) The POSIX 1003.1-2001 (pax) format. The default format on GNU tar
+1.27.1 or later.
+
+(2) Even with pax-format tarballs, different encoders store xattrs slightly
+differently; for example, GNU tar stores the xattr "user.userattribute" as
+pax header "SCHILY.xattr.user.userattribute", while BSD tar (which uses
+libarchive) stores it as "LIBARCHIVE.xattr.user.userattribute".
+
+--------
+Response
+--------
+
+The response from bulk operations functions differently from other Swift
+responses. This is because a short request body sent from the client could
+result in many operations on the proxy server and precautions need to be
+made to prevent the request from timing out due to lack of activity. To
+this end, the client will always receive a 200 OK response, regardless of
+the actual success of the call. The body of the response must be parsed to
+determine the actual success of the operation. In addition to this the
+client may receive zero or more whitespace characters prepended to the
+actual response body while the proxy server is completing the request.
+
+The format of the response body defaults to text/plain but can be either
+json or xml depending on the ``Accept`` header. Acceptable formats are
+``text/plain``, ``application/json``, ``application/xml``, and ``text/xml``.
+An example body is as follows::
+
+    {"Response Status": "201 Created",
+     "Response Body": "",
+     "Errors": [],
+     "Number Files Created": 10}
+
+If all valid files were uploaded successfully the Response Status will be
+201 Created. If any files failed to be created the response code
+corresponds to the subrequest's error. Possible codes are 400, 401, 502 (on
+server errors), etc. In both cases the response body will specify the
+number of files successfully uploaded and a list of the files that failed.
+
+There are proxy logs created for each file (which becomes a subrequest) in
+the tar. The subrequest's proxy log will have a swift.source set to "EA"
+the log's content length will reflect the unzipped size of the file. If
+double proxy-logging is used the leftmost logger will not have a
+swift.source set and the content length will reflect the size of the
+payload sent to the proxy (the unexpanded size of the tar.gz).
+
+-----------
+Bulk Delete
+-----------
+
+Will delete multiple objects or containers from their account with a
+single request. Responds to POST requests with query parameter
+``?bulk-delete`` set. The request url is your storage url. The Content-Type
+should be set to ``text/plain``. The body of the POST request will be a
+newline separated list of url encoded objects to delete. You can delete
+10,000 (configurable) objects per request. The objects specified in the
+POST request body must be URL encoded and in the form::
+
+    /container_name/obj_name
+
+or for a container (which must be empty at time of delete)::
+
+    /container_name
+
+The response is similar to extract archive as in every response will be a
+200 OK and you must parse the response body for actual results. An example
+response is::
+
+    {"Number Not Found": 0,
+     "Response Status": "200 OK",
+     "Response Body": "",
+     "Errors": [],
+     "Number Deleted": 6}
+
+If all items were successfully deleted (or did not exist), the Response
+Status will be 200 OK. If any failed to delete, the response code
+corresponds to the subrequest's error. Possible codes are 400, 401, 502 (on
+server errors), etc. In all cases the response body will specify the number
+of items successfully deleted, not found, and a list of those that failed.
+The return body will be formatted in the way specified in the request's
+``Accept`` header. Acceptable formats are ``text/plain``, ``application/json``,
+``application/xml``, and ``text/xml``.
+
+There are proxy logs created for each object or container (which becomes a
+subrequest) that is deleted. The subrequest's proxy log will have a
+swift.source set to "BD" the log's content length of 0. If double
+proxy-logging is used the leftmost logger will not have a
+swift.source set and the content length will reflect the size of the
+payload sent to the proxy (the list of objects/containers to be deleted).
+"""

 import json
 from six.moves.urllib.parse import quote, unquote
 import tarfile
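For comparison with the extract-archive curl example above, here is an illustrative client-side sketch that builds a bulk-delete body exactly as the docstring specifies — one URL-encoded `/container/object` per line, POSTed with `?bulk-delete`. The storage URL and token are placeholders, and response parsing is left to the caller since the middleware always answers 200 OK::

    from six.moves.urllib.parse import quote
    from six.moves.urllib.request import Request, urlopen

    storage_url = 'http://127.0.0.1:8080/v1/AUTH_test'   # placeholder
    token = 'xxx'                                        # placeholder

    paths = [u'/cont/obj one', u'/cont/obj_two', u'/oldcont']  # empty cont.
    body = u'\n'.join(
        quote(p.encode('utf-8')) for p in paths).encode('ascii')

    req = Request(storage_url + '?bulk-delete', data=body,
                  headers={'X-Auth-Token': token,
                           'Content-Type': 'text/plain',
                           'Accept': 'application/json'})
    resp = urlopen(req)      # always 200 OK; parse the body for results
    print(resp.read())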
@@ -94,170 +271,6 @@ def pax_key_to_swift_header(pax_key):


 class Bulk(object):
"""
|
|
||||||
Middleware that will do many operations on a single request.
|
|
||||||
|
|
||||||
Extract Archive:
|
|
||||||
|
|
||||||
Expand tar files into a swift account. Request must be a PUT with the
|
|
||||||
query parameter ?extract-archive=format specifying the format of archive
|
|
||||||
file. Accepted formats are tar, tar.gz, and tar.bz2.
|
|
||||||
|
|
||||||
For a PUT to the following url:
|
|
||||||
|
|
||||||
/v1/AUTH_Account/$UPLOAD_PATH?extract-archive=tar.gz
|
|
||||||
|
|
||||||
UPLOAD_PATH is where the files will be expanded to. UPLOAD_PATH can be a
|
|
||||||
container, a pseudo-directory within a container, or an empty string. The
|
|
||||||
destination of a file in the archive will be built as follows:
|
|
||||||
|
|
||||||
/v1/AUTH_Account/$UPLOAD_PATH/$FILE_PATH
|
|
||||||
|
|
||||||
Where FILE_PATH is the file name from the listing in the tar file.
|
|
||||||
|
|
||||||
If the UPLOAD_PATH is an empty string, containers will be auto created
|
|
||||||
accordingly and files in the tar that would not map to any container (files
|
|
||||||
in the base directory) will be ignored.
|
|
||||||
|
|
||||||
Only regular files will be uploaded. Empty directories, symlinks, etc will
|
|
||||||
not be uploaded.
|
|
||||||
|
|
||||||
Content Type:
|
|
||||||
|
|
||||||
If the content-type header is set in the extract-archive call, Swift will
|
|
||||||
assign that content-type to all the underlying files. The bulk middleware
|
|
||||||
will extract the archive file and send the internal files using PUT
|
|
||||||
operations using the same headers from the original request
|
|
||||||
(e.g. auth-tokens, content-Type, etc.). Notice that any middleware call
|
|
||||||
that follows the bulk middleware does not know if this was a bulk request
|
|
||||||
or if these were individual requests sent by the user.
|
|
||||||
|
|
||||||
In order to make Swift detect the content-type for the files based on the
|
|
||||||
file extension, the content-type in the extract-archive call should not be
|
|
||||||
set. Alternatively, it is possible to explicitly tell swift to detect the
|
|
||||||
content type using this header:
|
|
||||||
|
|
||||||
X-Detect-Content-Type:true
|
|
||||||
|
|
||||||
For example:
|
|
||||||
|
|
||||||
curl -X PUT http://127.0.0.1/v1/AUTH_acc/cont/$?extract-archive=tar -T
|
|
||||||
backup.tar -H "Content-Type: application/x-tar" -H "X-Auth-Token: xxx"
|
|
||||||
-H "X-Detect-Content-Type:true"
|
|
||||||
|
|
||||||
Assigning Metadata:
|
|
||||||
|
|
||||||
The tar file format (1) allows for UTF-8 key/value pairs to be associated
|
|
||||||
with each file in an archive. If a file has extended attributes, then tar
|
|
||||||
will store those as key/value pairs. The bulk middleware can read those
|
|
||||||
extended attributes and convert them to Swift object metadata. Attributes
|
|
||||||
starting with "user.meta" are converted to object metadata, and
|
|
||||||
"user.mime_type" is converted to Content-Type.
|
|
||||||
|
|
||||||
For example:
|
|
||||||
|
|
||||||
setfattr -n user.mime_type -v "application/python-setup" setup.py
|
|
||||||
setfattr -n user.meta.lunch -v "burger and fries" setup.py
|
|
||||||
setfattr -n user.meta.dinner -v "baked ziti" setup.py
|
|
||||||
setfattr -n user.stuff -v "whee" setup.py
|
|
||||||
|
|
||||||
Will get translated to headers:
|
|
||||||
|
|
||||||
Content-Type: application/python-setup
|
|
||||||
X-Object-Meta-Lunch: burger and fries
|
|
||||||
X-Object-Meta-Dinner: baked ziti
|
|
||||||
|
|
||||||
The bulk middleware will handle xattrs stored by both GNU and BSD tar (2).
|
|
||||||
Only xattrs user.mime_type and user.meta.* are processed. Other attributes
|
|
||||||
are ignored.
|
|
||||||
|
|
||||||
Notes:
|
|
||||||
|
|
||||||
(1) The POSIX 1003.1-2001 (pax) format. The default format on GNU tar
|
|
||||||
1.27.1 or later.
|
|
||||||
|
|
||||||
(2) Even with pax-format tarballs, different encoders store xattrs slightly
|
|
||||||
differently; for example, GNU tar stores the xattr "user.userattribute" as
|
|
||||||
pax header "SCHILY.xattr.user.userattribute", while BSD tar (which uses
|
|
||||||
libarchive) stores it as "LIBARCHIVE.xattr.user.userattribute".
|
|
||||||
|
|
||||||
Response:
|
|
||||||
|
|
||||||
The response from bulk operations functions differently from other swift
|
|
||||||
responses. This is because a short request body sent from the client could
|
|
||||||
result in many operations on the proxy server and precautions need to be
|
|
||||||
made to prevent the request from timing out due to lack of activity. To
|
|
||||||
this end, the client will always receive a 200 OK response, regardless of
|
|
||||||
the actual success of the call. The body of the response must be parsed to
|
|
||||||
determine the actual success of the operation. In addition to this the
|
|
||||||
client may receive zero or more whitespace characters prepended to the
|
|
||||||
actual response body while the proxy server is completing the request.
|
|
||||||
|
|
||||||
The format of the response body defaults to text/plain but can be either
|
|
||||||
json or xml depending on the Accept header. Acceptable formats are
|
|
||||||
text/plain, application/json, application/xml, and text/xml. An example
|
|
||||||
body is as follows:
|
|
||||||
|
|
||||||
{"Response Status": "201 Created",
|
|
||||||
"Response Body": "",
|
|
||||||
"Errors": [],
|
|
||||||
"Number Files Created": 10}
|
|
||||||
|
|
||||||
If all valid files were uploaded successfully the Response Status will be
|
|
||||||
201 Created. If any files failed to be created the response code
|
|
||||||
corresponds to the subrequest's error. Possible codes are 400, 401, 502 (on
|
|
||||||
server errors), etc. In both cases the response body will specify the
|
|
||||||
number of files successfully uploaded and a list of the files that failed.
|
|
||||||
|
|
||||||
There are proxy logs created for each file (which becomes a subrequest) in
|
|
||||||
the tar. The subrequest's proxy log will have a swift.source set to "EA"
|
|
||||||
the log's content length will reflect the unzipped size of the file. If
|
|
||||||
double proxy-logging is used the leftmost logger will not have a
|
|
||||||
swift.source set and the content length will reflect the size of the
|
|
||||||
payload sent to the proxy (the unexpanded size of the tar.gz).
|
|
||||||
|
|
||||||
Bulk Delete:
|
|
||||||
|
|
||||||
Will delete multiple objects or containers from their account with a
|
|
||||||
single request. Responds to POST requests with query parameter
|
|
||||||
?bulk-delete set. The request url is your storage url. The Content-Type
|
|
||||||
should be set to text/plain. The body of the POST request will be a
|
|
||||||
newline separated list of url encoded objects to delete. You can delete
|
|
||||||
10,000 (configurable) objects per request. The objects specified in the
|
|
||||||
POST request body must be URL encoded and in the form:
|
|
||||||
|
|
||||||
/container_name/obj_name
|
|
||||||
|
|
||||||
or for a container (which must be empty at time of delete)
|
|
||||||
|
|
||||||
/container_name
|
|
||||||
|
|
||||||
The response is similar to extract archive as in every response will be a
|
|
||||||
200 OK and you must parse the response body for actual results. An example
|
|
||||||
response is:
|
|
||||||
|
|
||||||
{"Number Not Found": 0,
|
|
||||||
"Response Status": "200 OK",
|
|
||||||
"Response Body": "",
|
|
||||||
"Errors": [],
|
|
||||||
"Number Deleted": 6}
|
|
||||||
|
|
||||||
If all items were successfully deleted (or did not exist), the Response
|
|
||||||
Status will be 200 OK. If any failed to delete, the response code
|
|
||||||
corresponds to the subrequest's error. Possible codes are 400, 401, 502 (on
|
|
||||||
server errors), etc. In all cases the response body will specify the number
|
|
||||||
of items successfully deleted, not found, and a list of those that failed.
|
|
||||||
The return body will be formatted in the way specified in the request's
|
|
||||||
Accept header. Acceptable formats are text/plain, application/json,
|
|
||||||
application/xml, and text/xml.
|
|
||||||
|
|
||||||
There are proxy logs created for each object or container (which becomes a
|
|
||||||
subrequest) that is deleted. The subrequest's proxy log will have a
|
|
||||||
swift.source set to "BD" the log's content length of 0. If double
|
|
||||||
proxy-logging is used the leftmost logger will not have a
|
|
||||||
swift.source set and the content length will reflect the size of the
|
|
||||||
payload sent to the proxy (the list of objects/containers to be deleted).
|
|
||||||
"""
|
|
||||||
|
|
||||||
     def __init__(self, app, conf, max_containers_per_extraction=10000,
                  max_failed_extractions=1000, max_deletes_per_request=10000,
@@ -57,12 +57,11 @@ The format of the list will be:
      "range": "1048576-2097151"}, ...]

 The number of object segments is limited to a configurable amount, default
-1000. Each segment, except for the final one, must be at least 1 megabyte
-(configurable). On upload, the middleware will head every segment passed in to
-verify:
+1000. Each segment must be at least 1 byte. On upload, the middleware will
+head every segment passed in to verify:

 1. the segment exists (i.e. the HEAD was successful);
-2. the segment meets minimum size requirements (if not the last segment);
+2. the segment meets minimum size requirements;
 3. if the user provided a non-null etag, the etag matches;
 4. if the user provided a non-null size_bytes, the size_bytes matches; and
 5. if the user provided a range, it is a singular, syntactically correct range
@@ -121,8 +120,9 @@ finally bytes 2095104 through 2097152 (i.e., the last 2048 bytes) of

 .. note::

-    The minimum sized range is min_segment_size, which by
-    default is 1048576 (1MB).
+    The minimum sized range is 1 byte. This is the same as the minimum
+    segment size.


 -------------------------
@@ -221,7 +221,7 @@ from swift.common.middleware.bulk import get_response_body, \
     ACCEPTABLE_FORMATS, Bulk


-DEFAULT_MIN_SEGMENT_SIZE = 1024 * 1024  # 1 MiB
+DEFAULT_RATE_LIMIT_UNDER_SIZE = 1024 * 1024  # 1 MiB
 DEFAULT_MAX_MANIFEST_SEGMENTS = 1000
 DEFAULT_MAX_MANIFEST_SIZE = 1024 * 1024 * 2  # 2 MiB

@@ -231,7 +231,7 @@ OPTIONAL_SLO_KEYS = set(['range'])
 ALLOWED_SLO_KEYS = REQUIRED_SLO_KEYS | OPTIONAL_SLO_KEYS


-def parse_and_validate_input(req_body, req_path, min_segment_size):
+def parse_and_validate_input(req_body, req_path):
     """
     Given a request body, parses it and returns a list of dictionaries.

@@ -269,7 +269,6 @@ def parse_and_validate_input(req_body, req_path, min_segment_size):
     vrs, account, _junk = split_path(req_path, 3, 3, True)

     errors = []
-    num_segs = len(parsed_data)
     for seg_index, seg_dict in enumerate(parsed_data):
         if not isinstance(seg_dict, dict):
             errors.append("Index %d: not a JSON object" % seg_index)
@@ -315,10 +314,10 @@ def parse_and_validate_input(req_body, req_path, min_segment_size):
         except (TypeError, ValueError):
             errors.append("Index %d: invalid size_bytes" % seg_index)
             continue
-        if (seg_size < min_segment_size and seg_index < num_segs - 1):
-            errors.append("Index %d: too small; each segment, except "
-                          "the last, must be at least %d bytes."
-                          % (seg_index, min_segment_size))
+        if seg_size < 1:
+            errors.append("Index %d: too small; each segment must be "
+                          "at least 1 byte."
+                          % (seg_index,))
             continue

         obj_path = '/'.join(['', vrs, account, seg_dict['path'].lstrip('/')])
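The validation loop above accumulates per-index error strings rather than failing fast, so one bad manifest entry does not mask the rest. The new size check in isolation, with a made-up manifest (a sketch, not the middleware's full validation)::

    def validate_sizes(parsed_data):
        errors = []
        for seg_index, seg_dict in enumerate(parsed_data):
            seg_size = seg_dict.get('size_bytes')
            if seg_size is None:
                continue              # unknown size: checked at HEAD time
            try:
                seg_size = int(seg_size)
            except (TypeError, ValueError):
                errors.append("Index %d: invalid size_bytes" % seg_index)
                continue
            if seg_size < 1:
                errors.append("Index %d: too small; each segment must be "
                              "at least 1 byte." % (seg_index,))
        return errors

    manifest = [{'path': '/c/a', 'size_bytes': 1048576},
                {'path': '/c/b', 'size_bytes': 0},
                {'path': '/c/c', 'size_bytes': 'huge'}]
    print(validate_sizes(manifest))   # errors for indexes 1 and 2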
@@ -461,13 +460,13 @@ class SloGetContext(WSGIContext):
                 # no bytes are needed from this or any future segment
                 break

-            range = seg_dict.get('range')
-            if range is None:
+            seg_range = seg_dict.get('range')
+            if seg_range is None:
                 range_start, range_end = 0, seg_length - 1
             else:
                 # We already validated and supplied concrete values
                 # for the range on upload
-                range_start, range_end = map(int, range.split('-'))
+                range_start, range_end = map(int, seg_range.split('-'))

             if config_true_value(seg_dict.get('sub_slo')):
                 # do this check here so that we can avoid fetching this last
@@ -662,10 +661,17 @@ class SloGetContext(WSGIContext):
         plain_listing_iter = self._segment_listing_iterator(
             req, ver, account, segments)

+        def is_small_segment((seg_dict, start_byte, end_byte)):
+            start = 0 if start_byte is None else start_byte
+            end = int(seg_dict['bytes']) - 1 if end_byte is None else end_byte
+            is_small = (end - start + 1) < self.slo.rate_limit_under_size
+            return is_small
+
         ratelimited_listing_iter = RateLimitedIterator(
             plain_listing_iter,
             self.slo.rate_limit_segments_per_sec,
-            limit_after=self.slo.rate_limit_after_segment)
+            limit_after=self.slo.rate_limit_after_segment,
+            ratelimit_if=is_small_segment)

         # self._segment_listing_iterator gives us 3-tuples of (segment dict,
         # start byte, end byte), but SegmentedIterable wants (obj path, etag,
@@ -716,7 +722,7 @@ class StaticLargeObject(object):
     :param conf: The configuration dict for the middleware.
     """

-    def __init__(self, app, conf, min_segment_size=DEFAULT_MIN_SEGMENT_SIZE,
+    def __init__(self, app, conf,
                  max_manifest_segments=DEFAULT_MAX_MANIFEST_SEGMENTS,
                  max_manifest_size=DEFAULT_MAX_MANIFEST_SIZE):
         self.conf = conf
@@ -724,12 +730,13 @@ class StaticLargeObject(object):
         self.logger = get_logger(conf, log_route='slo')
         self.max_manifest_segments = max_manifest_segments
         self.max_manifest_size = max_manifest_size
-        self.min_segment_size = min_segment_size
         self.max_get_time = int(self.conf.get('max_get_time', 86400))
+        self.rate_limit_under_size = int(self.conf.get(
+            'rate_limit_under_size', DEFAULT_RATE_LIMIT_UNDER_SIZE))
         self.rate_limit_after_segment = int(self.conf.get(
             'rate_limit_after_segment', '10'))
         self.rate_limit_segments_per_sec = int(self.conf.get(
|
self.rate_limit_segments_per_sec = int(self.conf.get(
|
||||||
'rate_limit_segments_per_sec', '0'))
|
'rate_limit_segments_per_sec', '1'))
|
||||||
self.bulk_deleter = Bulk(app, {}, logger=self.logger)
|
self.bulk_deleter = Bulk(app, {}, logger=self.logger)
|
||||||
|
|
||||||
def handle_multipart_get_or_head(self, req, start_response):
|
def handle_multipart_get_or_head(self, req, start_response):
|
||||||
|
@ -783,7 +790,7 @@ class StaticLargeObject(object):
|
||||||
raise HTTPLengthRequired(request=req)
|
raise HTTPLengthRequired(request=req)
|
||||||
parsed_data = parse_and_validate_input(
|
parsed_data = parse_and_validate_input(
|
||||||
req.body_file.read(self.max_manifest_size),
|
req.body_file.read(self.max_manifest_size),
|
||||||
req.path, self.min_segment_size)
|
req.path)
|
||||||
problem_segments = []
|
problem_segments = []
|
||||||
|
|
||||||
if len(parsed_data) > self.max_manifest_segments:
|
if len(parsed_data) > self.max_manifest_segments:
|
||||||
|
@ -812,6 +819,7 @@ class StaticLargeObject(object):
|
||||||
new_env['CONTENT_LENGTH'] = 0
|
new_env['CONTENT_LENGTH'] = 0
|
||||||
new_env['HTTP_USER_AGENT'] = \
|
new_env['HTTP_USER_AGENT'] = \
|
||||||
'%s MultipartPUT' % req.environ.get('HTTP_USER_AGENT')
|
'%s MultipartPUT' % req.environ.get('HTTP_USER_AGENT')
|
||||||
|
|
||||||
if obj_path != last_obj_path:
|
if obj_path != last_obj_path:
|
||||||
last_obj_path = obj_path
|
last_obj_path = obj_path
|
||||||
head_seg_resp = \
|
head_seg_resp = \
|
||||||
|
@ -840,12 +848,10 @@ class StaticLargeObject(object):
|
||||||
seg_dict['range'] = '%d-%d' % (rng[0], rng[1] - 1)
|
seg_dict['range'] = '%d-%d' % (rng[0], rng[1] - 1)
|
||||||
segment_length = rng[1] - rng[0]
|
segment_length = rng[1] - rng[0]
|
||||||
|
|
||||||
if segment_length < self.min_segment_size and \
|
if segment_length < 1:
|
||||||
index < len(parsed_data) - 1:
|
|
||||||
problem_segments.append(
|
problem_segments.append(
|
||||||
[quote(obj_name),
|
[quote(obj_name),
|
||||||
'Too small; each segment, except the last, must be '
|
'Too small; each segment must be at least 1 byte.'])
|
||||||
'at least %d bytes.' % self.min_segment_size])
|
|
||||||
total_size += segment_length
|
total_size += segment_length
|
||||||
if seg_dict['size_bytes'] is not None and \
|
if seg_dict['size_bytes'] is not None and \
|
||||||
seg_dict['size_bytes'] != head_seg_resp.content_length:
|
seg_dict['size_bytes'] != head_seg_resp.content_length:
|
||||||
|
@ -1045,18 +1051,17 @@ def filter_factory(global_conf, **local_conf):
|
||||||
DEFAULT_MAX_MANIFEST_SEGMENTS))
|
DEFAULT_MAX_MANIFEST_SEGMENTS))
|
||||||
max_manifest_size = int(conf.get('max_manifest_size',
|
max_manifest_size = int(conf.get('max_manifest_size',
|
||||||
DEFAULT_MAX_MANIFEST_SIZE))
|
DEFAULT_MAX_MANIFEST_SIZE))
|
||||||
min_segment_size = int(conf.get('min_segment_size',
|
|
||||||
DEFAULT_MIN_SEGMENT_SIZE))
|
|
||||||
|
|
||||||
register_swift_info('slo',
|
register_swift_info('slo',
|
||||||
max_manifest_segments=max_manifest_segments,
|
max_manifest_segments=max_manifest_segments,
|
||||||
max_manifest_size=max_manifest_size,
|
max_manifest_size=max_manifest_size,
|
||||||
min_segment_size=min_segment_size)
|
# this used to be configurable; report it as 1 for
|
||||||
|
# clients that might still care
|
||||||
|
min_segment_size=1)
|
||||||
|
|
||||||
def slo_filter(app):
|
def slo_filter(app):
|
||||||
return StaticLargeObject(
|
return StaticLargeObject(
|
||||||
app, conf,
|
app, conf,
|
||||||
max_manifest_segments=max_manifest_segments,
|
max_manifest_segments=max_manifest_segments,
|
||||||
max_manifest_size=max_manifest_size,
|
max_manifest_size=max_manifest_size)
|
||||||
min_segment_size=min_segment_size)
|
|
||||||
return slo_filter
|
return slo_filter
|
||||||
|
|
|
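The hunks above drop the configurable per-segment minimum in favor of a rate-limiting threshold: only segments smaller than ``rate_limit_under_size`` (1 MiB by default) count against the segments-per-second limit. A minimal standalone sketch of the predicate, with the constant inlined::

    rate_limit_under_size = 1024 * 1024  # DEFAULT_RATE_LIMIT_UNDER_SIZE

    def is_small_segment(seg_dict, start_byte, end_byte):
        start = 0 if start_byte is None else start_byte
        end = int(seg_dict['bytes']) - 1 if end_byte is None else end_byte
        return (end - start + 1) < rate_limit_under_size

    print(is_small_segment({'bytes': '2048'}, None, None))            # True
    print(is_small_segment({'bytes': str(5 * 2 ** 20)}, None, None))  # False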
@@ -1,4 +1,4 @@
-# Copyright (c) 2010-2012 OpenStack Foundation
+# Copyright (c) 2010-2016 OpenStack Foundation
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -68,6 +68,12 @@ the .../listing.css style sheet. If you "view source" in your browser on a
 listing page, you will see the well defined document structure that can be
 styled.

+By default, the listings will be rendered with a label of
+"Listing of /v1/account/container/path". This can be altered by
+setting a ``X-Container-Meta-Web-Listings-Label: <label>``. For example,
+if the label is set to "example.com", a label of
+"Listing of example.com/path" will be used instead.
+
 The content-type of directory marker objects can be modified by setting
 the ``X-Container-Meta-Web-Directory-Type`` header. If the header is not set,
 application/directory is used by default. Directory marker objects are
@@ -150,7 +156,7 @@ class _StaticWebContext(WSGIContext):
         self.agent = '%(orig)s StaticWeb'
         # Results from the last call to self._get_container_info.
         self._index = self._error = self._listings = self._listings_css = \
-            self._dir_type = None
+            self._dir_type = self._listings_label = None

     def _error_response(self, response, env, start_response):
         """
@@ -199,6 +205,7 @@ class _StaticWebContext(WSGIContext):
         self._index = meta.get('web-index', '').strip()
         self._error = meta.get('web-error', '').strip()
         self._listings = meta.get('web-listings', '').strip()
+        self._listings_label = meta.get('web-listings-label', '').strip()
         self._listings_css = meta.get('web-listings-css', '').strip()
         self._dir_type = meta.get('web-directory-type', '').strip()

@@ -210,12 +217,18 @@ class _StaticWebContext(WSGIContext):
         :param start_response: The original WSGI start_response hook.
         :param prefix: Any prefix desired for the container listing.
         """
+        label = env['PATH_INFO']
+        if self._listings_label:
+            groups = env['PATH_INFO'].split('/')
+            label = '{0}/{1}'.format(self._listings_label,
+                                     '/'.join(groups[4:]))
+
         if not config_true_value(self._listings):
             body = '<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 ' \
                 'Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">\n' \
                 '<html>\n' \
                 '<head>\n' \
-                '<title>Listing of %s</title>\n' % cgi.escape(env['PATH_INFO'])
+                '<title>Listing of %s</title>\n' % cgi.escape(label)
             if self._listings_css:
                 body += '  <link rel="stylesheet" type="text/css" ' \
                         'href="%s" />\n' % self._build_css_path(prefix or '')
@@ -261,8 +274,7 @@ class _StaticWebContext(WSGIContext):
             'Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">\n' \
             '<html>\n' \
             ' <head>\n' \
-            '  <title>Listing of %s</title>\n' % \
-            cgi.escape(env['PATH_INFO'])
+            '  <title>Listing of %s</title>\n' % cgi.escape(label)
         if self._listings_css:
             body += '  <link rel="stylesheet" type="text/css" ' \
                     'href="%s" />\n' % (self._build_css_path(prefix))
@@ -281,8 +293,7 @@ class _StaticWebContext(WSGIContext):
                 '   <th class="colname">Name</th>\n' \
                 '   <th class="colsize">Size</th>\n' \
                 '   <th class="coldate">Date</th>\n' \
-                '  </tr>\n' % \
-                cgi.escape(env['PATH_INFO'])
+                '  </tr>\n' % cgi.escape(label)
         if prefix:
             body += '  <tr id="parent" class="item">\n' \
                     '   <td class="colname"><a href="../">../</a></td>\n' \
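A standalone sketch of the label arithmetic from the listing hunks above; the path and label values are illustrative::

    path_info = '/v1/AUTH_test/container/some/path'
    listings_label = 'example.com'  # X-Container-Meta-Web-Listings-Label
    groups = path_info.split('/')
    label = '{0}/{1}'.format(listings_label, '/'.join(groups[4:]))
    print(label)  # example.com/some/path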
@@ -38,6 +38,9 @@ from swift.common.utils import config_read_reseller_options
 from swift.proxy.controllers.base import get_account_info


+DEFAULT_TOKEN_LIFE = 86400
+
+
 class TempAuth(object):
     """
     Test authentication and authorization system.
@@ -181,7 +184,7 @@ class TempAuth(object):
             self.auth_prefix = '/' + self.auth_prefix
         if not self.auth_prefix.endswith('/'):
             self.auth_prefix += '/'
-        self.token_life = int(conf.get('token_life', 86400))
+        self.token_life = int(conf.get('token_life', DEFAULT_TOKEN_LIFE))
         self.allow_overrides = config_true_value(
             conf.get('allow_overrides', 't'))
         self.storage_url_scheme = conf.get('storage_url_scheme', 'default')
@@ -631,7 +634,8 @@ class TempAuth(object):
         req.start_time = time()
         handler = None
         try:
-            version, account, user, _junk = req.split_path(1, 4, True)
+            version, account, user, _junk = split_path(req.path_info,
+                                                       1, 4, True)
         except ValueError:
             self.logger.increment('errors')
             return HTTPNotFound(request=req)
@@ -765,7 +769,8 @@ class TempAuth(object):
             memcache_client.set(memcache_user_key, token,
                                 time=float(expires - time()))
         resp = Response(request=req, headers={
-            'x-auth-token': token, 'x-storage-token': token})
+            'x-auth-token': token, 'x-storage-token': token,
+            'x-auth-token-expires': str(int(expires - time()))})
         url = self.users[account_user]['url'].replace('$HOST', resp.host_url)
         if self.storage_url_scheme != 'default':
             url = self.storage_url_scheme + ':' + url.split(':', 1)[1]
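The new ``x-auth-token-expires`` header reports the remaining token lifetime in whole seconds; the arithmetic in isolation (values illustrative)::

    import time

    token_life = 86400                    # DEFAULT_TOKEN_LIFE
    expires = time.time() + token_life    # recorded when the token was minted
    print(str(int(expires - time.time())))  # e.g. '86399' by response time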
@@ -115,6 +115,7 @@ Disable versioning from a container (x is any value except empty)::
     -H "X-Remove-Versions-Location: x" http://<storage_url>/container
 """

+import calendar
 import json
 import six
 from six.moves.urllib.parse import quote, unquote
@@ -209,9 +210,9 @@ class VersionedWritesContext(WSGIContext):
         lprefix = prefix_len + object_name + '/'
         ts_source = hresp.environ.get('swift_x_timestamp')
         if ts_source is None:
-            ts_source = time.mktime(time.strptime(
+            ts_source = calendar.timegm(time.strptime(
                 hresp.headers['last-modified'],
                 '%a, %d %b %Y %H:%M:%S GMT'))
         new_ts = Timestamp(ts_source).internal
         vers_obj_name = lprefix + new_ts
         copy_headers = {
@@ -348,7 +349,7 @@ class VersionedWritesMiddleware(object):
         if 'X-Versions-Location' in req.headers:
             val = req.headers.get('X-Versions-Location')
             if val:
-                # diferently from previous version, we are actually
+                # differently from previous version, we are actually
                 # returning an error if user tries to set versions location
                 # while feature is explicitly disabled.
                 if not config_true_value(enabled) and \
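The ``mktime`` to ``timegm`` switch matters because Last-Modified values are GMT, while ``time.mktime()`` interprets a ``struct_time`` in the proxy's local timezone, skewing the version timestamp by the UTC offset. A standard-library demonstration::

    import calendar
    import time

    st = time.strptime('Sat, 05 Mar 2016 12:00:00 GMT',
                       '%a, %d %b %Y %H:%M:%S GMT')
    print(calendar.timegm(st))  # correct: treats st as UTC
    print(time.mktime(st))      # differs unless the host runs in UTC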
@@ -80,6 +80,8 @@ import time

 from eventlet import greenthread, GreenPool, patcher
 import eventlet.green.profile as eprofile
+import six
+from six.moves import urllib

 from swift import gettext_ as _
 from swift.common.utils import get_logger, config_true_value
@@ -89,28 +91,6 @@ from x_profile.exceptions import NotFoundException, MethodNotAllowed,\
 from x_profile.html_viewer import HTMLViewer
 from x_profile.profile_model import ProfileLog

-# True if we are running on Python 3.
-PY3 = sys.version_info[0] == 3
-
-if PY3:  # pragma: no cover
-    text_type = str
-else:
-    text_type = unicode
-
-
-def bytes_(s, encoding='utf-8', errors='strict'):
-    if isinstance(s, text_type):  # pragma: no cover
-        return s.encode(encoding, errors)
-    return s
-
-
-try:
-    from urllib.parse import parse_qs
-except ImportError:
-    try:
-        from urlparse import parse_qs
-    except ImportError:  # pragma: no cover
-        from cgi import parse_qs
-

 DEFAULT_PROFILE_PREFIX = '/tmp/log/swift/profile/default.profile'

@@ -198,8 +178,9 @@ class ProfileMiddleware(object):
         wsgi_input = request.environ['wsgi.input']
         query_dict = request.params
         qs_in_body = wsgi_input.read()
-        query_dict.update(parse_qs(qs_in_body, keep_blank_values=True,
-                                   strict_parsing=False))
+        query_dict.update(urllib.parse.parse_qs(qs_in_body,
+                                                keep_blank_values=True,
+                                                strict_parsing=False))
         return query_dict

     def dump_checkpoint(self):
@@ -228,7 +209,9 @@ class ProfileMiddleware(object):
                                                       query_dict,
                                                       self.renew_profile)
             start_response('200 OK', headers)
-            return [bytes_(content)]
+            if isinstance(content, six.text_type):
+                content = content.encode('utf-8')
+            return [content]
         except MethodNotAllowed as mx:
             start_response('405 Method Not Allowed', [])
             return '%s' % mx
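The removed ``bytes_()`` helper is replaced by an inline ``six.text_type`` check so WSGI bodies are always bytes; the idiom in isolation::

    import six

    content = u'profile output'
    if isinstance(content, six.text_type):
        content = content.encode('utf-8')
    print(repr(content))  # b'profile output' (shown without the b on py2)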
@@ -33,10 +33,12 @@ from swift.common.storage_policy import POLICIES
 from swift.common.constraints import FORMAT2CONTENT_TYPE
 from swift.common.exceptions import ListingIterError, SegmentError
 from swift.common.http import is_success
-from swift.common.swob import (HTTPBadRequest, HTTPNotAcceptable,
-                               HTTPServiceUnavailable, Range)
+from swift.common.swob import HTTPBadRequest, HTTPNotAcceptable, \
+    HTTPServiceUnavailable, Range, is_chunked
 from swift.common.utils import split_path, validate_device_partition, \
-    close_if_possible, maybe_multipart_byteranges_to_document_iters
+    close_if_possible, maybe_multipart_byteranges_to_document_iters, \
+    multipart_byteranges_to_document_iters, parse_content_type, \
+    parse_content_range
 from swift.common.wsgi import make_subrequest

@@ -454,6 +456,9 @@ class SegmentedIterable(object):
             self.logger.exception(_('ERROR: An error occurred '
                                     'while retrieving segments'))
             raise
+        finally:
+            if self.current_resp:
+                close_if_possible(self.current_resp.app_iter)

     def app_iter_range(self, *a, **kw):
         """
@@ -496,5 +501,46 @@ class SegmentedIterable(object):
         Called when the client disconnect. Ensure that the connection to the
         backend server is closed.
         """
-        if self.current_resp:
-            close_if_possible(self.current_resp.app_iter)
+        close_if_possible(self.app_iter)
+
+
+def http_response_to_document_iters(response, read_chunk_size=4096):
+    """
+    Takes a successful object-GET HTTP response and turns it into an
+    iterator of (first-byte, last-byte, length, headers, body-file)
+    5-tuples.
+
+    The response must either be a 200 or a 206; if you feed in a 204 or
+    something similar, this probably won't work.
+
+    :param response: HTTP response, like from bufferedhttp.http_connect(),
+                     not a swob.Response.
+    """
+    chunked = is_chunked(dict(response.getheaders()))
+
+    if response.status == 200:
+        if chunked:
+            # Single "range" that's the whole object with an unknown length
+            return iter([(0, None, None, response.getheaders(),
+                          response)])
+
+        # Single "range" that's the whole object
+        content_length = int(response.getheader('Content-Length'))
+        return iter([(0, content_length - 1, content_length,
+                      response.getheaders(), response)])

+    content_type, params_list = parse_content_type(
+        response.getheader('Content-Type'))
+    if content_type != 'multipart/byteranges':
+        # Single range; no MIME framing, just the bytes. The start and end
+        # byte indices are in the Content-Range header.
+        start, end, length = parse_content_range(
+            response.getheader('Content-Range'))
+        return iter([(start, end, length, response.getheaders(), response)])
+    else:
+        # Multiple ranges; the response body is a multipart/byteranges MIME
+        # document, and we have to parse it using the MIME boundary
+        # extracted from the Content-Type header.
+        params = dict(params_list)
+        return multipart_byteranges_to_document_iters(
+            response, params['boundary'], read_chunk_size)
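A usage sketch for the new helper, assuming a tree with this change applied; ``FakeResponse`` is a hypothetical stand-in for the bufferedhttp response a real caller would pass::

    from swift.common.request_helpers import http_response_to_document_iters

    class FakeResponse(object):
        status = 200

        def getheaders(self):
            return [('Content-Length', '10'), ('Content-Type', 'text/plain')]

        def getheader(self, name, default=None):
            return dict(self.getheaders()).get(name, default)

    for first, last, length, headers, body in \
            http_response_to_document_iters(FakeResponse()):
        print(first, last, length)  # 0 9 10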
@@ -139,6 +139,12 @@ class RingBuilder(object):
         finally:
             self.logger.disabled = True

+    @property
+    def min_part_seconds_left(self):
+        """Get the total seconds until a rebalance can be performed"""
+        elapsed_seconds = int(time() - self._last_part_moves_epoch)
+        return max((self.min_part_hours * 3600) - elapsed_seconds, 0)
+
     def weight_of_one_part(self):
         """
         Returns the weight of each partition as calculated from the
@@ -336,7 +342,10 @@ class RingBuilder(object):
         if 'id' not in dev:
             dev['id'] = 0
             if self.devs:
-                dev['id'] = max(d['id'] for d in self.devs if d) + 1
+                try:
+                    dev['id'] = self.devs.index(None)
+                except ValueError:
+                    dev['id'] = len(self.devs)
         if dev['id'] < len(self.devs) and self.devs[dev['id']] is not None:
             raise exceptions.DuplicateDeviceError(
                 'Duplicate device id: %d' % dev['id'])
@@ -729,11 +738,12 @@ class RingBuilder(object):
     def pretend_min_part_hours_passed(self):
         """
         Override min_part_hours by marking all partitions as having been moved
-        255 hours ago. This can be used to force a full rebalance on the next
-        call to rebalance.
+        255 hours ago and last move epoch to 'the beginning of time'. This can
+        be used to force a full rebalance on the next call to rebalance.
         """
         for part in range(self.parts):
             self._last_part_moves[part] = 0xff
+        self._last_part_moves_epoch = 0

     def get_part_devices(self, part):
         """
@@ -835,6 +845,8 @@ class RingBuilder(object):
         more recently than min_part_hours.
         """
         elapsed_hours = int(time() - self._last_part_moves_epoch) / 3600
+        if elapsed_hours <= 0:
+            return
         for part in range(self.parts):
             # The "min(self._last_part_moves[part] + elapsed_hours, 0xff)"
             # which was here showed up in profiling, so it got inlined.
@@ -966,12 +978,6 @@ class RingBuilder(object):
                 if dev_id == NONE_DEV:
                     continue
                 dev = self.devs[dev_id]
-                # the min part hour check is ignored iff a device has more
-                # than one replica of a part assigned to it - which would have
-                # only been possible on rings built with older version of code
-                if (self._last_part_moves[part] < self.min_part_hours and
-                        not replicas_at_tier[dev['tiers'][-1]] > 1):
-                    break
                 if all(replicas_at_tier[tier] <=
                        replica_plan[tier]['max']
                        for tier in dev['tiers']):
@@ -984,8 +990,12 @@ class RingBuilder(object):
             undispersed_dev_replicas.sort(
                 key=lambda dr: dr[0]['parts_wanted'])
             for dev, replica in undispersed_dev_replicas:
-                if self._last_part_moves[part] < self.min_part_hours:
-                    break
+                # the min part hour check is ignored iff a device has more
+                # than one replica of a part assigned to it - which would have
+                # only been possible on rings built with older version of code
+                if (self._last_part_moves[part] < self.min_part_hours and
+                        not replicas_at_tier[dev['tiers'][-1]] > 1):
+                    continue
                 dev['parts_wanted'] += 1
                 dev['parts'] -= 1
                 assign_parts[part].append(replica)
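A back-of-the-envelope check of the new ``min_part_seconds_left`` property, with the builder attributes faked as plain variables::

    import time

    min_part_hours = 1
    _last_part_moves_epoch = time.time() - 1800  # last move 30 minutes ago
    elapsed_seconds = int(time.time() - _last_part_moves_epoch)
    print(max((min_part_hours * 3600) - elapsed_seconds, 0))  # ~1800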
@@ -15,7 +15,6 @@

 import array
 import six.moves.cPickle as pickle
-import inspect
 import json
 from collections import defaultdict
 from gzip import GzipFile
@@ -135,15 +134,8 @@ class RingData(object):
         # Override the timestamp so that the same ring data creates
         # the same bytes on disk. This makes a checksum comparison a
         # good way to see if two rings are identical.
-        #
-        # This only works on Python 2.7; on 2.6, we always get the
-        # current time in the gzip output.
         tempf = NamedTemporaryFile(dir=".", prefix=filename, delete=False)
-        if 'mtime' in inspect.getargspec(GzipFile.__init__).args:
-            gz_file = GzipFile(filename, mode='wb', fileobj=tempf,
-                               mtime=mtime)
-        else:
-            gz_file = GzipFile(filename, mode='wb', fileobj=tempf)
+        gz_file = GzipFile(filename, mode='wb', fileobj=tempf, mtime=mtime)
         self.serialize_v1(gz_file)
         gz_file.close()
         tempf.flush()
@@ -203,12 +195,23 @@ class Ring(object):

             # Do this now, when we know the data has changed, rather than
             # doing it on every call to get_more_nodes().
+            #
+            # Since this is to speed up the finding of handoffs, we only
+            # consider devices with at least one partition assigned. This
+            # way, a region, zone, or server with no partitions assigned
+            # does not count toward our totals, thereby keeping the early
+            # bailouts in get_more_nodes() working.
+            dev_ids_with_parts = set()
+            for part2dev_id in self._replica2part2dev_id:
+                for dev_id in part2dev_id:
+                    dev_ids_with_parts.add(dev_id)
+
             regions = set()
             zones = set()
             ips = set()
             self._num_devs = 0
             for dev in self._devs:
-                if dev:
+                if dev and dev['id'] in dev_ids_with_parts:
                     regions.add(dev['region'])
                     zones.add((dev['region'], dev['zone']))
                     ips.add((dev['region'], dev['zone'], dev['ip']))
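The handoff bookkeeping above can be illustrated with plain lists standing in for the replica-to-part-to-device tables; a device id that never appears holds no partitions and is excluded from the region/zone/ip totals::

    replica2part2dev_id = [[0, 1], [1, 2]]  # two replicas, two partitions
    dev_ids_with_parts = set()
    for part2dev_id in replica2part2dev_id:
        for dev_id in part2dev_id:
            dev_ids_with_parts.add(dev_id)
    print(dev_ids_with_parts)  # {0, 1, 2}; e.g. a device 3 would be skipped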
@@ -761,7 +761,7 @@ class StoragePolicyCollection(object):
         Adds a new name or names to a policy

         :param policy_index: index of a policy in this policy collection.
-        :param *aliases: arbitrary number of string policy names to add.
+        :param aliases: arbitrary number of string policy names to add.
         """
         policy = self.get_by_index(policy_index)
         for alias in aliases:
@@ -779,7 +779,7 @@ class StoragePolicyCollection(object):
         primary name then the next available alias will be adopted
         as the new primary name.

-        :param *aliases: arbitrary number of existing policy names to remove.
+        :param aliases: arbitrary number of existing policy names to remove.
         """
         for alias in aliases:
             policy = self.get_by_name(alias)
@@ -164,7 +164,7 @@ def _datetime_property(header):
             return None

     def setter(self, value):
-        if isinstance(value, (float, int, long)):
+        if isinstance(value, (float,) + six.integer_types):
             self.headers[header] = time.strftime(
                 "%a, %d %b %Y %H:%M:%S GMT", time.gmtime(value))
         elif isinstance(value, datetime):
@@ -804,6 +804,27 @@ def _host_url_property():
     return property(getter, doc="Get url for request/response up to path")


+def is_chunked(headers):
+    te = None
+    for key in headers:
+        if key.lower() == 'transfer-encoding':
+            te = headers.get(key)
+    if te:
+        encodings = te.split(',')
+        if len(encodings) > 1:
+            raise AttributeError('Unsupported Transfer-Coding header'
+                                 ' value specified in Transfer-Encoding'
+                                 ' header')
+        # If there are more than one transfer encoding value, the last
+        # one must be chunked, see RFC 2616 Sec. 3.6
+        if encodings[-1].lower() == 'chunked':
+            return True
+        else:
+            raise ValueError('Invalid Transfer-Encoding header value')
+    else:
+        return False
+
+
 class Request(object):
     """
     WSGI Request object.
@@ -955,7 +976,7 @@ class Request(object):

     @property
     def is_chunked(self):
-        return 'chunked' in self.headers.get('transfer-encoding', '')
+        return is_chunked(self.headers)

     @property
     def url(self):
@@ -1061,22 +1082,7 @@ class Request(object):
         :raises AttributeError: if the last value of the transfer-encoding
                                 header is not "chunked"
         """
-        te = self.headers.get('transfer-encoding')
-        if te:
-            encodings = te.split(',')
-            if len(encodings) > 1:
-                raise AttributeError('Unsupported Transfer-Coding header'
-                                     ' value specified in Transfer-Encoding'
-                                     ' header')
-            # If there are more than one transfer encoding value, the last
-            # one must be chunked, see RFC 2616 Sec. 3.6
-            if encodings[-1].lower() == 'chunked':
-                chunked = True
-            else:
-                raise ValueError('Invalid Transfer-Encoding header value')
-        else:
-            chunked = False
-        if not chunked:
+        if not is_chunked(self.headers):
             # Because we are not using chunked transfer encoding we can pay
             # attention to the content-length header.
             fsize = self.headers.get('content-length', None)
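Expected behavior of the extracted ``is_chunked()`` helper, assuming a tree with this change applied::

    from swift.common.swob import is_chunked

    print(is_chunked({'Transfer-Encoding': 'chunked'}))  # True
    print(is_chunked({'Content-Length': '42'}))          # False
    # 'gzip, chunked' raises AttributeError; a bare 'gzip' raises ValueError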
@@ -22,6 +22,7 @@ import fcntl
 import grp
 import hmac
 import json
+import math
 import operator
 import os
 import pwd
@@ -111,6 +112,9 @@ SWIFT_CONF_FILE = '/etc/swift/swift.conf'
 AF_ALG = getattr(socket, 'AF_ALG', 38)
 F_SETPIPE_SZ = getattr(fcntl, 'F_SETPIPE_SZ', 1031)

+# Used by the parse_socket_string() function to validate IPv6 addresses
+IPV6_RE = re.compile("^\[(?P<address>.*)\](:(?P<port>[0-9]+))?$")
+

 class InvalidHashPathConfigError(ValueError):

@@ -453,6 +457,8 @@ class FileLikeIter(object):
     def __init__(self, iterable):
         """
         Wraps an iterable to behave as a file-like object.
+
+        The iterable must yield bytes strings.
         """
         self.iterator = iter(iterable)
         self.buf = None
@@ -473,10 +479,11 @@ class FileLikeIter(object):
             return rv
         else:
             return next(self.iterator)
+    __next__ = next

     def read(self, size=-1):
         """
-        read([size]) -> read at most size bytes, returned as a string.
+        read([size]) -> read at most size bytes, returned as a bytes string.

         If the size argument is negative or omitted, read until EOF is reached.
         Notice that when in non-blocking mode, less data than what was
@@ -485,9 +492,9 @@ class FileLikeIter(object):
         if self.closed:
             raise ValueError('I/O operation on closed file')
         if size < 0:
-            return ''.join(self)
+            return b''.join(self)
         elif not size:
-            chunk = ''
+            chunk = b''
         elif self.buf:
             chunk = self.buf
             self.buf = None
@@ -495,7 +502,7 @@ class FileLikeIter(object):
             try:
                 chunk = next(self.iterator)
             except StopIteration:
-                return ''
+                return b''
         if len(chunk) > size:
             self.buf = chunk[size:]
             chunk = chunk[:size]
@@ -503,7 +510,7 @@ class FileLikeIter(object):

     def readline(self, size=-1):
         """
-        readline([size]) -> next line from the file, as a string.
+        readline([size]) -> next line from the file, as a bytes string.

         Retain newline. A non-negative size argument limits the maximum
         number of bytes to return (an incomplete line may be returned then).
@@ -511,8 +518,8 @@ class FileLikeIter(object):
         """
         if self.closed:
             raise ValueError('I/O operation on closed file')
-        data = ''
-        while '\n' not in data and (size < 0 or len(data) < size):
+        data = b''
+        while b'\n' not in data and (size < 0 or len(data) < size):
             if size < 0:
                 chunk = self.read(1024)
             else:
@@ -520,8 +527,8 @@ class FileLikeIter(object):
             if not chunk:
                 break
             data += chunk
-            if '\n' in data:
-                data, sep, rest = data.partition('\n')
+            if b'\n' in data:
+                data, sep, rest = data.partition(b'\n')
                 data += sep
                 if self.buf:
                     self.buf = rest + self.buf
@@ -531,7 +538,7 @@ class FileLikeIter(object):

     def readlines(self, sizehint=-1):
         """
-        readlines([size]) -> list of strings, each a line from the file.
+        readlines([size]) -> list of bytes strings, each a line from the file.

         Call readline() repeatedly and return a list of the lines so read.
         The optional size argument, if given, is an approximate bound on the
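With these hunks, ``FileLikeIter`` traffics strictly in bytes on both Python 2 and 3 (note the new ``__next__`` alias); assuming a tree with this change applied::

    from swift.common.utils import FileLikeIter

    f = FileLikeIter([b'abc\ndef', b'ghi'])
    print(f.readline())  # b'abc\n'
    print(f.read())      # b'defghi'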
@@ -693,6 +700,7 @@ def drop_buffer_cache(fd, offset, length):

 NORMAL_FORMAT = "%016.05f"
 INTERNAL_FORMAT = NORMAL_FORMAT + '_%016x'
+SHORT_FORMAT = NORMAL_FORMAT + '_%x'
 MAX_OFFSET = (16 ** 16) - 1
 PRECISION = 1e-5
 # Setting this to True will cause the internal format to always display
@@ -702,6 +710,7 @@ PRECISION = 1e-5
 FORCE_INTERNAL = False  # or True


+@functools.total_ordering
 class Timestamp(object):
     """
     Internal Representation of Swift Time.
@@ -778,6 +787,10 @@ class Timestamp(object):
             raise ValueError(
                 'delta must be greater than %d' % (-1 * self.raw))
         self.timestamp = float(self.raw * PRECISION)
+        if self.timestamp < 0:
+            raise ValueError('timestamp cannot be negative')
+        if self.timestamp >= 10000000000:
+            raise ValueError('timestamp too large')

     def __repr__(self):
         return INTERNAL_FORMAT % (self.timestamp, self.offset)
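The new guards reject values outside Swift's representable range (an offset delta can no longer push a timestamp negative or past the ten-digit epoch ceiling); assuming a tree with this change applied::

    from swift.common.utils import Timestamp

    Timestamp(1456459980.0)  # fine
    try:
        Timestamp(-1)
    except ValueError as e:
        print(e)  # timestamp cannot be negative
    try:
        Timestamp(10000000000)
    except ValueError as e:
        print(e)  # timestamp too large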
@@ -808,34 +821,154 @@ class Timestamp(object):
         else:
             return self.normal

+    @property
+    def short(self):
+        if self.offset or FORCE_INTERNAL:
+            return SHORT_FORMAT % (self.timestamp, self.offset)
+        else:
+            return self.normal
+
     @property
     def isoformat(self):
-        isoformat = datetime.datetime.utcfromtimestamp(
-            float(self.normal)).isoformat()
+        t = float(self.normal)
+        if six.PY3:
+            # On Python 3, round manually using ROUND_HALF_EVEN rounding
+            # method, to use the same rounding method than Python 2. Python 3
+            # used a different rounding method, but Python 3.4.4 and 3.5.1 use
+            # again ROUND_HALF_EVEN as Python 2.
+            # See https://bugs.python.org/issue23517
+            frac, t = math.modf(t)
+            us = round(frac * 1e6)
+            if us >= 1000000:
+                t += 1
+                us -= 1000000
+            elif us < 0:
+                t -= 1
+                us += 1000000
+            dt = datetime.datetime.utcfromtimestamp(t)
+            dt = dt.replace(microsecond=us)
+        else:
+            dt = datetime.datetime.utcfromtimestamp(t)
+
+        isoformat = dt.isoformat()
         # python isoformat() doesn't include msecs when zero
         if len(isoformat) < len("1970-01-01T00:00:00.000000"):
             isoformat += ".000000"
         return isoformat

     def __eq__(self, other):
+        if other is None:
+            return False
         if not isinstance(other, Timestamp):
             other = Timestamp(other)
         return self.internal == other.internal

     def __ne__(self, other):
+        if other is None:
+            return True
         if not isinstance(other, Timestamp):
             other = Timestamp(other)
         return self.internal != other.internal

-    def __cmp__(self, other):
+    def __lt__(self, other):
+        if other is None:
+            return False
         if not isinstance(other, Timestamp):
             other = Timestamp(other)
-        return cmp(self.internal, other.internal)
+        return self.internal < other.internal

     def __hash__(self):
         return hash(self.internal)

+
+def encode_timestamps(t1, t2=None, t3=None, explicit=False):
+    """
+    Encode up to three timestamps into a string. Unlike a Timestamp object, the
+    encoded string does NOT used fixed width fields and consequently no
+    relative chronology of the timestamps can be inferred from lexicographic
+    sorting of encoded timestamp strings.
+
+    The format of the encoded string is:
+        <t1>[<+/-><t2 - t1>[<+/-><t3 - t2>]]
+
+    i.e. if t1 = t2 = t3 then just the string representation of t1 is returned,
+    otherwise the time offsets for t2 and t3 are appended. If explicit is True
+    then the offsets for t2 and t3 are always appended even if zero.
+
+    Note: any offset value in t1 will be preserved, but offsets on t2 and t3
+    are not preserved. In the anticipated use cases for this method (and the
+    inverse decode_timestamps method) the timestamps passed as t2 and t3 are
+    not expected to have offsets as they will be timestamps associated with a
+    POST request. In the case where the encoding is used in a container objects
+    table row, t1 could be the PUT or DELETE time but t2 and t3 represent the
+    content type and metadata times (if different from the data file) i.e.
+    correspond to POST timestamps. In the case where the encoded form is used
+    in a .meta file name, t1 and t2 both correspond to POST timestamps.
+    """
+    form = '{0}'
+    values = [t1.short]
+    if t2 is not None:
+        t2_t1_delta = t2.raw - t1.raw
+        explicit = explicit or (t2_t1_delta != 0)
+        values.append(t2_t1_delta)
+        if t3 is not None:
+            t3_t2_delta = t3.raw - t2.raw
+            explicit = explicit or (t3_t2_delta != 0)
+            values.append(t3_t2_delta)
+    if explicit:
+        form += '{1:+x}'
+        if t3 is not None:
+            form += '{2:+x}'
+    return form.format(*values)
+
+
+def decode_timestamps(encoded, explicit=False):
+    """
+    Parses a string of the form generated by encode_timestamps and returns
+    a tuple of the three component timestamps. If explicit is False, component
+    timestamps that are not explicitly encoded will be assumed to have zero
+    delta from the previous component and therefore take the value of the
+    previous component. If explicit is True, component timestamps that are
+    not explicitly encoded will be returned with value None.
+    """
+    # TODO: some tests, e.g. in test_replicator, put float timestamps values
+    # into container db's, hence this defensive check, but in real world
+    # this may never happen.
+    if not isinstance(encoded, basestring):
+        ts = Timestamp(encoded)
+        return ts, ts, ts
+
+    parts = []
+    signs = []
+    pos_parts = encoded.split('+')
+    for part in pos_parts:
+        # parse time components and their signs
+        # e.g. x-y+z --> parts = [x, y, z] and signs = [+1, -1, +1]
+        neg_parts = part.split('-')
+        parts = parts + neg_parts
+        signs = signs + [1] + [-1] * (len(neg_parts) - 1)
+    t1 = Timestamp(parts[0])
+    t2 = t3 = None
+    if len(parts) > 1:
+        t2 = t1
+        delta = signs[1] * int(parts[1], 16)
+        # if delta = 0 we want t2 = t3 = t1 in order to
+        # preserve any offset in t1 - only construct a distinct
+        # timestamp if there is a non-zero delta.
+        if delta:
+            t2 = Timestamp((t1.raw + delta) * PRECISION)
+    elif not explicit:
+        t2 = t1
+    if len(parts) > 2:
+        t3 = t2
+        delta = signs[2] * int(parts[2], 16)
+        if delta:
+            t3 = Timestamp((t2.raw + delta) * PRECISION)
+    elif not explicit:
+        t3 = t2
+    return t1, t2, t3
+
+
 def normalize_timestamp(timestamp):
     """
     Format a timestamp (string or numeric) into a standardized
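A round-trip sketch of the new encoders, assuming a tree with this change applied; equal timestamps collapse to a single field and non-zero deltas are appended as signed hex::

    from swift.common.utils import (Timestamp, encode_timestamps,
                                    decode_timestamps)

    t1 = Timestamp(1456459980.0)
    t2 = Timestamp(1456459981.5)  # 1.5s later: raw delta 150000 == 0x249f0
    enc = encode_timestamps(t1, t2, t2)
    print(enc)  # e.g. 1456459980.00000+249f0+0
    t1_, t2_, t3_ = decode_timestamps(enc)
    print(t2_ == t2, t3_ == t2)  # True True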
@@ -862,14 +995,10 @@ def last_modified_date_to_timestamp(last_modified_date_str):
     start = datetime.datetime.strptime(last_modified_date_str,
                                        '%Y-%m-%dT%H:%M:%S.%f')
     delta = start - EPOCH
-    # TODO(sam): after we no longer support py2.6, this expression can
-    # simplify to Timestamp(delta.total_seconds()).
-    #
     # This calculation is based on Python 2.7's Modules/datetimemodule.c,
     # function delta_to_microseconds(), but written in Python.
-    return Timestamp(delta.days * 86400 +
-                     delta.seconds +
-                     delta.microseconds / 1000000.0)
+    return Timestamp(delta.total_seconds())


 def normalize_delete_at_timestamp(timestamp):
@@ -1044,22 +1173,28 @@ class RateLimitedIterator(object):
                             this many elements; default is 0 (rate limit
                             immediately)
     """
-    def __init__(self, iterable, elements_per_second, limit_after=0):
+    def __init__(self, iterable, elements_per_second, limit_after=0,
+                 ratelimit_if=lambda _junk: True):
         self.iterator = iter(iterable)
         self.elements_per_second = elements_per_second
         self.limit_after = limit_after
         self.running_time = 0
+        self.ratelimit_if = ratelimit_if

     def __iter__(self):
         return self

     def next(self):
-        if self.limit_after > 0:
-            self.limit_after -= 1
-        else:
-            self.running_time = ratelimit_sleep(self.running_time,
-                                                self.elements_per_second)
-        return next(self.iterator)
+        next_value = next(self.iterator)
+
+        if self.ratelimit_if(next_value):
+            if self.limit_after > 0:
+                self.limit_after -= 1
+            else:
+                self.running_time = ratelimit_sleep(self.running_time,
+                                                    self.elements_per_second)
+        return next_value
+    __next__ = next


 class GreenthreadSafeIterator(object):
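The new ``ratelimit_if`` hook only charges matching items against the rate limit (this is what lets SLO skip throttling large segments); assuming a tree with this change applied::

    from swift.common.utils import RateLimitedIterator

    it = RateLimitedIterator(range(6), elements_per_second=1000,
                             ratelimit_if=lambda x: x % 2 == 0)
    print(list(it))  # [0, 1, 2, 3, 4, 5]; only the even items were throttled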
@@ -1083,6 +1218,7 @@ class GreenthreadSafeIterator(object):
     def next(self):
         with self.semaphore:
             return next(self.unsafe_iter)
+    __next__ = next


 class NullLogger(object):
@@ -1122,6 +1258,7 @@ class LoggerFileObject(object):

     def next(self):
         raise IOError(errno.EBADF, 'Bad file descriptor')
+    __next__ = next

     def read(self, size=-1):
         raise IOError(errno.EBADF, 'Bad file descriptor')
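The recurring ``__next__ = next`` assignment is the usual Python 2/3 iterator shim: py2's protocol calls ``.next()`` while py3's calls ``.__next__()``. A toy standalone version::

    class Counter(object):
        def __init__(self):
            self.i = 0

        def __iter__(self):
            return self

        def next(self):
            self.i += 1
            if self.i > 3:
                raise StopIteration
            return self.i
        __next__ = next

    print(list(Counter()))  # [1, 2, 3]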
@@ -1145,10 +1282,44 @@ class StatsdClient(object):
         self.set_prefix(tail_prefix)
         self._default_sample_rate = default_sample_rate
         self._sample_rate_factor = sample_rate_factor
-        self._target = (self._host, self._port)
         self.random = random
         self.logger = logger

+        # Determine if host is IPv4 or IPv6
+        addr_info = None
+        try:
+            addr_info = socket.getaddrinfo(host, port, socket.AF_INET)
+            self._sock_family = socket.AF_INET
+        except socket.gaierror:
+            try:
+                addr_info = socket.getaddrinfo(host, port, socket.AF_INET6)
+                self._sock_family = socket.AF_INET6
+            except socket.gaierror:
+                # Don't keep the server from starting from what could be a
+                # transient DNS failure. Any hostname will get re-resolved as
+                # necessary in the .sendto() calls.
+                # However, we don't know if we're IPv4 or IPv6 in this case, so
+                # we assume legacy IPv4.
+                self._sock_family = socket.AF_INET
+
+        # NOTE: we use the original host value, not the DNS-resolved one
+        # because if host is a hostname, we don't want to cache the DNS
+        # resolution for the entire lifetime of this process. Let standard
+        # name resolution caching take effect. This should help operators use
+        # DNS trickery if they want.
+        if addr_info is not None:
+            # addr_info is a list of 5-tuples with the following structure:
+            #     (family, socktype, proto, canonname, sockaddr)
+            # where sockaddr is the only thing of interest to us, and we only
+            # use the first result. We want to use the originally supplied
+            # host (see note above) and the remainder of the variable-length
+            # sockaddr: IPv4 has (address, port) while IPv6 has (address,
+            # port, flow info, scope id).
+            sockaddr = addr_info[0][-1]
+            self._target = (host,) + (sockaddr[1:])
+        else:
+            self._target = (host, port)
+
     def set_prefix(self, new_prefix):
         if new_prefix and self._base_prefix:
             self._prefix = '.'.join([self._base_prefix, new_prefix, ''])
@@ -1183,7 +1354,7 @@ class StatsdClient(object):
                              self._target, err)

     def _open_socket(self):
-        return socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
+        return socket.socket(self._sock_family, socket.SOCK_DGRAM)

     def update_stats(self, m_name, m_value, sample_rate=None):
         return self._send(m_name, m_value, 'c', sample_rate)
@@ -1263,6 +1434,7 @@ class LogAdapter(logging.LoggerAdapter, object):
     def __init__(self, logger, server):
         logging.LoggerAdapter.__init__(self, logger, {})
         self.server = server
+        self.warn = self.warning

     @property
     def txn_id(self):
@@ -1733,6 +1905,43 @@ def whataremyips(bind_ip=None):
     return addresses


+def parse_socket_string(socket_string, default_port):
+    """
+    Given a string representing a socket, returns a tuple of (host, port).
+    Valid strings are DNS names, IPv4 addresses, or IPv6 addresses, with an
+    optional port. If an IPv6 address is specified it **must** be enclosed in
+    [], like *[::1]* or *[::1]:11211*. This follows the accepted prescription
+    for `IPv6 host literals`_.
+
+    Examples::
+
+        server.org
+        server.org:1337
+        127.0.0.1:1337
+        [::1]:1337
+        [::1]
+
+    .. _IPv6 host literals: https://tools.ietf.org/html/rfc3986#section-3.2.2
+    """
+    port = default_port
+    # IPv6 addresses must be between '[]'
+    if socket_string.startswith('['):
+        match = IPV6_RE.match(socket_string)
+        if not match:
+            raise ValueError("Invalid IPv6 address: %s" % socket_string)
+        host = match.group('address')
+        port = match.group('port') or port
+    else:
+        if ':' in socket_string:
+            tokens = socket_string.split(':')
+            if len(tokens) > 2:
+                raise ValueError("IPv6 addresses must be between '[]'")
+            host, port = tokens
+        else:
+            host = socket_string
+    return (host, port)
+
+
 def storage_directory(datadir, partition, name_hash):
     """
     Get the storage directory
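For illustration (not part of this diff), the expected behaviour of the new parse_socket_string helper. Note that the port comes back as a string when it was parsed out of the input, and as the supplied default otherwise:

    from swift.common.utils import parse_socket_string

    print(parse_socket_string('server.org', 11211))       # ('server.org', 11211)
    print(parse_socket_string('server.org:1337', 11211))  # ('server.org', '1337')
    print(parse_socket_string('[::1]:1337', 11211))       # ('::1', '1337')
    print(parse_socket_string('[::1]', 11211))            # ('::1', 11211)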
@@ -2297,6 +2506,7 @@ class GreenAsyncPile(object):
             rv = self._responses.get()
             self._pending -= 1
         return rv
+    __next__ = next


 class ModifiedParseResult(ParseResult):
@@ -2568,17 +2778,19 @@ def dump_recon_cache(cache_dict, cache_file, logger, lock_timeout=2):
                 pass
             for cache_key, cache_value in cache_dict.items():
                 put_recon_cache_entry(cache_entry, cache_key, cache_value)
+            tf = None
             try:
                 with NamedTemporaryFile(dir=os.path.dirname(cache_file),
                                         delete=False) as tf:
                     tf.write(json.dumps(cache_entry) + '\n')
                 renamer(tf.name, cache_file, fsync=False)
             finally:
-                try:
-                    os.unlink(tf.name)
-                except OSError as err:
-                    if err.errno != errno.ENOENT:
-                        raise
+                if tf is not None:
+                    try:
+                        os.unlink(tf.name)
+                    except OSError as err:
+                        if err.errno != errno.ENOENT:
+                            raise
     except (Exception, Timeout):
         logger.exception(_('Exception dumping recon cache'))
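For illustration (not part of this diff): the `tf = None` initialisation matters because `NamedTemporaryFile` can raise before `tf` is ever bound, in which case the old `finally` block died with a `NameError` that masked the original error. A minimal sketch of the corrected write-then-rename pattern:

    import json
    import os
    from tempfile import NamedTemporaryFile

    def atomic_write_json(data, dest):
        tf = None
        try:
            with NamedTemporaryFile(mode='w', delete=False,
                                    dir=os.path.dirname(dest) or '.') as tf:
                tf.write(json.dumps(data) + '\n')
            os.rename(tf.name, dest)   # atomic within one filesystem
        finally:
            if tf is not None:         # skip cleanup if creation failed
                try:
                    os.unlink(tf.name)
                except OSError:
                    pass               # usually already renamed away

    atomic_write_json({'replication_time': 1.25}, 'recon.cache')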
@@ -2650,11 +2862,7 @@ def public(func):
     :param func: function to make public
     """
     func.publicly_accessible = True
-
-    @functools.wraps(func)
-    def wrapped(*a, **kw):
-        return func(*a, **kw)
-    return wrapped
+    return func


 def quorum_size(n):
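For illustration (not part of this diff): the simplification above works because `public` only tags the function -- there is no behaviour to wrap, so returning `func` itself avoids an extra call frame per request. The attribute-only decorator pattern in isolation:

    def public(func):
        # Mark the callable and return it unchanged; no wrapper needed.
        func.publicly_accessible = True
        return func

    @public
    def GET(req):
        return 'ok'

    print(GET.publicly_accessible)   # True
    print(GET('req'))                # 'ok', with no wrapper indirection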
@@ -3251,6 +3459,25 @@ def parse_content_type(content_type):
     return content_type, parm_list


+def extract_swift_bytes(content_type):
+    """
+    Parse a content-type and return a tuple containing:
+        - the content_type string minus any swift_bytes param,
+        - the swift_bytes value or None if the param was not found
+
+    :param content_type: a content-type string
+    :return: a tuple of (content-type, swift_bytes or None)
+    """
+    content_type, params = parse_content_type(content_type)
+    swift_bytes = None
+    for k, v in params:
+        if k == 'swift_bytes':
+            swift_bytes = v
+        else:
+            content_type += ';%s=%s' % (k, v)
+    return content_type, swift_bytes
+
+
 def override_bytes_from_content_type(listing_dict, logger=None):
     """
     Takes a dict from a container listing and overrides the content_type,
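For illustration (not part of this diff), the expected behaviour of extract_swift_bytes: the swift_bytes parameter is peeled off while any other content-type parameters are re-appended in order:

    from swift.common.utils import extract_swift_bytes

    print(extract_swift_bytes('text/plain;swift_bytes=1024'))
    # ('text/plain', '1024')
    print(extract_swift_bytes('text/plain;charset=utf-8'))
    # ('text/plain;charset=utf-8', None)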
@@ -3308,7 +3535,7 @@ class _MultipartMimeFileLikeObject(object):
         if not length:
             length = self.read_chunk_size
         if self.no_more_data_for_this_file:
-            return ''
+            return b''

         # read enough data to know whether we're going to run
         # into a boundary in next [length] bytes
@@ -3334,14 +3561,14 @@ class _MultipartMimeFileLikeObject(object):
         # if it does, just return data up to the boundary
         else:
             ret, self.input_buffer = self.input_buffer.split(self.boundary, 1)
-            self.no_more_files = self.input_buffer.startswith('--')
+            self.no_more_files = self.input_buffer.startswith(b'--')
             self.no_more_data_for_this_file = True
             self.input_buffer = self.input_buffer[2:]
         return ret

     def readline(self):
         if self.no_more_data_for_this_file:
-            return ''
+            return b''
         boundary_pos = newline_pos = -1
         while newline_pos < 0 and boundary_pos < 0:
             try:
@@ -3349,7 +3576,7 @@ class _MultipartMimeFileLikeObject(object):
             except (IOError, ValueError) as e:
                 raise swift.common.exceptions.ChunkReadError(str(e))
             self.input_buffer += chunk
-            newline_pos = self.input_buffer.find('\r\n')
+            newline_pos = self.input_buffer.find(b'\r\n')
             boundary_pos = self.input_buffer.find(self.boundary)
             if not chunk:
                 self.no_more_files = True
@@ -3358,7 +3585,7 @@ class _MultipartMimeFileLikeObject(object):
         if newline_pos >= 0 and \
                 (boundary_pos < 0 or newline_pos < boundary_pos):
             # Use self.read to ensure any logic there happens...
-            ret = ''
+            ret = b''
             to_read = newline_pos + 2
             while to_read > 0:
                 chunk = self.read(to_read)
@@ -3425,11 +3652,21 @@ def parse_mime_headers(doc_file):
     headers = []
     while True:
         line = doc_file.readline()
+        done = line in (b'\r\n', b'\n', b'')
+        if six.PY3:
+            try:
+                line = line.decode('utf-8')
+            except UnicodeDecodeError:
+                line = line.decode('latin1')
         headers.append(line)
-        if line in (b'\r\n', b'\n', b''):
+        if done:
             break
-    header_string = b''.join(headers)
-    return HeaderKeyDict(email.parser.Parser().parsestr(header_string))
+    if six.PY3:
+        header_string = ''.join(headers)
+    else:
+        header_string = b''.join(headers)
+    headers = email.parser.Parser().parsestr(header_string)
+    return HeaderKeyDict(headers)


 def mime_to_document_iters(input_file, boundary, read_chunk_size=4096):
@@ -3511,8 +3748,8 @@ def document_iters_to_http_response_body(ranges_iter, boundary, multipart,
     HTTP response body, whether that's multipart/byteranges or not.

     This is almost, but not quite, the inverse of
-    http_response_to_document_iters(). This function only yields chunks of
-    the body, not any headers.
+    request_helpers.http_response_to_document_iters(). This function only
+    yields chunks of the body, not any headers.

     :param ranges_iter: an iterator of dictionaries, one per range.
         Each dictionary must contain at least the following key:
@@ -3587,41 +3824,6 @@ def multipart_byteranges_to_document_iters(input_file, boundary,
         yield (first_byte, last_byte, length, headers.items(), body)


-def http_response_to_document_iters(response, read_chunk_size=4096):
-    """
-    Takes a successful object-GET HTTP response and turns it into an
-    iterator of (first-byte, last-byte, length, headers, body-file)
-    5-tuples.
-
-    The response must either be a 200 or a 206; if you feed in a 204 or
-    something similar, this probably won't work.
-
-    :param response: HTTP response, like from bufferedhttp.http_connect(),
-                     not a swob.Response.
-    """
-    if response.status == 200:
-        # Single "range" that's the whole object
-        content_length = int(response.getheader('Content-Length'))
-        return iter([(0, content_length - 1, content_length,
-                      response.getheaders(), response)])
-
-    content_type, params_list = parse_content_type(
-        response.getheader('Content-Type'))
-    if content_type != 'multipart/byteranges':
-        # Single range; no MIME framing, just the bytes. The start and end
-        # byte indices are in the Content-Range header.
-        start, end, length = parse_content_range(
-            response.getheader('Content-Range'))
-        return iter([(start, end, length, response.getheaders(), response)])
-    else:
-        # Multiple ranges; the response body is a multipart/byteranges MIME
-        # document, and we have to parse it using the MIME boundary
-        # extracted from the Content-Type header.
-        params = dict(params_list)
-        return multipart_byteranges_to_document_iters(
-            response, params['boundary'], read_chunk_size)
-
-
 #: Regular expression to match form attributes.
 ATTRIBUTES_RE = re.compile(r'(\w+)=(".*?"|[^";]+)(; ?|$)')
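For illustration (not part of this diff): the b'' literals and the decode logic in the hunks above are Python 3 porting work -- a socket delivers bytes, while email.parser on Python 3 wants text. Latin-1 can decode any byte sequence, so the fallback never raises. A minimal sketch:

    def decode_header_line(line):
        # Prefer UTF-8; fall back to latin1, which maps every byte.
        try:
            return line.decode('utf-8')
        except UnicodeDecodeError:
            return line.decode('latin1')

    print(decode_header_line(b'X-Object-Meta-Name: caf\xc3\xa9\r\n'))  # UTF-8
    print(decode_header_line(b'X-Object-Meta-Name: caf\xe9\r\n'))      # latin1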
--- a/swift/container/backend.py
+++ b/swift/container/backend.py
@@ -25,7 +25,8 @@ import six.moves.cPickle as pickle
 from six.moves import range
 import sqlite3

-from swift.common.utils import Timestamp
+from swift.common.utils import Timestamp, encode_timestamps, decode_timestamps, \
+    extract_swift_bytes
 from swift.common.db import DatabaseBroker, utf8encode
@@ -137,6 +138,90 @@ CONTAINER_STAT_VIEW_SCRIPT = '''
 '''


+def update_new_item_from_existing(new_item, existing):
+    """
+    Compare the data and meta related timestamps of a new object item with
+    the timestamps of an existing object record, and update the new item
+    with data and/or meta related attributes from the existing record if
+    their timestamps are newer.
+
+    The multiple timestamps are encoded into a single string for storing
+    in the 'created_at' column of the objects db table.
+
+    :param new_item: A dict of object update attributes
+    :param existing: A dict of existing object attributes
+    :return: True if any attributes of the new item dict were found to be
+             newer than the existing and therefore not updated, otherwise
+             False implying that the updated item is equal to the existing.
+    """
+
+    # item[created_at] may be updated so keep a copy of the original
+    # value in case we process this item again
+    new_item.setdefault('data_timestamp', new_item['created_at'])
+
+    # content-type and metadata timestamps may be encoded in
+    # item[created_at], or may be set explicitly.
+    item_ts_data, item_ts_ctype, item_ts_meta = decode_timestamps(
+        new_item['data_timestamp'])
+
+    if new_item.get('ctype_timestamp'):
+        item_ts_ctype = Timestamp(new_item.get('ctype_timestamp'))
+        item_ts_meta = item_ts_ctype
+    if new_item.get('meta_timestamp'):
+        item_ts_meta = Timestamp(new_item.get('meta_timestamp'))
+
+    if not existing:
+        # encode new_item timestamps into one string for db record
+        new_item['created_at'] = encode_timestamps(
+            item_ts_data, item_ts_ctype, item_ts_meta)
+        return True
+
+    # decode existing timestamp into separate data, content-type and
+    # metadata timestamps
+    rec_ts_data, rec_ts_ctype, rec_ts_meta = decode_timestamps(
+        existing['created_at'])
+
+    # Extract any swift_bytes values from the content_type values. This is
+    # necessary because the swift_bytes value to persist should be that at the
+    # most recent data timestamp whereas the content-type value to persist is
+    # that at the most recent content-type timestamp. The two values happen to
+    # be stored in the same database column for historical reasons.
+    for item in (new_item, existing):
+        content_type, swift_bytes = extract_swift_bytes(item['content_type'])
+        item['content_type'] = content_type
+        item['swift_bytes'] = swift_bytes
+
+    newer_than_existing = [True, True, True]
+    if rec_ts_data >= item_ts_data:
+        # apply data attributes from existing record
+        new_item.update([(k, existing[k])
+                         for k in ('size', 'etag', 'deleted', 'swift_bytes')])
+        item_ts_data = rec_ts_data
+        newer_than_existing[0] = False
+    if rec_ts_ctype >= item_ts_ctype:
+        # apply content-type attribute from existing record
+        new_item['content_type'] = existing['content_type']
+        item_ts_ctype = rec_ts_ctype
+        newer_than_existing[1] = False
+    if rec_ts_meta >= item_ts_meta:
+        # apply metadata timestamp from existing record
+        item_ts_meta = rec_ts_meta
+        newer_than_existing[2] = False
+
+    # encode updated timestamps into one string for db record
+    new_item['created_at'] = encode_timestamps(
+        item_ts_data, item_ts_ctype, item_ts_meta)
+
+    # append the most recent swift_bytes onto the most recent content_type in
+    # new_item and restore existing to its original state
+    for item in (new_item, existing):
+        if item['swift_bytes']:
+            item['content_type'] += ';swift_bytes=%s' % item['swift_bytes']
+        del item['swift_bytes']
+
+    return any(newer_than_existing)
+
+
 class ContainerBroker(DatabaseBroker):
     """Encapsulates working with a container database."""
     db_type = 'container'
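For illustration (not part of this diff): update_new_item_from_existing relies on encode_timestamps/decode_timestamps, added to swift.common.utils in this merge, to pack up to three timestamps into the single created_at column. A sketch of the expected round trip:

    from swift.common.utils import (Timestamp, encode_timestamps,
                                    decode_timestamps)

    t_data = Timestamp(1450000000.0)    # object data (PUT) time
    t_ctype = Timestamp(1450000001.0)   # content-type update time
    t_meta = Timestamp(1450000002.0)    # metadata (POST) time

    created_at = encode_timestamps(t_data, t_ctype, t_meta)
    # expected: the three distinct timestamps come back out
    print(decode_timestamps(created_at) == (t_data, t_ctype, t_meta))  # True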
@@ -284,13 +369,20 @@ class ContainerBroker(DatabaseBroker):
                 storage_policy_index = data[6]
             else:
                 storage_policy_index = 0
+            content_type_timestamp = meta_timestamp = None
+            if len(data) > 7:
+                content_type_timestamp = data[7]
+            if len(data) > 8:
+                meta_timestamp = data[8]
             item_list.append({'name': name,
                               'created_at': timestamp,
                               'size': size,
                               'content_type': content_type,
                               'etag': etag,
                               'deleted': deleted,
-                              'storage_policy_index': storage_policy_index})
+                              'storage_policy_index': storage_policy_index,
+                              'ctype_timestamp': content_type_timestamp,
+                              'meta_timestamp': meta_timestamp})

     def empty(self):
         """
@@ -318,6 +410,7 @@ class ContainerBroker(DatabaseBroker):

         :param name: object name to be deleted
         :param timestamp: timestamp when the object was marked as deleted
+        :param storage_policy_index: the storage policy index for the object
         """
         self.put_object(name, timestamp, 0, 'application/deleted', 'noetag',
                         deleted=1, storage_policy_index=storage_policy_index)
@@ -325,10 +418,13 @@ class ContainerBroker(DatabaseBroker):
     def make_tuple_for_pickle(self, record):
         return (record['name'], record['created_at'], record['size'],
                 record['content_type'], record['etag'], record['deleted'],
-                record['storage_policy_index'])
+                record['storage_policy_index'],
+                record['ctype_timestamp'],
+                record['meta_timestamp'])

     def put_object(self, name, timestamp, size, content_type, etag, deleted=0,
-                   storage_policy_index=0):
+                   storage_policy_index=0, ctype_timestamp=None,
+                   meta_timestamp=None):
         """
         Creates an object in the DB with its metadata.

@@ -340,11 +436,16 @@ class ContainerBroker(DatabaseBroker):
         :param deleted: if True, marks the object as deleted and sets the
                         deleted_at timestamp to timestamp
         :param storage_policy_index: the storage policy index for the object
+        :param ctype_timestamp: timestamp of when content_type was last
+                                updated
+        :param meta_timestamp: timestamp of when metadata was last updated
         """
         record = {'name': name, 'created_at': timestamp, 'size': size,
                   'content_type': content_type, 'etag': etag,
                   'deleted': deleted,
-                  'storage_policy_index': storage_policy_index}
+                  'storage_policy_index': storage_policy_index,
+                  'ctype_timestamp': ctype_timestamp,
+                  'meta_timestamp': meta_timestamp}
         self.put_record(record)

     def _is_deleted_info(self, object_count, put_timestamp, delete_timestamp,
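For illustration only -- hypothetical usage, assuming `broker` is an already-initialised ContainerBroker for a local container db. With the new keyword arguments a caller can record content-type and metadata updates that happened after the data was written:

    from swift.common.utils import Timestamp

    broker.put_object(
        'my-object', Timestamp(1450000000.0).internal, 1024,
        'text/plain', 'd41d8cd98f00b204e9800998ecf8427e',
        storage_policy_index=0,
        ctype_timestamp=Timestamp(1450000001.0).internal,  # later ctype update
        meta_timestamp=Timestamp(1450000002.0).internal)   # later POST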
@@ -570,6 +671,7 @@ class ContainerBroker(DatabaseBroker):
         :param delimiter: delimiter for query
         :param path: if defined, will set the prefix and delimiter based on
                      the path
+        :param storage_policy_index: storage policy index for query
         :param reverse: reverse the result order.

         :returns: list of tuples of (name, created_at, size, content_type,
@@ -647,7 +749,7 @@ class ContainerBroker(DatabaseBroker):
                 # is no delimiter then we can simply return the result as
                 # prefixes are now handled in the SQL statement.
                 if prefix is None or not delimiter:
-                    return [r for r in curs]
+                    return [self._transform_record(r) for r in curs]

                 # We have a delimiter and a prefix (possibly empty string) to
                 # handle
@@ -686,18 +788,35 @@ class ContainerBroker(DatabaseBroker):
                         results.append([dir_name, '0', 0, None, ''])
                     curs.close()
                     break
-                results.append(row)
+                results.append(self._transform_record(row))
             if not rowcount:
                 break
         return results

+    def _transform_record(self, record):
+        """
+        Decode the created_at timestamp into separate data, content-type and
+        meta timestamps and replace the created_at timestamp with the
+        metadata timestamp i.e. the last-modified time.
+        """
+        t_data, t_ctype, t_meta = decode_timestamps(record[1])
+        return (record[0], t_meta.internal) + record[2:]
+
+    def _record_to_dict(self, rec):
+        if rec:
+            keys = ('name', 'created_at', 'size', 'content_type', 'etag',
+                    'deleted', 'storage_policy_index')
+            return dict(zip(keys, rec))
+        return None
+
     def merge_items(self, item_list, source=None):
         """
         Merge items into the object table.

         :param item_list: list of dictionaries of {'name', 'created_at',
                           'size', 'content_type', 'etag', 'deleted',
-                          'storage_policy_index'}
+                          'storage_policy_index', 'ctype_timestamp',
+                          'meta_timestamp'}
         :param source: if defined, update incoming_sync with the source
         """
         for item in item_list:
@@ -711,15 +830,16 @@ class ContainerBroker(DatabaseBroker):
             else:
                 query_mod = ''
             curs.execute('BEGIN IMMEDIATE')
-            # Get created_at times for objects in item_list that already exist.
+            # Get sqlite records for objects in item_list that already exist.
             # We must chunk it up to avoid sqlite's limit of 999 args.
-            created_at = {}
+            records = {}
             for offset in range(0, len(item_list), SQLITE_ARG_LIMIT):
                 chunk = [rec['name'] for rec in
                          item_list[offset:offset + SQLITE_ARG_LIMIT]]
-                created_at.update(
-                    ((rec[0], rec[1]), rec[2]) for rec in curs.execute(
-                        'SELECT name, storage_policy_index, created_at '
+                records.update(
+                    ((rec[0], rec[6]), rec) for rec in curs.execute(
+                        'SELECT name, created_at, size, content_type,'
+                        'etag, deleted, storage_policy_index '
                         'FROM object WHERE ' + query_mod + ' name IN (%s)' %
                         ','.join('?' * len(chunk)), chunk))
             # Sort item_list into things that need adding and deleting, based
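For illustration (not part of this diff): the chunking in merge_items exists because SQLite caps the number of bound parameters in a single statement at 999 (SQLITE_ARG_LIMIT). The slicing pattern in isolation:

    SQLITE_ARG_LIMIT = 999

    def chunks(names, limit=SQLITE_ARG_LIMIT):
        # Yield slices small enough for one 'name IN (?, ..., ?)' clause.
        for offset in range(0, len(names), limit):
            yield names[offset:offset + limit]

    names = ['obj%d' % i for i in range(2500)]
    print([len(c) for c in chunks(names)])   # [999, 999, 502]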
@@ -729,14 +849,13 @@ class ContainerBroker(DatabaseBroker):
             for item in item_list:
                 item.setdefault('storage_policy_index', 0)  # legacy
                 item_ident = (item['name'], item['storage_policy_index'])
-                if created_at.get(item_ident) < item['created_at']:
-                    if item_ident in created_at:  # exists with older timestamp
+                existing = self._record_to_dict(records.get(item_ident))
+                if update_new_item_from_existing(item, existing):
+                    if item_ident in records:  # exists with older timestamp
                         to_delete[item_ident] = item
                     if item_ident in to_add:  # duplicate entries in item_list
-                        to_add[item_ident] = max(item, to_add[item_ident],
-                                                 key=lambda i: i['created_at'])
-                    else:
-                        to_add[item_ident] = item
+                        update_new_item_from_existing(item, to_add[item_ident])
+                    to_add[item_ident] = item
             if to_delete:
                 curs.executemany(
                     'DELETE FROM object WHERE ' + query_mod +
--- a/swift/container/reconciler.py
+++ b/swift/container/reconciler.py
@@ -27,8 +27,7 @@ from swift.common.direct_client import (
 from swift.common.internal_client import InternalClient, UnexpectedResponse
 from swift.common.utils import get_logger, split_path, quorum_size, \
     FileLikeIter, Timestamp, last_modified_date_to_timestamp, \
-    LRUCache
+    LRUCache, decode_timestamps


 MISPLACED_OBJECTS_ACCOUNT = '.misplaced_objects'
 MISPLACED_OBJECTS_CONTAINER_DIVISOR = 3600  # 1 hour
@@ -116,7 +115,18 @@ def best_policy_index(headers):


 def get_reconciler_container_name(obj_timestamp):
-    return str(int(Timestamp(obj_timestamp)) //
+    """
+    Get the name of a container into which a misplaced object should be
+    enqueued. The name is the object's last modified time rounded down to the
+    nearest hour.
+
+    :param obj_timestamp: a string representation of the object's 'created_at'
+                          time from its container db row.
+    :return: a container name
+    """
+    # Use last modified time of object to determine reconciler container name
+    _junk, _junk, ts_meta = decode_timestamps(obj_timestamp)
+    return str(int(ts_meta) //
               MISPLACED_OBJECTS_CONTAINER_DIVISOR *
               MISPLACED_OBJECTS_CONTAINER_DIVISOR)
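For illustration (not part of this diff): the container name is just the meta timestamp floored to the hour, so every misplaced object modified in the same hour is enqueued into the same container:

    MISPLACED_OBJECTS_CONTAINER_DIVISOR = 3600   # 1 hour

    def reconciler_container_for(ts_meta_seconds):
        # Round the last-modified time down to the nearest hour.
        return str(int(ts_meta_seconds) //
                   MISPLACED_OBJECTS_CONTAINER_DIVISOR *
                   MISPLACED_OBJECTS_CONTAINER_DIVISOR)

    print(reconciler_container_for(1450000000))   # '1449997200'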
@@ -262,7 +272,7 @@ def parse_raw_obj(obj_info):
         'container': container,
         'obj': obj,
         'q_op': q_op,
-        'q_ts': Timestamp(obj_info['hash']),
+        'q_ts': decode_timestamps((obj_info['hash']))[0],
         'q_record': last_modified_date_to_timestamp(
             obj_info['last_modified']),
         'path': '/%s/%s/%s' % (account, container, obj)
--- a/swift/container/replicator.py
+++ b/swift/container/replicator.py
@@ -20,6 +20,7 @@ import time
 from collections import defaultdict
 from eventlet import Timeout

+from swift.container.sync_store import ContainerSyncStore
 from swift.container.backend import ContainerBroker, DATADIR
 from swift.container.reconciler import (
     MISPLACED_OBJECTS_ACCOUNT, incorrect_policy_index,
@@ -189,6 +190,13 @@ class ContainerReplicator(db_replicator.Replicator):
     def _post_replicate_hook(self, broker, info, responses):
         if info['account'] == MISPLACED_OBJECTS_ACCOUNT:
             return
+
+        try:
+            self.sync_store.update_sync_store(broker)
+        except Exception:
+            self.logger.exception('Failed to update sync_store %s' %
+                                  broker.db_file)
+
         point = broker.get_reconciler_sync()
         if not broker.has_multiple_policies() and info['max_row'] != point:
             broker.update_reconciler_sync(info['max_row'])
@@ -210,6 +218,13 @@ class ContainerReplicator(db_replicator.Replicator):
             # this container shouldn't be here, make sure it's cleaned up
             self.reconciler_cleanups[broker.container] = broker
             return
+        try:
+            # DB is going to get deleted. Be preemptive about it
+            self.sync_store.remove_synced_container(broker)
+        except Exception:
+            self.logger.exception('Failed to remove sync_store entry %s' %
+                                  broker.db_file)
+
         return super(ContainerReplicator, self).delete_db(broker)

     def replicate_reconcilers(self):
@@ -237,6 +252,9 @@ class ContainerReplicator(db_replicator.Replicator):
     def run_once(self, *args, **kwargs):
         self.reconciler_containers = {}
         self.reconciler_cleanups = {}
+        self.sync_store = ContainerSyncStore(self.root,
+                                             self.logger,
+                                             self.mount_check)
         rv = super(ContainerReplicator, self).run_once(*args, **kwargs)
         if any([self.reconciler_containers, self.reconciler_cleanups]):
             self.replicate_reconcilers()
--- a/swift/container/server.py
+++ b/swift/container/server.py
@@ -23,6 +23,7 @@ from xml.etree.cElementTree import Element, SubElement, tostring
 from eventlet import Timeout

 import swift.common.db
+from swift.container.sync_store import ContainerSyncStore
 from swift.container.backend import ContainerBroker, DATADIR
 from swift.container.replicator import ContainerReplicatorRpc
 from swift.common.db import DatabaseAlreadyExists
@@ -110,6 +111,9 @@ class ContainerController(BaseStorageServer):
             self.save_headers.append('x-versions-location')
         swift.common.db.DB_PREALLOCATION = \
             config_true_value(conf.get('db_preallocation', 'f'))
+        self.sync_store = ContainerSyncStore(self.root,
+                                             self.logger,
+                                             self.mount_check)

     def _get_container_broker(self, drive, part, account, container, **kwargs):
         """
@@ -242,6 +246,13 @@ class ContainerController(BaseStorageServer):
         else:
             return None

+    def _update_sync_store(self, broker, method):
+        try:
+            self.sync_store.update_sync_store(broker)
+        except Exception:
+            self.logger.exception('Failed to update sync_store %s during %s' %
+                                  (broker.db_file, method))
+
     @public
     @timing_stats()
     def DELETE(self, req):
@@ -276,6 +287,7 @@ class ContainerController(BaseStorageServer):
             broker.delete_db(req_timestamp.internal)
             if not broker.is_deleted():
                 return HTTPConflict(request=req)
+            self._update_sync_store(broker, 'DELETE')
             resp = self.account_update(req, account, container, broker)
             if resp:
                 return resp
@@ -356,7 +368,9 @@ class ContainerController(BaseStorageServer):
                               int(req.headers['x-size']),
                               req.headers['x-content-type'],
                               req.headers['x-etag'], 0,
-                              obj_policy_index)
+                              obj_policy_index,
+                              req.headers.get('x-content-type-timestamp'),
+                              req.headers.get('x-meta-timestamp'))
             return HTTPCreated(request=req)
         else:   # put container
             if requested_policy_index is None:
@@ -381,6 +395,8 @@ class ContainerController(BaseStorageServer):
                     broker.metadata['X-Container-Sync-To'][0]:
                 broker.set_x_container_sync_points(-1, -1)
             broker.update_metadata(metadata, validate_metadata=True)
+            if metadata:
+                self._update_sync_store(broker, 'PUT')
             resp = self.account_update(req, account, container, broker)
             if resp:
                 return resp
@@ -564,6 +580,7 @@ class ContainerController(BaseStorageServer):
                 broker.metadata['X-Container-Sync-To'][0]:
             broker.set_x_container_sync_points(-1, -1)
         broker.update_metadata(metadata, validate_metadata=True)
+        self._update_sync_store(broker, 'POST')
         return HTTPNoContent(request=req)

     def __call__(self, env, start_response):
--- a/swift/container/sync.py
+++ b/swift/container/sync.py
@@ -24,7 +24,9 @@ from struct import unpack_from
 from eventlet import sleep, Timeout

 import swift.common.db
-from swift.container.backend import ContainerBroker, DATADIR
+from swift.common.db import DatabaseConnectionError
+from swift.container.backend import ContainerBroker
+from swift.container.sync_store import ContainerSyncStore
 from swift.common.container_sync_realms import ContainerSyncRealms
 from swift.common.internal_client import (
     delete_object, put_object, InternalClient, UnexpectedResponse)
@@ -32,9 +34,9 @@ from swift.common.exceptions import ClientException
 from swift.common.ring import Ring
 from swift.common.ring.utils import is_local_device
 from swift.common.utils import (
-    audit_location_generator, clean_content_type, config_true_value,
+    clean_content_type, config_true_value,
     FileLikeIter, get_logger, hash_path, quote, urlparse, validate_sync_to,
-    whataremyips, Timestamp)
+    whataremyips, Timestamp, decode_timestamps)
 from swift.common.daemon import Daemon
 from swift.common.http import HTTP_UNAUTHORIZED, HTTP_NOT_FOUND
 from swift.common.storage_policy import POLICIES
@@ -63,7 +65,7 @@ ic_conf_body = """
 # log_udp_port = 514
 #
 # You can enable StatsD logging here:
-# log_statsd_host = localhost
+# log_statsd_host =
 # log_statsd_port = 8125
 # log_statsd_default_sample_rate = 1.0
 # log_statsd_sample_rate_factor = 1.0
@@ -168,7 +170,7 @@ class ContainerSync(Daemon):
         #: running wild on near empty systems.
         self.interval = int(conf.get('interval', 300))
         #: Maximum amount of time to spend syncing a container before moving on
-        #: to the next one. If a conatiner sync hasn't finished in this time,
+        #: to the next one. If a container sync hasn't finished in this time,
         #: it'll just be resumed next scan.
         self.container_time = int(conf.get('container_time', 60))
         #: ContainerSyncCluster instance for validating sync-to values.
@@ -187,6 +189,10 @@ class ContainerSync(Daemon):
             a.strip()
             for a in conf.get('sync_proxy', '').split(',')
             if a.strip()]
+        #: ContainerSyncStore instance for iterating over synced containers
+        self.sync_store = ContainerSyncStore(self.devices,
+                                             self.logger,
+                                             self.mount_check)
         #: Number of containers with sync turned on that were successfully
         #: synced.
         self.container_syncs = 0
@@ -194,7 +200,8 @@ class ContainerSync(Daemon):
         self.container_deletes = 0
         #: Number of successful PUTs triggered.
         self.container_puts = 0
-        #: Number of containers that didn't have sync turned on.
+        #: Number of containers whose sync has been turned off, but
+        #: are not yet cleared from the sync store.
         self.container_skips = 0
         #: Number of containers that had a failure of some type.
         self.container_failures = 0
@@ -247,10 +254,7 @@ class ContainerSync(Daemon):
         sleep(random() * self.interval)
         while True:
             begin = time()
-            all_locs = audit_location_generator(self.devices, DATADIR, '.db',
-                                                mount_check=self.mount_check,
-                                                logger=self.logger)
-            for path, device, partition in all_locs:
+            for path in self.sync_store.synced_containers_generator():
                 self.container_sync(path)
                 if time() - self.reported >= 3600:  # once an hour
                     self.report()
@@ -264,10 +268,7 @@ class ContainerSync(Daemon):
         """
         self.logger.info(_('Begin container sync "once" mode'))
         begin = time()
-        all_locs = audit_location_generator(self.devices, DATADIR, '.db',
-                                            mount_check=self.mount_check,
-                                            logger=self.logger)
-        for path, device, partition in all_locs:
+        for path in self.sync_store.synced_containers_generator():
             self.container_sync(path)
             if time() - self.reported >= 3600:  # once an hour
                 self.report()
@@ -308,7 +309,20 @@ class ContainerSync(Daemon):
         broker = None
         try:
             broker = ContainerBroker(path)
-            info = broker.get_info()
+            # The path we pass to the ContainerBroker is a real path of
+            # a container DB. If we get here, however, it means that this
+            # path is linked from the sync_containers dir. In rare cases
+            # of race or processes failures the link can be stale and
+            # the get_info below will raise a DB doesn't exist exception
+            # In this case we remove the stale link and raise an error
+            # since in most cases the db should be there.
+            try:
+                info = broker.get_info()
+            except DatabaseConnectionError as db_err:
+                if str(db_err).endswith("DB doesn't exist"):
+                    self.sync_store.remove_synced_container(broker)
+                raise
+
             x, nodes = self.container_ring.get_nodes(info['account'],
                                                      info['container'])
             for ordinal, node in enumerate(nodes):
@@ -388,7 +402,7 @@ class ContainerSync(Daemon):
                     broker.set_x_container_sync_points(sync_point1, None)
                 self.container_syncs += 1
                 self.logger.increment('syncs')
-        except (Exception, Timeout) as err:
+        except (Exception, Timeout):
             self.container_failures += 1
             self.logger.increment('failures')
             self.logger.exception(_('ERROR Syncing %s'),
@@ -417,9 +431,14 @@ class ContainerSync(Daemon):
         """
         try:
             start_time = time()
+            # extract last modified time from the created_at value
+            ts_data, ts_ctype, ts_meta = decode_timestamps(
+                row['created_at'])
             if row['deleted']:
+                # when sync'ing a deleted object, use ts_data - this is the
+                # timestamp of the source tombstone
                 try:
-                    headers = {'x-timestamp': row['created_at']}
+                    headers = {'x-timestamp': ts_data.internal}
                     if realm and realm_key:
                         nonce = uuid.uuid4().hex
                         path = urlparse(sync_to).path + '/' + quote(
@@ -442,35 +461,31 @@ class ContainerSync(Daemon):
                     self.logger.increment('deletes')
                     self.logger.timing_since('deletes.timing', start_time)
             else:
+                # when sync'ing a live object, use ts_meta - this is the time
+                # at which the source object was last modified by a PUT or POST
                 part, nodes = \
                     self.get_object_ring(info['storage_policy_index']). \
                     get_nodes(info['account'], info['container'],
                               row['name'])
                 shuffle(nodes)
                 exc = None
-                looking_for_timestamp = Timestamp(row['created_at'])
-                timestamp = -1
-                headers = body = None
                 # look up for the newest one
                 headers_out = {'X-Newest': True,
                                'X-Backend-Storage-Policy-Index':
                                str(info['storage_policy_index'])}
                 try:
-                    source_obj_status, source_obj_info, source_obj_iter = \
+                    source_obj_status, headers, body = \
                         self.swift.get_object(info['account'],
                                               info['container'], row['name'],
                                               headers=headers_out,
                                               acceptable_statuses=(2, 4))

                 except (Exception, UnexpectedResponse, Timeout) as err:
-                    source_obj_info = {}
-                    source_obj_iter = None
+                    headers = {}
+                    body = None
                     exc = err
-                timestamp = Timestamp(source_obj_info.get(
-                    'x-timestamp', 0))
-                headers = source_obj_info
-                body = source_obj_iter
-                if timestamp < looking_for_timestamp:
+                timestamp = Timestamp(headers.get('x-timestamp', 0))
+                if timestamp < ts_meta:
                     if exc:
                         raise exc
                     raise Exception(
@@ -487,7 +502,6 @@ class ContainerSync(Daemon):
                 if 'content-type' in headers:
                     headers['content-type'] = clean_content_type(
                         headers['content-type'])
-                headers['x-timestamp'] = row['created_at']
                 if realm and realm_key:
                     nonce = uuid.uuid4().hex
                     path = urlparse(sync_to).path + '/' + quote(row['name'])
@ -0,0 +1,177 @@
|
||||||
|
# Copyright (c) 2010-2016 OpenStack Foundation
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||||
|
# implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
|
||||||
|
import os
|
||||||
|
import errno
|
||||||
|
|
||||||
|
from swift.common.utils import audit_location_generator, mkdirs
|
||||||
|
from swift.container.backend import DATADIR
|
||||||
|
|
||||||
|
SYNC_DATADIR = 'sync_containers'
|
||||||
|
|
||||||
|
|
||||||
|
class ContainerSyncStore(object):
|
||||||
|
"""
|
||||||
|
Filesystem based store for local containers that needs to be synced.
|
||||||
|
|
||||||
|
The store holds a list of containers that need to be synced by the
|
||||||
|
container sync daemon. The store is local to the container server node,
|
||||||
|
that is, only containers whose databases are kept locally on the node are
|
||||||
|
listed.
|
||||||
|
"""
|
||||||
|
def __init__(self, devices, logger, mount_check):
|
||||||
|
self.devices = os.path.normpath(os.path.join('/', devices)) + '/'
|
||||||
|
self.logger = logger
|
||||||
|
self.mount_check = mount_check
|
||||||
|
|
||||||
|
def _container_to_synced_container_path(self, path):
|
||||||
|
# path is assumed to be of the form:
|
||||||
|
# /srv/node/sdb/containers/part/.../*.db
|
||||||
|
# or more generally:
|
||||||
|
# devices/device/containers/part/.../*.db
|
||||||
|
# Below we split the path to the following parts:
|
||||||
|
# devices, device, rest
|
||||||
|
devices = self.devices
|
||||||
|
path = os.path.normpath(path)
|
||||||
|
device = path[len(devices):path.rfind(DATADIR)]
|
||||||
|
rest = path[path.rfind(DATADIR) + len(DATADIR) + 1:]
|
||||||
|
|
||||||
|
return os.path.join(devices, device, SYNC_DATADIR, rest)
|
||||||
|
|
||||||
|
def _synced_container_to_container_path(self, path):
|
||||||
|
# synced path is assumed to be of the form:
|
||||||
|
# /srv/node/sdb/sync_containers/part/.../*.db
|
||||||
|
# or more generally:
|
||||||
|
# devices/device/sync_containers/part/.../*.db
|
||||||
|
# Below we split the path to the following parts:
|
||||||
|
# devices, device, rest
|
||||||
|
devices = self.devices
|
||||||
|
path = os.path.normpath(path)
|
||||||
|
device = path[len(devices):path.rfind(SYNC_DATADIR)]
|
||||||
|
rest = path[path.rfind(SYNC_DATADIR) + len(SYNC_DATADIR) + 1:]
|
||||||
|
|
||||||
|
return os.path.join(devices, device, DATADIR, rest)
|
||||||
|
|
||||||
|
def add_synced_container(self, broker):
|
||||||
|
"""
|
||||||
|
Adds the container db represented by broker to the list of synced
|
||||||
|
containers.
|
||||||
|
|
||||||
|
:param broker: An instance of ContainerBroker representing the
|
||||||
|
container to add.
|
||||||
|
"""
|
||||||
|
sync_file = self._container_to_synced_container_path(broker.db_file)
|
||||||
|
stat = None
|
||||||
|
try:
|
||||||
|
stat = os.stat(sync_file)
|
||||||
|
except OSError as oserr:
|
||||||
|
if oserr.errno != errno.ENOENT:
|
||||||
|
raise oserr
|
||||||
|
|
||||||
|
if stat is not None:
|
||||||
|
return
|
||||||
|
|
||||||
|
sync_path = os.path.dirname(sync_file)
|
||||||
|
mkdirs(sync_path)
|
||||||
|
|
||||||
|
try:
|
||||||
|
os.symlink(broker.db_file, sync_file)
|
||||||
|
except OSError as oserr:
|
||||||
|
            if (oserr.errno != errno.EEXIST or
                    not os.path.islink(sync_file)):
                raise oserr

    def remove_synced_container(self, broker):
        """
        Removes the container db represented by broker from the list of
        synced containers.

        :param broker: An instance of ContainerBroker representing the
                       container to remove.
        """
        sync_file = broker.db_file
        sync_file = self._container_to_synced_container_path(sync_file)
        try:
            os.unlink(sync_file)
            os.removedirs(os.path.dirname(sync_file))
        except OSError as oserr:
            if oserr.errno != errno.ENOENT:
                raise oserr

    def update_sync_store(self, broker):
        """
        Add or remove a symlink to/from the sync-containers directory
        according to the broker's metadata.

        Decide according to the broker's x-container-sync-to and
        x-container-sync-key metadata whether a symlink needs to be added
        or removed.

        Note that if neither metadata item appears at all, the container
        has never been set up for sync (within reclaim_age), in which case
        we do nothing. This is important, as this method is called for ALL
        containers from the container replicator.

        Once we know we do need to do something, we check whether the
        container is marked for delete, in which case we want to remove
        the symlink.

        To add a symlink, both x-container-sync-to and x-container-sync-key
        must exist and be valid, that is, not empty.

        If we reach the end, the container is not marked for delete and the
        condition for adding a symlink is not met; the conclusion is that
        we need to remove the symlink.

        :param broker: An instance of ContainerBroker
        """
        # If the broker metadata does not have both x-container-sync-to
        # and x-container-sync-key it has *never* been set. Make sure
        # we do nothing in this case
        if ('X-Container-Sync-To' not in broker.metadata and
                'X-Container-Sync-Key' not in broker.metadata):
            return

        if broker.is_deleted():
            self.remove_synced_container(broker)
            return

        # If both x-container-sync-to and x-container-sync-key
        # exist and are valid, add the symlink
        sync_to = sync_key = None
        if 'X-Container-Sync-To' in broker.metadata:
            sync_to = broker.metadata['X-Container-Sync-To'][0]
        if 'X-Container-Sync-Key' in broker.metadata:
            sync_key = broker.metadata['X-Container-Sync-Key'][0]
        if sync_to and sync_key:
            self.add_synced_container(broker)
            return

        self.remove_synced_container(broker)

    def synced_containers_generator(self):
        """
        Iterates over the list of synced containers,
        yielding the path of the container db.
        """
        all_locs = audit_location_generator(self.devices, SYNC_DATADIR, '.db',
                                            mount_check=self.mount_check,
                                            logger=self.logger)
        for path, device, partition in all_locs:
            # What we want to yield is the real path, as it is used for
            # initiating a container broker. The broker would break if not
            # given the db real path, as it e.g. assumes the existence of
            # .pending in the same path
            yield self._synced_container_to_container_path(path)
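A minimal sketch (not part of the commit) exercising update_sync_store's decision table with a stub broker. The ContainerSyncStore class name and its (devices, logger, mount_check) constructor are assumptions inferred from the code above; broker metadata values are (value, timestamp) tuples, which is why the code indexes [0].

import tempfile

from swift.container.sync_store import ContainerSyncStore


class StubBroker(object):
    # Assumed stand-in for ContainerBroker, for illustration only.
    def __init__(self, db_file, metadata, deleted=False):
        self.db_file = db_file
        self.metadata = metadata
        self._deleted = deleted

    def is_deleted(self):
        return self._deleted


devices = tempfile.mkdtemp()  # acts as the devices root
store = ContainerSyncStore(devices, logger=None, mount_check=False)
db_file = devices + '/sda1/containers/0/abc/deadbeef/deadbeef.db'

# Both sync headers present and non-empty -> a symlink is added.
store.update_sync_store(StubBroker(db_file, {
    'X-Container-Sync-To': ('//realm/cluster/a/c', 0),
    'X-Container-Sync-Key': ('secret', 0)}))

# Headers present but empty -> the symlink is removed (ENOENT tolerated).
store.update_sync_store(StubBroker(db_file, {
    'X-Container-Sync-To': ('', 0),
    'X-Container-Sync-Key': ('', 0)}))

# Neither header has ever been set -> no-op, the common replicator case.
store.update_sync_store(StubBroker(db_file, {}))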
@@ -143,7 +143,7 @@ class ContainerUpdater(Daemon):
                     pid2filename[pid] = tmpfilename
                 else:
                     signal.signal(signal.SIGTERM, signal.SIG_DFL)
-                    patcher.monkey_patch(all=False, socket=True)
+                    patcher.monkey_patch(all=False, socket=True, thread=True)
                     self.no_changes = 0
                     self.successes = 0
                     self.failures = 0
@@ -177,7 +177,7 @@ class ContainerUpdater(Daemon):
        """
        Run the updater once.
        """
-        patcher.monkey_patch(all=False, socket=True)
+        patcher.monkey_patch(all=False, socket=True, thread=True)
        self.logger.info(_('Begin container update single threaded sweep'))
        begin = time.time()
        self.no_changes = 0
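An illustrative sketch (not part of the commit) of what adding thread=True to the eventlet monkey-patch call changes: eventlet then also patches the thread/threading modules, so locks taken by the updater's code cooperate with green threads instead of blocking the whole process.

from eventlet import patcher

patcher.monkey_patch(all=False, socket=True, thread=True)

print(patcher.is_monkey_patched('socket'))  # True
print(patcher.is_monkey_patched('thread'))  # True
print(patcher.is_monkey_patched('time'))    # False -- not requested here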
@@ -1,4 +1,4 @@
-# German translations for swift.
+# Translations template for swift.
 # Copyright (C) 2015 ORGANIZATION
 # This file is distributed under the same license as the swift project.
 #
@@ -8,20 +8,22 @@
 # Jonas John <jonas.john@e-werkzeug.eu>, 2015
 # OpenStack Infra <zanata@openstack.org>, 2015. #zanata
 # Tom Cocozzello <tjcocozz@us.ibm.com>, 2015. #zanata
+# Monika Wolf <vcomas3@de.ibm.com>, 2016. #zanata
 msgid ""
 msgstr ""
-"Project-Id-Version: swift 2.5.1.dev70\n"
+"Project-Id-Version: swift 2.6.1.dev176\n"
 "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
-"POT-Creation-Date: 2015-10-23 06:34+0000\n"
-"PO-Revision-Date: 2015-08-11 11:22+0000\n"
-"Last-Translator: openstackjenkins <jenkins@openstack.org>\n"
-"Language: de\n"
-"Language-Team: German\n"
-"Plural-Forms: nplurals=2; plural=(n != 1)\n"
+"POT-Creation-Date: 2016-03-08 04:09+0000\n"
 "MIME-Version: 1.0\n"
-"Content-Type: text/plain; charset=utf-8\n"
+"Content-Type: text/plain; charset=UTF-8\n"
 "Content-Transfer-Encoding: 8bit\n"
-"Generated-By: Babel 2.1.1\n"
+"PO-Revision-Date: 2016-03-07 06:04+0000\n"
+"Last-Translator: Monika Wolf <vcomas3@de.ibm.com>\n"
+"Language: de\n"
+"Plural-Forms: nplurals=2; plural=(n != 1);\n"
+"Generated-By: Babel 2.0\n"
+"X-Generator: Zanata 3.7.3\n"
+"Language-Team: German\n"
 
 msgid ""
 "\n"
@@ -234,6 +236,15 @@ msgstr ""
 "Clientpfad %(client)s entspricht nicht dem in den Objektmetadaten "
 "gespeicherten Pfad %(meta)s"
 
+#, fuzzy
+msgid ""
+"Configuration option internal_client_conf_path not defined. Using default "
+"configuration, See internal-client.conf-sample for options"
+msgstr ""
+"Konfigurationsoption internal_client_conf_path nicht definiert. "
+"Standardkonfiguration wird verwendet. Informationen zu den Optionen finden "
+"Sie in internal-client.conf-sample."
+
 msgid "Connection refused"
 msgstr "Verbindung abgelehnt"
 
@@ -663,6 +674,10 @@ msgstr "Kein Cluster-Endpunkt für %r %r"
 msgid "No permission to signal PID %d"
 msgstr "Keine Berechtigung zu Signal-Programmkennung %d"
 
+#, python-format
+msgid "No policy with index %s"
+msgstr "Keine Richtlinie mit Index %s"
+
 #, python-format
 msgid "No realm key for %r"
 msgstr "Kein Bereichsschlüssel für %r"
@@ -715,18 +730,6 @@ msgstr ""
 "%(errors)d, Dateien/s insgesamt: %(frate).2f, Bytes/s insgesamt: "
 "%(brate).2f, Prüfungszeit: %(audit).2f, Geschwindigkeit: %(audit_rate).2f"
 
-#, python-format
-msgid ""
-"Object audit (%(type)s). Since %(start_time)s: Locally: %(passes)d passed, "
-"%(quars)d quarantined, %(errors)d errors files/sec: %(frate).2f , bytes/sec: "
-"%(brate).2f, Total time: %(total).2f, Auditing time: %(audit).2f, Rate: "
-"%(audit_rate).2f"
-msgstr ""
-"Objektprüfung (%(type)s). Seit %(start_time)s: Lokal: %(passes)d übergeben, "
-"%(quars)d unter Quarantäne gestellt, %(errors)d Fehlerdateien/s: "
-"%(frate).2f , Bytes/s: %(brate).2f, Zeit insgesamt: %(total).2f, "
-"Prüfungszeit: %(audit).2f, Geschwindigkeit: %(audit_rate).2f"
-
 #, python-format
 msgid "Object audit stats: %s"
 msgstr "Objektprüfungsstatistik: %s"
@@ -842,6 +845,14 @@ msgstr "%s Objekte werden entfernt"
 msgid "Removing partition: %s"
 msgstr "Partition wird entfernt: %s"
 
+#, python-format
+msgid "Removing pid file %s with invalid pid"
+msgstr "PID-Datei %s mit ungültiger PID wird entfernt."
+
+#, python-format
+msgid "Removing pid file %s with wrong pid %d"
+msgstr "PID-Datei %s mit falscher PID %d wird entfernt."
+
 #, python-format
 msgid "Removing stale pid file %s"
 msgstr "Veraltete PID-Datei %s wird entfernt"
@@ -983,6 +994,10 @@ msgid "Unable to locate %s in libc. Leaving as a no-op."
 msgstr ""
 "%s konnte nicht in libc gefunden werden. Wird als Nullbefehl verlassen."
 
+#, python-format
+msgid "Unable to locate config for %s"
+msgstr "Konfiguration für %s wurde nicht gefunden."
+
 msgid ""
 "Unable to locate fallocate, posix_fallocate in libc. Leaving as a no-op."
 msgstr ""
@@ -1042,6 +1057,10 @@ msgstr ""
 msgid "Waited %s seconds for %s to die; giving up"
 msgstr "Hat %s Sekunden für %s zum Erlöschen gewartet; Gibt auf"
 
+#, python-format
+msgid "Waited %s seconds for %s to die; killing"
+msgstr "Hat %s Sekunden für %s zum Erlöschen gewartet. Wird abgebrochen."
+
 msgid "Warning: Cannot ratelimit without a memcached client"
 msgstr ""
 "Warnung: Geschwindigkeitsbegrenzung kann nicht ohne memcached-Client "
@@ -1,4 +1,4 @@
-# Spanish translations for swift.
+# Translations template for swift.
 # Copyright (C) 2015 ORGANIZATION
 # This file is distributed under the same license as the swift project.
 #
@@ -8,18 +8,19 @@
 # Tom Cocozzello <tjcocozz@us.ibm.com>, 2015. #zanata
 msgid ""
 msgstr ""
-"Project-Id-Version: swift 2.5.1.dev70\n"
+"Project-Id-Version: swift 2.6.1.dev176\n"
 "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
-"POT-Creation-Date: 2015-10-23 06:34+0000\n"
+"POT-Creation-Date: 2016-03-08 04:09+0000\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
 "PO-Revision-Date: 2015-09-09 05:36+0000\n"
 "Last-Translator: Carlos A. Muñoz <camunoz@redhat.com>\n"
 "Language: es\n"
+"Plural-Forms: nplurals=2; plural=(n != 1);\n"
+"Generated-By: Babel 2.0\n"
+"X-Generator: Zanata 3.7.3\n"
 "Language-Team: Spanish\n"
-"Plural-Forms: nplurals=2; plural=(n != 1)\n"
-"MIME-Version: 1.0\n"
-"Content-Type: text/plain; charset=utf-8\n"
-"Content-Transfer-Encoding: 8bit\n"
-"Generated-By: Babel 2.1.1\n"
 
 msgid ""
 "\n"
@@ -703,18 +704,6 @@ msgstr ""
 "segundo: %(brate).2f, Tiempo de auditoría: %(audit).2f, Velocidad: "
 "%(audit_rate).2f"
 
-#, python-format
-msgid ""
-"Object audit (%(type)s). Since %(start_time)s: Locally: %(passes)d passed, "
-"%(quars)d quarantined, %(errors)d errors files/sec: %(frate).2f , bytes/sec: "
-"%(brate).2f, Total time: %(total).2f, Auditing time: %(audit).2f, Rate: "
-"%(audit_rate).2f"
-msgstr ""
-"Auditoría de objetos (%(type)s). Desde %(start_time)s: Localmente: "
-"%(passes)d han pasado, %(quars)d en cuarentena, %(errors)d errores archivos "
-"por segundo: %(frate).2f , bytes por segundo: %(brate).2f, Tiempo total: "
-"%(total).2f, Tiempo de auditoría: %(audit).2f, Velocidad: %(audit_rate).2f"
-
 #, python-format
 msgid "Object audit stats: %s"
 msgstr "Estadísticas de auditoría de objetos: %s"
@@ -1,4 +1,4 @@
-# French translations for swift.
+# Translations template for swift.
 # Copyright (C) 2015 ORGANIZATION
 # This file is distributed under the same license as the swift project.
 #
@@ -8,18 +8,19 @@
 # Tom Cocozzello <tjcocozz@us.ibm.com>, 2015. #zanata
 msgid ""
 msgstr ""
-"Project-Id-Version: swift 2.5.1.dev70\n"
+"Project-Id-Version: swift 2.6.1.dev176\n"
 "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
-"POT-Creation-Date: 2015-10-23 06:34+0000\n"
+"POT-Creation-Date: 2016-03-08 04:09+0000\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
 "PO-Revision-Date: 2015-08-11 11:22+0000\n"
 "Last-Translator: openstackjenkins <jenkins@openstack.org>\n"
 "Language: fr\n"
+"Plural-Forms: nplurals=2; plural=(n > 1);\n"
+"Generated-By: Babel 2.0\n"
+"X-Generator: Zanata 3.7.3\n"
 "Language-Team: French\n"
-"Plural-Forms: nplurals=2; plural=(n > 1)\n"
-"MIME-Version: 1.0\n"
-"Content-Type: text/plain; charset=utf-8\n"
-"Content-Transfer-Encoding: 8bit\n"
-"Generated-By: Babel 2.1.1\n"
 
 msgid ""
 "\n"
@@ -713,18 +714,6 @@ msgstr ""
 "total d'octets/sec : %(brate).2f. Durée d'audit : %(audit).2f. Taux : "
 "%(audit_rate).2f"
 
-#, python-format
-msgid ""
-"Object audit (%(type)s). Since %(start_time)s: Locally: %(passes)d passed, "
-"%(quars)d quarantined, %(errors)d errors files/sec: %(frate).2f , bytes/sec: "
-"%(brate).2f, Total time: %(total).2f, Auditing time: %(audit).2f, Rate: "
-"%(audit_rate).2f"
-msgstr ""
-"Audit d'objet (%(type)s). Depuis %(start_time)s, localement : %(passes)d "
-"succès. %(quars)d en quarantaine. %(errors)d erreurs. Fichiers/sec : "
-"%(frate).2f. octets/sec : %(brate).2f. Durée totale : %(total).2f. Durée "
-"d'audit : %(audit).2f. Taux : %(audit_rate).2f"
-
 #, python-format
 msgid "Object audit stats: %s"
 msgstr "Statistiques de l'audit d'objet : %s"
@@ -1,4 +1,4 @@
-# Italian translations for swift.
+# Translations template for swift.
 # Copyright (C) 2015 ORGANIZATION
 # This file is distributed under the same license as the swift project.
 #
@@ -7,18 +7,19 @@
 # Tom Cocozzello <tjcocozz@us.ibm.com>, 2015. #zanata
 msgid ""
 msgstr ""
-"Project-Id-Version: swift 2.5.1.dev70\n"
+"Project-Id-Version: swift 2.6.1.dev176\n"
 "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
-"POT-Creation-Date: 2015-10-23 06:34+0000\n"
+"POT-Creation-Date: 2016-03-08 04:09+0000\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
 "PO-Revision-Date: 2015-08-11 11:22+0000\n"
 "Last-Translator: openstackjenkins <jenkins@openstack.org>\n"
 "Language: it\n"
+"Plural-Forms: nplurals=2; plural=(n != 1);\n"
+"Generated-By: Babel 2.0\n"
+"X-Generator: Zanata 3.7.3\n"
 "Language-Team: Italian\n"
-"Plural-Forms: nplurals=2; plural=(n != 1)\n"
-"MIME-Version: 1.0\n"
-"Content-Type: text/plain; charset=utf-8\n"
-"Content-Transfer-Encoding: 8bit\n"
-"Generated-By: Babel 2.1.1\n"
 
 msgid ""
 "\n"
@@ -702,18 +703,6 @@ msgstr ""
 "Totale file/sec: %(frate).2f, Totale byte/sec: %(brate).2f, Tempo verifica: "
 "%(audit).2f, Velocità: %(audit_rate).2f"
 
-#, python-format
-msgid ""
-"Object audit (%(type)s). Since %(start_time)s: Locally: %(passes)d passed, "
-"%(quars)d quarantined, %(errors)d errors files/sec: %(frate).2f , bytes/sec: "
-"%(brate).2f, Total time: %(total).2f, Auditing time: %(audit).2f, Rate: "
-"%(audit_rate).2f"
-msgstr ""
-"Verifica oggetto (%(type)s). A partire da %(start_time)s: In locale: "
-"%(passes)d passati, %(quars)d in quarantena, %(errors)d errori file/sec: "
-"%(frate).2f , byte/sec: %(brate).2f, Tempo totale: %(total).2f, Tempo "
-"verifica: %(audit).2f, Velocità: %(audit_rate).2f"
-
 #, python-format
 msgid "Object audit stats: %s"
 msgstr "Statistiche verifica oggetto: %s"
@@ -1,4 +1,4 @@
-# Japanese translations for swift.
+# Translations template for swift.
 # Copyright (C) 2015 ORGANIZATION
 # This file is distributed under the same license as the swift project.
 #
@@ -9,18 +9,19 @@
 # Tom Cocozzello <tjcocozz@us.ibm.com>, 2015. #zanata
 msgid ""
 msgstr ""
-"Project-Id-Version: swift 2.5.1.dev70\n"
+"Project-Id-Version: swift 2.6.1.dev176\n"
 "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
-"POT-Creation-Date: 2015-10-23 06:34+0000\n"
+"POT-Creation-Date: 2016-03-08 04:09+0000\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
 "PO-Revision-Date: 2015-09-26 09:26+0000\n"
 "Last-Translator: Akihiro Motoki <amotoki@gmail.com>\n"
 "Language: ja\n"
+"Plural-Forms: nplurals=1; plural=0;\n"
+"Generated-By: Babel 2.0\n"
+"X-Generator: Zanata 3.7.3\n"
 "Language-Team: Japanese\n"
-"Plural-Forms: nplurals=1; plural=0\n"
-"MIME-Version: 1.0\n"
-"Content-Type: text/plain; charset=utf-8\n"
-"Content-Transfer-Encoding: 8bit\n"
-"Generated-By: Babel 2.1.1\n"
 
 msgid ""
 "\n"
@@ -690,18 +691,6 @@ msgstr ""
 "済み: %(quars)d、合計エラー: %(errors)d、合計ファイル/秒: %(frate).2f、合計バ"
 "イト/秒: %(brate).2f、監査時間: %(audit).2f、率: %(audit_rate).2f"
 
-#, python-format
-msgid ""
-"Object audit (%(type)s). Since %(start_time)s: Locally: %(passes)d passed, "
-"%(quars)d quarantined, %(errors)d errors files/sec: %(frate).2f , bytes/sec: "
-"%(brate).2f, Total time: %(total).2f, Auditing time: %(audit).2f, Rate: "
-"%(audit_rate).2f"
-msgstr ""
-"オブジェクト監査 (%(type)s)。%(start_time)s 以降: ローカル: パス済"
-"み%(passes)d、検疫済み %(quars)d、エラー %(errors)d、ファイル/秒:"
-"%(frate).2f、バイト/秒: %(brate).2f、合計時間: %(total).2f、監査時間:"
-"%(audit).2f、率: %(audit_rate).2f"
-
 #, python-format
 msgid "Object audit stats: %s"
 msgstr "オブジェクト監査統計: %s"
@@ -1,4 +1,4 @@
-# Korean (South Korea) translations for swift.
+# Translations template for swift.
 # Copyright (C) 2015 ORGANIZATION
 # This file is distributed under the same license as the swift project.
 #
@@ -7,20 +7,22 @@
 # Ying Chun Guo <daisy.ycguo@gmail.com>, 2015
 # Lucas Palm <lapalm@us.ibm.com>, 2015. #zanata
 # OpenStack Infra <zanata@openstack.org>, 2015. #zanata
+# Andreas Jaeger <jaegerandi@gmail.com>, 2016. #zanata
 msgid ""
 msgstr ""
-"Project-Id-Version: swift 2.5.1.dev70\n"
+"Project-Id-Version: swift 2.6.1.dev176\n"
 "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
-"POT-Creation-Date: 2015-10-23 06:34+0000\n"
-"PO-Revision-Date: 2015-09-09 05:10+0000\n"
-"Last-Translator: Ying Chun Guo <daisy.ycguo@gmail.com>\n"
-"Language: ko_KR\n"
-"Language-Team: Korean (South Korea)\n"
-"Plural-Forms: nplurals=1; plural=0\n"
+"POT-Creation-Date: 2016-03-08 04:09+0000\n"
 "MIME-Version: 1.0\n"
-"Content-Type: text/plain; charset=utf-8\n"
+"Content-Type: text/plain; charset=UTF-8\n"
 "Content-Transfer-Encoding: 8bit\n"
-"Generated-By: Babel 2.1.1\n"
+"PO-Revision-Date: 2016-01-30 06:54+0000\n"
+"Last-Translator: Andreas Jaeger <jaegerandi@gmail.com>\n"
+"Language: ko-KR\n"
+"Plural-Forms: nplurals=1; plural=0;\n"
+"Generated-By: Babel 2.0\n"
+"X-Generator: Zanata 3.7.3\n"
+"Language-Team: Korean (South Korea)\n"
 
 msgid ""
 "\n"
@@ -554,7 +556,7 @@ msgstr "컨테이너 %s %s 삭제 중 예외 발생"
 
 #, python-format
 msgid "Exception while deleting object %s %s %s"
-msgstr "오브젝트 %s %s 삭제 중 예외 발생"
+msgstr "오브젝트 %s %s %s 삭제 중 예외 발생"
 
 #, python-format
 msgid "Exception with %(ip)s:%(port)s/%(device)s"
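An illustrative sketch (not part of the commit): the hunk above fixes a msgstr that had dropped one of the three %s placeholders, which would raise at format time. A simple consistency check of the kind below can catch this class of bug; it requires placeholders to agree exactly in order and count, which is stricter than real .po validators such as msgfmt --check.

import re

# Matches %s / %d and named forms like %(frate).2f used in these catalogs.
PLACEHOLDER = re.compile(r'%(?:\([^)]+\))?(?:\.\d+)?[sdf]')


def placeholders_match(msgid, msgstr):
    return PLACEHOLDER.findall(msgid) == PLACEHOLDER.findall(msgstr)


assert not placeholders_match(
    "Exception while deleting object %s %s %s",
    "오브젝트 %s %s 삭제 중 예외 발생")      # the old, broken translation
assert placeholders_match(
    "Exception while deleting object %s %s %s",
    "오브젝트 %s %s %s 삭제 중 예외 발생")   # the fixed one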
@ -684,18 +686,6 @@ msgstr ""
|
||||||
"목: %(quars)d, 총 오류 수: %(errors)d, 총 파일/초: %(frate).2f, 총 바이트/"
|
"목: %(quars)d, 총 오류 수: %(errors)d, 총 파일/초: %(frate).2f, 총 바이트/"
|
||||||
"초: %(brate).2f, 감사 시간: %(audit).2f, 속도: %(audit_rate).2f"
|
"초: %(brate).2f, 감사 시간: %(audit).2f, 속도: %(audit_rate).2f"
|
||||||
|
|
||||||
#, python-format
|
|
||||||
msgid ""
|
|
||||||
"Object audit (%(type)s). Since %(start_time)s: Locally: %(passes)d passed, "
|
|
||||||
"%(quars)d quarantined, %(errors)d errors files/sec: %(frate).2f , bytes/sec: "
|
|
||||||
"%(brate).2f, Total time: %(total).2f, Auditing time: %(audit).2f, Rate: "
|
|
||||||
"%(audit_rate).2f"
|
|
||||||
msgstr ""
|
|
||||||
"오브젝트 감사(%(type)s). %(start_time)s 이후: 로컬: %(passes)d개 패스, "
|
|
||||||
"%(quars)d개 격리, %(errors)d개 오류 파일/초: %(frate).2f ,바이트/초: "
|
|
||||||
"%(brate).2f, 총 시간: %(total).2f, 감사 시간: %(audit).2f, 속도: "
|
|
||||||
"%(audit_rate).2f"
|
|
||||||
|
|
||||||
#, python-format
|
#, python-format
|
||||||
msgid "Object audit stats: %s"
|
msgid "Object audit stats: %s"
|
||||||
msgstr "오브젝트 감사 통계: %s"
|
msgstr "오브젝트 감사 통계: %s"
|
||||||
|
|
|
@@ -1,4 +1,4 @@
-# Portuguese (Brazil) translations for swift.
+# Translations template for swift.
 # Copyright (C) 2015 ORGANIZATION
 # This file is distributed under the same license as the swift project.
 #
@@ -11,18 +11,19 @@
 # OpenStack Infra <zanata@openstack.org>, 2015. #zanata
 msgid ""
 msgstr ""
-"Project-Id-Version: swift 2.5.1.dev70\n"
+"Project-Id-Version: swift 2.6.1.dev176\n"
 "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
-"POT-Creation-Date: 2015-10-23 06:34+0000\n"
+"POT-Creation-Date: 2016-03-08 04:09+0000\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
 "PO-Revision-Date: 2015-08-11 11:22+0000\n"
 "Last-Translator: openstackjenkins <jenkins@openstack.org>\n"
-"Language: pt_BR\n"
+"Language: pt-BR\n"
+"Plural-Forms: nplurals=2; plural=(n > 1);\n"
+"Generated-By: Babel 2.0\n"
+"X-Generator: Zanata 3.7.3\n"
 "Language-Team: Portuguese (Brazil)\n"
-"Plural-Forms: nplurals=2; plural=(n > 1)\n"
-"MIME-Version: 1.0\n"
-"Content-Type: text/plain; charset=utf-8\n"
-"Content-Transfer-Encoding: 8bit\n"
-"Generated-By: Babel 2.1.1\n"
 
 msgid ""
 "\n"
@@ -698,18 +699,6 @@ msgstr ""
 "Total de arquivos/seg: %(frate).2f, Total de bytes/seg: %(brate).2f, Tempo "
 "de auditoria: %(audit).2f, Taxa: %(audit_rate).2f"
 
-#, python-format
-msgid ""
-"Object audit (%(type)s). Since %(start_time)s: Locally: %(passes)d passed, "
-"%(quars)d quarantined, %(errors)d errors files/sec: %(frate).2f , bytes/sec: "
-"%(brate).2f, Total time: %(total).2f, Auditing time: %(audit).2f, Rate: "
-"%(audit_rate).2f"
-msgstr ""
-"Auditoria de objeto (%(type)s). Desde %(start_time)s: Localmente: %(passes)d "
-"aprovado, %(quars)d em quarentena, %(errors)d arquivos de erros/seg: "
-"%(frate).2f, bytes/seg: %(brate).2f, Tempo total: %(total).2f, Tempo de "
-"auditoria: %(audit).2f, Taxa: %(audit_rate).2f"
-
 #, python-format
 msgid "Object audit stats: %s"
 msgstr "Estatísticas de auditoria do objeto: %s"
@@ -1,26 +1,28 @@
-# Russian translations for swift.
+# Translations template for swift.
 # Copyright (C) 2015 ORGANIZATION
 # This file is distributed under the same license as the swift project.
 #
 # Translators:
 # Lucas Palm <lapalm@us.ibm.com>, 2015. #zanata
 # OpenStack Infra <zanata@openstack.org>, 2015. #zanata
+# Filatov Sergey <filatecs@gmail.com>, 2016. #zanata
 msgid ""
 msgstr ""
-"Project-Id-Version: swift 2.5.1.dev70\n"
+"Project-Id-Version: swift 2.6.1.dev176\n"
 "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
-"POT-Creation-Date: 2015-10-23 06:34+0000\n"
-"PO-Revision-Date: 2015-08-11 11:22+0000\n"
-"Last-Translator: openstackjenkins <jenkins@openstack.org>\n"
+"POT-Creation-Date: 2016-03-08 04:09+0000\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"PO-Revision-Date: 2016-01-17 10:49+0000\n"
+"Last-Translator: Filatov Sergey <filatecs@gmail.com>\n"
 "Language: ru\n"
-"Language-Team: Russian\n"
 "Plural-Forms: nplurals=4; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n"
 "%10<=4 && (n%100<12 || n%100>14) ? 1 : n%10==0 || (n%10>=5 && n%10<=9) || (n"
-"%100>=11 && n%100<=14)? 2 : 3)\n"
-"MIME-Version: 1.0\n"
-"Content-Type: text/plain; charset=utf-8\n"
-"Content-Transfer-Encoding: 8bit\n"
-"Generated-By: Babel 2.1.1\n"
+"%100>=11 && n%100<=14)? 2 : 3);\n"
+"Generated-By: Babel 2.0\n"
+"X-Generator: Zanata 3.7.3\n"
+"Language-Team: Russian\n"
 
 msgid ""
 "\n"
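An illustrative sketch (not part of the commit): the Plural-Forms header above encodes the Russian plural rule as a C ternary expression, which gettext evaluates to pick one of nplurals=4 translation slots for a count n. Transcribed into Python for clarity:

def ru_plural_index(n):
    # slot 0: 1, 21, 31, ... (but not 11)
    if n % 10 == 1 and n % 100 != 11:
        return 0
    # slot 1: 2-4, 22-24, ... (but not 12-14)
    if 2 <= n % 10 <= 4 and not (12 <= n % 100 <= 14):
        return 1
    # slot 2: 0, 5-20, 25-30, ...
    if n % 10 == 0 or 5 <= n % 10 <= 9 or 11 <= n % 100 <= 14:
        return 2
    # slot 3 is never reached for integer n; the formula reserves it anyway
    return 3


print([ru_plural_index(n) for n in (1, 2, 5, 11, 21, 22, 25)])
# -> [0, 1, 2, 2, 0, 1, 2]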
@ -52,6 +54,16 @@ msgstr "Ответили как размонтированные: %(ip)s/%(devic
|
||||||
msgid "%(msg)s %(ip)s:%(port)s/%(device)s"
|
msgid "%(msg)s %(ip)s:%(port)s/%(device)s"
|
||||||
msgstr "%(msg)s %(ip)s:%(port)s/%(device)s"
|
msgstr "%(msg)s %(ip)s:%(port)s/%(device)s"
|
||||||
|
|
||||||
|
#, python-format
|
||||||
|
msgid ""
|
||||||
|
"%(reconstructed)d/%(total)d (%(percentage).2f%%) partitions of %(device)d/"
|
||||||
|
"%(dtotal)d (%(dpercentage).2f%%) devices reconstructed in %(time).2fs "
|
||||||
|
"(%(rate).2f/sec, %(remaining)s remaining)"
|
||||||
|
msgstr ""
|
||||||
|
"Реконструированно разделов: %(reconstructed)d/%(total)d (%(percentage).2f%%) "
|
||||||
|
"partitions of %(device)d/%(dtotal)d (%(dpercentage).2f%%) за время "
|
||||||
|
"%(time).2fs (%(rate).2f/sec, осталось: %(remaining)s)"
|
||||||
|
|
||||||
#, python-format
|
#, python-format
|
||||||
msgid ""
|
msgid ""
|
||||||
"%(replicated)d/%(total)d (%(percentage).2f%%) partitions replicated in "
|
"%(replicated)d/%(total)d (%(percentage).2f%%) partitions replicated in "
|
||||||
|
@ -88,6 +100,10 @@ msgstr "%s не существует"
|
||||||
msgid "%s is not mounted"
|
msgid "%s is not mounted"
|
||||||
msgstr "%s не смонтирован"
|
msgstr "%s не смонтирован"
|
||||||
|
|
||||||
|
#, python-format
|
||||||
|
msgid "%s responded as unmounted"
|
||||||
|
msgstr "%s ответил как размонтированный"
|
||||||
|
|
||||||
#, python-format
|
#, python-format
|
||||||
msgid "%s running (%s - %s)"
|
msgid "%s running (%s - %s)"
|
||||||
msgstr "%s выполняется (%s - %s)"
|
msgstr "%s выполняется (%s - %s)"
|
||||||
|
@ -225,6 +241,14 @@ msgid ""
|
||||||
msgstr ""
|
msgstr ""
|
||||||
"Путь клиента %(client)s не соответствует пути в метаданных объекта %(meta)s"
|
"Путь клиента %(client)s не соответствует пути в метаданных объекта %(meta)s"
|
||||||
|
|
||||||
|
msgid ""
|
||||||
|
"Configuration option internal_client_conf_path not defined. Using default "
|
||||||
|
"configuration, See internal-client.conf-sample for options"
|
||||||
|
msgstr ""
|
||||||
|
"Опция internal_client_conf_path конфигурации не определена. Используется "
|
||||||
|
"конфигурация по умолчанию. Используйте intenal-client.conf-sample для "
|
||||||
|
"информации об опциях"
|
||||||
|
|
||||||
msgid "Connection refused"
|
msgid "Connection refused"
|
||||||
msgstr "Соединение отклонено"
|
msgstr "Соединение отклонено"
|
||||||
|
|
||||||
|
@ -284,6 +308,10 @@ msgstr "Ошибка загрузки данных: %s"
|
||||||
msgid "Devices pass completed: %.02fs"
|
msgid "Devices pass completed: %.02fs"
|
||||||
msgstr "Проход устройств выполнен: %.02fs"
|
msgstr "Проход устройств выполнен: %.02fs"
|
||||||
|
|
||||||
|
#, python-format
|
||||||
|
msgid "Directory %r does not map to a valid policy (%s)"
|
||||||
|
msgstr "Каталог %r не связан со стратегией policy (%s)"
|
||||||
|
|
||||||
#, python-format
|
#, python-format
|
||||||
msgid "ERROR %(db_file)s: %(validate_sync_to_err)s"
|
msgid "ERROR %(db_file)s: %(validate_sync_to_err)s"
|
||||||
msgstr "Ошибка %(db_file)s: %(validate_sync_to_err)s"
|
msgstr "Ошибка %(db_file)s: %(validate_sync_to_err)s"
|
||||||
|
@ -560,6 +588,9 @@ msgstr ""
|
||||||
msgid "Exception in top-level replication loop"
|
msgid "Exception in top-level replication loop"
|
||||||
msgstr "Исключительная ситуация в цикле репликации верхнего уровня"
|
msgstr "Исключительная ситуация в цикле репликации верхнего уровня"
|
||||||
|
|
||||||
|
msgid "Exception in top-levelreconstruction loop"
|
||||||
|
msgstr "Исключение в цикле реконструкции верхнего уровня"
|
||||||
|
|
||||||
#, python-format
|
#, python-format
|
||||||
msgid "Exception while deleting container %s %s"
|
msgid "Exception while deleting container %s %s"
|
||||||
msgstr "Исключительная ситуация во время удаления контейнера %s %s"
|
msgstr "Исключительная ситуация во время удаления контейнера %s %s"
|
||||||
|
@ -617,6 +648,10 @@ msgstr "Недопустимый хост %r в X-Container-Sync-To"
|
||||||
msgid "Invalid pending entry %(file)s: %(entry)s"
|
msgid "Invalid pending entry %(file)s: %(entry)s"
|
||||||
msgstr "Недопустимая ожидающая запись %(file)s: %(entry)s"
|
msgstr "Недопустимая ожидающая запись %(file)s: %(entry)s"
|
||||||
|
|
||||||
|
#, python-format
|
||||||
|
msgid "Invalid response %(resp)s from %(full_path)s"
|
||||||
|
msgstr "Недопустимый ответ %(resp)s от %(full_path)s"
|
||||||
|
|
||||||
#, python-format
|
#, python-format
|
||||||
msgid "Invalid response %(resp)s from %(ip)s"
|
msgid "Invalid response %(resp)s from %(ip)s"
|
||||||
msgstr "Недопустимый ответ %(resp)s от %(ip)s"
|
msgstr "Недопустимый ответ %(resp)s от %(ip)s"
|
||||||
|
@ -652,10 +687,18 @@ msgstr "Отсутствует конечная точка кластера дл
|
||||||
msgid "No permission to signal PID %d"
|
msgid "No permission to signal PID %d"
|
||||||
msgstr "Нет прав доступа для отправки сигнала в PID %d"
|
msgstr "Нет прав доступа для отправки сигнала в PID %d"
|
||||||
|
|
||||||
|
#, python-format
|
||||||
|
msgid "No policy with index %s"
|
||||||
|
msgstr "Не найдено стратегии с индексом %s"
|
||||||
|
|
||||||
#, python-format
|
#, python-format
|
||||||
msgid "No realm key for %r"
|
msgid "No realm key for %r"
|
||||||
msgstr "Отсутствует ключ области для %r"
|
msgstr "Отсутствует ключ области для %r"
|
||||||
|
|
||||||
|
#, python-format
|
||||||
|
msgid "No space left on device for %s (%s)"
|
||||||
|
msgstr "Не устройстве %s (%s) закончилось место"
|
||||||
|
|
||||||
#, python-format
|
#, python-format
|
||||||
msgid "Node error limited %(ip)s:%(port)s (%(device)s)"
|
msgid "Node error limited %(ip)s:%(port)s (%(device)s)"
|
||||||
msgstr "Ограниченная ошибка узла %(ip)s:%(port)s (%(device)s)"
|
msgstr "Ограниченная ошибка узла %(ip)s:%(port)s (%(device)s)"
|
||||||
|
@ -668,6 +711,10 @@ msgstr ""
|
||||||
"Не найдено: %(sync_from)r => %(sync_to)r - объект "
|
"Не найдено: %(sync_from)r => %(sync_to)r - объект "
|
||||||
"%(obj_name)r"
|
"%(obj_name)r"
|
||||||
|
|
||||||
|
#, python-format
|
||||||
|
msgid "Nothing reconstructed for %s seconds."
|
||||||
|
msgstr "Ничего не реконструировано за %s с."
|
||||||
|
|
||||||
#, python-format
|
#, python-format
|
||||||
msgid "Nothing replicated for %s seconds."
|
msgid "Nothing replicated for %s seconds."
|
||||||
msgstr "Ничего не реплицировано за %s с."
|
msgstr "Ничего не реплицировано за %s с."
|
||||||
|
@ -700,22 +747,14 @@ msgstr ""
|
||||||
"%(frate).2f, всего байт/с: %(brate).2f, время контроля: %(audit).2f, "
|
"%(frate).2f, всего байт/с: %(brate).2f, время контроля: %(audit).2f, "
|
||||||
"скорость: %(audit_rate).2f"
|
"скорость: %(audit_rate).2f"
|
||||||
|
|
||||||
#, python-format
|
|
||||||
msgid ""
|
|
||||||
"Object audit (%(type)s). Since %(start_time)s: Locally: %(passes)d passed, "
|
|
||||||
"%(quars)d quarantined, %(errors)d errors files/sec: %(frate).2f , bytes/sec: "
|
|
||||||
"%(brate).2f, Total time: %(total).2f, Auditing time: %(audit).2f, Rate: "
|
|
||||||
"%(audit_rate).2f"
|
|
||||||
msgstr ""
|
|
||||||
"Проверка объекта (%(type)s). После %(start_time)s: локально: успешно - "
|
|
||||||
"%(passes)d, в карантине - %(quars)d, файлов с ошибками %(errors)d в секунду: "
|
|
||||||
"%(frate).2f , байт/с: %(brate).2f, общее время: %(total).2f, время контроля: "
|
|
||||||
"%(audit).2f, скорость: %(audit_rate).2f"
|
|
||||||
|
|
||||||
#, python-format
|
#, python-format
|
||||||
msgid "Object audit stats: %s"
|
msgid "Object audit stats: %s"
|
||||||
msgstr "Состояние контроля объекта: %s"
|
msgstr "Состояние контроля объекта: %s"
|
||||||
|
|
||||||
|
#, python-format
|
||||||
|
msgid "Object reconstruction complete (once). (%.02f minutes)"
|
||||||
|
msgstr "Реконструкция объекта выполнена (однократно). (%.02f мин.)"
|
||||||
|
|
||||||
#, python-format
|
#, python-format
|
||||||
msgid "Object replication complete (once). (%.02f minutes)"
|
msgid "Object replication complete (once). (%.02f minutes)"
|
||||||
msgstr "Репликация объекта выполнена (однократно). (%.02f мин.)"
|
msgstr "Репликация объекта выполнена (однократно). (%.02f мин.)"
|
||||||
|
@ -775,6 +814,14 @@ msgstr "Требуется путь в X-Container-Sync-To"
|
||||||
msgid "Problem cleaning up %s"
|
msgid "Problem cleaning up %s"
|
||||||
msgstr "Неполадка при очистке %s"
|
msgstr "Неполадка при очистке %s"
|
||||||
|
|
||||||
|
#, python-format
|
||||||
|
msgid "Problem cleaning up %s (%s)"
|
||||||
|
msgstr "Возникла проблема при очистке %s (%s)"
|
||||||
|
|
||||||
|
#, fuzzy, python-format
|
||||||
|
msgid "Problem writing durable state file %s (%s)"
|
||||||
|
msgstr "Возникла проблема при записи файла состояния %s (%s)"
|
||||||
|
|
||||||
#, python-format
|
#, python-format
|
||||||
msgid "Profiling Error: %s"
|
msgid "Profiling Error: %s"
|
||||||
msgstr "Ошибка профилирования: %s"
|
msgstr "Ошибка профилирования: %s"
|
||||||
|
@ -818,6 +865,14 @@ msgstr "Удаление объектов %s"
|
||||||
msgid "Removing partition: %s"
|
msgid "Removing partition: %s"
|
||||||
msgstr "Удаление раздела: %s"
|
msgstr "Удаление раздела: %s"
|
||||||
|
|
||||||
|
#, python-format
|
||||||
|
msgid "Removing pid file %s with invalid pid"
|
||||||
|
msgstr "Удаление pid файла %s с неверным pid-ом"
|
||||||
|
|
||||||
|
#, python-format
|
||||||
|
msgid "Removing pid file %s with wrong pid %d"
|
||||||
|
msgstr "Удаление pid файла %s с неверным pid-ом %d"
|
||||||
|
|
||||||
#, python-format
|
#, python-format
|
||||||
msgid "Removing stale pid file %s"
|
msgid "Removing stale pid file %s"
|
||||||
msgstr "Удаление устаревшего файла pid %s"
|
msgstr "Удаление устаревшего файла pid %s"
|
||||||
|
@ -837,6 +892,11 @@ msgstr ""
|
||||||
"Возвращено 498 для %(meth)s в %(acc)s/%(cont)s/%(obj)s . Ratelimit "
|
"Возвращено 498 для %(meth)s в %(acc)s/%(cont)s/%(obj)s . Ratelimit "
|
||||||
"(максимальная задержка): %(e)s"
|
"(максимальная задержка): %(e)s"
|
||||||
|
|
||||||
|
msgid "Ring change detected. Aborting current reconstruction pass."
|
||||||
|
msgstr ""
|
||||||
|
"Обнаружено изменение кольца. Принудительное завершение текущего прохода "
|
||||||
|
"реконструкции."
|
||||||
|
|
||||||
msgid "Ring change detected. Aborting current replication pass."
|
msgid "Ring change detected. Aborting current replication pass."
|
||||||
msgstr ""
|
msgstr ""
|
||||||
"Обнаружено кольцевое изменение. Принудительное завершение текущего прохода "
|
"Обнаружено кольцевое изменение. Принудительное завершение текущего прохода "
|
||||||
|
@ -846,6 +906,9 @@ msgstr ""
|
||||||
msgid "Running %s once"
|
msgid "Running %s once"
|
||||||
msgstr "Однократное выполнение %s"
|
msgstr "Однократное выполнение %s"
|
||||||
|
|
||||||
|
msgid "Running object reconstructor in script mode."
|
||||||
|
msgstr "Запуск утилиты реконструкции объектов в режиме скрипта."
|
||||||
|
|
||||||
msgid "Running object replicator in script mode."
|
msgid "Running object replicator in script mode."
|
||||||
msgstr "Запуск утилиты репликации объектов в режиме сценариев."
|
msgstr "Запуск утилиты репликации объектов в режиме сценариев."
|
||||||
|
|
||||||
|
@ -889,6 +952,12 @@ msgstr "%s будет пропущен, так как он не смонтиро
|
||||||
msgid "Starting %s"
|
msgid "Starting %s"
|
||||||
msgstr "Запуск %s"
|
msgstr "Запуск %s"
|
||||||
|
|
||||||
|
msgid "Starting object reconstruction pass."
|
||||||
|
msgstr "Запуск прохода реконструкции объектов."
|
||||||
|
|
||||||
|
msgid "Starting object reconstructor in daemon mode."
|
||||||
|
msgstr "Запуск утилиты реконструкции объектов в режиме демона."
|
||||||
|
|
||||||
msgid "Starting object replication pass."
|
msgid "Starting object replication pass."
|
||||||
msgstr "Запуск прохода репликации объектов."
|
msgstr "Запуск прохода репликации объектов."
|
||||||
|
|
||||||
|
@ -914,10 +983,18 @@ msgstr ""
|
||||||
msgid "Timeout %(action)s to memcached: %(server)s"
|
msgid "Timeout %(action)s to memcached: %(server)s"
|
||||||
msgstr "Тайм-аут действия %(action)s для сохранения в кэш памяти: %(server)s"
|
msgstr "Тайм-аут действия %(action)s для сохранения в кэш памяти: %(server)s"
|
||||||
|
|
||||||
|
#, python-format
|
||||||
|
msgid "Timeout Exception with %(ip)s:%(port)s/%(device)s"
|
||||||
|
msgstr "Исключение по таймауту %(ip)s:%(port)s/%(device)s"
|
||||||
|
|
||||||
#, python-format
|
#, python-format
|
||||||
msgid "Trying to %(method)s %(path)s"
|
msgid "Trying to %(method)s %(path)s"
|
||||||
msgstr "Попытка выполнения метода %(method)s %(path)s"
|
msgstr "Попытка выполнения метода %(method)s %(path)s"
|
||||||
|
|
||||||
|
#, python-format
|
||||||
|
msgid "Trying to GET %(full_path)s"
|
||||||
|
msgstr "Попытка GET-запроса %(full_path)s"
|
||||||
|
|
||||||
#, python-format
|
#, python-format
|
||||||
msgid "Trying to get final status of PUT to %s"
|
msgid "Trying to get final status of PUT to %s"
|
||||||
msgstr "Попытка получения конечного состояния PUT в %s"
|
msgstr "Попытка получения конечного состояния PUT в %s"
|
||||||
|
@ -942,10 +1019,18 @@ msgstr "Необрабатываемая исключительная ситуа
|
||||||
msgid "Unable to find %s config section in %s"
|
msgid "Unable to find %s config section in %s"
|
||||||
msgstr "Не удалось найти раздел конфигурации %s в %s"
|
msgstr "Не удалось найти раздел конфигурации %s в %s"
|
||||||
|
|
||||||
|
#, python-format
|
||||||
|
msgid "Unable to load internal client from config: %r (%s)"
|
||||||
|
msgstr "Не удалось загрузить клиент из конфигурации: %r (%s)"
|
||||||
|
|
||||||
#, python-format
|
#, python-format
|
||||||
msgid "Unable to locate %s in libc. Leaving as a no-op."
|
msgid "Unable to locate %s in libc. Leaving as a no-op."
|
||||||
msgstr "Не удалось найти %s в libc. Оставлено как no-op."
|
msgstr "Не удалось найти %s в libc. Оставлено как no-op."
|
||||||
|
|
||||||
|
#, python-format
|
||||||
|
msgid "Unable to locate config for %s"
|
||||||
|
msgstr "Не удалось найти конфигурационный файл для %s"
|
||||||
|
|
||||||
msgid ""
|
msgid ""
|
||||||
"Unable to locate fallocate, posix_fallocate in libc. Leaving as a no-op."
|
"Unable to locate fallocate, posix_fallocate in libc. Leaving as a no-op."
|
||||||
msgstr ""
|
msgstr ""
|
||||||
|
@ -970,6 +1055,11 @@ msgstr "Непредвиденный ответ: %s"
|
||||||
msgid "Unhandled exception"
|
msgid "Unhandled exception"
|
||||||
msgstr "Необработанная исключительная ситуация"
|
msgstr "Необработанная исключительная ситуация"
|
||||||
|
|
||||||
|
#, python-format
|
||||||
|
msgid "Unknown exception trying to GET: %(account)r %(container)r %(object)r"
|
||||||
|
msgstr ""
|
||||||
|
"Неизвестное исключение в GET-запросе: %(account)r %(container)r %(object)r"
|
||||||
|
|
||||||
#, python-format
|
#, python-format
|
||||||
msgid "Update report failed for %(container)s %(dbfile)s"
|
msgid "Update report failed for %(container)s %(dbfile)s"
|
||||||
msgstr "Отчет об обновлении для %(container)s %(dbfile)s не выполнен"
|
msgstr "Отчет об обновлении для %(container)s %(dbfile)s не выполнен"
|
||||||
|
@ -1004,6 +1094,10 @@ msgstr ""
|
||||||
msgid "Waited %s seconds for %s to die; giving up"
|
msgid "Waited %s seconds for %s to die; giving up"
|
||||||
msgstr "Система ожидала %s секунд для %s завершения; освобождение"
|
msgstr "Система ожидала %s секунд для %s завершения; освобождение"
|
||||||
|
|
||||||
|
#, python-format
|
||||||
|
msgid "Waited %s seconds for %s to die; killing"
|
||||||
|
msgstr "Система ожидала %s секунд для %s завершения; Принудительное завершение"
|
||||||
|
|
||||||
msgid "Warning: Cannot ratelimit without a memcached client"
|
msgid "Warning: Cannot ratelimit without a memcached client"
|
||||||
msgstr ""
|
msgstr ""
|
||||||
"Предупреждение: не удается ограничить скорость без клиента с кэшированием "
|
"Предупреждение: не удается ограничить скорость без клиента с кэшированием "
|
||||||
|
|
File diff suppressed because it is too large
@@ -1,4 +1,4 @@
-# Turkish (Turkey) translations for swift.
+# Translations template for swift.
 # Copyright (C) 2015 ORGANIZATION
 # This file is distributed under the same license as the swift project.
 #
@@ -7,18 +7,19 @@
 # OpenStack Infra <zanata@openstack.org>, 2015. #zanata
 msgid ""
 msgstr ""
-"Project-Id-Version: swift 2.5.1.dev70\n"
+"Project-Id-Version: swift 2.6.1.dev176\n"
 "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
-"POT-Creation-Date: 2015-10-23 06:34+0000\n"
+"POT-Creation-Date: 2016-03-08 04:09+0000\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
 "PO-Revision-Date: 2015-09-04 07:42+0000\n"
 "Last-Translator: İşbaran Akçayır <isbaran@gmail.com>\n"
-"Language: tr_TR\n"
+"Language: tr-TR\n"
+"Plural-Forms: nplurals=1; plural=0;\n"
+"Generated-By: Babel 2.0\n"
+"X-Generator: Zanata 3.7.3\n"
 "Language-Team: Turkish (Turkey)\n"
-"Plural-Forms: nplurals=1; plural=0\n"
-"MIME-Version: 1.0\n"
-"Content-Type: text/plain; charset=utf-8\n"
-"Content-Transfer-Encoding: 8bit\n"
-"Generated-By: Babel 2.1.1\n"
 
 msgid ""
 "\n"
@@ -737,18 +738,6 @@ msgstr ""
 "%(frate).2f, Toplam bayt/sn: %(brate).2f, Denetleme zamanı: %(audit).2f, "
 "Oran: %(audit_rate).2f"
 
-#, python-format
-msgid ""
-"Object audit (%(type)s). Since %(start_time)s: Locally: %(passes)d passed, "
-"%(quars)d quarantined, %(errors)d errors files/sec: %(frate).2f , bytes/sec: "
-"%(brate).2f, Total time: %(total).2f, Auditing time: %(audit).2f, Rate: "
-"%(audit_rate).2f"
-msgstr ""
-"Nesne denedimi (%(type)s). %(start_time)s den beri: Yerel olarak: %(passes)d "
-"geçti, %(quars)d karantinaya alındı, %(errors)d hata dosya/sn: %(frate).2f , "
-"bayt/sn: %(brate).2f, Toplam süre: %(total).2f, Denetleme süresi: "
-"%(audit).2f, Oran: %(audit_rate).2f"
-
 #, python-format
 msgid "Object audit stats: %s"
 msgstr "Nesne denetim istatistikleri: %s"
@@ -1,4 +1,4 @@
-# Chinese (Simplified, China) translations for swift.
+# Translations template for swift.
 # Copyright (C) 2015 ORGANIZATION
 # This file is distributed under the same license as the swift project.
 #
@@ -8,18 +8,19 @@
 # OpenStack Infra <zanata@openstack.org>, 2015. #zanata
 msgid ""
 msgstr ""
-"Project-Id-Version: swift 2.5.1.dev70\n"
+"Project-Id-Version: swift 2.6.1.dev176\n"
 "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
-"POT-Creation-Date: 2015-10-23 06:34+0000\n"
+"POT-Creation-Date: 2016-03-08 04:09+0000\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
 "PO-Revision-Date: 2015-08-11 11:22+0000\n"
 "Last-Translator: openstackjenkins <jenkins@openstack.org>\n"
-"Language: zh_Hans_CN\n"
+"Language: zh-CN\n"
+"Plural-Forms: nplurals=1; plural=0;\n"
+"Generated-By: Babel 2.0\n"
+"X-Generator: Zanata 3.7.3\n"
 "Language-Team: Chinese (China)\n"
-"Plural-Forms: nplurals=1; plural=0\n"
-"MIME-Version: 1.0\n"
-"Content-Type: text/plain; charset=utf-8\n"
-"Content-Transfer-Encoding: 8bit\n"
-"Generated-By: Babel 2.1.1\n"
 
 msgid ""
 "\n"
@@ -668,17 +669,6 @@ msgstr ""
 "%(quars)d, 错误总数: %(errors)d, 文件/秒总和:%(frate).2f, bytes/sec总和: "
 "%(brate).2f, 审计时间: %(audit).2f, 速率: %(audit_rate).2f"
 
-#, python-format
-msgid ""
-"Object audit (%(type)s). Since %(start_time)s: Locally: %(passes)d passed, "
-"%(quars)d quarantined, %(errors)d errors files/sec: %(frate).2f , bytes/sec: "
-"%(brate).2f, Total time: %(total).2f, Auditing time: %(audit).2f, Rate: "
-"%(audit_rate).2f"
-msgstr ""
-"对象审计 (%(type)s). 自 %(start_time)s开始: 本地: %(passes)d 通过, %(quars)d "
-"隔离, %(errors)d 错误 文件/秒: %(frate).2f , bytes/秒: %(brate).2f, 总时间: "
-"%(total).2f, 审计时间: %(audit).2f, 速率: %(audit_rate).2f"
-
 #, python-format
 msgid "Object audit stats: %s"
 msgstr "对象审计统计:%s"
@@ -1,4 +1,4 @@
-# Chinese (Traditional, Taiwan) translations for swift.
+# Translations template for swift.
 # Copyright (C) 2015 ORGANIZATION
 # This file is distributed under the same license as the swift project.
 #
@@ -7,18 +7,19 @@
 # OpenStack Infra <zanata@openstack.org>, 2015. #zanata
 msgid ""
 msgstr ""
-"Project-Id-Version: swift 2.5.1.dev70\n"
+"Project-Id-Version: swift 2.6.1.dev176\n"
 "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
-"POT-Creation-Date: 2015-10-23 06:34+0000\n"
+"POT-Creation-Date: 2016-03-08 04:09+0000\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
 "PO-Revision-Date: 2015-08-11 11:22+0000\n"
 "Last-Translator: openstackjenkins <jenkins@openstack.org>\n"
-"Language: zh_Hant_TW\n"
+"Language: zh-TW\n"
+"Plural-Forms: nplurals=1; plural=0;\n"
+"Generated-By: Babel 2.0\n"
+"X-Generator: Zanata 3.7.3\n"
 "Language-Team: Chinese (Taiwan)\n"
-"Plural-Forms: nplurals=1; plural=0\n"
-"MIME-Version: 1.0\n"
-"Content-Type: text/plain; charset=utf-8\n"
-"Content-Transfer-Encoding: 8bit\n"
-"Generated-By: Babel 2.1.1\n"
 
 msgid ""
 "\n"
@@ -668,18 +669,6 @@ msgstr ""
 "%(quars)d,錯誤總計:%(errors)d,檔案/秒總計:%(frate).2f,位元組/秒總計:"
 "%(brate).2f,審核時間:%(audit).2f,速率:%(audit_rate).2f"
 
-#, python-format
-msgid ""
-"Object audit (%(type)s). Since %(start_time)s: Locally: %(passes)d passed, "
-"%(quars)d quarantined, %(errors)d errors files/sec: %(frate).2f , bytes/sec: "
-"%(brate).2f, Total time: %(total).2f, Auditing time: %(audit).2f, Rate: "
-"%(audit_rate).2f"
-msgstr ""
-"物件審核 (%(type)s)。自 %(start_time)s 以來:本端:%(passes)d 個已通"
-"過,%(quars)d 個已隔離,%(errors)d 個錯誤檔案/秒:%(frate).2f,位元組數/秒:"
-"%(brate).2f,時間總計:%(total).2f,審核時間:%(audit).2f,速率:"
-"%(audit_rate).2f"
-
 #, python-format
 msgid "Object audit stats: %s"
 msgstr "物件審核統計資料:%s"
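An illustrative sketch (not part of the commit): the header churn repeated across every catalog above (charset utf-8 becoming UTF-8, Plural-Forms gaining a trailing semicolon, Language tags like zh_Hans_CN becoming zh-CN) can be spot-checked with polib, a third-party .po library assumed here purely for illustration.

import polib


def check_header(path):
    meta = polib.pofile(path).metadata  # the msgid "" header as a dict
    problems = []
    if 'charset=UTF-8' not in meta.get('Content-Type', ''):
        problems.append('charset should be UTF-8')
    if not meta.get('Plural-Forms', '').rstrip().endswith(';'):
        problems.append('Plural-Forms should end with a semicolon')
    if '_' in meta.get('Language', ''):
        problems.append('Language should use hyphens (e.g. pt-BR)')
    return problems


for catalog in ('de.po', 'ko.po', 'pt_BR.po', 'zh_TW.po'):  # paths assumed
    print(catalog, check_header(catalog) or 'OK')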
@@ -28,8 +28,7 @@ from swift.common.utils import get_logger, ratelimit_sleep, dump_recon_cache, \
     list_from_csv, listdir
 from swift.common.exceptions import DiskFileQuarantined, DiskFileNotExist
 from swift.common.daemon import Daemon
-
-SLEEP_BETWEEN_AUDITS = 30
+from swift.common.storage_policy import POLICIES
 
 
 class AuditorWorker(object):
@@ -39,7 +38,7 @@ class AuditorWorker(object):
         self.conf = conf
         self.logger = logger
         self.devices = devices
-        self.diskfile_mgr = diskfile.DiskFileManager(conf, self.logger)
+        self.diskfile_router = diskfile.DiskFileRouter(conf, self.logger)
         self.max_files_per_second = float(conf.get('files_per_second', 20))
         self.max_bytes_per_second = float(conf.get('bytes_per_second',
                                                    10000000))
@@ -87,8 +86,16 @@ class AuditorWorker(object):
         total_quarantines = 0
         total_errors = 0
         time_auditing = 0
-        all_locs = self.diskfile_mgr.object_audit_location_generator(
-            device_dirs=device_dirs)
+        # TODO: we should move audit-location generation to the storage policy,
+        # as we may (conceivably) have a different filesystem layout for each.
+        # We'd still need to generate the policies to audit from the actual
+        # directories found on-disk, and have appropriate error reporting if we
+        # find a directory that doesn't correspond to any known policy. This
+        # will require a sizable refactor, but currently all diskfile managers
+        # can find all diskfile locations regardless of policy -- so for now
+        # just use Policy-0's manager.
+        all_locs = (self.diskfile_router[POLICIES[0]]
+                    .object_audit_location_generator(device_dirs=device_dirs))
         for location in all_locs:
             loop_time = time.time()
             self.failsafe_object_audit(location)
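An illustrative sketch (not part of the commit) of the pattern this hunk introduces: a DiskFileRouter is indexed by storage policy and returns the diskfile manager that understands that policy's on-disk layout, rather than assuming a single manager for everything. The empty conf and None logger below are assumptions for illustration.

from swift.common.storage_policy import POLICIES
from swift.obj import diskfile

conf = {}      # a real daemon passes its parsed config
logger = None  # and its configured logger

router = diskfile.DiskFileRouter(conf, logger)
for policy in POLICIES:
    mgr = router[policy]  # e.g. DiskFileManager or ECDiskFileManager
    print('%s: %s' % (policy.idx, type(mgr).__name__))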
@@ -101,8 +108,8 @@ class AuditorWorker(object):
             self.logger.info(_(
                 'Object audit (%(type)s). '
                 'Since %(start_time)s: Locally: %(passes)d passed, '
-                '%(quars)d quarantined, %(errors)d errors '
-                'files/sec: %(frate).2f , bytes/sec: %(brate).2f, '
+                '%(quars)d quarantined, %(errors)d errors, '
+                'files/sec: %(frate).2f, bytes/sec: %(brate).2f, '
                 'Total time: %(total).2f, Auditing time: %(audit).2f, '
                 'Rate: %(audit_rate).2f') % {
                     'type': '%s%s' % (self.auditor_type, description),
@@ -187,8 +194,9 @@
         def raise_dfq(msg):
             raise DiskFileQuarantined(msg)
 
+        diskfile_mgr = self.diskfile_router[location.policy]
         try:
-            df = self.diskfile_mgr.get_diskfile_from_audit_location(location)
+            df = diskfile_mgr.get_diskfile_from_audit_location(location)
             with df.open():
                 metadata = df.get_metadata()
                 obj_size = int(metadata['Content-Length'])
@ -230,9 +238,10 @@ class ObjectAuditor(Daemon):
|
||||||
self.recon_cache_path = conf.get('recon_cache_path',
|
self.recon_cache_path = conf.get('recon_cache_path',
|
||||||
'/var/cache/swift')
|
'/var/cache/swift')
|
||||||
self.rcache = os.path.join(self.recon_cache_path, "object.recon")
|
self.rcache = os.path.join(self.recon_cache_path, "object.recon")
|
||||||
|
self.interval = int(conf.get('interval', 30))
|
||||||
|
|
||||||
def _sleep(self):
|
def _sleep(self):
|
||||||
time.sleep(SLEEP_BETWEEN_AUDITS)
|
time.sleep(self.interval)
|
||||||
|
|
||||||
def clear_recon_cache(self, auditor_type):
|
def clear_recon_cache(self, auditor_type):
|
||||||
"""Clear recon cache entries"""
|
"""Clear recon cache entries"""
|
||||||
|
@ -261,7 +270,8 @@ class ObjectAuditor(Daemon):
|
||||||
try:
|
try:
|
||||||
self.run_audit(**kwargs)
|
self.run_audit(**kwargs)
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
self.logger.error(_("ERROR: Unable to run auditing: %s") % e)
|
self.logger.exception(
|
||||||
|
_("ERROR: Unable to run auditing: %s") % e)
|
||||||
finally:
|
finally:
|
||||||
sys.exit()
|
sys.exit()
|
||||||
|
|
||||||
|
|
|
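
Note: the auditor hunks above swap the single replication-only DiskFileManager for a DiskFileRouter keyed by storage policy. A minimal sketch of that lookup pattern, assuming a plain dict registry and stand-in manager classes (Swift's real router uses a policy-type registration mechanism, so treat the names and wiring here as illustrative):

    class ReplicationDiskFileManager(object):
        """Stand-in for the manager that knows the replication layout."""

    class ECDiskFileManager(object):
        """Stand-in for the manager that knows the erasure-coding layout."""

    class DiskFileRouter(object):
        # hypothetical registry: policy type name -> manager class
        policy_type_to_manager_cls = {
            'replication': ReplicationDiskFileManager,
            'erasure_coding': ECDiskFileManager,
        }

        def __init__(self, conf, logger):
            # one manager instance per registered policy type
            self.policy_to_manager = dict(
                (ptype, cls()) for ptype, cls in
                self.policy_type_to_manager_cls.items())

        def __getitem__(self, policy):
            # callers index the router with a policy object, as in
            # self.diskfile_router[POLICIES[0]] above
            return self.policy_to_manager[policy.policy_type]

The point is only that indexing the router with a policy object yields the manager that understands that policy's on-disk layout.
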
swift/obj/diskfile.py

@@ -56,7 +56,7 @@ from swift.common.utils import mkdirs, Timestamp, \
     storage_directory, hash_path, renamer, fallocate, fsync, fdatasync, \
     fsync_dir, drop_buffer_cache, ThreadPool, lock_path, write_pickle, \
     config_true_value, listdir, split_path, ismount, remove_file, \
-    get_md5_socket, F_SETPIPE_SZ
+    get_md5_socket, F_SETPIPE_SZ, decode_timestamps, encode_timestamps
 from swift.common.splice import splice, tee
 from swift.common.exceptions import DiskFileQuarantined, DiskFileNotExist, \
     DiskFileCollision, DiskFileNoSpace, DiskFileDeviceUnavailable, \
@@ -76,7 +76,7 @@ METADATA_KEY = 'user.swift.metadata'
 DROP_CACHE_WINDOW = 1024 * 1024
 # These are system-set metadata keys that cannot be changed with a POST.
 # They should be lowercase.
-DATAFILE_SYSTEM_META = set('content-length content-type deleted etag'.split())
+DATAFILE_SYSTEM_META = set('content-length deleted etag'.split())
 DATADIR_BASE = 'objects'
 ASYNCDIR_BASE = 'async_pending'
 TMP_BASE = 'tmp'
@@ -442,23 +442,78 @@ class BaseDiskFileManager(object):
                 max_pipe_size = int(f.read())
             self.pipe_size = min(max_pipe_size, self.disk_chunk_size)
 
+    def make_on_disk_filename(self, timestamp, ext=None,
+                              ctype_timestamp=None, *a, **kw):
+        """
+        Returns filename for given timestamp.
+
+        :param timestamp: the object timestamp, an instance of
+                          :class:`~swift.common.utils.Timestamp`
+        :param ext: an optional string representing a file extension to be
+                    appended to the returned file name
+        :param ctype_timestamp: an optional content-type timestamp, an instance
+                                of :class:`~swift.common.utils.Timestamp`
+        :returns: a file name
+        """
+        rv = timestamp.internal
+        if ext == '.meta' and ctype_timestamp:
+            # If ctype_timestamp is None then the filename is simply the
+            # internal form of the timestamp. If ctype_timestamp is not None
+            # then the difference between the raw values of the two timestamps
+            # is appended as a hex number, with its sign.
+            #
+            # There are two reasons for encoding the content-type timestamp
+            # in the filename in this way. First, it means that two .meta files
+            # having the same timestamp but different content-type timestamps
+            # (and potentially different content-type values) will be distinct
+            # and therefore will be independently replicated when rsync
+            # replication is used. That ensures that all nodes end up having
+            # all content-type values after replication (with the most recent
+            # value being selected when the diskfile is opened). Second, having
+            # the content-type encoded in timestamp in the filename makes it
+            # possible for the on disk file search code to determine that
+            # timestamp by inspecting only the filename, and not needing to
+            # open the file and read its xattrs.
+            rv = encode_timestamps(timestamp, ctype_timestamp, explicit=True)
+        if ext:
+            rv = '%s%s' % (rv, ext)
+        return rv
+
     def parse_on_disk_filename(self, filename):
         """
         Parse an on disk file name.
 
-        :param filename: the data file name including extension
-        :returns: a dict, with keys for timestamp, and ext:
+        :param filename: the file name including extension
+        :returns: a dict, with keys for timestamp, ext and ctype_timestamp:
 
           * timestamp is a :class:`~swift.common.utils.Timestamp`
+          * ctype_timestamp is a :class:`~swift.common.utils.Timestamp` or
+            None for .meta files, otherwise None
           * ext is a string, the file extension including the leading dot or
            the empty string if the filename has no extension.
 
-        Subclases may add further keys to the returned dict.
+        Subclasses may override this method to add further keys to the
+        returned dict.
 
        :raises DiskFileError: if any part of the filename is not able to be
                               validated.
        """
-        raise NotImplementedError
+        ts_ctype = None
+        fname, ext = splitext(filename)
+        try:
+            if ext == '.meta':
+                timestamp, ts_ctype = decode_timestamps(
+                    fname, explicit=True)[:2]
+            else:
+                timestamp = Timestamp(fname)
+        except ValueError:
+            raise DiskFileError('Invalid Timestamp value in filename %r'
+                                % filename)
+        return {
+            'timestamp': timestamp,
+            'ext': ext,
+            'ctype_timestamp': ts_ctype
+        }
 
     def _process_ondisk_files(self, exts, results, **kwargs):
         """
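
Note: to make the two-timestamp .meta names above concrete, here is a toy round-trip of the encoding idea: the content-type timestamp travels as a signed hex offset appended to the metadata timestamp. Plain integer microseconds stand in for Swift's Timestamp objects, and the field widths and exact format are assumptions for illustration, not Swift's real encode_timestamps()/decode_timestamps():

    def encode(meta_ts_usec, ctype_ts_usec=None):
        rv = '%016d' % meta_ts_usec      # stand-in for Timestamp.internal
        if ctype_ts_usec is not None:
            delta = ctype_ts_usec - meta_ts_usec
            sign = '-' if delta < 0 else '+'
            rv += '%s%x' % (sign, abs(delta))  # signed hex offset
        return rv

    def decode(value):
        for sign in ('+', '-'):
            if sign in value:
                base, delta = value.split(sign, 1)
                offset = int(delta, 16) * (1 if sign == '+' else -1)
                return int(base), int(base) + offset
        # no offset: content-type timestamp defaults to the metadata one
        return int(value), int(value)

    assert decode(encode(1000000, 999000)) == (1000000, 999000)

Because the offset lives in the filename, two .meta files with the same metadata timestamp but different content-type timestamps get distinct names, which is what makes rsync replication carry both.
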
@@ -592,18 +647,45 @@ class BaseDiskFileManager(object):
         # the results dict is used to collect results of file filtering
         results = {}
 
-        # non-tombstones older than or equal to latest tombstone are obsolete
         if exts.get('.ts'):
+            # non-tombstones older than or equal to latest tombstone are
+            # obsolete
             for ext in filter(lambda ext: ext != '.ts', exts.keys()):
                 exts[ext], older = self._split_gt_timestamp(
                     exts[ext], exts['.ts'][0]['timestamp'])
                 results.setdefault('obsolete', []).extend(older)
+            # all but most recent .ts are obsolete
+            results.setdefault('obsolete', []).extend(exts['.ts'][1:])
+            exts['.ts'] = exts['.ts'][:1]
 
-        # all but most recent .meta and .ts are obsolete
-        for ext in ('.meta', '.ts'):
-            if ext in exts:
-                results.setdefault('obsolete', []).extend(exts[ext][1:])
-                exts[ext] = exts[ext][:1]
+        if exts.get('.meta'):
+            # retain the newest meta file
+            retain = 1
+            if exts['.meta'][1:]:
+                # there are other meta files so find the one with newest
+                # ctype_timestamp...
+                exts['.meta'][1:] = sorted(
+                    exts['.meta'][1:],
+                    key=lambda info: info['ctype_timestamp'],
+                    reverse=True)
+                # ...and retain this IFF its ctype_timestamp is greater than
+                # newest meta file
+                if (exts['.meta'][1]['ctype_timestamp'] >
+                        exts['.meta'][0]['ctype_timestamp']):
+                    if (exts['.meta'][1]['timestamp'] ==
+                            exts['.meta'][0]['timestamp']):
+                        # both at same timestamp so retain only the one with
+                        # newest ctype
+                        exts['.meta'][:2] = [exts['.meta'][1],
+                                             exts['.meta'][0]]
+                        retain = 1
+                    else:
+                        # retain both - first has newest metadata, second has
+                        # newest ctype
+                        retain = 2
+            # discard all meta files not being retained...
+            results.setdefault('obsolete', []).extend(exts['.meta'][retain:])
+            exts['.meta'] = exts['.meta'][:retain]
 
         # delegate to subclass handler
         self._process_ondisk_files(exts, results, **kwargs)
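
Note: a toy walk-through of the retention rule just added may help. The newest .meta always survives, and at most one older .meta survives with it, if and only if it carries a newer content-type timestamp. Invented integer timestamps stand in for Timestamp objects:

    metas = [  # newest-first by 'timestamp', as exts['.meta'] arrives sorted
        {'timestamp': 30, 'ctype_timestamp': 10},
        {'timestamp': 20, 'ctype_timestamp': 25},
        {'timestamp': 10, 'ctype_timestamp': 5},
    ]
    retain = 1
    metas[1:] = sorted(metas[1:], key=lambda m: m['ctype_timestamp'],
                       reverse=True)
    if metas[1]['ctype_timestamp'] > metas[0]['ctype_timestamp']:
        if metas[1]['timestamp'] == metas[0]['timestamp']:
            # same metadata timestamp: keep only the newer-ctype copy
            metas[:2] = [metas[1], metas[0]]
        else:
            # keep both: first has newest metadata, second newest ctype
            retain = 2
    obsolete, metas = metas[retain:], metas[:retain]
    assert retain == 2 and [m['timestamp'] for m in metas] == [30, 20]
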
@@ -612,11 +694,16 @@ class BaseDiskFileManager(object):
         if exts.get('.ts'):
             results['ts_info'] = exts['.ts'][0]
         if 'data_info' in results and exts.get('.meta'):
-            # only report a meta file if there is a data file
+            # only report meta files if there is a data file
             results['meta_info'] = exts['.meta'][0]
+            ctype_info = exts['.meta'].pop()
+            if (ctype_info['ctype_timestamp']
+                    > results['data_info']['timestamp']):
+                results['ctype_info'] = ctype_info
 
-        # set ts_file, data_file and meta_file with path to chosen file or None
-        for info_key in ('data_info', 'meta_info', 'ts_info'):
+        # set ts_file, data_file, meta_file and ctype_file with path to
+        # chosen file or None
+        for info_key in ('data_info', 'meta_info', 'ts_info', 'ctype_info'):
             info = results.get(info_key)
             key = info_key[:-5] + '_file'
             results[key] = join(datadir, info['filename']) if info else None
@@ -678,7 +765,23 @@ class BaseDiskFileManager(object):
         return self.cleanup_ondisk_files(
             hsh_path, reclaim_age=reclaim_age)['files']
 
-    def _hash_suffix_dir(self, path, mapper, reclaim_age):
+    def _update_suffix_hashes(self, hashes, ondisk_info):
+        """
+        Applies policy specific updates to the given dict of md5 hashes for
+        the given ondisk_info.
+
+        :param hashes: a dict of md5 hashes to be updated
+        :param ondisk_info: a dict describing the state of ondisk files, as
+                            returned by get_ondisk_files
+        """
+        raise NotImplementedError
+
+    def _hash_suffix_dir(self, path, reclaim_age):
+        """
+
+        :param path: full path to directory
+        :param reclaim_age: age in seconds at which to remove tombstones
+        """
         hashes = defaultdict(hashlib.md5)
         try:
             path_contents = sorted(os.listdir(path))
@@ -689,7 +792,7 @@ class BaseDiskFileManager(object):
         for hsh in path_contents:
             hsh_path = join(path, hsh)
             try:
-                files = self.hash_cleanup_listdir(hsh_path, reclaim_age)
+                ondisk_info = self.cleanup_ondisk_files(hsh_path, reclaim_age)
             except OSError as err:
                 if err.errno == errno.ENOTDIR:
                     partition_path = dirname(path)
@@ -702,14 +805,40 @@ class BaseDiskFileManager(object):
                         'quar_path': quar_path})
                     continue
                 raise
-            if not files:
+            if not ondisk_info['files']:
                 try:
                     os.rmdir(hsh_path)
                 except OSError:
                     pass
-            for filename in files:
-                key, value = mapper(filename)
-                hashes[key].update(value)
+                continue
+
+            # ondisk_info has info dicts containing timestamps for those
+            # files that could determine the state of the diskfile if it were
+            # to be opened. We update the suffix hash with the concatenation of
+            # each file's timestamp and extension. The extension is added to
+            # guarantee distinct hash values from two object dirs that have
+            # different file types at the same timestamp(s).
+            #
+            # Files that may be in the object dir but would have no effect on
+            # the state of the diskfile are not used to update the hash.
+            for key in (k for k in ('meta_info', 'ts_info')
+                        if k in ondisk_info):
+                info = ondisk_info[key]
+                hashes[None].update(info['timestamp'].internal + info['ext'])
+
+            # delegate to subclass for data file related updates...
+            self._update_suffix_hashes(hashes, ondisk_info)
+
+            if 'ctype_info' in ondisk_info:
+                # We have a distinct content-type timestamp so update the
+                # hash. As a precaution, append '_ctype' to differentiate this
+                # value from any other timestamp value that might included in
+                # the hash in future. There is no .ctype file so use _ctype to
+                # avoid any confusion.
+                info = ondisk_info['ctype_info']
+                hashes[None].update(info['ctype_timestamp'].internal
+                                    + '_ctype')
+
         try:
             os.rmdir(path)
         except OSError as e:
|
||||||
"""
|
"""
|
||||||
Performs reclamation and returns an md5 of all (remaining) files.
|
Performs reclamation and returns an md5 of all (remaining) files.
|
||||||
|
|
||||||
|
:param path: full path to directory
|
||||||
:param reclaim_age: age in seconds at which to remove tombstones
|
:param reclaim_age: age in seconds at which to remove tombstones
|
||||||
:raises PathNotDir: if given path is not a valid directory
|
:raises PathNotDir: if given path is not a valid directory
|
||||||
:raises OSError: for non-ENOTDIR errors
|
:raises OSError: for non-ENOTDIR errors
|
||||||
|
@ -831,6 +961,7 @@ class BaseDiskFileManager(object):
|
||||||
A context manager that will lock on the device given, if
|
A context manager that will lock on the device given, if
|
||||||
configured to do so.
|
configured to do so.
|
||||||
|
|
||||||
|
:param device: name of target device
|
||||||
:raises ReplicationLockTimeout: If the lock on the device
|
:raises ReplicationLockTimeout: If the lock on the device
|
||||||
cannot be granted within the configured timeout.
|
cannot be granted within the configured timeout.
|
||||||
"""
|
"""
|
||||||
|
@ -846,6 +977,18 @@ class BaseDiskFileManager(object):
|
||||||
|
|
||||||
def pickle_async_update(self, device, account, container, obj, data,
|
def pickle_async_update(self, device, account, container, obj, data,
|
||||||
timestamp, policy):
|
timestamp, policy):
|
||||||
|
"""
|
||||||
|
Write data describing a container update notification to a pickle file
|
||||||
|
in the async_pending directory.
|
||||||
|
|
||||||
|
:param device: name of target device
|
||||||
|
:param account: account name for the object
|
||||||
|
:param container: container name for the object
|
||||||
|
:param obj: object name for the object
|
||||||
|
:param data: update data to be written to pickle file
|
||||||
|
:param timestamp: a Timestamp
|
||||||
|
:param policy: the StoragePolicy instance
|
||||||
|
"""
|
||||||
device_path = self.construct_dev_path(device)
|
device_path = self.construct_dev_path(device)
|
||||||
async_dir = os.path.join(device_path, get_async_dir(policy))
|
async_dir = os.path.join(device_path, get_async_dir(policy))
|
||||||
ohash = hash_path(account, container, obj)
|
ohash = hash_path(account, container, obj)
|
||||||
|
@ -859,6 +1002,17 @@ class BaseDiskFileManager(object):
|
||||||
|
|
||||||
def get_diskfile(self, device, partition, account, container, obj,
|
def get_diskfile(self, device, partition, account, container, obj,
|
||||||
policy, **kwargs):
|
policy, **kwargs):
|
||||||
|
"""
|
||||||
|
Returns a BaseDiskFile instance for an object based on the object's
|
||||||
|
partition, path parts and policy.
|
||||||
|
|
||||||
|
:param device: name of target device
|
||||||
|
:param partition: partition on device in which the object lives
|
||||||
|
:param account: account name for the object
|
||||||
|
:param container: container name for the object
|
||||||
|
:param obj: object name for the object
|
||||||
|
:param policy: the StoragePolicy instance
|
||||||
|
"""
|
||||||
dev_path = self.get_dev_path(device)
|
dev_path = self.get_dev_path(device)
|
||||||
if not dev_path:
|
if not dev_path:
|
||||||
raise DiskFileDeviceUnavailable()
|
raise DiskFileDeviceUnavailable()
|
||||||
|
@ -868,10 +1022,21 @@ class BaseDiskFileManager(object):
|
||||||
pipe_size=self.pipe_size, **kwargs)
|
pipe_size=self.pipe_size, **kwargs)
|
||||||
|
|
||||||
def object_audit_location_generator(self, device_dirs=None):
|
def object_audit_location_generator(self, device_dirs=None):
|
||||||
|
"""
|
||||||
|
Yield an AuditLocation for all objects stored under device_dirs.
|
||||||
|
|
||||||
|
:param device_dirs: directory of target device
|
||||||
|
"""
|
||||||
return object_audit_location_generator(self.devices, self.mount_check,
|
return object_audit_location_generator(self.devices, self.mount_check,
|
||||||
self.logger, device_dirs)
|
self.logger, device_dirs)
|
||||||
|
|
||||||
def get_diskfile_from_audit_location(self, audit_location):
|
def get_diskfile_from_audit_location(self, audit_location):
|
||||||
|
"""
|
||||||
|
Returns a BaseDiskFile instance for an object at the given
|
||||||
|
AuditLocation.
|
||||||
|
|
||||||
|
:param audit_location: object location to be audited
|
||||||
|
"""
|
||||||
dev_path = self.get_dev_path(audit_location.device, mount_check=False)
|
dev_path = self.get_dev_path(audit_location.device, mount_check=False)
|
||||||
return self.diskfile_cls.from_hash_dir(
|
return self.diskfile_cls.from_hash_dir(
|
||||||
self, audit_location.path, dev_path,
|
self, audit_location.path, dev_path,
|
||||||
|
@ -886,7 +1051,12 @@ class BaseDiskFileManager(object):
|
||||||
instance representing the tombstoned object is returned
|
instance representing the tombstoned object is returned
|
||||||
instead.
|
instead.
|
||||||
|
|
||||||
|
:param device: name of target device
|
||||||
|
:param partition: partition on the device in which the object lives
|
||||||
|
:param object_hash: the hash of an object path
|
||||||
|
:param policy: the StoragePolicy instance
|
||||||
:raises DiskFileNotExist: if the object does not exist
|
:raises DiskFileNotExist: if the object does not exist
|
||||||
|
:returns: an instance of BaseDiskFile
|
||||||
"""
|
"""
|
||||||
dev_path = self.get_dev_path(device)
|
dev_path = self.get_dev_path(device)
|
||||||
if not dev_path:
|
if not dev_path:
|
||||||
|
@ -924,6 +1094,14 @@ class BaseDiskFileManager(object):
|
||||||
policy=policy, **kwargs)
|
policy=policy, **kwargs)
|
||||||
|
|
||||||
def get_hashes(self, device, partition, suffixes, policy):
|
def get_hashes(self, device, partition, suffixes, policy):
|
||||||
|
"""
|
||||||
|
|
||||||
|
:param device: name of target device
|
||||||
|
:param partition: partition name
|
||||||
|
:param suffixes: a list of suffix directories to be recalculated
|
||||||
|
:param policy: the StoragePolicy instance
|
||||||
|
:returns: a dictionary that maps suffix directories
|
||||||
|
"""
|
||||||
dev_path = self.get_dev_path(device)
|
dev_path = self.get_dev_path(device)
|
||||||
if not dev_path:
|
if not dev_path:
|
||||||
raise DiskFileDeviceUnavailable()
|
raise DiskFileDeviceUnavailable()
|
||||||
|
@ -936,6 +1114,9 @@ class BaseDiskFileManager(object):
|
||||||
return hashes
|
return hashes
|
||||||
|
|
||||||
def _listdir(self, path):
|
def _listdir(self, path):
|
||||||
|
"""
|
||||||
|
:param path: full path to directory
|
||||||
|
"""
|
||||||
try:
|
try:
|
||||||
return os.listdir(path)
|
return os.listdir(path)
|
||||||
except OSError as err:
|
except OSError as err:
|
||||||
|
@ -949,6 +1130,10 @@ class BaseDiskFileManager(object):
|
||||||
"""
|
"""
|
||||||
Yields tuples of (full_path, suffix_only) for suffixes stored
|
Yields tuples of (full_path, suffix_only) for suffixes stored
|
||||||
on the given device and partition.
|
on the given device and partition.
|
||||||
|
|
||||||
|
:param device: name of target device
|
||||||
|
:param partition: partition name
|
||||||
|
:param policy: the StoragePolicy instance
|
||||||
"""
|
"""
|
||||||
dev_path = self.get_dev_path(device)
|
dev_path = self.get_dev_path(device)
|
||||||
if not dev_path:
|
if not dev_path:
|
||||||
|
@ -978,9 +1163,16 @@ class BaseDiskFileManager(object):
|
||||||
|
|
||||||
ts_data -> timestamp of data or tombstone file,
|
ts_data -> timestamp of data or tombstone file,
|
||||||
ts_meta -> timestamp of meta file, if one exists
|
ts_meta -> timestamp of meta file, if one exists
|
||||||
|
ts_ctype -> timestamp of meta file containing most recent
|
||||||
|
content-type value, if one exists
|
||||||
|
|
||||||
where timestamps are instances of
|
where timestamps are instances of
|
||||||
:class:`~swift.common.utils.Timestamp`
|
:class:`~swift.common.utils.Timestamp`
|
||||||
|
|
||||||
|
:param device: name of target device
|
||||||
|
:param partition: partition name
|
||||||
|
:param policy: the StoragePolicy instance
|
||||||
|
:param suffixes: optional list of suffix directories to be searched
|
||||||
"""
|
"""
|
||||||
dev_path = self.get_dev_path(device)
|
dev_path = self.get_dev_path(device)
|
||||||
if not dev_path:
|
if not dev_path:
|
||||||
|
@ -995,9 +1187,10 @@ class BaseDiskFileManager(object):
|
||||||
(os.path.join(partition_path, suffix), suffix)
|
(os.path.join(partition_path, suffix), suffix)
|
||||||
for suffix in suffixes)
|
for suffix in suffixes)
|
||||||
key_preference = (
|
key_preference = (
|
||||||
('ts_meta', 'meta_info'),
|
('ts_meta', 'meta_info', 'timestamp'),
|
||||||
('ts_data', 'data_info'),
|
('ts_data', 'data_info', 'timestamp'),
|
||||||
('ts_data', 'ts_info'),
|
('ts_data', 'ts_info', 'timestamp'),
|
||||||
|
('ts_ctype', 'ctype_info', 'ctype_timestamp'),
|
||||||
)
|
)
|
||||||
for suffix_path, suffix in suffixes:
|
for suffix_path, suffix in suffixes:
|
||||||
for object_hash in self._listdir(suffix_path):
|
for object_hash in self._listdir(suffix_path):
|
||||||
|
@ -1006,10 +1199,10 @@ class BaseDiskFileManager(object):
|
||||||
results = self.cleanup_ondisk_files(
|
results = self.cleanup_ondisk_files(
|
||||||
object_path, self.reclaim_age, **kwargs)
|
object_path, self.reclaim_age, **kwargs)
|
||||||
timestamps = {}
|
timestamps = {}
|
||||||
for ts_key, info_key in key_preference:
|
for ts_key, info_key, info_ts_key in key_preference:
|
||||||
if info_key not in results:
|
if info_key not in results:
|
||||||
continue
|
continue
|
||||||
timestamps[ts_key] = results[info_key]['timestamp']
|
timestamps[ts_key] = results[info_key][info_ts_key]
|
||||||
if 'ts_data' not in timestamps:
|
if 'ts_data' not in timestamps:
|
||||||
# file sets that do not include a .data or .ts
|
# file sets that do not include a .data or .ts
|
||||||
# file cannot be opened and therefore cannot
|
# file cannot be opened and therefore cannot
|
||||||
|
@ -1133,6 +1326,34 @@ class BaseDiskFileWriter(object):
|
||||||
except OSError:
|
except OSError:
|
||||||
logging.exception(_('Problem cleaning up %s'), self._datadir)
|
logging.exception(_('Problem cleaning up %s'), self._datadir)
|
||||||
|
|
||||||
|
def _put(self, metadata, cleanup=True, *a, **kw):
|
||||||
|
"""
|
||||||
|
Helper method for subclasses.
|
||||||
|
|
||||||
|
For this implementation, this method is responsible for renaming the
|
||||||
|
temporary file to the final name and directory location. This method
|
||||||
|
should be called after the final call to
|
||||||
|
:func:`swift.obj.diskfile.DiskFileWriter.write`.
|
||||||
|
|
||||||
|
:param metadata: dictionary of metadata to be associated with the
|
||||||
|
object
|
||||||
|
:param cleanup: a Boolean. If True then obsolete files will be removed
|
||||||
|
from the object dir after the put completes, otherwise
|
||||||
|
obsolete files are left in place.
|
||||||
|
"""
|
||||||
|
timestamp = Timestamp(metadata['X-Timestamp'])
|
||||||
|
ctype_timestamp = metadata.get('Content-Type-Timestamp')
|
||||||
|
if ctype_timestamp:
|
||||||
|
ctype_timestamp = Timestamp(ctype_timestamp)
|
||||||
|
filename = self.manager.make_on_disk_filename(
|
||||||
|
timestamp, self._extension, ctype_timestamp=ctype_timestamp,
|
||||||
|
*a, **kw)
|
||||||
|
metadata['name'] = self._name
|
||||||
|
target_path = join(self._datadir, filename)
|
||||||
|
|
||||||
|
self._threadpool.force_run_in_thread(
|
||||||
|
self._finalize_put, metadata, target_path, cleanup)
|
||||||
|
|
||||||
def put(self, metadata):
|
def put(self, metadata):
|
||||||
"""
|
"""
|
||||||
Finalize writing the file on disk.
|
Finalize writing the file on disk.
|
||||||
|
@ -1360,7 +1581,10 @@ class BaseDiskFileReader(object):
|
||||||
self.close()
|
self.close()
|
||||||
|
|
||||||
def app_iter_range(self, start, stop):
|
def app_iter_range(self, start, stop):
|
||||||
"""Returns an iterator over the data file for range (start, stop)"""
|
"""
|
||||||
|
Returns an iterator over the data file for range (start, stop)
|
||||||
|
|
||||||
|
"""
|
||||||
if start or start == 0:
|
if start or start == 0:
|
||||||
self._fp.seek(start)
|
self._fp.seek(start)
|
||||||
if stop is not None:
|
if stop is not None:
|
||||||
|
@ -1381,7 +1605,10 @@ class BaseDiskFileReader(object):
|
||||||
self.close()
|
self.close()
|
||||||
|
|
||||||
def app_iter_ranges(self, ranges, content_type, boundary, size):
|
def app_iter_ranges(self, ranges, content_type, boundary, size):
|
||||||
"""Returns an iterator over the data file for a set of ranges"""
|
"""
|
||||||
|
Returns an iterator over the data file for a set of ranges
|
||||||
|
|
||||||
|
"""
|
||||||
if not ranges:
|
if not ranges:
|
||||||
yield ''
|
yield ''
|
||||||
else:
|
else:
|
||||||
|
@ -1396,7 +1623,11 @@ class BaseDiskFileReader(object):
|
||||||
self.close()
|
self.close()
|
||||||
|
|
||||||
def _drop_cache(self, fd, offset, length):
|
def _drop_cache(self, fd, offset, length):
|
||||||
"""Method for no-oping buffer cache drop method."""
|
"""
|
||||||
|
Method for no-oping buffer cache drop method.
|
||||||
|
|
||||||
|
:param fd: file descriptor or filename
|
||||||
|
"""
|
||||||
if not self._keep_cache:
|
if not self._keep_cache:
|
||||||
drop_buffer_cache(fd, offset, length)
|
drop_buffer_cache(fd, offset, length)
|
||||||
|
|
||||||
|
@ -1579,6 +1810,20 @@ class BaseDiskFile(object):
|
||||||
def fragments(self):
|
def fragments(self):
|
||||||
return None
|
return None
|
||||||
|
|
||||||
|
@property
|
||||||
|
def content_type(self):
|
||||||
|
if self._metadata is None:
|
||||||
|
raise DiskFileNotOpen()
|
||||||
|
return self._metadata.get('Content-Type')
|
||||||
|
|
||||||
|
@property
|
||||||
|
def content_type_timestamp(self):
|
||||||
|
if self._metadata is None:
|
||||||
|
raise DiskFileNotOpen()
|
||||||
|
t = self._metadata.get('Content-Type-Timestamp',
|
||||||
|
self._datafile_metadata.get('X-Timestamp'))
|
||||||
|
return Timestamp(t)
|
||||||
|
|
||||||
@classmethod
|
@classmethod
|
||||||
def from_hash_dir(cls, mgr, hash_dir_path, device_path, partition, policy):
|
def from_hash_dir(cls, mgr, hash_dir_path, device_path, partition, policy):
|
||||||
return cls(mgr, device_path, None, partition, _datadir=hash_dir_path,
|
return cls(mgr, device_path, None, partition, _datadir=hash_dir_path,
|
||||||
|
@ -1718,6 +1963,10 @@ class BaseDiskFile(object):
|
||||||
return exc
|
return exc
|
||||||
|
|
||||||
def _verify_name_matches_hash(self, data_file):
|
def _verify_name_matches_hash(self, data_file):
|
||||||
|
"""
|
||||||
|
|
||||||
|
:param data_file: data file name, used when quarantines occur
|
||||||
|
"""
|
||||||
hash_from_fs = os.path.basename(self._datadir)
|
hash_from_fs = os.path.basename(self._datadir)
|
||||||
hash_from_name = hash_path(self._name.lstrip('/'))
|
hash_from_name = hash_path(self._name.lstrip('/'))
|
||||||
if hash_from_fs != hash_from_name:
|
if hash_from_fs != hash_from_name:
|
||||||
|
@ -1794,8 +2043,16 @@ class BaseDiskFile(object):
|
||||||
return obj_size
|
return obj_size
|
||||||
|
|
||||||
def _failsafe_read_metadata(self, source, quarantine_filename=None):
|
def _failsafe_read_metadata(self, source, quarantine_filename=None):
|
||||||
# Takes source and filename separately so we can read from an open
|
"""
|
||||||
# file if we have one
|
Read metadata from source object file. In case of failure, quarantine
|
||||||
|
the file.
|
||||||
|
|
||||||
|
Takes source and filename separately so we can read from an open
|
||||||
|
file if we have one.
|
||||||
|
|
||||||
|
:param source: file descriptor or filename to load the metadata from
|
||||||
|
:param quarantine_filename: full path of file to load the metadata from
|
||||||
|
"""
|
||||||
try:
|
try:
|
||||||
return read_metadata(source)
|
return read_metadata(source)
|
||||||
except (DiskFileXattrNotSupported, DiskFileNotExist):
|
except (DiskFileXattrNotSupported, DiskFileNotExist):
|
||||||
|
@ -1805,14 +2062,36 @@ class BaseDiskFile(object):
|
||||||
quarantine_filename,
|
quarantine_filename,
|
||||||
"Exception reading metadata: %s" % err)
|
"Exception reading metadata: %s" % err)
|
||||||
|
|
||||||
def _construct_from_data_file(self, data_file, meta_file, **kwargs):
|
def _merge_content_type_metadata(self, ctype_file):
|
||||||
|
"""
|
||||||
|
When a second .meta file is providing the most recent Content-Type
|
||||||
|
metadata then merge it into the metafile_metadata.
|
||||||
|
|
||||||
|
:param ctype_file: An on-disk .meta file
|
||||||
|
"""
|
||||||
|
ctypefile_metadata = self._failsafe_read_metadata(
|
||||||
|
ctype_file, ctype_file)
|
||||||
|
if ('Content-Type' in ctypefile_metadata
|
||||||
|
and (ctypefile_metadata.get('Content-Type-Timestamp') >
|
||||||
|
self._metafile_metadata.get('Content-Type-Timestamp'))
|
||||||
|
and (ctypefile_metadata.get('Content-Type-Timestamp') >
|
||||||
|
self.data_timestamp)):
|
||||||
|
self._metafile_metadata['Content-Type'] = \
|
||||||
|
ctypefile_metadata['Content-Type']
|
||||||
|
self._metafile_metadata['Content-Type-Timestamp'] = \
|
||||||
|
ctypefile_metadata.get('Content-Type-Timestamp')
|
||||||
|
|
||||||
|
def _construct_from_data_file(self, data_file, meta_file, ctype_file,
|
||||||
|
**kwargs):
|
||||||
"""
|
"""
|
||||||
Open the `.data` file to fetch its metadata, and fetch the metadata
|
Open the `.data` file to fetch its metadata, and fetch the metadata
|
||||||
from the fast-POST `.meta` file as well if it exists, merging them
|
from fast-POST `.meta` files as well if any exist, merging them
|
||||||
properly.
|
properly.
|
||||||
|
|
||||||
:param data_file: on-disk `.data` file being considered
|
:param data_file: on-disk `.data` file being considered
|
||||||
:param meta_file: on-disk fast-POST `.meta` file being considered
|
:param meta_file: on-disk fast-POST `.meta` file being considered
|
||||||
|
:param ctype_file: on-disk fast-POST `.meta` file being considered that
|
||||||
|
contains content-type and content-type timestamp
|
||||||
:returns: an opened data file pointer
|
:returns: an opened data file pointer
|
||||||
:raises DiskFileError: various exceptions from
|
:raises DiskFileError: various exceptions from
|
||||||
:func:`swift.obj.diskfile.DiskFile._verify_data_file`
|
:func:`swift.obj.diskfile.DiskFile._verify_data_file`
|
||||||
|
@ -1823,6 +2102,8 @@ class BaseDiskFile(object):
|
||||||
if meta_file:
|
if meta_file:
|
||||||
self._metafile_metadata = self._failsafe_read_metadata(
|
self._metafile_metadata = self._failsafe_read_metadata(
|
||||||
meta_file, meta_file)
|
meta_file, meta_file)
|
||||||
|
if ctype_file and ctype_file != meta_file:
|
||||||
|
self._merge_content_type_metadata(ctype_file)
|
||||||
sys_metadata = dict(
|
sys_metadata = dict(
|
||||||
[(key, val) for key, val in self._datafile_metadata.items()
|
[(key, val) for key, val in self._datafile_metadata.items()
|
||||||
if key.lower() in DATAFILE_SYSTEM_META
|
if key.lower() in DATAFILE_SYSTEM_META
|
||||||
|
@ -1831,6 +2112,14 @@ class BaseDiskFile(object):
|
||||||
self._metadata.update(sys_metadata)
|
self._metadata.update(sys_metadata)
|
||||||
# diskfile writer added 'name' to metafile, so remove it here
|
# diskfile writer added 'name' to metafile, so remove it here
|
||||||
self._metafile_metadata.pop('name', None)
|
self._metafile_metadata.pop('name', None)
|
||||||
|
# TODO: the check for Content-Type is only here for tests that
|
||||||
|
# create .data files without Content-Type
|
||||||
|
if ('Content-Type' in self._datafile_metadata and
|
||||||
|
(self.data_timestamp >
|
||||||
|
self._metafile_metadata.get('Content-Type-Timestamp'))):
|
||||||
|
self._metadata['Content-Type'] = \
|
||||||
|
self._datafile_metadata['Content-Type']
|
||||||
|
self._metadata.pop('Content-Type-Timestamp', None)
|
||||||
else:
|
else:
|
||||||
self._metadata.update(self._datafile_metadata)
|
self._metadata.update(self._datafile_metadata)
|
||||||
if self._name is None:
|
if self._name is None:
|
||||||
|
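
Note: the precedence rule in _merge_content_type_metadata above is easy to miss, so here is a toy illustration. The extra .meta file's Content-Type wins only if its timestamp beats both the newest .meta's content-type timestamp and the .data file's timestamp. Plain ints stand in for Timestamps, and the dicts are invented:

    data_timestamp = 100
    metafile_metadata = {'Content-Type': 'text/plain',
                         'Content-Type-Timestamp': 90}
    ctypefile_metadata = {'Content-Type': 'application/json',
                          'Content-Type-Timestamp': 120}

    if ('Content-Type' in ctypefile_metadata
            and ctypefile_metadata['Content-Type-Timestamp'] >
            metafile_metadata['Content-Type-Timestamp']
            and ctypefile_metadata['Content-Type-Timestamp'] >
            data_timestamp):
        metafile_metadata['Content-Type'] = \
            ctypefile_metadata['Content-Type']
        metafile_metadata['Content-Type-Timestamp'] = \
            ctypefile_metadata['Content-Type-Timestamp']

    assert metafile_metadata['Content-Type'] == 'application/json'
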
@@ -2029,21 +2318,10 @@ class DiskFileWriter(BaseDiskFileWriter):
         """
         Finalize writing the file on disk.
 
-        For this implementation, this method is responsible for renaming the
-        temporary file to the final name and directory location. This method
-        should be called after the final call to
-        :func:`swift.obj.diskfile.DiskFileWriter.write`.
-
         :param metadata: dictionary of metadata to be associated with the
                          object
         """
-        timestamp = Timestamp(metadata['X-Timestamp']).internal
-        metadata['name'] = self._name
-        target_path = join(self._datadir, timestamp + self._extension)
-        cleanup = True
-
-        self._threadpool.force_run_in_thread(
-            self._finalize_put, metadata, target_path, cleanup)
+        super(DiskFileWriter, self)._put(metadata, True)
 
 
 class DiskFile(BaseDiskFile):
@@ -2059,31 +2337,6 @@ class DiskFile(BaseDiskFile):
 class DiskFileManager(BaseDiskFileManager):
     diskfile_cls = DiskFile
 
-    def parse_on_disk_filename(self, filename):
-        """
-        Returns the timestamp extracted .data file name.
-
-        :param filename: the data file name including extension
-        :returns: a dict, with keys for timestamp, and ext:
-
-          * timestamp is a :class:`~swift.common.utils.Timestamp`
-          * ext is a string, the file extension including the leading dot or
-            the empty string if the filename has no extension.
-
-        :raises DiskFileError: if any part of the filename is not able to be
-            validated.
-        """
-        float_part, ext = splitext(filename)
-        try:
-            timestamp = Timestamp(float_part)
-        except ValueError:
-            raise DiskFileError('Invalid Timestamp value in filename %r'
-                                % filename)
-        return {
-            'timestamp': timestamp,
-            'ext': ext,
-        }
-
     def _process_ondisk_files(self, exts, results, **kwargs):
         """
         Implement replication policy specific handling of .data files.
@@ -2107,16 +2360,31 @@ class DiskFileManager(BaseDiskFileManager):
         # set results
         results['data_info'] = exts['.data'][0]
 
+    def _update_suffix_hashes(self, hashes, ondisk_info):
+        """
+        Applies policy specific updates to the given dict of md5 hashes for
+        the given ondisk_info.
+
+        :param hashes: a dict of md5 hashes to be updated
+        :param ondisk_info: a dict describing the state of ondisk files, as
+                            returned by get_ondisk_files
+        """
+        if 'data_info' in ondisk_info:
+            file_info = ondisk_info['data_info']
+            hashes[None].update(
+                file_info['timestamp'].internal + file_info['ext'])
+
     def _hash_suffix(self, path, reclaim_age):
         """
         Performs reclamation and returns an md5 of all (remaining) files.
 
+        :param path: full path to directory
         :param reclaim_age: age in seconds at which to remove tombstones
         :raises PathNotDir: if given path is not a valid directory
         :raises OSError: for non-ENOTDIR errors
+        :returns: md5 of files in suffix
         """
-        mapper = lambda filename: (None, filename)
-        hashes = self._hash_suffix_dir(path, mapper, reclaim_age)
+        hashes = self._hash_suffix_dir(path, reclaim_age)
         return hashes[None].hexdigest()
 
 
@@ -2173,10 +2441,10 @@ class ECDiskFileWriter(BaseDiskFileWriter):
     def put(self, metadata):
         """
         The only difference between this method and the replication policy
-        DiskFileWriter method is the call into manager.make_on_disk_filename
-        to construct the data file name.
+        DiskFileWriter method is adding the frag index to the metadata.
+
+        :param metadata: dictionary of metadata to be associated with object
         """
-        timestamp = Timestamp(metadata['X-Timestamp'])
         fi = None
         cleanup = True
         if self._extension == '.data':
@@ -2188,13 +2456,7 @@ class ECDiskFileWriter(BaseDiskFileWriter):
                 self._diskfile._frag_index)
             # defer cleanup until commit() writes .durable
             cleanup = False
-        filename = self.manager.make_on_disk_filename(
-            timestamp, self._extension, frag_index=fi)
-        metadata['name'] = self._name
-        target_path = join(self._datadir, filename)
-
-        self._threadpool.force_run_in_thread(
-            self._finalize_put, metadata, target_path, cleanup)
+        super(ECDiskFileWriter, self)._put(metadata, cleanup, frag_index=fi)
 
 
 class ECDiskFile(BaseDiskFile):
@@ -2246,6 +2508,8 @@ class ECDiskFile(BaseDiskFile):
         The only difference between this method and the replication policy
         DiskFile method is passing in the frag_index kwarg to our manager's
         get_ondisk_files method.
+
+        :param files: list of file names
         """
         self._ondisk_info = self.manager.get_ondisk_files(
             files, self._datadir, frag_index=self._frag_index)
@@ -2288,6 +2552,8 @@ class ECDiskFileManager(BaseDiskFileManager):
         """
         Return int representation of frag_index, or raise a DiskFileError if
         frag_index is not a whole number.
+
+        :param frag_index: a fragment archive index
         """
         try:
             frag_index = int(str(frag_index))
@@ -2300,7 +2566,7 @@ class ECDiskFileManager(BaseDiskFileManager):
         return frag_index
 
     def make_on_disk_filename(self, timestamp, ext=None, frag_index=None,
-                              *a, **kw):
+                              ctype_timestamp=None, *a, **kw):
         """
         Returns the EC specific filename for given timestamp.
 
@@ -2310,32 +2576,36 @@ class ECDiskFileManager(BaseDiskFileManager):
                     appended to the returned file name
         :param frag_index: a fragment archive index, used with .data extension
                            only, must be a whole number.
+        :param ctype_timestamp: an optional content-type timestamp, an instance
+                                of :class:`~swift.common.utils.Timestamp`
         :returns: a file name
         :raises DiskFileError: if ext=='.data' and the kwarg frag_index is not
                                a whole number
         """
-        rv = timestamp.internal
         if ext == '.data':
             # for datafiles only we encode the fragment index in the filename
             # to allow archives of different indexes to temporarily be stored
             # on the same node in certain situations
             frag_index = self.validate_fragment_index(frag_index)
-            rv += '#' + str(frag_index)
-        if ext:
-            rv = '%s%s' % (rv, ext)
-        return rv
+            rv = timestamp.internal + '#' + str(frag_index)
+            return '%s%s' % (rv, ext or '')
+        return super(ECDiskFileManager, self).make_on_disk_filename(
+            timestamp, ext, ctype_timestamp, *a, **kw)
 
     def parse_on_disk_filename(self, filename):
         """
-        Returns the timestamp extracted from a policy specific .data file name.
-        For EC policy the data file name includes a fragment index which must
-        be stripped off to retrieve the timestamp.
+        Returns timestamp(s) and other info extracted from a policy specific
+        file name. For EC policy the data file name includes a fragment index
+        which must be stripped off to retrieve the timestamp.
 
-        :param filename: the data file name including extension
-        :returns: a dict, with keys for timestamp, frag_index, and ext:
+        :param filename: the file name including extension
+        :returns: a dict, with keys for timestamp, frag_index, ext and
+                  ctype_timestamp:
 
          * timestamp is a :class:`~swift.common.utils.Timestamp`
          * frag_index is an int or None
+         * ctype_timestamp is a :class:`~swift.common.utils.Timestamp` or
+           None for .meta files, otherwise None
         * ext is a string, the file extension including the leading dot or
           the empty string if the filename has no extension.
 
@@ -2344,13 +2614,13 @@ class ECDiskFileManager(BaseDiskFileManager):
         """
         frag_index = None
         float_frag, ext = splitext(filename)
-        parts = float_frag.split('#', 1)
-        try:
-            timestamp = Timestamp(parts[0])
-        except ValueError:
-            raise DiskFileError('Invalid Timestamp value in filename %r'
-                                % filename)
         if ext == '.data':
+            parts = float_frag.split('#', 1)
+            try:
+                timestamp = Timestamp(parts[0])
+            except ValueError:
+                raise DiskFileError('Invalid Timestamp value in filename %r'
+                                    % filename)
             # it is an error for an EC data file to not have a valid
             # fragment index
             try:
@@ -2359,11 +2629,15 @@ class ECDiskFileManager(BaseDiskFileManager):
                 # expect validate_fragment_index raise DiskFileError
                 pass
             frag_index = self.validate_fragment_index(frag_index)
-        return {
-            'timestamp': timestamp,
-            'frag_index': frag_index,
-            'ext': ext,
-        }
+            return {
+                'timestamp': timestamp,
+                'frag_index': frag_index,
+                'ext': ext,
+                'ctype_timestamp': None
+            }
+        rv = super(ECDiskFileManager, self).parse_on_disk_filename(filename)
+        rv['frag_index'] = None
+        return rv
 
     def _process_ondisk_files(self, exts, results, frag_index=None, **kwargs):
         """
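
Note: the EC filename handling above reduces to a small parsing rule: '#' separates the timestamp from the fragment index in .data names, while .meta and .ts names have no fragment part and fall through to the base-class parser. A simplified stand-alone version, using string timestamps and skipping the Timestamp validation the real code performs:

    from os.path import splitext

    def parse_ec_name(filename):
        fname, ext = splitext(filename)
        if ext == '.data':
            ts, _, frag = fname.partition('#')
            return {'timestamp': ts, 'frag_index': int(frag), 'ext': ext}
        return {'timestamp': fname, 'frag_index': None, 'ext': ext}

    assert parse_ec_name('1454619054.26434#7.data')['frag_index'] == 7
    assert parse_ec_name('1454619054.26434.ts')['frag_index'] is None
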
@@ -2449,25 +2723,41 @@ class ECDiskFileManager(BaseDiskFileManager):
             return have_data_file == have_durable
         return False
 
+    def _update_suffix_hashes(self, hashes, ondisk_info):
+        """
+        Applies policy specific updates to the given dict of md5 hashes for
+        the given ondisk_info.
+
+        The only difference between this method and the replication policy
+        function is the way that data files update hashes dict. Instead of all
+        filenames hashed into a single hasher, each data file name will fall
+        into a bucket keyed by its fragment index.
+
+        :param hashes: a dict of md5 hashes to be updated
+        :param ondisk_info: a dict describing the state of ondisk files, as
+                            returned by get_ondisk_files
+        """
+        for frag_set in ondisk_info['frag_sets'].values():
+            for file_info in frag_set:
+                fi = file_info['frag_index']
+                hashes[fi].update(file_info['timestamp'].internal)
+        if 'durable_frag_set' in ondisk_info:
+            file_info = ondisk_info['durable_frag_set'][0]
+            hashes[None].update(file_info['timestamp'].internal + '.durable')
+
     def _hash_suffix(self, path, reclaim_age):
         """
-        The only difference between this method and the replication policy
-        function is the way that files are updated on the returned hash.
-
-        Instead of all filenames hashed into a single hasher, each file name
-        will fall into a bucket either by fragment index for datafiles, or
-        None (indicating a durable, metadata or tombstone).
+        Performs reclamation and returns an md5 of all (remaining) files.
+
+        :param path: full path to directory
+        :param reclaim_age: age in seconds at which to remove tombstones
+        :raises PathNotDir: if given path is not a valid directory
+        :raises OSError: for non-ENOTDIR errors
+        :returns: dict of md5 hex digests
         """
         # hash_per_fi instead of single hash for whole suffix
         # here we flatten out the hashers hexdigest into a dictionary instead
         # of just returning the one hexdigest for the whole suffix
-        def mapper(filename):
-            info = self.parse_on_disk_filename(filename)
-            fi = info['frag_index']
-            if fi is None:
-                return None, filename
-            else:
-                return fi, info['timestamp'].internal
-
-        hash_per_fi = self._hash_suffix_dir(path, mapper, reclaim_age)
+        hash_per_fi = self._hash_suffix_dir(path, reclaim_age)
         return dict((fi, md5.hexdigest()) for fi, md5 in hash_per_fi.items())
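
Note: the per-fragment bucketing that ECDiskFileManager._update_suffix_hashes performs above can be sketched in isolation: each fragment archive updates a hasher keyed by its fragment index, and the .durable marker updates the None bucket. The ondisk_info dict and string timestamps below are invented for illustration:

    import hashlib
    from collections import defaultdict

    hashes = defaultdict(hashlib.md5)
    ondisk_info = {  # invented example state
        'frag_sets': {
            3: [{'frag_index': 3, 'timestamp': '0000001000.00000'}],
            7: [{'frag_index': 7, 'timestamp': '0000001000.00000'}],
        },
        'durable_frag_set': [{'frag_index': 3,
                              'timestamp': '0000001000.00000'}],
    }
    for frag_set in ondisk_info['frag_sets'].values():
        for file_info in frag_set:
            hashes[file_info['frag_index']].update(
                file_info['timestamp'].encode('ascii'))
    file_info = ondisk_info['durable_frag_set'][0]
    hashes[None].update((file_info['timestamp'] + '.durable').encode('ascii'))
    print(dict((fi, md5.hexdigest()) for fi, md5 in hashes.items()))

This is why EC nodes can compare suffix hashes per fragment index rather than per whole suffix.
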
@ -25,7 +25,9 @@ from six import moves
|
||||||
from swift.common.utils import Timestamp
|
from swift.common.utils import Timestamp
|
||||||
from swift.common.exceptions import DiskFileQuarantined, DiskFileNotExist, \
|
from swift.common.exceptions import DiskFileQuarantined, DiskFileNotExist, \
|
||||||
DiskFileCollision, DiskFileDeleted, DiskFileNotOpen
|
DiskFileCollision, DiskFileDeleted, DiskFileNotOpen
|
||||||
|
from swift.common.request_helpers import is_sys_meta
|
||||||
from swift.common.swob import multi_range_iterator
|
from swift.common.swob import multi_range_iterator
|
||||||
|
from swift.obj.diskfile import DATAFILE_SYSTEM_META
|
||||||
|
|
||||||
|
|
||||||
class InMemoryFileSystem(object):
|
class InMemoryFileSystem(object):
|
||||||
|
@ -41,17 +43,37 @@ class InMemoryFileSystem(object):
|
||||||
self._filesystem = {}
|
self._filesystem = {}
|
||||||
|
|
||||||
def get_object(self, name):
|
def get_object(self, name):
|
||||||
|
"""
|
||||||
|
Return back an file-like object and its metadata
|
||||||
|
|
||||||
|
:param name: standard object name
|
||||||
|
:return (fp, metadata): fp is `StringIO` in-memory representation
|
||||||
|
object (or None). metadata is a dictionary
|
||||||
|
of metadata (or None)
|
||||||
|
"""
|
||||||
val = self._filesystem.get(name)
|
val = self._filesystem.get(name)
|
||||||
if val is None:
|
if val is None:
|
||||||
data, metadata = None, None
|
fp, metadata = None, None
|
||||||
else:
|
else:
|
||||||
data, metadata = val
|
fp, metadata = val
|
||||||
return data, metadata
|
return fp, metadata
|
||||||
|
|
||||||
def put_object(self, name, data, metadata):
|
def put_object(self, name, fp, metadata):
|
||||||
self._filesystem[name] = (data, metadata)
|
"""
|
||||||
|
Store object into memory
|
||||||
|
|
||||||
|
:param name: standard object name
|
||||||
|
:param fp: `StringIO` in-memory representation object
|
||||||
|
:param metadata: dictionary of metadata to be written
|
||||||
|
"""
|
||||||
|
self._filesystem[name] = (fp, metadata)
|
||||||
|
|
||||||
def del_object(self, name):
|
def del_object(self, name):
|
||||||
|
"""
|
||||||
|
Delete object from memory
|
||||||
|
|
||||||
|
:param name: standard object name
|
||||||
|
"""
|
||||||
del self._filesystem[name]
|
del self._filesystem[name]
|
||||||
|
|
||||||
def get_diskfile(self, account, container, obj, **kwargs):
|
def get_diskfile(self, account, container, obj, **kwargs):
|
||||||
|
@ -99,7 +121,6 @@ class DiskFileWriter(object):
|
||||||
with the `StringIO` object.
|
with the `StringIO` object.
|
||||||
|
|
||||||
:param metadata: dictionary of metadata to be written
|
:param metadata: dictionary of metadata to be written
|
||||||
:param extension: extension to be used when making the file
|
|
||||||
"""
|
"""
|
||||||
metadata['name'] = self._name
|
metadata['name'] = self._name
|
||||||
self._filesystem.put_object(self._name, self._fp, metadata)
|
self._filesystem.put_object(self._name, self._fp, metadata)
|
||||||
|
@ -209,7 +230,7 @@ class DiskFileReader(object):
|
||||||
if self._bytes_read != self._obj_size:
|
if self._bytes_read != self._obj_size:
|
||||||
self._quarantine(
|
self._quarantine(
|
||||||
"Bytes read: %s, does not match metadata: %s" % (
|
"Bytes read: %s, does not match metadata: %s" % (
|
||||||
self.bytes_read, self._obj_size))
|
self._bytes_read, self._obj_size))
|
||||||
elif self._iter_etag and \
|
elif self._iter_etag and \
|
||||||
self._etag != self._iter_etag.hexdigest():
|
self._etag != self._iter_etag.hexdigest():
|
||||||
self._quarantine(
|
self._quarantine(
|
||||||
|
@ -239,14 +260,10 @@ class DiskFile(object):
|
||||||
|
|
||||||
Manage object files in-memory.
|
Manage object files in-memory.
|
||||||
|
|
||||||
:param mgr: DiskFileManager
|
:param fs: an instance of InMemoryFileSystem
|
||||||
:param device_path: path to the target device or drive
|
|
||||||
:param threadpool: thread pool to use for blocking operations
|
|
||||||
:param partition: partition on the device in which the object lives
|
|
||||||
:param account: account name for the object
|
:param account: account name for the object
|
||||||
:param container: container name for the object
|
:param container: container name for the object
|
||||||
:param obj: object name for the object
|
:param obj: object name for the object
|
||||||
:param keep_cache: caller's preference for keeping data read in the cache
|
|
||||||
"""
|
"""
|
||||||
|
|
||||||
def __init__(self, fs, account, container, obj):
|
def __init__(self, fs, account, container, obj):
|
||||||
|
@ -283,6 +300,19 @@ class DiskFile(object):
|
||||||
if self._fp is not None:
|
if self._fp is not None:
|
||||||
self._fp = None
|
self._fp = None
|
||||||
|
|
||||||
|
def _quarantine(self, name, msg):
|
||||||
|
"""
|
||||||
|
Quarantine a file; responsible for incrementing the associated logger's
|
||||||
|
count of quarantines.
|
||||||
|
|
||||||
|
:param name: name of object to quarantine
|
||||||
|
:param msg: reason for quarantining to be included in the exception
|
||||||
|
:returns: DiskFileQuarantined exception object
|
||||||
|
"""
|
||||||
|
# for this implementation we simply delete the bad object
|
||||||
|
self._filesystem.del_object(name)
|
||||||
|
return DiskFileQuarantined(msg)
|
||||||
|
|
||||||
def _verify_data_file(self, fp):
|
def _verify_data_file(self, fp):
|
||||||
"""
|
"""
|
||||||
Verify the metadata's name value matches what we think the object is
|
Verify the metadata's name value matches what we think the object is
|
||||||
@@ -396,9 +426,18 @@ class DiskFile(object):
         """
         Write a block of metadata to an object.
         """
-        cur_fp = self._filesystem.get(self._name)
-        if cur_fp is not None:
-            self._filesystem[self._name] = (cur_fp, metadata)
+        data, cur_mdata = self._filesystem.get_object(self._name)
+        if data is not None:
+            # The object exists. Update the new metadata with the object's
+            # immutable metadata (e.g. name, size, etag, sysmeta) and store it
+            # with the object data.
+            immutable_metadata = dict(
+                [(key, val) for key, val in cur_mdata.items()
+                 if key.lower() in DATAFILE_SYSTEM_META
+                 or is_sys_meta('object', key)])
+            metadata.update(immutable_metadata)
+            metadata['name'] = self._name
+            self._filesystem.put_object(self._name, data, metadata)
 
     def delete(self, timestamp):
         """
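
A hedged sketch of the metadata merge above: system/immutable keys from the stored object win over whatever the new metadata carries, while user metadata is replaced wholesale. The DATAFILE_SYSTEM_META contents and the sysmeta prefix test are simplified stand-ins here, not swift's exact definitions:

    DATAFILE_SYSTEM_META = {'content-length', 'etag', 'deleted'}

    def is_sys_meta(server_type, key):
        # simplified: swift's real check lives in swift.common.request_helpers
        return key.lower().startswith('x-%s-sysmeta-' % server_type)

    def merge_metadata(new_meta, cur_meta):
        immutable = dict(
            (key, val) for key, val in cur_meta.items()
            if key.lower() in DATAFILE_SYSTEM_META
            or is_sys_meta('object', key))
        merged = dict(new_meta)
        merged.update(immutable)  # immutable keys always win
        return merged

    cur = {'ETag': 'abc', 'X-Object-Sysmeta-Foo': '1',
           'X-Object-Meta-Color': 'red'}
    new = {'X-Timestamp': '2', 'X-Object-Meta-Color': 'blue'}
    print(merge_metadata(new, cur))
    # keeps ETag and sysmeta; takes the new user metadata
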
@@ -424,3 +463,11 @@ class DiskFile(object):
     data_timestamp = timestamp
 
     durable_timestamp = timestamp
+
+    content_type_timestamp = timestamp
+
+    @property
+    def content_type(self):
+        if self._metadata is None:
+            raise DiskFileNotOpen()
+        return self._metadata.get('Content-Type')
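
The new content_type_timestamp alias follows the same pattern as data_timestamp and durable_timestamp: because a property is an ordinary class attribute, assigning it to another name in the class body re-exposes the same getter. A tiny self-contained demonstration of that Python idiom (the class here is a stand-in, not swift's DiskFile):

    class DiskFile(object):
        def __init__(self, ts):
            self._ts = ts

        @property
        def timestamp(self):
            return self._ts

        # class-attribute aliasing: all three names share one property
        data_timestamp = timestamp
        durable_timestamp = timestamp
        content_type_timestamp = timestamp

    df = DiskFile('0000000001.00000')
    print(df.data_timestamp == df.content_type_timestamp)  # True
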
swift/obj/replicator.py

@@ -281,6 +281,7 @@ class ObjectReplicator(Daemon):
         headers['X-Backend-Storage-Policy-Index'] = int(job['policy'])
         failure_devs_info = set()
         begin = time.time()
+        handoff_partition_deleted = False
         try:
             responses = []
             suffixes = tpool.execute(tpool_get_suffixes, job['path'])
@@ -347,8 +348,10 @@ class ObjectReplicator(Daemon):
                             for failure_dev in job['nodes']])
                 else:
                     self.delete_partition(job['path'])
+                    handoff_partition_deleted = True
             elif not suffixes:
                 self.delete_partition(job['path'])
+                handoff_partition_deleted = True
         except (Exception, Timeout):
             self.logger.exception(_("Error syncing handoff partition"))
         finally:
@@ -357,6 +360,8 @@ class ObjectReplicator(Daemon):
                                     for target_dev in job['nodes']])
             self.stats['success'] += len(target_devs_info - failure_devs_info)
             self._add_failure_stats(failure_devs_info)
+            if not handoff_partition_deleted:
+                self.handoffs_remaining += 1
             self.partition_times.append(time.time() - begin)
             self.logger.timing_since('partition.delete.timing', begin)
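
Taken together, these replicator hunks count every handoff partition that was not actually deleted during the pass. A minimal sketch of that try/finally accounting, reduced to the control flow the diff adds (the process_handoff method and sync_ok flag are illustrative):

    class Replicator(object):
        def __init__(self):
            self.handoffs_remaining = 0

        def process_handoff(self, sync_ok):
            handoff_partition_deleted = False
            try:
                if sync_ok:
                    # delete_partition(...) would run here
                    handoff_partition_deleted = True
            finally:
                # anything not deleted is still an outstanding handoff
                if not handoff_partition_deleted:
                    self.handoffs_remaining += 1

    r = Replicator()
    r.process_handoff(sync_ok=True)
    r.process_handoff(sync_ok=False)
    print(r.handoffs_remaining)  # 1
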
@@ -506,6 +511,9 @@ class ObjectReplicator(Daemon):
                 'remaining': '%d%s' % compute_eta(self.start,
                                                   self.replication_count,
                                                   self.job_count)})
+            self.logger.info(_('%(success)s successes, %(failure)s failures')
+                             % self.stats)
+
         if self.suffix_count:
             self.logger.info(
                 _("%(checked)d suffixes checked - "
@@ -680,6 +688,7 @@ class ObjectReplicator(Daemon):
         self.partition_times = []
         self.my_replication_ips = self._get_my_replication_ips()
         self.all_devs_info = set()
+        self.handoffs_remaining = 0
 
         stats = eventlet.spawn(self.heartbeat)
         lockup_detector = eventlet.spawn(self.detect_lockups)
@@ -705,6 +714,15 @@ class ObjectReplicator(Daemon):
                             for failure_dev in job['nodes']])
                     self.logger.warning(_('%s is not mounted'), job['device'])
                     continue
+                if self.handoffs_first and not job['delete']:
+                    # in handoffs first mode, we won't process primary
+                    # partitions until rebalance was successful!
+                    if self.handoffs_remaining:
+                        self.logger.warning(_(
+                            "Handoffs first mode still has handoffs "
+                            "remaining. Aborting current "
+                            "replication pass."))
+                        break
                 if not self.check_ring(job['policy'].object_ring):
                     self.logger.info(_("Ring change detected. Aborting "
                                        "current replication pass."))
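
The abort added above ensures that in handoffs-first mode no primary partition is replicated while handoffs are still outstanding. A runnable sketch of that gating logic, with job dicts reduced to the two fields the diff consults (names are illustrative):

    def run_pass(jobs, handoffs_first, handoffs_remaining):
        processed = []
        for job in jobs:
            if handoffs_first and not job['delete']:
                # mirror of the warning + break in the hunk above
                if handoffs_remaining:
                    print('Handoffs first mode still has handoffs '
                          'remaining. Aborting current replication pass.')
                    break
            processed.append(job['name'])
        return processed

    jobs = [{'name': 'handoff', 'delete': True},
            {'name': 'primary', 'delete': False}]
    print(run_pass(jobs, handoffs_first=True, handoffs_remaining=1))
    # ['handoff'] - the primary job is never reached
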
swift/obj/server.py

@@ -33,7 +33,7 @@ from swift.common.utils import public, get_logger, \
     config_true_value, timing_stats, replication, \
     normalize_delete_at_timestamp, get_log_line, Timestamp, \
     get_expirer_container, parse_mime_headers, \
-    iter_multipart_mime_documents
+    iter_multipart_mime_documents, extract_swift_bytes
 from swift.common.bufferedhttp import http_connect
 from swift.common.constraints import check_object_creation, \
     valid_timestamp, check_utf8
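
The newly imported extract_swift_bytes is used further down to split an SLO-appended swift_bytes parameter off a content-type value. A rough re-implementation of that contract as the POST path relies on it; this sketch is inferred from the diff's usage and is not swift's actual helper:

    def extract_swift_bytes_sketch(content_type):
        # 'text/plain;swift_bytes=10' -> ('text/plain', '10')
        # 'text/plain'                -> ('text/plain', None)
        parts = [p.strip() for p in content_type.split(';')]
        base, params = parts[0], parts[1:]
        swift_bytes = None
        kept = []
        for param in params:
            name, _, value = param.partition('=')
            if name == 'swift_bytes':
                swift_bytes = value
            else:
                kept.append(param)
        return ';'.join([base] + kept), swift_bytes

    print(extract_swift_bytes_sketch('text/plain;swift_bytes=10'))
    print(extract_swift_bytes_sketch('text/plain'))
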
@@ -479,35 +479,103 @@ class ObjectController(BaseStorageServer):
         except (DiskFileNotExist, DiskFileQuarantined):
             return HTTPNotFound(request=request)
         orig_timestamp = Timestamp(orig_metadata.get('X-Timestamp', 0))
-        if orig_timestamp >= req_timestamp:
+        orig_ctype_timestamp = disk_file.content_type_timestamp
+        req_ctype_time = '0'
+        req_ctype = request.headers.get('Content-Type')
+        if req_ctype:
+            req_ctype_time = request.headers.get('Content-Type-Timestamp',
+                                                 req_timestamp.internal)
+        req_ctype_timestamp = Timestamp(req_ctype_time)
+        if orig_timestamp >= req_timestamp \
+                and orig_ctype_timestamp >= req_ctype_timestamp:
             return HTTPConflict(
                 request=request,
                 headers={'X-Backend-Timestamp': orig_timestamp.internal})
-        metadata = {'X-Timestamp': req_timestamp.internal}
-        self._preserve_slo_manifest(metadata, orig_metadata)
-        metadata.update(val for val in request.headers.items()
-                        if is_user_meta('object', val[0]))
-        headers_to_copy = (
-            request.headers.get(
-                'X-Backend-Replication-Headers', '').split() +
-            list(self.allowed_headers))
-        for header_key in headers_to_copy:
-            if header_key in request.headers:
-                header_caps = header_key.title()
-                metadata[header_caps] = request.headers[header_key]
-        orig_delete_at = int(orig_metadata.get('X-Delete-At') or 0)
-        if orig_delete_at != new_delete_at:
-            if new_delete_at:
-                self.delete_at_update('PUT', new_delete_at, account, container,
-                                      obj, request, device, policy)
-            if orig_delete_at:
-                self.delete_at_update('DELETE', orig_delete_at, account,
-                                      container, obj, request, device,
-                                      policy)
+        if req_timestamp > orig_timestamp:
+            metadata = {'X-Timestamp': req_timestamp.internal}
+            self._preserve_slo_manifest(metadata, orig_metadata)
+            metadata.update(val for val in request.headers.items()
+                            if is_user_meta('object', val[0]))
+            headers_to_copy = (
+                request.headers.get(
+                    'X-Backend-Replication-Headers', '').split() +
+                list(self.allowed_headers))
+            for header_key in headers_to_copy:
+                if header_key in request.headers:
+                    header_caps = header_key.title()
+                    metadata[header_caps] = request.headers[header_key]
+            orig_delete_at = int(orig_metadata.get('X-Delete-At') or 0)
+            if orig_delete_at != new_delete_at:
+                if new_delete_at:
+                    self.delete_at_update(
+                        'PUT', new_delete_at, account, container, obj, request,
+                        device, policy)
+                if orig_delete_at:
+                    self.delete_at_update('DELETE', orig_delete_at, account,
+                                          container, obj, request, device,
+                                          policy)
+        else:
+            # preserve existing metadata, only content-type may be updated
+            metadata = dict(disk_file.get_metafile_metadata())
+
+        if req_ctype_timestamp > orig_ctype_timestamp:
+            # we have a new content-type, add to metadata and container update
+            content_type_headers = {
+                'Content-Type': request.headers['Content-Type'],
+                'Content-Type-Timestamp': req_ctype_timestamp.internal
+            }
+            metadata.update(content_type_headers)
+        else:
+            # send existing content-type with container update
+            content_type_headers = {
+                'Content-Type': disk_file.content_type,
+                'Content-Type-Timestamp': orig_ctype_timestamp.internal
+            }
+            if orig_ctype_timestamp != disk_file.data_timestamp:
+                # only add to metadata if it's not the datafile content-type
+                metadata.update(content_type_headers)
+
         try:
             disk_file.write_metadata(metadata)
         except (DiskFileXattrNotSupported, DiskFileNoSpace):
             return HTTPInsufficientStorage(drive=device, request=request)
+
+        update_etag = orig_metadata['ETag']
+        if 'X-Object-Sysmeta-Ec-Etag' in orig_metadata:
+            # For EC policy, send X-Object-Sysmeta-Ec-Etag which is same as the
+            # X-Backend-Container-Update-Override-Etag value sent with the
+            # original PUT. We have to send Etag (and size etc) with a POST
+            # container update because the original PUT container update may
+            # have failed or be in async_pending.
+            update_etag = orig_metadata['X-Object-Sysmeta-Ec-Etag']
+
+        if (content_type_headers['Content-Type-Timestamp']
+                != disk_file.data_timestamp):
+            # Current content-type is not from the datafile, but the datafile
+            # content-type may have a swift_bytes param that was appended by
+            # SLO and we must continue to send that with the container update.
+            # Do this (rather than use a separate header) for backwards
+            # compatibility because there may be 'legacy' container updates in
+            # async pending that have content-types with swift_bytes params, so
+            # we have to be able to handle those in container server anyway.
+            _, swift_bytes = extract_swift_bytes(
+                disk_file.get_datafile_metadata()['Content-Type'])
+            if swift_bytes:
+                content_type_headers['Content-Type'] += (';swift_bytes=%s'
+                                                         % swift_bytes)
+
+        self.container_update(
+            'PUT', account, container, obj, request,
+            HeaderKeyDict({
+                'x-size': orig_metadata['Content-Length'],
+                'x-content-type': content_type_headers['Content-Type'],
+                'x-timestamp': disk_file.data_timestamp.internal,
+                'x-content-type-timestamp':
+                content_type_headers['Content-Type-Timestamp'],
+                'x-meta-timestamp': metadata['X-Timestamp'],
+                'x-etag': update_etag}),
+            device, policy)
         return HTTPAccepted(request=request)
 
     @public
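
The key behavioral change in the POST hunk above is the two-clock conflict rule: a POST now only gets 409 Conflict when it is older on both axes, the metadata timestamp and the content-type timestamp. A minimal sketch of that predicate, with plain comparable values standing in for swift Timestamps:

    def post_conflicts(orig_ts, orig_ctype_ts, req_ts, req_ctype_ts):
        # conflict only if the request is not newer on either axis
        return orig_ts >= req_ts and orig_ctype_ts >= req_ctype_ts

    # newer metadata wins even if the content-type clock is older:
    print(post_conflicts(5, 5, req_ts=6, req_ctype_ts=4))  # False -> accepted
    # older on both axes is rejected:
    print(post_conflicts(5, 5, req_ts=4, req_ctype_ts=4))  # True -> 409
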
@@ -556,9 +624,12 @@ class ObjectController(BaseStorageServer):
             orig_timestamp = disk_file.data_timestamp
         except DiskFileXattrNotSupported:
             return HTTPInsufficientStorage(drive=device, request=request)
+        except DiskFileDeleted as e:
+            orig_metadata = e.metadata
+            orig_timestamp = e.timestamp
         except (DiskFileNotExist, DiskFileQuarantined):
             orig_metadata = {}
-            orig_timestamp = 0
+            orig_timestamp = Timestamp(0)
 
         # Checks for If-None-Match
         if request.if_none_match is not None and orig_metadata:
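
The Timestamp(0) change above normalizes the missing-object case so that later comparisons always operate on the same type rather than mixing int and Timestamp. A sketch of why that matters, using a tiny stand-in Timestamp with a swift-like internal form (this is an assumption-laden toy, not swift.common.utils.Timestamp):

    class Timestamp(object):
        def __init__(self, ts):
            self.timestamp = float(ts)

        @property
        def internal(self):
            # swift-style zero-padded form, e.g. '0000000000.00000'
            return '%016.05f' % self.timestamp

        def __lt__(self, other):
            return self.timestamp < other.timestamp

    orig_timestamp = Timestamp(0)          # instead of: orig_timestamp = 0
    req_timestamp = Timestamp(1469000000)
    print(orig_timestamp < req_timestamp)  # comparisons stay type-consistent
    print(orig_timestamp.internal)         # '0000000000.00000'
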
Some files were not shown because too many files have changed in this diff.