Merge branch 'master' into feature/crypto

Conflicts:
	swift/common/wsgi.py

Change-Id: Ic37a285c008b01c7babf29110cfe0180bfde69c9
commit f9b7fd3074
Author: Alistair Coles
Date:   2016-01-25 17:47:13 +00:00

112 changed files with 4427 additions and 1623 deletions


@ -1,9 +1,11 @@
#!/bin/bash
SRC_DIR=$(python -c "import os; print os.path.dirname(os.path.realpath('$0'))")
set -e
cd ${SRC_DIR}/test/functional
nosetests --exe $@
cd ${SRC_DIR}
export TESTS_DIR=${SRC_DIR}/test/functional
ostestr --serial --pretty $@
rvalue=$?
cd -

.gitignore

@ -15,4 +15,6 @@ pycscope.*
.idea
MANIFEST
.testrepository/*
subunit.log
test/probe/.noseids


@ -87,3 +87,9 @@ Donagh McCabe <donagh.mccabe@hpe.com> <donagh.mccabe@hp.com>
Eamonn O'Toole <eamonn.otoole@hpe.com> <eamonn.otoole@hp.com>
Gerry Drudy <gerry.drudy@hpe.com> <gerry.drudy@hp.com>
Mark Seger <mark.seger@hpe.com> <mark.seger@hp.com>
Timur Alperovich <timur.alperovich@gmail.com> <timuralp@swiftstack.com>
Mehdi Abaakouk <sileht@redhat.com> <mehdi.abaakouk@enovance.com>
Richard Hawkins <richard.hawkins@rackspace.com> <hurricanerix@gmail.com>
Ondrej Novy <ondrej.novy@firma.seznam.cz>
Peter Lisak <peter.lisak@firma.seznam.cz>
Ke Liang <ke.liang@easystack.cn>

.testr.conf (new file)

@ -0,0 +1,4 @@
[DEFAULT]
test_command=SWIFT_TEST_DEBUG_LOGS=${SWIFT_TEST_DEBUG_LOGS} ${PYTHON:-python} -m subunit.run discover -t ./ ${TESTS_DIR:-./test/functional/} $LISTOPT $IDOPTION
test_id_option=--load-list $IDFILE
test_list_option=--list

AUTHORS

@ -25,7 +25,7 @@ Chuck Thier (cthier@gmail.com)
Contributors
------------
Mehdi Abaakouk (mehdi.abaakouk@enovance.com)
Mehdi Abaakouk (sileht@redhat.com)
Timur Alperovich (timur.alperovich@gmail.com)
Jesse Andrews (anotherjesse@gmail.com)
Joe Arnold (joe@swiftstack.com)
@ -41,7 +41,7 @@ James E. Blair (jeblair@openstack.org)
Fabien Boucher (fabien.boucher@enovance.com)
Clark Boylan (clark.boylan@gmail.com)
Pádraig Brady (pbrady@redhat.com)
Lorcan Browne (lorcan.browne@hp.com)
Lorcan Browne (lorcan.browne@hpe.com)
Russell Bryant (rbryant@redhat.com)
Jay S. Bryant (jsbryant@us.ibm.com)
Tim Burke (tim.burke@gmail.com)
@ -56,15 +56,17 @@ François Charlier (francois.charlier@enovance.com)
Ray Chen (oldsharp@163.com)
Harshit Chitalia (harshit@acelio.com)
Brian Cline (bcline@softlayer.com)
Alistair Coles (alistair.coles@hp.com)
Alistair Coles (alistair.coles@hpe.com)
Clément Contini (ccontini@cloudops.com)
Brian Curtin (brian.curtin@rackspace.com)
Thiago da Silva (thiago@redhat.com)
Julien Danjou (julien@danjou.info)
Paul Dardeau (paul.dardeau@intel.com)
Zack M. Davis (zdavis@swiftstack.com)
Ksenia Demina (kdemina@mirantis.com)
Dan Dillinger (dan.dillinger@sonian.net)
Cedric Dos Santos (cedric.dos.sant@gmail.com)
Gerry Drudy (gerry.drudy@hp.com)
Gerry Drudy (gerry.drudy@hpe.com)
Morgan Fainberg (morgan.fainberg@gmail.com)
ZhiQiang Fan (aji.zqfan@gmail.com)
Oshrit Feder (oshritf@il.ibm.com)
@ -85,6 +87,7 @@ David Goetz (david.goetz@rackspace.com)
Tushar Gohad (tushar.gohad@intel.com)
Jonathan Gonzalez V (jonathan.abdiel@gmail.com)
Joe Gordon (jogo@cloudscaling.com)
ChangBo Guo(gcb) (eric.guo@easystack.cn)
David Hadas (davidh@il.ibm.com)
Andrew Hale (andy@wwwdata.eu)
Soren Hansen (soren@linux2go.dk)
@ -92,9 +95,12 @@ Richard Hawkins (richard.hawkins@rackspace.com)
Gregory Haynes (greg@greghaynes.net)
Doug Hellmann (doug.hellmann@dreamhost.com)
Dan Hersam (dan.hersam@hp.com)
hgangwx (hgangwx@cn.ibm.com)
Derek Higgins (derekh@redhat.com)
Jonathan Hinson (jlhinson@us.ibm.com)
Alex Holden (alex@alexjonasholden.com)
Edward Hope-Morley (opentastic@gmail.com)
Ferenc Horváth (hferenc@inf.u-szeged.hu)
Charles Hsu (charles0126@gmail.com)
Joanna H. Huang (joanna.huitzu.huang@gmail.com)
Kun Huang (gareth@unitedstack.com)
@ -111,6 +117,7 @@ Jason Johnson (jajohnson@softlayer.com)
Brian K. Jones (bkjones@gmail.com)
Arnaud JOST (arnaud.jost@ovh.net)
Kiyoung Jung (kiyoung.jung@kt.com)
Harshada Mangesh Kakad (harshadak@metsi.co.uk)
Takashi Kajinami (kajinamit@nttdata.co.jp)
Matt Kassawara (mkassawara@gmail.com)
Morita Kazutaka (morita.kazutaka@gmail.com)
@ -136,6 +143,8 @@ Eohyung Lee (liquidnuker@gmail.com)
Zhao Lei (zhaolei@cn.fujitsu.com)
Jamie Lennox (jlennox@redhat.com)
Tong Li (litong01@us.ibm.com)
Ke Liang (ke.liang@easystack.cn)
Peter Lisak (peter.lisak@firma.seznam.cz)
Changbin Liu (changbin.liu@gmail.com)
Jing Liuqing (jing.liuqing@99cloud.net)
Victor Lowther (victor.lowther@gmail.com)
@ -143,6 +152,7 @@ Sergey Lukjanov (slukjanov@mirantis.com)
Zhongyue Luo (zhongyue.nah@intel.com)
Paul Luse (paul.e.luse@intel.com)
Christopher MacGown (chris@pistoncloud.com)
Ganesh Maharaj Mahalingam (ganesh.mahalingam@intel.com)
Dragos Manolescu (dragosm@hp.com)
Ben Martin (blmartin@us.ibm.com)
Steve Martinelli (stevemar@ca.ibm.com)
@ -152,7 +162,7 @@ Nakagawa Masaaki (nakagawamsa@nttdata.co.jp)
Dolph Mathews (dolph.mathews@gmail.com)
Kenichiro Matsuda (matsuda_kenichi@jp.fujitsu.com)
Michael Matur (michael.matur@gmail.com)
Donagh McCabe (donagh.mccabe@hp.com)
Donagh McCabe (donagh.mccabe@hpe.com)
Andy McCrae (andy.mccrae@gmail.com)
Paul McMillan (paul.mcmillan@nebula.com)
Ewan Mellor (ewan.mellor@citrix.com)
@ -168,19 +178,22 @@ Maru Newby (mnewby@internap.com)
Newptone (xingchao@unitedstack.com)
Colin Nicholson (colin.nicholson@iomart.com)
Zhenguo Niu (zhenguo@unitedstack.com)
Catherine Northcott (catherine@northcott.nz)
Ondrej Novy (ondrej.novy@firma.seznam.cz)
Timothy Okwii (tokwii@cisco.com)
Matthew Oliver (matt@oliver.net.au)
Hisashi Osanai (osanai.hisashi@jp.fujitsu.com)
Eamonn O'Toole (eamonn.otoole@hp.com)
Eamonn O'Toole (eamonn.otoole@hpe.com)
James Page (james.page@ubuntu.com)
Prashanth Pai (ppai@redhat.com)
Venkateswarlu Pallamala (p.venkatesh551@gmail.com)
Pawel Palucki (pawel.palucki@gmail.com)
Alex Pecoraro (alex.pecoraro@emc.com)
Sascha Peilicke (saschpe@gmx.de)
Constantine Peresypkin (constantine.peresypk@rackspace.com)
Dieter Plaetinck (dieter@vimeo.com)
Dan Prince (dprince@redhat.com)
Sivasathurappan Radhakrishnan (siva.radhakrishnan@intel.com)
Sarvesh Ranjan (saranjan@cisco.com)
Falk Reimann (falk.reimann@sap.com)
Brian Reitz (brian.reitz@oracle.com)
@ -198,7 +211,7 @@ Shilla Saebi (shilla.saebi@gmail.com)
Atsushi Sakai (sakaia@jp.fujitsu.com)
Cristian A Sanchez (cristian.a.sanchez@intel.com)
Christian Schwede (cschwede@redhat.com)
Mark Seger (Mark.Seger@hp.com)
Mark Seger (mark.seger@hpe.com)
Azhagu Selvan SP (tamizhgeek@gmail.com)
Alexandra Settle (alexandra.settle@rackspace.com)
Andrew Clay Shafer (acs@parvuscaptus.com)
@ -212,6 +225,7 @@ Pradeep Kumar Singh (pradeep.singh@nectechnologies.in)
Liu Siqi (meizu647@gmail.com)
Adrian Smith (adrian_f_smith@dell.com)
Jon Snitow (otherjon@swiftstack.com)
Emile Snyder (emile.snyder@gmail.com)
Emett Speer (speer.emett@gmail.com)
TheSriram (sriram@klusterkloud.com)
Jeremy Stanley (fungi@yuggoth.org)
@ -234,7 +248,9 @@ Dmitry Ukov (dukov@mirantis.com)
Vincent Untz (vuntz@suse.com)
Daniele Valeriani (daniele@dvaleriani.net)
Koert van der Veer (koert@cloudvps.com)
Béla Vancsics (vancsics@inf.u-szeged.hu)
Vladimir Vechkanov (vvechkanov@mirantis.com)
venkatamahesh (venkatamaheshkotha@gmail.com)
Gil Vernik (gilv@il.ibm.com)
Hou Ming Wang (houming.wang@easystack.cn)
Shane Wang (shane.wang@intel.com)
@ -248,7 +264,7 @@ Ye Jia Xu (xyj.asmy@gmail.com)
Alex Yang (alex890714@gmail.com)
Lin Yang (lin.a.yang@intel.com)
Yee (mail.zhang.yee@gmail.com)
Guang Yee (guang.yee@hp.com)
Guang Yee (guang.yee@hpe.com)
Pete Zaitcev (zaitcev@kotori.zaitcev.us)
Hua Zhang (zhuadl@cn.ibm.com)
Jian Zhang (jian.zhang@intel.com)


@ -1,3 +1,92 @@
swift (2.6.0)
* Dependency changes
- Updated minimum version of eventlet to 0.17.4 to support IPv6.
- Updated the minimum version of PyECLib to 1.0.7.
* The ring rebalancing algorithm was updated to better handle edge cases
and to give better (more balanced) rings in the general case. New rings
will have better initial placement, capacity adjustments will move less
data for better balance, and existing rings that were imbalanced should
start to become better balanced as they go through rebalance cycles.
* Added container and account reverse listings.
A GET request to an account or container resource with a "reverse=true"
query parameter will return the listing in reverse order. When
iterating over pages of reverse listings, the relative order of marker
and end_marker is swapped (see the request sketch after this excerpt).
* Storage policies now support having more than one name.
This allows operators to fix a typo without breaking existing clients,
or, alternatively, have "short names" for policies. This is implemented
with the "aliases" config key in the storage policy config in
swift.conf. The aliases value is a list of names that the storage
policy may also be identified by. The storage policy "name" is used to
report the policy to users (eg in container headers). The aliases have
the same naming restrictions as the policy's primary name.
* The object auditor learned the "interval" config value to control the
time between each audit pass.
* `swift-recon --all` now includes the config checksum check.
* `swift-init` learned the --kill-after-timeout option to force a service
to quit (SIGKILL) after a designated time.
* `swift-recon` now correctly shows timestamps in UTC instead of local
time.
* Fixed bug where `swift-ring-builder` couldn't select device id 0.
* Documented the previously undocumented
`swift-ring-builder pretend_min_part_hours_passed` command.
* The "node_timeout" config value now accepts decimal values.
* `swift-ring-builder` now properly removes devices with zero weight.
* `swift-init` return codes are updated via "--strict" and "--non-strict"
options. Please see the usage string for more information.
* `swift-ring-builder` now reports the min_part_hours lockout time
remaining
* Container sync has been improved to more quickly find and iterate over
the containers to be synced. This reduces server load and lowers the
time required to see data propagate between two clusters. Please see
http://swift.openstack.org/overview_container_sync.html for more details
about the new on-disk structure for tracking synchronized containers.
* A container POST will now update that container's put-timestamp value.
* TempURL header restrictions are now exposed in /info.
* Error messages on static large object manifest responses have been
greatly improved.
* Closed a bug where an unfinished read of a large object would leak a
socket file descriptor and a small amount of memory. (CVE-2016-0738)
* Fixed an issue where a zero-byte object PUT with an incorrect Etag
would return a 503.
* Fixed an error when a static large object manifest references the same
object more than once.
* Improved performance of finding handoff nodes if a zone is empty.
* Fixed duplication of headers in Access-Control-Expose-Headers on CORS
requests.
* Fixed handling of IPv6 connections to memcache pools.
* Continued work towards python 3 compatibility.
* Various other minor bug fixes and improvements.
swift (2.5.0, OpenStack Liberty)
* Added the ability to specify ranges for Static Large Object (SLO)
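As a concrete illustration of the reverse-listing item above, here is a
minimal sketch using python-requests against a hypothetical cluster. The
storage URL, token and container name are placeholders; only the
reverse=true parameter and the marker behaviour come from the notes.

import requests

# Placeholder endpoint and token -- substitute your own cluster's.
STORAGE_URL = "http://cluster.example.com/v1/AUTH_test"
HEADERS = {"X-Auth-Token": "AUTH_tk_placeholder"}

# Normal (ascending) container listing.
ascending = requests.get(
    STORAGE_URL + "/container1", headers=HEADERS).text.splitlines()

# The same listing, reversed via the new query parameter.
descending = requests.get(
    STORAGE_URL + "/container1", headers=HEADERS,
    params={"reverse": "true"}).text.splitlines()

assert descending == list(reversed(ascending))

# When paging a reverse listing, marker and end_marker swap their
# relative order: marker names the object to continue backwards from.
page2 = requests.get(
    STORAGE_URL + "/container1", headers=HEADERS,
    params={"reverse": "true", "marker": descending[-1]}).text.splitlines()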


@ -23,7 +23,6 @@ from time import time
from eventlet import GreenPool, hubs, patcher, Timeout
from eventlet.pools import Pool
from eventlet.green import urllib2
from swift.common import direct_client
try:
@ -174,8 +173,8 @@ def object_dispersion_report(coropool, connpool, account, object_ring,
try:
objects = [o['name'] for o in conn.get_container(
container, prefix='dispersion_', full_listing=True)[1]]
except urllib2.HTTPError as err:
if err.getcode() != 404:
except ClientException as err:
if err.http_status != 404:
raise
print >>stderr, 'No objects to query. Has ' \


@ -74,6 +74,11 @@ def main():
help="Return zero status code even if some config is "
"missing. Default mode if any server is a glob or "
"one of aliases `all`, `main` or `rest`.")
# SIGKILL daemon after kill_wait period
parser.add_option('--kill-after-timeout', dest='kill_after_timeout',
action='store_true',
help="Kill daemon and all childs after kill-wait "
"period.")
options, args = parser.parse_args()


@ -384,7 +384,8 @@ Sets the maximum number of connections to each memcached server per worker.
If not set in the configuration file, the value for memcache_servers will be
read from /etc/swift/memcache.conf (see memcache.conf-sample) or lacking that
file, it will default to 127.0.0.1:11211. You can specify multiple servers
separated with commas, as in: 10.1.2.3:11211,10.1.2.4:11211.
separated with commas, as in: 10.1.2.3:11211,10.1.2.4:11211. (IPv6
addresses must follow rfc3986 section-3.2.2, i.e. [::1]:11211)
.IP \fBmemcache_serialization_support\fR
This sets how memcache values are serialized and deserialized:
.RE


@ -111,6 +111,7 @@ allows one to use the keywords such as "all", "main" and "rest" for the <server>
.IP "-r RUN_DIR, --run-dir=RUN_DIR directory where the pids will be stored (default /var/run/swift)
.IP "--strict return non-zero status code if some config is missing. Default mode if server is explicitly named."
.IP "--non-strict return zero status code even if some config is missing. Default mode if server is one of aliases `all`, `main` or `rest`."
.IP "--kill-after-timeout kill daemon and all childs after kill-wait period."
.PD
.RE


@ -463,7 +463,12 @@ Example::
Assuming 3 replicas, this configuration will make object PUTs try
storing the object's replicas on up to 6 disks ("2 * replicas") in
region 1 ("r1").
region 1 ("r1"). Proxy server tries to find 3 devices for storing the
object. While a device is unavailable, it queries the ring for the 4th
device and so on until 6th device. If the 6th disk is still unavailable,
the last replica will be sent to other region. It doesn't mean there'll
have 6 replicas in region 1.
You should be aware that, if you have data coming into SF faster than
your link to NY can transfer it, then your cluster's data distribution
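The fallback described above, sketched as standalone Python. This is not
Swift's ring code; the device names are invented, and only the 3-replica
and "2 * replicas" numbers come from the example.

REPLICAS = 3
WRITE_AFFINITY_NODE_COUNT = 2 * REPLICAS  # the "2 * replicas" = 6 above

def pick_write_nodes(local_devs, remote_devs, is_available):
    """Prefer up to WRITE_AFFINITY_NODE_COUNT local devices; any
    replicas that don't fit locally go to other regions."""
    chosen = []
    for dev in local_devs[:WRITE_AFFINITY_NODE_COUNT]:
        if len(chosen) == REPLICAS:
            return chosen
        if is_available(dev):
            chosen.append(dev)
    for dev in remote_devs:
        if len(chosen) == REPLICAS:
            break
        if is_available(dev):
            chosen.append(dev)
    return chosen

# Four of the six region-1 candidates are down, so only two replicas
# fit locally and the last one spills to region 2 -- there are never
# more than 3 replicas in total.
down = {"r1d1", "r1d2", "r1d3", "r1d4"}
print(pick_write_nodes(["r1d%d" % i for i in range(1, 7)],
                       ["r2d%d" % i for i in range(1, 4)],
                       lambda d: d not in down))
# -> ['r1d5', 'r1d6', 'r2d1']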


@ -13,9 +13,10 @@
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import datetime
import os
import subprocess
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
@ -144,8 +145,10 @@ modindex_common_prefix = ['swift.']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
git_cmd = "git log --pretty=format:'%ad, commit %h' --date=local -n1"
html_last_updated_fmt = os.popen(git_cmd).read()
git_cmd = ["git", "log", "--pretty=format:'%ad, commit %h'", "--date=local",
"-n1"]
html_last_updated_fmt = subprocess.Popen(
git_cmd, stdout=subprocess.PIPE).communicate()[0]
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.


@ -510,9 +510,9 @@ container_update_timeout 1 Time to wait while sending a contai
[object-server]
============================= ====================== =================================
============================= ====================== ===============================================
Option Default Description
----------------------------- ---------------------- ---------------------------------
----------------------------- ---------------------- -----------------------------------------------
use paste.deploy entry point for the
object server. For most cases,
this should be
@ -537,9 +537,9 @@ keep_cache_private false Allow non-public objects t
in kernel's buffer cache
allowed_headers Content-Disposition, Comma separated list of headers
Content-Encoding, that can be set in metadata on an object.
X-Delete-At, This list is in addition to X-Object-Meta-* headers and cannot include
X-Object-Manifest, Content-Type, etag, Content-Length, or deleted
X-Static-Large-Object
X-Delete-At, This list is in addition to
X-Object-Manifest, X-Object-Meta-* headers and cannot include
X-Static-Large-Object Content-Type, etag, Content-Length, or deleted
auto_create_account_prefix . Prefix used when automatically
creating accounts.
threads_per_disk 0 Size of the per-disk thread pool
@ -596,13 +596,13 @@ splice no Use splice() for zero-copy
will appear in the object server
logs at startup, but your object
servers should continue to function.
============================= ====================== =================================
============================= ====================== ===============================================
[object-replicator]
================== ======================== ================================
=========================== ======================== ================================
Option Default Description
------------------ ------------------------ --------------------------------
--------------------------- ------------------------ --------------------------------
log_name object-replicator Label used when logging
log_facility LOG_LOCAL0 Syslog log facility
log_level INFO Logging level
@ -613,14 +613,15 @@ interval 30 Time in seconds to wait between
replication passes
concurrency 1 Number of replication workers to
spawn
sync_method rsync The sync method to use; default is
rsync but you can use ssync to try the
EXPERIMENTAL all-swift-code-no-rsync-callouts
method. Once ssync is verified
as having performance comparable to,
sync_method rsync The sync method to use; default
is rsync but you can use ssync to
try the EXPERIMENTAL
all-swift-code-no-rsync-callouts
method. Once ssync is verified as
or better than, rsync, we plan to
deprecate rsync so we can move on
with more features for replication.
with more features for
replication.
rsync_timeout 900 Max duration of a partition rsync
rsync_bwlimit 0 Bandwidth limit for rsync in kB/s.
0 means unlimited.
@ -687,7 +688,7 @@ rsync_error_log_line_length 0 Limits how long rsync error log
ring_check_interval 15 Interval for checking new ring
file
recon_cache_path /var/cache/swift Path to recon cache
================== ======================== ================================
=========================== ======================== ================================
[object-updater]
@ -718,6 +719,8 @@ log_facility LOG_LOCAL0 Syslog log facility
log_level INFO Logging level
log_address /dev/log Logging directory
log_time 3600 Frequency of status logs in seconds.
interval 30 Time in seconds to wait between
auditor passes
disk_chunk_size 65536 Size of chunks read during auditing
files_per_second 20 Maximum files audited per second per
auditor process. Should be tuned according
@ -887,15 +890,15 @@ rsync_module {replication_ip}::container Format of the rsync module
etc/rsyncd.conf-sample for
some examples.
rsync_compress no Allow rsync to compress data
which is transmitted to destination
node during sync. However, this
is applicable only when destination
node is in a different region
than the local one.
NOTE: Objects that are already
compressed (for example: .tar.gz,
.mp3) might slow down the syncing
process.
which is transmitted to
destination node during sync.
However, this is applicable
only when destination node is
in a different region than the
local one. NOTE: Objects that
are already compressed (for
example: .tar.gz, .mp3) might
slow down the syncing process.
recon_cache_path /var/cache/swift Path to recon cache
================== =========================== =============================
@ -1090,15 +1093,15 @@ rsync_module {replication_ip}::account Format of the rsync module where
etc/rsyncd.conf-sample for some
examples.
rsync_compress no Allow rsync to compress data
which is transmitted to destination
node during sync. However, this
is applicable only when destination
node is in a different region
than the local one.
NOTE: Objects that are already
compressed (for example: .tar.gz,
.mp3) might slow down the syncing
process.
which is transmitted to
destination node during sync.
However, this is applicable only
when destination node is in a
different region than the local
one. NOTE: Objects that are
already compressed (for example:
.tar.gz, .mp3) might slow down
the syncing process.
recon_cache_path /var/cache/swift Path to recon cache
================== ========================= ===============================
@ -1159,9 +1162,9 @@ The following configuration options are available:
[DEFAULT]
==================================== ======================== =============================
==================================== ======================== ========================================
Option Default Description
------------------------------------ ------------------------ -----------------------------
------------------------------------ ------------------------ ----------------------------------------
bind_ip 0.0.0.0 IP Address for server to
bind to
bind_port 80 Port for server to bind to
@ -1205,11 +1208,12 @@ cors_allow_origin This is a list o
strict_cors_mode True
client_timeout 60
trans_id_suffix This optional suffix (default is empty)
that would be appended to the swift transaction
id allows one to easily figure out from
which cluster that X-Trans-Id belongs to.
This is very useful when one is managing
more than one swift cluster.
that would be appended to the swift
transaction id allows one to easily
figure out from which cluster that
X-Trans-Id belongs to. This is very
useful when one is managing more than
one swift cluster.
log_name swift Label used when logging
log_facility LOG_LOCAL0 Syslog log facility
log_level INFO Logging level
@ -1246,7 +1250,7 @@ disallowed_sections swift.valid_api_versions Allows the abili
the dict level with a ".".
expiring_objects_container_divisor 86400
expiring_objects_account_name expiring_objects
==================================== ======================== =============================
==================================== ======================== ========================================
[proxy-server]
@ -1276,7 +1280,8 @@ object_chunk_size 65536 Chunk size to read from
client_chunk_size 65536 Chunk size to read from
clients
memcache_servers 127.0.0.1:11211 Comma separated list of
memcached servers ip:port
memcached servers
ip:port or [ipv6addr]:port
memcache_max_connections 2 Max number of connections to
each memcached server per
worker


@ -375,7 +375,7 @@ folks a start on their own code if they want to use repoze.what::
expiration = float(resp.getheader('x-auth-ttl'))
user = resp.getheader('x-auth-user')
memcache_client.set(key, (time(), expiration, user),
timeout=expiration)
time=expiration)
return user
return None
@ -487,7 +487,8 @@ folks a start on their own code if they want to use repoze.what::
Allowing CORS with Auth
-----------------------
Cross Origin RequestS require that the auth system allow the OPTIONS method to
pass through without a token. The preflight request will make an OPTIONS call
against the object or container and will not work if the auth system stops it.
Cross Origin Resource Sharing (CORS) requires that the auth system allow the
OPTIONS method to pass through without a token. The preflight request will
make an OPTIONS call against the object or container and will not work if
the auth system stops it.
See TempAuth for an example of how OPTIONS requests are handled.


@ -37,7 +37,8 @@ Installing dependencies
sudo apt-get update
sudo apt-get install curl gcc memcached rsync sqlite3 xfsprogs \
git-core libffi-dev python-setuptools
git-core libffi-dev python-setuptools \
liberasurecode-dev
sudo apt-get install python-coverage python-dev python-nose \
python-xattr python-eventlet \
python-greenlet python-pastedeploy \
@ -48,7 +49,8 @@ Installing dependencies
sudo yum update
sudo yum install curl gcc memcached rsync sqlite xfsprogs git-core \
libffi-devel xinetd python-setuptools \
libffi-devel xinetd liberasurecode-devel \
python-setuptools \
python-coverage python-devel python-nose \
pyxattr python-eventlet \
python-greenlet python-paste-deploy \


@ -29,7 +29,7 @@ synchronization key.
Configuring Container Sync
--------------------------
Create a container-sync-realms.conf file specifying the allowable clusters
Create a ``container-sync-realms.conf`` file specifying the allowable clusters
and their information::
[realm1]
@ -50,18 +50,18 @@ clusters that have agreed to allow container syncing with each other. Realm
names will be considered case insensitive.
The key is the overall cluster-to-cluster key used in combination with the
external users' key that they set on their containers' X-Container-Sync-Key
metadata header values. These keys will be used to sign each request the
container sync daemon makes and used to validate each incoming container sync
request.
external users' key that they set on their containers'
``X-Container-Sync-Key`` metadata header values. These keys will be used to
sign each request the container sync daemon makes and used to validate each
incoming container sync request.
The key2 is optional and is an additional key incoming requests will be checked
against. This is so you can rotate keys if you wish; you move the existing key
to key2 and make a new key value.
Any values in the realm section whose names begin with cluster\_ will indicate
the name and endpoint of a cluster and will be used by external users in
their containers' X-Container-Sync-To metadata header values with the format
Any values in the realm section whose names begin with ``cluster_`` will
indicate the name and endpoint of a cluster and will be used by external users in
their containers' ``X-Container-Sync-To`` metadata header values with the format
"//realm_name/cluster_name/account_name/container_name". Realm and cluster
names are considered case insensitive.
@ -71,7 +71,7 @@ container servers, since that is where the container sync daemon runs. Note
that the endpoint ends with /v1/ and that the container sync daemon will then
add the account/container/obj name after that.
Distribute this container-sync-realms.conf file to all your proxy servers
Distribute this ``container-sync-realms.conf`` file to all your proxy servers
and container servers.
You also need to add the container_sync middleware to your proxy pipeline. It
@ -95,7 +95,7 @@ section, Configuring Container Sync, for the new-style.
With the old-style, the Swift cluster operator must allow synchronization with
a set of hosts before the user can enable container synchronization. First, the
backend container server needs to be given this list of hosts in the
container-server.conf file::
``container-server.conf`` file::
[DEFAULT]
# This is a comma separated list of hosts allowed in the
@ -170,8 +170,8 @@ we'll make next::
The ``-t`` indicates the cluster to sync to, which is the realm name of the
section from container-sync-realms.conf, followed by the cluster name from
that section (without the cluster\_ prefix), followed by the account and container names we want to sync to.
The ``-k`` specifies the secret key the two containers will share for
that section (without the cluster\_ prefix), followed by the account and container
names we want to sync to. The ``-k`` specifies the secret key the two containers will share for
synchronization; this is the user key, the cluster key in
container-sync-realms.conf will also be used behind the scenes.
@ -195,8 +195,18 @@ as it gets synchronized over to the second::
list container2
[Nothing there yet, so we wait a bit...]
[If you're an operator running SAIO and just testing, you may need to
run 'swift-init container-sync once' to perform a sync scan.]
.. note::
If you're an operator running SAIO and just testing, each time you
configure a container for synchronization and place objects in the
source container you will need to ensure that container-sync runs
before attempting to retrieve objects from the target container.
That is, you need to run::
swift-init container-sync once
Now expect to see objects copied from the first container to the second::
$ swift -A http://cluster2/auth/v1.0 -U test2:tester2 -K testing2 \
list container2
@ -340,13 +350,34 @@ synchronize to the second, we could have used this curl command::
What's going on behind the scenes, in the cluster?
--------------------------------------------------
The swift-container-sync does the job of sending updates to the remote
container.
Container ring devices have a directory called ``containers``, where container
databases reside. In addition to ``containers``, each container ring device
also has a directory called ``sync-containers``. ``sync-containers`` holds
symlinks to container databases that were configured for container sync using
``x-container-sync-to`` and ``x-container-sync-key`` metadata keys.
This is done by scanning the local devices for container databases and
checking for x-container-sync-to and x-container-sync-key metadata values.
If they exist, newer rows since the last sync will trigger PUTs or DELETEs
to the other container.
The swift-container-sync process does the job of sending updates to the remote
container. This is done by scanning ``sync-containers`` for container
databases. For each container db found, newer rows since the last sync will
trigger PUTs or DELETEs to the other container.
``sync-containers`` is maintained as follows:
Whenever the container-server processes a PUT or a POST request that carries
``x-container-sync-to`` and ``x-container-sync-key`` metadata keys the server
creates a symlink to the container database in ``sync-containers``. Whenever
the container server deletes a synced container, the appropriate symlink
is deleted from ``sync-containers``.
In addition to the container-server, the container-replicator process does the
job of identifying containers that should be synchronized. This is done by
scanning the local devices for container databases and checking for
x-container-sync-to and x-container-sync-key metadata values. If they exist
then a symlink to the container database is created in a sync-containers
sub-directory on the same device.
Similarly, when the container sync metadata keys are deleted, the container
server and container-replicator would take care of deleting the symlinks
from ``sync-containers``.
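A toy sketch of the bookkeeping just described, under the assumption that
a container db's symlink in ``sync-containers`` simply mirrors the presence
of the two sync metadata keys. The paths and function name here are
invented; the real logic lives in the container server and replicator.

import os

DEVICE = "/srv/node/sdb1"  # hypothetical container ring device

def on_container_metadata_update(db_path, metadata):
    """Keep a sync-containers symlink iff both sync keys are set."""
    link = os.path.join(DEVICE, "sync-containers",
                        os.path.basename(db_path))
    wants_sync = ("x-container-sync-to" in metadata
                  and "x-container-sync-key" in metadata)
    if wants_sync and not os.path.islink(link):
        os.symlink(db_path, link)
    elif not wants_sync and os.path.islink(link):
        os.unlink(link)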
.. note::


@ -2,20 +2,6 @@
Erasure Code Support
====================
--------------------------
Beta: Not production ready
--------------------------
The erasure code support in Swift is considered "beta" at this point.
Most major functionality is included, but it has not been tested or validated
at large scale. This feature relies on ssync for durability. Deployers are
urged to do extensive testing and not deploy production data using an
erasure code storage policy.
If any bugs are found during testing, please report them to
https://bugs.launchpad.net/swift
-------------------------------
History and Theory of Operation
-------------------------------


@ -57,7 +57,7 @@ deployers. Each container has a new special immutable metadata element called
the storage policy index. Note that internally, Swift relies on policy
indexes and not policy names. Policy names exist for human readability and
translation is managed in the proxy. When a container is created, one new
optional header is supported to specify the policy name. If nothing is
optional header is supported to specify the policy name. If no name is
specified, the default policy is used (and if no other policies defined,
Policy-0 is considered the default). We will be covering the difference
between default and Policy-0 in the next section.
@ -170,12 +170,13 @@ Storage Policies is a versatile feature intended to support both new and
pre-existing clusters with the same level of flexibility. For that reason, we
introduce the ``Policy-0`` concept which is not the same as the "default"
policy. As you will see when we begin to configure policies, each policy has
both a name (human friendly, configurable) as well as an index (or simply
policy number). Swift reserves index 0 to map to the object ring that's
present in all installations (e.g., ``/etc/swift/object.ring.gz``). You can
name this policy anything you like, and if no policies are defined it will
report itself as ``Policy-0``, however you cannot change the index as there must
always be a policy with index 0.
a single name and an arbitrary number of aliases (human friendly,
configurable) as well as an index (or simply policy number). Swift reserves
index 0 to map to the object ring that's present in all installations
(e.g., ``/etc/swift/object.ring.gz``). You can name this policy anything you
like, and if no policies are defined it will report itself as ``Policy-0``,
however you cannot change the index as there must always be a policy with
index 0.
Another important concept is the default policy which can be any policy
in the cluster. The default policy is the policy that is automatically
@ -273,6 +274,8 @@ file:
* Policy names must contain only letters, digits or a dash
* Policy names must be unique
* The policy name 'Policy-0' can only be used for the policy with index 0
* Multiple names can be assigned to one policy using aliases. All names
must follow the Swift naming rules.
* If any policies are defined, exactly one policy must be declared default
* Deprecated policies cannot be declared the default
* If no ``policy_type`` is provided, ``replication`` is the default value.
@ -288,6 +291,7 @@ example configuration.::
[storage-policy:0]
name = gold
aliases = yellow, orange
policy_type = replication
default = yes
@ -301,8 +305,10 @@ information about the ``default`` and ``deprecated`` options.
There are some other considerations when managing policies:
* Policy names can be changed (but be sure that users are aware, aliases are
not currently supported but could be implemented in custom middleware!)
* Policy names can be changed.
* Aliases are supported and can be added and removed. If the primary name
of a policy is removed the next available alias will be adopted as the
primary name. A policy must always have at least one name.
* You cannot change the index of a policy once it has been created
* The default policy can be changed at any time, by adding the
default directive to the desired policy section

doc/source/policies_saio.rst (Normal file → Executable file)

@ -26,6 +26,7 @@ to implement a usable set of policies.
[storage-policy:0]
name = gold
aliases = yellow, orange
default = yes
[storage-policy:1]
@ -82,7 +83,8 @@ Storage Policies effect placement of data in Swift.
You should see this: (only showing the policy output here)::
policies: [{'default': True, 'name': 'gold'}, {'name': 'silver'}]
policies: [{'aliases': 'gold, yellow, orange', 'default': True,
'name': 'gold'}, {'aliases': 'silver', 'name': 'silver'}]
3. Now create a container without specifying a policy, it will use the
default, 'gold' and then put a test object in it (create the file ``file0.txt``


@ -2,6 +2,7 @@
# You can use this single conf file instead of having memcache_servers set in
# several other conf files under [filter:cache] for example. You can specify
# multiple servers separated with commas, as in: 10.1.2.3:11211,10.1.2.4:11211
# (IPv6 addresses must follow rfc3986 section-3.2.2, i.e. [::1]:11211)
# memcache_servers = 127.0.0.1:11211
#
# Sets how memcache values are serialized and deserialized:


@ -172,10 +172,7 @@ use = egg:swift#recon
# concurrency = 1
# stats_interval = 300
#
# The sync method to use; default is rsync but you can use ssync to try the
# EXPERIMENTAL all-swift-code-no-rsync-callouts method. Once ssync is verified
# as having performance comparable to, or better than, rsync, we plan to
# deprecate rsync so we can move on with more features for replication.
# default is rsync, alternative is ssync
# sync_method = rsync
#
# max duration of a partition rsync
@ -285,6 +282,9 @@ use = egg:swift#recon
# log_level = INFO
# log_address = /dev/log
#
# Time in seconds to wait between auditor passes
# interval = 30
#
# You can set the disk chunk size that the auditor uses making it larger if
# you like for more efficient local auditing of larger objects
# disk_chunk_size = 65536


@ -388,7 +388,8 @@ use = egg:swift#memcache
# If not set here, the value for memcache_servers will be read from
# memcache.conf (see memcache.conf-sample) or lacking that file, it will
# default to the value below. You can specify multiple servers separated with
# commas, as in: 10.1.2.3:11211,10.1.2.4:11211
# commas, as in: 10.1.2.3:11211,10.1.2.4:11211 (IPv6 addresses must
# follow rfc3986 section-3.2.2, i.e. [::1]:11211)
# memcache_servers = 127.0.0.1:11211
#
# Sets how memcache values are serialized and deserialized:
@ -628,14 +629,17 @@ use = egg:swift#bulk
use = egg:swift#slo
# max_manifest_segments = 1000
# max_manifest_size = 2097152
# min_segment_size = 1048576
# Start rate-limiting SLO segment serving after the Nth segment of a
#
# Rate limiting applies only to segments smaller than this size (bytes).
# rate_limit_under_size = 1048576
#
# Start rate-limiting SLO segment serving after the Nth small segment of a
# segmented object.
# rate_limit_after_segment = 10
#
# Once segment rate-limiting kicks in for an object, limit segments served
# to N per second. 0 means no rate-limiting.
# rate_limit_segments_per_sec = 0
# rate_limit_segments_per_sec = 1
#
# Time limit on GET requests (seconds)
# max_get_time = 86400
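Read together, the three rate-limit options above describe throttling
like the following sketch. This is illustrative only, not the
middleware's actual code; only the option names and defaults come from
the sample config.

import time

RATE_LIMIT_UNDER_SIZE = 1048576   # only segments smaller than this count
RATE_LIMIT_AFTER_SEGMENT = 10     # let the first N small segments through
RATE_LIMIT_SEGMENTS_PER_SEC = 1   # then serve at most this many per second

def serve_segments(segments):
    """segments: iterable of (name, size_in_bytes) pairs."""
    small_served = 0
    for name, size in segments:
        if size < RATE_LIMIT_UNDER_SIZE:
            small_served += 1
            if (small_served > RATE_LIMIT_AFTER_SEGMENT
                    and RATE_LIMIT_SEGMENTS_PER_SEC > 0):
                # Cap small-segment serving at N per second.
                time.sleep(1.0 / RATE_LIMIT_SEGMENTS_PER_SEC)
        yield name  # large segments are never throttled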

etc/swift.conf-sample (Normal file → Executable file)

@ -21,7 +21,7 @@ swift_hash_path_prefix = changeme
# policy with index 0 will be declared the default. If multiple policies are
# defined you must define a policy with index 0 and you must specify a
# default. It is recommended you always define a section for
# storage-policy:0.
# storage-policy:0. Aliases are not required when defining a storage policy.
#
# A 'policy_type' argument is also supported but is not mandatory. Default
# policy type 'replication' is used when 'policy_type' is unspecified.
@ -29,6 +29,7 @@ swift_hash_path_prefix = changeme
name = Policy-0
default = yes
#policy_type = replication
aliases = yellow, orange
# the following section would declare a policy called 'silver', the number of
# replicas will be determined by how the ring is built. In this example the
@ -40,7 +41,10 @@ default = yes
# this config has specified it as the default. However if a legacy container
# (one created with a pre-policy version of swift) is accessed, it is known
# implicitly to be assigned to the policy with index 0 as opposed to the
# current default.
# current default. Note that even without specifying any aliases, a policy
# always has at least the default name stored in aliases because this field is
# used to contain all human readable names for a storage policy.
#
#[storage-policy:1]
#name = silver
#policy_type = replication
@ -67,12 +71,13 @@ default = yes
# refer to Swift documentation for details on how to configure EC policies.
#
# The example 'deepfreeze10-4' policy defined below is a _sample_
# configuration with 10 'data' and 4 'parity' fragments. 'ec_type'
# defines the Erasure Coding scheme. 'jerasure_rs_vand' (Reed-Solomon
# Vandermonde) is used as an example below.
# configuration with an alias of 'df10-4' as well as 10 'data' and 4 'parity'
# fragments. 'ec_type' defines the Erasure Coding scheme.
# 'jerasure_rs_vand' (Reed-Solomon Vandermonde) is used as an example below.
#
#[storage-policy:2]
#name = deepfreeze10-4
#aliases = df10-4
#policy_type = erasure_coding
#ec_type = jerasure_rs_vand
#ec_num_data_fragments = 10


@ -4,7 +4,7 @@
dnspython>=1.12.0;python_version<'3.0'
dnspython3>=1.12.0;python_version>='3.0'
eventlet>=0.16.1,!=0.17.0
eventlet>=0.17.4 # MIT
greenlet>=0.3.1
netifaces>=0.5,!=0.10.0,!=0.10.1
pastedeploy>=1.3.3


@ -311,7 +311,7 @@ class AccountReaper(Daemon):
delete_timestamp = Timestamp(info['delete_timestamp'])
if self.stats_containers_remaining and \
begin - float(delete_timestamp) >= self.reap_not_done_after:
self.logger.warn(_('Account %s has not been reaped since %s') %
self.logger.warning(_('Account %s has not been reaped since %s') %
(account, delete_timestamp.isoformat))
return True


@ -181,12 +181,12 @@ class SwiftRecon(object):
def _ptime(self, timev=None):
"""
:param timev: a unix timestamp or None
:returns: a pretty string of the current time or provided time
:returns: a pretty string of the current time or provided time in UTC
"""
if timev:
return time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(timev))
return time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime(timev))
else:
return time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
return time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime())
def _md5_file(self, path):
"""
@ -495,16 +495,14 @@ class SwiftRecon(object):
elapsed = time.time() - least_recent_time
elapsed, elapsed_unit = seconds2timeunit(elapsed)
print('Oldest completion was %s (%d %s ago) by %s.' % (
time.strftime('%Y-%m-%d %H:%M:%S',
time.gmtime(least_recent_time)),
self._ptime(least_recent_time),
elapsed, elapsed_unit, host))
if most_recent_url is not None:
host = urlparse(most_recent_url).netloc
elapsed = time.time() - most_recent_time
elapsed, elapsed_unit = seconds2timeunit(elapsed)
print('Most recent completion was %s (%d %s ago) by %s.' % (
time.strftime('%Y-%m-%d %H:%M:%S',
time.gmtime(most_recent_time)),
self._ptime(most_recent_time),
elapsed, elapsed_unit, host))
print("=" * 79)
@ -899,12 +897,8 @@ class SwiftRecon(object):
continue
if (ts_remote < ts_start or ts_remote > ts_end):
diff = abs(ts_end - ts_remote)
ts_end_f = time.strftime(
"%Y-%m-%d %H:%M:%S",
time.localtime(ts_end))
ts_remote_f = time.strftime(
"%Y-%m-%d %H:%M:%S",
time.localtime(ts_remote))
ts_end_f = self._ptime(ts_end)
ts_remote_f = self._ptime(ts_remote)
print("!! %s current time is %s, but remote is %s, "
"differs by %.2f sec" % (


@ -25,6 +25,7 @@ from os.path import basename, abspath, dirname, exists, join as pathjoin
from sys import argv as sys_argv, exit, stderr, stdout
from textwrap import wrap
from time import time
from datetime import timedelta
import optparse
import math
@ -444,7 +445,9 @@ swift-ring-builder <builder_file>
builder.parts, builder.replicas, regions, zones, dev_count,
balance, dispersion_trailer))
print('The minimum number of hours before a partition can be '
'reassigned is %s' % builder.min_part_hours)
'reassigned is %s (%s remaining)' % (
builder.min_part_hours,
timedelta(seconds=builder.min_part_seconds_left)))
print('The overload factor is %0.2f%% (%.6f)' % (
builder.overload * 100, builder.overload))
if builder.devs:
@ -787,6 +790,14 @@ swift-ring-builder <builder_file> rebalance [options]
handler.setFormatter(formatter)
logger.addHandler(handler)
if builder.min_part_seconds_left > 0 and not options.force:
print('No partitions could be reassigned.')
print('The time between rebalances must be at least '
'min_part_hours: %s hours (%s remaining)' % (
builder.min_part_hours,
timedelta(seconds=builder.min_part_seconds_left)))
exit(EXIT_WARNING)
devs_changed = builder.devs_changed
try:
last_balance = builder.get_balance()
@ -802,8 +813,7 @@ swift-ring-builder <builder_file> rebalance [options]
exit(EXIT_ERROR)
if not (parts or options.force or removed_devs):
print('No partitions could be reassigned.')
print('Either none need to be or none can be due to '
'min_part_hours [%s].' % builder.min_part_hours)
print('There is no need to do so at this time')
exit(EXIT_WARNING)
# If we set device's weight to zero, currently balance will be set
# special value(MAX_BALANCE) until zero weighted device return all
@ -1029,6 +1039,19 @@ swift-ring-builder <ring_file> write_builder [min_part_hours]
builder.save(builder_file)
def pretend_min_part_hours_passed():
"""
swift-ring-builder <builder_file> pretend_min_part_hours_passed
Resets the clock on the last time a rebalance happened, thus
circumventing the min_part_hours check.
*****************************
USE THIS WITH EXTREME CAUTION
*****************************
If you run this command and deploy rebalanced rings before a replication
pass completes, you may introduce unavailability in your cluster. This
has an end-user impact.
"""
builder.pretend_min_part_hours_passed()
builder.save(builder_file)
exit(EXIT_SUCCESS)
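For intuition about the figures printed above, here is a toy
reconstruction of the lockout arithmetic. Only min_part_hours and
min_part_seconds_left appear in the diff; the _last_rebalance attribute
is a guess made for illustration.

import time
from datetime import timedelta

class ToyBuilder(object):
    """Illustrative stand-in for RingBuilder's lockout bookkeeping."""
    def __init__(self, min_part_hours, last_rebalance):
        self.min_part_hours = min_part_hours
        self._last_rebalance = last_rebalance  # guessed attribute name

    @property
    def min_part_seconds_left(self):
        elapsed = time.time() - self._last_rebalance
        return max(0, int(self.min_part_hours * 3600 - elapsed))

    def pretend_min_part_hours_passed(self):
        # Reset the clock so the lockout reads as already expired.
        self._last_rebalance = time.time() - self.min_part_hours * 3600

b = ToyBuilder(min_part_hours=1, last_rebalance=time.time() - 600)
print(timedelta(seconds=b.min_part_seconds_left))  # ~0:50:00 remaining
b.pretend_min_part_hours_passed()
print(timedelta(seconds=b.min_part_seconds_left))  # 0:00:00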


@ -174,10 +174,11 @@ class Replicator(Daemon):
if not self.rsync_module:
self.rsync_module = '{replication_ip}::%s' % self.server_type
if config_true_value(conf.get('vm_test_mode', 'no')):
self.logger.warn('Option %(type)s-replicator/vm_test_mode is '
'deprecated and will be removed in a future '
'version. Update your configuration to use '
'option %(type)s-replicator/rsync_module.'
self.logger.warning('Option %(type)s-replicator/vm_test_mode '
'is deprecated and will be removed in a '
'future version. Update your configuration'
' to use option %(type)s-replicator/'
'rsync_module.'
% {'type': self.server_type})
self.rsync_module += '{replication_port}'
self.reclaim_age = float(conf.get('reclaim_age', 86400 * 7))
@ -632,7 +633,7 @@ class Replicator(Daemon):
[(failure_dev['replication_ip'],
failure_dev['device'])
for failure_dev in self.ring.devs if failure_dev])
self.logger.warn(
self.logger.warning(
_('Skipping %(device)s as it is not mounted') % node)
continue
unlink_older_than(


@ -50,7 +50,7 @@ class DirectClientException(ClientException):
def _get_direct_account_container(path, stype, node, part,
account, marker=None, limit=None,
marker=None, limit=None,
prefix=None, delimiter=None, conn_timeout=5,
response_timeout=15):
"""Base class for get direct account and container.
@ -113,7 +113,7 @@ def direct_get_account(node, part, account, marker=None, limit=None,
"""
path = '/' + account
return _get_direct_account_container(path, "Account", node, part,
account, marker=marker,
marker=marker,
limit=limit, prefix=prefix,
delimiter=delimiter,
conn_timeout=conn_timeout,
@ -189,7 +189,7 @@ def direct_get_container(node, part, account, container, marker=None,
"""
path = '/%s/%s' % (account, container)
return _get_direct_account_container(path, "Container", node,
part, account, marker=marker,
part, marker=marker,
limit=limit, prefix=prefix,
delimiter=delimiter,
conn_timeout=conn_timeout,


@ -26,9 +26,10 @@ from swift import gettext_ as _
from time import gmtime, strftime, time
from zlib import compressobj
from swift.common.utils import quote
from swift.common.exceptions import ClientException
from swift.common.http import HTTP_NOT_FOUND, HTTP_MULTIPLE_CHOICES
from swift.common.swob import Request
from swift.common.utils import quote
from swift.common.wsgi import loadapp, pipeline_property
@ -807,8 +808,13 @@ class SimpleClient(object):
self.attempts += 1
try:
return self.base_request(method, **kwargs)
except (socket.error, httplib.HTTPException, urllib2.URLError):
except (socket.error, httplib.HTTPException, urllib2.URLError) \
as err:
if self.attempts > retries:
if isinstance(err, urllib2.HTTPError):
raise ClientException('Raise too many retries',
http_status=err.getcode())
else:
raise
sleep(backoff)
backoff = min(backoff * 2, self.max_backoff)


@ -162,6 +162,16 @@ def safe_kill(pid, sig, name):
os.kill(pid, sig)
def kill_group(pid, sig):
"""Send signal to process group
:param pid: process id
:param sig: signal to send
"""
# Negative PID means process group
os.kill(-pid, sig)
class UnknownCommandError(Exception):
pass
@ -285,9 +295,25 @@ class Manager(object):
return 0
# reached interval n watch_pids w/o killing all servers
kill_after_timeout = kwargs.get('kill_after_timeout', False)
for server, pids in server_pids.items():
if not killed_pids.issuperset(pids):
# some pids of this server were not killed
if kill_after_timeout:
print(_('Waited %s seconds for %s to die; killing') % (
kill_wait, server))
# Send SIGKILL to all remaining pids
for pid in set(pids.keys()) - killed_pids:
print(_('Signal %s pid: %s signal: %s') % (
server, pid, signal.SIGKILL))
# Send SIGKILL to process group
try:
kill_group(pid, signal.SIGKILL)
except OSError as e:
# PID died before kill_group can take action?
if e.errno != errno.ESRCH:
raise e
else:
print(_('Waited %s seconds for %s to die; giving up') % (
kill_wait, server))
return 1
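The negative-PID call in kill_group() above uses the POSIX rule that
kill(-pgid, sig) signals every member of a process group. A standalone
demonstration (POSIX only; this is not Swift code):

import errno
import os
import signal
import subprocess
import time

# Start a child in its own session, making it a process-group leader
# (so its pgid equals its pid), as daemonized servers typically are.
child = subprocess.Popen(["sleep", "60"], preexec_fn=os.setsid)
time.sleep(0.2)

try:
    # Same call shape as kill_group(): negative PID targets the group.
    os.kill(-child.pid, signal.SIGKILL)
except OSError as e:
    if e.errno != errno.ESRCH:  # group already gone is not an error
        raise

print(child.wait())  # -9: terminated by SIGKILL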


@ -47,6 +47,7 @@ http://github.com/memcached/memcached/blob/1.4.2/doc/protocol.txt
import six.moves.cPickle as pickle
import json
import logging
import re
import time
from bisect import bisect
from swift import gettext_ as _
@ -101,23 +102,58 @@ class MemcachePoolTimeout(Timeout):
class MemcacheConnPool(Pool):
"""Connection pool for Memcache Connections"""
"""
Connection pool for Memcache Connections
The *server* parameter can be a hostname, an IPv4 address, or an IPv6
address with an optional port. If an IPv6 address is specified it **must**
be enclosed in [], like *[::1]* or *[::1]:11211*. This follows the accepted
prescription for `IPv6 host literals`_.
Examples::
memcache.local:11211
127.0.0.1:11211
[::1]:11211
[::1]
.. _IPv6 host literals: https://tools.ietf.org/html/rfc3986#section-3.2.2
"""
IPV6_RE = re.compile("^\[(?P<address>.*)\](:(?P<port>[0-9]+))?$")
def __init__(self, server, size, connect_timeout):
Pool.__init__(self, max_size=size)
self.server = server
self._connect_timeout = connect_timeout
def create(self):
def _get_addr(self):
port = DEFAULT_MEMCACHED_PORT
# IPv6 addresses must be between '[]'
if self.server.startswith('['):
match = MemcacheConnPool.IPV6_RE.match(self.server)
if not match:
raise ValueError("Invalid IPv6 address: %s" % self.server)
host = match.group('address')
port = match.group('port') or port
else:
if ':' in self.server:
host, port = self.server.split(':')
tokens = self.server.split(':')
if len(tokens) > 2:
raise ValueError("IPv6 addresses must be between '[]'")
host, port = tokens
else:
host = self.server
port = DEFAULT_MEMCACHED_PORT
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
return (host, port)
def create(self):
host, port = self._get_addr()
addrs = socket.getaddrinfo(host, port, socket.AF_UNSPEC,
socket.SOCK_STREAM)
family, socktype, proto, canonname, sockaddr = addrs[0]
sock = socket.socket(family, socket.SOCK_STREAM)
sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
with Timeout(self._connect_timeout):
sock.connect((host, int(port)))
sock.connect(sockaddr)
return (sock.makefile(), sock)
def get(self):
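To see what the new address handling accepts, here is the bracketed-IPv6
logic from _get_addr extracted into a standalone function. The regex and
error strings are from the diff; the function name is ours.

import re

DEFAULT_MEMCACHED_PORT = 11211
IPV6_RE = re.compile(r"^\[(?P<address>.*)\](:(?P<port>[0-9]+))?$")

def parse_memcache_server(server):
    port = DEFAULT_MEMCACHED_PORT
    if server.startswith('['):
        match = IPV6_RE.match(server)
        if not match:
            raise ValueError("Invalid IPv6 address: %s" % server)
        host = match.group('address')
        port = match.group('port') or port
    else:
        if ':' in server:
            tokens = server.split(':')
            if len(tokens) > 2:
                raise ValueError("IPv6 addresses must be between '[]'")
            host, port = tokens
        else:
            host = server
    return host, int(port)

print(parse_memcache_server("127.0.0.1:11211"))  # ('127.0.0.1', 11211)
print(parse_memcache_server("[::1]"))            # ('::1', 11211)
print(parse_memcache_server("[::1]:11211"))      # ('::1', 11211)
# parse_memcache_server("::1") raises ValueError: bare IPv6 needs []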
@ -223,7 +259,7 @@ class MemcacheRing(object):
"""Returns a server connection to the pool."""
self._client_cache[server].put((fp, sock))
def set(self, key, value, serialize=True, timeout=0, time=0,
def set(self, key, value, serialize=True, time=0,
min_compress_len=0):
"""
Set a key/value pair in memcache
@ -233,22 +269,14 @@ class MemcacheRing(object):
:param serialize: if True, value is serialized with JSON before sending
to memcache, or with pickle if configured to use
pickle instead of JSON (to avoid cache poisoning)
:param timeout: ttl in memcache, this parameter is now deprecated. It
will be removed in next release of OpenStack,
use time parameter instead in the future
:time: equivalent to timeout, this parameter is added to keep the
signature compatible with python-memcached interface. This
implementation will take this value and sign it to the
parameter timeout
:param time: the time to live
:min_compress_len: minimum compress length, this parameter was added
to keep the signature compatible with
python-memcached interface. This implementation
ignores it.
"""
key = md5hash(key)
if timeout:
logging.warn("parameter timeout has been deprecated, use time")
timeout = sanitize_timeout(time or timeout)
timeout = sanitize_timeout(time)
flags = 0
if serialize and self._allow_pickle:
value = pickle.dumps(value, PICKLE_PROTOCOL)
@ -302,7 +330,7 @@ class MemcacheRing(object):
except (Exception, Timeout) as e:
self._exception_occurred(server, e, sock=sock, fp=fp)
def incr(self, key, delta=1, time=0, timeout=0):
def incr(self, key, delta=1, time=0):
"""
Increments a key which has a numeric value by delta.
If the key can't be found, it's added as delta or 0 if delta < 0.
@ -315,22 +343,16 @@ class MemcacheRing(object):
:param key: key
:param delta: amount to add to the value of key (or set as the value
if the key is not found) will be cast to an int
:param time: the time to live. This parameter deprecates parameter
timeout. The addition of this parameter is to make the
interface consistent with set and set_multi methods
:param timeout: ttl in memcache, deprecated, will be removed in future
OpenStack releases
:param time: the time to live
:returns: result of incrementing
:raises MemcacheConnectionError:
"""
if timeout:
logging.warn("parameter timeout has been deprecated, use time")
key = md5hash(key)
command = 'incr'
if delta < 0:
command = 'decr'
delta = str(abs(int(delta)))
timeout = sanitize_timeout(time or timeout)
timeout = sanitize_timeout(time)
for (server, fp, sock) in self._get_conns(key):
try:
with Timeout(self._io_timeout):
@ -358,7 +380,7 @@ class MemcacheRing(object):
self._exception_occurred(server, e, sock=sock, fp=fp)
raise MemcacheConnectionError("No Memcached connections succeeded.")
def decr(self, key, delta=1, time=0, timeout=0):
def decr(self, key, delta=1, time=0):
"""
Decrements a key which has a numeric value by delta. Calls incr with
-delta.
@ -367,18 +389,11 @@ class MemcacheRing(object):
:param delta: amount to subtract to the value of key (or set the
value to 0 if the key is not found) will be cast to
an int
:param time: the time to live. This parameter depcates parameter
timeout. The addition of this parameter is to make the
interface consistent with set and set_multi methods
:param timeout: ttl in memcache, deprecated, will be removed in future
OpenStack releases
:param time: the time to live
:returns: result of decrementing
:raises MemcacheConnectionError:
"""
if timeout:
logging.warn("parameter timeout has been deprecated, use time")
return self.incr(key, delta=-delta, time=(time or timeout))
return self.incr(key, delta=-delta, time=time)
def delete(self, key):
"""
@ -398,8 +413,8 @@ class MemcacheRing(object):
except (Exception, Timeout) as e:
self._exception_occurred(server, e, sock=sock, fp=fp)
def set_multi(self, mapping, server_key, serialize=True, timeout=0,
time=0, min_compress_len=0):
def set_multi(self, mapping, server_key, serialize=True, time=0,
min_compress_len=0):
"""
Sets multiple key/value pairs in memcache.
@ -409,23 +424,14 @@ class MemcacheRing(object):
:param serialize: if True, value is serialized with JSON before sending
to memcache, or with pickle if configured to use
pickle instead of JSON (to avoid cache poisoning)
:param timeout: ttl for memcache. This parameter is now deprecated, it
will be removed in next release of OpenStack, use time
parameter instead in the future
:time: equalvent to timeout, this parameter is added to keep the
signature compatible with python-memcached interface. This
implementation will take this value and sign it to parameter
timeout
:param time: the time to live
:min_compress_len: minimum compress length, this parameter was added
to keep the signature compatible with
python-memcached interface. This implementation
ignores it
"""
if timeout:
logging.warn("parameter timeout has been deprecated, use time")
server_key = md5hash(server_key)
timeout = sanitize_timeout(time or timeout)
timeout = sanitize_timeout(time)
msg = ''
for key, value in mapping.items():
key = md5hash(key)
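With the deprecated timeout keyword gone, time is the single TTL
parameter across set, incr, decr and set_multi. A short usage sketch;
MemcacheRing and the method signatures come from the diff, and a
reachable memcached is assumed.

from swift.common.memcached import MemcacheRing

cache = MemcacheRing(['127.0.0.1:11211', '[::1]:11211'])

# 'time' is the TTL in seconds; passing 'timeout=' would now be an error.
cache.set('token/abc123', ('cached', 'value'), time=300)
cache.incr('hits', delta=1, time=3600)
cache.decr('slots', delta=1, time=3600)
cache.set_multi({'a': 1, 'b': 2}, 'server-key', time=60)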


@ -210,7 +210,7 @@ class DecrypterObjContext(BaseDecrypterContext):
if not body_crypto_meta:
# TODO should this be an error i.e. should we never expect to get
# if keymaster is behaving correctly and sets crypto.override flag?
self.logger.warn("Warning: No sysmeta-crypto-meta for body.")
self.logger.warning("Warning: No sysmeta-crypto-meta for body.")
return None
content_range = self._response_header_value('Content-Range')


@ -416,13 +416,12 @@ class DynamicLargeObject(object):
return GetContext(self, self.logger).\
handle_request(req, start_response)
elif req.method == 'PUT':
error_response = self.validate_x_object_manifest_header(
req, start_response)
error_response = self._validate_x_object_manifest_header(req)
if error_response:
return error_response(env, start_response)
return self.app(env, start_response)
def validate_x_object_manifest_header(self, req, start_response):
def _validate_x_object_manifest_header(self, req):
"""
Make sure that X-Object-Manifest is valid if present.
"""


@ -160,7 +160,7 @@ class FakeFootersContext(WSGIContext):
# If there is a duplicate header, then the encrypter
# is not starting with a clean slate "crypto-wise",
# and needs to remove the associated remnant header.
self.logger.warn("Replacing Remnant header: %s" % h.lower())
self.logger.warning("Replacing Remnant header: %s" % h.lower())
else:
mod_resp_headers.append((h, v))


@ -138,7 +138,7 @@ class KeyMasterContext(WSGIContext):
try:
obj_key_path = base64.b64decode(obj_key_path)
except TypeError:
self.logger.warn("path %s could not be decoded" %
self.logger.warning("path %s could not be decoded" %
obj_key_path)
raise ValueError("path %s could not be decoded" %
obj_key_path)


@ -325,7 +325,7 @@ class KeystoneAuth(object):
# unknown domain, update if req confirms domain
new_id = req_id or ''
elif req_has_id and sysmeta_id != req_id:
self.logger.warn("Inconsistent project domain id: " +
self.logger.warning("Inconsistent project domain id: " +
"%s in token vs %s in account metadata."
% (req_id, sysmeta_id))


@ -57,12 +57,11 @@ The format of the list will be:
"range": "1048576-2097151"}, ...]
The number of object segments is limited to a configurable amount, default
1000. Each segment, except for the final one, must be at least 1 megabyte
(configurable). On upload, the middleware will head every segment passed in to
verify:
1000. Each segment must be at least 1 byte. On upload, the middleware will
head every segment passed in to verify:
1. the segment exists (i.e. the HEAD was successful);
2. the segment meets minimum size requirements (if not the last segment);
2. the segment meets minimum size requirements;
3. if the user provided a non-null etag, the etag matches;
4. if the user provided a non-null size_bytes, the size_bytes matches; and
5. if the user provided a range, it is a singular, syntactically correct range
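
For illustration, a hypothetical two-segment manifest body that satisfies
these rules (the paths, etag, and sizes are made up):

    [{"path": "/cont/object_seg_1",
      "etag": "d41d8cd98f00b204e9800998ecf8427e",
      "size_bytes": 1048576},
     {"path": "/cont/object_seg_2",
      "etag": null,
      "size_bytes": null,
      "range": "0-1023"}]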
@ -121,8 +120,9 @@ finally bytes 2095104 through 2097152 (i.e., the last 2048 bytes) of
.. note::
The minimum sized range is min_segment_size, which by
default is 1048576 (1MB).
The minimum sized range is 1 byte. This is the same as the minimum
segment size.
-------------------------
@ -221,7 +221,7 @@ from swift.common.middleware.bulk import get_response_body, \
ACCEPTABLE_FORMATS, Bulk
DEFAULT_MIN_SEGMENT_SIZE = 1024 * 1024 # 1 MiB
DEFAULT_RATE_LIMIT_UNDER_SIZE = 1024 * 1024 # 1 MiB
DEFAULT_MAX_MANIFEST_SEGMENTS = 1000
DEFAULT_MAX_MANIFEST_SIZE = 1024 * 1024 * 2 # 2 MiB
@ -231,7 +231,7 @@ OPTIONAL_SLO_KEYS = set(['range'])
ALLOWED_SLO_KEYS = REQUIRED_SLO_KEYS | OPTIONAL_SLO_KEYS
def parse_and_validate_input(req_body, req_path, min_segment_size):
def parse_and_validate_input(req_body, req_path):
"""
Given a request body, parses it and returns a list of dictionaries.
@ -269,7 +269,6 @@ def parse_and_validate_input(req_body, req_path, min_segment_size):
vrs, account, _junk = split_path(req_path, 3, 3, True)
errors = []
num_segs = len(parsed_data)
for seg_index, seg_dict in enumerate(parsed_data):
if not isinstance(seg_dict, dict):
errors.append("Index %d: not a JSON object" % seg_index)
@ -315,10 +314,10 @@ def parse_and_validate_input(req_body, req_path, min_segment_size):
except (TypeError, ValueError):
errors.append("Index %d: invalid size_bytes" % seg_index)
continue
if (seg_size < min_segment_size and seg_index < num_segs - 1):
errors.append("Index %d: too small; each segment, except "
"the last, must be at least %d bytes."
% (seg_index, min_segment_size))
if seg_size < 1:
errors.append("Index %d: too small; each segment must be "
"at least 1 byte."
% (seg_index,))
continue
obj_path = '/'.join(['', vrs, account, seg_dict['path'].lstrip('/')])
@ -461,13 +460,13 @@ class SloGetContext(WSGIContext):
# no bytes are needed from this or any future segment
break
range = seg_dict.get('range')
if range is None:
seg_range = seg_dict.get('range')
if seg_range is None:
range_start, range_end = 0, seg_length - 1
else:
# We already validated and supplied concrete values
# for the range on upload
range_start, range_end = map(int, range.split('-'))
range_start, range_end = map(int, seg_range.split('-'))
if config_true_value(seg_dict.get('sub_slo')):
# do this check here so that we can avoid fetching this last
@ -662,10 +661,17 @@ class SloGetContext(WSGIContext):
plain_listing_iter = self._segment_listing_iterator(
req, ver, account, segments)
def is_small_segment((seg_dict, start_byte, end_byte)):
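# a segment counts as "small" if the byte range actually being served
# from it is under the rate_limit_under_size threshold configured above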
start = 0 if start_byte is None else start_byte
end = int(seg_dict['bytes']) - 1 if end_byte is None else end_byte
is_small = (end - start + 1) < self.slo.rate_limit_under_size
return is_small
ratelimited_listing_iter = RateLimitedIterator(
plain_listing_iter,
self.slo.rate_limit_segments_per_sec,
limit_after=self.slo.rate_limit_after_segment)
limit_after=self.slo.rate_limit_after_segment,
ratelimit_if=is_small_segment)
# self._segment_listing_iterator gives us 3-tuples of (segment dict,
# start byte, end byte), but SegmentedIterable wants (obj path, etag,
@ -716,7 +722,7 @@ class StaticLargeObject(object):
:param conf: The configuration dict for the middleware.
"""
def __init__(self, app, conf, min_segment_size=DEFAULT_MIN_SEGMENT_SIZE,
def __init__(self, app, conf,
max_manifest_segments=DEFAULT_MAX_MANIFEST_SEGMENTS,
max_manifest_size=DEFAULT_MAX_MANIFEST_SIZE):
self.conf = conf
@ -724,12 +730,13 @@ class StaticLargeObject(object):
self.logger = get_logger(conf, log_route='slo')
self.max_manifest_segments = max_manifest_segments
self.max_manifest_size = max_manifest_size
self.min_segment_size = min_segment_size
self.max_get_time = int(self.conf.get('max_get_time', 86400))
self.rate_limit_under_size = int(self.conf.get(
'rate_limit_under_size', DEFAULT_RATE_LIMIT_UNDER_SIZE))
self.rate_limit_after_segment = int(self.conf.get(
'rate_limit_after_segment', '10'))
self.rate_limit_segments_per_sec = int(self.conf.get(
'rate_limit_segments_per_sec', '0'))
'rate_limit_segments_per_sec', '1'))
self.bulk_deleter = Bulk(app, {}, logger=self.logger)
def handle_multipart_get_or_head(self, req, start_response):
@ -783,7 +790,7 @@ class StaticLargeObject(object):
raise HTTPLengthRequired(request=req)
parsed_data = parse_and_validate_input(
req.body_file.read(self.max_manifest_size),
req.path, self.min_segment_size)
req.path)
problem_segments = []
if len(parsed_data) > self.max_manifest_segments:
@ -812,6 +819,7 @@ class StaticLargeObject(object):
new_env['CONTENT_LENGTH'] = 0
new_env['HTTP_USER_AGENT'] = \
'%s MultipartPUT' % req.environ.get('HTTP_USER_AGENT')
if obj_path != last_obj_path:
last_obj_path = obj_path
head_seg_resp = \
@ -840,12 +848,10 @@ class StaticLargeObject(object):
seg_dict['range'] = '%d-%d' % (rng[0], rng[1] - 1)
segment_length = rng[1] - rng[0]
if segment_length < self.min_segment_size and \
index < len(parsed_data) - 1:
if segment_length < 1:
problem_segments.append(
[quote(obj_name),
'Too small; each segment, except the last, must be '
'at least %d bytes.' % self.min_segment_size])
'Too small; each segment must be at least 1 byte.'])
total_size += segment_length
if seg_dict['size_bytes'] is not None and \
seg_dict['size_bytes'] != head_seg_resp.content_length:
@ -1045,18 +1051,17 @@ def filter_factory(global_conf, **local_conf):
DEFAULT_MAX_MANIFEST_SEGMENTS))
max_manifest_size = int(conf.get('max_manifest_size',
DEFAULT_MAX_MANIFEST_SIZE))
min_segment_size = int(conf.get('min_segment_size',
DEFAULT_MIN_SEGMENT_SIZE))
register_swift_info('slo',
max_manifest_segments=max_manifest_segments,
max_manifest_size=max_manifest_size,
min_segment_size=min_segment_size)
# this used to be configurable; report it as 1 for
# clients that might still care
min_segment_size=1)
def slo_filter(app):
return StaticLargeObject(
app, conf,
max_manifest_segments=max_manifest_segments,
max_manifest_size=max_manifest_size,
min_segment_size=min_segment_size)
max_manifest_size=max_manifest_size)
return slo_filter


@ -38,6 +38,9 @@ from swift.common.utils import config_read_reseller_options
from swift.proxy.controllers.base import get_account_info
DEFAULT_TOKEN_LIFE = 86400
class TempAuth(object):
"""
Test authentication and authorization system.
@ -181,7 +184,7 @@ class TempAuth(object):
self.auth_prefix = '/' + self.auth_prefix
if not self.auth_prefix.endswith('/'):
self.auth_prefix += '/'
self.token_life = int(conf.get('token_life', 86400))
self.token_life = int(conf.get('token_life', DEFAULT_TOKEN_LIFE))
self.allow_overrides = config_true_value(
conf.get('allow_overrides', 't'))
self.storage_url_scheme = conf.get('storage_url_scheme', 'default')
@ -429,10 +432,12 @@ class TempAuth(object):
try:
acls = acls_from_account_info(info)
except ValueError as e1:
self.logger.warn("Invalid ACL stored in metadata: %r" % e1)
self.logger.warning("Invalid ACL stored in metadata: %r" % e1)
return None
except NotImplementedError as e2:
self.logger.warn("ACL version exceeds middleware version: %r" % e2)
self.logger.warning(
"ACL version exceeds middleware version: %r"
% e2)
return None
return acls
@ -629,7 +634,8 @@ class TempAuth(object):
req.start_time = time()
handler = None
try:
version, account, user, _junk = req.split_path(1, 4, True)
version, account, user, _junk = split_path(req.path_info,
1, 4, True)
except ValueError:
self.logger.increment('errors')
return HTTPNotFound(request=req)
@ -763,7 +769,8 @@ class TempAuth(object):
memcache_client.set(memcache_user_key, token,
time=float(expires - time()))
resp = Response(request=req, headers={
'x-auth-token': token, 'x-storage-token': token})
'x-auth-token': token, 'x-storage-token': token,
'x-auth-token-expires': str(int(expires - time()))})
url = self.users[account_user]['url'].replace('$HOST', resp.host_url)
if self.storage_url_scheme != 'default':
url = self.storage_url_scheme + ':' + url.split(':', 1)[1]
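
A hedged client-side sketch of the new expiry header (assuming a tempauth
user test:tester/testing and a proxy on localhost:8080; the requests
library here stands in for any HTTP client):

    import requests

    resp = requests.get('http://127.0.0.1:8080/auth/v1.0',
                        headers={'X-Auth-User': 'test:tester',
                                 'X-Auth-Key': 'testing'})
    token = resp.headers['X-Auth-Token']
    # new in this patch: seconds until the token expires
    expires_in = int(resp.headers['X-Auth-Token-Expires'])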


@ -375,7 +375,7 @@ class TempURL(object):
break
if not is_valid_hmac:
return self._invalid(env, start_response)
# disallowed headers prevent accidently allowing upload of a pointer
# disallowed headers prevent accidentally allowing upload of a pointer
# to data that the PUT tempurl would not otherwise allow access for.
# It should be safe to provide a GET tempurl for data that an
# untrusted client just uploaded with a PUT tempurl.
@ -540,7 +540,7 @@ class TempURL(object):
def _clean_disallowed_headers(self, env, start_response):
"""
Validate the absense of disallowed headers for "unsafe" operations.
Validate the absence of disallowed headers for "unsafe" operations.
:returns: None for safe operations or swob.HTTPBadResponse if the
request includes disallowed headers.

View File

@ -481,6 +481,9 @@ class SegmentedIterable(object):
self.logger.exception(_('ERROR: An error occurred '
'while retrieving segments'))
raise
finally:
if self.current_resp:
close_if_possible(self.current_resp.app_iter)
def app_iter_range(self, *a, **kw):
"""
@ -523,5 +526,4 @@ class SegmentedIterable(object):
Called when the client disconnects. Ensure that the connection to the
backend server is closed.
"""
if self.current_resp:
close_if_possible(self.current_resp.app_iter)
close_if_possible(self.app_iter)


@ -139,6 +139,12 @@ class RingBuilder(object):
finally:
self.logger.disabled = True
@property
def min_part_seconds_left(self):
"""Get the total seconds until a rebalance can be performed"""
elapsed_seconds = int(time() - self._last_part_moves_epoch)
return max((self.min_part_hours * 3600) - elapsed_seconds, 0)
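
A tiny sketch of the new property (the arguments are illustrative):

    from swift.common.ring import RingBuilder

    builder = RingBuilder(10, 3, 1)  # part_power, replicas, min_part_hours
    # 0 before any rebalance; counts down from 3600 once parts have moved
    print(builder.min_part_seconds_left)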
def weight_of_one_part(self):
"""
Returns the weight of each partition as calculated from the
@ -336,7 +342,10 @@ class RingBuilder(object):
if 'id' not in dev:
dev['id'] = 0
if self.devs:
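# reuse the lowest id vacated by a removed device (a None slot), if any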
dev['id'] = max(d['id'] for d in self.devs if d) + 1
try:
dev['id'] = self.devs.index(None)
except ValueError:
dev['id'] = len(self.devs)
if dev['id'] < len(self.devs) and self.devs[dev['id']] is not None:
raise exceptions.DuplicateDeviceError(
'Duplicate device id: %d' % dev['id'])
@ -729,11 +738,12 @@ class RingBuilder(object):
def pretend_min_part_hours_passed(self):
"""
Override min_part_hours by marking all partitions as having been moved
255 hours ago. This can be used to force a full rebalance on the next
call to rebalance.
255 hours ago and last move epoch to 'the beginning of time'. This can
be used to force a full rebalance on the next call to rebalance.
"""
for part in range(self.parts):
self._last_part_moves[part] = 0xff
self._last_part_moves_epoch = 0
def get_part_devices(self, part):
"""
@ -835,6 +845,8 @@ class RingBuilder(object):
more recently than min_part_hours.
"""
elapsed_hours = int(time() - self._last_part_moves_epoch) / 3600
if elapsed_hours <= 0:
return
for part in range(self.parts):
# The "min(self._last_part_moves[part] + elapsed_hours, 0xff)"
# which was here showed up in profiling, so it got inlined.


@ -15,7 +15,6 @@
import array
import six.moves.cPickle as pickle
import inspect
import json
from collections import defaultdict
from gzip import GzipFile
@ -135,15 +134,8 @@ class RingData(object):
# Override the timestamp so that the same ring data creates
# the same bytes on disk. This makes a checksum comparison a
# good way to see if two rings are identical.
#
# This only works on Python 2.7; on 2.6, we always get the
# current time in the gzip output.
tempf = NamedTemporaryFile(dir=".", prefix=filename, delete=False)
if 'mtime' in inspect.getargspec(GzipFile.__init__).args:
gz_file = GzipFile(filename, mode='wb', fileobj=tempf,
mtime=mtime)
else:
gz_file = GzipFile(filename, mode='wb', fileobj=tempf)
gz_file = GzipFile(filename, mode='wb', fileobj=tempf, mtime=mtime)
self.serialize_v1(gz_file)
gz_file.close()
tempf.flush()
@ -203,12 +195,23 @@ class Ring(object):
# Do this now, when we know the data has changed, rather than
# doing it on every call to get_more_nodes().
#
# Since this is to speed up the finding of handoffs, we only
# consider devices with at least one partition assigned. This
# way, a region, zone, or server with no partitions assigned
# does not count toward our totals, thereby keeping the early
# bailouts in get_more_nodes() working.
dev_ids_with_parts = set()
for part2dev_id in self._replica2part2dev_id:
for dev_id in part2dev_id:
dev_ids_with_parts.add(dev_id)
regions = set()
zones = set()
ips = set()
self._num_devs = 0
for dev in self._devs:
if dev:
if dev and dev['id'] in dev_ids_with_parts:
regions.add(dev['region'])
zones.add((dev['region'], dev['zone']))
ips.add((dev['region'], dev['zone'], dev['ip']))

swift/common/storage_policy.py Normal file → Executable file

@ -16,11 +16,9 @@ import os
import string
import textwrap
import six
from six.moves.configparser import ConfigParser
from swift.common.utils import (
config_true_value, SWIFT_CONF_FILE, whataremyips)
config_true_value, SWIFT_CONF_FILE, whataremyips, list_from_csv)
from swift.common.ring import Ring, RingData
from swift.common.utils import quorum_size
from swift.common.exceptions import RingValidationError
@ -84,7 +82,6 @@ class BindPortsCache(object):
class PolicyError(ValueError):
def __init__(self, msg, index=None):
if index is not None:
msg += ', for index %r' % index
@ -161,7 +158,7 @@ class BaseStoragePolicy(object):
policy_type_to_policy_cls = {}
def __init__(self, idx, name='', is_default=False, is_deprecated=False,
object_ring=None):
object_ring=None, aliases=''):
# do not allow BaseStoragePolicy class to be instantiated directly
if type(self) == BaseStoragePolicy:
raise TypeError("Can't instantiate BaseStoragePolicy directly")
@ -172,18 +169,17 @@ class BaseStoragePolicy(object):
raise PolicyError('Invalid index', idx)
if self.idx < 0:
raise PolicyError('Invalid index', idx)
if not name:
self.alias_list = []
if not name or not self._validate_policy_name(name):
raise PolicyError('Invalid name %r' % name, idx)
# this is defensively restrictive, but could be expanded in the future
if not all(c in VALID_CHARS for c in name):
raise PolicyError('Names are used as HTTP headers, and can not '
'reliably contain any characters not in %r. '
'Invalid name %r' % (VALID_CHARS, name))
if name.upper() == LEGACY_POLICY_NAME.upper() and self.idx != 0:
msg = 'The name %s is reserved for policy index 0. ' \
'Invalid name %r' % (LEGACY_POLICY_NAME, name)
raise PolicyError(msg, idx)
self.name = name
self.alias_list.append(name)
if aliases:
names_list = list_from_csv(aliases)
for alias in names_list:
if alias == name:
continue
self._validate_policy_name(alias)
self.alias_list.append(alias)
self.is_deprecated = config_true_value(is_deprecated)
self.is_default = config_true_value(is_default)
if self.policy_type not in BaseStoragePolicy.policy_type_to_policy_cls:
@ -191,9 +187,23 @@ class BaseStoragePolicy(object):
if self.is_deprecated and self.is_default:
raise PolicyError('Deprecated policy can not be default. '
'Invalid config', self.idx)
self.ring_name = _get_policy_string('object', self.idx)
self.object_ring = object_ring
@property
def name(self):
return self.alias_list[0]
@name.setter
def name_setter(self, name):
self._validate_policy_name(name)
self.alias_list[0] = name
@property
def aliases(self):
return ", ".join(self.alias_list)
def __int__(self):
return self.idx
@ -203,7 +213,7 @@ class BaseStoragePolicy(object):
def __repr__(self):
return ("%s(%d, %r, is_default=%s, "
"is_deprecated=%s, policy_type=%r)") % \
(self.__class__.__name__, self.idx, self.name,
(self.__class__.__name__, self.idx, self.alias_list,
self.is_default, self.is_deprecated, self.policy_type)
@classmethod
@ -213,6 +223,7 @@ class BaseStoragePolicy(object):
their StoragePolicy class. This will also set the policy_type
attribute on the registered implementation.
"""
def register_wrapper(policy_cls):
if policy_type in cls.policy_type_to_policy_cls:
raise PolicyError(
@ -222,6 +233,7 @@ class BaseStoragePolicy(object):
cls.policy_type_to_policy_cls[policy_type] = policy_cls
policy_cls.policy_type = policy_type
return policy_cls
return register_wrapper
@classmethod
@ -231,6 +243,7 @@ class BaseStoragePolicy(object):
"""
return {
'name': 'name',
'aliases': 'aliases',
'policy_type': 'policy_type',
'default': 'is_default',
'deprecated': 'is_deprecated',
@ -269,6 +282,77 @@ class BaseStoragePolicy(object):
info.pop('policy_type')
return info
def _validate_policy_name(self, name):
"""
Helper function to determine the validity of a policy name. Used
to check policy names before setting them.
:param name: a name string for a single policy name.
:returns: True if the name is valid.
:raises: PolicyError if the policy name is invalid.
"""
# this is defensively restrictive, but could be expanded in the future
if not all(c in VALID_CHARS for c in name):
raise PolicyError('Names are used as HTTP headers, and can not '
'reliably contain any characters not in %r. '
'Invalid name %r' % (VALID_CHARS, name))
if name.upper() == LEGACY_POLICY_NAME.upper() and self.idx != 0:
msg = 'The name %s is reserved for policy index 0. ' \
'Invalid name %r' % (LEGACY_POLICY_NAME, name)
raise PolicyError(msg, self.idx)
if name.upper() in (existing_name.upper() for existing_name
in self.alias_list):
msg = 'The name %s is already assigned to this policy.' % name
raise PolicyError(msg, self.idx)
return True
def add_name(self, name):
"""
Adds an alias name to the storage policy. Shouldn't be called
directly from the storage policy but instead through the
storage policy collection class, so lookups by name resolve
correctly.
:param name: a new alias for the storage policy
"""
if self._validate_policy_name(name):
self.alias_list.append(name)
def remove_name(self, name):
"""
Removes an alias name from the storage policy. Shouldn't be called
directly from the storage policy but instead through the storage
policy collection class, so lookups by name resolve correctly. If
the name removed is the primary name then the next available alias
will be adopted as the new primary name.
:param name: a name assigned to the storage policy
"""
if name not in self.alias_list:
raise PolicyError("%s is not a name assigned to policy %s"
% (name, self.idx))
if len(self.alias_list) == 1:
raise PolicyError("Cannot remove only name %s from policy %s. "
"Policies must have at least one name."
% (name, self.idx))
else:
self.alias_list.remove(name)
def change_primary_name(self, name):
"""
Changes the primary/default name of the policy to a specified name.
:param name: a string name to replace the current primary name.
"""
if name == self.name:
return
elif name in self.alias_list:
self.remove_name(name)
else:
self._validate_policy_name(name)
self.alias_list.insert(0, name)
def _validate_ring(self):
"""
Hook, called when the ring is loaded. Can be used to
@ -329,13 +413,15 @@ class ECStoragePolicy(BaseStoragePolicy):
:func:`~swift.common.storage_policy.reload_storage_policies` to load
POLICIES from ``swift.conf``.
"""
def __init__(self, idx, name='', is_default=False,
def __init__(self, idx, name='', aliases='', is_default=False,
is_deprecated=False, object_ring=None,
ec_segment_size=DEFAULT_EC_OBJECT_SEGMENT_SIZE,
ec_type=None, ec_ndata=None, ec_nparity=None):
super(ECStoragePolicy, self).__init__(
idx, name, is_default, is_deprecated, object_ring)
idx=idx, name=name, aliases=aliases, is_default=is_default,
is_deprecated=is_deprecated, object_ring=object_ring)
# Validate erasure_coding policy specific members
# ec_type is one of the EC implementations supported by PyEClib
@ -441,8 +527,8 @@ class ECStoragePolicy(BaseStoragePolicy):
def __repr__(self):
return ("%s, EC config(ec_type=%s, ec_segment_size=%d, "
"ec_ndata=%d, ec_nparity=%d)") % (
super(ECStoragePolicy, self).__repr__(), self.ec_type,
"ec_ndata=%d, ec_nparity=%d)") % \
(super(ECStoragePolicy, self).__repr__(), self.ec_type,
self.ec_segment_size, self.ec_ndata, self.ec_nparity)
@classmethod
@ -532,6 +618,7 @@ class StoragePolicyCollection(object):
* Deprecated policies can not be declared the default
"""
def __init__(self, pols):
self.default = []
self.by_name = {}
@ -542,7 +629,8 @@ class StoragePolicyCollection(object):
"""
Add pre-validated policies to internal indexes.
"""
self.by_name[policy.name.upper()] = policy
for name in policy.alias_list:
self.by_name[name.upper()] = policy
self.by_index[int(policy)] = policy
def __repr__(self):
@ -570,9 +658,10 @@ class StoragePolicyCollection(object):
if int(policy) in self.by_index:
raise PolicyError('Duplicate index %s conflicts with %s' % (
policy, self.get_by_index(int(policy))))
if policy.name.upper() in self.by_name:
for name in policy.alias_list:
if name.upper() in self.by_name:
raise PolicyError('Duplicate name %s conflicts with %s' % (
policy, self.get_by_name(policy.name)))
policy, self.get_by_name(name)))
if policy.is_default:
if not self.default:
self.default = policy
@ -667,6 +756,62 @@ class StoragePolicyCollection(object):
policy_info.append(policy_entry)
return policy_info
def add_policy_alias(self, policy_index, *aliases):
"""
Adds a new name or names to a policy
:param policy_index: index of a policy in this policy collection.
:param *aliases: arbitrary number of string policy names to add.
"""
policy = self.get_by_index(policy_index)
for alias in aliases:
if alias.upper() in self.by_name:
raise PolicyError('Duplicate name %s in use '
'by policy %s' % (alias,
self.get_by_name(alias)))
else:
policy.add_name(alias)
self.by_name[alias.upper()] = policy
def remove_policy_alias(self, *aliases):
"""
Removes a name or names from a policy. If the name removed is the
primary name then the next available alias will be adopted
as the new primary name.
:param *aliases: arbitrary number of existing policy names to remove.
"""
for alias in aliases:
policy = self.get_by_name(alias)
if not policy:
raise PolicyError('No policy with name %s exists.' % alias)
if len(policy.alias_list) == 1:
raise PolicyError('Policy %s with name %s has only one name. '
'Policies must have at least one name.' % (
policy, alias))
else:
policy.remove_name(alias)
del self.by_name[alias.upper()]
def change_policy_primary_name(self, policy_index, new_name):
"""
Changes the primary or default name of a policy. The new primary
name can be an alias that already belongs to the policy or a
completely new name.
:param policy_index: index of a policy in this policy collection.
:param new_name: a string name to set as the new default name.
"""
policy = self.get_by_index(policy_index)
name_taken = self.get_by_name(new_name)
# if the name belongs to some other policy in the collection
if name_taken and name_taken != policy:
raise PolicyError('Other policy %s with name %s exists.' %
(self.get_by_name(new_name).idx, new_name))
else:
policy.change_primary_name(new_name)
self.by_name[new_name.upper()] = policy
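
A sketch of the alias workflow these methods provide (the policy names
are made up; StoragePolicy is the replication policy class registered
elsewhere in this module):

    from swift.common.storage_policy import (
        StoragePolicy, StoragePolicyCollection)

    policies = StoragePolicyCollection([
        StoragePolicy(0, name='gold', aliases='yellow', is_default=True),
        StoragePolicy(1, name='silver'),
    ])
    policies.add_policy_alias(1, 'grey')      # 'grey' now resolves to policy 1
    assert policies.get_by_name('grey').idx == 1
    policies.change_policy_primary_name(0, 'aurum')  # 'gold' stays as an alias
    policies.remove_policy_alias('yellow')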
def parse_storage_policies(conf):
"""


@ -389,7 +389,7 @@ def load_libc_function(func_name, log_error=True,
if fail_if_missing:
raise
if log_error:
logging.warn(_("Unable to locate %s in libc. Leaving as a "
logging.warning(_("Unable to locate %s in libc. Leaving as a "
"no-op."), func_name)
return noop_libc_function
@ -424,7 +424,7 @@ def get_log_line(req, res, trans_time, additional_info):
:param trans_time: the time the request took to complete, a float.
:param additional_info: a string to log at the end of the line
:returns: a properly formated line for logging.
:returns: a properly formatted line for logging.
"""
policy_index = get_policy_index(req.headers, res.headers)
@ -580,7 +580,7 @@ class FallocateWrapper(object):
if self.fallocate is not noop_libc_function:
break
if self.fallocate is noop_libc_function:
logging.warn(_("Unable to locate fallocate, posix_fallocate in "
logging.warning(_("Unable to locate fallocate, posix_fallocate in "
"libc. Leaving as a no-op."))
def __call__(self, fd, mode, offset, length):
@ -664,7 +664,7 @@ def fsync_dir(dirpath):
if err.errno == errno.ENOTDIR:
# Raise error if someone calls fsync_dir on a non-directory
raise
logging.warn(_("Unable to perform fsync() on directory %s: %s"),
logging.warning(_("Unable to perform fsync() on directory %s: %s"),
dirpath, os.strerror(err.errno))
finally:
if dirfd:
@ -686,7 +686,7 @@ def drop_buffer_cache(fd, offset, length):
ret = _posix_fadvise(fd, ctypes.c_uint64(offset),
ctypes.c_uint64(length), 4)
if ret != 0:
logging.warn("posix_fadvise64(%(fd)s, %(offset)s, %(length)s, 4) "
logging.warning("posix_fadvise64(%(fd)s, %(offset)s, %(length)s, 4) "
"-> %(ret)s", {'fd': fd, 'offset': offset,
'length': length, 'ret': ret})
@ -778,6 +778,10 @@ class Timestamp(object):
raise ValueError(
'delta must be greater than %d' % (-1 * self.raw))
self.timestamp = float(self.raw * PRECISION)
if self.timestamp < 0:
raise ValueError('timestamp cannot be negative')
if self.timestamp >= 10000000000:
raise ValueError('timestamp too large')
def __repr__(self):
return INTERNAL_FORMAT % (self.timestamp, self.offset)
@ -832,6 +836,9 @@ class Timestamp(object):
other = Timestamp(other)
return cmp(self.internal, other.internal)
def __hash__(self):
return hash(self.internal)
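
A small sketch of the tightened Timestamp behaviour (values illustrative):
out-of-range inputs now raise, and equal timestamps hash equally:

    from swift.common.utils import Timestamp

    ts = Timestamp(1453766426.60859)
    assert Timestamp(ts.internal) == ts              # round-trips via internal
    assert hash(Timestamp(ts.internal)) == hash(ts)  # now usable as dict keys
    try:
        Timestamp(-1)
    except ValueError:
        pass  # negative timestamps are rejected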
def normalize_timestamp(timestamp):
"""
@ -859,14 +866,10 @@ def last_modified_date_to_timestamp(last_modified_date_str):
start = datetime.datetime.strptime(last_modified_date_str,
'%Y-%m-%dT%H:%M:%S.%f')
delta = start - EPOCH
# TODO(sam): after we no longer support py2.6, this expression can
# simplify to Timestamp(delta.total_seconds()).
#
# This calculation is based on Python 2.7's Modules/datetimemodule.c,
# function delta_to_microseconds(), but written in Python.
return Timestamp(delta.days * 86400 +
delta.seconds +
delta.microseconds / 1000000.0)
return Timestamp(delta.total_seconds())
def normalize_delete_at_timestamp(timestamp):
@ -1041,22 +1044,27 @@ class RateLimitedIterator(object):
this many elements; default is 0 (rate limit
immediately)
"""
def __init__(self, iterable, elements_per_second, limit_after=0):
def __init__(self, iterable, elements_per_second, limit_after=0,
ratelimit_if=lambda _junk: True):
self.iterator = iter(iterable)
self.elements_per_second = elements_per_second
self.limit_after = limit_after
self.running_time = 0
self.ratelimit_if = ratelimit_if
def __iter__(self):
return self
def next(self):
next_value = next(self.iterator)
if self.ratelimit_if(next_value):
if self.limit_after > 0:
self.limit_after -= 1
else:
self.running_time = ratelimit_sleep(self.running_time,
self.elements_per_second)
return next(self.iterator)
return next_value
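
A hedged sketch of the new ratelimit_if hook (the predicate and numbers
are illustrative); only items matching the predicate are throttled, which
is how the SLO change above rate-limits only small segments:

    from swift.common.utils import RateLimitedIterator

    throttled = RateLimitedIterator(
        range(100), elements_per_second=10, limit_after=5,
        ratelimit_if=lambda item: item % 2 == 1)  # throttle odd items only
    for item in throttled:
        pass  # even items stream freely; odd ones at ~10/sec after the first 5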
class GreenthreadSafeIterator(object):
@ -1166,14 +1174,16 @@ class StatsdClient(object):
parts.append('@%s' % (sample_rate,))
else:
return
if six.PY3:
parts = [part.encode('utf-8') for part in parts]
# Ideally, we'd cache a sending socket in self, but that
# results in a socket getting shared by multiple green threads.
with closing(self._open_socket()) as sock:
try:
return sock.sendto('|'.join(parts), self._target)
return sock.sendto(b'|'.join(parts), self._target)
except IOError as err:
if self.logger:
self.logger.warn(
self.logger.warning(
'Error sending UDP message to %r: %s',
self._target, err)
@ -1227,7 +1237,7 @@ def timing_stats(**dec_kwargs):
swift's wsgi server controllers, based on response code.
"""
def decorating_func(func):
method = func.func_name
method = func.__name__
@functools.wraps(func)
def _timing_stats(ctrl, *args, **kwargs):
@ -1258,7 +1268,6 @@ class LogAdapter(logging.LoggerAdapter, object):
def __init__(self, logger, server):
logging.LoggerAdapter.__init__(self, logger, {})
self.server = server
setattr(self, 'warn', self.warning)
@property
def txn_id(self):
@ -3581,7 +3590,8 @@ def document_iters_to_http_response_body(ranges_iter, boundary, multipart,
except StopIteration:
pass
else:
logger.warn("More than one part in a single-part response?")
logger.warning(
"More than one part in a single-part response?")
return string_along(response_body_iter, ranges_iter, logger)


@ -1096,7 +1096,7 @@ def make_env(env, method=None, path=None, agent='Swift', query_string=None,
'SERVER_PROTOCOL', 'swift.cache', 'swift.source',
'swift.trans_id', 'swift.authorize_override',
'swift.authorize', 'HTTP_X_USER_ID', 'HTTP_X_PROJECT_ID',
'swift.metadata.checked'):
'HTTP_REFERER', 'swift.metadata.checked'):
if name in env:
newenv[name] = env[name]
if method:


@ -20,6 +20,7 @@ import time
from collections import defaultdict
from eventlet import Timeout
from swift.container.sync_store import ContainerSyncStore
from swift.container.backend import ContainerBroker, DATADIR
from swift.container.reconciler import (
MISPLACED_OBJECTS_ACCOUNT, incorrect_policy_index,
@ -189,6 +190,13 @@ class ContainerReplicator(db_replicator.Replicator):
def _post_replicate_hook(self, broker, info, responses):
if info['account'] == MISPLACED_OBJECTS_ACCOUNT:
return
try:
self.sync_store.update_sync_store(broker)
except Exception:
self.logger.exception('Failed to update sync_store %s' %
broker.db_file)
point = broker.get_reconciler_sync()
if not broker.has_multiple_policies() and info['max_row'] != point:
broker.update_reconciler_sync(info['max_row'])
@ -210,6 +218,13 @@ class ContainerReplicator(db_replicator.Replicator):
# this container shouldn't be here, make sure it's cleaned up
self.reconciler_cleanups[broker.container] = broker
return
try:
# DB is going to get deleted. Be preemptive about it
self.sync_store.remove_synced_container(broker)
except Exception:
self.logger.exception('Failed to remove sync_store entry %s' %
broker.db_file)
return super(ContainerReplicator, self).delete_db(broker)
def replicate_reconcilers(self):
@ -237,6 +252,9 @@ class ContainerReplicator(db_replicator.Replicator):
def run_once(self, *args, **kwargs):
self.reconciler_containers = {}
self.reconciler_cleanups = {}
self.sync_store = ContainerSyncStore(self.root,
self.logger,
self.mount_check)
rv = super(ContainerReplicator, self).run_once(*args, **kwargs)
if any([self.reconciler_containers, self.reconciler_cleanups]):
self.replicate_reconcilers()


@ -23,6 +23,7 @@ from xml.etree.cElementTree import Element, SubElement, tostring
from eventlet import Timeout
import swift.common.db
from swift.container.sync_store import ContainerSyncStore
from swift.container.backend import ContainerBroker, DATADIR
from swift.container.replicator import ContainerReplicatorRpc
from swift.common.db import DatabaseAlreadyExists
@ -110,6 +111,9 @@ class ContainerController(BaseStorageServer):
self.save_headers.append('x-versions-location')
swift.common.db.DB_PREALLOCATION = \
config_true_value(conf.get('db_preallocation', 'f'))
self.sync_store = ContainerSyncStore(self.root,
self.logger,
self.mount_check)
def _get_container_broker(self, drive, part, account, container, **kwargs):
"""
@ -242,6 +246,13 @@ class ContainerController(BaseStorageServer):
else:
return None
def _update_sync_store(self, broker, method):
try:
self.sync_store.update_sync_store(broker)
except Exception:
self.logger.exception('Failed to update sync_store %s during %s' %
(broker.db_file, method))
@public
@timing_stats()
def DELETE(self, req):
@ -276,6 +287,7 @@ class ContainerController(BaseStorageServer):
broker.delete_db(req_timestamp.internal)
if not broker.is_deleted():
return HTTPConflict(request=req)
self._update_sync_store(broker, 'DELETE')
resp = self.account_update(req, account, container, broker)
if resp:
return resp
@ -381,6 +393,8 @@ class ContainerController(BaseStorageServer):
broker.metadata['X-Container-Sync-To'][0]:
broker.set_x_container_sync_points(-1, -1)
broker.update_metadata(metadata, validate_metadata=True)
if metadata:
self._update_sync_store(broker, 'PUT')
resp = self.account_update(req, account, container, broker)
if resp:
return resp
@ -564,6 +578,7 @@ class ContainerController(BaseStorageServer):
broker.metadata['X-Container-Sync-To'][0]:
broker.set_x_container_sync_points(-1, -1)
broker.update_metadata(metadata, validate_metadata=True)
self._update_sync_store(broker, 'POST')
return HTTPNoContent(request=req)
def __call__(self, env, start_response):


@ -24,7 +24,9 @@ from struct import unpack_from
from eventlet import sleep, Timeout
import swift.common.db
from swift.container.backend import ContainerBroker, DATADIR
from swift.common.db import DatabaseConnectionError
from swift.container.backend import ContainerBroker
from swift.container.sync_store import ContainerSyncStore
from swift.common.container_sync_realms import ContainerSyncRealms
from swift.common.internal_client import (
delete_object, put_object, InternalClient, UnexpectedResponse)
@ -32,7 +34,7 @@ from swift.common.exceptions import ClientException
from swift.common.ring import Ring
from swift.common.ring.utils import is_local_device
from swift.common.utils import (
audit_location_generator, clean_content_type, config_true_value,
clean_content_type, config_true_value,
FileLikeIter, get_logger, hash_path, quote, urlparse, validate_sync_to,
whataremyips, Timestamp)
from swift.common.daemon import Daemon
@ -187,6 +189,10 @@ class ContainerSync(Daemon):
a.strip()
for a in conf.get('sync_proxy', '').split(',')
if a.strip()]
#: ContainerSyncStore instance for iterating over synced containers
self.sync_store = ContainerSyncStore(self.devices,
self.logger,
self.mount_check)
#: Number of containers with sync turned on that were successfully
#: synced.
self.container_syncs = 0
@ -194,7 +200,8 @@ class ContainerSync(Daemon):
self.container_deletes = 0
#: Number of successful PUTs triggered.
self.container_puts = 0
#: Number of containers that didn't have sync turned on.
#: Number of containers whose sync has been turned off, but
#: are not yet cleared from the sync store.
self.container_skips = 0
#: Number of containers that had a failure of some type.
self.container_failures = 0
@ -247,10 +254,7 @@ class ContainerSync(Daemon):
sleep(random() * self.interval)
while True:
begin = time()
all_locs = audit_location_generator(self.devices, DATADIR, '.db',
mount_check=self.mount_check,
logger=self.logger)
for path, device, partition in all_locs:
for path in self.sync_store.synced_containers_generator():
self.container_sync(path)
if time() - self.reported >= 3600: # once an hour
self.report()
@ -264,10 +268,7 @@ class ContainerSync(Daemon):
"""
self.logger.info(_('Begin container sync "once" mode'))
begin = time()
all_locs = audit_location_generator(self.devices, DATADIR, '.db',
mount_check=self.mount_check,
logger=self.logger)
for path, device, partition in all_locs:
for path in self.sync_store.synced_containers_generator():
self.container_sync(path)
if time() - self.reported >= 3600: # once an hour
self.report()
@ -308,7 +309,20 @@ class ContainerSync(Daemon):
broker = None
try:
broker = ContainerBroker(path)
# The path we pass to the ContainerBroker is a real path of
# a container DB. If we get here, however, it means that this
# path is linked from the sync_containers dir. In rare cases
# of races or process failures the link can be stale, and
# the get_info below will raise a "DB doesn't exist" exception.
# In this case we remove the stale link and re-raise the error,
# since in most cases the db should be there.
try:
info = broker.get_info()
except DatabaseConnectionError as db_err:
if str(db_err).endswith("DB doesn't exist"):
self.sync_store.remove_synced_container(broker)
raise
x, nodes = self.container_ring.get_nodes(info['account'],
info['container'])
for ordinal, node in enumerate(nodes):
@ -388,7 +402,7 @@ class ContainerSync(Daemon):
broker.set_x_container_sync_points(sync_point1, None)
self.container_syncs += 1
self.logger.increment('syncs')
except (Exception, Timeout) as err:
except (Exception, Timeout):
self.container_failures += 1
self.logger.increment('failures')
self.logger.exception(_('ERROR Syncing %s'),


@ -0,0 +1,177 @@
# Copyright (c) 2010-2016 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import errno
from swift.common.utils import audit_location_generator, mkdirs
from swift.container.backend import DATADIR
SYNC_DATADIR = 'sync_containers'
class ContainerSyncStore(object):
"""
Filesystem-based store for local containers that need to be synced.
The store holds a list of containers that need to be synced by the
container sync daemon. The store is local to the container server node,
that is, only containers whose databases are kept locally on the node are
listed.
"""
def __init__(self, devices, logger, mount_check):
self.devices = os.path.normpath(os.path.join('/', devices)) + '/'
self.logger = logger
self.mount_check = mount_check
def _container_to_synced_container_path(self, path):
# path is assumed to be of the form:
# /srv/node/sdb/containers/part/.../*.db
# or more generally:
# devices/device/containers/part/.../*.db
# Below we split the path to the following parts:
# devices, device, rest
devices = self.devices
path = os.path.normpath(path)
device = path[len(devices):path.rfind(DATADIR)]
rest = path[path.rfind(DATADIR) + len(DATADIR) + 1:]
return os.path.join(devices, device, SYNC_DATADIR, rest)
def _synced_container_to_container_path(self, path):
# synced path is assumed to be of the form:
# /srv/node/sdb/sync_containers/part/.../*.db
# or more generally:
# devices/device/sync_containers/part/.../*.db
# Below we split the path to the following parts:
# devices, device, rest
devices = self.devices
path = os.path.normpath(path)
device = path[len(devices):path.rfind(SYNC_DATADIR)]
rest = path[path.rfind(SYNC_DATADIR) + len(SYNC_DATADIR) + 1:]
return os.path.join(devices, device, DATADIR, rest)
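
For illustration, the mapping these two helpers implement (the device
name and hash are made up):

    # container DB path:
    #   /srv/node/sdb/containers/1000/abc/<hash>/<hash>.db
    # corresponding sync-store link:
    #   /srv/node/sdb/sync_containers/1000/abc/<hash>/<hash>.db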
def add_synced_container(self, broker):
"""
Adds the container db represented by broker to the list of synced
containers.
:param broker: An instance of ContainerBroker representing the
container to add.
"""
sync_file = self._container_to_synced_container_path(broker.db_file)
stat = None
try:
stat = os.stat(sync_file)
except OSError as oserr:
if oserr.errno != errno.ENOENT:
raise oserr
if stat is not None:
return
sync_path = os.path.dirname(sync_file)
mkdirs(sync_path)
try:
os.symlink(broker.db_file, sync_file)
except OSError as oserr:
if (oserr.errno != errno.EEXIST or
not os.path.islink(sync_file)):
raise oserr
def remove_synced_container(self, broker):
"""
Removes the container db represented by broker from the list of synced
containers.
:param broker: An instance of ContainerBroker representing the
container to remove.
"""
sync_file = broker.db_file
sync_file = self._container_to_synced_container_path(sync_file)
try:
os.unlink(sync_file)
os.removedirs(os.path.dirname(sync_file))
except OSError as oserr:
if oserr.errno != errno.ENOENT:
raise oserr
def update_sync_store(self, broker):
"""
Add or remove a symlink to/from the sync-containers directory
according to the broker's metadata.
Decide according to the broker x-container-sync-to and
x-container-sync-key whether a symlink needs to be added or
removed.
If neither metadata item is present, the container has never been
set up for sync within reclaim_age, in which case we do nothing. This
is important, as this method is called for ALL containers by the
container replicator.
Otherwise, if the container is marked for delete, the symlink is
removed.
A symlink is added only when both x-container-sync-to and
x-container-sync-key exist and are valid, that is, are not empty.
If none of the above applies, the container is not marked for delete
and the condition for adding a symlink is not met, so the symlink is
removed.
:param broker: An instance of ContainerBroker
"""
# If the broker metadata does not have both x-container-sync-to
# and x-container-sync-key it has *never* been set. Make sure
# we do nothing in this case
if ('X-Container-Sync-To' not in broker.metadata and
'X-Container-Sync-Key' not in broker.metadata):
return
if broker.is_deleted():
self.remove_synced_container(broker)
return
# If both x-container-sync-to and x-container-sync-key
# exist and valid, add the symlink
sync_to = sync_key = None
if 'X-Container-Sync-To' in broker.metadata:
sync_to = broker.metadata['X-Container-Sync-To'][0]
if 'X-Container-Sync-Key' in broker.metadata:
sync_key = broker.metadata['X-Container-Sync-Key'][0]
if sync_to and sync_key:
self.add_synced_container(broker)
return
self.remove_synced_container(broker)
def synced_containers_generator(self):
"""
Iterates over the list of synced containers
yielding the path of the container db
"""
all_locs = audit_location_generator(self.devices, SYNC_DATADIR, '.db',
mount_check=self.mount_check,
logger=self.logger)
for path, device, partition in all_locs:
# What we want to yield is the real path, as it is used to
# instantiate a container broker. The broker would break if not
# given the db's real path, since it e.g. assumes the existence
# of a .pending file in the same directory
yield self._synced_container_to_container_path(path)


@ -89,7 +89,7 @@ class ContainerUpdater(Daemon):
for device in self._listdir(self.devices):
dev_path = os.path.join(self.devices, device)
if self.mount_check and not ismount(dev_path):
self.logger.warn(_('%s is not mounted'), device)
self.logger.warning(_('%s is not mounted'), device)
continue
con_path = os.path.join(dev_path, DATADIR)
if not os.path.exists(con_path):


@ -1,4 +1,4 @@
# German translations for swift.
# Translations template for swift.
# Copyright (C) 2015 ORGANIZATION
# This file is distributed under the same license as the swift project.
#
@ -10,18 +10,19 @@
# Tom Cocozzello <tjcocozz@us.ibm.com>, 2015. #zanata
msgid ""
msgstr ""
"Project-Id-Version: swift 2.5.1.dev70\n"
"Project-Id-Version: swift 2.5.1.dev267\n"
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
"POT-Creation-Date: 2015-10-23 06:34+0000\n"
"POT-Creation-Date: 2016-01-16 12:32+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"PO-Revision-Date: 2015-08-11 11:22+0000\n"
"Last-Translator: openstackjenkins <jenkins@openstack.org>\n"
"Language: de\n"
"Plural-Forms: nplurals=2; plural=(n != 1);\n"
"Generated-By: Babel 2.0\n"
"X-Generator: Zanata 3.7.3\n"
"Language-Team: German\n"
"Plural-Forms: nplurals=2; plural=(n != 1)\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=utf-8\n"
"Content-Transfer-Encoding: 8bit\n"
"Generated-By: Babel 2.1.1\n"
msgid ""
"\n"


@ -1,4 +1,4 @@
# Spanish translations for swift.
# Translations template for swift.
# Copyright (C) 2015 ORGANIZATION
# This file is distributed under the same license as the swift project.
#
@ -8,18 +8,19 @@
# Tom Cocozzello <tjcocozz@us.ibm.com>, 2015. #zanata
msgid ""
msgstr ""
"Project-Id-Version: swift 2.5.1.dev70\n"
"Project-Id-Version: swift 2.5.1.dev267\n"
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
"POT-Creation-Date: 2015-10-23 06:34+0000\n"
"POT-Creation-Date: 2016-01-16 12:32+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"PO-Revision-Date: 2015-09-09 05:36+0000\n"
"Last-Translator: Carlos A. Muñoz <camunoz@redhat.com>\n"
"Language: es\n"
"Plural-Forms: nplurals=2; plural=(n != 1);\n"
"Generated-By: Babel 2.0\n"
"X-Generator: Zanata 3.7.3\n"
"Language-Team: Spanish\n"
"Plural-Forms: nplurals=2; plural=(n != 1)\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=utf-8\n"
"Content-Transfer-Encoding: 8bit\n"
"Generated-By: Babel 2.1.1\n"
msgid ""
"\n"


@ -1,4 +1,4 @@
# French translations for swift.
# Translations template for swift.
# Copyright (C) 2015 ORGANIZATION
# This file is distributed under the same license as the swift project.
#
@ -8,18 +8,19 @@
# Tom Cocozzello <tjcocozz@us.ibm.com>, 2015. #zanata
msgid ""
msgstr ""
"Project-Id-Version: swift 2.5.1.dev70\n"
"Project-Id-Version: swift 2.5.1.dev267\n"
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
"POT-Creation-Date: 2015-10-23 06:34+0000\n"
"POT-Creation-Date: 2016-01-16 12:32+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"PO-Revision-Date: 2015-08-11 11:22+0000\n"
"Last-Translator: openstackjenkins <jenkins@openstack.org>\n"
"Language: fr\n"
"Plural-Forms: nplurals=2; plural=(n > 1);\n"
"Generated-By: Babel 2.0\n"
"X-Generator: Zanata 3.7.3\n"
"Language-Team: French\n"
"Plural-Forms: nplurals=2; plural=(n > 1)\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=utf-8\n"
"Content-Transfer-Encoding: 8bit\n"
"Generated-By: Babel 2.1.1\n"
msgid ""
"\n"


@ -1,4 +1,4 @@
# Italian translations for swift.
# Translations template for swift.
# Copyright (C) 2015 ORGANIZATION
# This file is distributed under the same license as the swift project.
#
@ -7,18 +7,19 @@
# Tom Cocozzello <tjcocozz@us.ibm.com>, 2015. #zanata
msgid ""
msgstr ""
"Project-Id-Version: swift 2.5.1.dev70\n"
"Project-Id-Version: swift 2.5.1.dev267\n"
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
"POT-Creation-Date: 2015-10-23 06:34+0000\n"
"POT-Creation-Date: 2016-01-16 12:32+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"PO-Revision-Date: 2015-08-11 11:22+0000\n"
"Last-Translator: openstackjenkins <jenkins@openstack.org>\n"
"Language: it\n"
"Plural-Forms: nplurals=2; plural=(n != 1);\n"
"Generated-By: Babel 2.0\n"
"X-Generator: Zanata 3.7.3\n"
"Language-Team: Italian\n"
"Plural-Forms: nplurals=2; plural=(n != 1)\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=utf-8\n"
"Content-Transfer-Encoding: 8bit\n"
"Generated-By: Babel 2.1.1\n"
msgid ""
"\n"


@ -1,4 +1,4 @@
# Japanese translations for swift.
# Translations template for swift.
# Copyright (C) 2015 ORGANIZATION
# This file is distributed under the same license as the swift project.
#
@ -9,18 +9,19 @@
# Tom Cocozzello <tjcocozz@us.ibm.com>, 2015. #zanata
msgid ""
msgstr ""
"Project-Id-Version: swift 2.5.1.dev70\n"
"Project-Id-Version: swift 2.5.1.dev267\n"
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
"POT-Creation-Date: 2015-10-23 06:34+0000\n"
"POT-Creation-Date: 2016-01-16 12:32+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"PO-Revision-Date: 2015-09-26 09:26+0000\n"
"Last-Translator: Akihiro Motoki <amotoki@gmail.com>\n"
"Language: ja\n"
"Plural-Forms: nplurals=1; plural=0;\n"
"Generated-By: Babel 2.0\n"
"X-Generator: Zanata 3.7.3\n"
"Language-Team: Japanese\n"
"Plural-Forms: nplurals=1; plural=0\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=utf-8\n"
"Content-Transfer-Encoding: 8bit\n"
"Generated-By: Babel 2.1.1\n"
msgid ""
"\n"


@ -1,4 +1,4 @@
# Korean (South Korea) translations for swift.
# Translations template for swift.
# Copyright (C) 2015 ORGANIZATION
# This file is distributed under the same license as the swift project.
#
@ -9,18 +9,19 @@
# OpenStack Infra <zanata@openstack.org>, 2015. #zanata
msgid ""
msgstr ""
"Project-Id-Version: swift 2.5.1.dev70\n"
"Project-Id-Version: swift 2.5.1.dev267\n"
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
"POT-Creation-Date: 2015-10-23 06:34+0000\n"
"POT-Creation-Date: 2016-01-16 12:32+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"PO-Revision-Date: 2015-09-09 05:10+0000\n"
"Last-Translator: Ying Chun Guo <daisy.ycguo@gmail.com>\n"
"Language: ko_KR\n"
"Language: ko-KR\n"
"Plural-Forms: nplurals=1; plural=0;\n"
"Generated-By: Babel 2.0\n"
"X-Generator: Zanata 3.7.3\n"
"Language-Team: Korean (South Korea)\n"
"Plural-Forms: nplurals=1; plural=0\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=utf-8\n"
"Content-Transfer-Encoding: 8bit\n"
"Generated-By: Babel 2.1.1\n"
msgid ""
"\n"


@ -1,4 +1,4 @@
# Portuguese (Brazil) translations for swift.
# Translations template for swift.
# Copyright (C) 2015 ORGANIZATION
# This file is distributed under the same license as the swift project.
#
@ -11,18 +11,19 @@
# OpenStack Infra <zanata@openstack.org>, 2015. #zanata
msgid ""
msgstr ""
"Project-Id-Version: swift 2.5.1.dev70\n"
"Project-Id-Version: swift 2.5.1.dev267\n"
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
"POT-Creation-Date: 2015-10-23 06:34+0000\n"
"POT-Creation-Date: 2016-01-16 12:32+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"PO-Revision-Date: 2015-08-11 11:22+0000\n"
"Last-Translator: openstackjenkins <jenkins@openstack.org>\n"
"Language: pt_BR\n"
"Language: pt-BR\n"
"Plural-Forms: nplurals=2; plural=(n > 1);\n"
"Generated-By: Babel 2.0\n"
"X-Generator: Zanata 3.7.3\n"
"Language-Team: Portuguese (Brazil)\n"
"Plural-Forms: nplurals=2; plural=(n > 1)\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=utf-8\n"
"Content-Transfer-Encoding: 8bit\n"
"Generated-By: Babel 2.1.1\n"
msgid ""
"\n"


@ -1,26 +1,28 @@
# Russian translations for swift.
# Translations template for swift.
# Copyright (C) 2015 ORGANIZATION
# This file is distributed under the same license as the swift project.
#
# Translators:
# Lucas Palm <lapalm@us.ibm.com>, 2015. #zanata
# OpenStack Infra <zanata@openstack.org>, 2015. #zanata
# Filatov Sergey <filatecs@gmail.com>, 2016. #zanata
msgid ""
msgstr ""
"Project-Id-Version: swift 2.5.1.dev70\n"
"Project-Id-Version: swift 2.5.1.dev267\n"
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
"POT-Creation-Date: 2015-10-23 06:34+0000\n"
"PO-Revision-Date: 2015-08-11 11:22+0000\n"
"Last-Translator: openstackjenkins <jenkins@openstack.org>\n"
"POT-Creation-Date: 2016-01-16 12:32+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"PO-Revision-Date: 2016-01-17 10:49+0000\n"
"Last-Translator: Filatov Sergey <filatecs@gmail.com>\n"
"Language: ru\n"
"Language-Team: Russian\n"
"Plural-Forms: nplurals=4; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n"
"%10<=4 && (n%100<12 || n%100>14) ? 1 : n%10==0 || (n%10>=5 && n%10<=9) || (n"
"%100>=11 && n%100<=14)? 2 : 3)\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=utf-8\n"
"Content-Transfer-Encoding: 8bit\n"
"Generated-By: Babel 2.1.1\n"
"%100>=11 && n%100<=14)? 2 : 3);\n"
"Generated-By: Babel 2.0\n"
"X-Generator: Zanata 3.7.3\n"
"Language-Team: Russian\n"
msgid ""
"\n"
@ -52,6 +54,16 @@ msgstr "Ответили как размонтированные: %(ip)s/%(devic
msgid "%(msg)s %(ip)s:%(port)s/%(device)s"
msgstr "%(msg)s %(ip)s:%(port)s/%(device)s"
#, python-format
msgid ""
"%(reconstructed)d/%(total)d (%(percentage).2f%%) partitions of %(device)d/"
"%(dtotal)d (%(dpercentage).2f%%) devices reconstructed in %(time).2fs "
"(%(rate).2f/sec, %(remaining)s remaining)"
msgstr ""
"Реконструированно разделов: %(reconstructed)d/%(total)d (%(percentage).2f%%) "
"partitions of %(device)d/%(dtotal)d (%(dpercentage).2f%%) за время "
"%(time).2fs (%(rate).2f/sec, осталось: %(remaining)s)"
#, python-format
msgid ""
"%(replicated)d/%(total)d (%(percentage).2f%%) partitions replicated in "
@ -88,6 +100,10 @@ msgstr "%s не существует"
msgid "%s is not mounted"
msgstr "%s не смонтирован"
#, python-format
msgid "%s responded as unmounted"
msgstr "%s ответил как размонтированный"
#, python-format
msgid "%s running (%s - %s)"
msgstr "%s выполняется (%s - %s)"
@ -225,6 +241,14 @@ msgid ""
msgstr ""
"Путь клиента %(client)s не соответствует пути в метаданных объекта %(meta)s"
msgid ""
"Configuration option internal_client_conf_path not defined. Using default "
"configuration, See internal-client.conf-sample for options"
msgstr ""
"Опция internal_client_conf_path конфигурации не определена. Используется "
"конфигурация по умолчанию. Используйте intenal-client.conf-sample для "
"информации об опциях"
msgid "Connection refused"
msgstr "Соединение отклонено"
@ -284,6 +308,10 @@ msgstr "Ошибка загрузки данных: %s"
msgid "Devices pass completed: %.02fs"
msgstr "Проход устройств выполнен: %.02fs"
#, python-format
msgid "Directory %r does not map to a valid policy (%s)"
msgstr "Каталог %r не связан со стратегией policy (%s)"
#, python-format
msgid "ERROR %(db_file)s: %(validate_sync_to_err)s"
msgstr "Ошибка %(db_file)s: %(validate_sync_to_err)s"
@ -560,6 +588,9 @@ msgstr ""
msgid "Exception in top-level replication loop"
msgstr "Исключительная ситуация в цикле репликации верхнего уровня"
msgid "Exception in top-levelreconstruction loop"
msgstr "Исключение в цикле реконструкции верхнего уровня"
#, python-format
msgid "Exception while deleting container %s %s"
msgstr "Исключительная ситуация во время удаления контейнера %s %s"
@ -617,6 +648,10 @@ msgstr "Недопустимый хост %r в X-Container-Sync-To"
msgid "Invalid pending entry %(file)s: %(entry)s"
msgstr "Недопустимая ожидающая запись %(file)s: %(entry)s"
#, python-format
msgid "Invalid response %(resp)s from %(full_path)s"
msgstr "Недопустимый ответ %(resp)s от %(full_path)s"
#, python-format
msgid "Invalid response %(resp)s from %(ip)s"
msgstr "Недопустимый ответ %(resp)s от %(ip)s"
@ -652,10 +687,18 @@ msgstr "Отсутствует конечная точка кластера дл
msgid "No permission to signal PID %d"
msgstr "Нет прав доступа для отправки сигнала в PID %d"
#, python-format
msgid "No policy with index %s"
msgstr "Не найдено стратегии с индексом %s"
#, python-format
msgid "No realm key for %r"
msgstr "Отсутствует ключ области для %r"
#, python-format
msgid "No space left on device for %s (%s)"
msgstr "Не устройстве %s (%s) закончилось место"
#, python-format
msgid "Node error limited %(ip)s:%(port)s (%(device)s)"
msgstr "Ограниченная ошибка узла %(ip)s:%(port)s (%(device)s)"
@ -668,6 +711,10 @@ msgstr ""
"Не найдено: %(sync_from)r => %(sync_to)r - объект "
"%(obj_name)r"
#, python-format
msgid "Nothing reconstructed for %s seconds."
msgstr "Ничего не реконструировано за %s с."
#, python-format
msgid "Nothing replicated for %s seconds."
msgstr "Ничего не реплицировано за %s с."
@ -716,6 +763,10 @@ msgstr ""
msgid "Object audit stats: %s"
msgstr "Состояние контроля объекта: %s"
#, python-format
msgid "Object reconstruction complete (once). (%.02f minutes)"
msgstr "Реконструкция объекта выполнена (однократно). (%.02f мин.)"
#, python-format
msgid "Object replication complete (once). (%.02f minutes)"
msgstr "Репликация объекта выполнена (однократно). (%.02f мин.)"
@ -775,6 +826,14 @@ msgstr "Требуется путь в X-Container-Sync-To"
msgid "Problem cleaning up %s"
msgstr "Неполадка при очистке %s"
#, python-format
msgid "Problem cleaning up %s (%s)"
msgstr "Возникла проблема при очистке %s (%s)"
#, fuzzy, python-format
msgid "Problem writing durable state file %s (%s)"
msgstr "Возникла проблема при записи файла состояния %s (%s)"
#, python-format
msgid "Profiling Error: %s"
msgstr "Ошибка профилирования: %s"
@ -818,6 +877,14 @@ msgstr "Удаление объектов %s"
msgid "Removing partition: %s"
msgstr "Удаление раздела: %s"
#, python-format
msgid "Removing pid file %s with invalid pid"
msgstr "Удаление pid файла %s с неверным pid-ом"
#, python-format
msgid "Removing pid file %s with wrong pid %d"
msgstr "Удаление pid файла %s с неверным pid-ом %d"
#, python-format
msgid "Removing stale pid file %s"
msgstr "Удаление устаревшего файла pid %s"
@ -837,6 +904,11 @@ msgstr ""
"Возвращено 498 для %(meth)s в %(acc)s/%(cont)s/%(obj)s . Ratelimit "
"(максимальная задержка): %(e)s"
msgid "Ring change detected. Aborting current reconstruction pass."
msgstr ""
"Обнаружено изменение кольца. Принудительное завершение текущего прохода "
"реконструкции."
msgid "Ring change detected. Aborting current replication pass."
msgstr ""
"Обнаружено кольцевое изменение. Принудительное завершение текущего прохода "
@ -846,6 +918,9 @@ msgstr ""
msgid "Running %s once"
msgstr "Однократное выполнение %s"
msgid "Running object reconstructor in script mode."
msgstr "Запуск утилиты реконструкции объектов в режиме скрипта."
msgid "Running object replicator in script mode."
msgstr "Запуск утилиты репликации объектов в режиме сценариев."
@ -889,6 +964,12 @@ msgstr "%s будет пропущен, так как он не смонтиро
msgid "Starting %s"
msgstr "Запуск %s"
msgid "Starting object reconstruction pass."
msgstr "Запуск прохода реконструкции объектов."
msgid "Starting object reconstructor in daemon mode."
msgstr "Запуск утилиты реконструкции объектов в режиме демона."
msgid "Starting object replication pass."
msgstr "Запуск прохода репликации объектов."
@ -914,10 +995,18 @@ msgstr ""
msgid "Timeout %(action)s to memcached: %(server)s"
msgstr "Тайм-аут действия %(action)s для сохранения в кэш памяти: %(server)s"
#, python-format
msgid "Timeout Exception with %(ip)s:%(port)s/%(device)s"
msgstr "Исключение по таймауту %(ip)s:%(port)s/%(device)s"
#, python-format
msgid "Trying to %(method)s %(path)s"
msgstr "Попытка выполнения метода %(method)s %(path)s"
#, python-format
msgid "Trying to GET %(full_path)s"
msgstr "Попытка GET-запроса %(full_path)s"
#, python-format
msgid "Trying to get final status of PUT to %s"
msgstr "Попытка получения конечного состояния PUT в %s"
@ -942,10 +1031,18 @@ msgstr "Необрабатываемая исключительная ситуа
msgid "Unable to find %s config section in %s"
msgstr "Не удалось найти раздел конфигурации %s в %s"
#, python-format
msgid "Unable to load internal client from config: %r (%s)"
msgstr "Не удалось загрузить клиент из конфигурации: %r (%s)"
#, python-format
msgid "Unable to locate %s in libc. Leaving as a no-op."
msgstr "Не удалось найти %s в libc. Оставлено как no-op."
#, python-format
msgid "Unable to locate config for %s"
msgstr "Не удалось найти конфигурационный файл для %s"
msgid ""
"Unable to locate fallocate, posix_fallocate in libc. Leaving as a no-op."
msgstr ""
@ -970,6 +1067,11 @@ msgstr "Непредвиденный ответ: %s"
msgid "Unhandled exception"
msgstr "Необработанная исключительная ситуация"
#, python-format
msgid "Unknown exception trying to GET: %(account)r %(container)r %(object)r"
msgstr ""
"Неизвестное исключение в GET-запросе: %(account)r %(container)r %(object)r"
#, python-format
msgid "Update report failed for %(container)s %(dbfile)s"
msgstr "Отчет об обновлении для %(container)s %(dbfile)s не выполнен"
@ -1004,6 +1106,10 @@ msgstr ""
msgid "Waited %s seconds for %s to die; giving up"
msgstr "Система ожидала %s секунд для %s завершения; освобождение"
#, python-format
msgid "Waited %s seconds for %s to die; killing"
msgstr "Система ожидала %s секунд для %s завершения; Принудительное завершение"
msgid "Warning: Cannot ratelimit without a memcached client"
msgstr ""
"Предупреждение: не удается ограничить скорость без клиента с кэшированием "

File diff suppressed because it is too large

View File

@ -1,4 +1,4 @@
# Turkish (Turkey) translations for swift.
# Translations template for swift.
# Copyright (C) 2015 ORGANIZATION
# This file is distributed under the same license as the swift project.
#
@ -7,18 +7,19 @@
# OpenStack Infra <zanata@openstack.org>, 2015. #zanata
msgid ""
msgstr ""
"Project-Id-Version: swift 2.5.1.dev70\n"
"Project-Id-Version: swift 2.5.1.dev267\n"
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
"POT-Creation-Date: 2015-10-23 06:34+0000\n"
"POT-Creation-Date: 2016-01-16 12:32+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"PO-Revision-Date: 2015-09-04 07:42+0000\n"
"Last-Translator: İşbaran Akçayır <isbaran@gmail.com>\n"
"Language: tr_TR\n"
"Language: tr-TR\n"
"Plural-Forms: nplurals=1; plural=0;\n"
"Generated-By: Babel 2.0\n"
"X-Generator: Zanata 3.7.3\n"
"Language-Team: Turkish (Turkey)\n"
"Plural-Forms: nplurals=1; plural=0\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=utf-8\n"
"Content-Transfer-Encoding: 8bit\n"
"Generated-By: Babel 2.1.1\n"
msgid ""
"\n"

View File

@ -1,4 +1,4 @@
# Chinese (Simplified, China) translations for swift.
# Translations template for swift.
# Copyright (C) 2015 ORGANIZATION
# This file is distributed under the same license as the swift project.
#
@ -8,18 +8,19 @@
# OpenStack Infra <zanata@openstack.org>, 2015. #zanata
msgid ""
msgstr ""
"Project-Id-Version: swift 2.5.1.dev70\n"
"Project-Id-Version: swift 2.5.1.dev267\n"
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
"POT-Creation-Date: 2015-10-23 06:34+0000\n"
"POT-Creation-Date: 2016-01-16 12:32+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"PO-Revision-Date: 2015-08-11 11:22+0000\n"
"Last-Translator: openstackjenkins <jenkins@openstack.org>\n"
"Language: zh_Hans_CN\n"
"Language: zh-CN\n"
"Plural-Forms: nplurals=1; plural=0;\n"
"Generated-By: Babel 2.0\n"
"X-Generator: Zanata 3.7.3\n"
"Language-Team: Chinese (China)\n"
"Plural-Forms: nplurals=1; plural=0\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=utf-8\n"
"Content-Transfer-Encoding: 8bit\n"
"Generated-By: Babel 2.1.1\n"
msgid ""
"\n"

View File

@ -1,4 +1,4 @@
# Chinese (Traditional, Taiwan) translations for swift.
# Translations template for swift.
# Copyright (C) 2015 ORGANIZATION
# This file is distributed under the same license as the swift project.
#
@ -7,18 +7,19 @@
# OpenStack Infra <zanata@openstack.org>, 2015. #zanata
msgid ""
msgstr ""
"Project-Id-Version: swift 2.5.1.dev70\n"
"Project-Id-Version: swift 2.5.1.dev267\n"
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
"POT-Creation-Date: 2015-10-23 06:34+0000\n"
"POT-Creation-Date: 2016-01-16 12:32+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"PO-Revision-Date: 2015-08-11 11:22+0000\n"
"Last-Translator: openstackjenkins <jenkins@openstack.org>\n"
"Language: zh_Hant_TW\n"
"Language: zh-TW\n"
"Plural-Forms: nplurals=1; plural=0;\n"
"Generated-By: Babel 2.0\n"
"X-Generator: Zanata 3.7.3\n"
"Language-Team: Chinese (Taiwan)\n"
"Plural-Forms: nplurals=1; plural=0\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=utf-8\n"
"Content-Transfer-Encoding: 8bit\n"
"Generated-By: Babel 2.1.1\n"
msgid ""
"\n"

View File

@ -28,8 +28,7 @@ from swift.common.utils import get_logger, ratelimit_sleep, dump_recon_cache, \
list_from_csv, listdir
from swift.common.exceptions import DiskFileQuarantined, DiskFileNotExist
from swift.common.daemon import Daemon
SLEEP_BETWEEN_AUDITS = 30
from swift.common.storage_policy import POLICIES
class AuditorWorker(object):
@ -39,7 +38,7 @@ class AuditorWorker(object):
self.conf = conf
self.logger = logger
self.devices = devices
self.diskfile_mgr = diskfile.DiskFileManager(conf, self.logger)
self.diskfile_router = diskfile.DiskFileRouter(conf, self.logger)
self.max_files_per_second = float(conf.get('files_per_second', 20))
self.max_bytes_per_second = float(conf.get('bytes_per_second',
10000000))
@ -87,8 +86,16 @@ class AuditorWorker(object):
total_quarantines = 0
total_errors = 0
time_auditing = 0
all_locs = self.diskfile_mgr.object_audit_location_generator(
device_dirs=device_dirs)
# TODO: we should move audit-location generation to the storage policy,
# as we may (conceivably) have a different filesystem layout for each.
# We'd still need to generate the policies to audit from the actual
# directories found on-disk, and have appropriate error reporting if we
# find a directory that doesn't correspond to any known policy. This
# will require a sizable refactor, but currently all diskfile managers
# can find all diskfile locations regardless of policy -- so for now
# just use Policy-0's manager.
all_locs = (self.diskfile_router[POLICIES[0]]
.object_audit_location_generator(device_dirs=device_dirs))
for location in all_locs:
loop_time = time.time()
self.failsafe_object_audit(location)
@ -187,8 +194,9 @@ class AuditorWorker(object):
def raise_dfq(msg):
raise DiskFileQuarantined(msg)
diskfile_mgr = self.diskfile_router[location.policy]
try:
df = self.diskfile_mgr.get_diskfile_from_audit_location(location)
df = diskfile_mgr.get_diskfile_from_audit_location(location)
with df.open():
metadata = df.get_metadata()
obj_size = int(metadata['Content-Length'])
@ -230,9 +238,10 @@ class ObjectAuditor(Daemon):
self.recon_cache_path = conf.get('recon_cache_path',
'/var/cache/swift')
self.rcache = os.path.join(self.recon_cache_path, "object.recon")
self.interval = int(conf.get('interval', 30))
def _sleep(self):
time.sleep(SLEEP_BETWEEN_AUDITS)
time.sleep(self.interval)
def clear_recon_cache(self, auditor_type):
"""Clear recon cache entries"""
@ -261,7 +270,8 @@ class ObjectAuditor(Daemon):
try:
self.run_audit(**kwargs)
except Exception as e:
self.logger.error(_("ERROR: Unable to run auditing: %s") % e)
self.logger.exception(
_("ERROR: Unable to run auditing: %s") % e)
finally:
sys.exit()

View File

@ -303,7 +303,7 @@ def object_audit_location_generator(devices, mount_check=True, logger=None,
base, policy = split_policy_string(dir_)
except PolicyError as e:
if logger:
logger.warn(_('Directory %r does not map '
logger.warning(_('Directory %r does not map '
'to a valid policy (%s)') % (dir_, e))
continue
datadir_path = os.path.join(devices, device, dir_)
@ -420,7 +420,7 @@ class BaseDiskFileManager(object):
# If the operator wants zero-copy with splice() but we don't have the
# requisite kernel support, complain so they can go fix it.
if conf_wants_splice and not splice.available:
self.logger.warn(
self.logger.warning(
"Use of splice() requested (config says \"splice = %s\"), "
"but the system does not support it. "
"splice() will not be used." % conf.get('splice'))
@ -434,7 +434,7 @@ class BaseDiskFileManager(object):
# AF_ALG support), we can't use zero-copy.
if err.errno != errno.EAFNOSUPPORT:
raise
self.logger.warn("MD5 sockets not supported. "
self.logger.warning("MD5 sockets not supported. "
"splice() will not be used.")
else:
self.use_splice = True
@ -460,92 +460,175 @@ class BaseDiskFileManager(object):
"""
raise NotImplementedError
def _gather_on_disk_file(self, filename, ext, context, frag_index=None,
**kwargs):
def _process_ondisk_files(self, exts, results, **kwargs):
"""
Called by gather_ondisk_files() for each file in an object
datadir in reverse sorted order. If a file is considered part of a
valid on-disk file set it will be added to the context dict, keyed by
its extension. If a file is considered to be obsolete it will be added
to a list stored under the key 'obsolete' in the context dict.
Called by get_ondisk_files(). Should be over-ridden to implement
subclass specific handling of files.
:param filename: name of file to be accepted or not
:param ext: extension part of filename
:param context: a context dict that may have been populated by previous
calls to this method
:returns: True if a valid file set has been found, False otherwise
:param exts: dict of lists of file info, keyed by extension
:param results: a dict that may be updated with results
"""
raise NotImplementedError
def _verify_on_disk_files(self, accepted_files, **kwargs):
def _verify_ondisk_files(self, results, **kwargs):
"""
Verify that the final combination of on disk files complies with the
diskfile contract.
:param accepted_files: files that have been found and accepted
:param results: files that have been found and accepted
:returns: True if the file combination is compliant, False otherwise
"""
raise NotImplementedError
data_file, meta_file, ts_file = tuple(
[results[key]
for key in ('data_file', 'meta_file', 'ts_file')])
def gather_ondisk_files(self, files, include_obsolete=False,
verify=False, **kwargs):
return ((data_file is None and meta_file is None and ts_file is None)
or (ts_file is not None and data_file is None
and meta_file is None)
or (data_file is not None and ts_file is None))
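A minimal standalone sketch of this contract (the helper name contract_ok and the sample filenames are hypothetical): only an empty directory, a lone tombstone, or a data file without a tombstone should verify.
def contract_ok(data_file, meta_file, ts_file):
    # mirrors the boolean expression in _verify_ondisk_files above
    return ((data_file is None and meta_file is None and ts_file is None)
            or (ts_file is not None and data_file is None
                and meta_file is None)
            or (data_file is not None and ts_file is None))

assert contract_ok(None, None, None)              # empty hash dir
assert contract_ok(None, None, 't1.ts')           # tombstone only
assert contract_ok('t1.data', 't2.meta', None)    # data, optionally with meta
assert not contract_ok('t1.data', None, 't2.ts')  # data and tombstone clash
assert not contract_ok(None, 't2.meta', None)     # orphaned meta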
def _split_list(self, original_list, condition):
"""
Given a simple list of files names, iterate over them to determine the
files that constitute a valid object, and optionally determine the
files that are obsolete and could be deleted. Note that some files may
fall into neither category.
Split a list into two lists. The first list contains the first N items
of the original list, in their original order, where 0 <= N <=
len(original list). The second list contains the remaining items of the
original list, in their original order.
The index, N, at which the original list is split is the index of the
first item in the list that does not satisfy the given condition. Note
that the original list should be appropriately sorted if the second
list is to contain no items that satisfy the given condition.
:param original_list: the list to be split.
:param condition: a single argument function that will be used to test
for the list item to split on.
:return: a tuple of two lists.
"""
for i, item in enumerate(original_list):
if not condition(item):
return original_list[:i], original_list[i:]
return original_list, []
def _split_gt_timestamp(self, file_info_list, timestamp):
"""
Given a list of file info dicts, reverse sorted by timestamp, split the
list into two: items newer than timestamp, and items at same time or
older than timestamp.
:param file_info_list: a list of file_info dicts.
:param timestamp: a Timestamp.
:return: a tuple of two lists.
"""
return self._split_list(
file_info_list, lambda x: x['timestamp'] > timestamp)
def _split_gte_timestamp(self, file_info_list, timestamp):
"""
Given a list of file info dicts, reverse sorted by timestamp, split the
list into two: items newer than or at same time as the timestamp, and
items older than timestamp.
:param file_info_list: a list of file_info dicts.
:param timestamp: a Timestamp.
:return: a tuple of two lists.
"""
return self._split_list(
file_info_list, lambda x: x['timestamp'] >= timestamp)
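A runnable sketch of the split helpers above; plain integers stand in for Timestamp instances, and split_list mirrors _split_list:
def split_list(original_list, condition):
    # split at the first item that fails the condition
    for i, item in enumerate(original_list):
        if not condition(item):
            return original_list[:i], original_list[i:]
    return original_list, []

infos = [{'timestamp': 3}, {'timestamp': 2}, {'timestamp': 2}, {'timestamp': 1}]
# _split_gt_timestamp(infos, 2): strictly newer vs same-or-older
assert split_list(infos, lambda x: x['timestamp'] > 2) == (infos[:1], infos[1:])
# _split_gte_timestamp(infos, 2): newer-or-same vs strictly older
assert split_list(infos, lambda x: x['timestamp'] >= 2) == (infos[:3], infos[3:])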
def get_ondisk_files(self, files, datadir, verify=True, **kwargs):
"""
Given a simple list of files names, determine the files that constitute
a valid fileset i.e. a set of files that defines the state of an
object, and determine the files that are obsolete and could be deleted.
Note that some files may fall into neither category.
If a file is considered part of a valid fileset then its info dict will
be added to the results dict, keyed by <extension>_info. Any files that
are no longer required will have their info dicts added to a list
stored under the key 'obsolete'.
The results dict will always contain entries with keys 'ts_file',
'data_file' and 'meta_file'. Their values will be the fully qualified
path to a file of the corresponding type if there is such a file in the
valid fileset, or None.
:param files: a list of file names.
:param include_obsolete: By default the iteration will stop when a
valid file set has been found. Setting this
argument to True will cause the iteration to
continue in order to find all obsolete files.
:param datadir: directory name files are from.
:param verify: if True verify that the ondisk file contract has not
been violated, otherwise do not verify.
:returns: a dict that may contain: valid on disk files keyed by their
filename extension; a list of obsolete files stored under the
key 'obsolete'.
:returns: a dict that will contain keys:
ts_file -> path to a .ts file or None
data_file -> path to a .data file or None
meta_file -> path to a .meta file or None
and may contain keys:
ts_info -> a file info dict for a .ts file
data_info -> a file info dict for a .data file
meta_info -> a file info dict for a .meta file
obsolete -> a list of file info dicts for obsolete files
"""
files.sort(reverse=True)
results = {}
# Build the exts data structure:
# exts is a dict that maps file extensions to a list of file_info
# dicts for the files having that extension. The file_info dicts are of
# the form returned by parse_on_disk_filename, with the filename added.
# Each list is sorted in reverse timestamp order.
#
# The exts dict will be modified during subsequent processing as files
# are removed to be discarded or ignored.
exts = defaultdict(list)
for afile in files:
ts_file = results.get('.ts')
data_file = results.get('.data')
if not include_obsolete:
assert ts_file is None, "On-disk file search loop" \
" continuing after tombstone, %s, encountered" % ts_file
assert data_file is None, "On-disk file search loop" \
" continuing after data file, %s, encountered" % data_file
# Categorize files by extension
try:
file_info = self.parse_on_disk_filename(afile)
file_info['filename'] = afile
exts[file_info['ext']].append(file_info)
except DiskFileError as e:
self.logger.warning('Unexpected file %s: %s' %
(os.path.join(datadir or '', afile), e))
for ext in exts:
# For each extension sort files into reverse chronological order.
exts[ext] = sorted(
exts[ext], key=lambda info: info['timestamp'], reverse=True)
ext = splitext(afile)[1]
if self._gather_on_disk_file(
afile, ext, results, **kwargs):
if not include_obsolete:
break
# the results dict is used to collect results of file filtering
results = {}
# non-tombstones older than or equal to latest tombstone are obsolete
if exts.get('.ts'):
for ext in filter(lambda ext: ext != '.ts', exts.keys()):
exts[ext], older = self._split_gt_timestamp(
exts[ext], exts['.ts'][0]['timestamp'])
results.setdefault('obsolete', []).extend(older)
# all but most recent .meta and .ts are obsolete
for ext in ('.meta', '.ts'):
if ext in exts:
results.setdefault('obsolete', []).extend(exts[ext][1:])
exts[ext] = exts[ext][:1]
# delegate to subclass handler
self._process_ondisk_files(exts, results, **kwargs)
# set final choice of files
if exts.get('.ts'):
results['ts_info'] = exts['.ts'][0]
if 'data_info' in results and exts.get('.meta'):
# only report a meta file if there is a data file
results['meta_info'] = exts['.meta'][0]
# set ts_file, data_file and meta_file with path to chosen file or None
for info_key in ('data_info', 'meta_info', 'ts_info'):
info = results.get(info_key)
key = info_key[:-5] + '_file'
results[key] = join(datadir, info['filename']) if info else None
if verify:
assert self._verify_on_disk_files(
assert self._verify_ondisk_files(
results, **kwargs), \
"On-disk file search algorithm contract is broken: %s" \
% results.values()
% str(results)
return results
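A hedged usage sketch (the filenames and datadir are made up, and a replication-policy DiskFileManager from this tree is assumed importable): a .meta newer than a .data yields both files and no tombstone.
from swift.common.utils import get_logger
from swift.obj.diskfile import DiskFileManager

mgr = DiskFileManager({}, get_logger({}))
files = ['1400000002.00000.meta', '1400000001.00000.data']
info = mgr.get_ondisk_files(files, '/srv/node/sda/objects/0/abc/deadbeef')
assert info['data_file'].endswith('1400000001.00000.data')
assert info['meta_file'].endswith('1400000002.00000.meta')
assert info['ts_file'] is None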
def get_ondisk_files(self, files, datadir, **kwargs):
"""
Given a simple list of files names, determine the files to use.
:param files: simple set of files as a python list
:param datadir: directory name files are from for convenience
:returns: dict of files to use having keys 'data_file', 'ts_file',
'meta_file' and optionally other policy specific keys
"""
file_info = self.gather_ondisk_files(files, verify=True, **kwargs)
for ext in ('.data', '.meta', '.ts'):
filename = file_info.get(ext)
key = '%s_file' % ext[1:]
file_info[key] = join(datadir, filename) if filename else None
return file_info
def cleanup_ondisk_files(self, hsh_path, reclaim_age=ONE_WEEK, **kwargs):
"""
Clean up on-disk files that are obsolete and gather the set of valid
@ -560,27 +643,24 @@ class BaseDiskFileManager(object):
key 'obsolete'; a list of files remaining in the directory,
reverse sorted, stored under the key 'files'.
"""
def is_reclaimable(filename):
timestamp = self.parse_on_disk_filename(filename)['timestamp']
def is_reclaimable(timestamp):
return (time.time() - float(timestamp)) > reclaim_age
files = listdir(hsh_path)
files.sort(reverse=True)
results = self.gather_ondisk_files(files, include_obsolete=True,
**kwargs)
# TODO ref to durables here
if '.durable' in results and not results.get('fragments'):
# a .durable with no .data is deleted as soon as it is found
results.setdefault('obsolete', []).append(results.pop('.durable'))
if '.ts' in results and is_reclaimable(results['.ts']):
results.setdefault('obsolete', []).append(results.pop('.ts'))
for filename in results.get('fragments_without_durable', []):
results = self.get_ondisk_files(
files, hsh_path, verify=False, **kwargs)
if 'ts_info' in results and is_reclaimable(
results['ts_info']['timestamp']):
remove_file(join(hsh_path, results['ts_info']['filename']))
files.remove(results.pop('ts_info')['filename'])
for file_info in results.get('possible_reclaim', []):
# stray fragments are not deleted until reclaim-age
if is_reclaimable(filename):
results.setdefault('obsolete', []).append(filename)
for filename in results.get('obsolete', []):
remove_file(join(hsh_path, filename))
files.remove(filename)
if is_reclaimable(file_info['timestamp']):
results.setdefault('obsolete', []).append(file_info)
for file_info in results.get('obsolete', []):
remove_file(join(hsh_path, file_info['filename']))
files.remove(file_info['filename'])
results['files'] = files
return results
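As an aside, the reclaim predicate reduces to the following (assuming ONE_WEEK is 604800 seconds, its value in this module):
import time

reclaim_age = 604800  # ONE_WEEK

def is_reclaimable(timestamp):
    return (time.time() - float(timestamp)) > reclaim_age

assert is_reclaimable(time.time() - reclaim_age - 1)  # past the age: reclaim
assert not is_reclaimable(time.time())                # fresh: keep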
@ -915,9 +995,9 @@ class BaseDiskFileManager(object):
(os.path.join(partition_path, suffix), suffix)
for suffix in suffixes)
key_preference = (
('ts_meta', '.meta'),
('ts_data', '.data'),
('ts_data', '.ts'),
('ts_meta', 'meta_info'),
('ts_data', 'data_info'),
('ts_data', 'ts_info'),
)
for suffix_path, suffix in suffixes:
for object_hash in self._listdir(suffix_path):
@ -926,11 +1006,10 @@ class BaseDiskFileManager(object):
results = self.cleanup_ondisk_files(
object_path, self.reclaim_age, **kwargs)
timestamps = {}
for ts_key, ext in key_preference:
if ext not in results:
for ts_key, info_key in key_preference:
if info_key not in results:
continue
timestamps[ts_key] = self.parse_on_disk_filename(
results[ext])['timestamp']
timestamps[ts_key] = results[info_key]['timestamp']
if 'ts_data' not in timestamps:
# file sets that do not include a .data or .ts
# file cannot be opened and therefore cannot
@ -1325,7 +1404,7 @@ class BaseDiskFileReader(object):
self._quarantined_dir = self._threadpool.run_in_thread(
self.manager.quarantine_renamer, self._device_path,
self._data_file)
self._logger.warn("Quarantined object %s: %s" % (
self._logger.warning("Quarantined object %s: %s" % (
self._data_file, msg))
self._logger.increment('quarantines')
self._quarantine_hook(msg)
@ -1430,6 +1509,7 @@ class BaseDiskFile(object):
self._obj = None
self._datadir = None
self._tmpdir = join(device_path, get_tmp_dir(policy))
self._ondisk_info = None
self._metadata = None
self._datafile_metadata = None
self._metafile_metadata = None
@ -1479,6 +1559,26 @@ class BaseDiskFile(object):
raise DiskFileNotOpen()
return Timestamp(self._datafile_metadata.get('X-Timestamp'))
@property
def durable_timestamp(self):
"""
Provides the timestamp of the newest data file found in the object
directory.
:return: A Timestamp instance, or None if no data file was found.
:raises DiskFileNotOpen: if the open() method has not been previously
called on this instance.
"""
if self._ondisk_info is None:
raise DiskFileNotOpen()
if self._datafile_metadata:
return Timestamp(self._datafile_metadata.get('X-Timestamp'))
return None
@property
def fragments(self):
return None
@classmethod
def from_hash_dir(cls, mgr, hash_dir_path, device_path, partition, policy):
return cls(mgr, device_path, None, partition, _datadir=hash_dir_path,
@ -1524,8 +1624,8 @@ class BaseDiskFile(object):
# The data directory does not exist, so the object cannot exist.
files = []
# gather info about the valid files to us to open the DiskFile
file_info = self._get_ondisk_file(files)
# gather info about the valid files to use to open the DiskFile
file_info = self._get_ondisk_files(files)
self._data_file = file_info.get('data_file')
if not self._data_file:
@ -1574,12 +1674,12 @@ class BaseDiskFile(object):
"""
self._quarantined_dir = self._threadpool.run_in_thread(
self.manager.quarantine_renamer, self._device_path, data_file)
self._logger.warn("Quarantined object %s: %s" % (
self._logger.warning("Quarantined object %s: %s" % (
data_file, msg))
self._logger.increment('quarantines')
return DiskFileQuarantined(msg)
def _get_ondisk_file(self, files):
def _get_ondisk_files(self, files):
"""
Determine the on-disk files to use.
@ -1950,8 +2050,9 @@ class DiskFile(BaseDiskFile):
reader_cls = DiskFileReader
writer_cls = DiskFileWriter
def _get_ondisk_file(self, files):
return self.manager.get_ondisk_files(files, self._datadir)
def _get_ondisk_files(self, files):
self._ondisk_info = self.manager.get_ondisk_files(files, self._datadir)
return self._ondisk_info
@DiskFileRouter.register(REPL_POLICY)
@ -1967,89 +2068,44 @@ class DiskFileManager(BaseDiskFileManager):
* timestamp is a :class:`~swift.common.utils.Timestamp`
* ext is a string, the file extension including the leading dot or
the empty string if the filename has no extenstion.
the empty string if the filename has no extension.
:raises DiskFileError: if any part of the filename is not able to be
validated.
"""
filename, ext = splitext(filename)
float_part, ext = splitext(filename)
try:
timestamp = Timestamp(float_part)
except ValueError:
raise DiskFileError('Invalid Timestamp value in filename %r'
% filename)
return {
'timestamp': Timestamp(filename),
'timestamp': timestamp,
'ext': ext,
}
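For illustration (hypothetical filename; this Swift tree assumed importable), the parser now validates the timestamp up front rather than failing later:
from swift.common.utils import get_logger
from swift.common.exceptions import DiskFileError
from swift.obj.diskfile import DiskFileManager

mgr = DiskFileManager({}, get_logger({}))
info = mgr.parse_on_disk_filename('1400000001.00000.data')
assert info['ext'] == '.data'
assert info['timestamp'].internal == '1400000001.00000'
try:
    mgr.parse_on_disk_filename('junk.data')
except DiskFileError:
    pass  # an unparsable timestamp part now raises DiskFileError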
def _gather_on_disk_file(self, filename, ext, context, frag_index=None,
**kwargs):
def _process_ondisk_files(self, exts, results, **kwargs):
"""
Called by gather_ondisk_files() for each file in an object
datadir in reverse sorted order. If a file is considered part of a
valid on-disk file set it will be added to the context dict, keyed by
its extension. If a file is considered to be obsolete it will be added
to a list stored under the key 'obsolete' in the context dict.
Implement replication policy specific handling of .data files.
:param filename: name of file to be accepted or not
:param ext: extension part of filename
:param context: a context dict that may have been populated by previous
calls to this method
:returns: True if a valid file set has been found, False otherwise
:param exts: dict of lists of file info, keyed by extension
:param results: a dict that may be updated with results
"""
# if first file with given extension then add filename to context
# dict and return True
accept_first = lambda: context.setdefault(ext, filename) == filename
# add the filename to the list of obsolete files in context dict
discard = lambda: context.setdefault('obsolete', []).append(filename)
# set a flag in the context dict indicating that a valid fileset has
# been found
set_valid_fileset = lambda: context.setdefault('found_valid', True)
# return True if the valid fileset flag is set in the context dict
have_valid_fileset = lambda: context.get('found_valid')
if exts.get('.data'):
for ext in exts.keys():
if ext == '.data':
if have_valid_fileset():
# valid fileset means we must have a newer
# .data or .ts, so discard the older .data file
discard()
# older .data's are obsolete
exts[ext], obsolete = self._split_gte_timestamp(
exts[ext], exts['.data'][0]['timestamp'])
else:
accept_first()
set_valid_fileset()
elif ext == '.ts':
if have_valid_fileset() or not accept_first():
# newer .data or .ts already found so discard this
discard()
if not have_valid_fileset():
# remove any .meta that may have been previously found
context.pop('.meta', None)
set_valid_fileset()
elif ext == '.meta':
if have_valid_fileset() or not accept_first():
# newer .data, .durable or .ts already found so discard this
discard()
else:
# ignore unexpected files
pass
return have_valid_fileset()
# other files at same or older timestamp as most recent
# data are obsolete
exts[ext], obsolete = self._split_gt_timestamp(
exts[ext], exts['.data'][0]['timestamp'])
results.setdefault('obsolete', []).extend(obsolete)
def _verify_on_disk_files(self, accepted_files, **kwargs):
"""
Verify that the final combination of on disk files complies with the
replicated diskfile contract.
:param accepted_files: files that have been found and accepted
:returns: True if the file combination is compliant, False otherwise
"""
# mimic legacy behavior - .meta is ignored when .ts is found
if accepted_files.get('.ts'):
accepted_files.pop('.meta', None)
data_file, meta_file, ts_file, durable_file = tuple(
[accepted_files.get(ext)
for ext in ('.data', '.meta', '.ts', '.durable')])
return ((data_file is None and meta_file is None and ts_file is None)
or (ts_file is not None and data_file is None
and meta_file is None)
or (data_file is not None and ts_file is None))
# set results
results['data_info'] = exts['.data'][0]
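A worked example of the rule above, with plain integers standing in for Timestamps:
exts = {'.data': [{'timestamp': 3}, {'timestamp': 2}],
        '.meta': [{'timestamp': 1}]}
newest = exts['.data'][0]['timestamp']
# older .data files are obsolete; .meta at or below the data time is obsolete
obsolete = [i for i in exts['.data'][1:] if i['timestamp'] < newest]
obsolete += [i for i in exts['.meta'] if i['timestamp'] <= newest]
assert obsolete == [{'timestamp': 2}, {'timestamp': 1}]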
def _hash_suffix(self, path, reclaim_age):
"""
@ -2153,14 +2209,47 @@ class ECDiskFile(BaseDiskFile):
if frag_index is not None:
self._frag_index = self.manager.validate_fragment_index(frag_index)
def _get_ondisk_file(self, files):
@property
def durable_timestamp(self):
"""
Provides the timestamp of the newest durable file found in the object
directory.
:return: A Timestamp instance, or None if no durable file was found.
:raises DiskFileNotOpen: if the open() method has not been previously
called on this instance.
"""
if self._ondisk_info is None:
raise DiskFileNotOpen()
if self._ondisk_info.get('durable_frag_set'):
return self._ondisk_info['durable_frag_set'][0]['timestamp']
return None
@property
def fragments(self):
"""
Provides information about all fragments that were found in the object
directory, including fragments without a matching durable file, and
including any fragment chosen to construct the opened diskfile.
:return: A dict mapping <Timestamp instance> -> <list of frag indexes>,
or None if the diskfile has not been opened or no fragments
were found.
"""
if self._ondisk_info:
frag_sets = self._ondisk_info['frag_sets']
return dict([(ts, [info['frag_index'] for info in frag_set])
for ts, frag_set in frag_sets.items()])
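For two frag sets on disk, the mapping might look like this (hypothetical timestamps and indexes):
from swift.common.utils import Timestamp

example = {Timestamp('1400000002.00000'): [0, 2],     # newer set: frags #0, #2
           Timestamp('1400000001.00000'): [0, 1, 2]}  # older, complete set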
def _get_ondisk_files(self, files):
"""
The only difference between this method and the replication policy
DiskFile method is passing in the frag_index kwarg to our manager's
get_ondisk_files method.
"""
return self.manager.get_ondisk_files(
self._ondisk_info = self.manager.get_ondisk_files(
files, self._datadir, frag_index=self._frag_index)
return self._ondisk_info
def purge(self, timestamp, frag_index):
"""
@ -2254,9 +2343,13 @@ class ECDiskFileManager(BaseDiskFileManager):
validated.
"""
frag_index = None
filename, ext = splitext(filename)
parts = filename.split('#', 1)
timestamp = parts[0]
float_frag, ext = splitext(filename)
parts = float_frag.split('#', 1)
try:
timestamp = Timestamp(parts[0])
except ValueError:
raise DiskFileError('Invalid Timestamp value in filename %r'
% filename)
if ext == '.data':
# it is an error for an EC data file to not have a valid
# fragment index
@ -2267,137 +2360,94 @@ class ECDiskFileManager(BaseDiskFileManager):
pass
frag_index = self.validate_fragment_index(frag_index)
return {
'timestamp': Timestamp(timestamp),
'timestamp': timestamp,
'frag_index': frag_index,
'ext': ext,
}
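For example (hypothetical filename), the '#2' suffix carries the fragment archive index:
from swift.common.utils import get_logger
from swift.obj.diskfile import ECDiskFileManager

mgr = ECDiskFileManager({}, get_logger({}))
info = mgr.parse_on_disk_filename('1400000001.00000#2.data')
assert info['frag_index'] == 2 and info['ext'] == '.data'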
def is_obsolete(self, filename, other_filename):
def _process_ondisk_files(self, exts, results, frag_index=None, **kwargs):
"""
Test if a given file is considered to be obsolete with respect to
another file in an object storage dir.
Implement EC policy specific handling of .data and .durable files.
Implements EC policy specific behavior when comparing files against a
.durable file.
A simple string comparison would consider t2#1.data to be older than
t2.durable (since t2#1.data < t2.durable). By stripping off the file
extensions we get the desired behavior: t2#1 > t2 without compromising
the detection of t1#1 < t2.
:param filename: a string representing an absolute filename
:param other_filename: a string representing an absolute filename
:returns: True if filename is considered obsolete, False otherwise.
"""
if other_filename.endswith('.durable'):
return splitext(filename)[0] < splitext(other_filename)[0]
return filename < other_filename
def _gather_on_disk_file(self, filename, ext, context, frag_index=None,
**kwargs):
"""
Called by gather_ondisk_files() for each file in an object
datadir in reverse sorted order. If a file is considered part of a
valid on-disk file set it will be added to the context dict, keyed by
its extension. If a file is considered to be obsolete it will be added
to a list stored under the key 'obsolete' in the context dict.
:param filename: name of file to be accepted or not
:param ext: extension part of filename
:param context: a context dict that may have been populated by previous
calls to this method
:param exts: dict of lists of file info, keyed by extension
:param results: a dict that may be updated with results
:param frag_index: if set, search for a specific fragment index .data
file, otherwise accept the first valid .data file.
:returns: True if a valid file set has been found, False otherwise
"""
durable_info = None
if exts.get('.durable'):
durable_info = exts['.durable'][0]
# Mark everything older than most recent .durable as obsolete
# and remove from the exts dict.
for ext in exts.keys():
exts[ext], older = self._split_gte_timestamp(
exts[ext], durable_info['timestamp'])
results.setdefault('obsolete', []).extend(older)
# if first file with given extension then add filename to context
# dict and return True
accept_first = lambda: context.setdefault(ext, filename) == filename
# add the filename to the list of obsolete files in context dict
discard = lambda: context.setdefault('obsolete', []).append(filename)
# set a flag in the context dict indicating that a valid fileset has
# been found
set_valid_fileset = lambda: context.setdefault('found_valid', True)
# return True if the valid fileset flag is set in the context dict
have_valid_fileset = lambda: context.get('found_valid')
# Split the list of .data files into sets of frags having the same
# timestamp, identifying the durable and newest sets (if any) as we go.
# To do this we can take advantage of the list of .data files being
# reverse-time ordered. Keep the resulting per-timestamp frag sets in
# a frag_sets dict mapping a Timestamp instance -> frag_set.
all_frags = exts.get('.data')
frag_sets = {}
durable_frag_set = None
while all_frags:
frag_set, all_frags = self._split_gte_timestamp(
all_frags, all_frags[0]['timestamp'])
# sort the frag set into ascending frag_index order
frag_set.sort(key=lambda info: info['frag_index'])
timestamp = frag_set[0]['timestamp']
frag_sets[timestamp] = frag_set
if durable_info and durable_info['timestamp'] == timestamp:
durable_frag_set = frag_set
if context.get('.durable'):
# a .durable file has been found
if ext == '.data':
if self.is_obsolete(filename, context.get('.durable')):
# this and remaining data files are older than durable
discard()
set_valid_fileset()
# Select a single chosen frag from the chosen frag_set, by either
# matching against a specified frag_index or taking the highest index.
chosen_frag = None
if durable_frag_set:
if frag_index is not None:
# search the frag set to find the exact frag_index
for info in durable_frag_set:
if info['frag_index'] == frag_index:
chosen_frag = info
break
else:
# accept the first .data file if it matches requested
# frag_index, or if no specific frag_index is requested
fi = self.parse_on_disk_filename(filename)['frag_index']
if frag_index is None or frag_index == int(fi):
accept_first()
set_valid_fileset()
# else: keep searching for a .data file to match frag_index
context.setdefault('fragments', []).append(filename)
else:
# there can no longer be a matching .data file so mark what has
# been found so far as the valid fileset
discard()
set_valid_fileset()
elif ext == '.data':
# not yet found a .durable
if have_valid_fileset():
# valid fileset means we must have a newer
# .ts, so discard the older .data file
discard()
else:
# .data newer than a .durable or .ts, don't discard yet
context.setdefault('fragments_without_durable', []).append(
filename)
elif ext == '.ts':
if have_valid_fileset() or not accept_first():
# newer .data, .durable or .ts already found so discard this
discard()
if not have_valid_fileset():
# remove any .meta that may have been previously found
context.pop('.meta', None)
set_valid_fileset()
elif ext in ('.meta', '.durable'):
if have_valid_fileset() or not accept_first():
# newer .data, .durable or .ts already found so discard this
discard()
else:
# ignore unexpected files
pass
return have_valid_fileset()
chosen_frag = durable_frag_set[-1]
def _verify_on_disk_files(self, accepted_files, frag_index=None, **kwargs):
# If we successfully found a frag then set results
if chosen_frag:
results['data_info'] = chosen_frag
results['durable_frag_set'] = durable_frag_set
results['frag_sets'] = frag_sets
# Mark any isolated .durable as obsolete
if exts.get('.durable') and not durable_frag_set:
results.setdefault('obsolete', []).extend(exts['.durable'])
exts.pop('.durable')
# Fragments *may* be ready for reclaim, unless they are durable or
# at the timestamp we have just chosen for constructing the diskfile.
for frag_set in frag_sets.values():
if frag_set == durable_frag_set:
continue
results.setdefault('possible_reclaim', []).extend(frag_set)
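To trace the selection with hypothetical names (t2 newer than t1), given t2#0.data, t2#1.data, t2.durable and a stray t1#1.data:
# frag_sets == {t2: [frag #0, frag #1], t1: [frag #1]}
# durable_frag_set is the t2 set, since its timestamp matches the .durable
# with frag_index=1 the chosen frag is t2#1.data; with frag_index=None the
# highest index in the durable set, t2#1.data, is chosen
# the t1 set is non-durable, so it lands in 'possible_reclaim' and is only
# deleted by cleanup_ondisk_files once it is older than reclaim_age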
def _verify_ondisk_files(self, results, frag_index=None, **kwargs):
"""
Verify that the final combination of on disk files complies with the
erasure-coded diskfile contract.
:param accepted_files: files that have been found and accepted
:param results: files that have been found and accepted
:param frag_index: specifies a specific fragment index .data file
:returns: True if the file combination is compliant, False otherwise
"""
if not accepted_files.get('.data'):
# We may find only a .meta, which doesn't mean the on disk
# contract is broken. So we clear it to comply with
# superclass assertions.
accepted_files.pop('.meta', None)
data_file, meta_file, ts_file, durable_file = tuple(
[accepted_files.get(ext)
for ext in ('.data', '.meta', '.ts', '.durable')])
return ((data_file is None or durable_file is not None)
and (data_file is None and meta_file is None
and ts_file is None and durable_file is None)
or (ts_file is not None and data_file is None
and meta_file is None and durable_file is None)
or (data_file is not None and durable_file is not None
and ts_file is None)
or (durable_file is not None and meta_file is None
and ts_file is None))
if super(ECDiskFileManager, self)._verify_ondisk_files(
results, **kwargs):
have_data_file = results['data_file'] is not None
have_durable = results.get('durable_frag_set') is not None
return have_data_file == have_durable
return False
def _hash_suffix(self, path, reclaim_age):
"""

View File

@ -254,6 +254,7 @@ class DiskFile(object):
self._metadata = None
self._fp = None
self._filesystem = fs
self.fragments = None
def open(self):
"""
@ -421,3 +422,5 @@ class DiskFile(object):
return Timestamp(self._metadata.get('X-Timestamp'))
data_timestamp = timestamp
durable_timestamp = timestamp

View File

@ -819,7 +819,7 @@ class ObjectReconstructor(Daemon):
dev_path = self._df_router[policy].get_dev_path(
local_dev['device'])
if not dev_path:
self.logger.warn(_('%s is not mounted'),
self.logger.warning(_('%s is not mounted'),
local_dev['device'])
continue
obj_path = join(dev_path, data_dir)

View File

@ -85,10 +85,11 @@ class ObjectReplicator(Daemon):
if not self.rsync_module:
self.rsync_module = '{replication_ip}::object'
if config_true_value(conf.get('vm_test_mode', 'no')):
self.logger.warn('Option object-replicator/vm_test_mode is '
'deprecated and will be removed in a future '
'version. Update your configuration to use '
'option object-replicator/rsync_module.')
self.logger.warning('Option object-replicator/vm_test_mode '
'is deprecated and will be removed in a '
'future version. Update your '
'configuration to use option '
'object-replicator/rsync_module.')
self.rsync_module += '{replication_port}'
self.http_timeout = int(conf.get('http_timeout', 60))
self.lockup_timeout = int(conf.get('lockup_timeout', 1800))
@ -109,7 +110,7 @@ class ObjectReplicator(Daemon):
self.handoff_delete = config_auto_int_value(
conf.get('handoff_delete', 'auto'), 0)
if any((self.handoff_delete, self.handoffs_first)):
self.logger.warn('Handoff only mode is not intended for normal '
self.logger.warning('Handoff only mode is not intended for normal '
'operation, please disable handoffs_first and '
'handoff_delete before the next '
'normal rebalance')
@ -585,7 +586,8 @@ class ObjectReplicator(Daemon):
failure_dev['device'])
for failure_dev in policy.object_ring.devs
if failure_dev])
self.logger.warn(_('%s is not mounted'), local_dev['device'])
self.logger.warning(
_('%s is not mounted'), local_dev['device'])
continue
unlink_older_than(tmp_path, time.time() - self.reclaim_age)
if not os.path.exists(obj_path):
@ -701,7 +703,7 @@ class ObjectReplicator(Daemon):
self._add_failure_stats([(failure_dev['replication_ip'],
failure_dev['device'])
for failure_dev in job['nodes']])
self.logger.warn(_('%s is not mounted'), job['device'])
self.logger.warning(_('%s is not mounted'), job['device'])
continue
if not self.check_ring(job['policy'].object_ring):
self.logger.info(_("Ring change detected. Aborting "

View File

@ -558,7 +558,7 @@ class ObjectController(BaseStorageServer):
return HTTPInsufficientStorage(drive=device, request=request)
except (DiskFileNotExist, DiskFileQuarantined):
orig_metadata = {}
orig_timestamp = 0
orig_timestamp = Timestamp(0)
# Checks for If-None-Match
if request.if_none_match is not None and orig_metadata:

View File

@ -84,7 +84,7 @@ class ObjectUpdater(Daemon):
if self.mount_check and \
not ismount(os.path.join(self.devices, device)):
self.logger.increment('errors')
self.logger.warn(
self.logger.warning(
_('Skipping %s as it is not mounted'), device)
continue
while len(pids) >= self.concurrency:
@ -127,7 +127,7 @@ class ObjectUpdater(Daemon):
if self.mount_check and \
not ismount(os.path.join(self.devices, device)):
self.logger.increment('errors')
self.logger.warn(
self.logger.warning(
_('Skipping %s as it is not mounted'), device)
continue
self.object_sweep(os.path.join(self.devices, device))
@ -159,8 +159,9 @@ class ObjectUpdater(Daemon):
try:
base, policy = split_policy_string(asyncdir)
except PolicyError as e:
self.logger.warn(_('Directory %r does not map '
'to a valid policy (%s)') % (asyncdir, e))
self.logger.warning(_('Directory %r does not map '
'to a valid policy (%s)') %
(asyncdir, e))
continue
for prefix in self._listdir(async_pending):
prefix_path = os.path.join(async_pending, prefix)

View File

@ -235,17 +235,17 @@ def cors_validation(func):
# - headers provided by the user in
# x-container-meta-access-control-expose-headers
if 'Access-Control-Expose-Headers' not in resp.headers:
expose_headers = [
expose_headers = set([
'cache-control', 'content-language', 'content-type',
'expires', 'last-modified', 'pragma', 'etag',
'x-timestamp', 'x-trans-id']
'x-timestamp', 'x-trans-id'])
for header in resp.headers:
if header.startswith('X-Container-Meta') or \
header.startswith('X-Object-Meta'):
expose_headers.append(header.lower())
expose_headers.add(header.lower())
if cors_info.get('expose_headers'):
expose_headers.extend(
[header_line.strip()
expose_headers = expose_headers.union(
[header_line.strip().lower()
for header_line in
cors_info['expose_headers'].split(' ')
if header_line.strip()])
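A quick sketch of why this switched from a list to a set: duplicates arriving from both response headers and the expose_headers setting now collapse automatically (values here are illustrative):
expose_headers = set(['etag', 'x-timestamp'])
expose_headers.add('x-object-meta-color')             # from a response header
expose_headers = expose_headers.union(['ETAG'.lower(), 'x-foo'])  # from config
assert sorted(expose_headers) == ['etag', 'x-foo',
                                  'x-object-meta-color', 'x-timestamp']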
@ -941,13 +941,13 @@ class ResumingGetter(object):
_('Trying to read during GET'))
raise
except ChunkWriteTimeout:
self.app.logger.warn(
self.app.logger.warning(
_('Client did not read from proxy within %ss') %
self.app.client_timeout)
self.app.logger.increment('client_timeouts')
except GeneratorExit:
if not req.environ.get('swift.non_client_disconnect'):
self.app.logger.warn(_('Client disconnected on read'))
self.app.logger.warning(_('Client disconnected on read'))
except Exception:
self.app.logger.exception(_('Trying to send to client'))
raise

View File

@ -415,6 +415,11 @@ class BaseObjectController(Controller):
This method handles copying objects based on values set in the headers
'X-Copy-From' and 'X-Copy-From-Account'
Note that if the incoming request has some conditional headers (e.g.
'Range', 'If-Match'), the *source* object will be evaluated for these
headers, i.e. if a PUT has both 'X-Copy-From' and 'Range', Swift will
make a partial copy as a new object.
This method was added as part of the refactoring of the PUT method and
the functionality is expected to be moved to middleware
"""
@ -968,7 +973,7 @@ class ReplicatedObjectController(BaseObjectController):
msg='Object PUT exceptions after last send, '
'%(conns)s/%(nodes)s required connections')
except ChunkReadTimeout as err:
self.app.logger.warn(
self.app.logger.warning(
_('ERROR Client read timeout (%ss)'), err.seconds)
self.app.logger.increment('client_timeouts')
raise HTTPRequestTimeout(request=req)
@ -976,7 +981,7 @@ class ReplicatedObjectController(BaseObjectController):
raise
except ChunkReadError:
req.client_disconnect = True
self.app.logger.warn(
self.app.logger.warning(
_('Client disconnected without sending last chunk'))
self.app.logger.increment('client_disconnects')
raise HTTPClientDisconnect(request=req)
@ -991,7 +996,7 @@ class ReplicatedObjectController(BaseObjectController):
raise HTTPInternalServerError(request=req)
if req.content_length and bytes_transferred < req.content_length:
req.client_disconnect = True
self.app.logger.warn(
self.app.logger.warning(
_('Client disconnected without sending enough data'))
self.app.logger.increment('client_disconnects')
raise HTTPClientDisconnect(request=req)
@ -2196,7 +2201,7 @@ class ECObjectController(BaseObjectController):
if req.content_length and (
bytes_transferred < req.content_length):
req.client_disconnect = True
self.app.logger.warn(
self.app.logger.warning(
_('Client disconnected without sending enough data'))
self.app.logger.increment('client_disconnects')
raise HTTPClientDisconnect(request=req)
@ -2265,13 +2270,13 @@ class ECObjectController(BaseObjectController):
for putter in putters:
putter.wait()
except ChunkReadTimeout as err:
self.app.logger.warn(
self.app.logger.warning(
_('ERROR Client read timeout (%ss)'), err.seconds)
self.app.logger.increment('client_timeouts')
raise HTTPRequestTimeout(request=req)
except ChunkReadError:
req.client_disconnect = True
self.app.logger.warn(
self.app.logger.warning(
_('Client disconnected without sending last chunk'))
self.app.logger.increment('client_disconnects')
raise HTTPClientDisconnect(request=req)

View File

@ -229,7 +229,8 @@ class Application(object):
Check the configuration for possible errors
"""
if self._read_affinity and self.sorting_method != 'affinity':
self.logger.warn("sorting_method is set to '%s', not 'affinity'; "
self.logger.warning(
"sorting_method is set to '%s', not 'affinity'; "
"read_affinity setting will have no effect." %
self.sorting_method)

View File

@ -10,6 +10,7 @@ nosexcover
nosehtmloutput
oslosphinx
sphinx>=1.1.2,<1.2
os-testr>=0.4.1
mock>=1.0
python-swiftclient
python-keystoneclient>=1.3.0

View File

@ -27,11 +27,11 @@ import functools
import random
from time import time, sleep
from nose import SkipTest
from contextlib import closing
from gzip import GzipFile
from shutil import rmtree
from tempfile import mkdtemp
from unittest2 import SkipTest
from six.moves.configparser import ConfigParser, NoSectionError
from six.moves import http_client
@ -109,7 +109,7 @@ orig_hash_path_suff_pref = ('', '')
orig_swift_conf_name = None
in_process = False
_testdir = _test_servers = _test_coros = None
_testdir = _test_servers = _test_coros = _test_socks = None
policy_specified = None
@ -290,6 +290,7 @@ def in_process_setup(the_object_server=object_server):
_info('IN-PROCESS SERVERS IN USE FOR FUNCTIONAL TESTS')
_info('Using object_server class: %s' % the_object_server.__name__)
conf_src_dir = os.environ.get('SWIFT_TEST_IN_PROCESS_CONF_DIR')
show_debug_logs = os.environ.get('SWIFT_TEST_DEBUG_LOGS')
if conf_src_dir is not None:
if not os.path.isdir(conf_src_dir):
@ -339,10 +340,13 @@ def in_process_setup(the_object_server=object_server):
orig_hash_path_suff_pref = utils.HASH_PATH_PREFIX, utils.HASH_PATH_SUFFIX
utils.validate_hash_conf()
global _test_socks
_test_socks = []
# We create the proxy server listening socket to get its port number so
# that we can add it as the "auth_port" value for the functional test
# clients.
prolis = eventlet.listen(('localhost', 0))
_test_socks.append(prolis)
# The following set of configuration values is used both for the
# functional test frame work and for the various proxy, account, container
@ -388,6 +392,7 @@ def in_process_setup(the_object_server=object_server):
acc2lis = eventlet.listen(('localhost', 0))
con1lis = eventlet.listen(('localhost', 0))
con2lis = eventlet.listen(('localhost', 0))
_test_socks += [acc1lis, acc2lis, con1lis, con2lis] + obj_sockets
account_ring_path = os.path.join(_testdir, 'account.ring.gz')
with closing(GzipFile(account_ring_path, 'wb')) as f:
@ -416,22 +421,29 @@ def in_process_setup(the_object_server=object_server):
# Default to only 4 seconds for in-process functional test runs
eventlet.wsgi.WRITE_TIMEOUT = 4
def get_logger_name(name):
if show_debug_logs:
return debug_logger(name)
else:
return None
acc1srv = account_server.AccountController(
config, logger=debug_logger('acct1'))
config, logger=get_logger_name('acct1'))
acc2srv = account_server.AccountController(
config, logger=debug_logger('acct2'))
config, logger=get_logger_name('acct2'))
con1srv = container_server.ContainerController(
config, logger=debug_logger('cont1'))
config, logger=get_logger_name('cont1'))
con2srv = container_server.ContainerController(
config, logger=debug_logger('cont2'))
config, logger=get_logger_name('cont2'))
objsrvs = [
(obj_sockets[index],
the_object_server.ObjectController(
config, logger=debug_logger('obj%d' % (index + 1))))
config, logger=get_logger_name('obj%d' % (index + 1))))
for index in range(len(obj_sockets))
]
if show_debug_logs:
logger = debug_logger('proxy')
def get_logger(name, *args, **kwargs):
@ -446,6 +458,8 @@ def in_process_setup(the_object_server=object_server):
raise InProcessException(e)
nl = utils.NullLogger()
global proxy_srv
proxy_srv = prolis
prospa = eventlet.spawn(eventlet.wsgi.server, prolis, app, nl)
acc1spa = eventlet.spawn(eventlet.wsgi.server, acc1lis, acc1srv, nl)
acc2spa = eventlet.spawn(eventlet.wsgi.server, acc2lis, acc2srv, nl)
@ -487,6 +501,7 @@ def get_cluster_info():
# We'll update those constraints based on what the /info API provides, if
# anything.
global cluster_info
global config
try:
conn = Connection(config)
conn.authenticate()
@ -536,6 +551,7 @@ def setup_package():
global in_process
global config
if use_in_process:
# Explicitly set to True, so barrel on ahead with in-process
# functional test setup.
@ -722,7 +738,6 @@ def setup_package():
% policy_specified)
raise Exception('Failed to find specified policy %s'
% policy_specified)
get_cluster_info()
@ -731,16 +746,21 @@ def teardown_package():
locale.setlocale(locale.LC_COLLATE, orig_collate)
# clean up containers and objects left behind after running tests
global config
conn = Connection(config)
conn.authenticate()
account = Account(conn, config.get('account', config['username']))
account.delete_containers()
global in_process
global _test_socks
if in_process:
try:
for server in _test_coros:
for i, server in enumerate(_test_coros):
server.kill()
if not server.dead:
# kill it from the socket level
_test_socks[i].close()
except Exception:
pass
try:
@ -751,6 +771,7 @@ def teardown_package():
orig_hash_path_suff_pref
utils.SWIFT_CONF_FILE = orig_swift_conf_name
constraints.reload_constraints()
reset_globals()
class AuthError(Exception):
@ -768,6 +789,17 @@ parsed = [None, None, None, None, None]
conn = [None, None, None, None, None]
def reset_globals():
global url, token, service_token, parsed, conn, config
url = [None, None, None, None, None]
token = [None, None, None, None, None]
service_token = [None, None, None, None, None]
parsed = [None, None, None, None, None]
conn = [None, None, None, None, None]
if config:
config = {}
def connection(url):
if has_insecure:
parsed_url, http_conn = http_connection(url, insecure=insecure)

View File

@ -20,7 +20,7 @@ import random
import socket
import time
from nose import SkipTest
from unittest2 import SkipTest
from xml.dom import minidom
import six

View File

@ -15,10 +15,10 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import unittest2
import json
from uuid import uuid4
from nose import SkipTest
from unittest2 import SkipTest
from string import letters
from six.moves import range
@ -29,7 +29,15 @@ from test.functional import check_response, retry, requires_acls, \
import test.functional as tf
class TestAccount(unittest.TestCase):
def setUpModule():
tf.setup_package()
def tearDownModule():
tf.teardown_package()
class TestAccount(unittest2.TestCase):
def setUp(self):
self.max_meta_count = load_constraint('max_meta_count')
@ -854,7 +862,7 @@ class TestAccount(unittest.TestCase):
self.assertEqual(resp.status, 400)
class TestAccountInNonDefaultDomain(unittest.TestCase):
class TestAccountInNonDefaultDomain(unittest2.TestCase):
def setUp(self):
if tf.skip or tf.skip2 or tf.skip_if_not_v3:
raise SkipTest('AUTH VERSION 3 SPECIFIC TEST')
@ -883,4 +891,4 @@ class TestAccountInNonDefaultDomain(unittest.TestCase):
if __name__ == '__main__':
unittest.main()
unittest2.main()

View File

@ -16,8 +16,8 @@
# limitations under the License.
import json
import unittest
from nose import SkipTest
import unittest2
from unittest2 import SkipTest
from uuid import uuid4
from test.functional import check_response, retry, requires_acls, \
@ -27,7 +27,15 @@ import test.functional as tf
from six.moves import range
class TestContainer(unittest.TestCase):
def setUpModule():
tf.setup_package()
def tearDownModule():
tf.teardown_package()
class TestContainer(unittest2.TestCase):
def setUp(self):
if tf.skip:
@ -1551,7 +1559,7 @@ class TestContainer(unittest.TestCase):
policy['name'])
class BaseTestContainerACLs(unittest.TestCase):
class BaseTestContainerACLs(unittest2.TestCase):
# subclasses can change the account in which container
# is created/deleted by setUp/tearDown
account = 1
@ -1726,4 +1734,4 @@ class TestContainerACLsAccount4(BaseTestContainerACLs):
if __name__ == '__main__':
unittest.main()
unittest2.main()

View File

@ -16,8 +16,8 @@
# limitations under the License.
import json
import unittest
from nose import SkipTest
import unittest2
from unittest2 import SkipTest
from uuid import uuid4
from six.moves import range
@ -27,7 +27,15 @@ from test.functional import check_response, retry, requires_acls, \
import test.functional as tf
class TestObject(unittest.TestCase):
def setUpModule():
tf.setup_package()
def tearDownModule():
tf.teardown_package()
class TestObject(unittest2.TestCase):
def setUp(self):
if tf.skip:
@ -1246,4 +1254,4 @@ class TestObject(unittest.TestCase):
if __name__ == '__main__':
unittest.main()
unittest2.main()

View File

@ -25,11 +25,11 @@ import random
import six
from six.moves import urllib
import time
import unittest
import unittest2
import uuid
from copy import deepcopy
import eventlet
from nose import SkipTest
from unittest2 import SkipTest
from swift.common.http import is_success, is_client_error
from test.functional import normalized_urls, load_constraint, cluster_info
@ -39,6 +39,14 @@ from test.functional.swift_test_client import Account, Connection, File, \
ResponseError
def setUpModule():
tf.setup_package()
def tearDownModule():
tf.teardown_package()
class Utils(object):
@classmethod
def create_ascii_name(cls, length=None):
@ -62,7 +70,7 @@ class Utils(object):
create_name = create_ascii_name
class Base(unittest.TestCase):
class Base(unittest2.TestCase):
def setUp(self):
cls = type(self)
if not cls.set_up:
@ -2178,13 +2186,22 @@ class TestDloEnv(object):
def setUp(cls):
cls.conn = Connection(tf.config)
cls.conn.authenticate()
config2 = tf.config.copy()
config2['username'] = tf.config['username3']
config2['password'] = tf.config['password3']
cls.conn2 = Connection(config2)
cls.conn2.authenticate()
cls.account = Account(cls.conn, tf.config.get('account',
tf.config['username']))
cls.account.delete_containers()
cls.container = cls.account.container(Utils.create_name())
cls.container2 = cls.account.container(Utils.create_name())
if not cls.container.create():
for cont in (cls.container, cls.container2):
if not cont.create():
raise ResponseError(cls.conn.response)
# avoid getting a prefix that stops halfway through an encoded
@ -2199,13 +2216,18 @@ class TestDloEnv(object):
file_item = cls.container.file("%s/seg_upper%s" % (prefix, letter))
file_item.write(letter.upper() * 10)
for letter in ('f', 'g', 'h', 'i', 'j'):
file_item = cls.container2.file("%s/seg_lower%s" %
(prefix, letter))
file_item.write(letter * 10)
man1 = cls.container.file("man1")
man1.write('man1-contents',
hdrs={"X-Object-Manifest": "%s/%s/seg_lower" %
(cls.container.name, prefix)})
man1 = cls.container.file("man2")
man1.write('man2-contents',
man2 = cls.container.file("man2")
man2.write('man2-contents',
hdrs={"X-Object-Manifest": "%s/%s/seg_upper" %
(cls.container.name, prefix)})
@ -2214,6 +2236,12 @@ class TestDloEnv(object):
hdrs={"X-Object-Manifest": "%s/%s/seg" %
(cls.container.name, prefix)})
mancont2 = cls.container.file("mancont2")
mancont2.write(
'mancont2-contents',
hdrs={"X-Object-Manifest": "%s/%s/seg_lower" %
(cls.container2.name, prefix)})
class TestDlo(Base):
env = TestDloEnv
@ -2375,6 +2403,31 @@ class TestDlo(Base):
manifest.info(hdrs={'If-None-Match': "not-%s" % etag})
self.assert_status(200)
def test_dlo_referer_on_segment_container(self):
# First, the read by account2 (test3) should fail
headers = {'X-Auth-Token': self.env.conn2.storage_token,
'Referer': 'http://blah.example.com'}
dlo_file = self.env.container.file("mancont2")
self.assertRaises(ResponseError, dlo_file.read,
hdrs=headers)
self.assert_status(403)
# Now set the referer on the dlo container only
referer_metadata = {'X-Container-Read': '.r:*.example.com,.rlistings'}
self.env.container.update_metadata(referer_metadata)
self.assertRaises(ResponseError, dlo_file.read,
hdrs=headers)
self.assert_status(403)
# Finally set the referer on the segment container
self.env.container2.update_metadata(referer_metadata)
contents = dlo_file.read(hdrs=headers)
self.assertEqual(
contents,
"ffffffffffgggggggggghhhhhhhhhhiiiiiiiiiijjjjjjjjjj")
class TestDloUTF8(Base2, TestDlo):
set_up = False
@ -2425,6 +2478,15 @@ class TestFileComparison(Base):
self.assertRaises(ResponseError, file_item.read, hdrs=hdrs)
self.assert_status(412)
def testIfMatchMultipleEtags(self):
for file_item in self.env.files:
hdrs = {'If-Match': '"bogus1", "%s", "bogus2"' % file_item.md5}
self.assertTrue(file_item.read(hdrs=hdrs))
hdrs = {'If-Match': '"bogus1", "bogus2", "bogus3"'}
self.assertRaises(ResponseError, file_item.read, hdrs=hdrs)
self.assert_status(412)
def testIfNoneMatch(self):
for file_item in self.env.files:
hdrs = {'If-None-Match': 'bogus'}
@ -2434,6 +2496,16 @@ class TestFileComparison(Base):
self.assertRaises(ResponseError, file_item.read, hdrs=hdrs)
self.assert_status(304)
def testIfNoneMatchMultipleEtags(self):
for file_item in self.env.files:
hdrs = {'If-None-Match': '"bogus1", "bogus2", "bogus3"'}
self.assertTrue(file_item.read(hdrs=hdrs))
hdrs = {'If-None-Match':
'"bogus1", "bogus2", "%s"' % file_item.md5}
self.assertRaises(ResponseError, file_item.read, hdrs=hdrs)
self.assert_status(304)
def testIfModifiedSince(self):
for file_item in self.env.files:
hdrs = {'If-Modified-Since': self.env.time_old_f1}
@ -2516,6 +2588,11 @@ class TestSloEnv(object):
cls.conn2.authenticate()
cls.account2 = cls.conn2.get_account()
cls.account2.delete_containers()
config3 = tf.config.copy()
config3['username'] = tf.config['username3']
config3['password'] = tf.config['password3']
cls.conn3 = Connection(config3)
cls.conn3.authenticate()
if cls.slo_enabled is None:
cls.slo_enabled = 'slo' in cluster_info
@ -2527,8 +2604,10 @@ class TestSloEnv(object):
cls.account.delete_containers()
cls.container = cls.account.container(Utils.create_name())
cls.container2 = cls.account.container(Utils.create_name())
if not cls.container.create():
for cont in (cls.container, cls.container2):
if not cont.create():
raise ResponseError(cls.conn.response)
cls.seg_info = seg_info = {}
@ -2552,6 +2631,14 @@ class TestSloEnv(object):
seg_info['seg_e']]),
parms={'multipart-manifest': 'put'})
# Put the same manifest in the container2
file_item = cls.container2.file("manifest-abcde")
file_item.write(
json.dumps([seg_info['seg_a'], seg_info['seg_b'],
seg_info['seg_c'], seg_info['seg_d'],
seg_info['seg_e']]),
parms={'multipart-manifest': 'put'})
file_item = cls.container.file('manifest-cd')
cd_json = json.dumps([seg_info['seg_c'], seg_info['seg_d']])
file_item.write(cd_json, parms={'multipart-manifest': 'put'})
@ -3090,6 +3177,33 @@ class TestSlo(Base):
manifest.info(hdrs={'If-None-Match': "not-%s" % etag})
self.assert_status(200)
def test_slo_referer_on_segment_container(self):
# First, the read by account2 (test3) should fail
headers = {'X-Auth-Token': self.env.conn3.storage_token,
'Referer': 'http://blah.example.com'}
slo_file = self.env.container2.file('manifest-abcde')
self.assertRaises(ResponseError, slo_file.read,
hdrs=headers)
self.assert_status(403)
# Now set the referer on the slo container only
referer_metadata = {'X-Container-Read': '.r:*.example.com,.rlistings'}
self.env.container2.update_metadata(referer_metadata)
self.assertRaises(ResponseError, slo_file.read,
hdrs=headers)
self.assert_status(409)
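# (the manifest read is now authorized, but fetching the segments
# from the other container still fails; a segment error part-way
# through a large-object download surfaces as 409 Conflict, not 403)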
# Finally set the referer on the segment container
self.env.container.update_metadata(referer_metadata)
contents = slo_file.read(hdrs=headers)
self.assertEqual(4 * 1024 * 1024 + 1, len(contents))
self.assertEqual('a', contents[0])
self.assertEqual('a', contents[1024 * 1024 - 1])
self.assertEqual('b', contents[1024 * 1024])
self.assertEqual('d', contents[-2])
self.assertEqual('e', contents[-1])
class TestSloUTF8(Base2, TestSlo):
set_up = False
@ -4147,7 +4261,7 @@ class TestSloTempurlUTF8(Base2, TestSloTempurl):
set_up = False
class TestServiceToken(unittest.TestCase):
class TestServiceToken(unittest2.TestCase):
def setUp(self):
if tf.skip_service_tokens:
@ -4315,4 +4429,4 @@ class TestServiceToken(unittest.TestCase):
if __name__ == '__main__':
unittest.main()
unittest2.main()

View File

@ -18,8 +18,9 @@ from nose import SkipTest
import unittest
from six.moves.urllib.parse import urlparse
from swiftclient import client
from swiftclient import client, ClientException
from swift.common.http import HTTP_NOT_FOUND
from swift.common.manager import Manager
from test.probe.common import ReplProbeTest, ENABLED_POLICIES
@ -49,25 +50,27 @@ class TestContainerSync(ReplProbeTest):
super(TestContainerSync, self).setUp()
self.realm, self.cluster = get_current_realm_cluster(self.url)
def test_sync(self):
base_headers = {'X-Container-Sync-Key': 'secret'}
def _setup_synced_containers(self, skey='secret', dkey='secret'):
# setup dest container
dest_container = 'dest-container-%s' % uuid.uuid4()
dest_headers = base_headers.copy()
dest_headers = {}
dest_policy = None
if len(ENABLED_POLICIES) > 1:
dest_policy = random.choice(ENABLED_POLICIES)
dest_headers['X-Storage-Policy'] = dest_policy.name
if dkey is not None:
dest_headers['X-Container-Sync-Key'] = dkey
client.put_container(self.url, self.token, dest_container,
headers=dest_headers)
# setup source container
source_container = 'source-container-%s' % uuid.uuid4()
source_headers = base_headers.copy()
source_headers = {}
sync_to = '//%s/%s/%s/%s' % (self.realm, self.cluster, self.account,
dest_container)
source_headers['X-Container-Sync-To'] = sync_to
if skey is not None:
source_headers['X-Container-Sync-Key'] = skey
if dest_policy:
source_policy = random.choice([p for p in ENABLED_POLICIES
if p is not dest_policy])
@ -75,6 +78,11 @@ class TestContainerSync(ReplProbeTest):
client.put_container(self.url, self.token, source_container,
headers=source_headers)
return source_container, dest_container
def test_sync(self):
source_container, dest_container = self._setup_synced_containers()
# upload to source
object_name = 'object-%s' % uuid.uuid4()
client.put_object(self.url, self.token, source_container, object_name,
@ -83,11 +91,63 @@ class TestContainerSync(ReplProbeTest):
# cycle container-sync
Manager(['container-sync']).once()
# retrieve from sync'd container
headers, body = client.get_object(self.url, self.token,
_junk, body = client.get_object(self.url, self.token,
dest_container, object_name)
self.assertEqual(body, 'test-body')
def test_sync_lazy_skey(self):
# Create synced containers, but with no key at source
source_container, dest_container =\
self._setup_synced_containers(None, 'secret')
# upload to source
object_name = 'object-%s' % uuid.uuid4()
client.put_object(self.url, self.token, source_container, object_name,
'test-body')
# cycle container-sync, nothing should happen
Manager(['container-sync']).once()
with self.assertRaises(ClientException) as err:
_junk, body = client.get_object(self.url, self.token,
dest_container, object_name)
self.assertEqual(err.exception.http_status, HTTP_NOT_FOUND)
# amend source key
source_headers = {'X-Container-Sync-Key': 'secret'}
client.put_container(self.url, self.token, source_container,
headers=source_headers)
# cycle container-sync, should replicate
Manager(['container-sync']).once()
_junk, body = client.get_object(self.url, self.token,
dest_container, object_name)
self.assertEqual(body, 'test-body')
def test_sync_lazy_dkey(self):
# Create synced containers, but with no key at dest
source_container, dest_container =\
self._setup_synced_containers('secret', None)
# upload to source
object_name = 'object-%s' % uuid.uuid4()
client.put_object(self.url, self.token, source_container, object_name,
'test-body')
# cycle container-sync, nothing should happen
Manager(['container-sync']).once()
with self.assertRaises(ClientException) as err:
_junk, body = client.get_object(self.url, self.token,
dest_container, object_name)
self.assertEqual(err.exception.http_status, HTTP_NOT_FOUND)
# amend dest key
dest_headers = {'X-Container-Sync-Key': 'secret'}
client.put_container(self.url, self.token, dest_container,
headers=dest_headers)
# cycle container-sync, should replicate
Manager(['container-sync']).once()
_junk, body = client.get_object(self.url, self.token,
dest_container, object_name)
self.assertEqual(body, 'test-body')
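For orientation, a minimal sketch of the two-container wiring these probe tests build via _setup_synced_containers; the endpoint, realm, and cluster values below are placeholders for whatever get_current_realm_cluster() returns in a real run.
from swiftclient import client
url = 'http://127.0.0.1:8080/v1/AUTH_test'   # placeholder endpoint
token = 'AUTH_tk-placeholder'                # placeholder token
realm, cluster, account = 'realm1', 'clusterA', 'AUTH_test'
# the destination must hold the sync key before objects can land there
client.put_container(url, token, 'dest-container',
                     headers={'X-Container-Sync-Key': 'secret'})
# the source names the destination and carries the matching key
sync_to = '//%s/%s/%s/%s' % (realm, cluster, account, 'dest-container')
client.put_container(url, token, 'source-container',
                     headers={'X-Container-Sync-To': sync_to,
                              'X-Container-Sync-Key': 'secret'})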
if __name__ == "__main__":
unittest.main()

View File

@ -203,13 +203,6 @@ class FakeRing(Ring):
def __init__(self, replicas=3, max_more_nodes=0, part_power=0,
base_port=1000):
"""
:param part_power: make part calculation based on the path
If you set a part_power when you setup your FakeRing the parts you get
out of ring methods will actually be based on the path - otherwise we
exercise the real ring code, but ignore the result and return 1.
"""
self._base_port = base_port
self.max_more_nodes = max_more_nodes
self._part_shift = 32 - part_power
@ -477,6 +470,12 @@ class UnmockTimeModule(object):
logging.time = UnmockTimeModule()
class WARN_DEPRECATED(Exception):
def __init__(self, msg):
self.msg = msg
print(self.msg)
class FakeLogger(logging.Logger, object):
# a thread safe fake logger
@ -499,6 +498,9 @@ class FakeLogger(logging.Logger, object):
NOTICE: 'notice',
}
def warn(self, *args, **kwargs):
raise WARN_DEPRECATED("Deprecated Method warn use warning instead")
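In other words, any test that still calls the deprecated warn() spelling now fails loudly. A tiny usage sketch, assuming both names are importable from test.unit as this hunk defines them:
from test.unit import FakeLogger, WARN_DEPRECATED
logger = FakeLogger()
logger.warning('supported spelling')     # fine
try:
    logger.warn('deprecated spelling')   # raises immediately
except WARN_DEPRECATED:
    pass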
def notice(self, msg, *args, **kwargs):
"""
Convenience function for syslog priority LOG_NOTICE. The python

View File

@ -37,7 +37,7 @@ from test.unit import patch_policies, with_tempdir, make_timestamp_iter
from swift.common.db import DatabaseConnectionError
from swift.common.storage_policy import StoragePolicy, POLICIES
from test.unit.common.test_db import TestExampleBroker
from test.unit.common import test_db
@patch_policies
@ -979,7 +979,7 @@ def premetadata_create_account_stat_table(self, conn, put_timestamp):
put_timestamp))
class TestCommonAccountBroker(TestExampleBroker):
class TestCommonAccountBroker(test_db.TestExampleBroker):
broker_class = AccountBroker

View File

@ -164,17 +164,17 @@ class TestRecon(unittest.TestCase):
self.assertEqual(stats.get('perc_none'), 25.0)
def test_ptime(self):
with mock.patch('time.localtime') as mock_localtime:
mock_localtime.return_value = time.struct_time(
with mock.patch('time.gmtime') as mock_gmtime:
mock_gmtime.return_value = time.struct_time(
(2013, 12, 17, 10, 0, 0, 1, 351, 0))
timestamp = self.recon_instance._ptime(1387274400)
self.assertEqual(timestamp, "2013-12-17 10:00:00")
mock_localtime.assert_called_with(1387274400)
mock_gmtime.assert_called_with(1387274400)
timestamp2 = self.recon_instance._ptime()
self.assertEqual(timestamp2, "2013-12-17 10:00:00")
mock_localtime.assert_called_with()
mock_gmtime.assert_called_with()
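The switch from localtime to gmtime is what makes the expected string stable: gmtime is timezone-independent, so the assertion no longer depends on the TZ of the machine running the tests. A stdlib-only illustration:
import time
epoch = 1387274400
# always '2013-12-17 10:00:00', on any machine
print(time.strftime('%Y-%m-%d %H:%M:%S', time.gmtime(epoch)))
# varies with the machine's TZ setting, which made the old test flaky
print(time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(epoch)))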
def test_get_devices(self):
ringbuilder = builder.RingBuilder(2, 3, 1)
@ -750,10 +750,6 @@ class TestReconCommands(unittest.TestCase):
mock.call('1/2 hosts matched, 0 error[s] while checking hosts.'),
]
def mock_localtime(*args, **kwargs):
return time.gmtime(*args, **kwargs)
with mock.patch("time.localtime", mock_localtime):
cli.time_check([('127.0.0.1', 6010), ('127.0.0.1', 6020)])
# We need any_order=True because the order of calls depends on the dict

View File

@ -1739,7 +1739,7 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin):
"64 partitions, 3.000000 replicas, 4 regions, 4 zones, " \
"4 devices, 100.00 balance, 0.00 dispersion\n" \
"The minimum number of hours before a partition can be " \
"reassigned is 1\n" \
"reassigned is 1 (0:00:00 remaining)\n" \
"The overload factor is 0.00%% (0.000000)\n" \
"Devices: id region zone ip address port " \
"replication ip replication port name weight " \
@ -1796,6 +1796,7 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin):
ring = RingBuilder.load(self.tmpfile)
ring.set_dev_weight(3, 0.0)
ring.rebalance()
ring.pretend_min_part_hours_passed()
ring.remove_dev(3)
ring.save(self.tmpfile)
@ -1806,6 +1807,64 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin):
self.assertTrue(ring.validate())
self.assertEqual(ring.devs[3], None)
def test_rebalance_resets_time_remaining(self):
self.create_sample_ring()
ring = RingBuilder.load(self.tmpfile)
time_path = 'swift.common.ring.builder.time'
argv = ["", self.tmpfile, "rebalance", "3"]
time = 0
# first rebalance, should have 1 hour left before next rebalance
time += 3600
with mock.patch(time_path, return_value=time):
self.assertEqual(ring.min_part_seconds_left, 0)
self.assertRaises(SystemExit, ringbuilder.main, argv)
ring = RingBuilder.load(self.tmpfile)
self.assertEqual(ring.min_part_seconds_left, 3600)
# min part hours passed, change ring and save for rebalance
ring.set_dev_weight(0, ring.devs[0]['weight'] * 2)
ring.save(self.tmpfile)
# second rebalance, should have 1 hour left
time += 3600
with mock.patch(time_path, return_value=time):
self.assertEqual(ring.min_part_seconds_left, 0)
self.assertRaises(SystemExit, ringbuilder.main, argv)
ring = RingBuilder.load(self.tmpfile)
self.assertTrue(ring.min_part_seconds_left, 3600)
def test_rebalance_failure_does_not_reset_last_moves_epoch(self):
ring = RingBuilder(8, 3, 1)
ring.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 6010, 'device': 'sda1'})
ring.add_dev({'id': 1, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 6020, 'device': 'sdb1'})
ring.add_dev({'id': 2, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 6030, 'device': 'sdc1'})
time_path = 'swift.common.ring.builder.time'
argv = ["", self.tmpfile, "rebalance", "3"]
with mock.patch(time_path, return_value=0):
ring.rebalance()
ring.save(self.tmpfile)
# min part hours not passed
with mock.patch(time_path, return_value=(3600 * 0.6)):
self.assertRaises(SystemExit, ringbuilder.main, argv)
ring = RingBuilder.load(self.tmpfile)
self.assertEqual(ring.min_part_seconds_left, 3600 * 0.4)
ring.save(self.tmpfile)
# min part hours passed, no partitions need to be moved
with mock.patch(time_path, return_value=(3600 * 1.5)):
self.assertRaises(SystemExit, ringbuilder.main, argv)
ring = RingBuilder.load(self.tmpfile)
self.assertEqual(ring.min_part_seconds_left, 0)
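A rough model of the bookkeeping these two tests pin down; min_part_seconds_left_model below is a hypothetical helper for illustration, not the RingBuilder implementation, but it reproduces the arithmetic the assertions check:
def min_part_seconds_left_model(min_part_hours, last_moves_epoch, now):
    # seconds until partitions may move again; 0 once the window has passed
    return max(min_part_hours * 3600 - (now - last_moves_epoch), 0)
# a failed rebalance at t=0.6h does not reset the epoch, so 0.4h remains
assert min_part_seconds_left_model(1, 0, 3600 * 0.6) == 3600 * 0.4
# once min_part_hours have elapsed, nothing remains
assert min_part_seconds_left_model(1, 0, 3600 * 1.5) == 0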
def test_rebalance_with_seed(self):
self.create_sample_ring()
# Test rebalance using explicit seed parameter

View File

@ -56,7 +56,7 @@ class FakeSwift(object):
self.container_ring = FakeRing()
self.get_object_ring = lambda policy_index: FakeRing()
def _get_response(self, method, path):
def _find_response(self, method, path):
resp = self._responses[(method, path)]
if isinstance(resp, list):
try:
@ -84,16 +84,17 @@ class FakeSwift(object):
self.swift_sources.append(env.get('swift.source'))
try:
resp_class, raw_headers, body = self._get_response(method, path)
resp_class, raw_headers, body = self._find_response(method, path)
headers = swob.HeaderKeyDict(raw_headers)
except KeyError:
if (env.get('QUERY_STRING')
and (method, env['PATH_INFO']) in self._responses):
resp_class, raw_headers, body = self._get_response(
resp_class, raw_headers, body = self._find_response(
method, env['PATH_INFO'])
headers = swob.HeaderKeyDict(raw_headers)
elif method == 'HEAD' and ('GET', path) in self._responses:
resp_class, raw_headers, body = self._get_response('GET', path)
resp_class, raw_headers, body = self._find_response(
'GET', path)
body = None
headers = swob.HeaderKeyDict(raw_headers)
elif method == 'GET' and obj and path in self.uploaded:

View File

@ -26,7 +26,7 @@ from swift.common import swob, utils
from swift.common.exceptions import ListingIterError, SegmentError
from swift.common.middleware import slo
from swift.common.swob import Request, Response, HTTPException
from swift.common.utils import quote, closing_if_possible
from swift.common.utils import quote, closing_if_possible, close_if_possible
from test.unit.common.middleware.helpers import FakeSwift
@ -55,8 +55,8 @@ def md5hex(s):
class SloTestCase(unittest.TestCase):
def setUp(self):
self.app = FakeSwift()
self.slo = slo.filter_factory({})(self.app)
self.slo.min_segment_size = 1
slo_conf = {'rate_limit_under_size': '0'}
self.slo = slo.filter_factory(slo_conf)(self.app)
self.slo.logger = self.app.logger
def call_app(self, req, app=None, expect_exception=False):
@ -120,18 +120,14 @@ class TestSloMiddleware(SloTestCase):
resp.startswith('X-Static-Large-Object is a reserved header'))
def _put_bogus_slo(self, manifest_text,
manifest_path='/v1/a/c/the-manifest',
min_segment_size=1):
manifest_path='/v1/a/c/the-manifest'):
with self.assertRaises(HTTPException) as catcher:
slo.parse_and_validate_input(manifest_text, manifest_path,
min_segment_size)
slo.parse_and_validate_input(manifest_text, manifest_path)
self.assertEqual(400, catcher.exception.status_int)
return catcher.exception.body
def _put_slo(self, manifest_text, manifest_path='/v1/a/c/the-manifest',
min_segment_size=1):
return slo.parse_and_validate_input(manifest_text, manifest_path,
min_segment_size)
def _put_slo(self, manifest_text, manifest_path='/v1/a/c/the-manifest'):
return slo.parse_and_validate_input(manifest_text, manifest_path)
def test_bogus_input(self):
self.assertEqual('Manifest must be valid JSON.\n',
@ -248,19 +244,18 @@ class TestSloMiddleware(SloTestCase):
def test_bogus_input_undersize_segment(self):
self.assertEqual(
"Index 1: too small; each segment, except the last, "
"must be at least 1000 bytes.\n"
"Index 2: too small; each segment, except the last, "
"must be at least 1000 bytes.\n",
"Index 1: too small; each segment "
"must be at least 1 byte.\n"
"Index 2: too small; each segment "
"must be at least 1 byte.\n",
self._put_bogus_slo(
json.dumps([
{'path': u'/c/s1', 'etag': 'a', 'size_bytes': 1000},
{'path': u'/c/s2', 'etag': 'b', 'size_bytes': 999},
{'path': u'/c/s3', 'etag': 'c', 'size_bytes': 998},
{'path': u'/c/s1', 'etag': 'a', 'size_bytes': 1},
{'path': u'/c/s2', 'etag': 'b', 'size_bytes': 0},
{'path': u'/c/s3', 'etag': 'c', 'size_bytes': 0},
# No error for this one since size_bytes is unspecified
{'path': u'/c/s4', 'etag': 'd', 'size_bytes': None},
{'path': u'/c/s5', 'etag': 'e', 'size_bytes': 996}]),
min_segment_size=1000))
{'path': u'/c/s5', 'etag': 'e', 'size_bytes': 1000}])))
def test_valid_input(self):
data = json.dumps(
@ -268,19 +263,19 @@ class TestSloMiddleware(SloTestCase):
'size_bytes': 100}])
self.assertEqual(
'/cont/object',
slo.parse_and_validate_input(data, '/v1/a/cont/man', 1)[0]['path'])
slo.parse_and_validate_input(data, '/v1/a/cont/man')[0]['path'])
data = json.dumps(
[{'path': '/cont/object', 'etag': 'etagoftheobjectsegment',
'size_bytes': 100, 'range': '0-40'}])
parsed = slo.parse_and_validate_input(data, '/v1/a/cont/man', 1)
parsed = slo.parse_and_validate_input(data, '/v1/a/cont/man')
self.assertEqual('/cont/object', parsed[0]['path'])
self.assertEqual([(0, 40)], parsed[0]['range'].ranges)
data = json.dumps(
[{'path': '/cont/object', 'etag': 'etagoftheobjectsegment',
'size_bytes': None, 'range': '0-40'}])
parsed = slo.parse_and_validate_input(data, '/v1/a/cont/man', 1)
parsed = slo.parse_and_validate_input(data, '/v1/a/cont/man')
self.assertEqual('/cont/object', parsed[0]['path'])
self.assertEqual(None, parsed[0]['size_bytes'])
self.assertEqual([(0, 40)], parsed[0]['range'].ranges)
@ -316,6 +311,11 @@ class TestSloPutManifest(SloTestCase):
swob.HTTPOk,
{'Content-Length': '10', 'Etag': 'etagoftheobjectsegment'},
None)
self.app.register(
'HEAD', '/v1/AUTH_test/cont/empty_object',
swob.HTTPOk,
{'Content-Length': '0', 'Etag': 'etagoftheobjectsegment'},
None)
self.app.register(
'HEAD', u'/v1/AUTH_test/cont/あ_1',
swob.HTTPOk,
@ -340,11 +340,17 @@ class TestSloPutManifest(SloTestCase):
{'Content-Length': '2', 'Etag': 'b',
'Last-Modified': 'Fri, 01 Feb 2012 20:38:36 GMT'},
None)
_manifest_json = json.dumps(
[{'name': '/checktest/a_5', 'hash': md5hex("a" * 5),
'content_type': 'text/plain', 'bytes': '5'}])
self.app.register(
'GET', '/v1/AUTH_test/checktest/slob',
swob.HTTPOk,
{'X-Static-Large-Object': 'true', 'Etag': 'slob-etag'},
None)
{'X-Static-Large-Object': 'true', 'Etag': 'slob-etag',
'Content-Type': 'cat/picture;swift_bytes=12345',
'Content-Length': len(_manifest_json)},
_manifest_json)
self.app.register(
'PUT', '/v1/AUTH_test/checktest/man_3', swob.HTTPCreated, {}, None)
@ -367,21 +373,6 @@ class TestSloPutManifest(SloTestCase):
pass
self.assertEqual(e.status_int, 413)
with patch.object(self.slo, 'min_segment_size', 1000):
test_json_data_2obj = json.dumps(
[{'path': '/cont/small_object1',
'etag': 'etagoftheobjectsegment',
'size_bytes': 10},
{'path': '/cont/small_object2',
'etag': 'etagoftheobjectsegment',
'size_bytes': 10}])
req = Request.blank('/v1/a/c/o', body=test_json_data_2obj)
try:
self.slo.handle_multipart_put(req, fake_start_response)
except HTTPException as e:
pass
self.assertEqual(e.status_int, 400)
req = Request.blank('/v1/a/c/o', headers={'X-Copy-From': 'lala'})
try:
self.slo.handle_multipart_put(req, fake_start_response)
@ -411,49 +402,29 @@ class TestSloPutManifest(SloTestCase):
self.slo(req.environ, my_fake_start_response)
self.assertTrue('X-Static-Large-Object' in req.headers)
def test_handle_multipart_put_success_allow_small_last_segment(self):
with patch.object(self.slo, 'min_segment_size', 50):
def test_handle_multipart_put_disallow_empty_first_segment(self):
test_json_data = json.dumps([{'path': '/cont/object',
'etag': 'etagoftheobjectsegment',
'size_bytes': 0},
{'path': '/cont/small_object',
'etag': 'etagoftheobjectsegment',
'size_bytes': 100}])
req = Request.blank('/v1/a/c/o', body=test_json_data)
with self.assertRaises(HTTPException) as catcher:
self.slo.handle_multipart_put(req, fake_start_response)
self.assertEqual(catcher.exception.status_int, 400)
def test_handle_multipart_put_disallow_empty_last_segment(self):
test_json_data = json.dumps([{'path': '/cont/object',
'etag': 'etagoftheobjectsegment',
'size_bytes': 100},
{'path': '/cont/small_object',
'etag': 'etagoftheobjectsegment',
'size_bytes': 10}])
req = Request.blank(
'/v1/AUTH_test/c/man?multipart-manifest=put',
environ={'REQUEST_METHOD': 'PUT'}, headers={'Accept': 'test'},
body=test_json_data)
self.assertTrue('X-Static-Large-Object' not in req.headers)
self.slo(req.environ, fake_start_response)
self.assertTrue('X-Static-Large-Object' in req.headers)
def test_handle_multipart_put_success_allow_only_one_small_segment(self):
with patch.object(self.slo, 'min_segment_size', 50):
test_json_data = json.dumps([{'path': '/cont/small_object',
'etag': 'etagoftheobjectsegment',
'size_bytes': 10}])
req = Request.blank(
'/v1/AUTH_test/c/man?multipart-manifest=put',
environ={'REQUEST_METHOD': 'PUT'}, headers={'Accept': 'test'},
body=test_json_data)
self.assertTrue('X-Static-Large-Object' not in req.headers)
self.slo(req.environ, fake_start_response)
self.assertTrue('X-Static-Large-Object' in req.headers)
def test_handle_multipart_put_disallow_small_first_segment(self):
with patch.object(self.slo, 'min_segment_size', 50):
test_json_data = json.dumps([{'path': '/cont/object',
'etag': 'etagoftheobjectsegment',
'size_bytes': 10},
{'path': '/cont/small_object',
'etag': 'etagoftheobjectsegment',
'size_bytes': 100}])
'size_bytes': 0}])
req = Request.blank('/v1/a/c/o', body=test_json_data)
try:
with self.assertRaises(HTTPException) as catcher:
self.slo.handle_multipart_put(req, fake_start_response)
except HTTPException as e:
pass
self.assertEqual(e.status_int, 400)
self.assertEqual(catcher.exception.status_int, 400)
def test_handle_multipart_put_success_unicode(self):
test_json_data = json.dumps([{'path': u'/cont/object\u2661',
@ -543,7 +514,7 @@ class TestSloPutManifest(SloTestCase):
{'path': '/checktest/badreq', 'etag': 'a', 'size_bytes': '1'},
{'path': '/checktest/b_2', 'etag': 'not-b', 'size_bytes': '2'},
{'path': '/checktest/slob', 'etag': 'not-slob',
'size_bytes': '2'}])
'size_bytes': '12345'}])
req = Request.blank(
'/v1/AUTH_test/checktest/man?multipart-manifest=put',
environ={'REQUEST_METHOD': 'PUT'},
@ -553,6 +524,7 @@ class TestSloPutManifest(SloTestCase):
status, headers, body = self.call_slo(req)
self.assertEqual(self.app.call_count, 5)
errors = json.loads(body)['Errors']
self.assertEqual(len(errors), 5)
self.assertEqual(errors[0][0], '/checktest/a_1')
self.assertEqual(errors[0][1], 'Size Mismatch')
@ -587,8 +559,7 @@ class TestSloPutManifest(SloTestCase):
self.assertEqual(2, manifest_data[1]['bytes'])
def test_handle_multipart_put_skip_size_check_still_uses_min_size(self):
with patch.object(self.slo, 'min_segment_size', 50):
test_json_data = json.dumps([{'path': '/cont/small_object',
test_json_data = json.dumps([{'path': '/cont/empty_object',
'etag': 'etagoftheobjectsegment',
'size_bytes': None},
{'path': '/cont/small_object',
@ -600,11 +571,10 @@ class TestSloPutManifest(SloTestCase):
self.assertEqual(cm.exception.status_int, 400)
def test_handle_multipart_put_skip_size_check_no_early_bailout(self):
with patch.object(self.slo, 'min_segment_size', 50):
# The first is too small (it's 10 bytes but min size is 50), and
# The first is too small (it's 0 bytes), and
# the second has a bad etag. Make sure both errors show up in
# the response.
test_json_data = json.dumps([{'path': '/cont/small_object',
test_json_data = json.dumps([{'path': '/cont/empty_object',
'etag': 'etagoftheobjectsegment',
'size_bytes': None},
{'path': '/cont/object2',
@ -614,7 +584,7 @@ class TestSloPutManifest(SloTestCase):
with self.assertRaises(HTTPException) as cm:
self.slo.handle_multipart_put(req, fake_start_response)
self.assertEqual(cm.exception.status_int, 400)
self.assertIn('at least 50 bytes', cm.exception.body)
self.assertIn('at least 1 byte', cm.exception.body)
self.assertIn('Etag Mismatch', cm.exception.body)
def test_handle_multipart_put_skip_etag_check(self):
@ -1126,6 +1096,46 @@ class TestSloGetManifest(SloTestCase):
swob.HTTPOk, {'Content-Length': '20',
'Etag': md5hex('d' * 20)},
'd' * 20)
self.app.register(
'GET', '/v1/AUTH_test/gettest/e_25',
swob.HTTPOk, {'Content-Length': '25',
'Etag': md5hex('e' * 25)},
'e' * 25)
self.app.register(
'GET', '/v1/AUTH_test/gettest/f_30',
swob.HTTPOk, {'Content-Length': '30',
'Etag': md5hex('f' * 30)},
'f' * 30)
self.app.register(
'GET', '/v1/AUTH_test/gettest/g_35',
swob.HTTPOk, {'Content-Length': '35',
'Etag': md5hex('g' * 35)},
'g' * 35)
self.app.register(
'GET', '/v1/AUTH_test/gettest/h_40',
swob.HTTPOk, {'Content-Length': '40',
'Etag': md5hex('h' * 40)},
'h' * 40)
self.app.register(
'GET', '/v1/AUTH_test/gettest/i_45',
swob.HTTPOk, {'Content-Length': '45',
'Etag': md5hex('i' * 45)},
'i' * 45)
self.app.register(
'GET', '/v1/AUTH_test/gettest/j_50',
swob.HTTPOk, {'Content-Length': '50',
'Etag': md5hex('j' * 50)},
'j' * 50)
self.app.register(
'GET', '/v1/AUTH_test/gettest/k_55',
swob.HTTPOk, {'Content-Length': '55',
'Etag': md5hex('k' * 55)},
'k' * 55)
self.app.register(
'GET', '/v1/AUTH_test/gettest/l_60',
swob.HTTPOk, {'Content-Length': '60',
'Etag': md5hex('l' * 60)},
'l' * 60)
_bc_manifest_json = json.dumps(
[{'name': '/gettest/b_10', 'hash': md5hex('b' * 10), 'bytes': '10',
@ -1156,6 +1166,39 @@ class TestSloGetManifest(SloTestCase):
'Etag': md5(_abcd_manifest_json).hexdigest()},
_abcd_manifest_json)
_abcdefghijkl_manifest_json = json.dumps(
[{'name': '/gettest/a_5', 'hash': md5hex("a" * 5),
'content_type': 'text/plain', 'bytes': '5'},
{'name': '/gettest/b_10', 'hash': md5hex("b" * 10),
'content_type': 'text/plain', 'bytes': '10'},
{'name': '/gettest/c_15', 'hash': md5hex("c" * 15),
'content_type': 'text/plain', 'bytes': '15'},
{'name': '/gettest/d_20', 'hash': md5hex("d" * 20),
'content_type': 'text/plain', 'bytes': '20'},
{'name': '/gettest/e_25', 'hash': md5hex("e" * 25),
'content_type': 'text/plain', 'bytes': '25'},
{'name': '/gettest/f_30', 'hash': md5hex("f" * 30),
'content_type': 'text/plain', 'bytes': '30'},
{'name': '/gettest/g_35', 'hash': md5hex("g" * 35),
'content_type': 'text/plain', 'bytes': '35'},
{'name': '/gettest/h_40', 'hash': md5hex("h" * 40),
'content_type': 'text/plain', 'bytes': '40'},
{'name': '/gettest/i_45', 'hash': md5hex("i" * 45),
'content_type': 'text/plain', 'bytes': '45'},
{'name': '/gettest/j_50', 'hash': md5hex("j" * 50),
'content_type': 'text/plain', 'bytes': '50'},
{'name': '/gettest/k_55', 'hash': md5hex("k" * 55),
'content_type': 'text/plain', 'bytes': '55'},
{'name': '/gettest/l_60', 'hash': md5hex("l" * 60),
'content_type': 'text/plain', 'bytes': '60'}])
self.app.register(
'GET', '/v1/AUTH_test/gettest/manifest-abcdefghijkl',
swob.HTTPOk, {
'Content-Type': 'application/json',
'X-Static-Large-Object': 'true',
'Etag': md5(_abcdefghijkl_manifest_json).hexdigest()},
_abcdefghijkl_manifest_json)
self.manifest_abcd_etag = md5hex(
md5hex("a" * 5) + md5hex(md5hex("b" * 10) + md5hex("c" * 15)) +
md5hex("d" * 20))
@ -1361,6 +1404,65 @@ class TestSloGetManifest(SloTestCase):
'bytes=0-14,0-14',
'bytes=0-19,0-19'])
def test_get_manifest_ratelimiting(self):
req = Request.blank(
'/v1/AUTH_test/gettest/manifest-abcdefghijkl',
environ={'REQUEST_METHOD': 'GET'})
the_time = [time.time()]
sleeps = []
def mock_time():
return the_time[0]
def mock_sleep(duration):
sleeps.append(duration)
the_time[0] += duration
with patch('time.time', mock_time), \
patch('eventlet.sleep', mock_sleep), \
patch.object(self.slo, 'rate_limit_under_size', 999999999), \
patch.object(self.slo, 'rate_limit_after_segment', 0):
status, headers, body = self.call_slo(req)
self.assertEqual(status, '200 OK') # sanity check
self.assertEqual(sleeps, [2.0, 2.0, 2.0, 2.0, 2.0])
# give the client the first 4 segments without ratelimiting; we'll
# sleep less
del sleeps[:]
with patch('time.time', mock_time), \
patch('eventlet.sleep', mock_sleep), \
patch.object(self.slo, 'rate_limit_under_size', 999999999), \
patch.object(self.slo, 'rate_limit_after_segment', 4):
status, headers, body = self.call_slo(req)
self.assertEqual(status, '200 OK') # sanity check
self.assertEqual(sleeps, [2.0, 2.0, 2.0])
# ratelimit segments under 35 bytes; this affects a-f
del sleeps[:]
with patch('time.time', mock_time), \
patch('eventlet.sleep', mock_sleep), \
patch.object(self.slo, 'rate_limit_under_size', 35), \
patch.object(self.slo, 'rate_limit_after_segment', 0):
status, headers, body = self.call_slo(req)
self.assertEqual(status, '200 OK') # sanity check
self.assertEqual(sleeps, [2.0, 2.0])
# ratelimit segments under 36 bytes; this now affects a-g, netting
# us one more sleep than before
del sleeps[:]
with patch('time.time', mock_time), \
patch('eventlet.sleep', mock_sleep), \
patch.object(self.slo, 'rate_limit_under_size', 36), \
patch.object(self.slo, 'rate_limit_after_segment', 0):
status, headers, body = self.call_slo(req)
self.assertEqual(status, '200 OK') # sanity check
self.assertEqual(sleeps, [2.0, 2.0, 2.0])
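A hedged sketch of the gating these four cases walk through (the helper below is illustrative, not the middleware's actual code path): a segment is a candidate for rate limiting only if its index is at or past rate_limit_after_segment and its size is below rate_limit_under_size.
def ratelimited_segments(sizes, rate_limit_after_segment,
                         rate_limit_under_size):
    # indexes of segments the middleware would throttle
    return [i for i, size in enumerate(sizes)
            if i >= rate_limit_after_segment
            and size < rate_limit_under_size]
sizes = [5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60]  # a_5 .. l_60
assert ratelimited_segments(sizes, 0, 35) == [0, 1, 2, 3, 4, 5]  # a-f
assert ratelimited_segments(sizes, 0, 36) == list(range(7))      # a-g
The exact sleep counts in the test additionally depend on how fast the throttled segments drain relative to the limiter's allowance, which this sketch does not model.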
def test_if_none_match_matches(self):
req = Request.blank(
'/v1/AUTH_test/gettest/manifest-abcd',
@ -1944,6 +2046,68 @@ class TestSloGetManifest(SloTestCase):
self.assertEqual(headers['X-Object-Meta-Fish'], 'Bass')
self.assertEqual(body, '')
def test_generator_closure(self):
# Test that the SLO WSGI iterable closes its internal .app_iter when
# it receives a close() message.
#
# This is sufficient to fix a memory leak. The memory leak arises
# due to cyclic references involving a running generator; a running
# generator sometimes prevents the GC from collecting it in the
# same way that an object with a defined __del__ does.
#
# There are other ways to break the cycle and fix the memory leak as
# well; calling .close() on the generator is sufficient, but not
# necessary. However, having this test is better than nothing for
# preventing regressions.
leaks = [0]
class LeakTracker(object):
def __init__(self, inner_iter):
leaks[0] += 1
self.inner_iter = iter(inner_iter)
def __iter__(self):
return self
def next(self):
return next(self.inner_iter)
def close(self):
leaks[0] -= 1
close_if_possible(self.inner_iter)
class LeakTrackingSegmentedIterable(slo.SegmentedIterable):
def _internal_iter(self, *a, **kw):
it = super(
LeakTrackingSegmentedIterable, self)._internal_iter(
*a, **kw)
return LeakTracker(it)
status = [None]
headers = [None]
def start_response(s, h, ei=None):
status[0] = s
headers[0] = h
req = Request.blank(
'/v1/AUTH_test/gettest/manifest-abcd',
environ={'REQUEST_METHOD': 'GET',
'HTTP_ACCEPT': 'application/json'})
# can't self.call_slo() here since we don't want to consume the
# whole body
with patch.object(slo, 'SegmentedIterable',
LeakTrackingSegmentedIterable):
app_resp = self.slo(req.environ, start_response)
self.assertEqual(status[0], '200 OK') # sanity check
body_iter = iter(app_resp)
chunk = next(body_iter)
self.assertEqual(chunk, 'aaaaa') # sanity check
app_resp.close()
self.assertEqual(0, leaks[0])
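A standalone sketch of the cycle this test guards against (plain stdlib, no Swift imports): a suspended generator whose frame refers back to its owner keeps the owner alive until the generator is closed.
import weakref
class Owner(object):
    def body(self):
        while True:
            yield 'chunk'        # the suspended frame references self
owner = Owner()
it = owner.body()
next(it)                         # start the generator
ref = weakref.ref(owner)
it.close()                       # unwind the frame, dropping its refs
del owner, it
assert ref() is None             # owner was freed without needing the GC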
def test_head_manifest_is_efficient(self):
req = Request.blank(
'/v1/AUTH_test/gettest/manifest-abcd',
@ -2446,8 +2610,7 @@ class TestSwiftInfo(unittest.TestCase):
self.assertTrue('slo' in swift_info)
self.assertEqual(swift_info['slo'].get('max_manifest_segments'),
mware.max_manifest_segments)
self.assertEqual(swift_info['slo'].get('min_segment_size'),
mware.min_segment_size)
self.assertEqual(swift_info['slo'].get('min_segment_size'), 1)
self.assertEqual(swift_info['slo'].get('max_manifest_size'),
mware.max_manifest_size)

View File

@ -515,6 +515,22 @@ class TestAuth(unittest.TestCase):
self.assertEqual(resp.status_int, 200)
self.assertTrue(resp.headers['x-storage-url'].endswith('/v1/AUTH_ac'))
self.assertTrue(resp.headers['x-auth-token'].startswith('AUTH_'))
self.assertEqual(resp.headers['x-auth-token'],
resp.headers['x-storage-token'])
self.assertAlmostEqual(int(resp.headers['x-auth-token-expires']),
auth.DEFAULT_TOKEN_LIFE - 0.5, delta=0.5)
self.assertGreater(len(resp.headers['x-auth-token']), 10)
def test_get_token_success_other_auth_prefix(self):
test_auth = auth.filter_factory({'user_ac_user': 'testing',
'auth_prefix': '/other/'})(FakeApp())
req = self._make_request(
'/other/v1.0',
headers={'X-Auth-User': 'ac:user', 'X-Auth-Key': 'testing'})
resp = req.get_response(test_auth)
self.assertEqual(resp.status_int, 200)
self.assertTrue(resp.headers['x-storage-url'].endswith('/v1/AUTH_ac'))
self.assertTrue(resp.headers['x-auth-token'].startswith('AUTH_'))
self.assertTrue(len(resp.headers['x-auth-token']) > 10)
def test_use_token_success(self):
@ -641,11 +657,16 @@ class TestAuth(unittest.TestCase):
req.environ['SERVER_NAME'] = 'bob'
req.environ['SERVER_PORT'] = '1234'
req.environ['swift.cache'].set('AUTH_/user/test:tester', 'uuid_token')
expires = time() + 180
req.environ['swift.cache'].set('AUTH_/token/uuid_token',
(time() + 180, 'test,test:tester'))
(expires, 'test,test:tester'))
resp = req.get_response(self.test_auth)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.headers['x-auth-token'], 'uuid_token')
self.assertEqual(resp.headers['x-auth-token'],
resp.headers['x-storage-token'])
self.assertAlmostEqual(int(resp.headers['x-auth-token-expires']),
179.5, delta=0.5)
def test_old_token_overdate(self):
self.test_auth = \
@ -664,6 +685,8 @@ class TestAuth(unittest.TestCase):
self.assertEqual(resp.status_int, 200)
self.assertNotEqual(resp.headers['x-auth-token'], 'uuid_token')
self.assertEqual(resp.headers['x-auth-token'][:7], 'AUTH_tk')
self.assertAlmostEqual(int(resp.headers['x-auth-token-expires']),
auth.DEFAULT_TOKEN_LIFE - 0.5, delta=0.5)
def test_old_token_with_old_data(self):
self.test_auth = \
@ -682,6 +705,8 @@ class TestAuth(unittest.TestCase):
self.assertEqual(resp.status_int, 200)
self.assertNotEqual(resp.headers['x-auth-token'], 'uuid_token')
self.assertEqual(resp.headers['x-auth-token'][:7], 'AUTH_tk')
self.assertAlmostEqual(int(resp.headers['x-auth-token-expires']),
auth.DEFAULT_TOKEN_LIFE - 0.5, delta=0.5)
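The assertAlmostEqual pattern above deserves a note: the expiry header is an integer number of seconds remaining, which may have ticked down by up to a second between token creation and the response, so comparing against DEFAULT_TOKEN_LIFE - 0.5 with delta=0.5 accepts exactly the two plausible values.
DEFAULT_TOKEN_LIFE = 86400          # tempauth default, for illustration
for observed in (DEFAULT_TOKEN_LIFE, DEFAULT_TOKEN_LIFE - 1):
    assert abs(observed - (DEFAULT_TOKEN_LIFE - 0.5)) <= 0.5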
def test_reseller_admin_is_owner(self):
orig_authorize = self.test_auth.authorize

View File

@ -1319,11 +1319,11 @@ class TestRingBuilder(unittest.TestCase):
def test_fractional_replicas_rebalance(self):
rb = ring.RingBuilder(8, 2.5, 0)
rb.add_dev({'id': 0, 'region': 0, 'region': 0, 'zone': 0, 'weight': 1,
rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda1'})
rb.add_dev({'id': 1, 'region': 0, 'region': 0, 'zone': 1, 'weight': 1,
rb.add_dev({'id': 1, 'region': 0, 'zone': 1, 'weight': 1,
'ip': '127.0.0.1', 'port': 10001, 'device': 'sda1'})
rb.add_dev({'id': 2, 'region': 0, 'region': 0, 'zone': 2, 'weight': 1,
rb.add_dev({'id': 2, 'region': 0, 'zone': 2, 'weight': 1,
'ip': '127.0.0.1', 'port': 10001, 'device': 'sda1'})
rb.rebalance() # passes by not crashing
rb.validate() # also passes by not crashing
@ -1332,13 +1332,13 @@ class TestRingBuilder(unittest.TestCase):
def test_create_add_dev_add_replica_rebalance(self):
rb = ring.RingBuilder(8, 3, 1)
rb.add_dev({'id': 0, 'region': 0, 'region': 0, 'zone': 0, 'weight': 3,
rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 3,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 1, 'region': 0, 'region': 0, 'zone': 0, 'weight': 3,
rb.add_dev({'id': 1, 'region': 0, 'zone': 0, 'weight': 3,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 2, 'region': 0, 'region': 0, 'zone': 0, 'weight': 3,
rb.add_dev({'id': 2, 'region': 0, 'zone': 0, 'weight': 3,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 3, 'region': 0, 'region': 0, 'zone': 0, 'weight': 3,
rb.add_dev({'id': 3, 'region': 0, 'zone': 0, 'weight': 3,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'})
rb.set_replicas(4)
rb.rebalance() # this would crash since parts_wanted was not set
@ -1348,15 +1348,15 @@ class TestRingBuilder(unittest.TestCase):
rb = ring.RingBuilder(8, 3, 1)
# 5 devices: 5 is the smallest number that does not divide 3 * 2^8,
# which forces some rounding to happen.
rb.add_dev({'id': 0, 'region': 0, 'region': 0, 'zone': 0, 'weight': 1,
rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 1, 'region': 0, 'region': 0, 'zone': 0, 'weight': 1,
rb.add_dev({'id': 1, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdb'})
rb.add_dev({'id': 2, 'region': 0, 'region': 0, 'zone': 0, 'weight': 1,
rb.add_dev({'id': 2, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdc'})
rb.add_dev({'id': 3, 'region': 0, 'region': 0, 'zone': 0, 'weight': 1,
rb.add_dev({'id': 3, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdd'})
rb.add_dev({'id': 4, 'region': 0, 'region': 0, 'zone': 0, 'weight': 1,
rb.add_dev({'id': 4, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sde'})
rb.rebalance()
rb.validate()
@ -1381,31 +1381,31 @@ class TestRingBuilder(unittest.TestCase):
def test_add_replicas_then_rebalance_respects_weight(self):
rb = ring.RingBuilder(8, 3, 1)
rb.add_dev({'id': 0, 'region': 0, 'region': 0, 'zone': 0, 'weight': 3,
rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 3,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 1, 'region': 0, 'region': 0, 'zone': 0, 'weight': 3,
rb.add_dev({'id': 1, 'region': 0, 'zone': 0, 'weight': 3,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdb'})
rb.add_dev({'id': 2, 'region': 0, 'region': 0, 'zone': 0, 'weight': 1,
rb.add_dev({'id': 2, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdc'})
rb.add_dev({'id': 3, 'region': 0, 'region': 0, 'zone': 0, 'weight': 1,
rb.add_dev({'id': 3, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdd'})
rb.add_dev({'id': 4, 'region': 0, 'region': 0, 'zone': 1, 'weight': 3,
rb.add_dev({'id': 4, 'region': 0, 'zone': 1, 'weight': 3,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sde'})
rb.add_dev({'id': 5, 'region': 0, 'region': 0, 'zone': 1, 'weight': 3,
rb.add_dev({'id': 5, 'region': 0, 'zone': 1, 'weight': 3,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdf'})
rb.add_dev({'id': 6, 'region': 0, 'region': 0, 'zone': 1, 'weight': 1,
rb.add_dev({'id': 6, 'region': 0, 'zone': 1, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdg'})
rb.add_dev({'id': 7, 'region': 0, 'region': 0, 'zone': 1, 'weight': 1,
rb.add_dev({'id': 7, 'region': 0, 'zone': 1, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdh'})
rb.add_dev({'id': 8, 'region': 0, 'region': 0, 'zone': 2, 'weight': 3,
rb.add_dev({'id': 8, 'region': 0, 'zone': 2, 'weight': 3,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdi'})
rb.add_dev({'id': 9, 'region': 0, 'region': 0, 'zone': 2, 'weight': 3,
rb.add_dev({'id': 9, 'region': 0, 'zone': 2, 'weight': 3,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdj'})
rb.add_dev({'id': 10, 'region': 0, 'region': 0, 'zone': 2, 'weight': 1,
rb.add_dev({'id': 10, 'region': 0, 'zone': 2, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdk'})
rb.add_dev({'id': 11, 'region': 0, 'region': 0, 'zone': 2, 'weight': 1,
rb.add_dev({'id': 11, 'region': 0, 'zone': 2, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdl'})
rb.rebalance(seed=1)
@ -1439,31 +1439,31 @@ class TestRingBuilder(unittest.TestCase):
def test_overload(self):
rb = ring.RingBuilder(8, 3, 1)
rb.add_dev({'id': 0, 'region': 0, 'region': 0, 'zone': 0, 'weight': 1,
rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 3, 'region': 0, 'region': 0, 'zone': 0, 'weight': 1,
rb.add_dev({'id': 3, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdd'})
rb.add_dev({'id': 4, 'region': 0, 'region': 0, 'zone': 0, 'weight': 1,
rb.add_dev({'id': 4, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sde'})
rb.add_dev({'id': 5, 'region': 0, 'region': 0, 'zone': 0, 'weight': 1,
rb.add_dev({'id': 5, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdf'})
rb.add_dev({'id': 1, 'region': 0, 'region': 0, 'zone': 1, 'weight': 1,
rb.add_dev({'id': 1, 'region': 0, 'zone': 1, 'weight': 1,
'ip': '127.0.0.1', 'port': 10001, 'device': 'sdb'})
rb.add_dev({'id': 6, 'region': 0, 'region': 0, 'zone': 1, 'weight': 1,
rb.add_dev({'id': 6, 'region': 0, 'zone': 1, 'weight': 1,
'ip': '127.0.0.1', 'port': 10001, 'device': 'sdg'})
rb.add_dev({'id': 7, 'region': 0, 'region': 0, 'zone': 1, 'weight': 1,
rb.add_dev({'id': 7, 'region': 0, 'zone': 1, 'weight': 1,
'ip': '127.0.0.1', 'port': 10001, 'device': 'sdh'})
rb.add_dev({'id': 8, 'region': 0, 'region': 0, 'zone': 1, 'weight': 1,
rb.add_dev({'id': 8, 'region': 0, 'zone': 1, 'weight': 1,
'ip': '127.0.0.1', 'port': 10001, 'device': 'sdi'})
rb.add_dev({'id': 2, 'region': 0, 'region': 0, 'zone': 2, 'weight': 2,
rb.add_dev({'id': 2, 'region': 0, 'zone': 2, 'weight': 2,
'ip': '127.0.0.2', 'port': 10002, 'device': 'sdc'})
rb.add_dev({'id': 9, 'region': 0, 'region': 0, 'zone': 2, 'weight': 2,
rb.add_dev({'id': 9, 'region': 0, 'zone': 2, 'weight': 2,
'ip': '127.0.0.2', 'port': 10002, 'device': 'sdj'})
rb.add_dev({'id': 10, 'region': 0, 'region': 0, 'zone': 2, 'weight': 2,
rb.add_dev({'id': 10, 'region': 0, 'zone': 2, 'weight': 2,
'ip': '127.0.0.2', 'port': 10002, 'device': 'sdk'})
rb.add_dev({'id': 11, 'region': 0, 'region': 0, 'zone': 2, 'weight': 2,
rb.add_dev({'id': 11, 'region': 0, 'zone': 2, 'weight': 2,
'ip': '127.0.0.2', 'port': 10002, 'device': 'sdl'})
rb.rebalance(seed=12345)
@ -1517,31 +1517,31 @@ class TestRingBuilder(unittest.TestCase):
# Overload doesn't prevent optimal balancing.
rb = ring.RingBuilder(8, 3, 1)
rb.set_overload(0.125)
rb.add_dev({'id': 0, 'region': 0, 'region': 0, 'zone': 0, 'weight': 1,
rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 3, 'region': 0, 'region': 0, 'zone': 0, 'weight': 1,
rb.add_dev({'id': 3, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 4, 'region': 0, 'region': 0, 'zone': 0, 'weight': 1,
rb.add_dev({'id': 4, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 5, 'region': 0, 'region': 0, 'zone': 0, 'weight': 1,
rb.add_dev({'id': 5, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 1, 'region': 0, 'region': 0, 'zone': 0, 'weight': 1,
rb.add_dev({'id': 1, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.2', 'port': 10000, 'device': 'sdb'})
rb.add_dev({'id': 6, 'region': 0, 'region': 0, 'zone': 0, 'weight': 1,
rb.add_dev({'id': 6, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.2', 'port': 10000, 'device': 'sdb'})
rb.add_dev({'id': 7, 'region': 0, 'region': 0, 'zone': 0, 'weight': 1,
rb.add_dev({'id': 7, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.2', 'port': 10000, 'device': 'sdb'})
rb.add_dev({'id': 8, 'region': 0, 'region': 0, 'zone': 0, 'weight': 1,
rb.add_dev({'id': 8, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.2', 'port': 10000, 'device': 'sdb'})
rb.add_dev({'id': 2, 'region': 0, 'region': 0, 'zone': 0, 'weight': 2,
rb.add_dev({'id': 2, 'region': 0, 'zone': 0, 'weight': 2,
'ip': '127.0.0.3', 'port': 10000, 'device': 'sdc'})
rb.add_dev({'id': 9, 'region': 0, 'region': 0, 'zone': 0, 'weight': 2,
rb.add_dev({'id': 9, 'region': 0, 'zone': 0, 'weight': 2,
'ip': '127.0.0.3', 'port': 10000, 'device': 'sdc'})
rb.add_dev({'id': 10, 'region': 0, 'region': 0, 'zone': 0, 'weight': 2,
rb.add_dev({'id': 10, 'region': 0, 'zone': 0, 'weight': 2,
'ip': '127.0.0.3', 'port': 10000, 'device': 'sdc'})
rb.add_dev({'id': 11, 'region': 0, 'region': 0, 'zone': 0, 'weight': 2,
rb.add_dev({'id': 11, 'region': 0, 'zone': 0, 'weight': 2,
'ip': '127.0.0.3', 'port': 10000, 'device': 'sdc'})
rb.rebalance(seed=12345)
@ -1577,16 +1577,16 @@ class TestRingBuilder(unittest.TestCase):
self.assertEqual(part_counts['127.0.0.3'], 256)
# Add a new server: balance stays optimal
rb.add_dev({'id': 12, 'region': 0, 'region': 0, 'zone': 0,
rb.add_dev({'id': 12, 'region': 0, 'zone': 0,
'weight': 2,
'ip': '127.0.0.4', 'port': 10000, 'device': 'sdd'})
rb.add_dev({'id': 13, 'region': 0, 'region': 0, 'zone': 0,
rb.add_dev({'id': 13, 'region': 0, 'zone': 0,
'weight': 2,
'ip': '127.0.0.4', 'port': 10000, 'device': 'sde'})
rb.add_dev({'id': 14, 'region': 0, 'region': 0, 'zone': 0,
rb.add_dev({'id': 14, 'region': 0, 'zone': 0,
'weight': 2,
'ip': '127.0.0.4', 'port': 10000, 'device': 'sdf'})
rb.add_dev({'id': 15, 'region': 0, 'region': 0, 'zone': 0,
rb.add_dev({'id': 15, 'region': 0, 'zone': 0,
'weight': 2,
'ip': '127.0.0.4', 'port': 10000, 'device': 'sdf'})
@ -1609,29 +1609,29 @@ class TestRingBuilder(unittest.TestCase):
def test_overload_keeps_balanceable_things_balanced_initially(self):
rb = ring.RingBuilder(8, 3, 1)
rb.add_dev({'id': 0, 'region': 0, 'region': 0, 'zone': 0, 'weight': 8,
rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 8,
'ip': '10.0.0.1', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 1, 'region': 0, 'region': 0, 'zone': 0, 'weight': 8,
rb.add_dev({'id': 1, 'region': 0, 'zone': 0, 'weight': 8,
'ip': '10.0.0.1', 'port': 10000, 'device': 'sdb'})
rb.add_dev({'id': 2, 'region': 0, 'region': 0, 'zone': 0, 'weight': 4,
rb.add_dev({'id': 2, 'region': 0, 'zone': 0, 'weight': 4,
'ip': '10.0.0.2', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 3, 'region': 0, 'region': 0, 'zone': 0, 'weight': 4,
rb.add_dev({'id': 3, 'region': 0, 'zone': 0, 'weight': 4,
'ip': '10.0.0.2', 'port': 10000, 'device': 'sdb'})
rb.add_dev({'id': 4, 'region': 0, 'region': 0, 'zone': 0, 'weight': 4,
rb.add_dev({'id': 4, 'region': 0, 'zone': 0, 'weight': 4,
'ip': '10.0.0.3', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 5, 'region': 0, 'region': 0, 'zone': 0, 'weight': 4,
rb.add_dev({'id': 5, 'region': 0, 'zone': 0, 'weight': 4,
'ip': '10.0.0.3', 'port': 10000, 'device': 'sdb'})
rb.add_dev({'id': 6, 'region': 0, 'region': 0, 'zone': 0, 'weight': 4,
rb.add_dev({'id': 6, 'region': 0, 'zone': 0, 'weight': 4,
'ip': '10.0.0.4', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 7, 'region': 0, 'region': 0, 'zone': 0, 'weight': 4,
rb.add_dev({'id': 7, 'region': 0, 'zone': 0, 'weight': 4,
'ip': '10.0.0.4', 'port': 10000, 'device': 'sdb'})
rb.add_dev({'id': 8, 'region': 0, 'region': 0, 'zone': 0, 'weight': 4,
rb.add_dev({'id': 8, 'region': 0, 'zone': 0, 'weight': 4,
'ip': '10.0.0.5', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 9, 'region': 0, 'region': 0, 'zone': 0, 'weight': 4,
rb.add_dev({'id': 9, 'region': 0, 'zone': 0, 'weight': 4,
'ip': '10.0.0.5', 'port': 10000, 'device': 'sdb'})
rb.set_overload(99999)
@ -1653,29 +1653,29 @@ class TestRingBuilder(unittest.TestCase):
def test_overload_keeps_balanceable_things_balanced_on_rebalance(self):
rb = ring.RingBuilder(8, 3, 1)
rb.add_dev({'id': 0, 'region': 0, 'region': 0, 'zone': 0, 'weight': 8,
rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 8,
'ip': '10.0.0.1', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 1, 'region': 0, 'region': 0, 'zone': 0, 'weight': 8,
rb.add_dev({'id': 1, 'region': 0, 'zone': 0, 'weight': 8,
'ip': '10.0.0.1', 'port': 10000, 'device': 'sdb'})
rb.add_dev({'id': 2, 'region': 0, 'region': 0, 'zone': 0, 'weight': 4,
rb.add_dev({'id': 2, 'region': 0, 'zone': 0, 'weight': 4,
'ip': '10.0.0.2', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 3, 'region': 0, 'region': 0, 'zone': 0, 'weight': 4,
rb.add_dev({'id': 3, 'region': 0, 'zone': 0, 'weight': 4,
'ip': '10.0.0.2', 'port': 10000, 'device': 'sdb'})
rb.add_dev({'id': 4, 'region': 0, 'region': 0, 'zone': 0, 'weight': 4,
rb.add_dev({'id': 4, 'region': 0, 'zone': 0, 'weight': 4,
'ip': '10.0.0.3', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 5, 'region': 0, 'region': 0, 'zone': 0, 'weight': 4,
rb.add_dev({'id': 5, 'region': 0, 'zone': 0, 'weight': 4,
'ip': '10.0.0.3', 'port': 10000, 'device': 'sdb'})
rb.add_dev({'id': 6, 'region': 0, 'region': 0, 'zone': 0, 'weight': 4,
rb.add_dev({'id': 6, 'region': 0, 'zone': 0, 'weight': 4,
'ip': '10.0.0.4', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 7, 'region': 0, 'region': 0, 'zone': 0, 'weight': 4,
rb.add_dev({'id': 7, 'region': 0, 'zone': 0, 'weight': 4,
'ip': '10.0.0.4', 'port': 10000, 'device': 'sdb'})
rb.add_dev({'id': 8, 'region': 0, 'region': 0, 'zone': 0, 'weight': 4,
rb.add_dev({'id': 8, 'region': 0, 'zone': 0, 'weight': 4,
'ip': '10.0.0.5', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 9, 'region': 0, 'region': 0, 'zone': 0, 'weight': 4,
rb.add_dev({'id': 9, 'region': 0, 'zone': 0, 'weight': 4,
'ip': '10.0.0.5', 'port': 10000, 'device': 'sdb'})
rb.set_overload(99999)
@ -1719,28 +1719,28 @@ class TestRingBuilder(unittest.TestCase):
def test_server_per_port(self):
# 3 servers, 3 disks each, with each disk on its own port
rb = ring.RingBuilder(8, 3, 1)
rb.add_dev({'id': 0, 'region': 0, 'region': 0, 'zone': 0, 'weight': 1,
rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '10.0.0.1', 'port': 10000, 'device': 'sdx'})
rb.add_dev({'id': 1, 'region': 0, 'region': 0, 'zone': 0, 'weight': 1,
rb.add_dev({'id': 1, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '10.0.0.1', 'port': 10001, 'device': 'sdy'})
rb.add_dev({'id': 3, 'region': 0, 'region': 0, 'zone': 0, 'weight': 1,
rb.add_dev({'id': 3, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '10.0.0.2', 'port': 10000, 'device': 'sdx'})
rb.add_dev({'id': 4, 'region': 0, 'region': 0, 'zone': 0, 'weight': 1,
rb.add_dev({'id': 4, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '10.0.0.2', 'port': 10001, 'device': 'sdy'})
rb.add_dev({'id': 6, 'region': 0, 'region': 0, 'zone': 0, 'weight': 1,
rb.add_dev({'id': 6, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '10.0.0.3', 'port': 10000, 'device': 'sdx'})
rb.add_dev({'id': 7, 'region': 0, 'region': 0, 'zone': 0, 'weight': 1,
rb.add_dev({'id': 7, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '10.0.0.3', 'port': 10001, 'device': 'sdy'})
rb.rebalance(seed=1)
rb.add_dev({'id': 2, 'region': 0, 'region': 0, 'zone': 0, 'weight': 1,
rb.add_dev({'id': 2, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '10.0.0.1', 'port': 10002, 'device': 'sdz'})
rb.add_dev({'id': 5, 'region': 0, 'region': 0, 'zone': 0, 'weight': 1,
rb.add_dev({'id': 5, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '10.0.0.2', 'port': 10002, 'device': 'sdz'})
rb.add_dev({'id': 8, 'region': 0, 'region': 0, 'zone': 0, 'weight': 1,
rb.add_dev({'id': 8, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '10.0.0.3', 'port': 10002, 'device': 'sdz'})
rb.pretend_min_part_hours_passed()
@ -2318,6 +2318,72 @@ class TestRingBuilder(unittest.TestCase):
msg = 'Replica count of 3 requires more than 2 devices'
self.assertIn(msg, str(e.exception))
def _add_dev_delete_first_n(self, add_dev_count, n):
rb = ring.RingBuilder(8, 3, 1)
dev_names = ['sda', 'sdb', 'sdc', 'sdd', 'sde', 'sdf']
for i in range(add_dev_count):
if i < len(dev_names):
dev_name = dev_names[i]
else:
dev_name = 'sda'
rb.add_dev({'id': i, 'region': 0, 'zone': 0, 'ip': '127.0.0.1',
'port': 6000, 'weight': 1.0, 'device': dev_name})
rb.rebalance()
if n > 0:
rb.pretend_min_part_hours_passed()
# remove first n
for i in range(n):
rb.remove_dev(i)
rb.pretend_min_part_hours_passed()
rb.rebalance()
return rb
def test_reuse_of_dev_holes_without_id(self):
# try with contiguous holes at beginning
add_dev_count = 6
rb = self._add_dev_delete_first_n(add_dev_count, add_dev_count - 3)
new_dev_id = rb.add_dev({'region': 0, 'zone': 0, 'ip': '127.0.0.1',
'port': 6000, 'weight': 1.0, 'device': 'sda'})
self.assertTrue(new_dev_id < add_dev_count)
# try with non-contiguous holes
# [0, 1, None, 3, 4, None]
rb2 = ring.RingBuilder(8, 3, 1)
for i in range(6):
rb2.add_dev({'region': 0, 'zone': 0, 'ip': '127.0.0.1',
'port': 6000, 'weight': 1.0, 'device': 'sda'})
rb2.rebalance()
rb2.pretend_min_part_hours_passed()
rb2.remove_dev(2)
rb2.remove_dev(5)
rb2.pretend_min_part_hours_passed()
rb2.rebalance()
first = rb2.add_dev({'region': 0, 'zone': 0, 'ip': '127.0.0.1',
'port': 6000, 'weight': 1.0, 'device': 'sda'})
second = rb2.add_dev({'region': 0, 'zone': 0, 'ip': '127.0.0.1',
'port': 6000, 'weight': 1.0, 'device': 'sda'})
# add a new one (without reusing a hole)
third = rb2.add_dev({'region': 0, 'zone': 0, 'ip': '127.0.0.1',
'port': 6000, 'weight': 1.0, 'device': 'sda'})
self.assertEqual(first, 2)
self.assertEqual(second, 5)
self.assertEqual(third, 6)
def test_reuse_of_dev_holes_with_id(self):
add_dev_count = 6
rb = self._add_dev_delete_first_n(add_dev_count, add_dev_count - 3)
# add specifying id
exp_new_dev_id = 2
# [dev, dev, None, dev, dev, None]
try:
new_dev_id = rb.add_dev({'id': exp_new_dev_id, 'region': 0,
'zone': 0, 'ip': '127.0.0.1',
'port': 6000, 'weight': 1.0,
'device': 'sda'})
self.assertEqual(new_dev_id, exp_new_dev_id)
except exceptions.DuplicateDeviceError:
self.fail("device hole not reused")
class TestGetRequiredOverload(unittest.TestCase):

View File

@ -16,7 +16,6 @@
import array
import six.moves.cPickle as pickle
import os
import sys
import unittest
import stat
from contextlib import closing
@ -109,11 +108,7 @@ class TestRingData(unittest.TestCase):
def test_deterministic_serialization(self):
"""
Two identical rings should produce identical .gz files on disk.
Only true on Python 2.7 or greater.
"""
if sys.version_info[0] == 2 and sys.version_info[1] < 7:
return
os.mkdir(os.path.join(self.testdir, '1'))
os.mkdir(os.path.join(self.testdir, '2'))
# These have to have the same filename (not full path,

View File

@ -26,8 +26,7 @@ from six.moves import range
from six.moves.urllib.parse import quote
from test.unit import FakeLogger
from eventlet.green import urllib2
from swift.common import internal_client
from swift.common import swob
from swift.common import exceptions, internal_client, swob
from swift.common.storage_policy import StoragePolicy
from test.unit import with_tempdir, write_fake_ring, patch_policies
@ -1329,7 +1328,7 @@ class TestSimpleClient(unittest.TestCase):
mock_urlopen.side_effect = urllib2.HTTPError(*[None] * 5)
with mock.patch('swift.common.internal_client.sleep') \
as mock_sleep:
self.assertRaises(urllib2.HTTPError,
self.assertRaises(exceptions.ClientException,
c.retry_request, request_method, retries=1)
self.assertEqual(mock_sleep.call_count, 1)
self.assertEqual(mock_urlopen.call_count, 2)
@ -1347,7 +1346,7 @@ class TestSimpleClient(unittest.TestCase):
mock_urlopen.side_effect = urllib2.HTTPError(*[None] * 5)
with mock.patch('swift.common.internal_client.sleep') \
as mock_sleep:
self.assertRaises(urllib2.HTTPError,
self.assertRaises(exceptions.ClientException,
c.retry_request, request_method,
container='con', retries=1)
self.assertEqual(mock_sleep.call_count, 1)
@ -1366,7 +1365,7 @@ class TestSimpleClient(unittest.TestCase):
mock_urlopen.side_effect = urllib2.HTTPError(*[None] * 5)
with mock.patch('swift.common.internal_client.sleep') \
as mock_sleep:
self.assertRaises(urllib2.HTTPError,
self.assertRaises(exceptions.ClientException,
c.retry_request, request_method,
container='con', name='obj', retries=1)
self.assertEqual(mock_sleep.call_count, 1)
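The three hunks above capture a behavior change in SimpleClient: exhausted retries now surface as swift's own ClientException instead of leaking urllib2.HTTPError. A hedged usage sketch of the new contract (the url constructor argument is assumed from SimpleClient's signature, which is not shown in this diff):

from swift.common import exceptions, internal_client

client = internal_client.SimpleClient(url='http://127.0.0.1:8080/auth/v1.0')
try:
    client.retry_request('GET', container='con', name='obj', retries=1)
except exceptions.ClientException as err:  # was urllib2.HTTPError before
    print('request failed after retries: %s' % err)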


@ -1916,13 +1916,18 @@ class TestManager(unittest.TestCase):
continue
yield server, pid
def mock_kill_group(pid, sig):
self.fail('kill_group should not be called')
_orig_server = manager.Server
_orig_watch_server_pids = manager.watch_server_pids
_orig_kill_group = manager.kill_group
try:
manager.watch_server_pids = mock_watch_server_pids
manager.kill_group = mock_kill_group
# test stop one server
server_pids = {
'test': [1]
'test': {1: "dummy.pid"}
}
manager.Server = MockServerFactory(server_pids)
m = manager.Manager(['test'])
@ -1930,7 +1935,7 @@ class TestManager(unittest.TestCase):
self.assertEqual(status, 0)
# test not running
server_pids = {
'test': []
'test': {}
}
manager.Server = MockServerFactory(server_pids)
m = manager.Manager(['test'])
@ -1938,7 +1943,7 @@ class TestManager(unittest.TestCase):
self.assertEqual(status, 1)
# test kill not running
server_pids = {
'test': []
'test': {}
}
manager.Server = MockServerFactory(server_pids)
m = manager.Manager(['test'])
@ -1946,7 +1951,7 @@ class TestManager(unittest.TestCase):
self.assertEqual(status, 0)
# test won't die
server_pids = {
'test': [None]
'test': {None: None}
}
manager.Server = MockServerFactory(server_pids)
m = manager.Manager(['test'])
@ -1956,6 +1961,83 @@ class TestManager(unittest.TestCase):
finally:
manager.Server = _orig_server
manager.watch_server_pids = _orig_watch_server_pids
manager.kill_group = _orig_kill_group
def test_stop_kill_after_timeout(self):
class MockServerFactory(object):
class MockServer(object):
def __init__(self, pids, run_dir=manager.RUN_DIR):
self.pids = pids
def stop(self, **kwargs):
return self.pids
def status(self, **kwargs):
return not self.pids
def __init__(self, server_pids, run_dir=manager.RUN_DIR):
self.server_pids = server_pids
def __call__(self, server, run_dir=manager.RUN_DIR):
return MockServerFactory.MockServer(self.server_pids[server])
def mock_watch_server_pids(server_pids, **kwargs):
for server, pids in server_pids.items():
for pid in pids:
if pid is None:
continue
yield server, pid
mock_kill_group_called = []
def mock_kill_group(*args):
mock_kill_group_called.append(args)
def mock_kill_group_oserr(*args):
raise OSError()
def mock_kill_group_oserr_ESRCH(*args):
raise OSError(errno.ESRCH, 'No such process')
_orig_server = manager.Server
_orig_watch_server_pids = manager.watch_server_pids
_orig_kill_group = manager.kill_group
try:
manager.watch_server_pids = mock_watch_server_pids
manager.kill_group = mock_kill_group
# test stop one server
server_pids = {
'test': {None: None}
}
manager.Server = MockServerFactory(server_pids)
m = manager.Manager(['test'])
status = m.stop(kill_after_timeout=True)
self.assertEqual(status, 1)
self.assertEqual(mock_kill_group_called, [(None, 9)])
manager.kill_group = mock_kill_group_oserr
# test stop one server - OSError
server_pids = {
'test': {None: None}
}
manager.Server = MockServerFactory(server_pids)
m = manager.Manager(['test'])
with self.assertRaises(OSError):
status = m.stop(kill_after_timeout=True)
manager.kill_group = mock_kill_group_oserr_ESRCH
# test stop one server - OSError: No such process
server_pids = {
'test': {None: None}
}
manager.Server = MockServerFactory(server_pids)
m = manager.Manager(['test'])
status = m.stop(kill_after_timeout=True)
self.assertEqual(status, 1)
finally:
manager.Server = _orig_server
manager.watch_server_pids = _orig_watch_server_pids
manager.kill_group = _orig_kill_group
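The kill_after_timeout tests above fix the escalation contract: pids still alive after the stop timeout receive SIGKILL via kill_group(pid, 9), a vanished process (ESRCH) is tolerated, and any other OSError propagates. A hedged sketch of that loop (kill_stragglers is a hypothetical helper, not the manager API):

import errno
import signal

def kill_stragglers(pids, kill_group):
    for pid in pids:
        try:
            kill_group(pid, signal.SIGKILL)   # the tests expect signal 9
        except OSError as e:
            if e.errno != errno.ESRCH:        # 'No such process' is benign
                raise                         # anything else is re-raised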
# TODO(clayg): more tests
def test_shutdown(self):


@ -182,9 +182,121 @@ class TestMemcached(unittest.TestCase):
one = False
if peeripport == sock2ipport:
two = False
self.assertEqual(len(memcache_client._errors[sock1ipport]), 0)
self.assertEqual(len(memcache_client._errors[sock2ip]), 0)
finally:
memcached.DEFAULT_MEMCACHED_PORT = orig_port
def test_get_conns_v6(self):
if not socket.has_ipv6:
return
try:
sock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
sock.bind(('::1', 0, 0, 0))
sock.listen(1)
sock_addr = sock.getsockname()
server_socket = '[%s]:%s' % (sock_addr[0], sock_addr[1])
memcache_client = memcached.MemcacheRing([server_socket])
key = uuid4().hex
for conn in memcache_client._get_conns(key):
peer_sockaddr = conn[2].getpeername()
peer_socket = '[%s]:%s' % (peer_sockaddr[0], peer_sockaddr[1])
self.assertEqual(peer_socket, server_socket)
self.assertEqual(len(memcache_client._errors[server_socket]), 0)
finally:
sock.close()
def test_get_conns_v6_default(self):
if not socket.has_ipv6:
return
try:
sock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
sock.bind(('::1', 0))
sock.listen(1)
sock_addr = sock.getsockname()
server_socket = '[%s]:%s' % (sock_addr[0], sock_addr[1])
server_host = '[%s]' % sock_addr[0]
memcached.DEFAULT_MEMCACHED_PORT = sock_addr[1]
memcache_client = memcached.MemcacheRing([server_host])
key = uuid4().hex
for conn in memcache_client._get_conns(key):
peer_sockaddr = conn[2].getpeername()
peer_socket = '[%s]:%s' % (peer_sockaddr[0], peer_sockaddr[1])
self.assertEqual(peer_socket, server_socket)
self.assertEqual(len(memcache_client._errors[server_host]), 0)
finally:
sock.close()
def test_get_conns_bad_v6(self):
if not socket.has_ipv6:
return
try:
sock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
sock.bind(('::1', 0))
sock.listen(1)
sock_addr = sock.getsockname()
# IPv6 address with missing [] is invalid
server_socket = '%s:%s' % (sock_addr[0], sock_addr[1])
memcache_client = memcached.MemcacheRing([server_socket])
key = uuid4().hex
for conn in memcache_client._get_conns(key):
peer_sockaddr = conn[2].getpeername()
peer_socket = '[%s]:%s' % (peer_sockaddr[0], peer_sockaddr[1])
self.assertEqual(peer_socket, server_socket)
# Expect a parsing error when creating the socket
self.assertEqual(len(memcache_client._errors[server_socket]), 1)
finally:
sock.close()
def test_get_conns_hostname(self):
with patch('swift.common.memcached.socket.getaddrinfo') as addrinfo:
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind(('127.0.0.1', 0))
sock.listen(1)
sock_addr = sock.getsockname()
fqdn = socket.getfqdn()
server_socket = '%s:%s' % (fqdn, sock_addr[1])
addrinfo.return_value = [(socket.AF_INET,
socket.SOCK_STREAM, 0, '',
('127.0.0.1', sock_addr[1]))]
memcache_client = memcached.MemcacheRing([server_socket])
key = uuid4().hex
for conn in memcache_client._get_conns(key):
peer_sockaddr = conn[2].getpeername()
peer_socket = '%s:%s' % (peer_sockaddr[0],
peer_sockaddr[1])
self.assertEqual(peer_socket,
'127.0.0.1:%d' % sock_addr[1])
self.assertEqual(len(memcache_client._errors[server_socket]),
0)
finally:
sock.close()
def test_get_conns_hostname6(self):
with patch('swift.common.memcached.socket.getaddrinfo') as addrinfo:
try:
sock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
sock.bind(('::1', 0))
sock.listen(1)
sock_addr = sock.getsockname()
fqdn = socket.getfqdn()
server_socket = '%s:%s' % (fqdn, sock_addr[1])
addrinfo.return_value = [(socket.AF_INET6,
socket.SOCK_STREAM, 0, '',
('::1', sock_addr[1]))]
memcache_client = memcached.MemcacheRing([server_socket])
key = uuid4().hex
for conn in memcache_client._get_conns(key):
peer_sockaddr = conn[2].getpeername()
peer_socket = '[%s]:%s' % (peer_sockaddr[0],
peer_sockaddr[1])
self.assertEqual(peer_socket, '[::1]:%d' % sock_addr[1])
self.assertEqual(len(memcache_client._errors[server_socket]),
0)
finally:
sock.close()
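The four tests above establish the accepted server-string forms for MemcacheRing: IPv6 literals must be bracketed, the port may be omitted to fall back to DEFAULT_MEMCACHED_PORT, and hostnames are resolved through getaddrinfo. A minimal usage sketch (assumes a reachable memcached):

from swift.common.memcached import MemcacheRing

# bracketed IPv6, bare IPv4 with default port, and hostname:port all parse
ring = MemcacheRing(['[::1]:11211', '127.0.0.1', 'cache.example.com:11211'])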
def test_set_get(self):
memcache_client = memcached.MemcacheRing(['1.2.3.4:11211'])
mock = MockMemcached()
@ -201,16 +313,11 @@ class TestMemcached(unittest.TestCase):
self.assertEqual(
memcache_client.get('some_key'), ['simple str', u'utf8 str éà'])
self.assertTrue(float(mock.cache.values()[0][1]) == 0)
memcache_client.set('some_key', [1, 2, 3], timeout=10)
self.assertEqual(mock.cache.values()[0][1], '10')
memcache_client.set('some_key', [1, 2, 3], time=20)
self.assertEqual(mock.cache.values()[0][1], '20')
sixtydays = 60 * 24 * 60 * 60
esttimeout = time.time() + sixtydays
memcache_client.set('some_key', [1, 2, 3], timeout=sixtydays)
self.assertTrue(
-1 <= float(mock.cache.values()[0][1]) - esttimeout <= 1)
memcache_client.set('some_key', [1, 2, 3], time=sixtydays)
self.assertTrue(
-1 <= float(mock.cache.values()[0][1]) - esttimeout <= 1)
@ -313,11 +420,6 @@ class TestMemcached(unittest.TestCase):
[[4, 5, 6], [1, 2, 3]])
self.assertEqual(mock.cache.values()[0][1], '0')
self.assertEqual(mock.cache.values()[1][1], '0')
memcache_client.set_multi(
{'some_key1': [1, 2, 3], 'some_key2': [4, 5, 6]}, 'multi_key',
timeout=10)
self.assertEqual(mock.cache.values()[0][1], '10')
self.assertEqual(mock.cache.values()[1][1], '10')
memcache_client.set_multi(
{'some_key1': [1, 2, 3], 'some_key2': [4, 5, 6]}, 'multi_key',
time=20)
@ -328,7 +430,7 @@ class TestMemcached(unittest.TestCase):
esttimeout = time.time() + fortydays
memcache_client.set_multi(
{'some_key1': [1, 2, 3], 'some_key2': [4, 5, 6]}, 'multi_key',
timeout=fortydays)
time=fortydays)
self.assertTrue(
-1 <= float(mock.cache.values()[0][1]) - esttimeout <= 1)
self.assertTrue(
@ -359,6 +461,13 @@ class TestMemcached(unittest.TestCase):
def test_connection_pooling(self):
with patch('swift.common.memcached.socket') as mock_module:
def mock_getaddrinfo(host, port, family=socket.AF_INET,
socktype=socket.SOCK_STREAM, proto=0,
flags=0):
return [(family, socktype, proto, '', (host, port))]
mock_module.getaddrinfo = mock_getaddrinfo
# patch socket, stub socket.socket, mock sock
mock_sock = mock_module.socket.return_value
@ -472,5 +581,27 @@ class TestMemcached(unittest.TestCase):
finally:
memcached.MemcacheConnPool = orig_conn_pool
def test_connection_pool_parser(self):
default = memcached.DEFAULT_MEMCACHED_PORT
addrs = [('1.2.3.4', '1.2.3.4', default),
('1.2.3.4:5000', '1.2.3.4', 5000),
('[dead:beef::1]', 'dead:beef::1', default),
('[dead:beef::1]:5000', 'dead:beef::1', 5000),
('example.com', 'example.com', default),
('example.com:5000', 'example.com', 5000),
('foo.1-2-3.bar.com:5000', 'foo.1-2-3.bar.com', 5000),
('1.2.3.4:10:20', None, None),
('dead:beef::1:5000', None, None)]
for addr, expected_host, expected_port in addrs:
pool = memcached.MemcacheConnPool(addr, 1, 0)
if expected_host:
host, port = pool._get_addr()
self.assertEqual(expected_host, host)
self.assertEqual(expected_port, int(port))
else:
with self.assertRaises(ValueError):
pool._get_addr()
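The table-driven test above defines the parser's whole grammar: host, host:port, [v6], and [v6]:port are accepted, while unbracketed IPv6 literals or extra colons raise ValueError. A stand-alone sketch of the same rule (parse_memcache_addr is a hypothetical helper mirroring what MemcacheConnPool._get_addr checks, not Swift's code):

def parse_memcache_addr(addr, default_port=11211):
    if addr.startswith('['):                 # bracketed IPv6 literal
        host, _, rest = addr[1:].partition(']')
        if rest == '':
            return host, default_port
        if rest.startswith(':') and rest[1:].isdigit():
            return host, int(rest[1:])
        raise ValueError('invalid address: %s' % addr)
    if addr.count(':') > 1:                  # unbracketed IPv6 is ambiguous
        raise ValueError('invalid address: %s' % addr)
    host, _, port = addr.partition(':')
    return host, int(port) if port else default_port

assert parse_memcache_addr('[dead:beef::1]:5000') == ('dead:beef::1', 5000)
assert parse_memcache_addr('1.2.3.4') == ('1.2.3.4', 11211)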
if __name__ == '__main__':
unittest.main()

test/unit/common/test_storage_policy.py Normal file → Executable file

@ -17,7 +17,6 @@ import unittest
import os
import mock
from functools import partial
from six.moves.configparser import ConfigParser
from tempfile import NamedTemporaryFile
from test.unit import patch_policies, FakeRing, temptree, DEFAULT_TEST_EC_TYPE
@ -36,6 +35,7 @@ class FakeStoragePolicy(BaseStoragePolicy):
Test StoragePolicy class - the only user at the moment is
test_validate_policies_type_invalid()
"""
def __init__(self, idx, name='', is_default=False, is_deprecated=False,
object_ring=None):
super(FakeStoragePolicy, self).__init__(
@ -43,7 +43,6 @@ class FakeStoragePolicy(BaseStoragePolicy):
class TestStoragePolicies(unittest.TestCase):
def _conf(self, conf_str):
conf_str = "\n".join(line.strip() for line in conf_str.split("\n"))
conf = ConfigParser()
@ -75,10 +74,10 @@ class TestStoragePolicies(unittest.TestCase):
])
def test_swift_info(self):
# the deprecated 'three' should not exist in expect
expect = [{'default': True, 'name': 'zero'},
{'name': 'two'},
{'name': 'one'},
{'name': 'ten'}]
expect = [{'aliases': 'zero', 'default': True, 'name': 'zero', },
{'aliases': 'two', 'name': 'two'},
{'aliases': 'one', 'name': 'one'},
{'aliases': 'ten', 'name': 'ten'}]
swift_info = POLICIES.get_policy_info()
self.assertEqual(sorted(expect, key=lambda k: k['name']),
sorted(swift_info, key=lambda k: k['name']))
@ -286,6 +285,7 @@ class TestStoragePolicies(unittest.TestCase):
def test_validate_policies_type_invalid(self):
class BogusStoragePolicy(FakeStoragePolicy):
policy_type = 'bogus'
# unsupported policy type - initialization with FakeStoragePolicy
self.assertRaisesWithMessage(PolicyError, 'Invalid type',
BogusStoragePolicy, 1, 'one')
@ -330,6 +330,221 @@ class TestStoragePolicies(unittest.TestCase):
self.assertEqual(pol1, policies.get_by_name(name))
self.assertEqual(policies.get_by_name(name).name, 'One')
def test_multiple_names(self):
# checking duplicate on insert
test_policies = [StoragePolicy(0, 'zero', True),
StoragePolicy(1, 'one', False, aliases='zero')]
self.assertRaises(PolicyError, StoragePolicyCollection,
test_policies)
# checking correct retrieval using other names
test_policies = [StoragePolicy(0, 'zero', True, aliases='cero, kore'),
StoragePolicy(1, 'one', False, aliases='uno, tahi'),
StoragePolicy(2, 'two', False, aliases='dos, rua')]
policies = StoragePolicyCollection(test_policies)
for name in ('zero', 'cero', 'kore'):
self.assertEqual(policies.get_by_name(name), test_policies[0])
for name in ('two', 'dos', 'rua'):
self.assertEqual(policies.get_by_name(name), test_policies[2])
# Testing parsing of conf files/text
good_conf = self._conf("""
[storage-policy:0]
name = one
aliases = uno, tahi
default = yes
""")
policies = parse_storage_policies(good_conf)
self.assertEqual(policies.get_by_name('one'),
policies[0])
self.assertEqual(policies.get_by_name('one'),
policies.get_by_name('tahi'))
name_repeat_conf = self._conf("""
[storage-policy:0]
name = one
aliases = one
default = yes
""")
# The test on the line below should not generate errors. Repeating
# the primary name under aliases is permitted during construction,
# but only because automated testing requires it.
policies = parse_storage_policies(name_repeat_conf)
bad_conf = self._conf("""
[storage-policy:0]
name = one
aliases = uno, uno
default = yes
""")
self.assertRaisesWithMessage(PolicyError,
'is already assigned to this policy',
parse_storage_policies, bad_conf)
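Aliases flow through swift.conf the same way; a minimal sketch of building the equivalent configuration programmatically (assumes a Swift checkout):

from six.moves.configparser import ConfigParser
from swift.common.storage_policy import parse_storage_policies

conf = ConfigParser()
conf.add_section('storage-policy:0')
conf.set('storage-policy:0', 'name', 'zero')
conf.set('storage-policy:0', 'aliases', 'cero, kore')
conf.set('storage-policy:0', 'default', 'yes')

policies = parse_storage_policies(conf)
assert policies.get_by_name('kore') == policies[0]   # alias resolves to policy 0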
def test_multiple_names_EC(self):
# checking duplicate names on insert
test_policies_ec = [
ECStoragePolicy(
0, 'ec8-2',
aliases='zeus, jupiter',
ec_type=DEFAULT_TEST_EC_TYPE,
ec_ndata=8, ec_nparity=2,
object_ring=FakeRing(replicas=8),
is_default=True),
ECStoragePolicy(
1, 'ec10-4',
aliases='ec8-2',
ec_type=DEFAULT_TEST_EC_TYPE,
ec_ndata=10, ec_nparity=4,
object_ring=FakeRing(replicas=10))]
self.assertRaises(PolicyError, StoragePolicyCollection,
test_policies_ec)
# checking correct retrieval using other names
good_test_policies_EC = [
ECStoragePolicy(0, 'ec8-2', aliases='zeus, jupiter',
ec_type=DEFAULT_TEST_EC_TYPE,
ec_ndata=8, ec_nparity=2,
object_ring=FakeRing(replicas=8),
is_default=True),
ECStoragePolicy(1, 'ec10-4', aliases='athena, minerva',
ec_type=DEFAULT_TEST_EC_TYPE,
ec_ndata=10, ec_nparity=4,
object_ring=FakeRing(replicas=10)),
ECStoragePolicy(2, 'ec4-2', aliases='poseidon, neptune',
ec_type=DEFAULT_TEST_EC_TYPE,
ec_ndata=4, ec_nparity=2,
object_ring=FakeRing(replicas=7)),
]
ec_policies = StoragePolicyCollection(good_test_policies_EC)
for name in ('ec8-2', 'zeus', 'jupiter'):
self.assertEqual(ec_policies.get_by_name(name), ec_policies[0])
for name in ('ec10-4', 'athena', 'minerva'):
self.assertEqual(ec_policies.get_by_name(name), ec_policies[1])
# Testing parsing of conf files/text
good_ec_conf = self._conf("""
[storage-policy:0]
name = ec8-2
aliases = zeus, jupiter
policy_type = erasure_coding
ec_type = %(ec_type)s
default = yes
ec_num_data_fragments = 8
ec_num_parity_fragments = 2
[storage-policy:1]
name = ec10-4
aliases = poseidon, neptune
policy_type = erasure_coding
ec_type = %(ec_type)s
ec_num_data_fragments = 10
ec_num_parity_fragments = 4
""" % {'ec_type': DEFAULT_TEST_EC_TYPE})
ec_policies = parse_storage_policies(good_ec_conf)
self.assertEqual(ec_policies.get_by_name('ec8-2'),
ec_policies[0])
self.assertEqual(ec_policies.get_by_name('ec10-4'),
ec_policies.get_by_name('poseidon'))
name_repeat_ec_conf = self._conf("""
[storage-policy:0]
name = ec8-2
aliases = ec8-2
policy_type = erasure_coding
ec_type = %(ec_type)s
default = yes
ec_num_data_fragments = 8
ec_num_parity_fragments = 2
""" % {'ec_type': DEFAULT_TEST_EC_TYPE})
# The test on the line below should not generate errors. Repeating
# the primary name under aliases is permitted during construction,
# but only because automated testing requires it.
ec_policies = parse_storage_policies(name_repeat_ec_conf)
bad_ec_conf = self._conf("""
[storage-policy:0]
name = ec8-2
aliases = zeus, zeus
policy_type = erasure_coding
ec_type = %(ec_type)s
default = yes
ec_num_data_fragments = 8
ec_num_parity_fragments = 2
""" % {'ec_type': DEFAULT_TEST_EC_TYPE})
self.assertRaisesWithMessage(PolicyError,
'is already assigned to this policy',
parse_storage_policies, bad_ec_conf)
def test_add_remove_names(self):
test_policies = [StoragePolicy(0, 'zero', True),
StoragePolicy(1, 'one', False),
StoragePolicy(2, 'two', False)]
policies = StoragePolicyCollection(test_policies)
# add names
policies.add_policy_alias(1, 'tahi')
self.assertEqual(policies.get_by_name('tahi'), test_policies[1])
policies.add_policy_alias(2, 'rua', 'dos')
self.assertEqual(policies.get_by_name('rua'), test_policies[2])
self.assertEqual(policies.get_by_name('dos'), test_policies[2])
self.assertRaisesWithMessage(PolicyError, 'Invalid name',
policies.add_policy_alias, 2, 'double\n')
# try to add existing name
self.assertRaisesWithMessage(PolicyError, 'Duplicate name',
policies.add_policy_alias, 2, 'two')
self.assertRaisesWithMessage(PolicyError, 'Duplicate name',
policies.add_policy_alias, 1, 'two')
# remove name
policies.remove_policy_alias('tahi')
self.assertEqual(policies.get_by_name('tahi'), None)
# remove only name
self.assertRaisesWithMessage(PolicyError,
'Policies must have at least one name.',
policies.remove_policy_alias, 'zero')
# remove non-existent name
self.assertRaisesWithMessage(PolicyError,
'No policy with name',
policies.remove_policy_alias, 'three')
# remove default name
policies.remove_policy_alias('two')
self.assertEqual(policies.get_by_name('two'), None)
self.assertEqual(policies.get_by_index(2).name, 'rua')
# change default name to a new name
policies.change_policy_primary_name(2, 'two')
self.assertEqual(policies.get_by_name('two'), test_policies[2])
self.assertEqual(policies.get_by_index(2).name, 'two')
# change default name to an existing alias
policies.change_policy_primary_name(2, 'dos')
self.assertEqual(policies.get_by_index(2).name, 'dos')
# change default name to a bad new name
self.assertRaisesWithMessage(PolicyError, 'Invalid name',
policies.change_policy_primary_name,
2, 'bad\nname')
# change default name to a name belonging to another policy
self.assertRaisesWithMessage(PolicyError,
'Other policy',
policies.change_policy_primary_name,
1, 'dos')
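The same management surface is available at runtime; a short sketch of the alias API exercised above (assumes a Swift checkout):

from swift.common.storage_policy import StoragePolicy, StoragePolicyCollection

policies = StoragePolicyCollection([StoragePolicy(0, 'zero', True),
                                    StoragePolicy(1, 'one', False)])
policies.add_policy_alias(1, 'uno', 'tahi')       # attach two aliases
assert policies.get_by_name('uno') == policies.get_by_index(1)
policies.change_policy_primary_name(1, 'uno')     # promote an alias
assert policies.get_by_index(1).name == 'uno'
policies.remove_policy_alias('tahi')              # drop a secondary name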
def test_deprecated_default(self):
bad_conf = self._conf("""
[storage-policy:1]
@ -933,14 +1148,14 @@ class TestStoragePolicies(unittest.TestCase):
msg = 'EC ring for policy %s needs to be configured with ' \
'exactly %d nodes.' % \
(policy.name, policy.ec_ndata + policy.ec_nparity)
self.assertRaisesWithMessage(
RingValidationError, msg,
self.assertRaisesWithMessage(RingValidationError, msg,
policy._validate_ring)
def test_storage_policy_get_info(self):
test_policies = [
StoragePolicy(0, 'zero', is_default=True),
StoragePolicy(1, 'one', is_deprecated=True),
StoragePolicy(1, 'one', is_deprecated=True,
aliases='tahi, uno'),
ECStoragePolicy(10, 'ten',
ec_type=DEFAULT_TEST_EC_TYPE,
ec_ndata=10, ec_nparity=3),
@ -953,28 +1168,33 @@ class TestStoragePolicies(unittest.TestCase):
# default replication
(0, True): {
'name': 'zero',
'aliases': 'zero',
'default': True,
'deprecated': False,
'policy_type': REPL_POLICY
},
(0, False): {
'name': 'zero',
'aliases': 'zero',
'default': True,
},
# deprecated replication
(1, True): {
'name': 'one',
'aliases': 'one, tahi, uno',
'default': False,
'deprecated': True,
'policy_type': REPL_POLICY
},
(1, False): {
'name': 'one',
'aliases': 'one, tahi, uno',
'deprecated': True,
},
# enabled ec
(10, True): {
'name': 'ten',
'aliases': 'ten',
'default': False,
'deprecated': False,
'policy_type': EC_POLICY,
@ -985,10 +1205,12 @@ class TestStoragePolicies(unittest.TestCase):
},
(10, False): {
'name': 'ten',
'aliases': 'ten',
},
# deprecated ec
(11, True): {
'name': 'done',
'aliases': 'done',
'default': False,
'deprecated': True,
'policy_type': EC_POLICY,
@ -999,6 +1221,7 @@ class TestStoragePolicies(unittest.TestCase):
},
(11, False): {
'name': 'done',
'aliases': 'done',
'deprecated': True,
},
}


@ -158,120 +158,122 @@ class TestHeaderKeyDict(unittest.TestCase):
class TestRange(unittest.TestCase):
def test_range(self):
range = swift.common.swob.Range('bytes=1-7')
self.assertEqual(range.ranges[0], (1, 7))
swob_range = swift.common.swob.Range('bytes=1-7')
self.assertEqual(swob_range.ranges[0], (1, 7))
def test_upsidedown_range(self):
range = swift.common.swob.Range('bytes=5-10')
self.assertEqual(range.ranges_for_length(2), [])
swob_range = swift.common.swob.Range('bytes=5-10')
self.assertEqual(swob_range.ranges_for_length(2), [])
def test_str(self):
for range_str in ('bytes=1-7', 'bytes=1-', 'bytes=-1',
'bytes=1-7,9-12', 'bytes=-7,9-'):
range = swift.common.swob.Range(range_str)
self.assertEqual(str(range), range_str)
swob_range = swift.common.swob.Range(range_str)
self.assertEqual(str(swob_range), range_str)
def test_ranges_for_length(self):
range = swift.common.swob.Range('bytes=1-7')
self.assertEqual(range.ranges_for_length(10), [(1, 8)])
self.assertEqual(range.ranges_for_length(5), [(1, 5)])
self.assertEqual(range.ranges_for_length(None), None)
swob_range = swift.common.swob.Range('bytes=1-7')
self.assertEqual(swob_range.ranges_for_length(10), [(1, 8)])
self.assertEqual(swob_range.ranges_for_length(5), [(1, 5)])
self.assertEqual(swob_range.ranges_for_length(None), None)
def test_ranges_for_large_length(self):
range = swift.common.swob.Range('bytes=-1000000000000000000000000000')
self.assertEqual(range.ranges_for_length(100), [(0, 100)])
swob_range = swift.common.swob.Range('bytes=-100000000000000000000000')
self.assertEqual(swob_range.ranges_for_length(100), [(0, 100)])
def test_ranges_for_length_no_end(self):
range = swift.common.swob.Range('bytes=1-')
self.assertEqual(range.ranges_for_length(10), [(1, 10)])
self.assertEqual(range.ranges_for_length(5), [(1, 5)])
self.assertEqual(range.ranges_for_length(None), None)
swob_range = swift.common.swob.Range('bytes=1-')
self.assertEqual(swob_range.ranges_for_length(10), [(1, 10)])
self.assertEqual(swob_range.ranges_for_length(5), [(1, 5)])
self.assertEqual(swob_range.ranges_for_length(None), None)
# This used to freak out:
range = swift.common.swob.Range('bytes=100-')
self.assertEqual(range.ranges_for_length(5), [])
self.assertEqual(range.ranges_for_length(None), None)
swob_range = swift.common.swob.Range('bytes=100-')
self.assertEqual(swob_range.ranges_for_length(5), [])
self.assertEqual(swob_range.ranges_for_length(None), None)
range = swift.common.swob.Range('bytes=4-6,100-')
self.assertEqual(range.ranges_for_length(5), [(4, 5)])
swob_range = swift.common.swob.Range('bytes=4-6,100-')
self.assertEqual(swob_range.ranges_for_length(5), [(4, 5)])
def test_ranges_for_length_no_start(self):
range = swift.common.swob.Range('bytes=-7')
self.assertEqual(range.ranges_for_length(10), [(3, 10)])
self.assertEqual(range.ranges_for_length(5), [(0, 5)])
self.assertEqual(range.ranges_for_length(None), None)
swob_range = swift.common.swob.Range('bytes=-7')
self.assertEqual(swob_range.ranges_for_length(10), [(3, 10)])
self.assertEqual(swob_range.ranges_for_length(5), [(0, 5)])
self.assertEqual(swob_range.ranges_for_length(None), None)
range = swift.common.swob.Range('bytes=4-6,-100')
self.assertEqual(range.ranges_for_length(5), [(4, 5), (0, 5)])
swob_range = swift.common.swob.Range('bytes=4-6,-100')
self.assertEqual(swob_range.ranges_for_length(5), [(4, 5), (0, 5)])
def test_ranges_for_length_multi(self):
range = swift.common.swob.Range('bytes=-20,4-')
self.assertEqual(len(range.ranges_for_length(200)), 2)
swob_range = swift.common.swob.Range('bytes=-20,4-')
self.assertEqual(len(swob_range.ranges_for_length(200)), 2)
# the actual length greater than each range element
self.assertEqual(range.ranges_for_length(200), [(180, 200), (4, 200)])
self.assertEqual(swob_range.ranges_for_length(200),
[(180, 200), (4, 200)])
range = swift.common.swob.Range('bytes=30-150,-10')
self.assertEqual(len(range.ranges_for_length(200)), 2)
swob_range = swift.common.swob.Range('bytes=30-150,-10')
self.assertEqual(len(swob_range.ranges_for_length(200)), 2)
# the actual length lands in the middle of a range
self.assertEqual(range.ranges_for_length(90), [(30, 90), (80, 90)])
self.assertEqual(swob_range.ranges_for_length(90),
[(30, 90), (80, 90)])
# the actual length greater than any of the range
self.assertEqual(range.ranges_for_length(200),
self.assertEqual(swob_range.ranges_for_length(200),
[(30, 151), (190, 200)])
self.assertEqual(range.ranges_for_length(None), None)
self.assertEqual(swob_range.ranges_for_length(None), None)
def test_ranges_for_length_edges(self):
range = swift.common.swob.Range('bytes=0-1, -7')
self.assertEqual(range.ranges_for_length(10),
swob_range = swift.common.swob.Range('bytes=0-1, -7')
self.assertEqual(swob_range.ranges_for_length(10),
[(0, 2), (3, 10)])
range = swift.common.swob.Range('bytes=-7, 0-1')
self.assertEqual(range.ranges_for_length(10),
swob_range = swift.common.swob.Range('bytes=-7, 0-1')
self.assertEqual(swob_range.ranges_for_length(10),
[(3, 10), (0, 2)])
range = swift.common.swob.Range('bytes=-7, 0-1')
self.assertEqual(range.ranges_for_length(5),
swob_range = swift.common.swob.Range('bytes=-7, 0-1')
self.assertEqual(swob_range.ranges_for_length(5),
[(0, 5), (0, 2)])
def test_ranges_for_length_overlapping(self):
# Fewer than 3 overlaps is okay
range = swift.common.swob.Range('bytes=10-19,15-24')
self.assertEqual(range.ranges_for_length(100),
swob_range = swift.common.swob.Range('bytes=10-19,15-24')
self.assertEqual(swob_range.ranges_for_length(100),
[(10, 20), (15, 25)])
range = swift.common.swob.Range('bytes=10-19,15-24,20-29')
self.assertEqual(range.ranges_for_length(100),
swob_range = swift.common.swob.Range('bytes=10-19,15-24,20-29')
self.assertEqual(swob_range.ranges_for_length(100),
[(10, 20), (15, 25), (20, 30)])
# Adjacent ranges, though suboptimal, don't overlap
range = swift.common.swob.Range('bytes=10-19,20-29,30-39')
self.assertEqual(range.ranges_for_length(100),
swob_range = swift.common.swob.Range('bytes=10-19,20-29,30-39')
self.assertEqual(swob_range.ranges_for_length(100),
[(10, 20), (20, 30), (30, 40)])
# Ranges that share a byte do overlap
range = swift.common.swob.Range('bytes=10-20,20-30,30-40,40-50')
self.assertEqual(range.ranges_for_length(100), [])
swob_range = swift.common.swob.Range('bytes=10-20,20-30,30-40,40-50')
self.assertEqual(swob_range.ranges_for_length(100), [])
# With suffix byte range specs (e.g. bytes=-2), make sure that we
# correctly determine overlapping-ness based on the entity length
range = swift.common.swob.Range('bytes=10-15,15-20,30-39,-9')
self.assertEqual(range.ranges_for_length(100),
swob_range = swift.common.swob.Range('bytes=10-15,15-20,30-39,-9')
self.assertEqual(swob_range.ranges_for_length(100),
[(10, 16), (15, 21), (30, 40), (91, 100)])
self.assertEqual(range.ranges_for_length(20), [])
self.assertEqual(swob_range.ranges_for_length(20), [])
def test_ranges_for_length_nonascending(self):
few_ranges = ("bytes=100-109,200-209,300-309,500-509,"
"400-409,600-609,700-709")
many_ranges = few_ranges + ",800-809"
range = swift.common.swob.Range(few_ranges)
self.assertEqual(range.ranges_for_length(100000),
swob_range = swift.common.swob.Range(few_ranges)
self.assertEqual(swob_range.ranges_for_length(100000),
[(100, 110), (200, 210), (300, 310), (500, 510),
(400, 410), (600, 610), (700, 710)])
range = swift.common.swob.Range(many_ranges)
self.assertEqual(range.ranges_for_length(100000), [])
swob_range = swift.common.swob.Range(many_ranges)
self.assertEqual(swob_range.ranges_for_length(100000), [])
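The renamed swob_range fixtures above all probe ranges_for_length(); its contract is that each returned (start, end) pair is end-exclusive and clamped to the entity length, and that an unknown length yields None. A minimal sketch (assumes a Swift checkout):

from swift.common.swob import Range

r = Range('bytes=4-6,-100')
assert r.ranges_for_length(5) == [(4, 5), (0, 5)]  # end-exclusive, clamped
assert r.ranges_for_length(None) is None           # unknown entity length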
def test_ranges_for_length_too_many(self):
at_the_limit_ranges = (


@ -34,6 +34,7 @@ import sys
import json
import math
import six
from six import BytesIO, StringIO
from six.moves.queue import Queue, Empty
from six.moves import range
@ -139,10 +140,6 @@ class MockSys(object):
self.stdio_fds = [self.stdin.fileno(), self.stdout.fileno(),
self.stderr.fileno()]
@property
def version_info(self):
return sys.version_info
def reset_loggers():
if hasattr(utils.get_logger, 'handler4logger'):
@ -779,6 +776,15 @@ class TestTimestamp(unittest.TestCase):
self.assertEqual(
sorted([t.internal for t in timestamps]), expected)
def test_hashable(self):
ts_0 = utils.Timestamp('1402444821.72589')
ts_0_also = utils.Timestamp('1402444821.72589')
self.assertEqual(ts_0, ts_0_also) # sanity
self.assertEqual(hash(ts_0), hash(ts_0_also))
d = {ts_0: 'whatever'}
self.assertIn(ts_0, d) # sanity
self.assertIn(ts_0_also, d)
class TestUtils(unittest.TestCase):
"""Tests for swift.common.utils """
@ -1212,7 +1218,7 @@ class TestUtils(unittest.TestCase):
logger = logging.getLogger('server')
logger.addHandler(logging.StreamHandler(sio))
logger = utils.get_logger(None, 'server', log_route='server')
logger.warn('test1')
logger.warning('test1')
self.assertEqual(sio.getvalue(), 'test1\n')
logger.debug('test2')
self.assertEqual(sio.getvalue(), 'test1\n')
@ -1224,7 +1230,7 @@ class TestUtils(unittest.TestCase):
# way to syslog; but exercises the code.
logger = utils.get_logger({'log_facility': 'LOG_LOCAL3'}, 'server',
log_route='server')
logger.warn('test4')
logger.warning('test4')
self.assertEqual(sio.getvalue(),
'test1\ntest3\ntest4\n')
# make sure debug doesn't log by default
@ -1482,7 +1488,7 @@ class TestUtils(unittest.TestCase):
self.assertTrue('12345' not in log_msg)
# test txn already in message
self.assertEqual(logger.txn_id, '12345')
logger.warn('test 12345 test')
logger.warning('test 12345 test')
self.assertEqual(strip_value(sio), 'test 12345 test\n')
# Test multi line collapsing
logger.error('my\nerror\nmessage')
@ -1508,7 +1514,7 @@ class TestUtils(unittest.TestCase):
self.assertTrue('1.2.3.4' not in log_msg)
# test client_ip (and txn) already in message
self.assertEqual(logger.client_ip, '1.2.3.4')
logger.warn('test 1.2.3.4 test 12345')
logger.warning('test 1.2.3.4 test 12345')
self.assertEqual(strip_value(sio), 'test 1.2.3.4 test 12345\n')
finally:
logger.logger.removeHandler(handler)
@ -3663,7 +3669,7 @@ class TestStatsdLogging(unittest.TestCase):
self.assertEqual(len(mock_socket.sent), 1)
payload = mock_socket.sent[0][0]
self.assertTrue(payload.endswith("|@0.5"))
self.assertTrue(payload.endswith(b"|@0.5"))
def test_sample_rates_with_sample_rate_factor(self):
logger = utils.get_logger({
@ -3689,8 +3695,10 @@ class TestStatsdLogging(unittest.TestCase):
self.assertEqual(len(mock_socket.sent), 1)
payload = mock_socket.sent[0][0]
self.assertTrue(payload.endswith("|@%s" % effective_sample_rate),
payload)
suffix = "|@%s" % effective_sample_rate
if six.PY3:
suffix = suffix.encode('utf-8')
self.assertTrue(payload.endswith(suffix), payload)
effective_sample_rate = 0.587 * 0.91
statsd_client.random = lambda: effective_sample_rate - 0.001
@ -3698,8 +3706,10 @@ class TestStatsdLogging(unittest.TestCase):
self.assertEqual(len(mock_socket.sent), 2)
payload = mock_socket.sent[1][0]
self.assertTrue(payload.endswith("|@%s" % effective_sample_rate),
payload)
suffix = "|@%s" % effective_sample_rate
if six.PY3:
suffix = suffix.encode('utf-8')
self.assertTrue(payload.endswith(suffix), payload)
def test_timing_stats(self):
class MockController(object):
@ -3915,6 +3925,26 @@ class TestRateLimitedIterator(unittest.TestCase):
# first element.
self.assertEqual(len(got), 11)
def test_rate_limiting_sometimes(self):
def testfunc():
limited_iterator = utils.RateLimitedIterator(
range(9999), 100,
ratelimit_if=lambda item: item % 23 != 0)
got = []
started_at = time.time()
try:
while time.time() - started_at < 0.5:
got.append(next(limited_iterator))
except StopIteration:
pass
return got
got = self.run_under_pseudo_time(testfunc)
# we'd get 51 without the ratelimit_if, but because 0, 23 and 46
# weren't subject to ratelimiting, we get 54 instead
self.assertEqual(len(got), 54)
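ratelimit_if gives callers a per-item escape hatch from the limiter; a hedged sketch of the same pattern the test uses (assumes a Swift checkout):

from swift.common.utils import RateLimitedIterator

# items divisible by 23 bypass the 100-per-second limit entirely
limited = RateLimitedIterator(range(9999), 100,
                              ratelimit_if=lambda item: item % 23 != 0)
for item in limited:
    pass  # consumed at (mostly) 100 items per second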
def test_limit_after(self):
def testfunc():
@ -3996,7 +4026,7 @@ class TestStatsdLoggingDelegation(unittest.TestCase):
while True:
try:
payload = self.sock.recv(4096)
if payload and 'STOP' in payload:
if payload and b'STOP' in payload:
return 42
self.queue.put(payload)
except Exception as e:
@ -4019,10 +4049,14 @@ class TestStatsdLoggingDelegation(unittest.TestCase):
def assertStat(self, expected, sender_fn, *args, **kwargs):
got = self._send_and_get(sender_fn, *args, **kwargs)
if six.PY3:
got = got.decode('utf-8')
return self.assertEqual(expected, got)
def assertStatMatches(self, expected_regexp, sender_fn, *args, **kwargs):
got = self._send_and_get(sender_fn, *args, **kwargs)
if six.PY3:
got = got.decode('utf-8')
return self.assertTrue(re.search(expected_regexp, got),
[got, expected_regexp])
@ -4191,7 +4225,7 @@ class TestStatsdLoggingDelegation(unittest.TestCase):
utils.get_valid_utf8_str(valid_utf8_str))
self.assertEqual(valid_utf8_str,
utils.get_valid_utf8_str(unicode_sample))
self.assertEqual('\xef\xbf\xbd\xef\xbf\xbd\xec\xbc\x9d\xef\xbf\xbd',
self.assertEqual(b'\xef\xbf\xbd\xef\xbf\xbd\xec\xbc\x9d\xef\xbf\xbd',
utils.get_valid_utf8_str(invalid_utf8_str))
@reset_logger_state


@ -825,6 +825,13 @@ class TestWSGI(unittest.TestCase):
self.assertTrue('HTTP_X_PROJECT_ID' in newenv)
self.assertEqual(newenv['HTTP_X_PROJECT_ID'], '5678')
def test_make_env_keeps_referer(self):
oldenv = {'HTTP_REFERER': 'http://blah.example.com'}
newenv = wsgi.make_env(oldenv)
self.assertTrue('HTTP_REFERER' in newenv)
self.assertEqual(newenv['HTTP_REFERER'], 'http://blah.example.com')
class TestServersPerPortStrategy(unittest.TestCase):
def setUp(self):


@ -36,7 +36,7 @@ import mock
from test.unit import (patch_policies, with_tempdir, make_timestamp_iter,
EMPTY_ETAG)
from test.unit.common.test_db import TestExampleBroker
from test.unit.common import test_db
class TestContainerBroker(unittest.TestCase):
@ -1680,7 +1680,7 @@ class TestContainerBroker(unittest.TestCase):
self.assertEqual(broker.get_policy_stats(), expected)
class TestCommonContainerBroker(TestExampleBroker):
class TestCommonContainerBroker(test_db.TestExampleBroker):
broker_class = ContainerBroker


@ -36,7 +36,7 @@ from test.unit.common.middleware.helpers import FakeSwift
def timestamp_to_last_modified(timestamp):
return datetime.fromtimestamp(
return datetime.utcfromtimestamp(
float(Timestamp(timestamp))).strftime('%Y-%m-%dT%H:%M:%S.%f')


@ -23,14 +23,14 @@ import random
import sqlite3
from swift.common import db_replicator
from swift.container import replicator, backend, server
from swift.container import replicator, backend, server, sync_store
from swift.container.reconciler import (
MISPLACED_OBJECTS_ACCOUNT, get_reconciler_container_name)
from swift.common.utils import Timestamp
from swift.common.storage_policy import POLICIES
from test.unit.common import test_db_replicator
from test.unit import patch_policies, make_timestamp_iter
from test.unit import patch_policies, make_timestamp_iter, FakeLogger
from contextlib import contextmanager
@ -998,6 +998,135 @@ class TestReplicatorSync(test_db_replicator.TestReplicatorSync):
daemon._post_replicate_hook(broker, info, [])
self.assertEqual(0, len(calls))
def test_update_sync_store_exception(self):
class FakeContainerSyncStore(object):
def update_sync_store(self, broker):
raise OSError(1, '1')
logger = FakeLogger()
daemon = replicator.ContainerReplicator({}, logger)
daemon.sync_store = FakeContainerSyncStore()
ts_iter = make_timestamp_iter()
broker = self._get_broker('a', 'c', node_index=0)
timestamp = next(ts_iter)
broker.initialize(timestamp.internal, POLICIES.default.idx)
info = broker.get_replication_info()
daemon._post_replicate_hook(broker, info, [])
log_lines = logger.get_lines_for_level('error')
self.assertEqual(1, len(log_lines))
self.assertIn('Failed to update sync_store', log_lines[0])
def test_update_sync_store(self):
klass = 'swift.container.sync_store.ContainerSyncStore'
daemon = replicator.ContainerReplicator({})
daemon.sync_store = sync_store.ContainerSyncStore(
daemon.root, daemon.logger, daemon.mount_check)
ts_iter = make_timestamp_iter()
broker = self._get_broker('a', 'c', node_index=0)
timestamp = next(ts_iter)
broker.initialize(timestamp.internal, POLICIES.default.idx)
info = broker.get_replication_info()
with mock.patch(klass + '.remove_synced_container') as mock_remove:
with mock.patch(klass + '.add_synced_container') as mock_add:
daemon._post_replicate_hook(broker, info, [])
self.assertEqual(0, mock_remove.call_count)
self.assertEqual(0, mock_add.call_count)
timestamp = next(ts_iter)
# sync-to and sync-key empty - remove from store
broker.update_metadata(
{'X-Container-Sync-To': ('', timestamp.internal),
'X-Container-Sync-Key': ('', timestamp.internal)})
with mock.patch(klass + '.remove_synced_container') as mock_remove:
with mock.patch(klass + '.add_synced_container') as mock_add:
daemon._post_replicate_hook(broker, info, [])
self.assertEqual(0, mock_add.call_count)
mock_remove.assert_called_once_with(broker)
timestamp = next(ts_iter)
# sync-to is not empty, sync-key is empty - remove from store
broker.update_metadata(
{'X-Container-Sync-To': ('a', timestamp.internal)})
with mock.patch(klass + '.remove_synced_container') as mock_remove:
with mock.patch(klass + '.add_synced_container') as mock_add:
daemon._post_replicate_hook(broker, info, [])
self.assertEqual(0, mock_add.call_count)
mock_remove.assert_called_once_with(broker)
timestamp = next(ts_iter)
# sync-to is empty, sync-key is not empty - remove from store
broker.update_metadata(
{'X-Container-Sync-To': ('', timestamp.internal),
'X-Container-Sync-Key': ('secret', timestamp.internal)})
with mock.patch(klass + '.remove_synced_container') as mock_remove:
with mock.patch(klass + '.add_synced_container') as mock_add:
daemon._post_replicate_hook(broker, info, [])
self.assertEqual(0, mock_add.call_count)
mock_remove.assert_called_once_with(broker)
timestamp = next(ts_iter)
# sync-to, sync-key both not empty - add to store
broker.update_metadata(
{'X-Container-Sync-To': ('a', timestamp.internal),
'X-Container-Sync-Key': ('secret', timestamp.internal)})
with mock.patch(klass + '.remove_synced_container') as mock_remove:
with mock.patch(klass + '.add_synced_container') as mock_add:
daemon._post_replicate_hook(broker, info, [])
mock_add.assert_called_once_with(broker)
self.assertEqual(0, mock_remove.call_count)
timestamp = next(ts_iter)
# container is removed - need to remove from store
broker.delete_db(timestamp.internal)
broker.update_metadata(
{'X-Container-Sync-To': ('a', timestamp.internal),
'X-Container-Sync-Key': ('secret', timestamp.internal)})
with mock.patch(klass + '.remove_synced_container') as mock_remove:
with mock.patch(klass + '.add_synced_container') as mock_add:
daemon._post_replicate_hook(broker, info, [])
self.assertEqual(0, mock_add.call_count)
mock_remove.assert_called_once_with(broker)
def test_sync_triggers_sync_store_update(self):
klass = 'swift.container.sync_store.ContainerSyncStore'
ts_iter = make_timestamp_iter()
# Create two containers as follows:
# broker_1 which is not set for sync
# broker_2 which is set for sync and then unset
# test that while replicating both we see no activity
# for broker_1, and the anticipated activity for broker_2
broker_1 = self._get_broker('a', 'c', node_index=0)
broker_1.initialize(next(ts_iter).internal, POLICIES.default.idx)
broker_2 = self._get_broker('b', 'd', node_index=0)
broker_2.initialize(next(ts_iter).internal, POLICIES.default.idx)
broker_2.update_metadata(
{'X-Container-Sync-To': ('a', next(ts_iter).internal),
'X-Container-Sync-Key': ('secret', next(ts_iter).internal)})
# replicate once according to broker_1
# relying on the fact that FakeRing would place both
# in the same partition.
part, node = self._get_broker_part_node(broker_1)
with mock.patch(klass + '.remove_synced_container') as mock_remove:
with mock.patch(klass + '.add_synced_container') as mock_add:
self._run_once(node)
self.assertEqual(1, mock_add.call_count)
self.assertEqual(broker_2.db_file, mock_add.call_args[0][0].db_file)
self.assertEqual(0, mock_remove.call_count)
broker_2.update_metadata(
{'X-Container-Sync-To': ('', next(ts_iter).internal)})
# replicate once this time according to broker_2
# relying on the fact that FakeRing would place both
# in the same partition.
part, node = self._get_broker_part_node(broker_2)
with mock.patch(klass + '.remove_synced_container') as mock_remove:
with mock.patch(klass + '.add_synced_container') as mock_add:
self._run_once(node)
self.assertEqual(0, mock_add.call_count)
self.assertEqual(1, mock_remove.call_count)
self.assertEqual(broker_2.db_file, mock_remove.call_args[0][0].db_file)
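Taken together, the cases above encode a single membership rule for the sync store, applied on every replication pass. A hedged sketch of that predicate (should_sync is a hypothetical helper, not the sync_store API):

def should_sync(broker):
    # both metadata values must be non-empty and the DB still live
    sync_to = broker.metadata.get('X-Container-Sync-To', ('', 0))[0]
    sync_key = broker.metadata.get('X-Container-Sync-Key', ('', 0))[0]
    return bool(sync_to and sync_key) and not broker.is_deleted()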
if __name__ == '__main__':
unittest.main()

Some files were not shown because too many files have changed in this diff.