Merge branch 'master' into feature/deep
Conflicts: swift/obj/updater.py test/unit/common/test_db_replicator.py test/unit/container/test_server.py Change-Id: I7997e882d5e04dfc9e6ff82d5e08890d5ec250da
This commit is contained in:
commit
5c564e5396
@ -1,31 +0,0 @@
|
||||
#!/usr/bin/env python
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from __future__ import print_function
|
||||
from gettext import gettext as _
|
||||
from sys import argv, exit, stderr
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
argv[0:1] = ['swift', 'tempurl']
|
||||
print("", file=stderr)
|
||||
print(_("NOTE: This command is deprecated and will be removed "
|
||||
"in the future. Please use 'swift tempurl' instead."), file=stderr)
|
||||
print("", file=stderr)
|
||||
try:
|
||||
from swiftclient.shell import main
|
||||
except ImportError:
|
||||
print(_("ERROR: python-swiftclient not installed."), file=stderr)
|
||||
exit(1)
|
||||
exit(main(argv))
|
182
doc/manpages/container-reconciler.conf.5
Normal file
182
doc/manpages/container-reconciler.conf.5
Normal file
@ -0,0 +1,182 @@
|
||||
.\"
|
||||
.\" Author: HCLTech-SSW <hcl_ss_oss@hcl.com>
|
||||
.\" Copyright (c) 2010-2017 OpenStack Foundation.
|
||||
.\"
|
||||
.\" Licensed under the Apache License, Version 2.0 (the "License");
|
||||
.\" you may not use this file except in compliance with the License.
|
||||
.\" You may obtain a copy of the License at
|
||||
.\"
|
||||
.\" http://www.apache.org/licenses/LICENSE-2.0
|
||||
.\"
|
||||
.\" Unless required by applicable law or agreed to in writing, software
|
||||
.\" distributed under the License is distributed on an "AS IS" BASIS,
|
||||
.\" WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
.\" implied.
|
||||
.\" See the License for the specific language governing permissions and
|
||||
.\" limitations under the License.
|
||||
.\"
|
||||
.TH container-reconciler.conf 5 "10/25/2017" "Linux" "OpenStack Swift"
|
||||
|
||||
.SH NAME
|
||||
.LP
|
||||
.B container-reconciler.conf
|
||||
\- configuration file for the OpenStack Swift container reconciler
|
||||
|
||||
|
||||
.SH SYNOPSIS
|
||||
.LP
|
||||
.B container-reconciler.conf
|
||||
|
||||
|
||||
.SH DESCRIPTION
|
||||
.PP
|
||||
This is the configuration file used by the container reconciler.
|
||||
|
||||
The configuration file follows the python-pastedeploy syntax. The file is divided
|
||||
into sections, which are enclosed by square brackets. Each section will contain a
|
||||
certain number of key/value parameters which are described later.
|
||||
|
||||
Any line that begins with a '#' symbol is ignored.
|
||||
|
||||
You can find more information about python-pastedeploy configuration format at
|
||||
\fIhttp://pythonpaste.org/deploy/#config-format\fR
|
||||
|
||||
|
||||
.SH GLOBAL SECTION
|
||||
.PD 1
|
||||
.RS 0
|
||||
This is indicated by section named [DEFAULT]. Below are the parameters that
|
||||
are acceptable within this section.
|
||||
|
||||
.IP "\fBlog_address\fR"
|
||||
Location where syslog sends the logs to. The default is /dev/log.
|
||||
.IP "\fBlog_custom_handlers \fR"
|
||||
Comma-separated list of functions to call to setup custom log handlers.
|
||||
.IP "\fBlog_facility\fR"
|
||||
Syslog log facility. The default is LOG_LOCAL0.
|
||||
.IP "\fBlog_level\fR"
|
||||
Log level used for logging. The default is INFO.
|
||||
.IP "\fBlog_name\fR"
|
||||
Label used when logging. The default is swift.
|
||||
.IP "\fBlog_statsd_default_sample_rate\fR"
|
||||
Defines the probability of sending a sample for any given event or
|
||||
timing measurement. The default is 1.0.
|
||||
.IP "\fBlog_statsd_host\fR"
|
||||
If not set, the StatsD feature is disabled. The default is localhost.
|
||||
.IP "\fBlog_statsd_metric_prefix\fR"
|
||||
Value will be prepended to every metric sent to the StatsD server.
|
||||
.IP "\fBlog_statsd_port\fR"
|
||||
The port value for the StatsD server. The default is 8125.
|
||||
.IP "\fBlog_statsd_sample_rate_factor\fR"
|
||||
It is not recommended to set this to a value less than 1.0, if frequency of
|
||||
logging is too high, tune the log_statsd_default_sample_rate instead.
|
||||
The default value is 1.0.
|
||||
.IP "\fBlog_udp_host\fR"
|
||||
If not set, the UDP receiver for syslog is disabled.
|
||||
.IP "\fBlog_udp_port\fR"
|
||||
Port value for UDP receiver, if enabled. The default is 514.
|
||||
.IP "\fBswift_dir\fR"
|
||||
Swift configuration directory. The default is /etc/swift.
|
||||
.IP "\fBuser\fR"
|
||||
User to run as. The default is swift.
|
||||
.RE
|
||||
.PD
|
||||
|
||||
|
||||
.SH CONTAINER RECONCILER SECTION
|
||||
.PD 1
|
||||
.RS 0
|
||||
.IP "\fB[container-reconciler]\fR"
|
||||
.RE
|
||||
.RS 3
|
||||
.IP "\fBinterval\fR"
|
||||
Minimum time for a pass to take. The default is 30 seconds.
|
||||
.IP "\fBreclaim_age\fR"
|
||||
Time elapsed in seconds before an object can be reclaimed. The default is 604800 seconds.
|
||||
.IP "\fBrequest_tries\fR"
|
||||
Server errors from requests will be retried by default. The default is 3.
|
||||
.RE
|
||||
.PD
|
||||
|
||||
|
||||
.SH PIPELINE SECTION
|
||||
.PD 1
|
||||
.RS 0
|
||||
.IP "\fB[pipeline:main]\fR"
|
||||
.RE
|
||||
.RS 3
|
||||
.IP "\fBpipeline\fR"
|
||||
Pipeline to use for processing operations. The default is "catch_errors proxy-logging cache proxy-server".
|
||||
.RE
|
||||
.PD
|
||||
|
||||
|
||||
.SH APP SECTION
|
||||
.PD 1
|
||||
.RS 0
|
||||
\fBFor details of the available options see proxy-server.conf.5.\fR
|
||||
|
||||
.RS 0
|
||||
.IP "\fB[app:proxy-server]\fR"
|
||||
.RE
|
||||
.RS 3
|
||||
.IP "\fBuse\fR"
|
||||
Entry point for paste.deploy in the server.
|
||||
This is normally \fBegg:swift#proxy\fR.
|
||||
.RE
|
||||
.PD
|
||||
|
||||
|
||||
.SH FILTER SECTIONS
|
||||
.PD 1
|
||||
.RS 0
|
||||
Any section that has its name prefixed by "filter:" indicates a filter section.
|
||||
Filters are used to specify configuration parameters for specific swift middlewares.
|
||||
Below are the filters available and respective acceptable parameters.
|
||||
|
||||
\fBFor details of the available options for each filter section see proxy-server.conf.5.\fR
|
||||
|
||||
.RS 0
|
||||
.IP "\fB[filter:cache]\fR"
|
||||
.RE
|
||||
Caching middleware that manages caching in swift.
|
||||
|
||||
.RS 3
|
||||
.IP "\fBuse\fR"
|
||||
Entry point for paste.deploy in the server.
|
||||
This is normally \fBegg:swift#memcache\fR.
|
||||
.RE
|
||||
.PD
|
||||
|
||||
|
||||
.RS 0
|
||||
.IP "\fB[filter:catch_errors]\fR"
|
||||
.RE
|
||||
.RS 3
|
||||
.IP "\fBuse\fR"
|
||||
Entry point for paste.deploy in the server.
|
||||
This is normally \fBegg:swift#catch_errors\fR.
|
||||
.RE
|
||||
.PD
|
||||
|
||||
|
||||
.RS 0
|
||||
.IP "\fB[filter:proxy-logging]\fR"
|
||||
.RE
|
||||
.RS 3
|
||||
.IP "\fBuse\fR"
|
||||
Entry point for paste.deploy in the server.
|
||||
This is normally \fBegg:swift#proxy_logging\fR.
|
||||
.RE
|
||||
.PD
|
||||
|
||||
|
||||
.SH DOCUMENTATION
|
||||
.LP
|
||||
More in depth documentation in regards to
|
||||
.BI swift-container-reconciler
|
||||
and also about OpenStack Swift as a whole can be found at
|
||||
.BI https://docs.openstack.org/swift/latest/overview_policies.html.
|
||||
|
||||
.SH "SEE ALSO"
|
||||
.BR swift-container-reconciler(1)
|
@ -225,11 +225,12 @@ should not specify any value for "replication_server".
|
||||
Set to restrict the number of concurrent incoming SSYNC requests
|
||||
Set to 0 for unlimited (the default is 4). Note that SSYNC requests are only used
|
||||
by the object reconstructor or the object replicator when configured to use ssync.
|
||||
.IP "\fBreplication_one_per_device\fR"
|
||||
Restricts incoming SSYNC requests to one per device,
|
||||
replication_concurrency above allowing. This can help control I/O to each
|
||||
device, but you may wish to set this to False to allow multiple SSYNC
|
||||
requests (up to the above replication_concurrency setting) per device. The default is true.
|
||||
.IP "\fBreplication_concurrency_per_device\fR"
|
||||
Set to restrict the number of concurrent incoming SSYNC requests per device;
|
||||
set to 0 for unlimited requests per device. This can help control I/O to each
|
||||
device. This does not override replication_concurrency described above, so you
|
||||
may need to adjust both parameters depending on your hardware or network
|
||||
capacity. Defaults to 1.
|
||||
.IP "\fBreplication_lock_timeout\fR"
|
||||
Number of seconds to wait for an existing replication device lock before
|
||||
giving up. The default is 15.
|
||||
|
@ -1,56 +0,0 @@
|
||||
.\"
|
||||
.\" Copyright (c) 2016 OpenStack Foundation.
|
||||
.\"
|
||||
.\" Licensed under the Apache License, Version 2.0 (the "License");
|
||||
.\" you may not use this file except in compliance with the License.
|
||||
.\" You may obtain a copy of the License at
|
||||
.\"
|
||||
.\" http://www.apache.org/licenses/LICENSE-2.0
|
||||
.\"
|
||||
.\" Unless required by applicable law or agreed to in writing, software
|
||||
.\" distributed under the License is distributed on an "AS IS" BASIS,
|
||||
.\" WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
.\" implied.
|
||||
.\" See the License for the specific language governing permissions and
|
||||
.\" limitations under the License.
|
||||
.\"
|
||||
.TH SWIFT-TEMP-URL "1" "August 2016" "OpenStack Swift"
|
||||
|
||||
.SH NAME
|
||||
swift\-temp\-url \- generates the query parameters for OpenStack Swift Temporary URL middleware
|
||||
|
||||
.SH DESCRIPTION
|
||||
.PP
|
||||
Tool that generates the query parameters which can be used to access Swift
|
||||
objects directly from a browser by using the Temporary URL middleware.
|
||||
|
||||
.B NOTE: This command is deprecated and will be removed
|
||||
.B in the future. Please use 'swift tempurl' instead.
|
||||
|
||||
.SH SYNOPSIS
|
||||
.B swift\-temp\-url
|
||||
\fImethod\fR \fIseconds\fR \fIpath\fR \fIkey\fR
|
||||
|
||||
.SH OPTIONS
|
||||
.TP
|
||||
.I method
|
||||
The method to allow; GET for example.
|
||||
.TP
|
||||
.I seconds
|
||||
The number of seconds from now to allow requests.
|
||||
.TP
|
||||
.I path
|
||||
The full path to the resource.
|
||||
Example: \fI/v1/AUTH_account/c/o\fP
|
||||
.TP
|
||||
.I key
|
||||
The X\-Account\-Meta\-Temp\-URL\-Key for the account.
|
||||
|
||||
.SH DOCUMENTATION
|
||||
.LP
|
||||
More in depth documentation in regards to
|
||||
.BI swift\-temp\-url
|
||||
and also about OpenStack Swift as a whole can be found at
|
||||
.BI https://docs.openstack.org/swift/latest/
|
||||
and
|
||||
.BI https://docs.openstack.org
|
@ -51,7 +51,7 @@ To create a static large object, divide your content into pieces and
|
||||
create (upload) a segment object to contain each piece.
|
||||
|
||||
Create a manifest object. Include the ``multipart-manifest=put``
|
||||
query string at the end of the manifest object name to indicate that
|
||||
query parameter at the end of the manifest object name to indicate that
|
||||
this is a manifest object.
|
||||
|
||||
The body of the **PUT** request on the manifest object comprises a json
|
||||
@ -102,7 +102,7 @@ contrast to dynamic large objects.
|
||||
}
|
||||
]
|
||||
|
||||
|
|
||||
|
|
||||
|
||||
The ``Content-Length`` request header must contain the length of the
|
||||
json content—not the length of the segment objects. However, after the
|
||||
@ -113,9 +113,22 @@ of the concatenated ``ETag`` values of the object segments. You can also
|
||||
set the ``Content-Type`` request header and custom object metadata.
|
||||
|
||||
When the **PUT** operation sees the ``multipart-manifest=put`` query
|
||||
string, it reads the request body and verifies that each segment
|
||||
parameter, it reads the request body and verifies that each segment
|
||||
object exists and that the sizes and ETags match. If there is a
|
||||
mismatch, the **PUT**\ operation fails.
|
||||
mismatch, the **PUT** operation fails.
|
||||
|
||||
This verification process can take a long time to complete, particularly
|
||||
as the number of segments increases. You may include a ``heartbeat=on``
|
||||
query parameter to have the server:
|
||||
|
||||
1. send a ``202 Accepted`` response before it begins validating segments,
|
||||
2. periodically send whitespace characters to keep the connection alive, and
|
||||
3. send a final response code in the body.
|
||||
|
||||
.. note::
|
||||
The server may still immediately respond with ``400 Bad Request``
|
||||
if it can determine that the request is invalid before making
|
||||
backend requests.
|
||||
|
||||
If everything matches, the manifest object is created. The
|
||||
``X-Static-Large-Object`` metadata is set to ``true`` indicating that
|
||||
@ -124,18 +137,18 @@ this is a static object manifest.
|
||||
Normally when you perform a **GET** operation on the manifest object,
|
||||
the response body contains the concatenated content of the segment
|
||||
objects. To download the manifest list, use the
|
||||
``multipart-manifest=get`` query string. The resulting list is not
|
||||
``multipart-manifest=get`` query parameter. The resulting list is not
|
||||
formatted the same as the manifest you originally used in the **PUT**
|
||||
operation.
|
||||
|
||||
If you use the **DELETE** operation on a manifest object, the manifest
|
||||
object is deleted. The segment objects are not affected. However, if you
|
||||
add the ``multipart-manifest=delete`` query string, the segment
|
||||
add the ``multipart-manifest=delete`` query parameter, the segment
|
||||
objects are deleted and if all are successfully deleted, the manifest
|
||||
object is also deleted.
|
||||
|
||||
To change the manifest, use a **PUT** operation with the
|
||||
``multipart-manifest=put`` query string. This request creates a
|
||||
``multipart-manifest=put`` query parameter. This request creates a
|
||||
manifest object. You can also update the object metadata in the usual
|
||||
way.
|
||||
|
||||
@ -326,7 +339,7 @@ a manifest object but a normal object with content same as what you would
|
||||
get on a **GET** request to the original manifest object.
|
||||
|
||||
To copy the manifest object, you include the ``multipart-manifest=get``
|
||||
query string in the **COPY** request. The new object contains the same
|
||||
query parameter in the **COPY** request. The new object contains the same
|
||||
manifest as the original. The segment objects are not copied. Instead,
|
||||
both the original and new manifest objects share the same set of segment
|
||||
objects.
|
||||
|
@ -563,119 +563,119 @@ ionice_priority None I/O scheduling priority of server
|
||||
[object-server]
|
||||
***************
|
||||
|
||||
============================= ====================== ===============================================
|
||||
Option Default Description
|
||||
----------------------------- ---------------------- -----------------------------------------------
|
||||
use paste.deploy entry point for the
|
||||
object server. For most cases,
|
||||
this should be
|
||||
`egg:swift#object`.
|
||||
set log_name object-server Label used when logging
|
||||
set log_facility LOG_LOCAL0 Syslog log facility
|
||||
set log_level INFO Logging level
|
||||
set log_requests True Whether or not to log each
|
||||
request
|
||||
set log_address /dev/log Logging directory
|
||||
user swift User to run as
|
||||
max_upload_time 86400 Maximum time allowed to upload an
|
||||
object
|
||||
slow 0 If > 0, Minimum time in seconds for a PUT or
|
||||
DELETE request to complete. This is only
|
||||
useful to simulate slow devices during testing
|
||||
and development.
|
||||
mb_per_sync 512 On PUT requests, sync file every
|
||||
n MB
|
||||
keep_cache_size 5242880 Largest object size to keep in
|
||||
buffer cache
|
||||
keep_cache_private false Allow non-public objects to stay
|
||||
in kernel's buffer cache
|
||||
allowed_headers Content-Disposition, Comma separated list of headers
|
||||
Content-Encoding, that can be set in metadata on an object.
|
||||
X-Delete-At, This list is in addition to
|
||||
X-Object-Manifest, X-Object-Meta-* headers and cannot include
|
||||
X-Static-Large-Object Content-Type, etag, Content-Length, or deleted
|
||||
auto_create_account_prefix . Prefix used when automatically
|
||||
creating accounts.
|
||||
replication_server Configure parameter for creating
|
||||
specific server. To handle all verbs,
|
||||
including replication verbs, do not
|
||||
specify "replication_server"
|
||||
(this is the default). To only
|
||||
handle replication, set to a True
|
||||
value (e.g. "True" or "1").
|
||||
To handle only non-replication
|
||||
verbs, set to "False". Unless you
|
||||
have a separate replication network, you
|
||||
should not specify any value for
|
||||
"replication_server".
|
||||
replication_concurrency 4 Set to restrict the number of
|
||||
concurrent incoming SSYNC
|
||||
requests; set to 0 for unlimited
|
||||
replication_one_per_device True Restricts incoming SSYNC
|
||||
requests to one per device,
|
||||
replication_currency above
|
||||
allowing. This can help control
|
||||
I/O to each device, but you may
|
||||
wish to set this to False to
|
||||
allow multiple SSYNC
|
||||
requests (up to the above
|
||||
replication_concurrency setting)
|
||||
per device.
|
||||
replication_lock_timeout 15 Number of seconds to wait for an
|
||||
existing replication device lock
|
||||
before giving up.
|
||||
replication_failure_threshold 100 The number of subrequest failures
|
||||
before the
|
||||
replication_failure_ratio is
|
||||
checked
|
||||
replication_failure_ratio 1.0 If the value of failures /
|
||||
successes of SSYNC
|
||||
subrequests exceeds this ratio,
|
||||
the overall SSYNC request
|
||||
will be aborted
|
||||
splice no Use splice() for zero-copy object
|
||||
GETs. This requires Linux kernel
|
||||
version 3.0 or greater. If you set
|
||||
"splice = yes" but the kernel
|
||||
does not support it, error messages
|
||||
will appear in the object server
|
||||
logs at startup, but your object
|
||||
servers should continue to function.
|
||||
nice_priority None Scheduling priority of server processes.
|
||||
Niceness values range from -20 (most
|
||||
favorable to the process) to 19 (least
|
||||
favorable to the process). The default
|
||||
does not modify priority.
|
||||
ionice_class None I/O scheduling class of server processes.
|
||||
I/O niceness class values are IOPRIO_CLASS_RT
|
||||
(realtime), IOPRIO_CLASS_BE (best-effort),
|
||||
and IOPRIO_CLASS_IDLE (idle).
|
||||
The default does not modify class and
|
||||
priority. Linux supports io scheduling
|
||||
priorities and classes since 2.6.13 with
|
||||
the CFQ io scheduler.
|
||||
Work only with ionice_priority.
|
||||
ionice_priority None I/O scheduling priority of server
|
||||
processes. I/O niceness priority is
|
||||
a number which goes from 0 to 7.
|
||||
The higher the value, the lower the I/O
|
||||
priority of the process. Work only with
|
||||
ionice_class.
|
||||
Ignored if IOPRIO_CLASS_IDLE is set.
|
||||
eventlet_tpool_num_threads auto The number of threads in eventlet's thread pool.
|
||||
Most IO will occur in the object server's main
|
||||
thread, but certain "heavy" IO operations will
|
||||
occur in separate IO threads, managed by
|
||||
eventlet.
|
||||
The default value is auto, whose actual value
|
||||
is dependent on the servers_per_port value.
|
||||
If servers_per_port is zero then it uses
|
||||
eventlet's default (currently 20 threads).
|
||||
If the servers_per_port is nonzero then it'll
|
||||
only use 1 thread per process.
|
||||
This value can be overridden with an integer
|
||||
value.
|
||||
============================= ====================== ===============================================
|
||||
================================== ====================== ===============================================
|
||||
Option Default Description
|
||||
---------------------------------- ---------------------- -----------------------------------------------
|
||||
use paste.deploy entry point for the
|
||||
object server. For most cases,
|
||||
this should be
|
||||
`egg:swift#object`.
|
||||
set log_name object-server Label used when logging
|
||||
set log_facility LOG_LOCAL0 Syslog log facility
|
||||
set log_level INFO Logging level
|
||||
set log_requests True Whether or not to log each
|
||||
request
|
||||
set log_address /dev/log Logging directory
|
||||
user swift User to run as
|
||||
max_upload_time 86400 Maximum time allowed to upload an
|
||||
object
|
||||
slow 0 If > 0, Minimum time in seconds for a PUT or
|
||||
DELETE request to complete. This is only
|
||||
useful to simulate slow devices during testing
|
||||
and development.
|
||||
mb_per_sync 512 On PUT requests, sync file every
|
||||
n MB
|
||||
keep_cache_size 5242880 Largest object size to keep in
|
||||
buffer cache
|
||||
keep_cache_private false Allow non-public objects to stay
|
||||
in kernel's buffer cache
|
||||
allowed_headers Content-Disposition, Comma separated list of headers
|
||||
Content-Encoding, that can be set in metadata on an object.
|
||||
X-Delete-At, This list is in addition to
|
||||
X-Object-Manifest, X-Object-Meta-* headers and cannot include
|
||||
X-Static-Large-Object Content-Type, etag, Content-Length, or deleted
|
||||
auto_create_account_prefix . Prefix used when automatically
|
||||
creating accounts.
|
||||
replication_server Configure parameter for creating
|
||||
specific server. To handle all verbs,
|
||||
including replication verbs, do not
|
||||
specify "replication_server"
|
||||
(this is the default). To only
|
||||
handle replication, set to a True
|
||||
value (e.g. "True" or "1").
|
||||
To handle only non-replication
|
||||
verbs, set to "False". Unless you
|
||||
have a separate replication network, you
|
||||
should not specify any value for
|
||||
"replication_server".
|
||||
replication_concurrency 4 Set to restrict the number of
|
||||
concurrent incoming SSYNC
|
||||
requests; set to 0 for unlimited
|
||||
replication_concurrency_per_device 1 Set to restrict the number of
|
||||
concurrent incoming SSYNC
|
||||
requests per device; set to 0 for
|
||||
unlimited requests per device.
|
||||
This can help control I/O to each
|
||||
device. This does not override
|
||||
replication_concurrency described
|
||||
above, so you may need to adjust
|
||||
both parameters depending on your
|
||||
hardware or network capacity.
|
||||
replication_lock_timeout 15 Number of seconds to wait for an
|
||||
existing replication device lock
|
||||
before giving up.
|
||||
replication_failure_threshold 100 The number of subrequest failures
|
||||
before the
|
||||
replication_failure_ratio is
|
||||
checked
|
||||
replication_failure_ratio 1.0 If the value of failures /
|
||||
successes of SSYNC
|
||||
subrequests exceeds this ratio,
|
||||
the overall SSYNC request
|
||||
will be aborted
|
||||
splice no Use splice() for zero-copy object
|
||||
GETs. This requires Linux kernel
|
||||
version 3.0 or greater. If you set
|
||||
"splice = yes" but the kernel
|
||||
does not support it, error messages
|
||||
will appear in the object server
|
||||
logs at startup, but your object
|
||||
servers should continue to function.
|
||||
nice_priority None Scheduling priority of server processes.
|
||||
Niceness values range from -20 (most
|
||||
favorable to the process) to 19 (least
|
||||
favorable to the process). The default
|
||||
does not modify priority.
|
||||
ionice_class None I/O scheduling class of server processes.
|
||||
I/O niceness class values are IOPRIO_CLASS_RT
|
||||
(realtime), IOPRIO_CLASS_BE (best-effort),
|
||||
and IOPRIO_CLASS_IDLE (idle).
|
||||
The default does not modify class and
|
||||
priority. Linux supports io scheduling
|
||||
priorities and classes since 2.6.13 with
|
||||
the CFQ io scheduler.
|
||||
Work only with ionice_priority.
|
||||
ionice_priority None I/O scheduling priority of server
|
||||
processes. I/O niceness priority is
|
||||
a number which goes from 0 to 7.
|
||||
The higher the value, the lower the I/O
|
||||
priority of the process. Work only with
|
||||
ionice_class.
|
||||
Ignored if IOPRIO_CLASS_IDLE is set.
|
||||
eventlet_tpool_num_threads auto The number of threads in eventlet's thread pool.
|
||||
Most IO will occur in the object server's main
|
||||
thread, but certain "heavy" IO operations will
|
||||
occur in separate IO threads, managed by
|
||||
eventlet.
|
||||
The default value is auto, whose actual value
|
||||
is dependent on the servers_per_port value.
|
||||
If servers_per_port is zero then it uses
|
||||
eventlet's default (currently 20 threads).
|
||||
If the servers_per_port is nonzero then it'll
|
||||
only use 1 thread per process.
|
||||
This value can be overridden with an integer
|
||||
value.
|
||||
================================== ====================== ===============================================
|
||||
|
||||
*******************
|
||||
[object-replicator]
|
||||
|
@ -131,6 +131,12 @@ Element Description
|
||||
does not require a token. In addition,
|
||||
``.r:*`` does not grant access to the
|
||||
container listing.
|
||||
``<role_name>`` A user with the specified role *name* on the
|
||||
project within which the container is stored is
|
||||
granted access. A user token scoped to the
|
||||
project must be included in the request. Access
|
||||
to the container is also granted when used in
|
||||
``X-Container-Read``.
|
||||
============================== ================================================
|
||||
|
||||
.. note::
|
||||
@ -211,6 +217,18 @@ project must be included in the request::
|
||||
--write-acl "77b8f82565f14814bece56e50c4c240f:*"
|
||||
|
||||
|
||||
Example: Sharing a Container with Users having a specified Role
|
||||
---------------------------------------------------------------
|
||||
|
||||
The following allows any user that has been assigned the
|
||||
``my_read_access_role`` on the project within which the ``www`` container is
|
||||
stored to download objects or to list the contents of the ``www`` container. A
|
||||
user token scoped to the project must be included in the download or list
|
||||
request::
|
||||
|
||||
swift post www --read-acl "my_read_access_role"
|
||||
|
||||
|
||||
Example: Allowing a Referrer Domain to Download Objects
|
||||
-------------------------------------------------------
|
||||
|
||||
|
@ -219,13 +219,13 @@ used. :ref:`configure-policy` describes how to deprecate a policy.
|
||||
|
||||
Swift's behavior with deprecated policies is as follows:
|
||||
|
||||
* The deprecated policy will not appear in /info
|
||||
* PUT/GET/DELETE/POST/HEAD are still allowed on the pre-existing containers
|
||||
created with a deprecated policy
|
||||
* Clients will get a ``400 Bad Request`` error when trying to create a new
|
||||
container using the deprecated policy
|
||||
* Clients still have access to policy statistics via HEAD on pre-existing
|
||||
containers
|
||||
* The deprecated policy will not appear in /info
|
||||
* PUT/GET/DELETE/POST/HEAD are still allowed on the pre-existing containers
|
||||
created with a deprecated policy
|
||||
* Clients will get a ``400 Bad Request`` error when trying to create a new
|
||||
container using the deprecated policy
|
||||
* Clients still have access to policy statistics via HEAD on pre-existing
|
||||
containers
|
||||
|
||||
.. note::
|
||||
|
||||
@ -272,10 +272,10 @@ section name must be of the form ``[storage-policy:<N>]`` where ``<N>`` is the
|
||||
policy index. There's no reason other than readability that policy indexes be
|
||||
sequential but the following rules are enforced:
|
||||
|
||||
* If a policy with index ``0`` is not declared and no other policies are
|
||||
defined, Swift will create a default policy with index ``0``.
|
||||
* The policy index must be a non-negative integer.
|
||||
* Policy indexes must be unique.
|
||||
* If a policy with index ``0`` is not declared and no other policies are
|
||||
defined, Swift will create a default policy with index ``0``.
|
||||
* The policy index must be a non-negative integer.
|
||||
* Policy indexes must be unique.
|
||||
|
||||
.. warning::
|
||||
|
||||
@ -284,46 +284,46 @@ sequential but the following rules are enforced:
|
||||
|
||||
Each policy section contains the following options:
|
||||
|
||||
* ``name = <policy_name>`` (required)
|
||||
- The primary name of the policy.
|
||||
- Policy names are case insensitive.
|
||||
- Policy names must contain only letters, digits or a dash.
|
||||
- Policy names must be unique.
|
||||
- Policy names can be changed.
|
||||
- The name ``Policy-0`` can only be used for the policy with
|
||||
index ``0``.
|
||||
* ``alias = <policy_name>[, <policy_name>, ...]`` (optional)
|
||||
- A comma-separated list of alternative names for the policy.
|
||||
- The default value is an empty list (i.e. no aliases).
|
||||
- All alias names must follow the rules for the ``name`` option.
|
||||
- Aliases can be added to and removed from the list.
|
||||
- Aliases can be useful to retain support for old primary names if the
|
||||
primary name is changed.
|
||||
* ``default = [true|false]`` (optional)
|
||||
- If ``true`` then this policy will be used when the client does not
|
||||
specify a policy.
|
||||
- The default value is ``false``.
|
||||
- The default policy can be changed at any time, by setting
|
||||
``default = true`` in the desired policy section.
|
||||
- If no policy is declared as the default and no other policies are
|
||||
defined, the policy with index ``0`` is set as the default;
|
||||
- Otherwise, exactly one policy must be declared default.
|
||||
- Deprecated policies cannot be declared the default.
|
||||
- See :ref:`default-policy` for more information.
|
||||
* ``deprecated = [true|false]`` (optional)
|
||||
- If ``true`` then new containers cannot be created using this policy.
|
||||
- The default value is ``false``.
|
||||
- Any policy may be deprecated by adding the ``deprecated`` option to
|
||||
the desired policy section. However, a deprecated policy may not also
|
||||
be declared the default. Therefore, since there must always be a
|
||||
default policy, there must also always be at least one policy which
|
||||
is not deprecated.
|
||||
- See :ref:`deprecate-policy` for more information.
|
||||
* ``policy_type = [replication|erasure_coding]`` (optional)
|
||||
- The option ``policy_type`` is used to distinguish between different
|
||||
policy types.
|
||||
- The default value is ``replication``.
|
||||
- When defining an EC policy use the value ``erasure_coding``.
|
||||
* ``name = <policy_name>`` (required)
|
||||
- The primary name of the policy.
|
||||
- Policy names are case insensitive.
|
||||
- Policy names must contain only letters, digits or a dash.
|
||||
- Policy names must be unique.
|
||||
- Policy names can be changed.
|
||||
- The name ``Policy-0`` can only be used for the policy with
|
||||
index ``0``.
|
||||
* ``aliases = <policy_name>[, <policy_name>, ...]`` (optional)
|
||||
- A comma-separated list of alternative names for the policy.
|
||||
- The default value is an empty list (i.e. no aliases).
|
||||
- All alias names must follow the rules for the ``name`` option.
|
||||
- Aliases can be added to and removed from the list.
|
||||
- Aliases can be useful to retain support for old primary names if the
|
||||
primary name is changed.
|
||||
* ``default = [true|false]`` (optional)
|
||||
- If ``true`` then this policy will be used when the client does not
|
||||
specify a policy.
|
||||
- The default value is ``false``.
|
||||
- The default policy can be changed at any time, by setting
|
||||
``default = true`` in the desired policy section.
|
||||
- If no policy is declared as the default and no other policies are
|
||||
defined, the policy with index ``0`` is set as the default;
|
||||
- Otherwise, exactly one policy must be declared default.
|
||||
- Deprecated policies cannot be declared the default.
|
||||
- See :ref:`default-policy` for more information.
|
||||
* ``deprecated = [true|false]`` (optional)
|
||||
- If ``true`` then new containers cannot be created using this policy.
|
||||
- The default value is ``false``.
|
||||
- Any policy may be deprecated by adding the ``deprecated`` option to
|
||||
the desired policy section. However, a deprecated policy may not also
|
||||
be declared the default. Therefore, since there must always be a
|
||||
default policy, there must also always be at least one policy which
|
||||
is not deprecated.
|
||||
- See :ref:`deprecate-policy` for more information.
|
||||
* ``policy_type = [replication|erasure_coding]`` (optional)
|
||||
- The option ``policy_type`` is used to distinguish between different
|
||||
policy types.
|
||||
- The default value is ``replication``.
|
||||
- When defining an EC policy use the value ``erasure_coding``.
|
||||
|
||||
The EC policy type has additional required options. See
|
||||
:ref:`using_ec_policy` for details.
|
||||
@ -648,10 +648,10 @@ that you wanted to take an existing cluster that already has lots of data on it
|
||||
and upgrade to Swift with Storage Policies. From there you want to go ahead and
|
||||
create a policy and test a few things out. All you need to do is:
|
||||
|
||||
#. Upgrade all of your Swift nodes to a policy-aware version of Swift
|
||||
#. Define your policies in ``/etc/swift/swift.conf``
|
||||
#. Create the corresponding object rings
|
||||
#. Create containers and objects and confirm their placement is as expected
|
||||
#. Upgrade all of your Swift nodes to a policy-aware version of Swift
|
||||
#. Define your policies in ``/etc/swift/swift.conf``
|
||||
#. Create the corresponding object rings
|
||||
#. Create containers and objects and confirm their placement is as expected
|
||||
|
||||
For a specific example that takes you through these steps, please see
|
||||
:doc:`policies_saio`
|
||||
|
@ -161,11 +161,12 @@ use = egg:swift#object
|
||||
# object replicator when configured to use ssync.
|
||||
# replication_concurrency = 4
|
||||
#
|
||||
# Restricts incoming SSYNC requests to one per device,
|
||||
# replication_currency above allowing. This can help control I/O to each
|
||||
# device, but you may wish to set this to False to allow multiple SSYNC
|
||||
# requests (up to the above replication_concurrency setting) per device.
|
||||
# replication_one_per_device = True
|
||||
# Set to restrict the number of concurrent incoming SSYNC requests per
|
||||
# device; set to 0 for unlimited requests per device. This can help control
|
||||
# I/O to each device. This does not override replication_concurrency described
|
||||
# above, so you may need to adjust both parameters depending on your hardware
|
||||
# or network capacity.
|
||||
# replication_concurrency_per_device = 1
|
||||
#
|
||||
# Number of seconds to wait for an existing replication device lock before
|
||||
# giving up.
|
||||
|
@ -766,6 +766,11 @@ use = egg:swift#slo
|
||||
# Default is to use the concurrency value from above; all of the same caveats
|
||||
# apply regarding recommended ranges.
|
||||
# delete_concurrency = 2
|
||||
#
|
||||
# In order to keep a connection active during a potentially long PUT request,
|
||||
# clients may request that Swift send whitespace ahead of the final response
|
||||
# body. This whitespace will be yielded at most every yield_frequency seconds.
|
||||
# yield_frequency = 10
|
||||
|
||||
# Note: Put after auth and staticweb in the pipeline.
|
||||
# If you don't put it in the pipeline, it will be inserted for you.
|
||||
|
@ -62,7 +62,6 @@ scripts =
|
||||
bin/swift-recon-cron
|
||||
bin/swift-ring-builder
|
||||
bin/swift-ring-builder-analyzer
|
||||
bin/swift-temp-url
|
||||
|
||||
[extras]
|
||||
kms_keymaster =
|
||||
|
@ -28,13 +28,14 @@ import six
|
||||
|
||||
import swift.common.db
|
||||
from swift.account.backend import AccountBroker, DATADIR
|
||||
from swift.common.constraints import check_drive
|
||||
from swift.common.direct_client import direct_delete_container, \
|
||||
direct_delete_object, direct_get_container
|
||||
from swift.common.exceptions import ClientException
|
||||
from swift.common.ring import Ring
|
||||
from swift.common.ring.utils import is_local_device
|
||||
from swift.common.utils import get_logger, whataremyips, ismount, \
|
||||
config_true_value, Timestamp
|
||||
from swift.common.utils import get_logger, whataremyips, config_true_value, \
|
||||
Timestamp
|
||||
from swift.common.daemon import Daemon
|
||||
from swift.common.storage_policy import POLICIES, PolicyError
|
||||
|
||||
@ -133,8 +134,7 @@ class AccountReaper(Daemon):
|
||||
begin = time()
|
||||
try:
|
||||
for device in os.listdir(self.devices):
|
||||
if self.mount_check and not ismount(
|
||||
os.path.join(self.devices, device)):
|
||||
if not check_drive(self.devices, device, self.mount_check):
|
||||
self.logger.increment('errors')
|
||||
self.logger.debug(
|
||||
_('Skipping %s as it is not mounted'), device)
|
||||
|
@ -21,6 +21,7 @@ import signal
|
||||
from re import sub
|
||||
|
||||
import eventlet.debug
|
||||
from eventlet.hubs import use_hub
|
||||
|
||||
from swift.common import utils
|
||||
|
||||
@ -266,6 +267,8 @@ def run_daemon(klass, conf_file, section_name='', once=False, **kwargs):
|
||||
# and results in an exit code of 1.
|
||||
sys.exit(e)
|
||||
|
||||
use_hub(utils.get_hub())
|
||||
|
||||
# once on command line (i.e. daemonize=false) will over-ride config
|
||||
once = once or not utils.config_true_value(conf.get('daemonize', 'true'))
|
||||
|
||||
|
@ -333,7 +333,9 @@ class DatabaseBroker(object):
|
||||
exc_hint = 'malformed'
|
||||
elif 'malformed database schema' in str(exc_value):
|
||||
exc_hint = 'malformed'
|
||||
elif 'file is encrypted or is not a database' in str(exc_value):
|
||||
elif ' is not a database' in str(exc_value):
|
||||
# older versions said 'file is not a database'
|
||||
# now 'file is encrypted or is not a database'
|
||||
exc_hint = 'corrupted'
|
||||
elif 'disk I/O error' in str(exc_value):
|
||||
exc_hint = 'disk error while accessing'
|
||||
|
@ -28,10 +28,11 @@ from eventlet import GreenPool, sleep, Timeout
|
||||
from eventlet.green import subprocess
|
||||
|
||||
import swift.common.db
|
||||
from swift.common.constraints import check_drive
|
||||
from swift.common.direct_client import quote
|
||||
from swift.common.utils import get_logger, whataremyips, storage_directory, \
|
||||
renamer, mkdirs, lock_parent_directory, config_true_value, \
|
||||
unlink_older_than, dump_recon_cache, rsync_module_interpolation, ismount, \
|
||||
unlink_older_than, dump_recon_cache, rsync_module_interpolation, \
|
||||
json, Timestamp
|
||||
from swift.common import ring
|
||||
from swift.common.ring.utils import is_local_device
|
||||
@ -688,8 +689,8 @@ class Replicator(Daemon):
|
||||
node['replication_ip'],
|
||||
node['replication_port']):
|
||||
found_local = True
|
||||
if self.mount_check and not ismount(
|
||||
os.path.join(self.root, node['device'])):
|
||||
if not check_drive(self.root, node['device'],
|
||||
self.mount_check):
|
||||
self._add_failure_stats(
|
||||
[(failure_dev['replication_ip'],
|
||||
failure_dev['device'])
|
||||
@ -751,7 +752,7 @@ class ReplicatorRpc(object):
|
||||
return HTTPBadRequest(body='Invalid object type')
|
||||
op = args.pop(0)
|
||||
drive, partition, hsh = replicate_args
|
||||
if self.mount_check and not ismount(os.path.join(self.root, drive)):
|
||||
if not check_drive(self.root, drive, self.mount_check):
|
||||
return Response(status='507 %s is not mounted' % drive)
|
||||
|
||||
db_file = os.path.join(self.root, drive,
|
||||
|
@ -218,40 +218,48 @@ ACCEPTABLE_FORMATS = ['text/plain', 'application/json', 'application/xml',
|
||||
'text/xml']
|
||||
|
||||
|
||||
def get_response_body(data_format, data_dict, error_list):
|
||||
def get_response_body(data_format, data_dict, error_list, root_tag):
|
||||
"""
|
||||
Returns a properly formatted response body according to format. Handles
|
||||
json and xml, otherwise will return text/plain. Note: xml response does not
|
||||
include xml declaration.
|
||||
Returns a properly formatted response body according to format.
|
||||
|
||||
Handles json and xml, otherwise will return text/plain.
|
||||
Note: xml response does not include xml declaration.
|
||||
|
||||
:params data_format: resulting format
|
||||
:params data_dict: generated data about results.
|
||||
:params error_list: list of quoted filenames that failed
|
||||
:params root_tag: the tag name to use for root elements when returning XML;
|
||||
e.g. 'extract' or 'delete'
|
||||
"""
|
||||
if data_format == 'application/json':
|
||||
data_dict['Errors'] = error_list
|
||||
return json.dumps(data_dict)
|
||||
if data_format and data_format.endswith('/xml'):
|
||||
output = '<delete>\n'
|
||||
output = ['<', root_tag, '>\n']
|
||||
for key in sorted(data_dict):
|
||||
xml_key = key.replace(' ', '_').lower()
|
||||
output += '<%s>%s</%s>\n' % (xml_key, data_dict[key], xml_key)
|
||||
output += '<errors>\n'
|
||||
output += '\n'.join(
|
||||
['<object>'
|
||||
'<name>%s</name><status>%s</status>'
|
||||
'</object>' % (saxutils.escape(name), status) for
|
||||
name, status in error_list])
|
||||
output += '</errors>\n</delete>\n'
|
||||
return output
|
||||
output.extend([
|
||||
'<', xml_key, '>',
|
||||
saxutils.escape(str(data_dict[key])),
|
||||
'</', xml_key, '>\n',
|
||||
])
|
||||
output.append('<errors>\n')
|
||||
for name, status in error_list:
|
||||
output.extend([
|
||||
'<object><name>', saxutils.escape(name), '</name><status>',
|
||||
saxutils.escape(status), '</status></object>\n',
|
||||
])
|
||||
output.extend(['</errors>\n</', root_tag, '>\n'])
|
||||
return ''.join(output)
|
||||
|
||||
output = ''
|
||||
output = []
|
||||
for key in sorted(data_dict):
|
||||
output += '%s: %s\n' % (key, data_dict[key])
|
||||
output += 'Errors:\n'
|
||||
output += '\n'.join(
|
||||
['%s, %s' % (name, status)
|
||||
for name, status in error_list])
|
||||
return output
|
||||
output.append('%s: %s\n' % (key, data_dict[key]))
|
||||
output.append('Errors:\n')
|
||||
output.extend(
|
||||
'%s, %s\n' % (name, status)
|
||||
for name, status in error_list)
|
||||
return ''.join(output)
|
||||
|
||||
|
||||
def pax_key_to_swift_header(pax_key):
|
||||
@ -485,7 +493,7 @@ class Bulk(object):
|
||||
resp_dict['Response Status'] = HTTPServerError().status
|
||||
|
||||
yield separator + get_response_body(out_content_type,
|
||||
resp_dict, failed_files)
|
||||
resp_dict, failed_files, 'delete')
|
||||
|
||||
def handle_extract_iter(self, req, compress_type,
|
||||
out_content_type='text/plain'):
|
||||
@ -639,7 +647,7 @@ class Bulk(object):
|
||||
resp_dict['Response Status'] = HTTPServerError().status
|
||||
|
||||
yield separator + get_response_body(
|
||||
out_content_type, resp_dict, failed_files)
|
||||
out_content_type, resp_dict, failed_files, 'extract')
|
||||
|
||||
def _process_delete(self, resp, pile, obj_name, resp_dict,
|
||||
failed_files, failed_file_response, retry=0):
|
||||
@ -675,7 +683,11 @@ class Bulk(object):
|
||||
'tar.bz2': 'bz2'}.get(extract_type.lower().strip('.'))
|
||||
if archive_type is not None:
|
||||
resp = HTTPOk(request=req)
|
||||
out_content_type = req.accept.best_match(ACCEPTABLE_FORMATS)
|
||||
try:
|
||||
out_content_type = req.accept.best_match(
|
||||
ACCEPTABLE_FORMATS)
|
||||
except ValueError:
|
||||
out_content_type = None # Ignore invalid header
|
||||
if out_content_type:
|
||||
resp.content_type = out_content_type
|
||||
resp.app_iter = self.handle_extract_iter(
|
||||
@ -684,7 +696,10 @@ class Bulk(object):
|
||||
resp = HTTPBadRequest("Unsupported archive format")
|
||||
if 'bulk-delete' in req.params and req.method in ['POST', 'DELETE']:
|
||||
resp = HTTPOk(request=req)
|
||||
out_content_type = req.accept.best_match(ACCEPTABLE_FORMATS)
|
||||
try:
|
||||
out_content_type = req.accept.best_match(ACCEPTABLE_FORMATS)
|
||||
except ValueError:
|
||||
out_content_type = None # Ignore invalid header
|
||||
if out_content_type:
|
||||
resp.content_type = out_content_type
|
||||
resp.app_iter = self.handle_delete_iter(
|
||||
|
@ -21,7 +21,7 @@ from swift.common.constraints import valid_api_version
|
||||
from swift.common.http import HTTP_NO_CONTENT
|
||||
from swift.common.request_helpers import get_param
|
||||
from swift.common.swob import HTTPException, HTTPNotAcceptable, Request, \
|
||||
RESPONSE_REASONS
|
||||
RESPONSE_REASONS, HTTPBadRequest
|
||||
|
||||
|
||||
#: Mapping of query string ``format=`` values to their corresponding
|
||||
@ -51,8 +51,11 @@ def get_listing_content_type(req):
|
||||
if query_format:
|
||||
req.accept = FORMAT2CONTENT_TYPE.get(
|
||||
query_format.lower(), FORMAT2CONTENT_TYPE['plain'])
|
||||
out_content_type = req.accept.best_match(
|
||||
['text/plain', 'application/json', 'application/xml', 'text/xml'])
|
||||
try:
|
||||
out_content_type = req.accept.best_match(
|
||||
['text/plain', 'application/json', 'application/xml', 'text/xml'])
|
||||
except ValueError:
|
||||
raise HTTPBadRequest(request=req, body='Invalid Accept header')
|
||||
if not out_content_type:
|
||||
raise HTTPNotAcceptable(request=req)
|
||||
return out_content_type
|
||||
|
@ -13,7 +13,7 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""
|
||||
r"""
|
||||
Middleware that will provide Static Large Object (SLO) support.
|
||||
|
||||
This feature is very similar to Dynamic Large Object (DLO) support in that
|
||||
@ -72,6 +72,33 @@ found, size/etag mismatch, below minimum size, invalid range) then the user
|
||||
will receive a 4xx error response. If everything does match, the user will
|
||||
receive a 2xx response and the SLO object is ready for downloading.
|
||||
|
||||
Note that large manifests may take a long time to verify; historically,
|
||||
clients would need to use a long read timeout for the connection to give
|
||||
Swift enough time to send a final ``201 Created`` or ``400 Bad Request``
|
||||
response. Now, clients should use the query parameters::
|
||||
|
||||
?multipart-manifest=put&heartbeat=on
|
||||
|
||||
to request that Swift send an immediate ``202 Accepted`` response and periodic
|
||||
whitespace to keep the connection alive. A final response code will appear in
|
||||
the body. The format of the response body defaults to text/plain but can be
|
||||
either json or xml depending on the ``Accept`` header. An example body is as
|
||||
follows::
|
||||
|
||||
Response Status: 201 Created
|
||||
Response Body:
|
||||
Etag: "8f481cede6d2ddc07cb36aa084d9a64d"
|
||||
Last Modified: Wed, 25 Oct 2017 17:08:55 GMT
|
||||
Errors:
|
||||
|
||||
Or, as a json response::
|
||||
|
||||
{"Response Status": "201 Created",
|
||||
"Response Body": "",
|
||||
"Etag": "\"8f481cede6d2ddc07cb36aa084d9a64d\"",
|
||||
"Last Modified": "Wed, 25 Oct 2017 17:08:55 GMT",
|
||||
"Errors": []}
|
||||
|
||||
Behind the scenes, on success, a JSON manifest generated from the user input is
|
||||
sent to object servers with an extra ``X-Static-Large-Object: True`` header
|
||||
and a modified ``Content-Type``. The items in this manifest will include the
|
||||
@ -251,12 +278,14 @@ import json
|
||||
import mimetypes
|
||||
import re
|
||||
import six
|
||||
import time
|
||||
from hashlib import md5
|
||||
from swift.common.exceptions import ListingIterError, SegmentError
|
||||
from swift.common.swob import Request, HTTPBadRequest, HTTPServerError, \
|
||||
HTTPMethodNotAllowed, HTTPRequestEntityTooLarge, HTTPLengthRequired, \
|
||||
HTTPOk, HTTPPreconditionFailed, HTTPException, HTTPNotFound, \
|
||||
HTTPUnauthorized, HTTPConflict, HTTPUnprocessableEntity, Response, Range
|
||||
HTTPUnauthorized, HTTPConflict, HTTPUnprocessableEntity, Response, Range, \
|
||||
RESPONSE_REASONS
|
||||
from swift.common.utils import get_logger, config_true_value, \
|
||||
get_valid_utf8_str, override_bytes_from_content_type, split_path, \
|
||||
register_swift_info, RateLimitedIterator, quote, close_if_possible, \
|
||||
@ -273,6 +302,7 @@ from swift.common.middleware.bulk import get_response_body, \
|
||||
DEFAULT_RATE_LIMIT_UNDER_SIZE = 1024 * 1024 # 1 MiB
|
||||
DEFAULT_MAX_MANIFEST_SEGMENTS = 1000
|
||||
DEFAULT_MAX_MANIFEST_SIZE = 1024 * 1024 * 2 # 2 MiB
|
||||
DEFAULT_YIELD_FREQUENCY = 10
|
||||
|
||||
|
||||
REQUIRED_SLO_KEYS = set(['path'])
|
||||
@ -366,7 +396,7 @@ def parse_and_validate_input(req_body, req_path):
|
||||
except (TypeError, ValueError):
|
||||
errors.append("Index %d: invalid size_bytes" % seg_index)
|
||||
continue
|
||||
if seg_size < 1:
|
||||
if seg_size < 1 and seg_index != (len(parsed_data) - 1):
|
||||
errors.append("Index %d: too small; each segment must be "
|
||||
"at least 1 byte."
|
||||
% (seg_index,))
|
||||
@ -430,7 +460,7 @@ class SloGetContext(WSGIContext):
|
||||
if not sub_resp.is_success:
|
||||
close_if_possible(sub_resp.app_iter)
|
||||
raise ListingIterError(
|
||||
'ERROR: while fetching %s, GET of submanifest %s '
|
||||
'while fetching %s, GET of submanifest %s '
|
||||
'failed with status %d' % (req.path, sub_req.path,
|
||||
sub_resp.status_int))
|
||||
|
||||
@ -439,7 +469,7 @@ class SloGetContext(WSGIContext):
|
||||
return json.loads(''.join(sub_resp.app_iter))
|
||||
except ValueError as err:
|
||||
raise ListingIterError(
|
||||
'ERROR: while fetching %s, JSON-decoding of submanifest %s '
|
||||
'while fetching %s, JSON-decoding of submanifest %s '
|
||||
'failed with %s' % (req.path, sub_req.path, err))
|
||||
|
||||
def _segment_length(self, seg_dict):
|
||||
@ -526,7 +556,9 @@ class SloGetContext(WSGIContext):
|
||||
# do this check here so that we can avoid fetching this last
|
||||
# manifest before raising the exception
|
||||
if recursion_depth >= self.max_slo_recursion_depth:
|
||||
raise ListingIterError("Max recursion depth exceeded")
|
||||
raise ListingIterError(
|
||||
"While processing manifest %r, "
|
||||
"max recursion depth was exceeded" % req.path)
|
||||
|
||||
sub_path = get_valid_utf8_str(seg_dict['name'])
|
||||
sub_cont, sub_obj = split_path(sub_path, 2, 2, True)
|
||||
@ -860,16 +892,26 @@ class StaticLargeObject(object):
|
||||
|
||||
:param app: The next WSGI filter or app in the paste.deploy chain.
|
||||
:param conf: The configuration dict for the middleware.
|
||||
:param max_manifest_segments: The maximum number of segments allowed in
|
||||
newly-created static large objects.
|
||||
:param max_manifest_size: The maximum size (in bytes) of newly-created
|
||||
static-large-object manifests.
|
||||
:param yield_frequency: If the client included ``heartbeat=on`` in the
|
||||
query parameters when creating a new static large
|
||||
object, the period of time to wait between sending
|
||||
whitespace to keep the connection alive.
|
||||
"""
|
||||
|
||||
def __init__(self, app, conf,
|
||||
max_manifest_segments=DEFAULT_MAX_MANIFEST_SEGMENTS,
|
||||
max_manifest_size=DEFAULT_MAX_MANIFEST_SIZE):
|
||||
max_manifest_size=DEFAULT_MAX_MANIFEST_SIZE,
|
||||
yield_frequency=DEFAULT_YIELD_FREQUENCY):
|
||||
self.conf = conf
|
||||
self.app = app
|
||||
self.logger = get_logger(conf, log_route='slo')
|
||||
self.max_manifest_segments = max_manifest_segments
|
||||
self.max_manifest_size = max_manifest_size
|
||||
self.yield_frequency = yield_frequency
|
||||
self.max_get_time = int(self.conf.get('max_get_time', 86400))
|
||||
self.rate_limit_under_size = int(self.conf.get(
|
||||
'rate_limit_under_size', DEFAULT_RATE_LIMIT_UNDER_SIZE))
|
||||
@ -928,8 +970,10 @@ class StaticLargeObject(object):
|
||||
raise HTTPRequestEntityTooLarge(
|
||||
'Number of segments must be <= %d' %
|
||||
self.max_manifest_segments)
|
||||
total_size = 0
|
||||
out_content_type = req.accept.best_match(ACCEPTABLE_FORMATS)
|
||||
try:
|
||||
out_content_type = req.accept.best_match(ACCEPTABLE_FORMATS)
|
||||
except ValueError:
|
||||
out_content_type = 'text/plain' # Ignore invalid header
|
||||
if not out_content_type:
|
||||
out_content_type = 'text/plain'
|
||||
data_for_storage = []
|
||||
@ -948,7 +992,8 @@ class StaticLargeObject(object):
|
||||
agent='%(orig)s SLO MultipartPUT', swift_source='SLO')
|
||||
return obj_name, sub_req.get_response(self)
|
||||
|
||||
def validate_seg_dict(seg_dict, head_seg_resp):
|
||||
def validate_seg_dict(seg_dict, head_seg_resp, allow_empty_segment):
|
||||
obj_name = seg_dict['path']
|
||||
if not head_seg_resp.is_success:
|
||||
problem_segments.append([quote(obj_name),
|
||||
head_seg_resp.status])
|
||||
@ -976,7 +1021,7 @@ class StaticLargeObject(object):
|
||||
seg_dict['range'] = '%d-%d' % (rng[0], rng[1] - 1)
|
||||
segment_length = rng[1] - rng[0]
|
||||
|
||||
if segment_length < 1:
|
||||
if segment_length < 1 and not allow_empty_segment:
|
||||
problem_segments.append(
|
||||
[quote(obj_name),
|
||||
'Too small; each segment must be at least 1 byte.'])
|
||||
@ -1006,60 +1051,115 @@ class StaticLargeObject(object):
|
||||
seg_data['sub_slo'] = True
|
||||
return segment_length, seg_data
|
||||
|
||||
heartbeat = config_true_value(req.params.get('heartbeat'))
|
||||
separator = ''
|
||||
if heartbeat:
|
||||
# Apparently some ways of deploying require that this to happens
|
||||
# *before* the return? Not sure why.
|
||||
req.environ['eventlet.minimum_write_chunk_size'] = 0
|
||||
start_response('202 Accepted', [ # NB: not 201 !
|
||||
('Content-Type', out_content_type),
|
||||
])
|
||||
separator = '\r\n\r\n'
|
||||
data_for_storage = [None] * len(parsed_data)
|
||||
with StreamingPile(self.concurrency) as pile:
|
||||
for obj_name, resp in pile.asyncstarmap(do_head, (
|
||||
(path, ) for path in path2indices)):
|
||||
for i in path2indices[obj_name]:
|
||||
segment_length, seg_data = validate_seg_dict(
|
||||
parsed_data[i], resp)
|
||||
data_for_storage[i] = seg_data
|
||||
total_size += segment_length
|
||||
|
||||
if problem_segments:
|
||||
resp_body = get_response_body(
|
||||
out_content_type, {}, problem_segments)
|
||||
raise HTTPBadRequest(resp_body, content_type=out_content_type)
|
||||
def resp_iter():
|
||||
total_size = 0
|
||||
# wsgi won't propagate start_response calls until some data has
|
||||
# been yielded so make sure first heartbeat is sent immediately
|
||||
if heartbeat:
|
||||
yield ' '
|
||||
last_yield_time = time.time()
|
||||
with StreamingPile(self.concurrency) as pile:
|
||||
for obj_name, resp in pile.asyncstarmap(do_head, (
|
||||
(path, ) for path in path2indices)):
|
||||
now = time.time()
|
||||
if heartbeat and (now - last_yield_time >
|
||||
self.yield_frequency):
|
||||
# Make sure we've called start_response before
|
||||
# sending data
|
||||
yield ' '
|
||||
last_yield_time = now
|
||||
for i in path2indices[obj_name]:
|
||||
segment_length, seg_data = validate_seg_dict(
|
||||
parsed_data[i], resp,
|
||||
allow_empty_segment=(i == len(parsed_data) - 1))
|
||||
data_for_storage[i] = seg_data
|
||||
total_size += segment_length
|
||||
|
||||
slo_etag = md5()
|
||||
for seg_data in data_for_storage:
|
||||
if seg_data.get('range'):
|
||||
slo_etag.update('%s:%s;' % (seg_data['hash'],
|
||||
seg_data['range']))
|
||||
if problem_segments:
|
||||
err = HTTPBadRequest(content_type=out_content_type)
|
||||
resp_dict = {}
|
||||
if heartbeat:
|
||||
resp_dict['Response Status'] = err.status
|
||||
resp_dict['Response Body'] = err.body or '\n'.join(
|
||||
RESPONSE_REASONS.get(err.status_int, ['']))
|
||||
else:
|
||||
start_response(err.status,
|
||||
[(h, v) for h, v in err.headers.items()
|
||||
if h.lower() != 'content-length'])
|
||||
yield separator + get_response_body(
|
||||
out_content_type, resp_dict, problem_segments, 'upload')
|
||||
return
|
||||
|
||||
slo_etag = md5()
|
||||
for seg_data in data_for_storage:
|
||||
if seg_data.get('range'):
|
||||
slo_etag.update('%s:%s;' % (seg_data['hash'],
|
||||
seg_data['range']))
|
||||
else:
|
||||
slo_etag.update(seg_data['hash'])
|
||||
|
||||
slo_etag = slo_etag.hexdigest()
|
||||
client_etag = req.headers.get('Etag')
|
||||
if client_etag and client_etag.strip('"') != slo_etag:
|
||||
err = HTTPUnprocessableEntity(request=req)
|
||||
if heartbeat:
|
||||
yield separator + get_response_body(out_content_type, {
|
||||
'Response Status': err.status,
|
||||
'Response Body': err.body or '\n'.join(
|
||||
RESPONSE_REASONS.get(err.status_int, [''])),
|
||||
}, problem_segments, 'upload')
|
||||
else:
|
||||
for chunk in err(req.environ, start_response):
|
||||
yield chunk
|
||||
return
|
||||
|
||||
json_data = json.dumps(data_for_storage)
|
||||
if six.PY3:
|
||||
json_data = json_data.encode('utf-8')
|
||||
req.body = json_data
|
||||
req.headers.update({
|
||||
SYSMETA_SLO_ETAG: slo_etag,
|
||||
SYSMETA_SLO_SIZE: total_size,
|
||||
'X-Static-Large-Object': 'True',
|
||||
'Etag': md5(json_data).hexdigest(),
|
||||
})
|
||||
|
||||
env = req.environ
|
||||
if not env.get('CONTENT_TYPE'):
|
||||
guessed_type, _junk = mimetypes.guess_type(req.path_info)
|
||||
env['CONTENT_TYPE'] = (guessed_type or
|
||||
'application/octet-stream')
|
||||
env['swift.content_type_overridden'] = True
|
||||
env['CONTENT_TYPE'] += ";swift_bytes=%d" % total_size
|
||||
|
||||
resp = req.get_response(self.app)
|
||||
resp_dict = {'Response Status': resp.status}
|
||||
if resp.is_success:
|
||||
resp.etag = slo_etag
|
||||
resp_dict['Etag'] = resp.headers['Etag']
|
||||
resp_dict['Last Modified'] = resp.headers['Last-Modified']
|
||||
|
||||
if heartbeat:
|
||||
resp_dict['Response Body'] = resp.body
|
||||
yield separator + get_response_body(
|
||||
out_content_type, resp_dict, [], 'upload')
|
||||
else:
|
||||
slo_etag.update(seg_data['hash'])
|
||||
for chunk in resp(req.environ, start_response):
|
||||
yield chunk
|
||||
|
||||
slo_etag = slo_etag.hexdigest()
|
||||
client_etag = req.headers.get('Etag')
|
||||
if client_etag and client_etag.strip('"') != slo_etag:
|
||||
raise HTTPUnprocessableEntity(request=req)
|
||||
|
||||
json_data = json.dumps(data_for_storage)
|
||||
if six.PY3:
|
||||
json_data = json_data.encode('utf-8')
|
||||
req.body = json_data
|
||||
req.headers.update({
|
||||
SYSMETA_SLO_ETAG: slo_etag,
|
||||
SYSMETA_SLO_SIZE: total_size,
|
||||
'X-Static-Large-Object': 'True',
|
||||
'Etag': md5(json_data).hexdigest(),
|
||||
})
|
||||
|
||||
env = req.environ
|
||||
if not env.get('CONTENT_TYPE'):
|
||||
guessed_type, _junk = mimetypes.guess_type(req.path_info)
|
||||
env['CONTENT_TYPE'] = guessed_type or 'application/octet-stream'
|
||||
env['swift.content_type_overridden'] = True
|
||||
env['CONTENT_TYPE'] += ";swift_bytes=%d" % total_size
|
||||
|
||||
def start_response_wrapper(status, headers, exc_info=None):
|
||||
for i, (header, _value) in enumerate(headers):
|
||||
if header.lower() == 'etag':
|
||||
headers[i] = ('Etag', '"%s"' % slo_etag)
|
||||
break
|
||||
return start_response(status, headers, exc_info)
|
||||
|
||||
return self.app(env, start_response_wrapper)
|
||||
return resp_iter()
|
||||
|
||||
def get_segments_to_delete_iter(self, req):
|
||||
"""
|
||||
@ -1154,7 +1254,10 @@ class StaticLargeObject(object):
|
||||
"""
|
||||
req.headers['Content-Type'] = None # Ignore content-type from client
|
||||
resp = HTTPOk(request=req)
|
||||
out_content_type = req.accept.best_match(ACCEPTABLE_FORMATS)
|
||||
try:
|
||||
out_content_type = req.accept.best_match(ACCEPTABLE_FORMATS)
|
||||
except ValueError:
|
||||
out_content_type = None # Ignore invalid header
|
||||
if out_content_type:
|
||||
resp.content_type = out_content_type
|
||||
resp.app_iter = self.bulk_deleter.handle_delete_iter(
|
||||
@ -1205,10 +1308,13 @@ def filter_factory(global_conf, **local_conf):
|
||||
DEFAULT_MAX_MANIFEST_SEGMENTS))
|
||||
max_manifest_size = int(conf.get('max_manifest_size',
|
||||
DEFAULT_MAX_MANIFEST_SIZE))
|
||||
yield_frequency = int(conf.get('yield_frequency',
|
||||
DEFAULT_YIELD_FREQUENCY))
|
||||
|
||||
register_swift_info('slo',
|
||||
max_manifest_segments=max_manifest_segments,
|
||||
max_manifest_size=max_manifest_size,
|
||||
yield_frequency=yield_frequency,
|
||||
# this used to be configurable; report it as 1 for
|
||||
# clients that might still care
|
||||
min_segment_size=1)
|
||||
@ -1217,5 +1323,6 @@ def filter_factory(global_conf, **local_conf):
|
||||
return StaticLargeObject(
|
||||
app, conf,
|
||||
max_manifest_segments=max_manifest_segments,
|
||||
max_manifest_size=max_manifest_size)
|
||||
max_manifest_size=max_manifest_size,
|
||||
yield_frequency=yield_frequency)
|
||||
return slo_filter
|
||||
|
@ -339,7 +339,7 @@ class SegmentedIterable(object):
|
||||
seg_size is not None and last_byte == seg_size - 1)
|
||||
if time.time() - start_time > self.max_get_time:
|
||||
raise SegmentError(
|
||||
'ERROR: While processing manifest %s, '
|
||||
'While processing manifest %s, '
|
||||
'max LO GET time of %ds exceeded' %
|
||||
(self.name, self.max_get_time))
|
||||
# The "multipart-manifest=get" query param ensures that the
|
||||
@ -396,7 +396,7 @@ class SegmentedIterable(object):
|
||||
e_type, e_value, e_traceback = sys.exc_info()
|
||||
if time.time() - start_time > self.max_get_time:
|
||||
raise SegmentError(
|
||||
'ERROR: While processing manifest %s, '
|
||||
'While processing manifest %s, '
|
||||
'max LO GET time of %ds exceeded' %
|
||||
(self.name, self.max_get_time))
|
||||
if pending_req:
|
||||
@ -405,7 +405,7 @@ class SegmentedIterable(object):
|
||||
|
||||
if time.time() - start_time > self.max_get_time:
|
||||
raise SegmentError(
|
||||
'ERROR: While processing manifest %s, '
|
||||
'While processing manifest %s, '
|
||||
'max LO GET time of %ds exceeded' %
|
||||
(self.name, self.max_get_time))
|
||||
if pending_req:
|
||||
@ -420,7 +420,7 @@ class SegmentedIterable(object):
|
||||
if not is_success(seg_resp.status_int):
|
||||
close_if_possible(seg_resp.app_iter)
|
||||
raise SegmentError(
|
||||
'ERROR: While processing manifest %s, '
|
||||
'While processing manifest %s, '
|
||||
'got %d while retrieving %s' %
|
||||
(self.name, seg_resp.status_int, seg_req.path))
|
||||
|
||||
@ -485,10 +485,10 @@ class SegmentedIterable(object):
|
||||
if bytes_left:
|
||||
raise SegmentError(
|
||||
'Not enough bytes for %s; closing connection' % self.name)
|
||||
except (ListingIterError, SegmentError):
|
||||
self.logger.exception(_('ERROR: An error occurred '
|
||||
'while retrieving segments'))
|
||||
raise
|
||||
except (ListingIterError, SegmentError) as err:
|
||||
self.logger.error(err)
|
||||
if not self.validated_first_segment:
|
||||
raise
|
||||
finally:
|
||||
if self.current_resp:
|
||||
close_if_possible(self.current_resp.app_iter)
|
||||
@ -533,12 +533,13 @@ class SegmentedIterable(object):
|
||||
"""
|
||||
if self.validated_first_segment:
|
||||
return
|
||||
self.validated_first_segment = True
|
||||
|
||||
try:
|
||||
self.peeked_chunk = next(self.app_iter)
|
||||
except StopIteration:
|
||||
pass
|
||||
finally:
|
||||
self.validated_first_segment = True
|
||||
|
||||
def __iter__(self):
|
||||
if self.peeked_chunk is not None:
|
||||
|
@ -130,7 +130,7 @@ class RingBuilder(object):
|
||||
# within a given number of hours (24 is my usual test). Removing
|
||||
# a device overrides this behavior as it's assumed that's only
|
||||
# done because of device failure.
|
||||
self._last_part_moves = None
|
||||
self._last_part_moves = array('B', itertools.repeat(0, self.parts))
|
||||
# _part_moved_bitmap record parts have been moved
|
||||
self._part_moved_bitmap = None
|
||||
# _last_part_moves_epoch indicates the time the offsets in
|
||||
@ -167,7 +167,7 @@ class RingBuilder(object):
|
||||
|
||||
@property
|
||||
def ever_rebalanced(self):
|
||||
return self._last_part_moves is not None
|
||||
return self._replica2part2dev is not None
|
||||
|
||||
def _set_part_moved(self, part):
|
||||
self._last_part_moves[part] = 0
|
||||
@ -507,7 +507,7 @@ class RingBuilder(object):
|
||||
|
||||
if not self.ever_rebalanced:
|
||||
self.logger.debug("New builder; performing initial balance")
|
||||
self._last_part_moves = array('B', itertools.repeat(0, self.parts))
|
||||
|
||||
self._update_last_part_moves()
|
||||
|
||||
with _set_random_seed(seed):
|
||||
@ -925,7 +925,7 @@ class RingBuilder(object):
|
||||
"""
|
||||
self._part_moved_bitmap = bytearray(max(2 ** (self.part_power - 3), 1))
|
||||
elapsed_hours = int(time() - self._last_part_moves_epoch) / 3600
|
||||
if elapsed_hours <= 0 or not self._last_part_moves:
|
||||
if elapsed_hours <= 0:
|
||||
return
|
||||
for part in range(self.parts):
|
||||
# The "min(self._last_part_moves[part] + elapsed_hours, 0xff)"
|
||||
|
@ -639,6 +639,7 @@ class CompositeRingBuilder(object):
|
||||
component builder.
|
||||
"""
|
||||
self._load_components()
|
||||
self.update_last_part_moves()
|
||||
component_builders = zip(self._builder_files, self._builders)
|
||||
# don't let the same builder go first each time
|
||||
shuffle(component_builders)
|
||||
@ -678,10 +679,10 @@ class CompositeRingBuilder(object):
|
||||
Updates the record of how many hours ago each partition was moved in
|
||||
all component builders.
|
||||
"""
|
||||
# Called by component builders. We need all component builders to be at
|
||||
# same last_part_moves epoch before any builder starts moving parts;
|
||||
# this will effectively be a no-op for builders that have already been
|
||||
# updated in last hour
|
||||
# Called at start of each composite rebalance. We need all component
|
||||
# builders to be at same last_part_moves epoch before any builder
|
||||
# starts moving parts; this will effectively be a no-op for builders
|
||||
# that have already been updated in last hour
|
||||
for b in self._builders:
|
||||
b.update_last_part_moves()
|
||||
|
||||
@ -723,8 +724,11 @@ class CooperativeRingBuilder(RingBuilder):
|
||||
super(CooperativeRingBuilder, self)._can_part_move(part))
|
||||
|
||||
def _update_last_part_moves(self):
|
||||
# overrides superclass method to delegate to parent builder
|
||||
return self.parent_builder.update_last_part_moves()
|
||||
# overrides superclass method - parent builder should have called
|
||||
# update_last_part_moves() before rebalance; calling the superclass
|
||||
# method here would reset _part_moved_bitmap which is state we rely on
|
||||
# when min_part_hours is zero
|
||||
pass
|
||||
|
||||
def update_last_part_moves(self):
|
||||
"""
|
||||
|
@ -21,7 +21,7 @@ import six
|
||||
from six.moves.configparser import ConfigParser
|
||||
from swift.common.utils import (
|
||||
config_true_value, quorum_size, whataremyips, list_from_csv,
|
||||
config_positive_int_value)
|
||||
config_positive_int_value, get_zero_indexed_base_string)
|
||||
from swift.common.ring import Ring, RingData
|
||||
from swift.common import utils
|
||||
from swift.common.exceptions import RingLoadError
|
||||
@ -92,11 +92,7 @@ class PolicyError(ValueError):
|
||||
|
||||
|
||||
def _get_policy_string(base, policy_index):
|
||||
if policy_index == 0 or policy_index is None:
|
||||
return_string = base
|
||||
else:
|
||||
return_string = base + "-%d" % int(policy_index)
|
||||
return return_string
|
||||
return get_zero_indexed_base_string(base, policy_index)
|
||||
|
||||
|
||||
def get_policy_string(base, policy_or_index):
|
||||
|
@ -691,11 +691,9 @@ class Accept(object):
|
||||
Returns None if no available options are acceptable to the client.
|
||||
|
||||
:param options: a list of content-types the server can respond with
|
||||
:raises ValueError: if the header is malformed
|
||||
"""
|
||||
try:
|
||||
types = self._get_types()
|
||||
except ValueError:
|
||||
return None
|
||||
types = self._get_types()
|
||||
if not types and options:
|
||||
return options[0]
|
||||
for pattern in types:
|
||||
|
@ -53,6 +53,7 @@ import datetime
|
||||
import eventlet
|
||||
import eventlet.debug
|
||||
import eventlet.greenthread
|
||||
import eventlet.patcher
|
||||
import eventlet.semaphore
|
||||
from eventlet import GreenPool, sleep, Timeout, tpool
|
||||
from eventlet.green import socket, threading
|
||||
@ -79,12 +80,6 @@ from swift.common.http import is_success, is_redirection, HTTP_NOT_FOUND, \
|
||||
from swift.common.header_key_dict import HeaderKeyDict
|
||||
from swift.common.linkat import linkat
|
||||
|
||||
if six.PY3:
|
||||
stdlib_queue = eventlet.patcher.original('queue')
|
||||
else:
|
||||
stdlib_queue = eventlet.patcher.original('Queue')
|
||||
stdlib_threading = eventlet.patcher.original('threading')
|
||||
|
||||
# logging doesn't import patched as cleanly as one would like
|
||||
from logging.handlers import SysLogHandler
|
||||
import logging
|
||||
@ -487,6 +482,18 @@ def config_read_prefixed_options(conf, prefix_name, defaults):
|
||||
return params
|
||||
|
||||
|
||||
def eventlet_monkey_patch():
|
||||
"""
|
||||
Install the appropriate Eventlet monkey patches.
|
||||
"""
|
||||
# NOTE(sileht):
|
||||
# monkey-patching thread is required by python-keystoneclient;
|
||||
# monkey-patching select is required by oslo.messaging pika driver
|
||||
# if thread is monkey-patched.
|
||||
eventlet.patcher.monkey_patch(all=False, socket=True, select=True,
|
||||
thread=True)
|
||||
|
||||
|
||||
def noop_libc_function(*args):
|
||||
return 0
|
||||
|
||||
@ -1999,6 +2006,25 @@ def get_hub():
|
||||
getting swallowed somewhere. Then when that file descriptor
|
||||
was re-used, eventlet would freak right out because it still
|
||||
thought it was waiting for activity from it in some other coro.
|
||||
|
||||
Another note about epoll: it's hard to use when forking. epoll works
|
||||
like so:
|
||||
|
||||
* create an epoll instance: efd = epoll_create(...)
|
||||
|
||||
* register file descriptors of interest with epoll_ctl(efd,
|
||||
EPOLL_CTL_ADD, fd, ...)
|
||||
|
||||
* wait for events with epoll_wait(efd, ...)
|
||||
|
||||
If you fork, you and all your child processes end up using the same
|
||||
epoll instance, and everyone becomes confused. It is possible to use
|
||||
epoll and fork and still have a correct program as long as you do the
|
||||
right things, but eventlet doesn't do those things. Really, it can't
|
||||
even try to do those things since it doesn't get notified of forks.
|
||||
|
||||
In contrast, both poll() and select() specify the set of interesting
|
||||
file descriptors with each call, so there's no problem with forking.
|
||||
"""
|
||||
try:
|
||||
import select
|
||||
@ -2271,8 +2297,45 @@ def hash_path(account, container=None, object=None, raw_digest=False):
|
||||
+ HASH_PATH_SUFFIX).hexdigest()
|
||||
|
||||
|
||||
def get_zero_indexed_base_string(base, index):
|
||||
"""
|
||||
This allows the caller to make a list of things with indexes, where the
|
||||
first item (zero indexed) is just the bare base string, and subsequent
|
||||
indexes are appended '-1', '-2', etc.
|
||||
|
||||
e.g.::
|
||||
|
||||
'lock', None => 'lock'
|
||||
'lock', 0 => 'lock'
|
||||
'lock', 1 => 'lock-1'
|
||||
'object', 2 => 'object-2'
|
||||
|
||||
:param base: a string, the base string; when ``index`` is 0 (or None) this
|
||||
is the identity function.
|
||||
:param index: a digit, typically an integer (or None); for values other
|
||||
than 0 or None this digit is appended to the base string
|
||||
separated by a hyphen.
|
||||
"""
|
||||
if index == 0 or index is None:
|
||||
return_string = base
|
||||
else:
|
||||
return_string = base + "-%d" % int(index)
|
||||
return return_string
|
||||
|
||||
|
||||
def _get_any_lock(fds):
|
||||
for fd in fds:
|
||||
try:
|
||||
fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
|
||||
return True
|
||||
except IOError as err:
|
||||
if err.errno != errno.EAGAIN:
|
||||
raise
|
||||
return False
|
||||
|
||||
|
||||
@contextmanager
|
||||
def lock_path(directory, timeout=10, timeout_class=None):
|
||||
def lock_path(directory, timeout=10, timeout_class=None, limit=1):
|
||||
"""
|
||||
Context manager that acquires a lock on a directory. This will block until
|
||||
the lock can be acquired, or the timeout time has expired (whichever occurs
|
||||
@ -2288,12 +2351,16 @@ def lock_path(directory, timeout=10, timeout_class=None):
|
||||
lock cannot be granted within the timeout. Will be
|
||||
constructed as timeout_class(timeout, lockpath). Default:
|
||||
LockTimeout
|
||||
:param limit: the maximum number of locks that may be held concurrently on
|
||||
the same directory; defaults to 1
|
||||
"""
|
||||
if timeout_class is None:
|
||||
timeout_class = swift.common.exceptions.LockTimeout
|
||||
mkdirs(directory)
|
||||
lockpath = '%s/.lock' % directory
|
||||
fd = os.open(lockpath, os.O_WRONLY | os.O_CREAT)
|
||||
fds = [os.open(get_zero_indexed_base_string(lockpath, i),
|
||||
os.O_WRONLY | os.O_CREAT)
|
||||
for i in range(limit)]
|
||||
sleep_time = 0.01
|
||||
slower_sleep_time = max(timeout * 0.01, sleep_time)
|
||||
slowdown_at = timeout * 0.01
|
||||
@ -2301,19 +2368,16 @@ def lock_path(directory, timeout=10, timeout_class=None):
|
||||
try:
|
||||
with timeout_class(timeout, lockpath):
|
||||
while True:
|
||||
try:
|
||||
fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
|
||||
if _get_any_lock(fds):
|
||||
break
|
||||
except IOError as err:
|
||||
if err.errno != errno.EAGAIN:
|
||||
raise
|
||||
if time_slept > slowdown_at:
|
||||
sleep_time = slower_sleep_time
|
||||
sleep(sleep_time)
|
||||
time_slept += sleep_time
|
||||
yield True
|
||||
finally:
|
||||
os.close(fd)
|
||||
for fd in fds:
|
||||
os.close(fd)
|
||||
|
||||
|
||||
@contextmanager
|
||||
|
@ -412,12 +412,7 @@ def run_server(conf, logger, sock, global_conf=None):
|
||||
wsgi.WRITE_TIMEOUT = int(conf.get('client_timeout') or 60)
|
||||
|
||||
eventlet.hubs.use_hub(get_hub())
|
||||
# NOTE(sileht):
|
||||
# monkey-patching thread is required by python-keystoneclient;
|
||||
# monkey-patching select is required by oslo.messaging pika driver
|
||||
# if thread is monkey-patched.
|
||||
eventlet.patcher.monkey_patch(all=False, socket=True, select=True,
|
||||
thread=True)
|
||||
utils.eventlet_monkey_patch()
|
||||
eventlet_debug = config_true_value(conf.get('eventlet_debug', 'no'))
|
||||
eventlet.debug.hub_exceptions(eventlet_debug)
|
||||
wsgi_logger = NullLogger()
|
||||
|
@ -23,15 +23,17 @@ from swift import gettext_ as _
|
||||
from random import random, shuffle
|
||||
from tempfile import mkstemp
|
||||
|
||||
from eventlet import spawn, patcher, Timeout
|
||||
from eventlet import spawn, Timeout
|
||||
|
||||
import swift.common.db
|
||||
from swift.common.constraints import check_drive
|
||||
from swift.container.backend import ContainerBroker, DATADIR
|
||||
from swift.common.bufferedhttp import http_connect
|
||||
from swift.common.exceptions import ConnectionTimeout
|
||||
from swift.common.ring import Ring
|
||||
from swift.common.utils import get_logger, config_true_value, ismount, \
|
||||
dump_recon_cache, majority_size, Timestamp, ratelimit_sleep
|
||||
from swift.common.utils import get_logger, config_true_value, \
|
||||
dump_recon_cache, majority_size, Timestamp, ratelimit_sleep, \
|
||||
eventlet_monkey_patch
|
||||
from swift.common.daemon import Daemon
|
||||
from swift.common.http import is_success, HTTP_INTERNAL_SERVER_ERROR
|
||||
|
||||
@ -39,9 +41,9 @@ from swift.common.http import is_success, HTTP_INTERNAL_SERVER_ERROR
|
||||
class ContainerUpdater(Daemon):
|
||||
"""Update container information in account listings."""
|
||||
|
||||
def __init__(self, conf):
|
||||
def __init__(self, conf, logger=None):
|
||||
self.conf = conf
|
||||
self.logger = get_logger(conf, log_route='container-updater')
|
||||
self.logger = logger or get_logger(conf, log_route='container-updater')
|
||||
self.devices = conf.get('devices', '/srv/node')
|
||||
self.mount_check = config_true_value(conf.get('mount_check', 'true'))
|
||||
self.swift_dir = conf.get('swift_dir', '/etc/swift')
|
||||
@ -99,8 +101,8 @@ class ContainerUpdater(Daemon):
|
||||
"""
|
||||
paths = []
|
||||
for device in self._listdir(self.devices):
|
||||
dev_path = os.path.join(self.devices, device)
|
||||
if self.mount_check and not ismount(dev_path):
|
||||
dev_path = check_drive(self.devices, device, self.mount_check)
|
||||
if not dev_path:
|
||||
self.logger.warning(_('%s is not mounted'), device)
|
||||
continue
|
||||
con_path = os.path.join(dev_path, DATADIR)
|
||||
@ -155,8 +157,7 @@ class ContainerUpdater(Daemon):
|
||||
pid2filename[pid] = tmpfilename
|
||||
else:
|
||||
signal.signal(signal.SIGTERM, signal.SIG_DFL)
|
||||
patcher.monkey_patch(all=False, socket=True, select=True,
|
||||
thread=True)
|
||||
eventlet_monkey_patch()
|
||||
self.no_changes = 0
|
||||
self.successes = 0
|
||||
self.failures = 0
|
||||
@ -190,7 +191,7 @@ class ContainerUpdater(Daemon):
|
||||
"""
|
||||
Run the updater once.
|
||||
"""
|
||||
patcher.monkey_patch(all=False, socket=True, select=True, thread=True)
|
||||
eventlet_monkey_patch()
|
||||
self.logger.info(_('Begin container update single threaded sweep'))
|
||||
begin = time.time()
|
||||
self.no_changes = 0
|
||||
|
@ -289,9 +289,9 @@ class AuditorWorker(object):
|
||||
class ObjectAuditor(Daemon):
|
||||
"""Audit objects."""
|
||||
|
||||
def __init__(self, conf, **options):
|
||||
def __init__(self, conf, logger=None, **options):
|
||||
self.conf = conf
|
||||
self.logger = get_logger(conf, log_route='object-auditor')
|
||||
self.logger = logger or get_logger(conf, log_route='object-auditor')
|
||||
self.devices = conf.get('devices', '/srv/node')
|
||||
self.concurrency = int(conf.get('concurrency', 1))
|
||||
self.conf_zero_byte_fps = int(
|
||||
|
@ -62,7 +62,7 @@ from swift.common.request_helpers import is_sys_meta
|
||||
from swift.common.utils import mkdirs, Timestamp, \
|
||||
storage_directory, hash_path, renamer, fallocate, fsync, fdatasync, \
|
||||
fsync_dir, drop_buffer_cache, lock_path, write_pickle, \
|
||||
config_true_value, listdir, split_path, ismount, remove_file, \
|
||||
config_true_value, listdir, split_path, remove_file, \
|
||||
get_md5_socket, F_SETPIPE_SZ, decode_timestamps, encode_timestamps, \
|
||||
tpool_reraise, MD5_OF_EMPTY_STRING, link_fd_to_path, o_tmpfile_supported, \
|
||||
O_TMPFILE, makedirs_count, replace_partition_in_path
|
||||
@ -429,11 +429,11 @@ def object_audit_location_generator(devices, mount_check=True, logger=None,
|
||||
shuffle(device_dirs)
|
||||
|
||||
for device in device_dirs:
|
||||
if mount_check and not \
|
||||
ismount(os.path.join(devices, device)):
|
||||
if not check_drive(devices, device, mount_check):
|
||||
if logger:
|
||||
logger.debug(
|
||||
_('Skipping %s as it is not mounted'), device)
|
||||
'Skipping %s as it is not %s', device,
|
||||
'mounted' if mount_check else 'a dir')
|
||||
continue
|
||||
# loop through object dirs for all policies
|
||||
device_dir = os.path.join(devices, device)
|
||||
@ -624,8 +624,28 @@ class BaseDiskFileManager(object):
|
||||
self.bytes_per_sync = int(conf.get('mb_per_sync', 512)) * 1024 * 1024
|
||||
self.mount_check = config_true_value(conf.get('mount_check', 'true'))
|
||||
self.reclaim_age = int(conf.get('reclaim_age', DEFAULT_RECLAIM_AGE))
|
||||
self.replication_one_per_device = config_true_value(
|
||||
conf.get('replication_one_per_device', 'true'))
|
||||
replication_concurrency_per_device = conf.get(
|
||||
'replication_concurrency_per_device')
|
||||
replication_one_per_device = conf.get('replication_one_per_device')
|
||||
if replication_concurrency_per_device is None \
|
||||
and replication_one_per_device is not None:
|
||||
self.logger.warning('Option replication_one_per_device is '
|
||||
'deprecated and will be removed in a future '
|
||||
'version. Update your configuration to use '
|
||||
'option replication_concurrency_per_device.')
|
||||
if config_true_value(replication_one_per_device):
|
||||
replication_concurrency_per_device = 1
|
||||
else:
|
||||
replication_concurrency_per_device = 0
|
||||
elif replication_one_per_device is not None:
|
||||
self.logger.warning('Option replication_one_per_device ignored as '
|
||||
'replication_concurrency_per_device is '
|
||||
'defined.')
|
||||
if replication_concurrency_per_device is None:
|
||||
self.replication_concurrency_per_device = 1
|
||||
else:
|
||||
self.replication_concurrency_per_device = int(
|
||||
replication_concurrency_per_device)
|
||||
self.replication_lock_timeout = int(conf.get(
|
||||
'replication_lock_timeout', 15))
|
||||
|
||||
@ -1189,14 +1209,15 @@ class BaseDiskFileManager(object):
|
||||
:returns: full path to the device, None if the path to the device is
|
||||
not a proper mount point or directory.
|
||||
"""
|
||||
# we'll do some kind of check unless explicitly forbidden
|
||||
if mount_check is not False:
|
||||
if mount_check or self.mount_check:
|
||||
mount_check = True
|
||||
else:
|
||||
mount_check = False
|
||||
return check_drive(self.devices, device, mount_check)
|
||||
return join(self.devices, device)
|
||||
if mount_check is False:
|
||||
# explicitly forbidden from syscall, just return path
|
||||
return join(self.devices, device)
|
||||
# we'll do some kind of check if not explicitly forbidden
|
||||
if mount_check or self.mount_check:
|
||||
mount_check = True
|
||||
else:
|
||||
mount_check = False
|
||||
return check_drive(self.devices, device, mount_check)
|
||||
|
||||
@contextmanager
|
||||
def replication_lock(self, device):
|
||||
@ -1208,12 +1229,13 @@ class BaseDiskFileManager(object):
|
||||
:raises ReplicationLockTimeout: If the lock on the device
|
||||
cannot be granted within the configured timeout.
|
||||
"""
|
||||
if self.replication_one_per_device:
|
||||
if self.replication_concurrency_per_device:
|
||||
dev_path = self.get_dev_path(device)
|
||||
with lock_path(
|
||||
dev_path,
|
||||
timeout=self.replication_lock_timeout,
|
||||
timeout_class=ReplicationLockTimeout):
|
||||
timeout_class=ReplicationLockTimeout,
|
||||
limit=self.replication_concurrency_per_device):
|
||||
yield True
|
||||
else:
|
||||
yield True
|
||||
|
@ -26,14 +26,13 @@ import six
|
||||
import six.moves.cPickle as pickle
|
||||
import shutil
|
||||
|
||||
from eventlet import (GreenPile, GreenPool, Timeout, sleep, hubs, tpool,
|
||||
spawn)
|
||||
from eventlet import (GreenPile, GreenPool, Timeout, sleep, tpool, spawn)
|
||||
from eventlet.support.greenlets import GreenletExit
|
||||
|
||||
from swift import gettext_ as _
|
||||
from swift.common.utils import (
|
||||
whataremyips, unlink_older_than, compute_eta, get_logger,
|
||||
dump_recon_cache, mkdirs, config_true_value, list_from_csv, get_hub,
|
||||
dump_recon_cache, mkdirs, config_true_value, list_from_csv,
|
||||
tpool_reraise, GreenAsyncPile, Timestamp, remove_file)
|
||||
from swift.common.header_key_dict import HeaderKeyDict
|
||||
from swift.common.bufferedhttp import http_connect
|
||||
@ -51,9 +50,6 @@ from swift.common.exceptions import ConnectionTimeout, DiskFileError, \
|
||||
SYNC, REVERT = ('sync_only', 'sync_revert')
|
||||
|
||||
|
||||
hubs.use_hub(get_hub())
|
||||
|
||||
|
||||
def _get_partners(frag_index, part_nodes):
|
||||
"""
|
||||
Returns the left and right partners of the node whose index is
|
||||
|
@ -25,15 +25,16 @@ import six.moves.cPickle as pickle
|
||||
from swift import gettext_ as _
|
||||
|
||||
import eventlet
|
||||
from eventlet import GreenPool, tpool, Timeout, sleep, hubs
|
||||
from eventlet import GreenPool, tpool, Timeout, sleep
|
||||
from eventlet.green import subprocess
|
||||
from eventlet.support.greenlets import GreenletExit
|
||||
|
||||
from swift.common.constraints import check_drive
|
||||
from swift.common.ring.utils import is_local_device
|
||||
from swift.common.utils import whataremyips, unlink_older_than, \
|
||||
compute_eta, get_logger, dump_recon_cache, ismount, \
|
||||
compute_eta, get_logger, dump_recon_cache, \
|
||||
rsync_module_interpolation, mkdirs, config_true_value, list_from_csv, \
|
||||
get_hub, tpool_reraise, config_auto_int_value, storage_directory
|
||||
tpool_reraise, config_auto_int_value, storage_directory
|
||||
from swift.common.bufferedhttp import http_connect
|
||||
from swift.common.daemon import Daemon
|
||||
from swift.common.http import HTTP_OK, HTTP_INSUFFICIENT_STORAGE
|
||||
@ -43,8 +44,6 @@ from swift.common.storage_policy import POLICIES, REPL_POLICY
|
||||
|
||||
DEFAULT_RSYNC_TIMEOUT = 900
|
||||
|
||||
hubs.use_hub(get_hub())
|
||||
|
||||
|
||||
def _do_listdir(partition, replication_cycle):
|
||||
return (((partition + replication_cycle) % 10) == 0)
|
||||
@ -587,10 +586,9 @@ class ObjectReplicator(Daemon):
|
||||
and (override_devices is None
|
||||
or dev['device'] in override_devices))]:
|
||||
found_local = True
|
||||
dev_path = join(self.devices_dir, local_dev['device'])
|
||||
obj_path = join(dev_path, data_dir)
|
||||
tmp_path = join(dev_path, get_tmp_dir(policy))
|
||||
if self.mount_check and not ismount(dev_path):
|
||||
dev_path = check_drive(self.devices_dir, local_dev['device'],
|
||||
self.mount_check)
|
||||
if not dev_path:
|
||||
self._add_failure_stats(
|
||||
[(failure_dev['replication_ip'],
|
||||
failure_dev['device'])
|
||||
@ -599,6 +597,8 @@ class ObjectReplicator(Daemon):
|
||||
self.logger.warning(
|
||||
_('%s is not mounted'), local_dev['device'])
|
||||
continue
|
||||
obj_path = join(dev_path, data_dir)
|
||||
tmp_path = join(dev_path, get_tmp_dir(policy))
|
||||
unlink_older_than(tmp_path, time.time() -
|
||||
df_mgr.reclaim_age)
|
||||
if not os.path.exists(obj_path):
|
||||
@ -730,8 +730,9 @@ class ObjectReplicator(Daemon):
|
||||
if override_partitions and \
|
||||
job['partition'] not in override_partitions:
|
||||
continue
|
||||
dev_path = join(self.devices_dir, job['device'])
|
||||
if self.mount_check and not ismount(dev_path):
|
||||
dev_path = check_drive(self.devices_dir, job['device'],
|
||||
self.mount_check)
|
||||
if not dev_path:
|
||||
self._add_failure_stats([(failure_dev['replication_ip'],
|
||||
failure_dev['device'])
|
||||
for failure_dev in job['nodes']])
|
||||
|
@ -21,14 +21,15 @@ import time
|
||||
from swift import gettext_ as _
|
||||
from random import random
|
||||
|
||||
from eventlet import spawn, patcher, Timeout
|
||||
from eventlet import spawn, Timeout
|
||||
|
||||
from swift.common.bufferedhttp import http_connect
|
||||
from swift.common.constraints import check_drive
|
||||
from swift.common.exceptions import ConnectionTimeout
|
||||
from swift.common.ring import Ring
|
||||
from swift.common.utils import get_logger, renamer, write_pickle, \
|
||||
dump_recon_cache, config_true_value, ismount, ratelimit_sleep, urlparse, \
|
||||
split_path
|
||||
dump_recon_cache, config_true_value, ratelimit_sleep, urlparse, \
|
||||
split_path, eventlet_monkey_patch
|
||||
from swift.common.daemon import Daemon
|
||||
from swift.common.header_key_dict import HeaderKeyDict
|
||||
from swift.common.storage_policy import split_policy_string, PolicyError
|
||||
@ -95,8 +96,7 @@ class ObjectUpdater(Daemon):
|
||||
# read from container ring to ensure it's fresh
|
||||
self.get_container_ring().get_nodes('')
|
||||
for device in self._listdir(self.devices):
|
||||
if self.mount_check and \
|
||||
not ismount(os.path.join(self.devices, device)):
|
||||
if not check_drive(self.devices, device, self.mount_check):
|
||||
self.logger.increment('errors')
|
||||
self.logger.warning(
|
||||
_('Skipping %s as it is not mounted'), device)
|
||||
@ -108,8 +108,7 @@ class ObjectUpdater(Daemon):
|
||||
pids.append(pid)
|
||||
else:
|
||||
signal.signal(signal.SIGTERM, signal.SIG_DFL)
|
||||
patcher.monkey_patch(all=False, socket=True, select=True,
|
||||
thread=True)
|
||||
eventlet_monkey_patch()
|
||||
self.successes = 0
|
||||
self.failures = 0
|
||||
forkbegin = time.time()
|
||||
@ -139,8 +138,7 @@ class ObjectUpdater(Daemon):
|
||||
self.successes = 0
|
||||
self.failures = 0
|
||||
for device in self._listdir(self.devices):
|
||||
if self.mount_check and \
|
||||
not ismount(os.path.join(self.devices, device)):
|
||||
if not check_drive(self.devices, device, self.mount_check):
|
||||
self.logger.increment('errors')
|
||||
self.logger.warning(
|
||||
_('Skipping %s as it is not mounted'), device)
|
||||
|
@ -93,7 +93,8 @@ def source_key(resp):
|
||||
|
||||
:param resp: bufferedhttp response object
|
||||
"""
|
||||
return Timestamp(resp.getheader('x-backend-timestamp') or
|
||||
return Timestamp(resp.getheader('x-backend-data-timestamp') or
|
||||
resp.getheader('x-backend-timestamp') or
|
||||
resp.getheader('x-put-timestamp') or
|
||||
resp.getheader('x-timestamp') or 0)
|
||||
|
||||
@ -765,6 +766,7 @@ class ResumingGetter(object):
|
||||
self.concurrency = concurrency
|
||||
self.node = None
|
||||
self.header_provider = header_provider
|
||||
self.latest_404_timestamp = Timestamp(0)
|
||||
|
||||
# stuff from request
|
||||
self.req_method = req.method
|
||||
@ -1162,32 +1164,51 @@ class ResumingGetter(object):
|
||||
self.source_headers.append([])
|
||||
close_swift_conn(possible_source)
|
||||
else:
|
||||
if self.used_source_etag:
|
||||
src_headers = dict(
|
||||
(k.lower(), v) for k, v in
|
||||
possible_source.getheaders())
|
||||
src_headers = dict(
|
||||
(k.lower(), v) for k, v in
|
||||
possible_source.getheaders())
|
||||
if self.used_source_etag and \
|
||||
self.used_source_etag != src_headers.get(
|
||||
'x-object-sysmeta-ec-etag',
|
||||
src_headers.get('etag', '')).strip('"'):
|
||||
self.statuses.append(HTTP_NOT_FOUND)
|
||||
self.reasons.append('')
|
||||
self.bodies.append('')
|
||||
self.source_headers.append([])
|
||||
return False
|
||||
|
||||
if self.used_source_etag != src_headers.get(
|
||||
'x-object-sysmeta-ec-etag',
|
||||
src_headers.get('etag', '')).strip('"'):
|
||||
self.statuses.append(HTTP_NOT_FOUND)
|
||||
self.reasons.append('')
|
||||
self.bodies.append('')
|
||||
self.source_headers.append([])
|
||||
return False
|
||||
|
||||
self.statuses.append(possible_source.status)
|
||||
self.reasons.append(possible_source.reason)
|
||||
self.bodies.append(None)
|
||||
self.source_headers.append(possible_source.getheaders())
|
||||
self.sources.append((possible_source, node))
|
||||
if not self.newest: # one good source is enough
|
||||
return True
|
||||
# a possible source should only be added as a valid source
|
||||
# if its timestamp is newer than previously found tombstones
|
||||
ps_timestamp = Timestamp(
|
||||
src_headers.get('x-backend-data-timestamp') or
|
||||
src_headers.get('x-backend-timestamp') or
|
||||
src_headers.get('x-put-timestamp') or
|
||||
src_headers.get('x-timestamp') or 0)
|
||||
if ps_timestamp >= self.latest_404_timestamp:
|
||||
self.statuses.append(possible_source.status)
|
||||
self.reasons.append(possible_source.reason)
|
||||
self.bodies.append(None)
|
||||
self.source_headers.append(possible_source.getheaders())
|
||||
self.sources.append((possible_source, node))
|
||||
if not self.newest: # one good source is enough
|
||||
return True
|
||||
else:
|
||||
|
||||
self.statuses.append(possible_source.status)
|
||||
self.reasons.append(possible_source.reason)
|
||||
self.bodies.append(possible_source.read())
|
||||
self.source_headers.append(possible_source.getheaders())
|
||||
|
||||
# if 404, record the timestamp. If a good source shows up, its
|
||||
# timestamp will be compared to the latest 404.
|
||||
# For now checking only on objects, but future work could include
|
||||
# the same check for account and containers. See lp 1560574.
|
||||
if self.server_type == 'Object' and \
|
||||
possible_source.status == HTTP_NOT_FOUND:
|
||||
hdrs = HeaderKeyDict(possible_source.getheaders())
|
||||
ts = Timestamp(hdrs.get('X-Backend-Timestamp', 0))
|
||||
if ts > self.latest_404_timestamp:
|
||||
self.latest_404_timestamp = ts
|
||||
if possible_source.status == HTTP_INSUFFICIENT_STORAGE:
|
||||
self.app.error_limit(node, _('ERROR Insufficient Storage'))
|
||||
elif is_server_error(possible_source.status):
|
||||
@ -1225,6 +1246,12 @@ class ResumingGetter(object):
|
||||
# ran out of nodes, see if any stragglers will finish
|
||||
any(pile)
|
||||
|
||||
# this helps weed out any sucess status that were found before a 404
|
||||
# and added to the list in the case of x-newest.
|
||||
if self.sources:
|
||||
self.sources = [s for s in self.sources
|
||||
if source_key(s[0]) >= self.latest_404_timestamp]
|
||||
|
||||
if self.sources:
|
||||
self.sources.sort(key=lambda s: source_key(s[0]))
|
||||
source, node = self.sources.pop()
|
||||
|
@ -347,6 +347,8 @@ class Connection(object):
|
||||
self.connection.send('0\r\n\r\n')
|
||||
|
||||
self.response = self.connection.getresponse()
|
||||
# Hope it isn't big!
|
||||
self.response.body = self.response.read()
|
||||
self.connection.close()
|
||||
return self.response.status
|
||||
|
||||
|
@ -40,7 +40,7 @@ def tearDownModule():
|
||||
class TestObject(unittest2.TestCase):
|
||||
|
||||
def setUp(self):
|
||||
if tf.skip:
|
||||
if tf.skip or tf.skip2:
|
||||
raise SkipTest
|
||||
self.container = uuid4().hex
|
||||
|
||||
@ -583,7 +583,7 @@ class TestObject(unittest2.TestCase):
|
||||
self.assertIn(resp.status, (204, 404))
|
||||
|
||||
def test_copy_between_accounts(self):
|
||||
if tf.skip:
|
||||
if tf.skip2:
|
||||
raise SkipTest
|
||||
|
||||
source = '%s/%s' % (self.container, self.obj)
|
||||
|
@ -792,6 +792,101 @@ class TestSlo(Base):
|
||||
except ValueError:
|
||||
self.fail("COPY didn't copy the manifest (invalid json on GET)")
|
||||
|
||||
def test_slo_put_heartbeating(self):
|
||||
if 'yield_frequency' not in cluster_info['slo']:
|
||||
# old swift?
|
||||
raise SkipTest('Swift does not seem to support heartbeating')
|
||||
|
||||
def do_put(headers=None, include_error=False):
|
||||
file_item = self.env.container.file("manifest-heartbeat")
|
||||
seg_info = self.env.seg_info
|
||||
manifest_data = [seg_info['seg_a'], seg_info['seg_b'],
|
||||
seg_info['seg_c'], seg_info['seg_d'],
|
||||
seg_info['seg_e']]
|
||||
if include_error:
|
||||
manifest_data.append({'path': 'non-existent/segment'})
|
||||
resp = file_item.write(
|
||||
json.dumps(manifest_data),
|
||||
parms={'multipart-manifest': 'put', 'heartbeat': 'on'},
|
||||
hdrs=headers, return_resp=True)
|
||||
self.assertEqual(resp.status, 202)
|
||||
self.assertTrue(resp.chunked)
|
||||
body_lines = resp.body.split('\n', 2)
|
||||
self.assertFalse(body_lines[0].strip()) # all whitespace
|
||||
self.assertEqual('\r', body_lines[1])
|
||||
return body_lines[2]
|
||||
|
||||
body_lines = do_put().split('\n')
|
||||
self.assertIn('Response Status: 201 Created', body_lines)
|
||||
self.assertIn('Etag', [line.split(':', 1)[0] for line in body_lines])
|
||||
self.assertIn('Last Modified', [line.split(':', 1)[0]
|
||||
for line in body_lines])
|
||||
|
||||
body_lines = do_put({'Accept': 'text/plain'}).split('\n')
|
||||
self.assertIn('Response Status: 201 Created', body_lines)
|
||||
self.assertIn('Etag', [line.split(':', 1)[0] for line in body_lines])
|
||||
self.assertIn('Last Modified', [line.split(':', 1)[0]
|
||||
for line in body_lines])
|
||||
|
||||
body = do_put({'Accept': 'application/json'})
|
||||
try:
|
||||
resp = json.loads(body)
|
||||
except ValueError:
|
||||
self.fail('Expected JSON, got %r' % body)
|
||||
self.assertIn('Etag', resp)
|
||||
del resp['Etag']
|
||||
self.assertIn('Last Modified', resp)
|
||||
del resp['Last Modified']
|
||||
self.assertEqual(resp, {
|
||||
'Response Status': '201 Created',
|
||||
'Response Body': '',
|
||||
'Errors': [],
|
||||
})
|
||||
|
||||
body_lines = do_put(include_error=True).split('\n')
|
||||
self.assertIn('Response Status: 400 Bad Request', body_lines)
|
||||
self.assertIn('Response Body: Bad Request', body_lines)
|
||||
self.assertNotIn('Etag', [line.split(':', 1)[0]
|
||||
for line in body_lines])
|
||||
self.assertNotIn('Last Modified', [line.split(':', 1)[0]
|
||||
for line in body_lines])
|
||||
self.assertEqual(body_lines[-3:], [
|
||||
'Errors:',
|
||||
'non-existent/segment, 404 Not Found',
|
||||
'',
|
||||
])
|
||||
|
||||
body = do_put({'Accept': 'application/json'}, include_error=True)
|
||||
try:
|
||||
resp = json.loads(body)
|
||||
except ValueError:
|
||||
self.fail('Expected JSON, got %r' % body)
|
||||
self.assertNotIn('Etag', resp)
|
||||
self.assertNotIn('Last Modified', resp)
|
||||
self.assertEqual(resp, {
|
||||
'Response Status': '400 Bad Request',
|
||||
'Response Body': 'Bad Request\nThe server could not comply with '
|
||||
'the request since it is either malformed or '
|
||||
'otherwise incorrect.',
|
||||
'Errors': [
|
||||
['non-existent/segment', '404 Not Found'],
|
||||
],
|
||||
})
|
||||
|
||||
body = do_put({'Accept': 'application/json', 'ETag': 'bad etag'})
|
||||
try:
|
||||
resp = json.loads(body)
|
||||
except ValueError:
|
||||
self.fail('Expected JSON, got %r' % body)
|
||||
self.assertNotIn('Etag', resp)
|
||||
self.assertNotIn('Last Modified', resp)
|
||||
self.assertEqual(resp, {
|
||||
'Response Status': '422 Unprocessable Entity',
|
||||
'Response Body': 'Unprocessable Entity\nUnable to process the '
|
||||
'contained instructions',
|
||||
'Errors': [],
|
||||
})
|
||||
|
||||
def _make_manifest(self):
|
||||
file_item = self.env.container.file("manifest-post")
|
||||
seg_info = self.env.seg_info
|
||||
|
@ -444,7 +444,7 @@ class ProbeTest(unittest.TestCase):
|
||||
def revive_drive(self, device):
|
||||
disabled_name = device + "X"
|
||||
if os.path.isdir(disabled_name):
|
||||
renamer(device + "X", device)
|
||||
renamer(disabled_name, device)
|
||||
else:
|
||||
os.system('sudo mount %s' % device)
|
||||
|
||||
|
@ -246,6 +246,69 @@ class TestObjectHandoff(ReplProbeTest):
|
||||
else:
|
||||
self.fail("Expected ClientException but didn't get it")
|
||||
|
||||
def test_stale_reads(self):
|
||||
# Create container
|
||||
container = 'container-%s' % uuid4()
|
||||
client.put_container(self.url, self.token, container,
|
||||
headers={'X-Storage-Policy':
|
||||
self.policy.name})
|
||||
|
||||
# Kill one primary obj server
|
||||
obj = 'object-%s' % uuid4()
|
||||
opart, onodes = self.object_ring.get_nodes(
|
||||
self.account, container, obj)
|
||||
onode = onodes[0]
|
||||
kill_server((onode['ip'], onode['port']), self.ipport2server)
|
||||
|
||||
# Create container/obj (goes to two primaries and one handoff)
|
||||
client.put_object(self.url, self.token, container, obj, 'VERIFY')
|
||||
odata = client.get_object(self.url, self.token, container, obj)[-1]
|
||||
if odata != 'VERIFY':
|
||||
raise Exception('Object GET did not return VERIFY, instead it '
|
||||
'returned: %s' % repr(odata))
|
||||
|
||||
# Stash the on disk data from a primary for future comparison with the
|
||||
# handoff - this may not equal 'VERIFY' if for example the proxy has
|
||||
# crypto enabled
|
||||
direct_get_data = direct_client.direct_get_object(
|
||||
onodes[1], opart, self.account, container, obj, headers={
|
||||
'X-Backend-Storage-Policy-Index': self.policy.idx})[-1]
|
||||
|
||||
# Restart the first container/obj primary server again
|
||||
start_server((onode['ip'], onode['port']), self.ipport2server)
|
||||
|
||||
# send a delete request to primaries
|
||||
client.delete_object(self.url, self.token, container, obj)
|
||||
|
||||
# there should be .ts files in all primaries now
|
||||
for node in onodes:
|
||||
try:
|
||||
direct_client.direct_get_object(
|
||||
node, opart, self.account, container, obj, headers={
|
||||
'X-Backend-Storage-Policy-Index': self.policy.idx})
|
||||
except ClientException as err:
|
||||
self.assertEqual(err.http_status, 404)
|
||||
else:
|
||||
self.fail("Expected ClientException but didn't get it")
|
||||
|
||||
# verify that handoff still has the data, DELETEs should have gone
|
||||
# only to primaries
|
||||
another_onode = next(self.object_ring.get_more_nodes(opart))
|
||||
handoff_data = direct_client.direct_get_object(
|
||||
another_onode, opart, self.account, container, obj, headers={
|
||||
'X-Backend-Storage-Policy-Index': self.policy.idx})[-1]
|
||||
self.assertEqual(handoff_data, direct_get_data)
|
||||
|
||||
# Indirectly (i.e., through proxy) try to GET object, it should return
|
||||
# a 404, before bug #1560574, the proxy would return the stale object
|
||||
# from the handoff
|
||||
try:
|
||||
client.get_object(self.url, self.token, container, obj)
|
||||
except client.ClientException as err:
|
||||
self.assertEqual(err.http_status, 404)
|
||||
else:
|
||||
self.fail("Expected ClientException but didn't get it")
|
||||
|
||||
|
||||
class TestECObjectHandoff(ECProbeTest):
|
||||
|
||||
|
@ -19,6 +19,7 @@ from uuid import uuid4
|
||||
import os
|
||||
import time
|
||||
import shutil
|
||||
import re
|
||||
|
||||
from swiftclient import client
|
||||
from swift.obj.diskfile import get_data_dir
|
||||
@ -26,7 +27,7 @@ from swift.obj.diskfile import get_data_dir
|
||||
from test.probe.common import ReplProbeTest
|
||||
from swift.common.utils import readconf
|
||||
|
||||
EXCLUDE_FILES = ['hashes.pkl', 'hashes.invalid', '.lock']
|
||||
EXCLUDE_FILES = re.compile('^(hashes\.(pkl|invalid)|lock(-\d+)?)$')
|
||||
|
||||
|
||||
def collect_info(path_list):
|
||||
@ -43,7 +44,7 @@ def collect_info(path_list):
|
||||
temp_files_list = []
|
||||
temp_dir_list = []
|
||||
for root, dirs, files in os.walk(path):
|
||||
files = [f for f in files if f not in EXCLUDE_FILES]
|
||||
files = [f for f in files if not EXCLUDE_FILES.match(f)]
|
||||
temp_files_list += files
|
||||
temp_dir_list += dirs
|
||||
files_list.append(temp_files_list)
|
||||
|
@ -713,8 +713,8 @@ def quiet_eventlet_exceptions():
|
||||
def mock_check_drive(isdir=False, ismount=False):
|
||||
"""
|
||||
All device/drive/mount checking should be done through the constraints
|
||||
module if we keep the mocking consistly w/i that module we can keep our
|
||||
test robust to further rework on that interface.
|
||||
module. If we keep the mocking consistently within that module, we can
|
||||
keep our tests robust to further rework on that interface.
|
||||
|
||||
Replace the constraint modules underlying os calls with mocks.
|
||||
|
||||
|
@ -806,16 +806,14 @@ class TestReaper(unittest.TestCase):
|
||||
devices = prepare_data_dir()
|
||||
r = init_reaper(devices)
|
||||
|
||||
with patch('swift.account.reaper.ismount', lambda x: True):
|
||||
with patch(
|
||||
'swift.account.reaper.AccountReaper.reap_device') as foo:
|
||||
r.run_once()
|
||||
with patch('swift.account.reaper.AccountReaper.reap_device') as foo, \
|
||||
unit.mock_check_drive(ismount=True):
|
||||
r.run_once()
|
||||
self.assertEqual(foo.called, 1)
|
||||
|
||||
with patch('swift.account.reaper.ismount', lambda x: False):
|
||||
with patch(
|
||||
'swift.account.reaper.AccountReaper.reap_device') as foo:
|
||||
r.run_once()
|
||||
with patch('swift.account.reaper.AccountReaper.reap_device') as foo, \
|
||||
unit.mock_check_drive(ismount=False):
|
||||
r.run_once()
|
||||
self.assertFalse(foo.called)
|
||||
|
||||
with patch('swift.account.reaper.AccountReaper.reap_device') as foo:
|
||||
|
@ -353,6 +353,13 @@ class TestAccountController(unittest.TestCase):
|
||||
resp = req.get_response(self.controller)
|
||||
self.assertEqual(resp.status_int, 406)
|
||||
|
||||
def test_HEAD_invalid_accept(self):
|
||||
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'HEAD'},
|
||||
headers={'Accept': 'application/plain;q=1;q=0.5'})
|
||||
resp = req.get_response(self.controller)
|
||||
self.assertEqual(resp.status_int, 400)
|
||||
self.assertEqual(resp.body, '')
|
||||
|
||||
def test_HEAD_invalid_format(self):
|
||||
format = '%D1%BD%8A9' # invalid UTF-8; should be %E1%BD%8A9 (E -> D)
|
||||
req = Request.blank('/sda1/p/a?format=' + format,
|
||||
@ -787,6 +794,13 @@ class TestAccountController(unittest.TestCase):
|
||||
self.assertEqual(resp.headers['Content-Type'],
|
||||
'application/xml; charset=utf-8')
|
||||
|
||||
def test_GET_invalid_accept(self):
|
||||
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'GET'},
|
||||
headers={'Accept': 'application/plain;q=foo'})
|
||||
resp = req.get_response(self.controller)
|
||||
self.assertEqual(resp.status_int, 400)
|
||||
self.assertEqual(resp.body, 'Invalid Accept header')
|
||||
|
||||
def test_GET_over_limit(self):
|
||||
req = Request.blank(
|
||||
'/sda1/p/a?limit=%d' % (constraints.ACCOUNT_LISTING_LIMIT + 1),
|
||||
@ -2096,8 +2110,8 @@ class TestAccountController(unittest.TestCase):
|
||||
StoragePolicy(2, 'two', False),
|
||||
StoragePolicy(3, 'three', False)])
|
||||
class TestNonLegacyDefaultStoragePolicy(TestAccountController):
|
||||
|
||||
pass
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
unittest.main()
|
||||
|
@ -604,11 +604,15 @@ class TestUntar(unittest.TestCase):
|
||||
|
||||
def test_get_response_body(self):
|
||||
txt_body = bulk.get_response_body(
|
||||
'bad_formay', {'hey': 'there'}, [['json > xml', '202 Accepted']])
|
||||
'bad_formay', {'hey': 'there'}, [['json > xml', '202 Accepted']],
|
||||
"doesn't matter for text")
|
||||
self.assertTrue('hey: there' in txt_body)
|
||||
xml_body = bulk.get_response_body(
|
||||
'text/xml', {'hey': 'there'}, [['json > xml', '202 Accepted']])
|
||||
'text/xml', {'hey': 'there'}, [['json > xml', '202 Accepted']],
|
||||
'root_tag')
|
||||
self.assertTrue('>' in xml_body)
|
||||
self.assertTrue(xml_body.startswith('<root_tag>\n'))
|
||||
self.assertTrue(xml_body.endswith('\n</root_tag>\n'))
|
||||
|
||||
|
||||
class TestDelete(unittest.TestCase):
|
||||
|
@ -23,7 +23,7 @@ from textwrap import dedent
|
||||
import time
|
||||
import unittest
|
||||
|
||||
from swift.common import exceptions, swob
|
||||
from swift.common import swob
|
||||
from swift.common.header_key_dict import HeaderKeyDict
|
||||
from swift.common.middleware import dlo
|
||||
from swift.common.utils import closing_if_possible
|
||||
@ -38,7 +38,7 @@ def md5hex(s):
|
||||
|
||||
|
||||
class DloTestCase(unittest.TestCase):
|
||||
def call_dlo(self, req, app=None, expect_exception=False):
|
||||
def call_dlo(self, req, app=None):
|
||||
if app is None:
|
||||
app = self.dlo
|
||||
|
||||
@ -53,22 +53,11 @@ class DloTestCase(unittest.TestCase):
|
||||
|
||||
body_iter = app(req.environ, start_response)
|
||||
body = ''
|
||||
caught_exc = None
|
||||
try:
|
||||
# appease the close-checker
|
||||
with closing_if_possible(body_iter):
|
||||
for chunk in body_iter:
|
||||
body += chunk
|
||||
except Exception as exc:
|
||||
if expect_exception:
|
||||
caught_exc = exc
|
||||
else:
|
||||
raise
|
||||
|
||||
if expect_exception:
|
||||
return status[0], headers[0], body, caught_exc
|
||||
else:
|
||||
return status[0], headers[0], body
|
||||
# appease the close-checker
|
||||
with closing_if_possible(body_iter):
|
||||
for chunk in body_iter:
|
||||
body += chunk
|
||||
return status[0], headers[0], body
|
||||
|
||||
def setUp(self):
|
||||
self.app = FakeSwift()
|
||||
@ -561,7 +550,7 @@ class TestDloGetManifest(DloTestCase):
|
||||
environ={'REQUEST_METHOD': 'GET'},
|
||||
headers={'If-Modified-Since': 'Wed, 12 Feb 2014 22:24:52 GMT',
|
||||
'If-Unmodified-Since': 'Thu, 13 Feb 2014 23:25:53 GMT'})
|
||||
status, headers, body, exc = self.call_dlo(req, expect_exception=True)
|
||||
status, headers, body = self.call_dlo(req)
|
||||
|
||||
for _, _, hdrs in self.app.calls_with_headers[1:]:
|
||||
self.assertFalse('If-Modified-Since' in hdrs)
|
||||
@ -576,10 +565,10 @@ class TestDloGetManifest(DloTestCase):
|
||||
environ={'REQUEST_METHOD': 'GET'})
|
||||
status, headers, body = self.call_dlo(req)
|
||||
self.assertEqual(status, "409 Conflict")
|
||||
err_lines = self.dlo.logger.get_lines_for_level('error')
|
||||
self.assertEqual(len(err_lines), 1)
|
||||
self.assertTrue(err_lines[0].startswith(
|
||||
'ERROR: An error occurred while retrieving segments'))
|
||||
self.assertEqual(self.dlo.logger.get_lines_for_level('error'), [
|
||||
'While processing manifest /v1/AUTH_test/mancon/manifest, '
|
||||
'got 403 while retrieving /v1/AUTH_test/c/seg_01',
|
||||
])
|
||||
|
||||
def test_error_fetching_second_segment(self):
|
||||
self.app.register(
|
||||
@ -588,16 +577,15 @@ class TestDloGetManifest(DloTestCase):
|
||||
|
||||
req = swob.Request.blank('/v1/AUTH_test/mancon/manifest',
|
||||
environ={'REQUEST_METHOD': 'GET'})
|
||||
status, headers, body, exc = self.call_dlo(req, expect_exception=True)
|
||||
status, headers, body = self.call_dlo(req)
|
||||
headers = HeaderKeyDict(headers)
|
||||
|
||||
self.assertTrue(isinstance(exc, exceptions.SegmentError))
|
||||
self.assertEqual(status, "200 OK")
|
||||
self.assertEqual(''.join(body), "aaaaa") # first segment made it out
|
||||
err_lines = self.dlo.logger.get_lines_for_level('error')
|
||||
self.assertEqual(len(err_lines), 1)
|
||||
self.assertTrue(err_lines[0].startswith(
|
||||
'ERROR: An error occurred while retrieving segments'))
|
||||
self.assertEqual(self.dlo.logger.get_lines_for_level('error'), [
|
||||
'While processing manifest /v1/AUTH_test/mancon/manifest, '
|
||||
'got 403 while retrieving /v1/AUTH_test/c/seg_02',
|
||||
])
|
||||
|
||||
def test_error_listing_container_first_listing_request(self):
|
||||
self.app.register(
|
||||
@ -620,9 +608,7 @@ class TestDloGetManifest(DloTestCase):
|
||||
environ={'REQUEST_METHOD': 'GET'},
|
||||
headers={'Range': 'bytes=-5'})
|
||||
with mock.patch(LIMIT, 3):
|
||||
status, headers, body, exc = self.call_dlo(
|
||||
req, expect_exception=True)
|
||||
self.assertTrue(isinstance(exc, exceptions.ListingIterError))
|
||||
status, headers, body = self.call_dlo(req)
|
||||
self.assertEqual(status, "200 OK")
|
||||
self.assertEqual(body, "aaaaabbbbbccccc")
|
||||
|
||||
@ -634,10 +620,9 @@ class TestDloGetManifest(DloTestCase):
|
||||
|
||||
req = swob.Request.blank('/v1/AUTH_test/mancon/manifest',
|
||||
environ={'REQUEST_METHOD': 'GET'})
|
||||
status, headers, body, exc = self.call_dlo(req, expect_exception=True)
|
||||
status, headers, body = self.call_dlo(req)
|
||||
headers = HeaderKeyDict(headers)
|
||||
|
||||
self.assertTrue(isinstance(exc, exceptions.SegmentError))
|
||||
self.assertEqual(status, "200 OK")
|
||||
self.assertEqual(''.join(body), "aaaaabbWRONGbb") # stop after error
|
||||
|
||||
@ -712,12 +697,10 @@ class TestDloGetManifest(DloTestCase):
|
||||
mock.patch('swift.common.request_helpers.is_success',
|
||||
mock_is_success), \
|
||||
mock.patch.object(dlo, 'is_success', mock_is_success):
|
||||
status, headers, body, exc = self.call_dlo(
|
||||
req, expect_exception=True)
|
||||
status, headers, body = self.call_dlo(req)
|
||||
|
||||
self.assertEqual(status, '200 OK')
|
||||
self.assertEqual(body, 'aaaaabbbbbccccc')
|
||||
self.assertTrue(isinstance(exc, exceptions.SegmentError))
|
||||
|
||||
def test_get_oversize_segment(self):
|
||||
# If we send a Content-Length header to the client, it's based on the
|
||||
@ -735,13 +718,12 @@ class TestDloGetManifest(DloTestCase):
|
||||
req = swob.Request.blank(
|
||||
'/v1/AUTH_test/mancon/manifest',
|
||||
environ={'REQUEST_METHOD': 'GET'})
|
||||
status, headers, body, exc = self.call_dlo(req, expect_exception=True)
|
||||
status, headers, body = self.call_dlo(req)
|
||||
headers = HeaderKeyDict(headers)
|
||||
|
||||
self.assertEqual(status, '200 OK') # sanity check
|
||||
self.assertEqual(headers.get('Content-Length'), '25') # sanity check
|
||||
self.assertEqual(body, 'aaaaabbbbbccccccccccccccc')
|
||||
self.assertTrue(isinstance(exc, exceptions.SegmentError))
|
||||
self.assertEqual(
|
||||
self.app.calls,
|
||||
[('GET', '/v1/AUTH_test/mancon/manifest'),
|
||||
@ -768,13 +750,12 @@ class TestDloGetManifest(DloTestCase):
|
||||
req = swob.Request.blank(
|
||||
'/v1/AUTH_test/mancon/manifest',
|
||||
environ={'REQUEST_METHOD': 'GET'})
|
||||
status, headers, body, exc = self.call_dlo(req, expect_exception=True)
|
||||
status, headers, body = self.call_dlo(req)
|
||||
headers = HeaderKeyDict(headers)
|
||||
|
||||
self.assertEqual(status, '200 OK') # sanity check
|
||||
self.assertEqual(headers.get('Content-Length'), '25') # sanity check
|
||||
self.assertEqual(body, 'aaaaabbbbbccccdddddeeeee')
|
||||
self.assertTrue(isinstance(exc, exceptions.SegmentError))
|
||||
|
||||
def test_get_undersize_segment_range(self):
|
||||
# Shrink it by a single byte
|
||||
@ -787,13 +768,12 @@ class TestDloGetManifest(DloTestCase):
|
||||
'/v1/AUTH_test/mancon/manifest',
|
||||
environ={'REQUEST_METHOD': 'GET'},
|
||||
headers={'Range': 'bytes=0-14'})
|
||||
status, headers, body, exc = self.call_dlo(req, expect_exception=True)
|
||||
status, headers, body = self.call_dlo(req)
|
||||
headers = HeaderKeyDict(headers)
|
||||
|
||||
self.assertEqual(status, '206 Partial Content') # sanity check
|
||||
self.assertEqual(headers.get('Content-Length'), '15') # sanity check
|
||||
self.assertEqual(body, 'aaaaabbbbbcccc')
|
||||
self.assertTrue(isinstance(exc, exceptions.SegmentError))
|
||||
|
||||
def test_get_with_auth_overridden(self):
|
||||
auth_got_called = [0]
|
||||
|
@ -108,10 +108,7 @@ class TestCacheMiddleware(unittest.TestCase):
|
||||
self.assertTrue(isinstance(resp['swift.cache'], MemcacheRing))
|
||||
|
||||
def test_conf_default_read(self):
|
||||
orig_parser = memcache.ConfigParser
|
||||
memcache.ConfigParser = ExcConfigParser
|
||||
count = 0
|
||||
try:
|
||||
with mock.patch.object(memcache, 'ConfigParser', ExcConfigParser):
|
||||
for d in ({},
|
||||
{'memcache_servers': '6.7.8.9:10'},
|
||||
{'memcache_serialization_support': '0'},
|
||||
@ -123,39 +120,27 @@ class TestCacheMiddleware(unittest.TestCase):
|
||||
{'memcache_serialization_support': '0',
|
||||
'memcache_max_connections': '30'}
|
||||
):
|
||||
try:
|
||||
with self.assertRaises(Exception) as catcher:
|
||||
memcache.MemcacheMiddleware(FakeApp(), d)
|
||||
except Exception as err:
|
||||
self.assertEqual(
|
||||
str(err),
|
||||
"read called with '/etc/swift/memcache.conf'")
|
||||
count += 1
|
||||
finally:
|
||||
memcache.ConfigParser = orig_parser
|
||||
self.assertEqual(count, 7)
|
||||
self.assertEqual(
|
||||
str(catcher.exception),
|
||||
"read called with '/etc/swift/memcache.conf'")
|
||||
|
||||
def test_conf_set_no_read(self):
|
||||
orig_parser = memcache.ConfigParser
|
||||
memcache.ConfigParser = ExcConfigParser
|
||||
exc = None
|
||||
try:
|
||||
memcache.MemcacheMiddleware(
|
||||
FakeApp(), {'memcache_servers': '1.2.3.4:5',
|
||||
'memcache_serialization_support': '2',
|
||||
'memcache_max_connections': '30'})
|
||||
except Exception as err:
|
||||
exc = err
|
||||
finally:
|
||||
memcache.ConfigParser = orig_parser
|
||||
with mock.patch.object(memcache, 'ConfigParser', ExcConfigParser):
|
||||
exc = None
|
||||
try:
|
||||
memcache.MemcacheMiddleware(
|
||||
FakeApp(), {'memcache_servers': '1.2.3.4:5',
|
||||
'memcache_serialization_support': '2',
|
||||
'memcache_max_connections': '30'})
|
||||
except Exception as err:
|
||||
exc = err
|
||||
self.assertIsNone(exc)
|
||||
|
||||
def test_conf_default(self):
|
||||
orig_parser = memcache.ConfigParser
|
||||
memcache.ConfigParser = EmptyConfigParser
|
||||
try:
|
||||
with mock.patch.object(memcache, 'ConfigParser', EmptyConfigParser):
|
||||
app = memcache.MemcacheMiddleware(FakeApp(), {})
|
||||
finally:
|
||||
memcache.ConfigParser = orig_parser
|
||||
self.assertEqual(app.memcache_servers, '127.0.0.1:11211')
|
||||
self.assertEqual(app.memcache._allow_pickle, False)
|
||||
self.assertEqual(app.memcache._allow_unpickle, False)
|
||||
@ -163,16 +148,12 @@ class TestCacheMiddleware(unittest.TestCase):
|
||||
app.memcache._client_cache['127.0.0.1:11211'].max_size, 2)
|
||||
|
||||
def test_conf_inline(self):
|
||||
orig_parser = memcache.ConfigParser
|
||||
memcache.ConfigParser = get_config_parser()
|
||||
try:
|
||||
with mock.patch.object(memcache, 'ConfigParser', get_config_parser()):
|
||||
app = memcache.MemcacheMiddleware(
|
||||
FakeApp(),
|
||||
{'memcache_servers': '6.7.8.9:10',
|
||||
'memcache_serialization_support': '0',
|
||||
'memcache_max_connections': '5'})
|
||||
finally:
|
||||
memcache.ConfigParser = orig_parser
|
||||
self.assertEqual(app.memcache_servers, '6.7.8.9:10')
|
||||
self.assertEqual(app.memcache._allow_pickle, True)
|
||||
self.assertEqual(app.memcache._allow_unpickle, True)
|
||||
@ -180,12 +161,9 @@ class TestCacheMiddleware(unittest.TestCase):
|
||||
app.memcache._client_cache['6.7.8.9:10'].max_size, 5)
|
||||
|
||||
def test_conf_extra_no_section(self):
|
||||
orig_parser = memcache.ConfigParser
|
||||
memcache.ConfigParser = get_config_parser(section='foobar')
|
||||
try:
|
||||
with mock.patch.object(memcache, 'ConfigParser',
|
||||
get_config_parser(section='foobar')):
|
||||
app = memcache.MemcacheMiddleware(FakeApp(), {})
|
||||
finally:
|
||||
memcache.ConfigParser = orig_parser
|
||||
self.assertEqual(app.memcache_servers, '127.0.0.1:11211')
|
||||
self.assertEqual(app.memcache._allow_pickle, False)
|
||||
self.assertEqual(app.memcache._allow_unpickle, False)
|
||||
@ -193,14 +171,11 @@ class TestCacheMiddleware(unittest.TestCase):
|
||||
app.memcache._client_cache['127.0.0.1:11211'].max_size, 2)
|
||||
|
||||
def test_conf_extra_no_option(self):
|
||||
orig_parser = memcache.ConfigParser
|
||||
memcache.ConfigParser = get_config_parser(
|
||||
replacement_parser = get_config_parser(
|
||||
memcache_servers='error', memcache_serialization_support='error',
|
||||
memcache_max_connections='error')
|
||||
try:
|
||||
with mock.patch.object(memcache, 'ConfigParser', replacement_parser):
|
||||
app = memcache.MemcacheMiddleware(FakeApp(), {})
|
||||
finally:
|
||||
memcache.ConfigParser = orig_parser
|
||||
self.assertEqual(app.memcache_servers, '127.0.0.1:11211')
|
||||
self.assertEqual(app.memcache._allow_pickle, False)
|
||||
self.assertEqual(app.memcache._allow_unpickle, False)
|
||||
@ -208,16 +183,12 @@ class TestCacheMiddleware(unittest.TestCase):
|
||||
app.memcache._client_cache['127.0.0.1:11211'].max_size, 2)
|
||||
|
||||
def test_conf_inline_other_max_conn(self):
|
||||
orig_parser = memcache.ConfigParser
|
||||
memcache.ConfigParser = get_config_parser()
|
||||
try:
|
||||
with mock.patch.object(memcache, 'ConfigParser', get_config_parser()):
|
||||
app = memcache.MemcacheMiddleware(
|
||||
FakeApp(),
|
||||
{'memcache_servers': '6.7.8.9:10',
|
||||
'memcache_serialization_support': '0',
|
||||
'max_connections': '5'})
|
||||
finally:
|
||||
memcache.ConfigParser = orig_parser
|
||||
self.assertEqual(app.memcache_servers, '6.7.8.9:10')
|
||||
self.assertEqual(app.memcache._allow_pickle, True)
|
||||
self.assertEqual(app.memcache._allow_unpickle, True)
|
||||
@ -225,16 +196,12 @@ class TestCacheMiddleware(unittest.TestCase):
|
||||
app.memcache._client_cache['6.7.8.9:10'].max_size, 5)
|
||||
|
||||
def test_conf_inline_bad_max_conn(self):
|
||||
orig_parser = memcache.ConfigParser
|
||||
memcache.ConfigParser = get_config_parser()
|
||||
try:
|
||||
with mock.patch.object(memcache, 'ConfigParser', get_config_parser()):
|
||||
app = memcache.MemcacheMiddleware(
|
||||
FakeApp(),
|
||||
{'memcache_servers': '6.7.8.9:10',
|
||||
'memcache_serialization_support': '0',
|
||||
'max_connections': 'bad42'})
|
||||
finally:
|
||||
memcache.ConfigParser = orig_parser
|
||||
self.assertEqual(app.memcache_servers, '6.7.8.9:10')
|
||||
self.assertEqual(app.memcache._allow_pickle, True)
|
||||
self.assertEqual(app.memcache._allow_unpickle, True)
|
||||
@ -242,12 +209,8 @@ class TestCacheMiddleware(unittest.TestCase):
|
||||
app.memcache._client_cache['6.7.8.9:10'].max_size, 4)
|
||||
|
||||
def test_conf_from_extra_conf(self):
|
||||
orig_parser = memcache.ConfigParser
|
||||
memcache.ConfigParser = get_config_parser()
|
||||
try:
|
||||
with mock.patch.object(memcache, 'ConfigParser', get_config_parser()):
|
||||
app = memcache.MemcacheMiddleware(FakeApp(), {})
|
||||
finally:
|
||||
memcache.ConfigParser = orig_parser
|
||||
self.assertEqual(app.memcache_servers, '1.2.3.4:5')
|
||||
self.assertEqual(app.memcache._allow_pickle, False)
|
||||
self.assertEqual(app.memcache._allow_unpickle, True)
|
||||
@ -255,13 +218,9 @@ class TestCacheMiddleware(unittest.TestCase):
|
||||
app.memcache._client_cache['1.2.3.4:5'].max_size, 4)
|
||||
|
||||
def test_conf_from_extra_conf_bad_max_conn(self):
|
||||
orig_parser = memcache.ConfigParser
|
||||
memcache.ConfigParser = get_config_parser(
|
||||
memcache_max_connections='bad42')
|
||||
try:
|
||||
with mock.patch.object(memcache, 'ConfigParser', get_config_parser(
|
||||
memcache_max_connections='bad42')):
|
||||
app = memcache.MemcacheMiddleware(FakeApp(), {})
|
||||
finally:
|
||||
memcache.ConfigParser = orig_parser
|
||||
self.assertEqual(app.memcache_servers, '1.2.3.4:5')
|
||||
self.assertEqual(app.memcache._allow_pickle, False)
|
||||
self.assertEqual(app.memcache._allow_unpickle, True)
|
||||
@ -269,15 +228,11 @@ class TestCacheMiddleware(unittest.TestCase):
|
||||
app.memcache._client_cache['1.2.3.4:5'].max_size, 2)
|
||||
|
||||
def test_conf_from_inline_and_maxc_from_extra_conf(self):
|
||||
orig_parser = memcache.ConfigParser
|
||||
memcache.ConfigParser = get_config_parser()
|
||||
try:
|
||||
with mock.patch.object(memcache, 'ConfigParser', get_config_parser()):
|
||||
app = memcache.MemcacheMiddleware(
|
||||
FakeApp(),
|
||||
{'memcache_servers': '6.7.8.9:10',
|
||||
'memcache_serialization_support': '0'})
|
||||
finally:
|
||||
memcache.ConfigParser = orig_parser
|
||||
self.assertEqual(app.memcache_servers, '6.7.8.9:10')
|
||||
self.assertEqual(app.memcache._allow_pickle, True)
|
||||
self.assertEqual(app.memcache._allow_unpickle, True)
|
||||
@ -285,15 +240,11 @@ class TestCacheMiddleware(unittest.TestCase):
|
||||
app.memcache._client_cache['6.7.8.9:10'].max_size, 4)
|
||||
|
||||
def test_conf_from_inline_and_sers_from_extra_conf(self):
|
||||
orig_parser = memcache.ConfigParser
|
||||
memcache.ConfigParser = get_config_parser()
|
||||
try:
|
||||
with mock.patch.object(memcache, 'ConfigParser', get_config_parser()):
|
||||
app = memcache.MemcacheMiddleware(
|
||||
FakeApp(),
|
||||
{'memcache_servers': '6.7.8.9:10',
|
||||
'memcache_max_connections': '42'})
|
||||
finally:
|
||||
memcache.ConfigParser = orig_parser
|
||||
self.assertEqual(app.memcache_servers, '6.7.8.9:10')
|
||||
self.assertEqual(app.memcache._allow_pickle, False)
|
||||
self.assertEqual(app.memcache._allow_unpickle, True)
|
||||
|
@ -19,6 +19,7 @@ import mock
|
||||
import os
|
||||
from posix import stat_result, statvfs_result
|
||||
from shutil import rmtree
|
||||
import tempfile
|
||||
import unittest
|
||||
from unittest import TestCase
|
||||
|
||||
@ -212,9 +213,7 @@ class FakeRecon(object):
|
||||
class TestReconSuccess(TestCase):
|
||||
|
||||
def setUp(self):
|
||||
# can't use mkdtemp here as 2.6 gzip puts the filename in the header
|
||||
# which will cause ring md5 checks to fail
|
||||
self.tempdir = '/tmp/swift_recon_md5_test'
|
||||
self.tempdir = tempfile.mkdtemp(prefix='swift_recon_md5_test')
|
||||
utils.mkdirs(self.tempdir)
|
||||
self.app = self._get_app()
|
||||
self.mockos = MockOS()
|
||||
@ -269,21 +268,7 @@ class TestReconSuccess(TestCase):
|
||||
return app
|
||||
|
||||
def _create_ring(self, ringpath, replica_map, devs, part_shift):
|
||||
def fake_time():
|
||||
return 0
|
||||
|
||||
def fake_base(fname):
|
||||
# least common denominator with gzip versions is to
|
||||
# not use the .gz extension in the gzip header
|
||||
return fname[:-3]
|
||||
|
||||
# eliminate time from the equation as gzip 2.6 includes
|
||||
# it in the header resulting in md5 file mismatch, also
|
||||
# have to mock basename as one version uses it, one doesn't
|
||||
with mock.patch("time.time", fake_time):
|
||||
with mock.patch("os.path.basename", fake_base):
|
||||
ring.RingData(replica_map, devs, part_shift).save(ringpath,
|
||||
mtime=None)
|
||||
ring.RingData(replica_map, devs, part_shift).save(ringpath)
|
||||
|
||||
def _create_rings(self):
|
||||
# make the rings unique so they have different md5 sums
|
||||
|
@ -23,7 +23,6 @@ import unittest
|
||||
from mock import patch
|
||||
from StringIO import StringIO
|
||||
from swift.common import swob, utils
|
||||
from swift.common.exceptions import ListingIterError, SegmentError
|
||||
from swift.common.header_key_dict import HeaderKeyDict
|
||||
from swift.common.middleware import slo
|
||||
from swift.common.swob import Request, HTTPException
|
||||
@ -61,7 +60,7 @@ class SloTestCase(unittest.TestCase):
|
||||
self.slo = slo.filter_factory(slo_conf)(self.app)
|
||||
self.slo.logger = self.app.logger
|
||||
|
||||
def call_app(self, req, app=None, expect_exception=False):
|
||||
def call_app(self, req, app=None):
|
||||
if app is None:
|
||||
app = self.app
|
||||
|
||||
@ -76,22 +75,11 @@ class SloTestCase(unittest.TestCase):
|
||||
|
||||
body_iter = app(req.environ, start_response)
|
||||
body = ''
|
||||
caught_exc = None
|
||||
try:
|
||||
# appease the close-checker
|
||||
with closing_if_possible(body_iter):
|
||||
for chunk in body_iter:
|
||||
body += chunk
|
||||
except Exception as exc:
|
||||
if expect_exception:
|
||||
caught_exc = exc
|
||||
else:
|
||||
raise
|
||||
|
||||
if expect_exception:
|
||||
return status[0], headers[0], body, caught_exc
|
||||
else:
|
||||
return status[0], headers[0], body
|
||||
# appease the close-checker
|
||||
with closing_if_possible(body_iter):
|
||||
for chunk in body_iter:
|
||||
body += chunk
|
||||
return status[0], headers[0], body
|
||||
|
||||
def call_slo(self, req, **kwargs):
|
||||
return self.call_app(req, app=self.slo, **kwargs)
|
||||
@ -324,6 +312,9 @@ class TestSloPutManifest(SloTestCase):
|
||||
self.app.register(
|
||||
'PUT', '/', swob.HTTPOk, {}, 'passed')
|
||||
|
||||
self.app.register(
|
||||
'HEAD', '/v1/AUTH_test/cont/missing-object',
|
||||
swob.HTTPNotFound, {}, None)
|
||||
self.app.register(
|
||||
'HEAD', '/v1/AUTH_test/cont/object',
|
||||
swob.HTTPOk,
|
||||
@ -355,7 +346,8 @@ class TestSloPutManifest(SloTestCase):
|
||||
{'Content-Length': '1', 'Etag': 'a'},
|
||||
None)
|
||||
self.app.register(
|
||||
'PUT', '/v1/AUTH_test/c/man', swob.HTTPCreated, {}, None)
|
||||
'PUT', '/v1/AUTH_test/c/man', swob.HTTPCreated,
|
||||
{'Last-Modified': 'Fri, 01 Feb 2012 20:38:36 GMT'}, None)
|
||||
self.app.register(
|
||||
'DELETE', '/v1/AUTH_test/c/man', swob.HTTPNoContent, {}, None)
|
||||
|
||||
@ -444,6 +436,219 @@ class TestSloPutManifest(SloTestCase):
|
||||
'Content-Type %r does not end with swift_bytes=100' %
|
||||
req.headers['Content-Type'])
|
||||
|
||||
@patch('swift.common.middleware.slo.time')
|
||||
def test_handle_multipart_put_fast_heartbeat(self, mock_time):
|
||||
mock_time.time.side_effect = [
|
||||
0, # start time
|
||||
1, # first segment's fast
|
||||
2, # second segment's also fast!
|
||||
]
|
||||
test_json_data = json.dumps([{'path': u'/cont/object\u2661',
|
||||
'etag': 'etagoftheobjectsegment',
|
||||
'size_bytes': 100},
|
||||
{'path': '/cont/object',
|
||||
'etag': 'etagoftheobjectsegment',
|
||||
'size_bytes': 100}])
|
||||
req = Request.blank(
|
||||
'/v1/AUTH_test/c/man?multipart-manifest=put&heartbeat=on',
|
||||
environ={'REQUEST_METHOD': 'PUT'}, headers={'Accept': 'test'},
|
||||
body=test_json_data)
|
||||
|
||||
status, headers, body = self.call_slo(req)
|
||||
self.assertEqual('202 Accepted', status)
|
||||
headers_found = [h.lower() for h, v in headers]
|
||||
self.assertNotIn('etag', headers_found)
|
||||
body = ''.join(body)
|
||||
gen_etag = '"' + md5hex('etagoftheobjectsegment' * 2) + '"'
|
||||
self.assertTrue(body.startswith(' \r\n\r\n'),
|
||||
'Expected body to start with single space and two '
|
||||
'blank lines; got %r' % body)
|
||||
self.assertIn('\nResponse Status: 201 Created\n', body)
|
||||
self.assertIn('\nResponse Body: \n', body)
|
||||
self.assertIn('\nEtag: %s\n' % gen_etag, body)
|
||||
self.assertIn('\nLast Modified: Fri, 01 Feb 2012 20:38:36 GMT\n', body)
|
||||
|
||||
@patch('swift.common.middleware.slo.time')
|
||||
def test_handle_multipart_long_running_put_success(self, mock_time):
|
||||
mock_time.time.side_effect = [
|
||||
0, # start time
|
||||
1, # first segment's fast
|
||||
20, # second segment's slow
|
||||
]
|
||||
test_json_data = json.dumps([{'path': u'/cont/object\u2661',
|
||||
'etag': 'etagoftheobjectsegment',
|
||||
'size_bytes': 100},
|
||||
{'path': '/cont/object',
|
||||
'etag': 'etagoftheobjectsegment',
|
||||
'size_bytes': 100}])
|
||||
req = Request.blank(
|
||||
'/v1/AUTH_test/c/man?multipart-manifest=put&heartbeat=on',
|
||||
environ={'REQUEST_METHOD': 'PUT'}, headers={'Accept': 'test'},
|
||||
body=test_json_data)
|
||||
|
||||
status, headers, body = self.call_slo(req)
|
||||
self.assertEqual('202 Accepted', status)
|
||||
headers_found = [h.lower() for h, v in headers]
|
||||
self.assertNotIn('etag', headers_found)
|
||||
body = ''.join(body)
|
||||
gen_etag = '"' + md5hex('etagoftheobjectsegment' * 2) + '"'
|
||||
self.assertTrue(body.startswith(' \r\n\r\n'),
|
||||
'Expected body to start with two spaces and two '
|
||||
'blank lines; got %r' % body)
|
||||
self.assertIn('\nResponse Status: 201 Created\n', body)
|
||||
self.assertIn('\nResponse Body: \n', body)
|
||||
self.assertIn('\nEtag: %s\n' % gen_etag, body)
|
||||
self.assertIn('\nLast Modified: Fri, 01 Feb 2012 20:38:36 GMT\n', body)
|
||||
|
||||
@patch('swift.common.middleware.slo.time')
|
||||
def test_handle_multipart_long_running_put_success_json(self, mock_time):
|
||||
mock_time.time.side_effect = [
|
||||
0, # start time
|
||||
11, # first segment's slow
|
||||
22, # second segment's also slow
|
||||
]
|
||||
test_json_data = json.dumps([{'path': u'/cont/object\u2661',
|
||||
'etag': 'etagoftheobjectsegment',
|
||||
'size_bytes': 100},
|
||||
{'path': '/cont/object',
|
||||
'etag': 'etagoftheobjectsegment',
|
||||
'size_bytes': 100}])
|
||||
req = Request.blank(
|
||||
'/v1/AUTH_test/c/man?multipart-manifest=put&heartbeat=on',
|
||||
environ={'REQUEST_METHOD': 'PUT'},
|
||||
headers={'Accept': 'application/json'},
|
||||
body=test_json_data)
|
||||
|
||||
status, headers, body = self.call_slo(req)
|
||||
self.assertEqual('202 Accepted', status)
|
||||
headers_found = [h.lower() for h, v in headers]
|
||||
self.assertNotIn('etag', headers_found)
|
||||
body = ''.join(body)
|
||||
gen_etag = '"' + md5hex('etagoftheobjectsegment' * 2) + '"'
|
||||
self.assertTrue(body.startswith(' \r\n\r\n'),
|
||||
'Expected body to start with three spaces and two '
|
||||
'blank lines; got %r' % body)
|
||||
body = json.loads(body)
|
||||
self.assertEqual(body['Response Status'], '201 Created')
|
||||
self.assertEqual(body['Response Body'], '')
|
||||
self.assertEqual(body['Etag'], gen_etag)
|
||||
self.assertEqual(body['Last Modified'],
|
||||
'Fri, 01 Feb 2012 20:38:36 GMT')
|
||||
|
||||
@patch('swift.common.middleware.slo.time')
|
||||
def test_handle_multipart_long_running_put_failure(self, mock_time):
|
||||
mock_time.time.side_effect = [
|
||||
0, # start time
|
||||
1, # first segment's fast
|
||||
20, # second segment's slow
|
||||
]
|
||||
test_json_data = json.dumps([{'path': u'/cont/missing-object',
|
||||
'etag': 'etagoftheobjectsegment',
|
||||
'size_bytes': 100},
|
||||
{'path': '/cont/object',
|
||||
'etag': 'etagoftheobjectsegment',
|
||||
'size_bytes': 99}])
|
||||
req = Request.blank(
|
||||
'/v1/AUTH_test/c/man?multipart-manifest=put&heartbeat=on',
|
||||
environ={'REQUEST_METHOD': 'PUT'}, headers={'Accept': 'test'},
|
||||
body=test_json_data)
|
||||
|
||||
status, headers, body = self.call_slo(req)
|
||||
self.assertEqual('202 Accepted', status)
|
||||
headers_found = [h.lower() for h, v in headers]
|
||||
self.assertNotIn('etag', headers_found)
|
||||
body = ''.join(body).split('\n')
|
||||
self.assertEqual([' \r', '\r'], body[:2],
|
||||
'Expected body to start with two spaces and two '
|
||||
'blank lines; got %r' % '\n'.join(body))
|
||||
self.assertIn('Response Status: 400 Bad Request', body[2:5])
|
||||
self.assertIn('Response Body: Bad Request', body)
|
||||
self.assertIn('The server could not comply with the request since it '
|
||||
'is either malformed or otherwise incorrect.', body)
|
||||
self.assertFalse(any(line.startswith('Etag: ') for line in body))
|
||||
self.assertFalse(any(line.startswith('Last Modified: ')
|
||||
for line in body))
|
||||
self.assertEqual(body[-4], 'Errors:')
|
||||
self.assertEqual(sorted(body[-3:-1]), [
|
||||
'/cont/missing-object, 404 Not Found',
|
||||
'/cont/object, Size Mismatch',
|
||||
])
|
||||
self.assertEqual(body[-1], '')
|
||||
|
||||
@patch('swift.common.middleware.slo.time')
|
||||
def test_handle_multipart_long_running_put_failure_json(self, mock_time):
|
||||
mock_time.time.side_effect = [
|
||||
0, # start time
|
||||
11, # first segment's slow
|
||||
22, # second segment's also slow
|
||||
]
|
||||
test_json_data = json.dumps([{'path': u'/cont/object\u2661',
|
||||
'etag': 'etagoftheobjectsegment',
|
||||
'size_bytes': 99},
|
||||
{'path': '/cont/object',
|
||||
'etag': 'some other etag',
|
||||
'size_bytes': 100}])
|
||||
req = Request.blank(
|
||||
'/v1/AUTH_test/c/man?multipart-manifest=put&heartbeat=on',
|
||||
environ={'REQUEST_METHOD': 'PUT'},
|
||||
headers={'Accept': 'application/json'},
|
||||
body=test_json_data)
|
||||
|
||||
status, headers, body = self.call_slo(req)
|
||||
self.assertEqual('202 Accepted', status)
|
||||
headers_found = [h.lower() for h, v in headers]
|
||||
self.assertNotIn('etag', headers_found)
|
||||
body = ''.join(body)
|
||||
self.assertTrue(body.startswith(' \r\n\r\n'),
|
||||
'Expected body to start with three spaces and two '
|
||||
'blank lines; got %r' % body)
|
||||
body = json.loads(body)
|
||||
self.assertEqual(body['Response Status'], '400 Bad Request')
|
||||
self.assertEqual(body['Response Body'], 'Bad Request\nThe server '
|
||||
'could not comply with the request since it is '
|
||||
'either malformed or otherwise incorrect.')
|
||||
self.assertNotIn('Etag', body)
|
||||
self.assertNotIn('Last Modified', body)
|
||||
self.assertEqual(sorted(body['Errors']), [
|
||||
['/cont/object', 'Etag Mismatch'],
|
||||
[quote(u'/cont/object\u2661'.encode('utf8')), 'Size Mismatch'],
|
||||
])
|
||||
|
||||
@patch('swift.common.middleware.slo.time')
|
||||
def test_handle_multipart_long_running_put_bad_etag_json(self, mock_time):
|
||||
mock_time.time.side_effect = [
|
||||
0, # start time
|
||||
11, # first segment's slow
|
||||
22, # second segment's also slow
|
||||
]
|
||||
test_json_data = json.dumps([{'path': u'/cont/object\u2661',
|
||||
'etag': 'etagoftheobjectsegment',
|
||||
'size_bytes': 100},
|
||||
{'path': '/cont/object',
|
||||
'etag': 'etagoftheobjectsegment',
|
||||
'size_bytes': 100}])
|
||||
req = Request.blank(
|
||||
'/v1/AUTH_test/c/man?multipart-manifest=put&heartbeat=on',
|
||||
environ={'REQUEST_METHOD': 'PUT'},
|
||||
headers={'Accept': 'application/json', 'ETag': 'bad etag'},
|
||||
body=test_json_data)
|
||||
|
||||
status, headers, body = self.call_slo(req)
|
||||
self.assertEqual('202 Accepted', status)
|
||||
headers_found = [h.lower() for h, v in headers]
|
||||
self.assertNotIn('etag', headers_found)
|
||||
body = ''.join(body)
|
||||
self.assertTrue(body.startswith(' \r\n\r\n'),
|
||||
'Expected body to start with three spaces and two '
|
||||
'blank lines; got %r' % body)
|
||||
body = json.loads(body)
|
||||
self.assertEqual(body['Response Status'], '422 Unprocessable Entity')
|
||||
self.assertEqual('Unprocessable Entity\nUnable to process the '
|
||||
'contained instructions', body['Response Body'])
|
||||
self.assertNotIn('Etag', body)
|
||||
self.assertNotIn('Last Modified', body)
|
||||
self.assertEqual(body['Errors'], [])
|
||||
|
||||
def test_manifest_put_no_etag_success(self):
|
||||
req = Request.blank(
|
||||
'/v1/AUTH_test/c/man?multipart-manifest=put',
|
||||
@ -476,10 +681,10 @@ class TestSloPutManifest(SloTestCase):
|
||||
self.assertEqual(resp.status_int, 422)
|
||||
|
||||
def test_handle_multipart_put_disallow_empty_first_segment(self):
|
||||
test_json_data = json.dumps([{'path': '/cont/object',
|
||||
test_json_data = json.dumps([{'path': '/cont/small_object',
|
||||
'etag': 'etagoftheobjectsegment',
|
||||
'size_bytes': 0},
|
||||
{'path': '/cont/small_object',
|
||||
{'path': '/cont/object',
|
||||
'etag': 'etagoftheobjectsegment',
|
||||
'size_bytes': 100}])
|
||||
req = Request.blank('/v1/a/c/o?multipart-manifest=put',
|
||||
@ -487,17 +692,17 @@ class TestSloPutManifest(SloTestCase):
|
||||
status, headers, body = self.call_slo(req)
|
||||
self.assertEqual(status, '400 Bad Request')
|
||||
|
||||
def test_handle_multipart_put_disallow_empty_last_segment(self):
|
||||
def test_handle_multipart_put_allow_empty_last_segment(self):
|
||||
test_json_data = json.dumps([{'path': '/cont/object',
|
||||
'etag': 'etagoftheobjectsegment',
|
||||
'size_bytes': 100},
|
||||
{'path': '/cont/small_object',
|
||||
{'path': '/cont/empty_object',
|
||||
'etag': 'etagoftheobjectsegment',
|
||||
'size_bytes': 0}])
|
||||
req = Request.blank('/v1/a/c/o?multipart-manifest=put',
|
||||
req = Request.blank('/v1/AUTH_test/c/man?multipart-manifest=put',
|
||||
method='PUT', body=test_json_data)
|
||||
status, headers, body = self.call_slo(req)
|
||||
self.assertEqual(status, '400 Bad Request')
|
||||
self.assertEqual(status, '201 Created')
|
||||
|
||||
def test_handle_multipart_put_success_unicode(self):
|
||||
test_json_data = json.dumps([{'path': u'/cont/object\u2661',
|
||||
@ -2473,15 +2678,19 @@ class TestSloGetManifest(SloTestCase):
|
||||
req = Request.blank(
|
||||
'/v1/AUTH_test/gettest/man1',
|
||||
environ={'REQUEST_METHOD': 'GET'})
|
||||
status, headers, body, exc = self.call_slo(req, expect_exception=True)
|
||||
status, headers, body = self.call_slo(req)
|
||||
headers = HeaderKeyDict(headers)
|
||||
|
||||
self.assertIsInstance(exc, ListingIterError)
|
||||
# we don't know at header-sending time that things are going to go
|
||||
# wrong, so we end up with a 200 and a truncated body
|
||||
self.assertEqual(status, '200 OK')
|
||||
self.assertEqual(body, ('body01body02body03body04body05' +
|
||||
'body06body07body08body09body10'))
|
||||
# but the error shows up in logs
|
||||
self.assertEqual(self.slo.logger.get_lines_for_level('error'), [
|
||||
"While processing manifest '/v1/AUTH_test/gettest/man1', "
|
||||
"max recursion depth was exceeded"
|
||||
])
|
||||
# make sure we didn't keep asking for segments
|
||||
self.assertEqual(self.app.call_count, 20)
|
||||
|
||||
@ -2592,10 +2801,10 @@ class TestSloGetManifest(SloTestCase):
|
||||
|
||||
self.assertEqual(status, '409 Conflict')
|
||||
self.assertEqual(self.app.call_count, 10)
|
||||
error_lines = self.slo.logger.get_lines_for_level('error')
|
||||
self.assertEqual(len(error_lines), 1)
|
||||
self.assertTrue(error_lines[0].startswith(
|
||||
'ERROR: An error occurred while retrieving segments'))
|
||||
self.assertEqual(self.slo.logger.get_lines_for_level('error'), [
|
||||
"While processing manifest '/v1/AUTH_test/gettest/man1', "
|
||||
"max recursion depth was exceeded"
|
||||
])
|
||||
|
||||
def test_get_with_if_modified_since(self):
|
||||
# It's important not to pass the If-[Un]Modified-Since header to the
|
||||
@ -2606,7 +2815,8 @@ class TestSloGetManifest(SloTestCase):
|
||||
environ={'REQUEST_METHOD': 'GET'},
|
||||
headers={'If-Modified-Since': 'Wed, 12 Feb 2014 22:24:52 GMT',
|
||||
'If-Unmodified-Since': 'Thu, 13 Feb 2014 23:25:53 GMT'})
|
||||
status, headers, body, exc = self.call_slo(req, expect_exception=True)
|
||||
status, headers, body = self.call_slo(req)
|
||||
self.assertEqual(self.slo.logger.get_lines_for_level('error'), [])
|
||||
|
||||
for _, _, hdrs in self.app.calls_with_headers[1:]:
|
||||
self.assertFalse('If-Modified-Since' in hdrs)
|
||||
@ -2619,11 +2829,14 @@ class TestSloGetManifest(SloTestCase):
|
||||
req = Request.blank(
|
||||
'/v1/AUTH_test/gettest/manifest-abcd',
|
||||
environ={'REQUEST_METHOD': 'GET'})
|
||||
status, headers, body, exc = self.call_slo(req, expect_exception=True)
|
||||
status, headers, body = self.call_slo(req)
|
||||
headers = HeaderKeyDict(headers)
|
||||
|
||||
self.assertIsInstance(exc, SegmentError)
|
||||
self.assertEqual(status, '200 OK')
|
||||
self.assertEqual(self.slo.logger.get_lines_for_level('error'), [
|
||||
'While processing manifest /v1/AUTH_test/gettest/manifest-abcd, '
|
||||
'got 401 while retrieving /v1/AUTH_test/gettest/c_15'
|
||||
])
|
||||
self.assertEqual(self.app.calls, [
|
||||
('GET', '/v1/AUTH_test/gettest/manifest-abcd'),
|
||||
('GET', '/v1/AUTH_test/gettest/manifest-bc'),
|
||||
@ -2638,11 +2851,15 @@ class TestSloGetManifest(SloTestCase):
|
||||
req = Request.blank(
|
||||
'/v1/AUTH_test/gettest/manifest-abcd',
|
||||
environ={'REQUEST_METHOD': 'GET'})
|
||||
status, headers, body, exc = self.call_slo(req, expect_exception=True)
|
||||
status, headers, body = self.call_slo(req)
|
||||
|
||||
self.assertIsInstance(exc, ListingIterError)
|
||||
self.assertEqual("200 OK", status)
|
||||
self.assertEqual("aaaaa", body)
|
||||
self.assertEqual(self.slo.logger.get_lines_for_level('error'), [
|
||||
'while fetching /v1/AUTH_test/gettest/manifest-abcd, GET of '
|
||||
'submanifest /v1/AUTH_test/gettest/manifest-bc failed with '
|
||||
'status 401'
|
||||
])
|
||||
self.assertEqual(self.app.calls, [
|
||||
('GET', '/v1/AUTH_test/gettest/manifest-abcd'),
|
||||
# This one has the error, and so is the last one we fetch.
|
||||
@ -2672,10 +2889,11 @@ class TestSloGetManifest(SloTestCase):
|
||||
status, headers, body = self.call_slo(req)
|
||||
|
||||
self.assertEqual('409 Conflict', status)
|
||||
error_lines = self.slo.logger.get_lines_for_level('error')
|
||||
self.assertEqual(len(error_lines), 1)
|
||||
self.assertTrue(error_lines[0].startswith(
|
||||
'ERROR: An error occurred while retrieving segments'))
|
||||
self.assertEqual(self.slo.logger.get_lines_for_level('error'), [
|
||||
'while fetching /v1/AUTH_test/gettest/manifest-manifest-a, GET '
|
||||
'of submanifest /v1/AUTH_test/gettest/manifest-a failed with '
|
||||
'status 403'
|
||||
])
|
||||
|
||||
def test_invalid_json_submanifest(self):
|
||||
self.app.register(
|
||||
@ -2688,11 +2906,15 @@ class TestSloGetManifest(SloTestCase):
|
||||
req = Request.blank(
|
||||
'/v1/AUTH_test/gettest/manifest-abcd',
|
||||
environ={'REQUEST_METHOD': 'GET'})
|
||||
status, headers, body, exc = self.call_slo(req, expect_exception=True)
|
||||
status, headers, body = self.call_slo(req)
|
||||
|
||||
self.assertIsInstance(exc, ListingIterError)
|
||||
self.assertEqual('200 OK', status)
|
||||
self.assertEqual(body, 'aaaaa')
|
||||
self.assertEqual(self.slo.logger.get_lines_for_level('error'), [
|
||||
'while fetching /v1/AUTH_test/gettest/manifest-abcd, '
|
||||
'JSON-decoding of submanifest /v1/AUTH_test/gettest/manifest-bc '
|
||||
'failed with No JSON object could be decoded'
|
||||
])
|
||||
|
||||
def test_mismatched_etag(self):
|
||||
self.app.register(
|
||||
@ -2709,11 +2931,14 @@ class TestSloGetManifest(SloTestCase):
|
||||
req = Request.blank(
|
||||
'/v1/AUTH_test/gettest/manifest-a-b-badetag-c',
|
||||
environ={'REQUEST_METHOD': 'GET'})
|
||||
status, headers, body, exc = self.call_slo(req, expect_exception=True)
|
||||
status, headers, body = self.call_slo(req)
|
||||
|
||||
self.assertIsInstance(exc, SegmentError)
|
||||
self.assertEqual('200 OK', status)
|
||||
self.assertEqual(body, 'aaaaa')
|
||||
self.assertEqual(self.slo.logger.get_lines_for_level('error'), [
|
||||
'Object segment no longer valid: /v1/AUTH_test/gettest/b_10 '
|
||||
'etag: 82136b4240d6ce4ea7d03e51469a393b != wrong! or 10 != 10.'
|
||||
])
|
||||
|
||||
def test_mismatched_size(self):
|
||||
self.app.register(
|
||||
@ -2730,11 +2955,15 @@ class TestSloGetManifest(SloTestCase):
|
||||
req = Request.blank(
|
||||
'/v1/AUTH_test/gettest/manifest-a-b-badsize-c',
|
||||
environ={'REQUEST_METHOD': 'GET'})
|
||||
status, headers, body, exc = self.call_slo(req, expect_exception=True)
|
||||
status, headers, body = self.call_slo(req)
|
||||
|
||||
self.assertIsInstance(exc, SegmentError)
|
||||
self.assertEqual('200 OK', status)
|
||||
self.assertEqual(body, 'aaaaa')
|
||||
self.assertEqual(self.slo.logger.get_lines_for_level('error'), [
|
||||
'Object segment no longer valid: /v1/AUTH_test/gettest/b_10 '
|
||||
'etag: 82136b4240d6ce4ea7d03e51469a393b != '
|
||||
'82136b4240d6ce4ea7d03e51469a393b or 10 != 999999.'
|
||||
])
|
||||
|
||||
def test_first_segment_mismatched_etag(self):
|
||||
self.app.register('GET', '/v1/AUTH_test/gettest/manifest-badetag',
|
||||
@ -2750,10 +2979,10 @@ class TestSloGetManifest(SloTestCase):
|
||||
status, headers, body = self.call_slo(req)
|
||||
|
||||
self.assertEqual('409 Conflict', status)
|
||||
error_lines = self.slo.logger.get_lines_for_level('error')
|
||||
self.assertEqual(len(error_lines), 1)
|
||||
self.assertTrue(error_lines[0].startswith(
|
||||
'ERROR: An error occurred while retrieving segments'))
|
||||
self.assertEqual(self.slo.logger.get_lines_for_level('error'), [
|
||||
'Object segment no longer valid: /v1/AUTH_test/gettest/a_5 '
|
||||
'etag: 594f803b380a41396ed63dca39503542 != wrong! or 5 != 5.'
|
||||
])
|
||||
|
||||
def test_first_segment_mismatched_size(self):
|
||||
self.app.register('GET', '/v1/AUTH_test/gettest/manifest-badsize',
|
||||
@ -2769,10 +2998,11 @@ class TestSloGetManifest(SloTestCase):
|
||||
status, headers, body = self.call_slo(req)
|
||||
|
||||
self.assertEqual('409 Conflict', status)
|
||||
error_lines = self.slo.logger.get_lines_for_level('error')
|
||||
self.assertEqual(len(error_lines), 1)
|
||||
self.assertTrue(error_lines[0].startswith(
|
||||
'ERROR: An error occurred while retrieving segments'))
|
||||
self.assertEqual(self.slo.logger.get_lines_for_level('error'), [
|
||||
'Object segment no longer valid: /v1/AUTH_test/gettest/a_5 '
|
||||
'etag: 594f803b380a41396ed63dca39503542 != '
|
||||
'594f803b380a41396ed63dca39503542 or 5 != 999999.'
|
||||
])
|
||||
|
||||
@patch('swift.common.request_helpers.time')
|
||||
def test_download_takes_too_long(self, mock_time):
|
||||
@ -2791,11 +3021,13 @@ class TestSloGetManifest(SloTestCase):
|
||||
'/v1/AUTH_test/gettest/manifest-abcd',
|
||||
environ={'REQUEST_METHOD': 'GET'})
|
||||
|
||||
status, headers, body, exc = self.call_slo(
|
||||
req, expect_exception=True)
|
||||
status, headers, body = self.call_slo(req)
|
||||
|
||||
self.assertIsInstance(exc, SegmentError)
|
||||
self.assertEqual(status, '200 OK')
|
||||
self.assertEqual(self.slo.logger.get_lines_for_level('error'), [
|
||||
'While processing manifest /v1/AUTH_test/gettest/manifest-abcd, '
|
||||
'max LO GET time of 86400s exceeded'
|
||||
])
|
||||
self.assertEqual(self.app.calls, [
|
||||
('GET', '/v1/AUTH_test/gettest/manifest-abcd'),
|
||||
('GET', '/v1/AUTH_test/gettest/manifest-bc'),
|
||||
@ -2820,10 +3052,11 @@ class TestSloGetManifest(SloTestCase):
|
||||
status, headers, body = self.call_slo(req)
|
||||
|
||||
self.assertEqual('409 Conflict', status)
|
||||
error_lines = self.slo.logger.get_lines_for_level('error')
|
||||
self.assertEqual(len(error_lines), 1)
|
||||
self.assertTrue(error_lines[0].startswith(
|
||||
'ERROR: An error occurred while retrieving segments'))
|
||||
self.assertEqual(self.slo.logger.get_lines_for_level('error'), [
|
||||
'While processing manifest /v1/AUTH_test/gettest/'
|
||||
'manifest-not-exists, got 404 while retrieving /v1/AUTH_test/'
|
||||
'gettest/not_exists_obj'
|
||||
])
|
||||
|
||||
|
||||
class TestSloConditionalGetOldManifest(SloTestCase):
|
||||
@ -3093,6 +3326,35 @@ class TestSwiftInfo(unittest.TestCase):
|
||||
self.assertEqual(swift_info['slo'].get('min_segment_size'), 1)
|
||||
self.assertEqual(swift_info['slo'].get('max_manifest_size'),
|
||||
mware.max_manifest_size)
|
||||
self.assertEqual(1000, mware.max_manifest_segments)
|
||||
self.assertEqual(2097152, mware.max_manifest_size)
|
||||
self.assertEqual(1048576, mware.rate_limit_under_size)
|
||||
self.assertEqual(10, mware.rate_limit_after_segment)
|
||||
self.assertEqual(1, mware.rate_limit_segments_per_sec)
|
||||
self.assertEqual(10, mware.yield_frequency)
|
||||
self.assertEqual(2, mware.concurrency)
|
||||
self.assertEqual(2, mware.bulk_deleter.delete_concurrency)
|
||||
|
||||
def test_registered_non_defaults(self):
|
||||
conf = dict(
|
||||
max_manifest_segments=500, max_manifest_size=1048576,
|
||||
rate_limit_under_size=2097152, rate_limit_after_segment=20,
|
||||
rate_limit_segments_per_sec=2, yield_frequency=5, concurrency=1,
|
||||
delete_concurrency=3)
|
||||
mware = slo.filter_factory(conf)('have to pass in an app')
|
||||
swift_info = utils.get_swift_info()
|
||||
self.assertTrue('slo' in swift_info)
|
||||
self.assertEqual(swift_info['slo'].get('max_manifest_segments'), 500)
|
||||
self.assertEqual(swift_info['slo'].get('min_segment_size'), 1)
|
||||
self.assertEqual(swift_info['slo'].get('max_manifest_size'), 1048576)
|
||||
self.assertEqual(500, mware.max_manifest_segments)
|
||||
self.assertEqual(1048576, mware.max_manifest_size)
|
||||
self.assertEqual(2097152, mware.rate_limit_under_size)
|
||||
self.assertEqual(20, mware.rate_limit_after_segment)
|
||||
self.assertEqual(2, mware.rate_limit_segments_per_sec)
|
||||
self.assertEqual(5, mware.yield_frequency)
|
||||
self.assertEqual(1, mware.concurrency)
|
||||
self.assertEqual(3, mware.bulk_deleter.delete_concurrency)
|
||||
|
||||
if __name__ == '__main__':
|
||||
unittest.main()
|
||||
|
@ -69,6 +69,7 @@ class TestRingBuilder(unittest.TestCase):
|
||||
self.assertEqual(rb.devs, [])
|
||||
self.assertFalse(rb.devs_changed)
|
||||
self.assertEqual(rb.version, 0)
|
||||
self.assertIsNotNone(rb._last_part_moves)
|
||||
|
||||
def test_overlarge_part_powers(self):
|
||||
expected_msg = 'part_power must be at most 32 (was 33)'
|
||||
@ -841,6 +842,25 @@ class TestRingBuilder(unittest.TestCase):
|
||||
}
|
||||
self.assertEqual(parts_with_moved_count, expected)
|
||||
|
||||
def test_ever_rebalanced(self):
|
||||
rb = ring.RingBuilder(8, 3, 1)
|
||||
rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 1,
|
||||
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda1'})
|
||||
rb.add_dev({'id': 1, 'region': 0, 'zone': 1, 'weight': 1,
|
||||
'ip': '127.0.0.1', 'port': 10001, 'device': 'sda1'})
|
||||
rb.add_dev({'id': 2, 'region': 0, 'zone': 2, 'weight': 1,
|
||||
'ip': '127.0.0.1', 'port': 10002, 'device': 'sda1'})
|
||||
self.assertFalse(rb.ever_rebalanced)
|
||||
builder_file = os.path.join(self.testdir, 'test.buider')
|
||||
rb.save(builder_file)
|
||||
rb = ring.RingBuilder.load(builder_file)
|
||||
self.assertFalse(rb.ever_rebalanced)
|
||||
rb.rebalance()
|
||||
self.assertTrue(rb.ever_rebalanced)
|
||||
rb.save(builder_file)
|
||||
rb = ring.RingBuilder.load(builder_file)
|
||||
self.assertTrue(rb.ever_rebalanced)
|
||||
|
||||
def test_rerebalance(self):
|
||||
rb = ring.RingBuilder(8, 3, 1)
|
||||
rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 1,
|
||||
@ -849,13 +869,16 @@ class TestRingBuilder(unittest.TestCase):
|
||||
'ip': '127.0.0.1', 'port': 10001, 'device': 'sda1'})
|
||||
rb.add_dev({'id': 2, 'region': 0, 'zone': 2, 'weight': 1,
|
||||
'ip': '127.0.0.1', 'port': 10002, 'device': 'sda1'})
|
||||
self.assertFalse(rb.ever_rebalanced)
|
||||
rb.rebalance()
|
||||
self.assertTrue(rb.ever_rebalanced)
|
||||
counts = self._partition_counts(rb)
|
||||
self.assertEqual(counts, {0: 256, 1: 256, 2: 256})
|
||||
rb.add_dev({'id': 3, 'region': 0, 'zone': 3, 'weight': 1,
|
||||
'ip': '127.0.0.1', 'port': 10003, 'device': 'sda1'})
|
||||
rb.pretend_min_part_hours_passed()
|
||||
rb.rebalance()
|
||||
self.assertTrue(rb.ever_rebalanced)
|
||||
counts = self._partition_counts(rb)
|
||||
self.assertEqual(counts, {0: 192, 1: 192, 2: 192, 3: 192})
|
||||
rb.set_dev_weight(3, 100)
|
||||
|
@ -22,6 +22,7 @@ import tempfile
|
||||
import unittest
|
||||
import shutil
|
||||
import copy
|
||||
import time
|
||||
|
||||
from collections import defaultdict, Counter
|
||||
|
||||
@ -965,8 +966,9 @@ class TestComposeLoadComponents(TestLoadComponents):
|
||||
|
||||
|
||||
class TestCooperativeRingBuilder(BaseTestCompositeBuilder):
|
||||
def _make_coop_builder(self, region, composite_builder, rebalance=False):
|
||||
rb = CooperativeRingBuilder(8, 3, 1, composite_builder)
|
||||
def _make_coop_builder(self, region, composite_builder, rebalance=False,
|
||||
min_part_hours=1):
|
||||
rb = CooperativeRingBuilder(8, 3, min_part_hours, composite_builder)
|
||||
if composite_builder._builders is None:
|
||||
composite_builder._builders = [rb]
|
||||
for i in range(3):
|
||||
@ -986,93 +988,231 @@ class TestCooperativeRingBuilder(BaseTestCompositeBuilder):
|
||||
for part2dev_id in builder._replica2part2dev
|
||||
for dev_id in part2dev_id)
|
||||
|
||||
def get_moved_parts(self, after, before):
|
||||
def uniqueness(dev):
|
||||
return dev['ip'], dev['port'], dev['device']
|
||||
moved_parts = set()
|
||||
for p in range(before.parts):
|
||||
if ({uniqueness(dev) for dev in before._devs_for_part(p)} !=
|
||||
{uniqueness(dev) for dev in after._devs_for_part(p)}):
|
||||
moved_parts.add(p)
|
||||
return moved_parts
|
||||
|
||||
def num_parts_can_move(self, builder):
|
||||
# note that can_part_move() gives consideration to the
|
||||
# _part_moved_bitmap which is only reset when a rebalance starts
|
||||
return len(
|
||||
[p for p in range(builder.parts)
|
||||
if super(CooperativeRingBuilder, builder)._can_part_move(p)])
|
||||
|
||||
@mock.patch('swift.common.ring.builder.time')
|
||||
def test_rebalance_respects_cobuilder_part_moves(self, mock_time):
|
||||
def do_rebalance(builder):
|
||||
old_part_devs = [builder._devs_for_part(part)
|
||||
for part in range(builder.parts)]
|
||||
num_moved, _, _ = builder.rebalance()
|
||||
moved_parts = {
|
||||
p for p in range(builder.parts)
|
||||
if old_part_devs[p] != builder._devs_for_part(p)}
|
||||
self.assertEqual(len(moved_parts), num_moved) # sanity check
|
||||
return num_moved, moved_parts
|
||||
|
||||
def num_parts_can_move(builder):
|
||||
# note that can_part_move() gives consideration to the
|
||||
# _part_moved_bitmap which is only reset when a rebalance starts
|
||||
return len(
|
||||
[p for p in range(builder.parts)
|
||||
if super(CooperativeRingBuilder, builder)._can_part_move(p)])
|
||||
|
||||
mock_time.return_value = 0
|
||||
def _check_rebalance_respects_cobuilder_part_moves(
|
||||
self, min_part_hours, mock_time):
|
||||
mock_time.return_value = now = int(time.time())
|
||||
builder_files = []
|
||||
cb = CompositeRingBuilder()
|
||||
rb1 = self._make_coop_builder(1, cb)
|
||||
rb2 = self._make_coop_builder(2, cb)
|
||||
rb3 = self._make_coop_builder(3, cb)
|
||||
cb._builders = [rb1, rb2, rb3]
|
||||
for i in (1, 2, 3):
|
||||
b = self._make_coop_builder(i, cb, min_part_hours=min_part_hours)
|
||||
fname = os.path.join(self.tmpdir, 'builder_%s.builder' % i)
|
||||
b.save(fname)
|
||||
builder_files.append(fname)
|
||||
builder_files, builders = cb.load_components(builder_files)
|
||||
|
||||
# all cobuilders can perform initial rebalance
|
||||
for rb in (rb1, rb2, rb3):
|
||||
rb.rebalance()
|
||||
actual = self._partition_counts(rb)
|
||||
exp = {0: 256, 1: 256, 2: 256}
|
||||
self.assertEqual(exp, actual,
|
||||
'Expected %s but got %s for region %s' %
|
||||
(exp, actual, next(rb._iter_devs())['region']))
|
||||
cb.rebalance()
|
||||
exp = {0: 256, 1: 256, 2: 256}
|
||||
self.assertEqual(exp, self._partition_counts(builders[0]))
|
||||
self.assertEqual(exp, self._partition_counts(builders[1]))
|
||||
self.assertEqual(exp, self._partition_counts(builders[2]))
|
||||
exp = min_part_hours * 3600
|
||||
self.assertEqual(exp, builders[0].min_part_seconds_left)
|
||||
self.assertEqual(exp, builders[1].min_part_seconds_left)
|
||||
self.assertEqual(exp, builders[2].min_part_seconds_left)
|
||||
|
||||
# jump forwards min_part_hours, both builders can move all parts
|
||||
mock_time.return_value = 3600
|
||||
self.add_dev(rb1)
|
||||
# sanity checks: rb1 and rb2 are both ready for rebalance
|
||||
self.assertEqual(0, rb2.min_part_seconds_left)
|
||||
self.assertEqual(0, rb1.min_part_seconds_left)
|
||||
# jump forwards min_part_hours
|
||||
now += min_part_hours * 3600
|
||||
mock_time.return_value = now
|
||||
old_builders = []
|
||||
for builder in builders:
|
||||
old_builder = CooperativeRingBuilder(8, 3, min_part_hours, None)
|
||||
old_builder.copy_from(copy.deepcopy(builder.to_dict()))
|
||||
old_builders.append(old_builder)
|
||||
|
||||
for builder in builders:
|
||||
self.add_dev(builder)
|
||||
# sanity checks: all builders are ready for rebalance
|
||||
self.assertEqual(0, builders[0].min_part_seconds_left)
|
||||
self.assertEqual(0, builders[1].min_part_seconds_left)
|
||||
self.assertEqual(0, builders[2].min_part_seconds_left)
|
||||
# ... but last_part_moves not yet updated to current epoch
|
||||
self.assertEqual(0, num_parts_can_move(rb1))
|
||||
self.assertEqual(0, num_parts_can_move(rb2))
|
||||
# rebalancing rb1 will update epoch for both builders' last_part_moves
|
||||
num_moved, rb1_parts_moved = do_rebalance(rb1)
|
||||
self.assertEqual(192, num_moved)
|
||||
self.assertEqual(self._partition_counts(rb1),
|
||||
if min_part_hours > 0:
|
||||
self.assertEqual(0, self.num_parts_can_move(builders[0]))
|
||||
self.assertEqual(0, self.num_parts_can_move(builders[1]))
|
||||
self.assertEqual(0, self.num_parts_can_move(builders[2]))
|
||||
|
||||
with mock.patch('swift.common.ring.composite_builder.shuffle',
|
||||
lambda x: x):
|
||||
cb.rebalance()
|
||||
|
||||
rb1_parts_moved = self.get_moved_parts(builders[0], old_builders[0])
|
||||
self.assertEqual(192, len(rb1_parts_moved))
|
||||
self.assertEqual(self._partition_counts(builders[0]),
|
||||
{0: 192, 1: 192, 2: 192, 3: 192})
|
||||
|
||||
rb2_parts_moved = self.get_moved_parts(builders[1], old_builders[1])
|
||||
self.assertEqual(64, len(rb2_parts_moved))
|
||||
counts = self._partition_counts(builders[1])
|
||||
self.assertEqual(counts[3], 64)
|
||||
self.assertEqual([234, 235, 235], sorted(counts.values()[:3]))
|
||||
self.assertFalse(rb2_parts_moved.intersection(rb1_parts_moved))
|
||||
|
||||
# rb3 can't rebalance - all parts moved while rebalancing rb1 and rb2
|
||||
self.assertEqual(
|
||||
0, len(self.get_moved_parts(builders[2], old_builders[2])))
|
||||
|
||||
# jump forwards min_part_hours, all builders can move all parts again,
|
||||
# so now rb2 should be able to further rebalance
|
||||
now += min_part_hours * 3600
|
||||
mock_time.return_value = now
|
||||
old_builders = []
|
||||
for builder in builders:
|
||||
old_builder = CooperativeRingBuilder(8, 3, min_part_hours, None)
|
||||
old_builder.copy_from(copy.deepcopy(builder.to_dict()))
|
||||
old_builders.append(old_builder)
|
||||
with mock.patch('swift.common.ring.composite_builder.shuffle',
|
||||
lambda x: x):
|
||||
cb.rebalance()
|
||||
|
||||
rb2_parts_moved = self.get_moved_parts(builders[1], old_builders[1])
|
||||
self.assertGreater(len(rb2_parts_moved), 64)
|
||||
self.assertGreater(self._partition_counts(builders[1])[3], 64)
|
||||
self.assertLess(self.num_parts_can_move(builders[2]), 256)
|
||||
self.assertEqual(256, self.num_parts_can_move(builders[0]))
|
||||
# and rb3 should also have been able to move some parts
|
||||
rb3_parts_moved = self.get_moved_parts(builders[2], old_builders[2])
|
||||
self.assertGreater(len(rb3_parts_moved), 0)
|
||||
self.assertFalse(rb3_parts_moved.intersection(rb2_parts_moved))
|
||||
|
||||
# but cobuilders will not prevent a new rb rebalancing for first time
|
||||
rb4 = self._make_coop_builder(4, cb, rebalance=False,
|
||||
min_part_hours=min_part_hours)
|
||||
builders.append(rb4)
|
||||
builder_files = []
|
||||
for i, builder in enumerate(builders):
|
||||
fname = os.path.join(self.tmpdir, 'builder_%s.builder' % i)
|
||||
builder.save(fname)
|
||||
builder_files.append(fname)
|
||||
cb = CompositeRingBuilder()
|
||||
builder_files, builders = cb.load_components(builder_files)
|
||||
cb.rebalance()
|
||||
self.assertEqual(256, len(self.get_moved_parts(builders[3], rb4)))
|
||||
|
||||
def test_rebalance_respects_cobuilder_part_moves(self):
|
||||
self._check_rebalance_respects_cobuilder_part_moves(1)
|
||||
self._check_rebalance_respects_cobuilder_part_moves(0)
|
||||
|
||||
@mock.patch('swift.common.ring.builder.time')
|
||||
def _check_rebalance_cobuilder_states(
|
||||
self, min_part_hours, mock_time):
|
||||
|
||||
@contextmanager
|
||||
def mock_rebalance():
|
||||
# wrap rebalance() in order to capture builder states before and
|
||||
# after each component rebalance
|
||||
orig_rebalance = RingBuilder.rebalance
|
||||
# a dict mapping builder -> (list of captured builder states)
|
||||
captured_builder_states = defaultdict(list)
|
||||
|
||||
def update_states():
|
||||
for b in cb._builders:
|
||||
rb = CooperativeRingBuilder(8, 3, min_part_hours, None)
|
||||
rb.copy_from(copy.deepcopy(b.to_dict()))
|
||||
rb._part_moved_bitmap = bytearray(b._part_moved_bitmap)
|
||||
captured_builder_states[b].append(rb)
|
||||
|
||||
def wrap_rebalance(builder_instance):
|
||||
update_states()
|
||||
results = orig_rebalance(builder_instance)
|
||||
update_states()
|
||||
return results
|
||||
|
||||
with mock.patch('swift.common.ring.RingBuilder.rebalance',
|
||||
wrap_rebalance):
|
||||
yield captured_builder_states
|
||||
|
||||
mock_time.return_value = now = int(time.time())
|
||||
builder_files = []
|
||||
cb = CompositeRingBuilder()
|
||||
for i in (1, 2, 3):
|
||||
b = self._make_coop_builder(i, cb, min_part_hours=min_part_hours)
|
||||
fname = os.path.join(self.tmpdir, 'builder_%s.builder' % i)
|
||||
b.save(fname)
|
||||
builder_files.append(fname)
|
||||
builder_files, builders = cb.load_components(builder_files)
|
||||
|
||||
# all cobuilders can perform initial rebalance
|
||||
cb.rebalance()
|
||||
# jump forwards min_part_hours
|
||||
now += min_part_hours * 3600
|
||||
mock_time.return_value = now
|
||||
for builder in builders:
|
||||
self.add_dev(builder)
|
||||
|
||||
with mock.patch('swift.common.ring.composite_builder.shuffle',
|
||||
lambda x: x):
|
||||
with mock_rebalance() as captured_states:
|
||||
cb.rebalance()
|
||||
|
||||
# sanity - state captured before and after each component rebalance
|
||||
self.assertEqual(len(builders), len(captured_states))
|
||||
for states in captured_states.values():
|
||||
self.assertEqual(2 * len(builders), len(states))
|
||||
# for each component we have a list of it's builder states
|
||||
rb1s = captured_states[builders[0]]
|
||||
rb2s = captured_states[builders[1]]
|
||||
rb3s = captured_states[builders[2]]
|
||||
|
||||
# rebalancing will update epoch for all builders' last_part_moves
|
||||
self.assertEqual(now, rb1s[0]._last_part_moves_epoch)
|
||||
self.assertEqual(now, rb2s[0]._last_part_moves_epoch)
|
||||
self.assertEqual(now, rb3s[0]._last_part_moves_epoch)
|
||||
# so, in state before any component rebalance, all can now move parts
|
||||
# N.B. num_parts_can_move gathers super class's (i.e. RingBuilder)
|
||||
# _can_part_move so that it doesn't refer cobuilders state.
|
||||
self.assertEqual(256, num_parts_can_move(rb2))
|
||||
self.assertEqual(64, num_parts_can_move(rb1))
|
||||
# _can_part_move so that it doesn't refer to cobuilders state.
|
||||
self.assertEqual(256, self.num_parts_can_move(rb1s[0]))
|
||||
self.assertEqual(256, self.num_parts_can_move(rb2s[0]))
|
||||
self.assertEqual(256, self.num_parts_can_move(rb3s[0]))
|
||||
|
||||
# after first component has been rebalanced it has moved parts
|
||||
self.assertEqual(64, self.num_parts_can_move(rb1s[1]))
|
||||
self.assertEqual(256, self.num_parts_can_move(rb2s[2]))
|
||||
self.assertEqual(256, self.num_parts_can_move(rb3s[2]))
|
||||
|
||||
rb1_parts_moved = self.get_moved_parts(rb1s[1], rb1s[0])
|
||||
self.assertEqual(192, len(rb1_parts_moved))
|
||||
self.assertEqual(self._partition_counts(rb1s[1]),
|
||||
{0: 192, 1: 192, 2: 192, 3: 192})
|
||||
|
||||
# rebalancing rb2 - rb2 in isolation could potentially move all parts
|
||||
# so would move 192 parts to new device, but it is constrained by rb1
|
||||
# only having 64 parts that can move
|
||||
self.add_dev(rb2)
|
||||
num_moved, rb2_parts_moved = do_rebalance(rb2)
|
||||
self.assertEqual(64, num_moved)
|
||||
counts = self._partition_counts(rb2)
|
||||
rb2_parts_moved = self.get_moved_parts(rb2s[3], rb2s[2])
|
||||
self.assertEqual(64, len(rb2_parts_moved))
|
||||
counts = self._partition_counts(rb2s[3])
|
||||
self.assertEqual(counts[3], 64)
|
||||
self.assertEqual([234, 235, 235], sorted(counts.values()[:3]))
|
||||
self.assertFalse(rb2_parts_moved.intersection(rb1_parts_moved))
|
||||
self.assertEqual(192, num_parts_can_move(rb2))
|
||||
self.assertEqual(64, num_parts_can_move(rb1))
|
||||
self.assertEqual(192, self.num_parts_can_move(rb2s[3]))
|
||||
self.assertEqual(64, self.num_parts_can_move(rb1s[3]))
|
||||
|
||||
# rb3 can't rebalance - all parts moved while rebalancing rb1 and rb2
|
||||
self.add_dev(rb3)
|
||||
num_moved, rb3_parts_moved = do_rebalance(rb3)
|
||||
self.assertEqual(0, num_moved)
|
||||
self.assertEqual(0, len(self.get_moved_parts(rb3s[5], rb3s[0])))
|
||||
|
||||
# jump forwards min_part_hours, both builders can move all parts again,
|
||||
# so now rb2 should be able to further rebalance
|
||||
mock_time.return_value = 7200
|
||||
do_rebalance(rb2)
|
||||
self.assertGreater(self._partition_counts(rb2)[3], 64)
|
||||
self.assertLess(num_parts_can_move(rb2), 256)
|
||||
self.assertEqual(256, num_parts_can_move(rb1)) # sanity check
|
||||
def test_rebalance_cobuilder_states(self):
|
||||
self._check_rebalance_cobuilder_states(1)
|
||||
self._check_rebalance_cobuilder_states(0)
|
||||
|
||||
# but cobuilders will not prevent a rb rebalancing for first time
|
||||
rb4 = self._make_coop_builder(4, cb, rebalance=False)
|
||||
cb._builders.append(rb4)
|
||||
num_moved, _, _ = rb4.rebalance()
|
||||
self.assertEqual(3 * 256, num_moved)
|
||||
|
||||
def test_rebalance_cobuilders(self):
|
||||
def _check_rebalance_cobuilders_calls(self, min_part_hours):
|
||||
# verify that co-builder methods are called during one builder's
|
||||
# rebalance
|
||||
@contextmanager
|
||||
@ -1107,41 +1247,37 @@ class TestCooperativeRingBuilder(BaseTestCompositeBuilder):
|
||||
fake_can_part_move):
|
||||
yield calls
|
||||
|
||||
# single component builder in parent builder
|
||||
cb = CompositeRingBuilder()
|
||||
rb1 = self._make_coop_builder(1, cb)
|
||||
with mock_update_last_part_moves() as update_calls:
|
||||
with mock_can_part_move() as can_part_move_calls:
|
||||
rb1.rebalance()
|
||||
self.assertEqual([rb1], update_calls)
|
||||
self.assertEqual([rb1], can_part_move_calls.keys())
|
||||
self.assertEqual(768, len(can_part_move_calls[rb1]))
|
||||
|
||||
# two component builders with same parent builder
|
||||
cb = CompositeRingBuilder()
|
||||
rb1 = self._make_coop_builder(1, cb)
|
||||
rb2 = self._make_coop_builder(2, cb)
|
||||
rb1 = self._make_coop_builder(1, cb, min_part_hours=min_part_hours)
|
||||
rb2 = self._make_coop_builder(2, cb, min_part_hours=min_part_hours)
|
||||
cb._builders = [rb1, rb2]
|
||||
# composite rebalance updates last_part_moves before any component
|
||||
# rebalance - after that expect no more updates
|
||||
with mock_update_last_part_moves() as update_calls:
|
||||
cb.update_last_part_moves()
|
||||
self.assertEqual(sorted([rb1, rb2]), sorted(update_calls))
|
||||
|
||||
with mock_update_last_part_moves() as update_calls:
|
||||
with mock_can_part_move() as can_part_move_calls:
|
||||
rb2.rebalance()
|
||||
# both builders get updated
|
||||
self.assertEqual(sorted([rb1, rb2]), sorted(update_calls))
|
||||
self.assertFalse(update_calls)
|
||||
# rb1 has never been rebalanced so no calls propagate from its
|
||||
# can_part_move method to to its superclass _can_part_move method
|
||||
# can_part_move method to its superclass _can_part_move method
|
||||
self.assertEqual([rb2], can_part_move_calls.keys())
|
||||
|
||||
with mock_update_last_part_moves() as update_calls:
|
||||
with mock_can_part_move() as can_part_move_calls:
|
||||
rb1.rebalance()
|
||||
# both builders get updated
|
||||
self.assertEqual(sorted([rb1, rb2]), sorted(update_calls))
|
||||
|
||||
self.assertFalse(update_calls)
|
||||
# rb1 is being rebalanced so gets checked, and rb2 also gets checked
|
||||
self.assertEqual(sorted([rb1, rb2]), sorted(can_part_move_calls))
|
||||
self.assertEqual(768, len(can_part_move_calls[rb1]))
|
||||
self.assertEqual(768, len(can_part_move_calls[rb2]))
|
||||
|
||||
def test_rebalance_cobuilders_calls(self):
|
||||
self._check_rebalance_cobuilders_calls(1)
|
||||
self._check_rebalance_cobuilders_calls(0)
|
||||
|
||||
def test_save_then_load(self):
|
||||
cb = CompositeRingBuilder()
|
||||
coop_rb = self._make_coop_builder(1, cb, rebalance=True)
|
||||
|
@ -212,18 +212,10 @@ class TestRing(TestRingBase):
|
||||
self.assertEqual(self.ring.reload_time, self.intended_reload_time)
|
||||
self.assertEqual(self.ring.serialized_path, self.testgz)
|
||||
# test invalid endcap
|
||||
_orig_hash_path_suffix = utils.HASH_PATH_SUFFIX
|
||||
_orig_hash_path_prefix = utils.HASH_PATH_PREFIX
|
||||
_orig_swift_conf_file = utils.SWIFT_CONF_FILE
|
||||
try:
|
||||
utils.HASH_PATH_SUFFIX = ''
|
||||
utils.HASH_PATH_PREFIX = ''
|
||||
utils.SWIFT_CONF_FILE = ''
|
||||
with mock.patch.object(utils, 'HASH_PATH_SUFFIX', ''), \
|
||||
mock.patch.object(utils, 'HASH_PATH_PREFIX', ''), \
|
||||
mock.patch.object(utils, 'SWIFT_CONF_FILE', ''):
|
||||
self.assertRaises(SystemExit, ring.Ring, self.testdir, 'whatever')
|
||||
finally:
|
||||
utils.HASH_PATH_SUFFIX = _orig_hash_path_suffix
|
||||
utils.HASH_PATH_PREFIX = _orig_hash_path_prefix
|
||||
utils.SWIFT_CONF_FILE = _orig_swift_conf_file
|
||||
|
||||
def test_has_changed(self):
|
||||
self.assertFalse(self.ring.has_changed())
|
||||
|
@ -98,9 +98,8 @@ class TestBufferedHTTP(unittest.TestCase):
|
||||
raise Exception(err)
|
||||
|
||||
def test_nonstr_header_values(self):
|
||||
origHTTPSConnection = bufferedhttp.HTTPSConnection
|
||||
bufferedhttp.HTTPSConnection = MockHTTPSConnection
|
||||
try:
|
||||
with mock.patch('swift.common.bufferedhttp.HTTPSConnection',
|
||||
MockHTTPSConnection):
|
||||
bufferedhttp.http_connect(
|
||||
'127.0.0.1', 8080, 'sda', 1, 'GET', '/',
|
||||
headers={'x-one': '1', 'x-two': 2, 'x-three': 3.0,
|
||||
@ -109,8 +108,6 @@ class TestBufferedHTTP(unittest.TestCase):
|
||||
'127.0.0.1', 8080, 'GET', '/',
|
||||
headers={'x-one': '1', 'x-two': 2, 'x-three': 3.0,
|
||||
'x-four': {'crazy': 'value'}}, ssl=True)
|
||||
finally:
|
||||
bufferedhttp.HTTPSConnection = origHTTPSConnection
|
||||
|
||||
def test_unicode_values(self):
|
||||
with mock.patch('swift.common.bufferedhttp.HTTPSConnection',
|
||||
|
@ -139,13 +139,16 @@ class TestRunDaemon(unittest.TestCase):
|
||||
|
||||
def test_run_daemon(self):
|
||||
sample_conf = "[my-daemon]\nuser = %s\n" % getuser()
|
||||
with tmpfile(sample_conf) as conf_file:
|
||||
with mock.patch.dict('os.environ', {'TZ': ''}):
|
||||
with mock.patch('time.tzset') as mock_tzset:
|
||||
daemon.run_daemon(MyDaemon, conf_file)
|
||||
self.assertTrue(MyDaemon.forever_called)
|
||||
self.assertEqual(os.environ['TZ'], 'UTC+0')
|
||||
self.assertEqual(mock_tzset.mock_calls, [mock.call()])
|
||||
with tmpfile(sample_conf) as conf_file, \
|
||||
mock.patch('swift.common.daemon.use_hub') as mock_use_hub:
|
||||
with mock.patch.dict('os.environ', {'TZ': ''}), \
|
||||
mock.patch('time.tzset') as mock_tzset:
|
||||
daemon.run_daemon(MyDaemon, conf_file)
|
||||
self.assertTrue(MyDaemon.forever_called)
|
||||
self.assertEqual(os.environ['TZ'], 'UTC+0')
|
||||
self.assertEqual(mock_tzset.mock_calls, [mock.call()])
|
||||
self.assertEqual(mock_use_hub.mock_calls,
|
||||
[mock.call(utils.get_hub())])
|
||||
daemon.run_daemon(MyDaemon, conf_file, once=True)
|
||||
self.assertEqual(MyDaemon.once_called, True)
|
||||
|
||||
@ -182,7 +185,8 @@ class TestRunDaemon(unittest.TestCase):
|
||||
self.assertEqual(18000, time.timezone)
|
||||
|
||||
sample_conf = "[my-daemon]\nuser = %s\n" % getuser()
|
||||
with tmpfile(sample_conf) as conf_file:
|
||||
with tmpfile(sample_conf) as conf_file, \
|
||||
mock.patch('swift.common.daemon.use_hub'):
|
||||
daemon.run_daemon(MyDaemon, conf_file)
|
||||
self.assertFalse(MyDaemon.once_called)
|
||||
self.assertTrue(MyDaemon.forever_called)
|
||||
|
@ -38,7 +38,6 @@ from swift.common.swob import HTTPException
|
||||
|
||||
from test import unit
|
||||
from test.unit.common.test_db import ExampleBroker
|
||||
from test.unit import with_tempdir
|
||||
|
||||
|
||||
TEST_ACCOUNT_NAME = 'a c t'
|
||||
@ -525,13 +524,14 @@ class TestDBReplicator(unittest.TestCase):
|
||||
self.assertEqual(replicator.mount_check, True)
|
||||
self.assertEqual(replicator.port, 6200)
|
||||
|
||||
def mock_ismount(path):
|
||||
self.assertEqual(path,
|
||||
os.path.join(replicator.root,
|
||||
replicator.ring.devs[0]['device']))
|
||||
return False
|
||||
def mock_check_drive(root, device, mount_check):
|
||||
self.assertEqual(root, replicator.root)
|
||||
self.assertEqual(device, replicator.ring.devs[0]['device'])
|
||||
self.assertEqual(mount_check, True)
|
||||
return None
|
||||
|
||||
self._patch(patch.object, db_replicator, 'ismount', mock_ismount)
|
||||
self._patch(patch.object, db_replicator, 'check_drive',
|
||||
mock_check_drive)
|
||||
replicator.run_once()
|
||||
|
||||
self.assertEqual(
|
||||
@ -560,7 +560,6 @@ class TestDBReplicator(unittest.TestCase):
|
||||
|
||||
self._patch(patch.object, db_replicator, 'whataremyips',
|
||||
lambda *a, **kw: ['1.1.1.1'])
|
||||
self._patch(patch.object, db_replicator, 'ismount', lambda *args: True)
|
||||
self._patch(patch.object, db_replicator, 'unlink_older_than',
|
||||
mock_unlink_older_than)
|
||||
self._patch(patch.object, db_replicator, 'roundrobin_datadirs',
|
||||
@ -568,13 +567,19 @@ class TestDBReplicator(unittest.TestCase):
|
||||
self._patch(patch.object, replicator.cpool, 'spawn_n', mock_spawn_n)
|
||||
|
||||
with patch('swift.common.db_replicator.os',
|
||||
new=mock.MagicMock(wraps=os)) as mock_os:
|
||||
new=mock.MagicMock(wraps=os)) as mock_os, \
|
||||
unit.mock_check_drive(ismount=True) as mocks:
|
||||
mock_os.path.isdir.return_value = True
|
||||
replicator.run_once()
|
||||
mock_os.path.isdir.assert_called_with(
|
||||
os.path.join(replicator.root,
|
||||
replicator.ring.devs[0]['device'],
|
||||
replicator.datadir))
|
||||
self.assertEqual([
|
||||
mock.call(os.path.join(
|
||||
replicator.root,
|
||||
replicator.ring.devs[0]['device'])),
|
||||
], mocks['ismount'].call_args_list)
|
||||
|
||||
def test_usync(self):
|
||||
fake_http = ReplHttp()
|
||||
@ -879,27 +884,28 @@ class TestDBReplicator(unittest.TestCase):
|
||||
'/some/foo/some_device/deeper/and/deeper'))
|
||||
|
||||
def test_dispatch_no_arg_pop(self):
|
||||
rpc = db_replicator.ReplicatorRpc('/', '/', FakeBroker, False)
|
||||
response = rpc.dispatch(('a',), 'arg')
|
||||
rpc = db_replicator.ReplicatorRpc('/', '/', FakeBroker,
|
||||
mount_check=False)
|
||||
with unit.mock_check_drive(isdir=True):
|
||||
response = rpc.dispatch(('a',), 'arg')
|
||||
self.assertEqual('Invalid object type', response.body)
|
||||
self.assertEqual(400, response.status_int)
|
||||
|
||||
def test_dispatch_drive_not_mounted(self):
|
||||
rpc = db_replicator.ReplicatorRpc('/', '/', FakeBroker, True)
|
||||
rpc = db_replicator.ReplicatorRpc('/', '/', FakeBroker,
|
||||
mount_check=True)
|
||||
|
||||
def mock_ismount(path):
|
||||
self.assertEqual('/drive', path)
|
||||
return False
|
||||
|
||||
self._patch(patch.object, db_replicator, 'ismount', mock_ismount)
|
||||
|
||||
response = rpc.dispatch(('drive', 'part', 'hash'), ['method'])
|
||||
with unit.mock_check_drive() as mocks:
|
||||
response = rpc.dispatch(('drive', 'part', 'hash'), ['method'])
|
||||
self.assertEqual([mock.call(os.path.join('/drive'))],
|
||||
mocks['ismount'].call_args_list)
|
||||
|
||||
self.assertEqual('507 drive is not mounted', response.status)
|
||||
self.assertEqual(507, response.status_int)
|
||||
|
||||
def test_dispatch_unexpected_operation_db_does_not_exist(self):
|
||||
rpc = db_replicator.ReplicatorRpc('/', '/', FakeBroker, False)
|
||||
rpc = db_replicator.ReplicatorRpc('/', '/', FakeBroker,
|
||||
mount_check=False)
|
||||
|
||||
def mock_mkdirs(path):
|
||||
self.assertEqual('/drive/tmp', path)
|
||||
@ -907,7 +913,8 @@ class TestDBReplicator(unittest.TestCase):
|
||||
self._patch(patch.object, db_replicator, 'mkdirs', mock_mkdirs)
|
||||
|
||||
with patch('swift.common.db_replicator.os',
|
||||
new=mock.MagicMock(wraps=os)) as mock_os:
|
||||
new=mock.MagicMock(wraps=os)) as mock_os, \
|
||||
unit.mock_check_drive(isdir=True):
|
||||
mock_os.path.exists.return_value = False
|
||||
response = rpc.dispatch(('drive', 'part', 'hash'), ['unexpected'])
|
||||
|
||||
@ -915,7 +922,8 @@ class TestDBReplicator(unittest.TestCase):
|
||||
self.assertEqual(404, response.status_int)
|
||||
|
||||
def test_dispatch_operation_unexpected(self):
|
||||
rpc = db_replicator.ReplicatorRpc('/', '/', FakeBroker, False)
|
||||
rpc = db_replicator.ReplicatorRpc('/', '/', FakeBroker,
|
||||
mount_check=False)
|
||||
|
||||
self._patch(patch.object, db_replicator, 'mkdirs', lambda *args: True)
|
||||
|
||||
@ -927,7 +935,8 @@ class TestDBReplicator(unittest.TestCase):
|
||||
rpc.unexpected = unexpected_method
|
||||
|
||||
with patch('swift.common.db_replicator.os',
|
||||
new=mock.MagicMock(wraps=os)) as mock_os:
|
||||
new=mock.MagicMock(wraps=os)) as mock_os, \
|
||||
unit.mock_check_drive(isdir=True):
|
||||
mock_os.path.exists.return_value = True
|
||||
response = rpc.dispatch(('drive', 'part', 'hash'),
|
||||
['unexpected', 'arg1', 'arg2'])
|
||||
@ -936,12 +945,14 @@ class TestDBReplicator(unittest.TestCase):
|
||||
self.assertEqual('unexpected-called', response)
|
||||
|
||||
def test_dispatch_operation_rsync_then_merge(self):
|
||||
rpc = db_replicator.ReplicatorRpc('/', '/', FakeBroker, False)
|
||||
rpc = db_replicator.ReplicatorRpc('/', '/', FakeBroker,
|
||||
mount_check=False)
|
||||
|
||||
self._patch(patch.object, db_replicator, 'renamer', lambda *args: True)
|
||||
|
||||
with patch('swift.common.db_replicator.os',
|
||||
new=mock.MagicMock(wraps=os)) as mock_os:
|
||||
new=mock.MagicMock(wraps=os)) as mock_os, \
|
||||
unit.mock_check_drive(isdir=True):
|
||||
mock_os.path.exists.return_value = True
|
||||
mock_os.unlink.return_value = True
|
||||
response = rpc.dispatch(('drive', 'part', 'hash'),
|
||||
@ -954,12 +965,14 @@ class TestDBReplicator(unittest.TestCase):
|
||||
self.assertEqual(204, response.status_int)
|
||||
|
||||
def test_dispatch_operation_complete_rsync(self):
|
||||
rpc = db_replicator.ReplicatorRpc('/', '/', FakeBroker, False)
|
||||
rpc = db_replicator.ReplicatorRpc('/', '/', FakeBroker,
|
||||
mount_check=False)
|
||||
|
||||
self._patch(patch.object, db_replicator, 'renamer', lambda *args: True)
|
||||
|
||||
with patch('swift.common.db_replicator.os', new=mock.MagicMock(
|
||||
wraps=os)) as mock_os:
|
||||
with patch('swift.common.db_replicator.os',
|
||||
new=mock.MagicMock(wraps=os)) as mock_os, \
|
||||
unit.mock_check_drive(isdir=True):
|
||||
mock_os.path.exists.side_effect = [False, True]
|
||||
response = rpc.dispatch(('drive', 'part', 'hash'),
|
||||
['complete_rsync', 'arg1', ['arg2']])
|
||||
@ -971,10 +984,12 @@ class TestDBReplicator(unittest.TestCase):
|
||||
self.assertEqual(204, response.status_int)
|
||||
|
||||
def test_rsync_then_merge_db_does_not_exist(self):
|
||||
rpc = db_replicator.ReplicatorRpc('/', '/', FakeBroker, False)
|
||||
rpc = db_replicator.ReplicatorRpc('/', '/', FakeBroker,
|
||||
mount_check=False)
|
||||
|
||||
with patch('swift.common.db_replicator.os',
|
||||
new=mock.MagicMock(wraps=os)) as mock_os:
|
||||
new=mock.MagicMock(wraps=os)) as mock_os, \
|
||||
unit.mock_check_drive(isdir=True):
|
||||
mock_os.path.exists.return_value = False
|
||||
response = rpc.rsync_then_merge('drive', '/data/db.db',
|
||||
['arg1', ['arg2']])
|
||||
@ -983,10 +998,12 @@ class TestDBReplicator(unittest.TestCase):
|
||||
self.assertEqual(404, response.status_int)
|
||||
|
||||
def test_rsync_then_merge_old_does_not_exist(self):
|
||||
rpc = db_replicator.ReplicatorRpc('/', '/', FakeBroker, False)
|
||||
rpc = db_replicator.ReplicatorRpc('/', '/', FakeBroker,
|
||||
mount_check=False)
|
||||
|
||||
with patch('swift.common.db_replicator.os',
|
||||
new=mock.MagicMock(wraps=os)) as mock_os:
|
||||
new=mock.MagicMock(wraps=os)) as mock_os, \
|
||||
unit.mock_check_drive(isdir=True):
|
||||
mock_os.path.exists.side_effect = [True, False]
|
||||
response = rpc.rsync_then_merge('drive', '/data/db.db',
|
||||
['arg1', ['arg2']])
|
||||
@ -997,7 +1014,8 @@ class TestDBReplicator(unittest.TestCase):
|
||||
self.assertEqual(404, response.status_int)
|
||||
|
||||
def test_rsync_then_merge_with_objects(self):
|
||||
rpc = db_replicator.ReplicatorRpc('/', '/', FakeBroker, False)
|
||||
rpc = db_replicator.ReplicatorRpc('/', '/', FakeBroker,
|
||||
mount_check=False)
|
||||
|
||||
def mock_renamer(old, new):
|
||||
self.assertEqual('/drive/tmp/arg1arg2', old)
|
||||
@ -1006,7 +1024,8 @@ class TestDBReplicator(unittest.TestCase):
|
||||
self._patch(patch.object, db_replicator, 'renamer', mock_renamer)
|
||||
|
||||
with patch('swift.common.db_replicator.os',
|
||||
new=mock.MagicMock(wraps=os)) as mock_os:
|
||||
new=mock.MagicMock(wraps=os)) as mock_os, \
|
||||
unit.mock_check_drive(isdir=True):
|
||||
mock_os.path.exists.return_value = True
|
||||
mock_os.unlink.return_value = True
|
||||
response = rpc.rsync_then_merge('drive', '/data/db.db',
|
||||
@ -1015,10 +1034,12 @@ class TestDBReplicator(unittest.TestCase):
|
||||
self.assertEqual(204, response.status_int)
|
||||
|
||||
def test_complete_rsync_db_does_not_exist(self):
|
||||
rpc = db_replicator.ReplicatorRpc('/', '/', FakeBroker, False)
|
||||
rpc = db_replicator.ReplicatorRpc('/', '/', FakeBroker,
|
||||
mount_check=False)
|
||||
|
||||
with patch('swift.common.db_replicator.os',
|
||||
new=mock.MagicMock(wraps=os)) as mock_os:
|
||||
new=mock.MagicMock(wraps=os)) as mock_os, \
|
||||
unit.mock_check_drive(isdir=True):
|
||||
mock_os.path.exists.return_value = True
|
||||
response = rpc.complete_rsync('drive', '/data/db.db',
|
||||
['arg1', 'arg2'])
|
||||
@ -1027,10 +1048,12 @@ class TestDBReplicator(unittest.TestCase):
|
||||
self.assertEqual(404, response.status_int)
|
||||
|
||||
def test_complete_rsync_old_file_does_not_exist(self):
|
||||
rpc = db_replicator.ReplicatorRpc('/', '/', FakeBroker, False)
|
||||
rpc = db_replicator.ReplicatorRpc('/', '/', FakeBroker,
|
||||
mount_check=False)
|
||||
|
||||
with patch('swift.common.db_replicator.os',
|
||||
new=mock.MagicMock(wraps=os)) as mock_os:
|
||||
new=mock.MagicMock(wraps=os)) as mock_os, \
|
||||
unit.mock_check_drive(isdir=True):
|
||||
mock_os.path.exists.return_value = False
|
||||
response = rpc.complete_rsync('drive', '/data/db.db',
|
||||
['arg1', ['arg2']])
|
||||
@ -1041,7 +1064,8 @@ class TestDBReplicator(unittest.TestCase):
|
||||
self.assertEqual(404, response.status_int)
|
||||
|
||||
def test_complete_rsync_rename(self):
|
||||
rpc = db_replicator.ReplicatorRpc('/', '/', FakeBroker, False)
|
||||
rpc = db_replicator.ReplicatorRpc('/', '/', FakeBroker,
|
||||
mount_check=False)
|
||||
|
||||
def mock_exists(path):
|
||||
if path == '/data/db.db':
|
||||
@ -1056,7 +1080,8 @@ class TestDBReplicator(unittest.TestCase):
|
||||
self._patch(patch.object, db_replicator, 'renamer', mock_renamer)
|
||||
|
||||
with patch('swift.common.db_replicator.os',
|
||||
new=mock.MagicMock(wraps=os)) as mock_os:
|
||||
new=mock.MagicMock(wraps=os)) as mock_os, \
|
||||
unit.mock_check_drive(isdir=True):
|
||||
mock_os.path.exists.side_effect = [False, True]
|
||||
response = rpc.complete_rsync('drive', '/data/db.db',
|
||||
['arg1', ['arg2']])
|
||||
@ -1064,7 +1089,8 @@ class TestDBReplicator(unittest.TestCase):
|
||||
self.assertEqual(204, response.status_int)
|
||||
|
||||
def test_replicator_sync_with_broker_replication_missing_table(self):
|
||||
rpc = db_replicator.ReplicatorRpc('/', '/', FakeBroker, False)
|
||||
rpc = db_replicator.ReplicatorRpc('/', '/', FakeBroker,
|
||||
mount_check=False)
|
||||
rpc.logger = unit.debug_logger()
|
||||
broker = FakeBroker()
|
||||
broker.get_repl_missing_table = True
|
||||
@ -1079,9 +1105,10 @@ class TestDBReplicator(unittest.TestCase):
|
||||
self._patch(patch.object, db_replicator, 'quarantine_db',
|
||||
mock_quarantine_db)
|
||||
|
||||
response = rpc.sync(broker, ('remote_sync', 'hash_', 'id_',
|
||||
'created_at', 'put_timestamp',
|
||||
'delete_timestamp', 'metadata'))
|
||||
with unit.mock_check_drive(isdir=True):
|
||||
response = rpc.sync(broker, ('remote_sync', 'hash_', 'id_',
|
||||
'created_at', 'put_timestamp',
|
||||
'delete_timestamp', 'metadata'))
|
||||
|
||||
self.assertEqual('404 Not Found', response.status)
|
||||
self.assertEqual(404, response.status_int)
|
||||
@ -1092,13 +1119,15 @@ class TestDBReplicator(unittest.TestCase):
|
||||
"Quarantining DB %s" % broker])
|
||||
|
||||
def test_replicator_sync(self):
|
||||
rpc = db_replicator.ReplicatorRpc('/', '/', FakeBroker, False)
|
||||
rpc = db_replicator.ReplicatorRpc('/', '/', FakeBroker,
|
||||
mount_check=False)
|
||||
broker = FakeBroker()
|
||||
|
||||
response = rpc.sync(broker, (broker.get_sync() + 1, 12345, 'id_',
|
||||
'created_at', 'put_timestamp',
|
||||
'delete_timestamp',
|
||||
'{"meta1": "data1", "meta2": "data2"}'))
|
||||
with unit.mock_check_drive(isdir=True):
|
||||
response = rpc.sync(broker, (
|
||||
broker.get_sync() + 1, 12345, 'id_',
|
||||
'created_at', 'put_timestamp', 'delete_timestamp',
|
||||
'{"meta1": "data1", "meta2": "data2"}'))
|
||||
|
||||
self.assertEqual({'meta1': 'data1', 'meta2': 'data2'},
|
||||
broker.metadata)
|
||||
@ -1110,39 +1139,49 @@ class TestDBReplicator(unittest.TestCase):
|
||||
self.assertEqual(200, response.status_int)
|
||||
|
||||
def test_rsync_then_merge(self):
|
||||
rpc = db_replicator.ReplicatorRpc('/', '/', FakeBroker, False)
|
||||
rpc.rsync_then_merge('sda1', '/srv/swift/blah', ['a', ['b']])
|
||||
rpc = db_replicator.ReplicatorRpc('/', '/', FakeBroker,
|
||||
mount_check=False)
|
||||
with unit.mock_check_drive(isdir=True):
|
||||
rpc.rsync_then_merge('sda1', '/srv/swift/blah', ['a', ['b']])
|
||||
|
||||
def test_merge_items(self):
|
||||
rpc = db_replicator.ReplicatorRpc('/', '/', FakeBroker, False)
|
||||
rpc = db_replicator.ReplicatorRpc('/', '/', FakeBroker,
|
||||
mount_check=False)
|
||||
fake_broker = FakeBroker()
|
||||
args = ('a', 'b')
|
||||
rpc.merge_items(fake_broker, args)
|
||||
with unit.mock_check_drive(isdir=True):
|
||||
rpc.merge_items(fake_broker, args)
|
||||
self.assertEqual(fake_broker.args, args)
|
||||
|
||||
def test_merge_syncs(self):
|
||||
rpc = db_replicator.ReplicatorRpc('/', '/', FakeBroker, False)
|
||||
rpc = db_replicator.ReplicatorRpc('/', '/', FakeBroker,
|
||||
mount_check=False)
|
||||
fake_broker = FakeBroker()
|
||||
args = ('a', 'b')
|
||||
rpc.merge_syncs(fake_broker, args)
|
||||
with unit.mock_check_drive(isdir=True):
|
||||
rpc.merge_syncs(fake_broker, args)
|
||||
self.assertEqual(fake_broker.args, (args[0],))
|
||||
|
||||
def test_complete_rsync_with_bad_input(self):
|
||||
drive = '/some/root'
|
||||
db_file = __file__
|
||||
args = ['old_file', ['filename']]
|
||||
rpc = db_replicator.ReplicatorRpc('/', '/', FakeBroker, False)
|
||||
resp = rpc.complete_rsync(drive, db_file, list(args))
|
||||
rpc = db_replicator.ReplicatorRpc('/', '/', FakeBroker,
|
||||
mount_check=False)
|
||||
with unit.mock_check_drive(isdir=True):
|
||||
resp = rpc.complete_rsync(drive, db_file, list(args))
|
||||
self.assertTrue(isinstance(resp, HTTPException))
|
||||
self.assertEqual(404, resp.status_int)
|
||||
resp = rpc.complete_rsync(drive, 'new_db_file', list(args))
|
||||
with unit.mock_check_drive(isdir=True):
|
||||
resp = rpc.complete_rsync(drive, 'new_db_file', list(args))
|
||||
self.assertTrue(isinstance(resp, HTTPException))
|
||||
self.assertEqual(404, resp.status_int)
|
||||
|
||||
def test_complete_rsync(self):
|
||||
drive = mkdtemp()
|
||||
args = ['old_file']
|
||||
rpc = db_replicator.ReplicatorRpc('/', '/', FakeBroker, False)
|
||||
rpc = db_replicator.ReplicatorRpc('/', '/', FakeBroker,
|
||||
mount_check=False)
|
||||
os.mkdir('%s/tmp' % drive)
|
||||
old_file = '%s/tmp/old_file' % drive
|
||||
new_file = '%s/new_db_file' % drive
|
||||
@ -1155,7 +1194,7 @@ class TestDBReplicator(unittest.TestCase):
|
||||
finally:
|
||||
rmtree(drive)
|
||||
|
||||
@with_tempdir
|
||||
@unit.with_tempdir
|
||||
def test_empty_suffix_and_hash_dirs_get_cleanedup(self, tempdir):
|
||||
datadir = os.path.join(tempdir, 'containers')
|
||||
db_path = ('450/afd/7089ab48d955ab0851fc51cc17a34afd/'
|
||||
@ -1548,7 +1587,9 @@ def attach_fake_replication_rpc(rpc, replicate_hook=None):
|
||||
print('REPLICATE: %s, %s, %r' % (self.path, op, sync_args))
|
||||
replicate_args = self.path.lstrip('/').split('/')
|
||||
args = [op] + list(sync_args)
|
||||
swob_response = rpc.dispatch(replicate_args, args)
|
||||
with unit.mock_check_drive(isdir=not rpc.mount_check,
|
||||
ismount=rpc.mount_check):
|
||||
swob_response = rpc.dispatch(replicate_args, args)
|
||||
resp = FakeHTTPResponse(swob_response)
|
||||
if replicate_hook:
|
||||
replicate_hook(op, *sync_args)
|
||||
@ -1575,7 +1616,7 @@ class TestReplicatorSync(unittest.TestCase):
|
||||
def setUp(self):
|
||||
self.root = mkdtemp()
|
||||
self.rpc = self.replicator_rpc(
|
||||
self.root, self.datadir, self.backend, False,
|
||||
self.root, self.datadir, self.backend, mount_check=False,
|
||||
logger=unit.debug_logger())
|
||||
FakeReplConnection = attach_fake_replication_rpc(self.rpc)
|
||||
self._orig_ReplConnection = db_replicator.ReplConnection
|
||||
@ -1635,7 +1676,9 @@ class TestReplicatorSync(unittest.TestCase):
|
||||
return True
|
||||
daemon._rsync_file = _rsync_file
|
||||
with mock.patch('swift.common.db_replicator.whataremyips',
|
||||
new=lambda *a, **kw: [node['replication_ip']]):
|
||||
new=lambda *a, **kw: [node['replication_ip']]), \
|
||||
unit.mock_check_drive(isdir=not daemon.mount_check,
|
||||
ismount=daemon.mount_check):
|
||||
daemon.run_once()
|
||||
return daemon
|
||||
|
||||
|
@ -358,11 +358,11 @@ class TestAccept(unittest.TestCase):
|
||||
'text /plain', 'text\x7f/plain',
|
||||
'text/plain;a=b=c',
|
||||
'text/plain;q=1;q=2',
|
||||
'text/plain;q=not-a-number',
|
||||
'text/plain; ubq="unbalanced " quotes"'):
|
||||
acc = swift.common.swob.Accept(accept)
|
||||
match = acc.best_match(['text/plain', 'application/xml',
|
||||
'text/xml'])
|
||||
self.assertIsNone(match)
|
||||
with self.assertRaises(ValueError):
|
||||
acc.best_match(['text/plain', 'application/xml', 'text/xml'])
|
||||
|
||||
def test_repr(self):
|
||||
acc = swift.common.swob.Accept("application/json")
|
||||
|
@ -33,6 +33,7 @@ import logging
|
||||
import platform
|
||||
import os
|
||||
import mock
|
||||
import pwd
|
||||
import random
|
||||
import re
|
||||
import socket
|
||||
@ -73,7 +74,8 @@ from swift.common.container_sync_realms import ContainerSyncRealms
|
||||
from swift.common.header_key_dict import HeaderKeyDict
|
||||
from swift.common.storage_policy import POLICIES, reload_storage_policies
|
||||
from swift.common.swob import Request, Response
|
||||
from test.unit import FakeLogger, requires_o_tmpfile_support
|
||||
from test.unit import FakeLogger, requires_o_tmpfile_support, \
|
||||
quiet_eventlet_exceptions
|
||||
|
||||
threading = eventlet.patcher.original('threading')
|
||||
|
||||
@ -105,10 +107,10 @@ class MockOs(object):
|
||||
setgroups = chdir = setsid = setgid = setuid = umask = pass_func
|
||||
|
||||
def called_func(self, name, *args, **kwargs):
|
||||
self.called_funcs[name] = True
|
||||
self.called_funcs[name] = args
|
||||
|
||||
def raise_func(self, name, *args, **kwargs):
|
||||
self.called_funcs[name] = True
|
||||
self.called_funcs[name] = args
|
||||
raise OSError()
|
||||
|
||||
def dup2(self, source, target):
|
||||
@ -925,9 +927,20 @@ class TestUtils(unittest.TestCase):
|
||||
utils.HASH_PATH_SUFFIX = 'endcap'
|
||||
utils.HASH_PATH_PREFIX = 'startcap'
|
||||
|
||||
def test_get_zero_indexed_base_string(self):
|
||||
self.assertEqual(utils.get_zero_indexed_base_string('something', 0),
|
||||
'something')
|
||||
self.assertEqual(utils.get_zero_indexed_base_string('something', None),
|
||||
'something')
|
||||
self.assertEqual(utils.get_zero_indexed_base_string('something', 1),
|
||||
'something-1')
|
||||
self.assertRaises(ValueError, utils.get_zero_indexed_base_string,
|
||||
'something', 'not_integer')
|
||||
|
||||
def test_lock_path(self):
|
||||
tmpdir = mkdtemp()
|
||||
try:
|
||||
# 2 locks with limit=1 must fail
|
||||
with utils.lock_path(tmpdir, 0.1):
|
||||
exc = None
|
||||
success = False
|
||||
@ -938,6 +951,26 @@ class TestUtils(unittest.TestCase):
|
||||
exc = err
|
||||
self.assertTrue(exc is not None)
|
||||
self.assertTrue(not success)
|
||||
|
||||
# 2 locks with limit=2 must succeed
|
||||
with utils.lock_path(tmpdir, 0.1, limit=2):
|
||||
success = False
|
||||
with utils.lock_path(tmpdir, 0.1, limit=2):
|
||||
success = True
|
||||
self.assertTrue(success)
|
||||
|
||||
# 3 locks with limit=2 must fail
|
||||
with utils.lock_path(tmpdir, 0.1, limit=2):
|
||||
exc = None
|
||||
success = False
|
||||
with utils.lock_path(tmpdir, 0.1, limit=2):
|
||||
try:
|
||||
with utils.lock_path(tmpdir, 0.1, limit=2):
|
||||
success = True
|
||||
except LockTimeout as err:
|
||||
exc = err
|
||||
self.assertTrue(exc is not None)
|
||||
self.assertTrue(not success)
|
||||
finally:
|
||||
shutil.rmtree(tmpdir)
|
||||
|
||||
@ -1504,8 +1537,8 @@ class TestUtils(unittest.TestCase):
|
||||
syslog_handler_catcher.LOG_LOCAL0 = orig_sysloghandler.LOG_LOCAL0
|
||||
syslog_handler_catcher.LOG_LOCAL3 = orig_sysloghandler.LOG_LOCAL3
|
||||
|
||||
try:
|
||||
utils.ThreadSafeSysLogHandler = syslog_handler_catcher
|
||||
with mock.patch.object(utils, 'ThreadSafeSysLogHandler',
|
||||
syslog_handler_catcher):
|
||||
utils.get_logger({
|
||||
'log_facility': 'LOG_LOCAL3',
|
||||
}, 'server', log_route='server')
|
||||
@ -1554,8 +1587,6 @@ class TestUtils(unittest.TestCase):
|
||||
((), {'address': ('syslog.funtimes.com', 2123),
|
||||
'facility': orig_sysloghandler.LOG_LOCAL0})],
|
||||
syslog_handler_args)
|
||||
finally:
|
||||
utils.ThreadSafeSysLogHandler = orig_sysloghandler
|
||||
|
||||
@reset_logger_state
|
||||
def test_clean_logger_exception(self):
|
||||
@ -2134,46 +2165,51 @@ log_name = %(yarr)s'''
|
||||
}
|
||||
self.assertEqual(conf, expected)
|
||||
|
||||
def test_drop_privileges(self):
|
||||
def _check_drop_privileges(self, mock_os, required_func_calls,
|
||||
call_setsid=True):
|
||||
user = getuser()
|
||||
user_data = pwd.getpwnam(user)
|
||||
self.assertFalse(mock_os.called_funcs) # sanity check
|
||||
# over-ride os with mock
|
||||
with mock.patch('swift.common.utils.os', mock_os):
|
||||
# exercise the code
|
||||
utils.drop_privileges(user, call_setsid=call_setsid)
|
||||
|
||||
for func in required_func_calls:
|
||||
self.assertIn(func, mock_os.called_funcs)
|
||||
self.assertEqual(user_data[5], mock_os.environ['HOME'])
|
||||
groups = {g.gr_gid for g in grp.getgrall() if user in g.gr_mem}
|
||||
self.assertEqual(groups, set(mock_os.called_funcs['setgroups'][0]))
|
||||
self.assertEqual(user_data[3], mock_os.called_funcs['setgid'][0])
|
||||
self.assertEqual(user_data[2], mock_os.called_funcs['setuid'][0])
|
||||
self.assertEqual('/', mock_os.called_funcs['chdir'][0])
|
||||
self.assertEqual(0o22, mock_os.called_funcs['umask'][0])
|
||||
|
||||
def test_drop_privileges(self):
|
||||
required_func_calls = ('setgroups', 'setgid', 'setuid', 'setsid',
|
||||
'chdir', 'umask')
|
||||
utils.os = MockOs(called_funcs=required_func_calls)
|
||||
# exercise the code
|
||||
utils.drop_privileges(user)
|
||||
for func in required_func_calls:
|
||||
self.assertTrue(utils.os.called_funcs[func])
|
||||
import pwd
|
||||
self.assertEqual(pwd.getpwnam(user)[5], utils.os.environ['HOME'])
|
||||
mock_os = MockOs(called_funcs=required_func_calls)
|
||||
self._check_drop_privileges(mock_os, required_func_calls)
|
||||
|
||||
groups = [g.gr_gid for g in grp.getgrall() if user in g.gr_mem]
|
||||
groups.append(pwd.getpwnam(user).pw_gid)
|
||||
self.assertEqual(set(groups), set(os.getgroups()))
|
||||
|
||||
# reset; test same args, OSError trying to get session leader
|
||||
utils.os = MockOs(called_funcs=required_func_calls,
|
||||
raise_funcs=('setsid',))
|
||||
for func in required_func_calls:
|
||||
self.assertFalse(utils.os.called_funcs.get(func, False))
|
||||
utils.drop_privileges(user)
|
||||
for func in required_func_calls:
|
||||
self.assertTrue(utils.os.called_funcs[func])
|
||||
def test_drop_privileges_setsid_error(self):
|
||||
# OSError trying to get session leader
|
||||
required_func_calls = ('setgroups', 'setgid', 'setuid', 'setsid',
|
||||
'chdir', 'umask')
|
||||
mock_os = MockOs(called_funcs=required_func_calls,
|
||||
raise_funcs=('setsid',))
|
||||
self._check_drop_privileges(mock_os, required_func_calls)
|
||||
|
||||
def test_drop_privileges_no_call_setsid(self):
|
||||
user = getuser()
|
||||
# over-ride os with mock
|
||||
required_func_calls = ('setgroups', 'setgid', 'setuid', 'chdir',
|
||||
'umask')
|
||||
# OSError if trying to get session leader, but it shouldn't be called
|
||||
bad_func_calls = ('setsid',)
|
||||
utils.os = MockOs(called_funcs=required_func_calls,
|
||||
raise_funcs=bad_func_calls)
|
||||
# exercise the code
|
||||
utils.drop_privileges(user, call_setsid=False)
|
||||
for func in required_func_calls:
|
||||
self.assertTrue(utils.os.called_funcs[func])
|
||||
mock_os = MockOs(called_funcs=required_func_calls,
|
||||
raise_funcs=bad_func_calls)
|
||||
self._check_drop_privileges(mock_os, required_func_calls,
|
||||
call_setsid=False)
|
||||
for func in bad_func_calls:
|
||||
self.assertNotIn(func, utils.os.called_funcs)
|
||||
self.assertNotIn(func, mock_os.called_funcs)
|
||||
|
||||
@reset_logger_state
|
||||
def test_capture_stdio(self):
|
||||
@ -3020,8 +3056,7 @@ cluster_dfw1 = http://dfw1.host/v1/
|
||||
self.last_call[-1] = self.last_call[-1].value
|
||||
return 0
|
||||
|
||||
orig__sys_fallocate = utils._sys_fallocate
|
||||
try:
|
||||
with patch.object(utils, '_sys_fallocate', FallocateWrapper()):
|
||||
utils._sys_fallocate = FallocateWrapper()
|
||||
# Ensure fallocate calls _sys_fallocate even with 0 bytes
|
||||
utils._sys_fallocate.last_call = None
|
||||
@ -3043,8 +3078,6 @@ cluster_dfw1 = http://dfw1.host/v1/
|
||||
utils.fallocate(1234, 10 * 1024 * 1024 * 1024)
|
||||
self.assertEqual(utils._sys_fallocate.last_call,
|
||||
[1234, 1, 0, 10 * 1024 * 1024 * 1024])
|
||||
finally:
|
||||
utils._sys_fallocate = orig__sys_fallocate
|
||||
|
||||
def test_generate_trans_id(self):
|
||||
fake_time = 1366428370.5163341
|
||||
@ -6386,8 +6419,9 @@ class TestPipeMutex(unittest.TestCase):
|
||||
|
||||
def test_wrong_releaser(self):
|
||||
self.mutex.acquire()
|
||||
self.assertRaises(RuntimeError,
|
||||
eventlet.spawn(self.mutex.release).wait)
|
||||
with quiet_eventlet_exceptions():
|
||||
self.assertRaises(RuntimeError,
|
||||
eventlet.spawn(self.mutex.release).wait)
|
||||
|
||||
def test_blocking(self):
|
||||
evt = eventlet.event.Event()
|
||||
|
@ -384,23 +384,24 @@ class TestWSGI(unittest.TestCase):
|
||||
f.write(contents.replace('TEMPDIR', t))
|
||||
_fake_rings(t)
|
||||
with mock.patch('swift.proxy.server.Application.'
|
||||
'modify_wsgi_pipeline'):
|
||||
with mock.patch('swift.common.wsgi.wsgi') as _wsgi:
|
||||
with mock.patch('swift.common.wsgi.eventlet') as _eventlet:
|
||||
with mock.patch('swift.common.wsgi.inspect'):
|
||||
conf = wsgi.appconfig(conf_file)
|
||||
logger = logging.getLogger('test')
|
||||
sock = listen_zero()
|
||||
wsgi.run_server(conf, logger, sock)
|
||||
'modify_wsgi_pipeline'), \
|
||||
mock.patch('swift.common.wsgi.wsgi') as _wsgi, \
|
||||
mock.patch('swift.common.wsgi.eventlet') as _wsgi_evt, \
|
||||
mock.patch('swift.common.utils.eventlet') as _utils_evt, \
|
||||
mock.patch('swift.common.wsgi.inspect'):
|
||||
conf = wsgi.appconfig(conf_file)
|
||||
logger = logging.getLogger('test')
|
||||
sock = listen_zero()
|
||||
wsgi.run_server(conf, logger, sock)
|
||||
self.assertEqual('HTTP/1.0',
|
||||
_wsgi.HttpProtocol.default_request_version)
|
||||
self.assertEqual(30, _wsgi.WRITE_TIMEOUT)
|
||||
_eventlet.hubs.use_hub.assert_called_with(utils.get_hub())
|
||||
_eventlet.patcher.monkey_patch.assert_called_with(all=False,
|
||||
socket=True,
|
||||
select=True,
|
||||
thread=True)
|
||||
_eventlet.debug.hub_exceptions.assert_called_with(False)
|
||||
_wsgi_evt.hubs.use_hub.assert_called_with(utils.get_hub())
|
||||
_utils_evt.patcher.monkey_patch.assert_called_with(all=False,
|
||||
socket=True,
|
||||
select=True,
|
||||
thread=True)
|
||||
_wsgi_evt.debug.hub_exceptions.assert_called_with(False)
|
||||
self.assertTrue(_wsgi.server.called)
|
||||
args, kwargs = _wsgi.server.call_args
|
||||
server_sock, server_app, server_logger = args
|
||||
@ -470,29 +471,28 @@ class TestWSGI(unittest.TestCase):
|
||||
f.write('[DEFAULT]\nswift_dir = %s' % conf_root)
|
||||
_fake_rings(conf_root)
|
||||
with mock.patch('swift.proxy.server.Application.'
|
||||
'modify_wsgi_pipeline'):
|
||||
with mock.patch('swift.common.wsgi.wsgi') as _wsgi:
|
||||
with mock.patch('swift.common.wsgi.eventlet') as _eventlet:
|
||||
with mock.patch.dict('os.environ', {'TZ': ''}):
|
||||
with mock.patch('swift.common.wsgi.inspect'):
|
||||
with mock.patch('time.tzset') as mock_tzset:
|
||||
conf = wsgi.appconfig(conf_dir)
|
||||
logger = logging.getLogger('test')
|
||||
sock = listen_zero()
|
||||
wsgi.run_server(conf, logger, sock)
|
||||
self.assertEqual(os.environ['TZ'], 'UTC+0')
|
||||
self.assertEqual(mock_tzset.mock_calls,
|
||||
[mock.call()])
|
||||
'modify_wsgi_pipeline'), \
|
||||
mock.patch('swift.common.wsgi.wsgi') as _wsgi, \
|
||||
mock.patch('swift.common.wsgi.eventlet') as _wsgi_evt, \
|
||||
mock.patch('swift.common.utils.eventlet') as _utils_evt, \
|
||||
mock.patch.dict('os.environ', {'TZ': ''}), \
|
||||
mock.patch('swift.common.wsgi.inspect'), \
|
||||
mock.patch('time.tzset'):
|
||||
conf = wsgi.appconfig(conf_dir)
|
||||
logger = logging.getLogger('test')
|
||||
sock = listen_zero()
|
||||
wsgi.run_server(conf, logger, sock)
|
||||
self.assertTrue(os.environ['TZ'] is not '')
|
||||
|
||||
self.assertEqual('HTTP/1.0',
|
||||
_wsgi.HttpProtocol.default_request_version)
|
||||
self.assertEqual(30, _wsgi.WRITE_TIMEOUT)
|
||||
_eventlet.hubs.use_hub.assert_called_with(utils.get_hub())
|
||||
_eventlet.patcher.monkey_patch.assert_called_with(all=False,
|
||||
socket=True,
|
||||
select=True,
|
||||
thread=True)
|
||||
_eventlet.debug.hub_exceptions.assert_called_with(False)
|
||||
_wsgi_evt.hubs.use_hub.assert_called_with(utils.get_hub())
|
||||
_utils_evt.patcher.monkey_patch.assert_called_with(all=False,
|
||||
socket=True,
|
||||
select=True,
|
||||
thread=True)
|
||||
_wsgi_evt.debug.hub_exceptions.assert_called_with(False)
|
||||
self.assertTrue(_wsgi.server.called)
|
||||
args, kwargs = _wsgi.server.call_args
|
||||
server_sock, server_app, server_logger = args
|
||||
@ -527,25 +527,26 @@ class TestWSGI(unittest.TestCase):
|
||||
f.write(contents.replace('TEMPDIR', t))
|
||||
_fake_rings(t)
|
||||
with mock.patch('swift.proxy.server.Application.'
|
||||
'modify_wsgi_pipeline'):
|
||||
with mock.patch('swift.common.wsgi.wsgi') as _wsgi:
|
||||
mock_server = _wsgi.server
|
||||
_wsgi.server = lambda *args, **kwargs: mock_server(
|
||||
*args, **kwargs)
|
||||
with mock.patch('swift.common.wsgi.eventlet') as _eventlet:
|
||||
conf = wsgi.appconfig(conf_file)
|
||||
logger = logging.getLogger('test')
|
||||
sock = listen_zero()
|
||||
wsgi.run_server(conf, logger, sock)
|
||||
'modify_wsgi_pipeline'), \
|
||||
mock.patch('swift.common.wsgi.wsgi') as _wsgi, \
|
||||
mock.patch('swift.common.utils.eventlet') as _utils_evt, \
|
||||
mock.patch('swift.common.wsgi.eventlet') as _wsgi_evt:
|
||||
mock_server = _wsgi.server
|
||||
_wsgi.server = lambda *args, **kwargs: mock_server(
|
||||
*args, **kwargs)
|
||||
conf = wsgi.appconfig(conf_file)
|
||||
logger = logging.getLogger('test')
|
||||
sock = listen_zero()
|
||||
wsgi.run_server(conf, logger, sock)
|
||||
self.assertEqual('HTTP/1.0',
|
||||
_wsgi.HttpProtocol.default_request_version)
|
||||
self.assertEqual(30, _wsgi.WRITE_TIMEOUT)
|
||||
_eventlet.hubs.use_hub.assert_called_with(utils.get_hub())
|
||||
_eventlet.patcher.monkey_patch.assert_called_with(all=False,
|
||||
socket=True,
|
||||
select=True,
|
||||
thread=True)
|
||||
_eventlet.debug.hub_exceptions.assert_called_with(True)
|
||||
_wsgi_evt.hubs.use_hub.assert_called_with(utils.get_hub())
|
||||
_utils_evt.patcher.monkey_patch.assert_called_with(all=False,
|
||||
socket=True,
|
||||
select=True,
|
||||
thread=True)
|
||||
_wsgi_evt.debug.hub_exceptions.assert_called_with(True)
|
||||
self.assertTrue(mock_server.called)
|
||||
args, kwargs = mock_server.call_args
|
||||
server_sock, server_app, server_logger = args
|
||||
|
@ -30,7 +30,7 @@ from swift.common.utils import Timestamp, encode_timestamps, ShardRange
|
||||
from swift.common.storage_policy import POLICIES
|
||||
|
||||
from test.unit.common import test_db_replicator
|
||||
from test.unit import patch_policies, make_timestamp_iter, FakeLogger
|
||||
from test.unit import patch_policies, make_timestamp_iter, mock_check_drive
|
||||
from contextlib import contextmanager
|
||||
|
||||
|
||||
@ -186,7 +186,8 @@ class TestReplicatorSync(test_db_replicator.TestReplicatorSync):
|
||||
node = random.choice([n for n in self._ring.devs
|
||||
if n['id'] != local_node['id']])
|
||||
info = broker.get_replication_info()
|
||||
success = daemon._repl_to_node(node, broker, part, info)
|
||||
with mock_check_drive(ismount=True):
|
||||
success = daemon._repl_to_node(node, broker, part, info)
|
||||
self.assertFalse(success)
|
||||
|
||||
def test_sync_remote_missing_most_rows(self):
|
||||
@ -1038,8 +1039,7 @@ class TestReplicatorSync(test_db_replicator.TestReplicatorSync):
|
||||
def update_sync_store(self, broker):
|
||||
raise OSError(1, '1')
|
||||
|
||||
logger = FakeLogger()
|
||||
daemon = replicator.ContainerReplicator({}, logger)
|
||||
daemon = replicator.ContainerReplicator({}, logger=self.logger)
|
||||
daemon.sync_store = FakeContainerSyncStore()
|
||||
ts_iter = make_timestamp_iter()
|
||||
broker = self._get_broker('a', 'c', node_index=0)
|
||||
@ -1047,7 +1047,7 @@ class TestReplicatorSync(test_db_replicator.TestReplicatorSync):
|
||||
broker.initialize(timestamp.internal, POLICIES.default.idx)
|
||||
info = broker.get_replication_info()
|
||||
daemon._post_replicate_hook(broker, info, [])
|
||||
log_lines = logger.get_lines_for_level('error')
|
||||
log_lines = self.logger.get_lines_for_level('error')
|
||||
self.assertEqual(1, len(log_lines))
|
||||
self.assertIn('Failed to update sync_store', log_lines[0])
|
||||
|
||||
|
@ -22,7 +22,7 @@ import itertools
|
||||
from contextlib import contextmanager
|
||||
from shutil import rmtree
|
||||
from tempfile import mkdtemp
|
||||
from test.unit import FakeLogger, make_timestamp_iter
|
||||
from test.unit import make_timestamp_iter
|
||||
from time import gmtime
|
||||
from xml.dom import minidom
|
||||
import time
|
||||
@ -63,7 +63,7 @@ def save_globals():
|
||||
|
||||
@patch_policies
|
||||
class TestContainerController(unittest.TestCase):
|
||||
"""Test swift.container.server.ContainerController"""
|
||||
|
||||
def setUp(self):
|
||||
self.testdir = os.path.join(
|
||||
mkdtemp(), 'tmp_test_container_server_ContainerController')
|
||||
@ -71,10 +71,10 @@ class TestContainerController(unittest.TestCase):
|
||||
rmtree(self.testdir)
|
||||
mkdirs(os.path.join(self.testdir, 'sda1'))
|
||||
mkdirs(os.path.join(self.testdir, 'sda1', 'tmp'))
|
||||
self.logger = debug_logger()
|
||||
self.controller = container_server.ContainerController(
|
||||
{'devices': self.testdir, 'mount_check': 'false'},
|
||||
logger=FakeLogger()
|
||||
)
|
||||
logger=self.logger)
|
||||
# some of the policy tests want at least two policies
|
||||
self.assertTrue(len(POLICIES) > 1)
|
||||
|
||||
@ -337,6 +337,14 @@ class TestContainerController(unittest.TestCase):
|
||||
resp = req.get_response(self.controller)
|
||||
self.assertEqual(resp.status_int, 406)
|
||||
|
||||
def test_HEAD_invalid_accept(self):
|
||||
req = Request.blank(
|
||||
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'HEAD'},
|
||||
headers={'Accept': 'application/plain;q'})
|
||||
resp = req.get_response(self.controller)
|
||||
self.assertEqual(resp.status_int, 400)
|
||||
self.assertEqual(resp.body, '')
|
||||
|
||||
def test_HEAD_invalid_format(self):
|
||||
format = '%D1%BD%8A9' # invalid UTF-8; should be %E1%BD%8A9 (E -> D)
|
||||
req = Request.blank(
|
||||
@ -2979,6 +2987,14 @@ class TestContainerController(unittest.TestCase):
|
||||
self.assertEqual(resp.content_type, 'text/xml')
|
||||
self.assertEqual(resp.body, xml_body)
|
||||
|
||||
def test_GET_invalid_accept(self):
|
||||
req = Request.blank(
|
||||
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'GET'},
|
||||
headers={'Accept': 'application/plain;q'})
|
||||
resp = req.get_response(self.controller)
|
||||
self.assertEqual(resp.status_int, 400)
|
||||
self.assertEqual(resp.body, 'Invalid Accept header')
|
||||
|
||||
def test_GET_marker(self):
|
||||
# make a container
|
||||
req = Request.blank(
|
||||
@ -3790,7 +3806,6 @@ class TestContainerController(unittest.TestCase):
|
||||
self.assertEqual(self.logger.get_lines_for_level('info'), [])
|
||||
|
||||
def test_GET_log_requests_true(self):
|
||||
self.controller.logger = FakeLogger()
|
||||
self.controller.log_requests = True
|
||||
|
||||
req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'GET'})
|
||||
@ -3799,7 +3814,6 @@ class TestContainerController(unittest.TestCase):
|
||||
self.assertTrue(self.controller.logger.log_dict['info'])
|
||||
|
||||
def test_GET_log_requests_false(self):
|
||||
self.controller.logger = FakeLogger()
|
||||
self.controller.log_requests = False
|
||||
req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'GET'})
|
||||
resp = req.get_response(self.controller)
|
||||
@ -3810,19 +3824,18 @@ class TestContainerController(unittest.TestCase):
|
||||
req = Request.blank(
|
||||
'/sda1/p/a/c',
|
||||
environ={'REQUEST_METHOD': 'HEAD', 'REMOTE_ADDR': '1.2.3.4'})
|
||||
self.controller.logger = FakeLogger()
|
||||
with mock.patch(
|
||||
'time.gmtime', mock.MagicMock(side_effect=[gmtime(10001.0)])):
|
||||
with mock.patch(
|
||||
'time.time',
|
||||
mock.MagicMock(side_effect=[10000.0, 10001.0, 10002.0])):
|
||||
with mock.patch(
|
||||
'os.getpid', mock.MagicMock(return_value=1234)):
|
||||
req.get_response(self.controller)
|
||||
self.assertEqual(
|
||||
self.controller.logger.log_dict['info'],
|
||||
[(('1.2.3.4 - - [01/Jan/1970:02:46:41 +0000] "HEAD /sda1/p/a/c" '
|
||||
'404 - "-" "-" "-" 2.0000 "-" 1234 0',), {})])
|
||||
with mock.patch('time.gmtime',
|
||||
mock.MagicMock(side_effect=[gmtime(10001.0)])), \
|
||||
mock.patch('time.time',
|
||||
mock.MagicMock(side_effect=[
|
||||
10000.0, 10001.0, 10002.0])), \
|
||||
mock.patch('os.getpid', mock.MagicMock(return_value=1234)):
|
||||
req.get_response(self.controller)
|
||||
info_lines = self.controller.logger.get_lines_for_level('info')
|
||||
self.assertEqual(info_lines, [
|
||||
'1.2.3.4 - - [01/Jan/1970:02:46:41 +0000] "HEAD /sda1/p/a/c" '
|
||||
'404 - "-" "-" "-" 2.0000 "-" 1234 0',
|
||||
])
|
||||
|
||||
|
||||
@patch_policies([
|
||||
|
@ -21,7 +21,7 @@ from contextlib import closing
|
||||
from gzip import GzipFile
|
||||
from shutil import rmtree
|
||||
from tempfile import mkdtemp
|
||||
from test.unit import FakeLogger
|
||||
from test.unit import debug_logger, mock_check_drive
|
||||
|
||||
from eventlet import spawn, Timeout
|
||||
|
||||
@ -55,6 +55,7 @@ class TestContainerUpdater(unittest.TestCase):
|
||||
os.mkdir(self.devices_dir)
|
||||
self.sda1 = os.path.join(self.devices_dir, 'sda1')
|
||||
os.mkdir(self.sda1)
|
||||
self.logger = debug_logger('test')
|
||||
|
||||
def tearDown(self):
|
||||
rmtree(os.path.dirname(self.testdir), ignore_errors=1)
|
||||
@ -71,7 +72,7 @@ class TestContainerUpdater(unittest.TestCase):
|
||||
}
|
||||
if conf_updates:
|
||||
conf.update(conf_updates)
|
||||
return container_updater.ContainerUpdater(conf)
|
||||
return container_updater.ContainerUpdater(conf, logger=self.logger)
|
||||
|
||||
def test_creation(self):
|
||||
cu = self._get_container_updater({'concurrency': '2',
|
||||
@ -127,11 +128,8 @@ class TestContainerUpdater(unittest.TestCase):
|
||||
check_bad({'slowdown': 'baz'})
|
||||
check_bad({'containers_per_second': 'quux'})
|
||||
|
||||
@mock.patch.object(container_updater, 'ismount')
|
||||
@mock.patch.object(container_updater.ContainerUpdater, 'container_sweep')
|
||||
def test_run_once_with_device_unmounted(self, mock_sweep, mock_ismount):
|
||||
|
||||
mock_ismount.return_value = False
|
||||
def test_run_once_with_device_unmounted(self, mock_sweep):
|
||||
cu = self._get_container_updater()
|
||||
containers_dir = os.path.join(self.sda1, DATADIR)
|
||||
os.mkdir(containers_dir)
|
||||
@ -146,9 +144,9 @@ class TestContainerUpdater(unittest.TestCase):
|
||||
|
||||
mock_sweep.reset_mock()
|
||||
cu = self._get_container_updater({'mount_check': 'true'})
|
||||
cu.logger = FakeLogger()
|
||||
cu.run_once()
|
||||
log_lines = cu.logger.get_lines_for_level('warning')
|
||||
with mock_check_drive():
|
||||
cu.run_once()
|
||||
log_lines = self.logger.get_lines_for_level('warning')
|
||||
self.assertGreater(len(log_lines), 0)
|
||||
msg = 'sda1 is not mounted'
|
||||
self.assertEqual(log_lines[0], msg)
|
||||
@ -237,10 +235,9 @@ class TestContainerUpdater(unittest.TestCase):
|
||||
e = OSError('permission_denied')
|
||||
mock_listdir.side_effect = e
|
||||
cu = self._get_container_updater()
|
||||
cu.logger = FakeLogger()
|
||||
paths = cu.get_paths()
|
||||
self.assertEqual(paths, [])
|
||||
log_lines = cu.logger.get_lines_for_level('error')
|
||||
log_lines = self.logger.get_lines_for_level('error')
|
||||
msg = ('ERROR: Failed to get paths to drive partitions: '
|
||||
'permission_denied')
|
||||
self.assertEqual(log_lines[0], msg)
|
||||
@ -248,10 +245,9 @@ class TestContainerUpdater(unittest.TestCase):
|
||||
@mock.patch('os.listdir', return_value=['foo', 'bar'])
|
||||
def test_listdir_without_exception(self, mock_listdir):
|
||||
cu = self._get_container_updater()
|
||||
cu.logger = FakeLogger()
|
||||
path = cu._listdir('foo/bar/')
|
||||
self.assertEqual(path, ['foo', 'bar'])
|
||||
log_lines = cu.logger.get_lines_for_level('error')
|
||||
log_lines = self.logger.get_lines_for_level('error')
|
||||
self.assertEqual(len(log_lines), 0)
|
||||
|
||||
def test_unicode(self):
|
||||
|
@ -25,7 +25,7 @@ from hashlib import md5
|
||||
from tempfile import mkdtemp
|
||||
import textwrap
|
||||
from os.path import dirname, basename
|
||||
from test.unit import (FakeLogger, patch_policies, make_timestamp_iter,
|
||||
from test.unit import (debug_logger, patch_policies, make_timestamp_iter,
|
||||
DEFAULT_TEST_EC_TYPE)
|
||||
from swift.obj import auditor, replicator
|
||||
from swift.obj.diskfile import (
|
||||
@ -66,7 +66,7 @@ class TestAuditor(unittest.TestCase):
|
||||
self.testdir = os.path.join(mkdtemp(), 'tmp_test_object_auditor')
|
||||
self.devices = os.path.join(self.testdir, 'node')
|
||||
self.rcache = os.path.join(self.testdir, 'object.recon')
|
||||
self.logger = FakeLogger()
|
||||
self.logger = debug_logger()
|
||||
rmtree(self.testdir, ignore_errors=1)
|
||||
mkdirs(os.path.join(self.devices, 'sda'))
|
||||
os.mkdir(os.path.join(self.devices, 'sdb'))
|
||||
@ -246,7 +246,8 @@ class TestAuditor(unittest.TestCase):
|
||||
writer.put(metadata)
|
||||
writer.commit(Timestamp(timestamp))
|
||||
|
||||
auditor_worker = auditor.AuditorWorker(self.conf, FakeLogger(),
|
||||
self.logger.clear()
|
||||
auditor_worker = auditor.AuditorWorker(self.conf, self.logger,
|
||||
self.rcache, self.devices)
|
||||
self.assertEqual(0, auditor_worker.quarantines) # sanity check
|
||||
auditor_worker.object_audit(
|
||||
@ -600,20 +601,19 @@ class TestAuditor(unittest.TestCase):
|
||||
self.assertEqual(auditor_worker.stats_buckets['OVER'], 2)
|
||||
|
||||
def test_object_run_logging(self):
|
||||
logger = FakeLogger()
|
||||
auditor_worker = auditor.AuditorWorker(self.conf, logger,
|
||||
auditor_worker = auditor.AuditorWorker(self.conf, self.logger,
|
||||
self.rcache, self.devices)
|
||||
auditor_worker.audit_all_objects(device_dirs=['sda'])
|
||||
log_lines = logger.get_lines_for_level('info')
|
||||
log_lines = self.logger.get_lines_for_level('info')
|
||||
self.assertGreater(len(log_lines), 0)
|
||||
self.assertTrue(log_lines[0].index('ALL - parallel, sda'))
|
||||
|
||||
logger = FakeLogger()
|
||||
auditor_worker = auditor.AuditorWorker(self.conf, logger,
|
||||
self.logger.clear()
|
||||
auditor_worker = auditor.AuditorWorker(self.conf, self.logger,
|
||||
self.rcache, self.devices,
|
||||
zero_byte_only_at_fps=50)
|
||||
auditor_worker.audit_all_objects(device_dirs=['sda'])
|
||||
log_lines = logger.get_lines_for_level('info')
|
||||
log_lines = self.logger.get_lines_for_level('info')
|
||||
self.assertGreater(len(log_lines), 0)
|
||||
self.assertTrue(log_lines[0].index('ZBF - sda'))
|
||||
|
||||
@ -984,8 +984,7 @@ class TestAuditor(unittest.TestCase):
|
||||
|
||||
def _test_expired_object_is_ignored(self, zero_byte_fps):
|
||||
# verify that an expired object does not get mistaken for a tombstone
|
||||
audit = auditor.ObjectAuditor(self.conf)
|
||||
audit.logger = FakeLogger()
|
||||
audit = auditor.ObjectAuditor(self.conf, logger=self.logger)
|
||||
audit.log_time = 0
|
||||
now = time.time()
|
||||
write_diskfile(self.disk_file, Timestamp(now - 20),
|
||||
|
@ -41,11 +41,10 @@ import pyeclib.ec_iface
|
||||
|
||||
from eventlet import hubs, timeout, tpool
|
||||
from swift.obj.diskfile import MD5_OF_EMPTY_STRING, update_auditor_status
|
||||
from test.unit import (FakeLogger, mock as unit_mock, temptree,
|
||||
from test.unit import (mock as unit_mock, temptree, mock_check_drive,
|
||||
patch_policies, debug_logger, EMPTY_ETAG,
|
||||
make_timestamp_iter, DEFAULT_TEST_EC_TYPE,
|
||||
requires_o_tmpfile_support, encode_frag_archive_bodies,
|
||||
mock_check_drive)
|
||||
requires_o_tmpfile_support, encode_frag_archive_bodies)
|
||||
from nose import SkipTest
|
||||
from swift.obj import diskfile
|
||||
from swift.common import utils
|
||||
@ -167,7 +166,8 @@ class TestDiskFileModuleMethods(unittest.TestCase):
|
||||
self.conf = dict(
|
||||
swift_dir=self.testdir, devices=self.devices, mount_check='false',
|
||||
timeout='300', stats_interval='1')
|
||||
self.df_mgr = diskfile.DiskFileManager(self.conf, FakeLogger())
|
||||
self.logger = debug_logger()
|
||||
self.df_mgr = diskfile.DiskFileManager(self.conf, logger=self.logger)
|
||||
|
||||
def tearDown(self):
|
||||
rmtree(self.testdir, ignore_errors=1)
|
||||
@ -456,40 +456,38 @@ class TestObjectAuditLocationGenerator(unittest.TestCase):
|
||||
self.assertEqual(locations, expected)
|
||||
|
||||
def test_skipping_unmounted_devices(self):
|
||||
def mock_ismount(path):
|
||||
return path.endswith('sdp')
|
||||
with temptree([]) as tmpdir, mock_check_drive() as mocks:
|
||||
mocks['ismount'].side_effect = lambda path: path.endswith('sdp')
|
||||
os.makedirs(os.path.join(tmpdir, "sdp", "objects",
|
||||
"2607", "df3",
|
||||
"ec2871fe724411f91787462f97d30df3"))
|
||||
os.makedirs(os.path.join(tmpdir, "sdq", "objects",
|
||||
"9785", "a10",
|
||||
"4993d582f41be9771505a8d4cb237a10"))
|
||||
|
||||
with mock.patch('swift.obj.diskfile.ismount', mock_ismount):
|
||||
with temptree([]) as tmpdir:
|
||||
os.makedirs(os.path.join(tmpdir, "sdp", "objects",
|
||||
"2607", "df3",
|
||||
"ec2871fe724411f91787462f97d30df3"))
|
||||
os.makedirs(os.path.join(tmpdir, "sdq", "objects",
|
||||
"9785", "a10",
|
||||
"4993d582f41be9771505a8d4cb237a10"))
|
||||
locations = [
|
||||
(loc.path, loc.device, loc.partition, loc.policy)
|
||||
for loc in diskfile.object_audit_location_generator(
|
||||
devices=tmpdir, mount_check=True)]
|
||||
locations.sort()
|
||||
|
||||
locations = [
|
||||
(loc.path, loc.device, loc.partition, loc.policy)
|
||||
for loc in diskfile.object_audit_location_generator(
|
||||
devices=tmpdir, mount_check=True)]
|
||||
locations.sort()
|
||||
self.assertEqual(
|
||||
locations,
|
||||
[(os.path.join(tmpdir, "sdp", "objects",
|
||||
"2607", "df3",
|
||||
"ec2871fe724411f91787462f97d30df3"),
|
||||
"sdp", "2607", POLICIES[0])])
|
||||
|
||||
self.assertEqual(
|
||||
locations,
|
||||
[(os.path.join(tmpdir, "sdp", "objects",
|
||||
"2607", "df3",
|
||||
"ec2871fe724411f91787462f97d30df3"),
|
||||
"sdp", "2607", POLICIES[0])])
|
||||
|
||||
# Do it again, this time with a logger.
|
||||
ml = mock.MagicMock()
|
||||
locations = [
|
||||
(loc.path, loc.device, loc.partition, loc.policy)
|
||||
for loc in diskfile.object_audit_location_generator(
|
||||
devices=tmpdir, mount_check=True, logger=ml)]
|
||||
ml.debug.assert_called_once_with(
|
||||
'Skipping %s as it is not mounted',
|
||||
'sdq')
|
||||
# Do it again, this time with a logger.
|
||||
logger = debug_logger()
|
||||
locations = [
|
||||
(loc.path, loc.device, loc.partition, loc.policy)
|
||||
for loc in diskfile.object_audit_location_generator(
|
||||
devices=tmpdir, mount_check=True, logger=logger)]
|
||||
debug_lines = logger.get_lines_for_level('debug')
|
||||
self.assertEqual([
|
||||
'Skipping sdq as it is not mounted',
|
||||
], debug_lines)
|
||||
|
||||
def test_skipping_files(self):
|
||||
with temptree([]) as tmpdir:
|
||||
@ -512,14 +510,38 @@ class TestObjectAuditLocationGenerator(unittest.TestCase):
|
||||
"sdp", "2607", POLICIES[0])])
|
||||
|
||||
# Do it again, this time with a logger.
|
||||
ml = mock.MagicMock()
|
||||
logger = debug_logger('test')
|
||||
locations = [
|
||||
(loc.path, loc.device, loc.partition, loc.policy)
|
||||
for loc in diskfile.object_audit_location_generator(
|
||||
devices=tmpdir, mount_check=False, logger=ml)]
|
||||
ml.debug.assert_called_once_with(
|
||||
'Skipping %s: Not a directory' %
|
||||
os.path.join(tmpdir, "garbage"))
|
||||
devices=tmpdir, mount_check=False, logger=logger)]
|
||||
debug_lines = logger.get_lines_for_level('debug')
|
||||
self.assertEqual([
|
||||
'Skipping garbage as it is not a dir',
|
||||
], debug_lines)
|
||||
logger.clear()
|
||||
with mock_check_drive(isdir=True):
|
||||
locations = [
|
||||
(loc.path, loc.device, loc.partition, loc.policy)
|
||||
for loc in diskfile.object_audit_location_generator(
|
||||
devices=tmpdir, mount_check=False, logger=logger)]
|
||||
debug_lines = logger.get_lines_for_level('debug')
|
||||
self.assertEqual([
|
||||
'Skipping %s: Not a directory' % os.path.join(
|
||||
tmpdir, "garbage"),
|
||||
], debug_lines)
|
||||
logger.clear()
|
||||
with mock_check_drive() as mocks:
|
||||
mocks['ismount'].side_effect = lambda path: (
|
||||
False if path.endswith('garbage') else True)
|
||||
locations = [
|
||||
(loc.path, loc.device, loc.partition, loc.policy)
|
||||
for loc in diskfile.object_audit_location_generator(
|
||||
devices=tmpdir, mount_check=True, logger=logger)]
|
||||
debug_lines = logger.get_lines_for_level('debug')
|
||||
self.assertEqual([
|
||||
'Skipping garbage as it is not mounted',
|
||||
], debug_lines)
|
||||
|
||||
def test_only_catch_expected_errors(self):
|
||||
# Crazy exceptions should still escape object_audit_location_generator
|
||||
@ -558,7 +580,8 @@ class TestObjectAuditLocationGenerator(unittest.TestCase):
|
||||
os.makedirs(os.path.join(tmpdir, "sdf", "objects", "2", "a", "b"))
|
||||
|
||||
# Pretend that some time passed between each partition
|
||||
with mock.patch('os.stat') as mock_stat:
|
||||
with mock.patch('os.stat') as mock_stat, \
|
||||
mock_check_drive(isdir=True):
|
||||
mock_stat.return_value.st_mtime = time() - 60
|
||||
# Auditor starts, there are two partitions to check
|
||||
gen = diskfile.object_audit_location_generator(tmpdir, False)
|
||||
@ -569,15 +592,17 @@ class TestObjectAuditLocationGenerator(unittest.TestCase):
|
||||
# the generator and restarts There is now only one remaining
|
||||
# partition to check
|
||||
gen = diskfile.object_audit_location_generator(tmpdir, False)
|
||||
gen.next()
|
||||
with mock_check_drive(isdir=True):
|
||||
gen.next()
|
||||
|
||||
# There are no more remaining partitions
|
||||
self.assertRaises(StopIteration, gen.next)
|
||||
# There are no more remaining partitions
|
||||
self.assertRaises(StopIteration, gen.next)
|
||||
|
||||
# There are no partitions to check if the auditor restarts another
|
||||
# time and the status files have not been cleared
|
||||
gen = diskfile.object_audit_location_generator(tmpdir, False)
|
||||
self.assertRaises(StopIteration, gen.next)
|
||||
with mock_check_drive(isdir=True):
|
||||
self.assertRaises(StopIteration, gen.next)
|
||||
|
||||
# Reset status file
|
||||
diskfile.clear_auditor_status(tmpdir)
|
||||
@ -586,8 +611,9 @@ class TestObjectAuditLocationGenerator(unittest.TestCase):
|
||||
# check two partitions again, because the remaining
|
||||
# partitions were empty and a new listdir was executed
|
||||
gen = diskfile.object_audit_location_generator(tmpdir, False)
|
||||
gen.next()
|
||||
gen.next()
|
||||
with mock_check_drive(isdir=True):
|
||||
gen.next()
|
||||
gen.next()
|
||||
|
||||
def test_update_auditor_status_throttle(self):
|
||||
# If there are a lot of nearly empty partitions, the
|
||||
@ -864,9 +890,9 @@ class DiskFileManagerMixin(BaseDiskFileTestMixin):
|
||||
tomb_file = timestamp.internal + '.ts'
|
||||
for policy in POLICIES:
|
||||
for unexpected in unexpected_files:
|
||||
self.logger.clear()
|
||||
files = [unexpected, tomb_file]
|
||||
df_mgr = self.df_router[policy]
|
||||
df_mgr.logger = FakeLogger()
|
||||
datadir = os.path.join('/srv/node/sdb1/',
|
||||
diskfile.get_data_dir(policy))
|
||||
|
||||
@ -874,7 +900,6 @@ class DiskFileManagerMixin(BaseDiskFileTestMixin):
|
||||
|
||||
expected = {'ts_file': os.path.join(datadir, tomb_file)}
|
||||
self._assertDictContainsSubset(expected, results)
|
||||
|
||||
log_lines = df_mgr.logger.get_lines_for_level('warning')
|
||||
self.assertTrue(
|
||||
log_lines[0].startswith(
|
||||
@ -960,9 +985,66 @@ class DiskFileManagerMixin(BaseDiskFileTestMixin):
|
||||
locations = list(self.df_mgr.object_audit_location_generator())
|
||||
self.assertEqual(locations, [])
|
||||
|
||||
def test_replication_one_per_device_deprecation(self):
|
||||
conf = dict(**self.conf)
|
||||
mgr = diskfile.DiskFileManager(conf, self.logger)
|
||||
self.assertEqual(mgr.replication_concurrency_per_device, 1)
|
||||
|
||||
conf = dict(replication_concurrency_per_device='0', **self.conf)
|
||||
mgr = diskfile.DiskFileManager(conf, self.logger)
|
||||
self.assertEqual(mgr.replication_concurrency_per_device, 0)
|
||||
|
||||
conf = dict(replication_concurrency_per_device='2', **self.conf)
|
||||
mgr = diskfile.DiskFileManager(conf, self.logger)
|
||||
self.assertEqual(mgr.replication_concurrency_per_device, 2)
|
||||
|
||||
conf = dict(replication_concurrency_per_device=2, **self.conf)
|
||||
mgr = diskfile.DiskFileManager(conf, self.logger)
|
||||
self.assertEqual(mgr.replication_concurrency_per_device, 2)
|
||||
|
||||
# Check backward compatibility
|
||||
conf = dict(replication_one_per_device='true', **self.conf)
|
||||
mgr = diskfile.DiskFileManager(conf, self.logger)
|
||||
self.assertEqual(mgr.replication_concurrency_per_device, 1)
|
||||
log_lines = mgr.logger.get_lines_for_level('warning')
|
||||
self.assertIn('replication_one_per_device is deprecated',
|
||||
log_lines[-1])
|
||||
|
||||
conf = dict(replication_one_per_device='false', **self.conf)
|
||||
mgr = diskfile.DiskFileManager(conf, self.logger)
|
||||
self.assertEqual(mgr.replication_concurrency_per_device, 0)
|
||||
log_lines = mgr.logger.get_lines_for_level('warning')
|
||||
self.assertIn('replication_one_per_device is deprecated',
|
||||
log_lines[-1])
|
||||
|
||||
# If defined, new parameter has precedence
|
||||
conf = dict(replication_concurrency_per_device='2',
|
||||
replication_one_per_device='true', **self.conf)
|
||||
mgr = diskfile.DiskFileManager(conf, self.logger)
|
||||
self.assertEqual(mgr.replication_concurrency_per_device, 2)
|
||||
log_lines = mgr.logger.get_lines_for_level('warning')
|
||||
self.assertIn('replication_one_per_device ignored',
|
||||
log_lines[-1])
|
||||
|
||||
conf = dict(replication_concurrency_per_device='2',
|
||||
replication_one_per_device='false', **self.conf)
|
||||
mgr = diskfile.DiskFileManager(conf, self.logger)
|
||||
self.assertEqual(mgr.replication_concurrency_per_device, 2)
|
||||
log_lines = mgr.logger.get_lines_for_level('warning')
|
||||
self.assertIn('replication_one_per_device ignored',
|
||||
log_lines[-1])
|
||||
|
||||
conf = dict(replication_concurrency_per_device='0',
|
||||
replication_one_per_device='true', **self.conf)
|
||||
mgr = diskfile.DiskFileManager(conf, self.logger)
|
||||
self.assertEqual(mgr.replication_concurrency_per_device, 0)
|
||||
log_lines = mgr.logger.get_lines_for_level('warning')
|
||||
self.assertIn('replication_one_per_device ignored',
|
||||
log_lines[-1])
|
||||
|
||||
def test_replication_lock_on(self):
|
||||
# Double check settings
|
||||
self.df_mgr.replication_one_per_device = True
|
||||
self.df_mgr.replication_concurrency_per_device = 1
|
||||
self.df_mgr.replication_lock_timeout = 0.1
|
||||
dev_path = os.path.join(self.testdir, self.existing_device)
|
||||
with self.df_mgr.replication_lock(self.existing_device):
|
||||
@ -981,14 +1063,16 @@ class DiskFileManagerMixin(BaseDiskFileTestMixin):
|
||||
|
||||
def test_replication_lock_off(self):
|
||||
# Double check settings
|
||||
self.df_mgr.replication_one_per_device = False
|
||||
self.df_mgr.replication_concurrency_per_device = 0
|
||||
self.df_mgr.replication_lock_timeout = 0.1
|
||||
dev_path = os.path.join(self.testdir, self.existing_device)
|
||||
with self.df_mgr.replication_lock(dev_path):
|
||||
|
||||
# 2 locks must succeed
|
||||
with self.df_mgr.replication_lock(self.existing_device):
|
||||
lock_exc = None
|
||||
exc = None
|
||||
try:
|
||||
with self.df_mgr.replication_lock(dev_path):
|
||||
with self.df_mgr.replication_lock(self.existing_device):
|
||||
raise Exception(
|
||||
'%r was not replication locked!' % dev_path)
|
||||
except ReplicationLockTimeout as err:
|
||||
@ -998,9 +1082,62 @@ class DiskFileManagerMixin(BaseDiskFileTestMixin):
|
||||
self.assertTrue(lock_exc is None)
|
||||
self.assertTrue(exc is not None)
|
||||
|
||||
# 3 locks must succeed
|
||||
with self.df_mgr.replication_lock(self.existing_device):
|
||||
with self.df_mgr.replication_lock(self.existing_device):
|
||||
lock_exc = None
|
||||
exc = None
|
||||
try:
|
||||
with self.df_mgr.replication_lock(self.existing_device):
|
||||
raise Exception(
|
||||
'%r was not replication locked!' % dev_path)
|
||||
except ReplicationLockTimeout as err:
|
||||
lock_exc = err
|
||||
except Exception as err:
|
||||
exc = err
|
||||
self.assertTrue(lock_exc is None)
|
||||
self.assertTrue(exc is not None)
|
||||
|
||||
def test_replication_lock_2(self):
|
||||
# Double check settings
|
||||
self.df_mgr.replication_concurrency_per_device = 2
|
||||
self.df_mgr.replication_lock_timeout = 0.1
|
||||
dev_path = os.path.join(self.testdir, self.existing_device)
|
||||
|
||||
# 2 locks with replication_concurrency_per_device=2 must succeed
|
||||
with self.df_mgr.replication_lock(self.existing_device):
|
||||
lock_exc = None
|
||||
exc = None
|
||||
try:
|
||||
with self.df_mgr.replication_lock(self.existing_device):
|
||||
raise Exception(
|
||||
'%r was not replication locked!' % dev_path)
|
||||
except ReplicationLockTimeout as err:
|
||||
lock_exc = err
|
||||
except Exception as err:
|
||||
exc = err
|
||||
self.assertTrue(lock_exc is None)
|
||||
self.assertTrue(exc is not None)
|
||||
|
||||
# 3 locks with replication_concurrency_per_device=2 must fail
|
||||
with self.df_mgr.replication_lock(self.existing_device):
|
||||
with self.df_mgr.replication_lock(self.existing_device):
|
||||
lock_exc = None
|
||||
exc = None
|
||||
try:
|
||||
with self.df_mgr.replication_lock(self.existing_device):
|
||||
raise Exception(
|
||||
'%r was not replication locked!' % dev_path)
|
||||
except ReplicationLockTimeout as err:
|
||||
lock_exc = err
|
||||
except Exception as err:
|
||||
exc = err
|
||||
self.assertTrue(lock_exc is not None)
|
||||
self.assertTrue(exc is None)
|
||||
|
||||
def test_replication_lock_another_device_fine(self):
|
||||
# Double check settings
|
||||
self.df_mgr.replication_one_per_device = True
|
||||
self.df_mgr.replication_concurrency_per_device = 1
|
||||
self.df_mgr.replication_lock_timeout = 0.1
|
||||
with self.df_mgr.replication_lock(self.existing_device):
|
||||
lock_exc = None
|
||||
@ -1012,12 +1149,11 @@ class DiskFileManagerMixin(BaseDiskFileTestMixin):
|
||||
self.assertTrue(lock_exc is None)
|
||||
|
||||
def test_missing_splice_warning(self):
|
||||
logger = FakeLogger()
|
||||
with mock.patch('swift.common.splice.splice._c_splice', None):
|
||||
self.conf['splice'] = 'yes'
|
||||
mgr = diskfile.DiskFileManager(self.conf, logger)
|
||||
mgr = diskfile.DiskFileManager(self.conf, logger=self.logger)
|
||||
|
||||
warnings = logger.get_lines_for_level('warning')
|
||||
warnings = self.logger.get_lines_for_level('warning')
|
||||
self.assertGreater(len(warnings), 0)
|
||||
self.assertTrue('splice()' in warnings[-1])
|
||||
self.assertFalse(mgr.use_splice)
|
||||
|
@ -30,7 +30,7 @@ from eventlet.green import subprocess
|
||||
from eventlet import Timeout
|
||||
|
||||
from test.unit import (debug_logger, patch_policies, make_timestamp_iter,
|
||||
mocked_http_conn, FakeLogger)
|
||||
mocked_http_conn, FakeLogger, mock_check_drive)
|
||||
from swift.common import utils
|
||||
from swift.common.utils import (hash_path, mkdirs, normalize_timestamp,
|
||||
storage_directory)
|
||||
@ -454,6 +454,20 @@ class TestObjectReplicator(unittest.TestCase):
|
||||
self.assertEqual(jobs_by_pol_part[part]['path'],
|
||||
os.path.join(self.objects_1, part[1:]))
|
||||
|
||||
def test_collect_jobs_unmounted(self):
|
||||
with mock_check_drive() as mocks:
|
||||
jobs = self.replicator.collect_jobs()
|
||||
self.assertEqual(jobs, [])
|
||||
self.assertEqual(mocks['ismount'].mock_calls, [])
|
||||
self.assertEqual(len(mocks['isdir'].mock_calls), 2)
|
||||
|
||||
self.replicator.mount_check = True
|
||||
with mock_check_drive() as mocks:
|
||||
jobs = self.replicator.collect_jobs()
|
||||
self.assertEqual(jobs, [])
|
||||
self.assertEqual(mocks['isdir'].mock_calls, [])
|
||||
self.assertEqual(len(mocks['ismount'].mock_calls), 2)
|
||||
|
||||
def test_collect_jobs_failure_report_with_auditor_stats_json(self):
|
||||
devs = [
|
||||
{'id': 0, 'device': 'sda', 'zone': 0,
|
||||
|
@ -54,7 +54,7 @@ class TestBaseSsync(BaseTest):
|
||||
conf = {
|
||||
'devices': self.rx_testdir,
|
||||
'mount_check': 'false',
|
||||
'replication_one_per_device': 'false',
|
||||
'replication_concurrency_per_device': '0',
|
||||
'log_requests': 'false'}
|
||||
self.rx_logger = debug_logger()
|
||||
self.rx_controller = server.ObjectController(conf, self.rx_logger)
|
||||
|
@ -55,7 +55,7 @@ class TestReceiver(unittest.TestCase):
|
||||
self.conf = {
|
||||
'devices': self.testdir,
|
||||
'mount_check': 'false',
|
||||
'replication_one_per_device': 'false',
|
||||
'replication_concurrency_per_device': '0',
|
||||
'log_requests': 'false'}
|
||||
utils.mkdirs(os.path.join(self.testdir, 'device', 'partition'))
|
||||
self.controller = server.ObjectController(self.conf)
|
||||
|
@ -24,21 +24,21 @@ from gzip import GzipFile
|
||||
from tempfile import mkdtemp
|
||||
from shutil import rmtree
|
||||
from test import listen_zero
|
||||
from test.unit import FakeLogger, make_timestamp_iter
|
||||
from test.unit import debug_logger, patch_policies, mocked_http_conn
|
||||
from test.unit import (
|
||||
make_timestamp_iter, debug_logger, patch_policies, mocked_http_conn)
|
||||
from time import time
|
||||
from distutils.dir_util import mkpath
|
||||
|
||||
from eventlet import spawn, Timeout
|
||||
|
||||
from swift.obj import updater as object_updater
|
||||
from swift.obj.diskfile import (ASYNCDIR_BASE, get_async_dir, DiskFileManager,
|
||||
get_tmp_dir)
|
||||
from swift.obj.diskfile import (
|
||||
ASYNCDIR_BASE, get_async_dir, DiskFileManager, get_tmp_dir)
|
||||
from swift.common.ring import RingData
|
||||
from swift.common import utils
|
||||
from swift.common.header_key_dict import HeaderKeyDict
|
||||
from swift.common.utils import hash_path, normalize_timestamp, mkdirs, \
|
||||
write_pickle
|
||||
from swift.common.utils import (
|
||||
hash_path, normalize_timestamp, mkdirs, write_pickle)
|
||||
from swift.common.storage_policy import StoragePolicy, POLICIES
|
||||
|
||||
|
||||
@ -148,11 +148,10 @@ class TestObjectUpdater(unittest.TestCase):
|
||||
'mount_check': 'false',
|
||||
'swift_dir': self.testdir,
|
||||
}
|
||||
daemon = object_updater.ObjectUpdater(conf)
|
||||
daemon.logger = FakeLogger()
|
||||
daemon = object_updater.ObjectUpdater(conf, logger=self.logger)
|
||||
paths = daemon._listdir('foo/bar')
|
||||
self.assertEqual([], paths)
|
||||
log_lines = daemon.logger.get_lines_for_level('error')
|
||||
log_lines = self.logger.get_lines_for_level('error')
|
||||
msg = ('ERROR: Unable to access foo/bar: permission_denied')
|
||||
self.assertEqual(log_lines[0], msg)
|
||||
|
||||
@ -164,10 +163,9 @@ class TestObjectUpdater(unittest.TestCase):
|
||||
'mount_check': 'false',
|
||||
'swift_dir': self.testdir,
|
||||
}
|
||||
daemon = object_updater.ObjectUpdater(conf)
|
||||
daemon.logger = FakeLogger()
|
||||
daemon = object_updater.ObjectUpdater(conf, logger=self.logger)
|
||||
path = daemon._listdir('foo/bar/')
|
||||
log_lines = daemon.logger.get_lines_for_level('error')
|
||||
log_lines = self.logger.get_lines_for_level('error')
|
||||
self.assertEqual(len(log_lines), 0)
|
||||
self.assertEqual(path, ['foo', 'bar'])
|
||||
|
||||
@ -252,9 +250,9 @@ class TestObjectUpdater(unittest.TestCase):
|
||||
# a warning indicating that the '99' policy isn't valid
|
||||
check_with_idx('99', 1, should_skip=True)
|
||||
|
||||
@mock.patch.object(object_updater, 'ismount')
|
||||
def test_run_once_with_disk_unmounted(self, mock_ismount):
|
||||
mock_ismount.return_value = False
|
||||
@mock.patch.object(object_updater, 'check_drive')
|
||||
def test_run_once_with_disk_unmounted(self, mock_check_drive):
|
||||
mock_check_drive.return_value = False
|
||||
ou = object_updater.ObjectUpdater({
|
||||
'devices': self.devices_dir,
|
||||
'mount_check': 'false',
|
||||
@ -267,8 +265,12 @@ class TestObjectUpdater(unittest.TestCase):
|
||||
os.mkdir(async_dir)
|
||||
ou.run_once()
|
||||
self.assertTrue(os.path.exists(async_dir))
|
||||
# mount_check == False means no call to ismount
|
||||
self.assertEqual([], mock_ismount.mock_calls)
|
||||
# each run calls check_device
|
||||
self.assertEqual([
|
||||
mock.call(self.devices_dir, 'sda1', False),
|
||||
mock.call(self.devices_dir, 'sda1', False),
|
||||
], mock_check_drive.mock_calls)
|
||||
mock_check_drive.reset_mock()
|
||||
|
||||
ou = object_updater.ObjectUpdater({
|
||||
'devices': self.devices_dir,
|
||||
@ -283,15 +285,14 @@ class TestObjectUpdater(unittest.TestCase):
|
||||
ou.run_once()
|
||||
self.assertTrue(os.path.exists(async_dir))
|
||||
self.assertTrue(os.path.exists(odd_dir)) # skipped - not mounted!
|
||||
# mount_check == True means ismount was checked
|
||||
self.assertEqual([
|
||||
mock.call(self.sda1),
|
||||
], mock_ismount.mock_calls)
|
||||
mock.call(self.devices_dir, 'sda1', True),
|
||||
], mock_check_drive.mock_calls)
|
||||
self.assertEqual(ou.logger.get_increment_counts(), {'errors': 1})
|
||||
|
||||
@mock.patch.object(object_updater, 'ismount')
|
||||
def test_run_once(self, mock_ismount):
|
||||
mock_ismount.return_value = True
|
||||
@mock.patch.object(object_updater, 'check_drive')
|
||||
def test_run_once(self, mock_check_drive):
|
||||
mock_check_drive.return_value = True
|
||||
ou = object_updater.ObjectUpdater({
|
||||
'devices': self.devices_dir,
|
||||
'mount_check': 'false',
|
||||
@ -304,8 +305,12 @@ class TestObjectUpdater(unittest.TestCase):
|
||||
os.mkdir(async_dir)
|
||||
ou.run_once()
|
||||
self.assertTrue(os.path.exists(async_dir))
|
||||
# mount_check == False means no call to ismount
|
||||
self.assertEqual([], mock_ismount.mock_calls)
|
||||
# each run calls check_device
|
||||
self.assertEqual([
|
||||
mock.call(self.devices_dir, 'sda1', False),
|
||||
mock.call(self.devices_dir, 'sda1', False),
|
||||
], mock_check_drive.mock_calls)
|
||||
mock_check_drive.reset_mock()
|
||||
|
||||
ou = object_updater.ObjectUpdater({
|
||||
'devices': self.devices_dir,
|
||||
@ -319,11 +324,10 @@ class TestObjectUpdater(unittest.TestCase):
|
||||
os.mkdir(odd_dir)
|
||||
ou.run_once()
|
||||
self.assertTrue(os.path.exists(async_dir))
|
||||
self.assertTrue(not os.path.exists(odd_dir))
|
||||
# mount_check == True means ismount was checked
|
||||
self.assertFalse(os.path.exists(odd_dir))
|
||||
self.assertEqual([
|
||||
mock.call(self.sda1),
|
||||
], mock_ismount.mock_calls)
|
||||
mock.call(self.devices_dir, 'sda1', True),
|
||||
], mock_check_drive.mock_calls)
|
||||
|
||||
ohash = hash_path('a', 'c', 'o')
|
||||
odir = os.path.join(async_dir, ohash[-3:])
|
||||
|
@ -1352,6 +1352,35 @@ class TestReplicatedObjController(BaseObjectControllerMixin,
|
||||
resp = req.get_response(self.app)
|
||||
self.assertEqual(resp.status_int, 404)
|
||||
|
||||
def test_GET_not_found_when_404_newer(self):
|
||||
# if proxy receives a 404, it keeps waiting for other connections until
|
||||
# max number of nodes in hopes of finding an object, but if 404 is
|
||||
# more recent than a 200, then it should ignore 200 and return 404
|
||||
req = swift.common.swob.Request.blank('/v1/a/c/o')
|
||||
codes = [404] * self.obj_ring.replicas + \
|
||||
[200] * self.obj_ring.max_more_nodes
|
||||
ts_iter = iter([2] * self.obj_ring.replicas +
|
||||
[1] * self.obj_ring.max_more_nodes)
|
||||
with set_http_connect(*codes, timestamps=ts_iter):
|
||||
resp = req.get_response(self.app)
|
||||
self.assertEqual(resp.status_int, 404)
|
||||
|
||||
def test_GET_x_newest_not_found_when_404_newer(self):
|
||||
# if proxy receives a 404, it keeps waiting for other connections until
|
||||
# max number of nodes in hopes of finding an object, but if 404 is
|
||||
# more recent than a 200, then it should ignore 200 and return 404
|
||||
req = swift.common.swob.Request.blank('/v1/a/c/o',
|
||||
headers={'X-Newest': 'true'})
|
||||
codes = ([200] +
|
||||
[404] * self.obj_ring.replicas +
|
||||
[200] * (self.obj_ring.max_more_nodes - 1))
|
||||
ts_iter = iter([1] +
|
||||
[2] * self.obj_ring.replicas +
|
||||
[1] * (self.obj_ring.max_more_nodes - 1))
|
||||
with set_http_connect(*codes, timestamps=ts_iter):
|
||||
resp = req.get_response(self.app)
|
||||
self.assertEqual(resp.status_int, 404)
|
||||
|
||||
def test_PUT_delete_at(self):
|
||||
t = str(int(time.time() + 100))
|
||||
req = swob.Request.blank('/v1/a/c/o', method='PUT', body='',
|
||||
|
@ -9375,6 +9375,24 @@ class TestAccountControllerFakeGetResponse(unittest.TestCase):
|
||||
resp = req.get_response(self.app)
|
||||
self.assertEqual(406, resp.status_int)
|
||||
|
||||
def test_GET_autocreate_bad_accept(self):
|
||||
with save_globals():
|
||||
set_http_connect(*([404] * 100)) # nonexistent: all backends 404
|
||||
req = Request.blank('/v1/a', headers={"Accept": "a/b;q=nope"},
|
||||
environ={'REQUEST_METHOD': 'GET',
|
||||
'PATH_INFO': '/v1/a'})
|
||||
resp = req.get_response(self.app)
|
||||
self.assertEqual(400, resp.status_int)
|
||||
self.assertEqual('Invalid Accept header', resp.body)
|
||||
|
||||
set_http_connect(*([404] * 100)) # nonexistent: all backends 404
|
||||
req = Request.blank('/v1/a', headers={"Accept": "a/b;q=0.5;q=1"},
|
||||
environ={'REQUEST_METHOD': 'GET',
|
||||
'PATH_INFO': '/v1/a'})
|
||||
resp = req.get_response(self.app)
|
||||
self.assertEqual(400, resp.status_int)
|
||||
self.assertEqual('Invalid Accept header', resp.body)
|
||||
|
||||
def test_GET_autocreate_format_invalid_utf8(self):
|
||||
with save_globals():
|
||||
set_http_connect(*([404] * 100)) # nonexistent: all backends 404
|
||||
|
Loading…
Reference in New Issue
Block a user