Merge branch 'master' into feature/hummingbird
Change-Id: I29d820e434e6e03bb11f07d88f686c3db99286ed
commit 93ddaffaeb

@@ -1,9 +1,11 @@
#!/bin/bash

SRC_DIR=$(python -c "import os; print os.path.dirname(os.path.realpath('$0'))")

set -e

cd ${SRC_DIR}/test/functional
nosetests --exe $@
cd ${SRC_DIR}
export TESTS_DIR=${SRC_DIR}/test/functional
ostestr --serial --pretty $@
rvalue=$?
cd -

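As a point of reference, the new runner is roughly equivalent to the following manual invocation from the top of a Swift checkout (a sketch; it assumes os-testr and python-subunit are installed, and SWIFT_TEST_DEBUG_LOGS is only consumed via the .testr.conf added below):

export TESTS_DIR="$(pwd)/test/functional"
SWIFT_TEST_DEBUG_LOGS=1 ostestr --serial --pretty    # same flags the script passes
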
@@ -16,4 +16,6 @@ pycscope.*
.idea
MANIFEST

.testrepository/*
subunit.log
test/probe/.noseids

.mailmap
@@ -58,7 +58,7 @@ Madhuri Kumari <madhuri.rai07@gmail.com> madhuri <madhuri@madhuri-VirtualBox.(no
Morgan Fainberg <morgan.fainberg@gmail.com> <m@metacloud.com>
Hua Zhang <zhuadl@cn.ibm.com> <zhuadl@cn.ibm.com>
Yummy Bian <yummy.bian@gmail.com> <yummy.bian@gmail.com>
Alistair Coles <alistair.coles@hp.com> <alistair.coles@hp.com>
Alistair Coles <alistair.coles@hpe.com> <alistair.coles@hp.com>
Tong Li <litong01@us.ibm.com> <litong01@us.ibm.com>
Paul Luse <paul.e.luse@intel.com> <paul.e.luse@intel.com>
Yuan Zhou <yuan.zhou@intel.com> <yuan.zhou@intel.com>
@@ -66,9 +66,9 @@ Jola Mirecka <jola.mirecka@hp.com> <jola.mirecka@hp.com>
Ning Zhang <ning@zmanda.com> <ning@zmanda.com>
Mauro Stettler <mauro.stettler@gmail.com> <mauro.stettler@gmail.com>
Pawel Palucki <pawel.palucki@gmail.com> <pawel.palucki@gmail.com>
Guang Yee <guang.yee@hp.com> <guang.yee@hp.com>
Guang Yee <guang.yee@hpe.com> <guang.yee@hp.com>
Jing Liuqing <jing.liuqing@99cloud.net> <jing.liuqing@99cloud.net>
Lorcan Browne <lorcan.browne@hp.com> <lorcan.browne@hp.com>
Lorcan Browne <lorcan.browne@hpe.com> <lorcan.browne@hp.com>
Eohyung Lee <liquidnuker@gmail.com> <liquid@kt.com>
Harshit Chitalia <harshit@acelio.com> <harshit@acelio.com>
Richard Hawkins <richard.hawkins@rackspace.com>
@@ -83,3 +83,7 @@ Atsushi Sakai <sakaia@jp.fujitsu.com>
Takashi Natsume <natsume.takashi@lab.ntt.co.jp>
Nakagawa Masaaki <nakagawamsa@nttdata.co.jp> nakagawamsa
Romain Le Disez <romain.ledisez@ovh.net> Romain LE DISEZ
Donagh McCabe <donagh.mccabe@hpe.com> <donagh.mccabe@hp.com>
Eamonn O'Toole <eamonn.otoole@hpe.com> <eamonn.otoole@hp.com>
Gerry Drudy <gerry.drudy@hpe.com> <gerry.drudy@hp.com>
Mark Seger <mark.seger@hpe.com> <mark.seger@hp.com>

@@ -0,0 +1,4 @@
[DEFAULT]
test_command=SWIFT_TEST_DEBUG_LOGS=${SWIFT_TEST_DEBUG_LOGS} ${PYTHON:-python} -m subunit.run discover -t ./ ${TESTS_DIR:-./test/functional/} $LISTOPT $IDOPTION
test_id_option=--load-list $IDFILE
test_list_option=--list

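This new .testr.conf is what ostestr drives under the hood. Substituting testr's placeholders by hand, the list phase expands to roughly the following (a sketch, assuming PYTHON and TESTS_DIR are unset so the defaults apply):

python -m subunit.run discover -t ./ ./test/functional/ --list
# the run phase swaps --list for: --load-list <idfile>
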
@@ -20,7 +20,7 @@ from hashlib import md5
import getopt
from itertools import chain

import simplejson
import json
from eventlet.greenpool import GreenPool
from eventlet.event import Event
from six.moves.urllib.parse import quote
@@ -176,7 +176,7 @@ class Auditor(object):
                        break
                    if node['id'] not in responses:
                        responses[node['id']] = dict(resp.getheaders())
                    results = simplejson.loads(resp.read())
                    results = json.loads(resp.read())
                except Exception:
                    self.container_exceptions += 1
                    consistent = False
@@ -249,7 +249,7 @@ class Auditor(object):
                                  " from %ss:%ss" %
                                  (account, node['ip'], node['device']))
                        break
                    results = simplejson.loads(resp.read())
                    results = json.loads(resp.read())
                except Exception:
                    self.account_exceptions += 1
                    consistent = False

@@ -14,15 +14,12 @@
# See the License for the specific language governing permissions and
# limitations under the License.

import json
from collections import defaultdict
from six.moves.configparser import ConfigParser
from optparse import OptionParser
from sys import exit, stdout, stderr
from time import time
try:
    import simplejson as json
except ImportError:
    import json

from eventlet import GreenPool, hubs, patcher, Timeout
from eventlet.pools import Pool

@@ -20,7 +20,21 @@ from optparse import OptionParser
from swift.common.manager import Manager, UnknownCommandError, \
    KILL_WAIT, RUN_DIR

USAGE = """%prog <server>[.config] [<server>[.config] ...] <command> [options]
USAGE = \
    """%prog <server>[.<config>] [<server>[.<config>] ...] <command> [options]

where:
    <server>  is the name of a swift service e.g. proxy-server.
              The '-server' part of the name may be omitted.
    <config>  is an explicit configuration filename without the
              .conf extension. If <config> is specified then <server> should
              refer to a directory containing the configuration file, e.g.:

                  swift-init object.1 start

              will start an object-server using the configuration file
              /etc/swift/object-server/1.conf
    <command> is a command from the list below.

Commands:
""" + '\n'.join(["%16s: %s" % x for x in Manager.list_commands()])

@@ -50,6 +64,16 @@ def main():
                      dest="run_dir", default=RUN_DIR,
                      help="alternative directory to store running pid files "
                           "default: %s" % RUN_DIR)
    # Changing behaviour if missing config
    parser.add_option('--strict', dest='strict', action='store_true',
                      help="Return non-zero status code if some config is "
                           "missing. Default mode if all servers are "
                           "explicitly named.")
    # a negative option for strict
    parser.add_option('--non-strict', dest='strict', action='store_false',
                      help="Return zero status code even if some config is "
                           "missing. Default mode if any server is a glob or "
                           "one of aliases `all`, `main` or `rest`.")

    options, args = parser.parse_args()

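The help text above translates into invocations like these (paths follow the /etc/swift layout described in the usage string):

swift-init object.1 start             # uses /etc/swift/object-server/1.conf
swift-init proxy object.1 status      # all servers named explicitly: strict by default
swift-init all start --non-strict     # glob/alias: missing configs do not fail the command
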
@@ -57,6 +57,8 @@ IP address the account server should bind to. The default is 0.0.0.0 which will make
it bind to all available addresses.
.IP "\fBbind_port\fR"
TCP port the account server should bind to. The default is 6002.
.IP "\fBbind_timeout\fR"
Timeout to bind socket. The default is 30.
.IP \fBbacklog\fR
TCP backlog. Maximum number of allowed pending connections. The default value is 4096.
.IP \fBworkers\fR

@@ -79,12 +81,46 @@ Parent directory or where devices are mounted. Default is /srv/node.
.IP \fBmount_check\fR
Whether or not to check if the devices are mounted to prevent accidentally writing to
the root device. The default is set to true.
.IP \fBdisable_fallocate\fR
Disable pre-allocation of disk space for a file. The default is false.
.IP \fBlog_name\fR
Label used when logging. The default is swift.
.IP \fBlog_facility\fR
Syslog log facility. The default is LOG_LOCAL0.
.IP \fBlog_level\fR
Logging level. The default is INFO.
.IP \fBlog_address\fR
Logging address. The default is /dev/log.
.IP \fBlog_max_line_length\fR
Caps the length of log lines to the value given; no limit if set to 0, the default.
.IP \fBlog_custom_handlers\fR
Comma separated list of functions to call to set up custom log handlers.
Functions get passed: conf, name, log_to_console, log_route, fmt, logger,
adapted_logger. The default is empty.
.IP \fBlog_udp_host\fR
If set, log_udp_host will override log_address.
.IP \fBlog_udp_port\fR
UDP log port. The default is 514.
.IP "\fBlog_statsd_host\fR = localhost"
Setting log_statsd_host enables StatsD logging via the log_statsd_* options.
.IP \fBlog_statsd_port\fR
The default is 8125.
.IP \fBlog_statsd_default_sample_rate\fR
The default is 1.
.IP \fBlog_statsd_sample_rate_factor\fR
The default is 1.
.IP \fBlog_statsd_metric_prefix\fR
The default is empty.
.IP \fBdb_preallocation\fR
If you don't mind the extra disk space usage in overhead, you can turn this
on to preallocate disk space with SQLite databases to decrease fragmentation.
The default is false.
.IP \fBeventlet_debug\fR
Debug mode for eventlet library. The default is false.
.IP \fBfallocate_reserve\fR
You can set fallocate_reserve to the number of bytes you'd like fallocate to
reserve, whether there is space for the given file size or not. The default is 0.
.RE
.PD

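Purely as an illustration of the [DEFAULT] options above (all values invented), a minimal account-server configuration could be seeded like this:

cat > /etc/swift/account-server.conf <<'EOF'    # hypothetical path and values
[DEFAULT]
bind_ip = 0.0.0.0
bind_port = 6002
workers = 2
log_facility = LOG_LOCAL0
log_statsd_host = localhost
log_statsd_port = 8125
fallocate_reserve = 0
EOF
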
@@ -117,12 +153,21 @@ This is normally \fBegg:swift#account\fR.
Label used when logging. The default is account-server.
.IP "\fBset log_facility\fR
Syslog log facility. The default is LOG_LOCAL0.
.IP "\fB set log_level\fR
.IP "\fBset log_level\fR
Logging level. The default is INFO.
.IP "\fB set log_requests\fR
.IP "\fBset log_requests\fR
Enables request logging. The default is True.
.IP "\fB set log_address\fR
.IP "\fBset log_address\fR
Logging address. The default is /dev/log.
.IP "\fBauto_create_account_prefix\fR
The default is ".".
.IP "\fBreplication_server\fR
Configure parameter for creating specific server.
To handle all verbs, including replication verbs, do not specify
"replication_server" (this is the default). To only handle replication,
set to a true value (e.g. "true" or "1"). To handle only non-replication
verbs, set to "false". Unless you have a separate replication network, you
should not specify any value for "replication_server". The default is empty.
.RE
.PD

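A sketch of the split the replication_server text describes, for a cluster with a separate replication network (file names invented):

# public-facing account-server: non-replication verbs only
cat > /etc/swift/account-server/public.conf <<'EOF'
[app:account-server]
use = egg:swift#account
replication_server = false
EOF
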
@@ -158,6 +203,36 @@ and ensure that swift has read/write. The default is /var/cache/swift.
.RE
.PD

.RS 0
.IP "\fB[filter:xprofile]\fR"
.RS 3
.IP "\fBuse\fR"
Entry point for paste.deploy for the xprofile middleware. This is the reference to the installed python egg.
This is normally \fBegg:swift#xprofile\fR.
.IP "\fBprofile_module\fR"
This option enables you to switch profilers, which should inherit from the
Python standard profiler. Currently the supported values are 'cProfile',
'eventlet.green.profile', etc.
.IP "\fBlog_filename_prefix\fR"
This prefix will be combined with the process ID and a timestamp to name the
profile data file. Make sure the executing user has permission to write
into this path (missing path segments will be created, if necessary).
If you enable profiling in more than one type of daemon, you must override
it with a unique value for each. The default is /var/log/swift/profile/account.profile.
.IP "\fBdump_interval\fR"
The profile data will be dumped to local disk based on the above naming rule
at this interval. The default is 5.0.
.IP "\fBdump_timestamp\fR"
Be careful: this option makes the profiler dump data into timestamped files,
which means lots of files will pile up in the directory. The default is false.
.IP "\fBpath\fR"
This is the path of the URL to access the mini web UI. The default is __profile__.
.IP "\fBflush_at_shutdown\fR"
Clear the data when the WSGI server shuts down. The default is false.
.IP "\fBunwind\fR"
Unwind the iterator of applications. The default is false.
.RE
.PD

.SH ADDITIONAL SECTIONS

@@ -177,7 +252,7 @@ Logging level. The default is INFO.
.IP \fBlog_address\fR
Logging address. The default is /dev/log.
.IP \fBper_diff\fR
The default is 1000.
Maximum number of database rows that will be sync'd in a single HTTP replication request. The default is 1000.
.IP \fBmax_diffs\fR
This caps how long the replicator will spend trying to sync a given database per pass so the other databases don't get starved. The default is 100.
.IP \fBconcurrency\fR

@@ -193,6 +268,15 @@ Connection timeout to external services. The default is 0.5 seconds.
.IP \fBreclaim_age\fR
Time elapsed in seconds before an account can be reclaimed. The default is
604800 seconds.
.IP \fBrsync_compress\fR
Allow rsync to compress data which is transmitted to the destination node
during sync. However, this is applicable only when the destination node is in
a different region than the local one. The default is false.
.IP \fBrsync_module\fR
Format of the rsync module where the replicator will send data. See
etc/rsyncd.conf-sample for some usage examples.
.IP \fBrecon_cache_path\fR
Path to recon cache directory. The default is /var/cache/swift.
.RE

@@ -213,6 +297,8 @@ Logging address. The default is /dev/log.
Will audit, at most, 1 account per device per interval. The default is 1800 seconds.
.IP \fBaccounts_per_second\fR
Maximum accounts audited per second. Should be tuned according to individual system specs. 0 is unlimited. The default is 200.
.IP \fBrecon_cache_path\fR
Path to recon cache directory. The default is /var/cache/swift.
.RE

@@ -237,6 +323,18 @@ Minimum time for a pass to take. The default is 3600 seconds.
Request timeout to external services. The default is 10 seconds.
.IP \fBconn_timeout\fR
Connection timeout to external services. The default is 0.5 seconds.
.IP \fBdelay_reaping\fR
Normally, the reaper begins deleting account information for deleted accounts
immediately; you can set this to delay its work, however. The value is in
seconds. The default is 0.
.IP \fBreap_warn_after\fR
If the account fails to be reaped due to a persistent error, the
account reaper will log a message such as:
    Account <name> has not been reaped since <date>
You can search logs for this message if space is not being reclaimed
after you delete account(s).
The default is 2592000 seconds (30 days). This is in addition to any time
requested by delay_reaping.
.RE
.PD

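For example, delaying the reaper as described above might look like this (values invented):

cat >> /etc/swift/account-server.conf <<'EOF'    # hypothetical snippet
[account-reaper]
delay_reaping = 86400         # wait a day before reaping deleted accounts
reap_warn_after = 2592000     # warn if still unreaped after a further 30 days
EOF
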
@@ -57,6 +57,8 @@ IP address the container server should bind to. The default is 0.0.0.0 which will make
it bind to all available addresses.
.IP "\fBbind_port\fR"
TCP port the container server should bind to. The default is 6001.
.IP "\fBbind_timeout\fR"
Timeout to bind socket. The default is 30.
.IP \fBbacklog\fR
TCP backlog. Maximum number of allowed pending connections. The default value is 4096.
.IP \fBworkers\fR

@@ -70,6 +72,12 @@ concurrent requests.
Maximum number of clients one worker can process simultaneously (it will
actually accept(2) N + 1). Setting this to one (1) will only handle one request
at a time, without accepting another request concurrently. The default is 1024.
.IP \fBallowed_sync_hosts\fR
This is a comma separated list of hosts allowed in the X-Container-Sync-To
field for containers. This is the old style of using container sync. It is
strongly recommended to use the new style of a separate
container-sync-realms.conf -- see container-sync-realms.conf-sample.
The default is:
    allowed_sync_hosts = 127.0.0.1
.IP \fBuser\fR
The system user that the container server will run as. The default is swift.
.IP \fBswift_dir\fR

@@ -79,6 +87,8 @@ Parent directory or where devices are mounted. Default is /srv/node.
.IP \fBmount_check\fR
Whether or not to check if the devices are mounted to prevent accidentally writing to
the root device. The default is set to true.
.IP \fBdisable_fallocate\fR
Disable pre-allocation of disk space for a file. The default is false.
.IP \fBlog_name\fR
Label used when logging. The default is swift.
.IP \fBlog_facility\fR

@@ -87,6 +97,36 @@ Syslog log facility. The default is LOG_LOCAL0.
Logging level. The default is INFO.
.IP \fBlog_address\fR
Logging address. The default is /dev/log.
.IP \fBlog_max_line_length\fR
Caps the length of log lines to the value given; no limit if set to 0, the default.
.IP \fBlog_custom_handlers\fR
Comma separated list of functions to call to set up custom log handlers.
Functions get passed: conf, name, log_to_console, log_route, fmt, logger,
adapted_logger. The default is empty.
.IP \fBlog_udp_host\fR
If set, log_udp_host will override log_address.
.IP \fBlog_udp_port\fR
UDP log port. The default is 514.
.IP "\fBlog_statsd_host\fR = localhost"
Setting log_statsd_host enables StatsD logging via the log_statsd_* options.
.IP \fBlog_statsd_port\fR
The default is 8125.
.IP \fBlog_statsd_default_sample_rate\fR
The default is 1.
.IP \fBlog_statsd_sample_rate_factor\fR
The default is 1.
.IP \fBlog_statsd_metric_prefix\fR
The default is empty.
.IP \fBdb_preallocation\fR
If you don't mind the extra disk space usage in overhead, you can turn this
on to preallocate disk space with SQLite databases to decrease fragmentation.
The default is false.
.IP \fBeventlet_debug\fR
Debug mode for eventlet library. The default is false.
.IP \fBfallocate_reserve\fR
You can set fallocate_reserve to the number of bytes you'd like fallocate to
reserve, whether there is space for the given file size or not. The default is 0.
.RE
.PD

@@ -129,6 +169,17 @@ Logging address. The default is /dev/log.
Request timeout to external services. The default is 3 seconds.
.IP \fBconn_timeout\fR
Connection timeout to external services. The default is 0.5 seconds.
.IP \fBallow_versions\fR
The default is false.
.IP \fBauto_create_account_prefix\fR
The default is '.'.
.IP \fBreplication_server\fR
Configure parameter for creating specific server.
To handle all verbs, including replication verbs, do not specify
"replication_server" (this is the default). To only handle replication,
set to a True value (e.g. "True" or "1"). To handle only non-replication
verbs, set to "False". Unless you have a separate replication network, you
should not specify any value for "replication_server".
.RE
.PD

@@ -164,6 +215,36 @@ and ensure that swift has read/write. The default is /var/cache/swift.
.RE
.PD

.RS 0
.IP "\fB[filter:xprofile]\fR"
.RS 3
.IP "\fBuse\fR"
Entry point for paste.deploy for the xprofile middleware. This is the reference to the installed python egg.
This is normally \fBegg:swift#xprofile\fR.
.IP "\fBprofile_module\fR"
This option enables you to switch profilers, which should inherit from the
Python standard profiler. Currently the supported values are 'cProfile',
'eventlet.green.profile', etc.
.IP "\fBlog_filename_prefix\fR"
This prefix will be combined with the process ID and a timestamp to name the
profile data file. Make sure the executing user has permission to write
into this path (missing path segments will be created, if necessary).
If you enable profiling in more than one type of daemon, you must override
it with a unique value for each. The default is /var/log/swift/profile/account.profile.
.IP "\fBdump_interval\fR"
The profile data will be dumped to local disk based on the above naming rule
at this interval. The default is 5.0.
.IP "\fBdump_timestamp\fR"
Be careful: this option makes the profiler dump data into timestamped files,
which means lots of files will pile up in the directory. The default is false.
.IP "\fBpath\fR"
This is the path of the URL to access the mini web UI. The default is __profile__.
.IP "\fBflush_at_shutdown\fR"
Clear the data when the WSGI server shuts down. The default is false.
.IP "\fBunwind\fR"
Unwind the iterator of applications. The default is false.
.RE
.PD

.SH ADDITIONAL SECTIONS

@@ -182,8 +263,8 @@ Syslog log facility. The default is LOG_LOCAL0.
Logging level. The default is INFO.
.IP \fBlog_address\fR
Logging address. The default is /dev/log.
.IP \fBer_diff\fR
The default is 1000.
.IP \fBper_diff\fR
Maximum number of database rows that will be sync'd in a single HTTP replication request. The default is 1000.
.IP \fBmax_diffs\fR
This caps how long the replicator will spend trying to sync a given database per pass so the other databases don't get starved. The default is 100.
.IP \fBconcurrency\fR

@@ -199,6 +280,15 @@ Connection timeout to external services. The default is 0.5 seconds.
.IP \fBreclaim_age\fR
Time elapsed in seconds before a container can be reclaimed. The default is
604800 seconds.
.IP \fBrsync_compress\fR
Allow rsync to compress data which is transmitted to the destination node
during sync. However, this is applicable only when the destination node is in
a different region than the local one. The default is false.
.IP \fBrsync_module\fR
Format of the rsync module where the replicator will send data. See
etc/rsyncd.conf-sample for some usage examples.
.IP \fBrecon_cache_path\fR
Path to recon cache directory. The default is /var/cache/swift.
.RE

@@ -226,6 +316,8 @@ Connection timeout to external services. The default is 0.5 seconds.
Slowdown will sleep that amount between containers. The default is 0.01 seconds.
.IP \fBaccount_suppression_time\fR
Seconds to suppress updating an account that has generated an error. The default is 60 seconds.
.IP \fBrecon_cache_path\fR
Path to recon cache directory. The default is /var/cache/swift.
.RE
.PD

@@ -246,6 +338,8 @@ Logging address. The default is /dev/log.
Will audit, at most, 1 container per device per interval. The default is 1800 seconds.
.IP \fBcontainers_per_second\fR
Maximum containers audited per second. Should be tuned according to individual system specs. 0 is unlimited. The default is 200.
.IP \fBrecon_cache_path\fR
Path to recon cache directory. The default is /var/cache/swift.
.RE

@@ -268,8 +362,10 @@ If you need to use an HTTP Proxy, set it here; defaults to no proxy.
Will audit, at most, each container once per interval. The default is 300 seconds.
.IP \fBcontainer_time\fR
Maximum amount of time to spend syncing each container per pass. The default is 60 seconds.
.IP \fBrequest_retries\fR
Server errors from requests will be retried by default.
.IP \fBconn_timeout\fR
Connection timeout to external services. The default is 5 seconds.
.IP \fBrequest_tries\fR
Server errors from requests will be retried by default. The default is 3.
.IP \fBinternal_client_conf_path\fR
Internal client config file path.
.RE

@@ -50,14 +50,22 @@ Project name in case of keystone auth version 3
Project domain name in case of keystone auth version 3
.IP "\fBuser_domain_name\fR"
User domain name in case of keystone auth version 3
.IP "\fBendpoint_type\fR"
The default is 'publicURL'.
.IP "\fBkeystone_api_insecure\fR"
The default is false.
.IP "\fBswift_dir\fR"
Location of openstack-swift configuration and ring files
.IP "\fBdispersion_coverage\fR"
Percentage of partition coverage to use. The default is 1.0.
.IP "\fBretries\fR"
Maximum number of attempts
Maximum number of attempts. The default is 5.
.IP "\fBconcurrency\fR"
Concurrency to use. The default is 25.
.IP "\fBcontainer_populate\fR"
The default is true.
.IP "\fBobject_populate\fR"
The default is true.
.IP "\fBdump_json\fR"
Whether to output in json format. The default is no.
.IP "\fBcontainer_report\fR"

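For illustration, a minimal dispersion.conf exercising the options above might read (credentials invented):

cat > /etc/swift/dispersion.conf <<'EOF'    # hypothetical values
[dispersion]
auth_url = http://localhost:8080/auth/v1.0
auth_user = test:tester
auth_key = testing
dispersion_coverage = 1.0
retries = 5
concurrency = 25
dump_json = no
EOF
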
@@ -65,6 +65,27 @@ Syslog log facility. The default is LOG_LOCAL0.
Logging level. The default is INFO.
.IP \fBlog_address\fR
Logging address. The default is /dev/log.
.IP \fBlog_max_line_length\fR
Caps the length of log lines to the value given; no limit if set to 0, the default.
.IP \fBlog_custom_handlers\fR
Comma separated list of functions to call to set up custom log handlers.
Functions get passed: conf, name, log_to_console, log_route, fmt, logger,
adapted_logger. The default is empty.
.IP \fBlog_udp_host\fR
If set, log_udp_host will override log_address.
.IP \fBlog_udp_port\fR
UDP log port. The default is 514.
.IP "\fBlog_statsd_host\fR = localhost"
Setting log_statsd_host enables StatsD logging via the log_statsd_* options.
.IP \fBlog_statsd_port\fR
The default is 8125.
.IP \fBlog_statsd_default_sample_rate\fR
The default is 1.
.IP \fBlog_statsd_sample_rate_factor\fR
The default is 1.
.IP \fBlog_statsd_metric_prefix\fR
The default is empty.
.RE
.PD

@@ -126,9 +147,59 @@ Entry point for paste.deploy for the catch_errors middleware. This is the reference
The default is \fBegg:swift#catch_errors\fR. See proxy-server.conf-sample for options or see the proxy-server.conf manpage.
.RE

.RS 0
.IP "\fB[filter:proxy-logging]\fR"
.RE

Logging for the proxy server now lives in this middleware.
If the access_* variables are not set, logging directives from [DEFAULT]
without "access_" will be used.

.RS 3
.IP \fBuse\fR
Entry point for paste.deploy for the proxy_logging middleware. This is the reference to the installed python egg.
This is normally \fBegg:swift#proxy_logging\fR. See proxy-server.conf-sample for options or see the proxy-server.conf manpage.
.RE

.PD

.SH ADDITIONAL SECTIONS
.PD 1
.RS 0
The following sections are used by other swift-account services, such as the replicator,
auditor and reaper.
.IP "\fB[account-replicator]\fR"
.RE
.RS 3
.IP \fBinterval\fR
Replaces run_pause with the more standard "interval", which means the replicator won't pause unless it takes less than the interval set. The default is 300.
.IP "\fBauto_create_account_prefix\fR
The default is ".".
.IP \fBexpiring_objects_account_name\fR
The default is 'expiring_objects'.
.IP \fBreport_interval\fR
The default is 300 seconds.
.IP \fBconcurrency\fR
Number of replication workers to spawn. The default is 1.
.IP \fBprocesses\fR
Processes is how many parts to divide the work into, one part per process that will be doing the work.
A processes value of 0 means that a single process will do all the work.
Processes can also be specified on the command line and will override the config value.
The default is 0.
.IP \fBprocess\fR
Process is which of the parts a particular process will work on. Process can also be specified
on the command line and will override the config value. Process is "zero based"; if you want
to use 3 processes, you should run processes with process set to 0, 1, and 2. The default is 0.
.IP \fBreclaim_age\fR
The expirer will re-attempt expiring if the source object is not available
up to reclaim_age seconds before it gives up and deletes the entry in the
queue. The default is 604800 seconds.
.IP \fBrecon_cache_path\fR
Path to recon cache directory. The default is /var/cache/swift.
.RE
.PD

.SH DOCUMENTATION
.LP

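To make the processes/process split concrete: the text above says both can be given on the command line, so spreading one daemon's work over three hosts would look roughly like this (daemon name and paths illustrative):

swift-object-expirer /etc/swift/object-expirer.conf --processes 3 --process 0   # host 1
swift-object-expirer /etc/swift/object-expirer.conf --processes 3 --process 1   # host 2
swift-object-expirer /etc/swift/object-expirer.conf --processes 3 --process 2   # host 3
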
@@ -33,7 +33,7 @@
.SH DESCRIPTION
.PP
This is the configuration file used by the object server and other object
background services, such as; replicator, updater and auditor.
background services, such as; replicator, reconstructor, updater and auditor.

The configuration file follows the python-pastedeploy syntax. The file is divided
into sections, which are enclosed by square brackets. Each section will contain a

@@ -57,6 +57,8 @@ IP address the object server should bind to. The default is 0.0.0.0 which will make
it bind to all available addresses.
.IP "\fBbind_port\fR"
TCP port the object server should bind to. The default is 6000.
.IP "\fBbind_timeout\fR"
Timeout to bind socket. The default is 30.
.IP \fBbacklog\fR
TCP backlog. Maximum number of allowed pending connections. The default value is 4096.
.IP \fBworkers\fR

@@ -79,6 +81,17 @@ Parent directory or where devices are mounted. Default is /srv/node.
.IP \fBmount_check\fR
Whether or not to check if the devices are mounted to prevent accidentally writing to
the root device. The default is set to true.
.IP \fBdisable_fallocate\fR
Disable pre-allocation of disk space for a file. The default is false.
.IP \fBexpiring_objects_container_divisor\fR
The default is 86400.
.IP \fBexpiring_objects_account_name\fR
The default is 'expiring_objects'.
.IP \fBservers_per_port\fR
Make object-server run this many worker processes per unique port of
"local" ring devices across all storage policies. This can help provide
the isolation of threads_per_disk without the severe overhead. The default
value of 0 disables this feature.
.IP \fBlog_name\fR
Label used when logging. The default is swift.
.IP \fBlog_facility\fR

@@ -87,6 +100,45 @@ Syslog log facility. The default is LOG_LOCAL0.
Logging level. The default is INFO.
.IP \fBlog_address\fR
Logging address. The default is /dev/log.
.IP \fBlog_max_line_length\fR
Caps the length of log lines to the value given; no limit if set to 0, the default.
.IP \fBlog_custom_handlers\fR
Comma separated list of functions to call to set up custom log handlers.
Functions get passed: conf, name, log_to_console, log_route, fmt, logger,
adapted_logger. The default is empty.
.IP \fBlog_udp_host\fR
If set, log_udp_host will override log_address.
.IP \fBlog_udp_port\fR
UDP log port. The default is 514.
.IP "\fBlog_statsd_host\fR = localhost"
Setting log_statsd_host enables StatsD logging via the log_statsd_* options.
.IP \fBlog_statsd_port\fR
The default is 8125.
.IP \fBlog_statsd_default_sample_rate\fR
The default is 1.
.IP \fBlog_statsd_sample_rate_factor\fR
The default is 1.
.IP \fBlog_statsd_metric_prefix\fR
The default is empty.
.IP \fBeventlet_debug\fR
Debug mode for eventlet library. The default is false.
.IP \fBfallocate_reserve\fR
You can set fallocate_reserve to the number of bytes you'd like fallocate to
reserve, whether there is space for the given file size or not. The default is 0.
.IP \fBnode_timeout\fR
Request timeout to external services. The default is 3 seconds.
.IP \fBconn_timeout\fR
Connection timeout to external services. The default is 0.5 seconds.
.IP \fBcontainer_update_timeout\fR
Time to wait while sending a container update on object update. The default is 1 second.
.IP \fBclient_timeout\fR
Time to wait while receiving each chunk of data from a client or another
backend node. The default is 60.
.IP \fBnetwork_chunk_size\fR
The default is 65536.
.IP \fBdisk_chunk_size\fR
The default is 65536.
.RE
.PD

@@ -115,22 +167,68 @@ that are acceptable within this section.
.IP "\fBuse\fR"
Entry point for paste.deploy for the object server. This is the reference to the installed python egg.
This is normally \fBegg:swift#object\fR.
.IP "\fBset log_name\fR
.IP "\fBset log_name\fR"
Label used when logging. The default is object-server.
.IP "\fBset log_facility\fR
.IP "\fBset log_facility\fR"
Syslog log facility. The default is LOG_LOCAL0.
.IP "\fB set log_level\fR
.IP "\fBset log_level\fR"
Logging level. The default is INFO.
.IP "\fB set log_requests\fR
.IP "\fBset log_requests\fR"
Enables request logging. The default is True.
.IP "\fB set log_address\fR
.IP "\fBset log_address\fR"
Logging address. The default is /dev/log.
.IP \fBnode_timeout\fR
Request timeout to external services. The default is 3 seconds.
.IP \fBconn_timeout\fR
Connection timeout to external services. The default is 0.5 seconds.
.IP \fBcontainer_update_timeout\fR
Time to wait while sending a container update on object update. The default is 1 second.
.IP "\fBmax_upload_time\fR"
The default is 86400.
.IP "\fBslow\fR"
The default is 0.
.IP "\fBkeep_cache_size\fR"
Objects smaller than this are not evicted from the buffercache once read. The default is 5242880.
.IP "\fBkeep_cache_private\fR"
If true, objects for authenticated GET requests may be kept in buffer cache
if small enough. The default is false.
.IP "\fBmb_per_sync\fR"
On PUTs, sync data every n MB. The default is 512.
.IP "\fBallowed_headers\fR"
Comma separated list of headers that can be set in metadata on an object.
This list is in addition to X-Object-Meta-* headers and cannot include Content-Type, etag, Content-Length, or deleted.
The default is 'Content-Disposition, Content-Encoding, X-Delete-At, X-Object-Manifest, X-Static-Large-Object'.
.IP "\fBauto_create_account_prefix\fR"
The default is '.'.
.IP "\fBthreads_per_disk\fR"
A value of 0 means "don't use thread pools". A reasonable starting point is
4. The default is 0.
.IP "\fBreplication_server\fR"
Configure parameter for creating specific server.
To handle all verbs, including replication verbs, do not specify
"replication_server" (this is the default). To only handle replication,
set to a True value (e.g. "True" or "1"). To handle only non-replication
verbs, set to "False". Unless you have a separate replication network, you
should not specify any value for "replication_server".
.IP "\fBreplication_concurrency\fR"
Set to restrict the number of concurrent incoming REPLICATION requests.
Set to 0 for unlimited (the default is 4). Note that REPLICATION is currently an ssync-only item.
.IP "\fBreplication_one_per_device\fR"
Restricts incoming REPLICATION requests to one per device,
replication_concurrency above allowing. This can help control I/O to each
device, but you may wish to set this to False to allow multiple REPLICATION
requests (up to the above replication_concurrency setting) per device. The default is true.
.IP "\fBreplication_lock_timeout\fR"
Number of seconds to wait for an existing replication device lock before
giving up. The default is 15.
.IP "\fBreplication_failure_threshold\fR"
.IP "\fBreplication_failure_ratio\fR"
These two settings control when the REPLICATION subrequest handler will
abort an incoming REPLICATION attempt. An abort will occur if there are at
least threshold number of failures and the value of failures / successes
exceeds the ratio. The defaults of 100 and 1.0 mean that at least 100
failures have to occur and there have to be more failures than successes for
an abort to occur.
.IP "\fBsplice\fR"
Use splice() for zero-copy object GETs. This requires Linux kernel
version 3.0 or greater. If you set "splice = yes" but the kernel
does not support it, error messages will appear in the object server
logs at startup, but your object servers should continue to function.
The default is false.
.RE
.PD

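As a purely illustrative sketch, enabling the zero-copy GET path and the ssync REPLICATION limits described above might look like:

cat >> /etc/swift/object-server.conf <<'EOF'    # hypothetical values
[app:object-server]
use = egg:swift#object
splice = yes                      # needs Linux >= 3.0; logs errors and carries on otherwise
replication_concurrency = 4
replication_one_per_device = true
EOF
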
@@ -164,9 +262,41 @@ This is normally \fBegg:swift#recon\fR.
The recon_cache_path simply sets the directory where stats for a few items will be stored.
Depending on the method of deployment you may need to create this directory manually
and ensure that swift has read/write. The default is /var/cache/swift.
.IP "\fBrecon_lock_path\fR"
The default is /var/lock.
.RE
.PD

.RS 0
.IP "\fB[filter:xprofile]\fR"
.RS 3
.IP "\fBuse\fR"
Entry point for paste.deploy for the xprofile middleware. This is the reference to the installed python egg.
This is normally \fBegg:swift#xprofile\fR.
.IP "\fBprofile_module\fR"
This option enables you to switch profilers, which should inherit from the
Python standard profiler. Currently the supported values are 'cProfile',
'eventlet.green.profile', etc.
.IP "\fBlog_filename_prefix\fR"
This prefix will be combined with the process ID and a timestamp to name the
profile data file. Make sure the executing user has permission to write
into this path (missing path segments will be created, if necessary).
If you enable profiling in more than one type of daemon, you must override
it with a unique value for each. The default is /var/log/swift/profile/account.profile.
.IP "\fBdump_interval\fR"
The profile data will be dumped to local disk based on the above naming rule
at this interval. The default is 5.0.
.IP "\fBdump_timestamp\fR"
Be careful: this option makes the profiler dump data into timestamped files,
which means lots of files will pile up in the directory. The default is false.
.IP "\fBpath\fR"
This is the path of the URL to access the mini web UI. The default is __profile__.
.IP "\fBflush_at_shutdown\fR"
Clear the data when the WSGI server shuts down. The default is false.
.IP "\fBunwind\fR"
Unwind the iterator of applications. The default is false.
.RE
.PD

.SH ADDITIONAL SECTIONS

@@ -195,10 +325,26 @@ Time in seconds to wait between replication passes. The default is 30.
Number of replication workers to spawn. The default is 1.
.IP \fBstats_interval\fR
Interval in seconds between logging replication statistics. The default is 300.
.IP \fBsync_method\fR
The sync method to use; default is rsync but you can use ssync to try the
EXPERIMENTAL all-swift-code-no-rsync-callouts method. Once ssync is verified
as having performance comparable to, or better than, rsync, we plan to
deprecate rsync so we can move on with more features for replication.
.IP \fBrsync_timeout\fR
Max duration of a partition rsync. The default is 900 seconds.
.IP \fBrsync_io_timeout\fR
Passed to rsync for I/O OP timeout. The default is 30 seconds.
.IP \fBrsync_compress\fR
Allow rsync to compress data which is transmitted to the destination node
during sync. However, this is applicable only when the destination node is in
a different region than the local one.
NOTE: Objects that are already compressed (for example: .tar.gz, .mp3) might
slow down the syncing process. The default is false.
.IP \fBrsync_module\fR
Format of the rsync module where the replicator will send data. See
etc/rsyncd.conf-sample for some usage examples. The default is empty.
.IP \fBnode_timeout\fR
Request timeout to external services. The default is 10 seconds.
.IP \fBrsync_bwlimit\fR
Passed to rsync for bandwidth limit in kB/s. The default is 0 (unlimited).
.IP \fBhttp_timeout\fR

@@ -206,18 +352,87 @@ Max duration of an HTTP request. The default is 60 seconds.
.IP \fBlockup_timeout\fR
Attempts to kill all workers if nothing replicates for lockup_timeout seconds. The
default is 1800 seconds.
.IP \fBring_check_interval\fR
The default is 15.
.IP \fBrsync_error_log_line_length\fR
Limits how long rsync error log lines are. 0 (the default) means to log the entire line.
.IP \fBreclaim_age\fR
Time elapsed in seconds before an object can be reclaimed. The default is
604800 seconds.
.IP \fBrecon_enable\fR
Enable logging of replication stats for recon. The default is on.
.IP "\fBrecon_cache_path\fR"
The recon_cache_path simply sets the directory where stats for a few items will be stored.
Depending on the method of deployment you may need to create this directory manually
and ensure that swift has read/write. The default is /var/cache/swift.
.IP "\fBhandoffs_first\fR"
The flag to replicate handoffs prior to canonical partitions.
It allows one to force syncing and deleting handoffs quickly.
If set to a True value (e.g. "True" or "1"), partitions
that are not supposed to be on the node will be replicated first.
The default is false.
.IP "\fBhandoff_delete\fR"
The number of replicas which are ensured in swift.
If a number less than the number of replicas is set, the object-replicator
could delete local handoffs even if all replicas are not ensured in the
cluster. The object-replicator will remove local handoff partition directories
after syncing a partition when the number of successful responses is greater
than or equal to this number. By default (auto), handoff partitions will be
removed when they have successfully replicated to all the canonical nodes.

The handoffs_first and handoff_delete options are for a special case
such as disks filling up in the cluster. These two options SHOULD NOT BE
CHANGED, except for such extreme situations (e.g. disks filled up
or are about to fill up; anyway, DO NOT let your drives fill up).
.RE

.RS 0
.IP "\fB[object-reconstructor]\fR"
.RE
.RS 3
.IP \fBlog_name\fR
Label used when logging. The default is object-reconstructor.
.IP \fBlog_facility\fR
Syslog log facility. The default is LOG_LOCAL0.
.IP \fBlog_level\fR
Logging level. The default is INFO.
.IP \fBlog_address\fR
Logging address. The default is /dev/log.
.IP \fBdaemonize\fR
Whether or not to run replication as a daemon. The default is yes.
.IP "\fBrun_pause [deprecated]\fR"
Time in seconds to wait between replication passes. The default is 30.
.IP \fBinterval\fR
Time in seconds to wait between replication passes. The default is 30.
.IP \fBconcurrency\fR
Number of replication workers to spawn. The default is 1.
.IP \fBstats_interval\fR
Interval in seconds between logging replication statistics. The default is 300.
.IP \fBnode_timeout\fR
Request timeout to external services. The default is 10 seconds.
.IP \fBhttp_timeout\fR
Max duration of an HTTP request. The default is 60 seconds.
.IP \fBlockup_timeout\fR
Attempts to kill all workers if nothing replicates for lockup_timeout seconds. The
default is 1800 seconds.
.IP \fBring_check_interval\fR
The default is 15.
.IP \fBreclaim_age\fR
Time elapsed in seconds before an object can be reclaimed. The default is
604800 seconds.
.IP "\fBrecon_cache_path\fR"
The recon_cache_path simply sets the directory where stats for a few items will be stored.
Depending on the method of deployment you may need to create this directory manually
and ensure that swift has read/write. The default is /var/cache/swift.
.IP "\fBhandoffs_first\fR"
The flag to replicate handoffs prior to canonical partitions.
It allows one to force syncing and deleting handoffs quickly.
If set to a True value (e.g. "True" or "1"), partitions
that are not supposed to be on the node will be replicated first.
The default is false.
.RE
.PD

.RS 0
.IP "\fB[object-updater]\fR"
.RE

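For the disk-full emergency those paragraphs describe, a temporary override might be sketched as follows (values invented; revert once the cluster recovers):

cat >> /etc/swift/object-server.conf <<'EOF'    # hypothetical emergency override
[object-replicator]
handoffs_first = True     # push handoff partitions off the full disks first
handoff_delete = 2        # drop local handoffs after 2 successful responses
EOF
swift-init object-replicator restart
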
@@ -236,10 +451,12 @@ Minimum time for a pass to take. The default is 300 seconds.
Number of updater workers to spawn. The default is 1.
.IP \fBnode_timeout\fR
Request timeout to external services. The default is 10 seconds.
.IP \fBconn_timeout\fR
Connection timeout to external services. The default is 0.5 seconds.
.IP \fBslowdown\fR
Slowdown will sleep that amount between objects. The default is 0.01 seconds.
.IP "\fBrecon_cache_path\fR"
The recon_cache_path simply sets the directory where stats for a few items will be stored.
Depending on the method of deployment you may need to create this directory manually
and ensure that swift has read/write. The default is /var/cache/swift.
.RE
.PD

@@ -257,16 +474,28 @@ Logging level. The default is INFO.
.IP \fBlog_address\fR
Logging address. The default is /dev/log.

.IP \fBdisk_chunk_size\fR
The default is 65536.
.IP \fBfiles_per_second\fR
Maximum files audited per second. Should be tuned according to individual
system specs. 0 is unlimited. The default is 20.
.IP \fBbytes_per_second\fR
Maximum bytes audited per second. Should be tuned according to individual
system specs. 0 is unlimited. The default is 10000000.
.IP \fBconcurrency\fR
Number of auditor workers to spawn. The default is 1.
.IP \fBlog_time\fR
The default is 3600 seconds.
.IP \fBzero_byte_files_per_second\fR
The default is 50.
.IP "\fBrecon_cache_path\fR"
The recon_cache_path simply sets the directory where stats for a few items will be stored.
Depending on the method of deployment you may need to create this directory manually
and ensure that swift has read/write. The default is /var/cache/swift.
.IP \fBobject_size_stats\fR
Takes a comma separated list of ints. If set, the object auditor will
increment a counter for every object whose size is <= to the given break
points and report the result after a full scan.
.RE

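For instance, to bucket audited objects into the size histogram described above, a hypothetical setting could be:

cat >> /etc/swift/object-server.conf <<'EOF'    # illustrative break points, in bytes
[object-auditor]
object_size_stats = 1024,10240,102400,1048576
EOF
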
@@ -56,8 +56,21 @@ IP address the proxy server should bind to. The default is 0.0.0.0 which will make
it bind to all available addresses.
.IP "\fBbind_port\fR"
TCP port the proxy server should bind to. The default is 80.
.IP "\fBbind_timeout\fR"
Timeout to bind socket. The default is 30.
.IP \fBbacklog\fR
TCP backlog. Maximum number of allowed pending connections. The default value is 4096.
.IP \fBadmin_key\fR
Key to use for admin calls that are HMAC signed. The default is empty,
which will disable admin calls to /info.
.IP \fBdisallowed_sections\fR
Allows the ability to withhold sections from showing up in the public calls
to /info. You can withhold subsections by separating the dict level with a
".". The following would cause the sections 'container_quotas' and 'tempurl'
to not be listed, and the key max_failed_deletes to be removed from
bulk_delete. The default value is 'swift.valid_api_versions', which allows all
registered features to be listed via HTTP GET /info except the
swift.valid_api_versions information.
.IP \fBworkers\fR
The number of pre-forked processes that will accept connections. Zero means
no fork. The default is auto, which will make the server try to match the
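The example the disallowed_sections text alludes to would be written like this:

cat >> /etc/swift/proxy-server.conf <<'EOF'
[DEFAULT]
disallowed_sections = container_quotas, tempurl, bulk_delete.max_failed_deletes
EOF
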
@@ -71,6 +84,8 @@ actually accept(2) N + 1). Setting this to one (1) will only handle one request
at a time, without accepting another request concurrently. The default is 1024.
.IP \fBuser\fR
The system user that the proxy server will run as. The default is swift.
.IP \fBexpose_info\fR
Enables exposing configuration settings via HTTP GET /info. The default is true.
.IP \fBswift_dir\fR
Swift configuration directory. The default is /etc/swift.
.IP \fBcert_file\fR

@@ -79,6 +94,10 @@ disabled by default.
.IP \fBkey_file\fR
Location of the SSL certificate key file. The default path is /etc/swift/proxy.key. This is
disabled by default.
.IP \fBexpiring_objects_container_divisor\fR
The default is 86400.
.IP \fBexpiring_objects_account_name\fR
The default is 'expiring_objects'.
.IP \fBlog_name\fR
Label used when logging. The default is swift.
.IP \fBlog_facility\fR

@@ -87,10 +106,41 @@ Syslog log facility. The default is LOG_LOCAL0.
Logging level. The default is INFO.
.IP \fBlog_address\fR
Logging address. The default is /dev/log.
.IP \fBlog_max_line_length\fR
Caps the length of log lines to the value given; no limit if set to 0, the default.
.IP \fBlog_headers\fR
The default is false.
.IP \fBlog_custom_handlers\fR
Comma separated list of functions to call to set up custom log handlers.
Functions get passed: conf, name, log_to_console, log_route, fmt, logger,
adapted_logger. The default is empty.
.IP \fBlog_udp_host\fR
If set, log_udp_host will override log_address.
.IP \fBlog_udp_port\fR
UDP log port. The default is 514.
.IP "\fBlog_statsd_host\fR = localhost"
Setting log_statsd_host enables StatsD logging via the log_statsd_* options.
.IP \fBlog_statsd_port\fR
The default is 8125.
.IP \fBlog_statsd_default_sample_rate\fR
The default is 1.
.IP \fBlog_statsd_sample_rate_factor\fR
The default is 1.
.IP \fBlog_statsd_metric_prefix\fR
The default is empty.
.IP \fBclient_timeout\fR
Time to wait while receiving each chunk of data from a client or another
backend node. The default is 60.
.IP \fBeventlet_debug\fR
Debug mode for eventlet library. The default is false.
.IP \fBtrans_id_suffix\fR
This optional suffix (default is empty), appended to the swift transaction
id, allows one to easily figure out which cluster an X-Trans-Id belongs to.
This is very useful when one is managing more than one swift cluster.
.IP \fBcors_allow_origin\fR
Use a comma separated list of full URLs (http://foo.bar:1234,https://foo.bar).
.IP \fBstrict_cors_mode\fR
The default is true.
.RE
.PD

@@ -104,8 +154,13 @@ are acceptable within this section.

.IP "\fBpipeline\fR"
It is used when you need to apply a number of filters. It is a list of filters
ended by an application. The normal pipeline is "catch_errors healthcheck
cache ratelimit tempauth proxy-logging proxy-server".
ended by an application. The normal pipeline is "catch_errors gatekeeper healthcheck proxy-logging cache container_sync bulk tempurl ratelimit tempauth container-quotas account-quotas slo dlo versioned_writes proxy-logging proxy-server".

Note: The double proxy-logging in the pipeline is not a mistake. The
left-most proxy-logging is there to log requests that were handled in
middleware and never made it through to the right-most middleware (and
proxy server). Double logging is prevented for normal requests. See the
proxy-logging docs.
.RE
.PD

@@ -127,6 +182,7 @@ This is normally \fBegg:swift#healthcheck\fR.
An optional filesystem path which, if present, will cause the healthcheck
URL to return "503 Service Unavailable" with a body of "DISABLED BY FILE".
.RE
.PD

.RS 0
@@ -154,11 +210,28 @@ systems are in use for one Swift cluster. The default is AUTH.
.IP \fBauth_prefix\fR
The auth prefix will cause requests beginning with this prefix to be routed
to the auth subsystem, for granting tokens, etc. The default is /auth/.
.IP \fBrequire_group\fR
The require_group parameter names a group that must be presented by
either X-Auth-Token or X-Service-Token. Usually this parameter is
used only with multiple reseller prefixes (e.g., SERVICE_require_group=blah).
By default, no group is needed. Do not use .admin.
.IP \fBtoken_life\fR
This is the time in seconds before the token expires. The default is 86400.
.IP \fBallow_overrides\fR
This allows middleware higher in the WSGI pipeline to override auth
processing, useful for middleware such as tempurl and formpost. If you know
you're not going to use such middleware and you want a bit of extra security,
you can set this to false. The default is true.
.IP \fBstorage_url_scheme\fR
This specifies what scheme to return with storage URLs:
http, https, or default (chooses based on what the server is running as).
This can be useful with an SSL load balancer in front of a non-SSL server.
.IP \fBuser_<account>_<user>\fR
Lastly, you need to list all the accounts/users you want here. The format is:
    user_<account>_<user> = <key> [group] [group] [...] [storage_url]
or if you want underscores in <account> or <user>, you can base64 encode them
(with no equal signs) and use this format:
    user64_<account_b64>_<user_b64> = <key> [group] [group] [...] [storage_url]

There are special groups of: \fI.reseller_admin\fR who can do anything to any account for this auth
and also \fI.admin\fR who can do anything within the account.
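Concretely, the well-known test entries from proxy-server.conf-sample illustrate the format (test keys, not production secrets):

cat >> /etc/swift/proxy-server.conf <<'EOF'
[filter:tempauth]
user_admin_admin = admin .admin .reseller_admin
user_test_tester = testing .admin
user_test2_tester2 = testing2 .admin
user_test_tester3 = testing3
EOF
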
@ -184,6 +257,107 @@ Here are example entries, required for running the tests:
|
|||
.RE
|
||||
.PD
|
||||
|
||||
.RS 0
|
||||
.IP "\fB[filter:authtoken]\fR"
|
||||
.RE
|
||||
|
||||
To enable Keystone authentication you need to have the auth token
|
||||
middleware first to be configured. Here is an example below, please
|
||||
refer to the keystone's documentation for details about the
|
||||
different settings.
|
||||
|
||||
You'll need to have as well the keystoneauth middleware enabled
|
||||
and have it in your main pipeline so instead of having tempauth in
|
||||
there you can change it to: authtoken keystoneauth
|
||||
|
||||
.PD 0
|
||||
.RS 10
|
||||
.IP "paste.filter_factory = keystonemiddleware.auth_token:filter_factory"
|
||||
.IP "identity_uri = http://keystonehost:35357/"
|
||||
.IP "auth_uri = http://keystonehost:5000/"
|
||||
.IP "admin_tenant_name = service"
|
||||
.IP "admin_user = swift"
|
||||
.IP "admin_password = password"
|
||||
.IP ""
|
||||
.IP "# delay_auth_decision defaults to False, but leaving it as false will"
|
||||
.IP "# prevent other auth systems, staticweb, tempurl, formpost, and ACLs from"
|
||||
.IP "# working. This value must be explicitly set to True."
|
||||
.IP "delay_auth_decision = False"
|
||||
.IP
|
||||
.IP "cache = swift.cache"
|
||||
.IP "include_service_catalog = False"
|
||||
.RE
|
||||
.PD
|
||||
|
||||
|
||||
.RS 0
|
||||
.IP "\fB[filter:keystoneauth]\fR"
|
||||
.RE
|
||||
|
||||
Keystone authentication middleware.
|
||||
|
||||
.RS 3
|
||||
.IP \fBuse\fR
|
||||
Entry point for paste.deploy for the keystoneauth middleware. This is the reference to the installed python egg.
|
||||
This is normally \fBegg:swift#keystoneauth\fR.
|
||||
.IP \fBreseller_prefix\fR
|
||||
The reseller_prefix option lists account namespaces that this middleware is
|
||||
responsible for. The prefix is placed before the Keystone project id.
|
||||
For example, for project 12345678, and prefix AUTH, the account is
|
||||
named AUTH_12345678 (i.e., path is /v1/AUTH_12345678/...).
|
||||
Several prefixes are allowed by specifying a comma-separated list
|
||||
as in: "reseller_prefix = AUTH, SERVICE". The empty string indicates a
|
||||
single blank/empty prefix. If an empty prefix is required in a list of
|
||||
prefixes, a value of '' (two single quote characters) indicates a
|
||||
blank/empty prefix. Except for the blank/empty prefix, an underscore ('_')
|
||||
character is appended to the value unless already present.
|
||||
.IP \fBoperator_roles\fR
|
||||
The user must have at least one role named by operator_roles on a
|
||||
project in order to create, delete and modify containers and objects
|
||||
and to set and read privileged headers such as ACLs.
|
||||
If there are several reseller prefix items, you can prefix the
|
||||
parameter so it applies only to those accounts (for example
|
||||
the parameter SERVICE_operator_roles applies to the /v1/SERVICE_<project>
|
||||
path). If you omit the prefix, the option applies to all reseller
|
||||
prefix items. For the blank/empty prefix, prefix with '' (do not put
|
||||
underscore after the two single quote characters).
|
||||
.IP \fBreseller_admin_role\fR
|
||||
The reseller admin role has the ability to create and delete accounts.
|
||||
.IP \fBallow_overrides\fR
|
||||
This allows middleware higher in the WSGI pipeline to override auth
|
||||
processing, useful for middleware such as tempurl and formpost. If you know
|
||||
you're not going to use such middleware and you want a bit of extra security,
|
||||
you can set this to false.
|
||||
.IP \fBis_admin [DEPRECATED]\fR
|
||||
If is_admin is true, a user whose username is the same as the project name
|
||||
and who has any role on the project will have access rights elevated to be
|
||||
the same as if the user had an operator role. Note that the condition
|
||||
compares names rather than UUIDs. This option is deprecated.
|
||||
.IP \fBservice_roles\fR
|
||||
If the service_roles parameter is present, an X-Service-Token must be
|
||||
present in the request that when validated, grants at least one role listed
|
||||
in the parameter. The X-Service-Token may be scoped to any project.
|
||||
If there are several reseller prefix items, you can prefix the
|
||||
parameter so it applies only to those accounts (for example
|
||||
the parameter SERVICE_service_roles applies to the /v1/SERVICE_<project>
|
||||
path). If you omit the prefix, the option applies to all reseller
|
||||
prefix items. For the blank/empty prefix, prefix with '' (do not put
|
||||
underscore after the two single quote characters).
|
||||
By default, no service_roles are required.
|
||||
.IP \fBdefault_domain_id\fR
|
||||
For backwards compatibility, keystoneauth will match names in cross-tenant
|
||||
access control lists (ACLs) when both the requesting user and the tenant
|
||||
are in the default domain, i.e. the domain to which existing tenants are
|
||||
migrated. The default_domain_id value configured here should be the same as
|
||||
the value used during migration of tenants to keystone domains.
|
||||
.IP \fBallow_names_in_acls\fR
|
||||
For a new installation, or an installation in which keystone projects may
|
||||
move between domains, you should disable backwards compatible name matching
|
||||
in ACLs by setting allow_names_in_acls to false.
|
||||
.RE
|
||||
.PD
|
||||
|
||||
|
||||
.RS 0
|
||||
.IP "\fB[filter:cache]\fR"
|
||||
.RE
|
||||
|
@ -202,8 +376,10 @@ Syslog log facility. The default is LOG_LOCAL0.
|
|||
Logging level. The default is INFO.
|
||||
.IP "\fBset log_address\fR"
|
||||
Logging address. The default is /dev/log.
|
||||
.IP "\fBset log_headers\fR "
|
||||
.IP "\fBset log_headers\fR"
|
||||
Enables the ability to log request headers. The default is False.
|
||||
.IP \fBmemcache_max_connections\fR
|
||||
Sets the maximum number of connections to each memcached server per worker.
|
||||
.IP \fBmemcache_servers\fR
|
||||
If not set in the configuration file, the value for memcache_servers will be
|
||||
read from /etc/swift/memcache.conf (see memcache.conf-sample) or lacking that
|
||||
|
@ -225,7 +401,7 @@ To avoid an instant full cache flush, existing installations should upgrade with
|
|||
|
||||
If not set in the configuration file, the value for memcache_serialization_support will be read from /etc/swift/memcache.conf if it exists (see memcache.conf-sample). Otherwise, the default value as indicated above will be used.
|
||||
.RE
|
||||
|
||||
.PD
|
||||
|
||||
|
||||
.RS 0
|
||||
|
@ -268,14 +444,20 @@ in requests per second. If set to 0 means disabled. The default is 0.
|
|||
.IP \fBcontainer_ratelimit_size\fR
|
||||
When set as container_ratelimit_x = r: for containers of size x, limit requests
per second to r. This will limit PUT, DELETE, and POST requests to /a/c/o. The default is ''.
|
||||
.IP \fBcontainer_listing_ratelimit_size\fR
|
||||
Similar to the above container-level write limits, container_listing_ratelimit_x
limits container GET (listing) requests; see the example below.
|
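For illustration only (the sizes and rates below are hypothetical), containers holding 100 or more objects could be capped like this:

container_ratelimit_100 = 100
container_listing_ratelimit_100 = 50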
||||
.RE
|
||||
.PD
|
||||
|
||||
|
||||
|
||||
.RS 0
|
||||
.IP "\fB[filter:domain_remap]\fR"
|
||||
.RE
|
||||
|
||||
Middleware that translates container and account parts of a domain to path parameters that the proxy server understands. The container.account.storageurl/object gets translated to container.account.storageurl/path_root/account/container/object and account.storageurl/path_root/container/object gets translated to account.storageurl/path_root/account/container/object
|
||||
Middleware that translates container and account parts of a domain to path parameters that the proxy server understands.
|
||||
The container.account.storageurl/object gets translated to container.account.storageurl/path_root/account/container/object and account.storageurl/path_root/container/object gets translated to account.storageurl/path_root/account/container/object
|
||||
|
||||
.RS 3
|
||||
.IP \fBuse\fR
|
||||
|
@ -283,9 +465,13 @@ Entry point for paste.deploy for the domain_remap middleware. This is the refere
|
|||
This is normally \fBegg:swift#domain_remap\fR.
|
||||
.IP "\fBset log_name\fR"
|
||||
Label used when logging. The default is domain_remap.
|
||||
.IP "\fBset log_facility\fR"
|
||||
Syslog log facility. The default is LOG_LOCAL0.
|
||||
.IP "\fBset log_level\fR "
|
||||
Logging level. The default is INFO.
|
||||
.IP "\fBset log_address\fR"
|
||||
Logging address. The default is /dev/log.
|
||||
.IP "\fBset log_headers\fR"
|
||||
.IP "\fBset log_headers\fR "
|
||||
Enables the ability to log request headers. The default is False.
|
||||
.IP \fBstorage_domain\fR
|
||||
The domain to be used by the middleware.
|
||||
|
@ -304,7 +490,7 @@ Defaults to 'AUTH'.
|
|||
The default reseller prefix. This is used when none of the configured
|
||||
reseller_prefixes match. When not set, no reseller prefix is added.
|
||||
.RE
|
||||
|
||||
.PD
|
||||
|
||||
|
||||
.RS 0
|
||||
|
@ -325,7 +511,7 @@ Logging address. The default is /dev/log.
|
|||
.IP "\fBset log_headers\fR"
|
||||
Enables the ability to log request headers. The default is False.
|
||||
.RE
|
||||
|
||||
.PD
|
||||
|
||||
|
||||
.RS 0
|
||||
|
@ -354,7 +540,7 @@ The domain to be used by the middleware.
|
|||
How deep in the CNAME chain to look for something that matches the storage domain.
|
||||
The default is 1.
|
||||
.RE
|
||||
|
||||
.PD
|
||||
|
||||
|
||||
.RS 0
|
||||
|
@ -367,8 +553,6 @@ Note: Put staticweb just after your auth filter(s) in the pipeline
|
|||
.IP \fBuse\fR
|
||||
Entry point for paste.deploy for the staticweb middleware. This is the reference to the installed python egg.
|
||||
This is normally \fBegg:swift#staticweb\fR.
|
||||
.IP \fBcache_timeout\fR
|
||||
Seconds to cache container x-container-meta-web-* header values. The default is 300 seconds.
|
||||
.IP "\fBset log_name\fR"
|
||||
Label used when logging. The default is staticweb.
|
||||
.IP "\fBset log_facility\fR"
|
||||
|
@ -379,14 +563,8 @@ Logging level. The default is INFO.
|
|||
Logging address. The default is /dev/log.
|
||||
.IP "\fBset log_headers\fR"
|
||||
Enables the ability to log request headers. The default is False.
|
||||
.IP "\fBset access_log_name\fR"
|
||||
Label used when logging. The default is staticweb.
|
||||
.IP "\fBset access_log_facility\fR"
|
||||
Syslog log facility. The default is LOG_LOCAL0.
|
||||
.IP "\fBset access_log_level\fR "
|
||||
Logging level. The default is INFO.
|
||||
.RE
|
||||
|
||||
.PD
|
||||
|
||||
|
||||
.RS 0
|
||||
|
@ -396,6 +574,11 @@ Logging level. The default is INFO.
|
|||
Note: Put tempurl before slo, dlo, and your auth filter(s) in the pipeline
|
||||
|
||||
.RS 3
|
||||
.IP \fBuse\fR
|
||||
Entry point for paste.deploy for the tempurl middleware. This is the reference to the installed python egg.
|
||||
This is normally \fBegg:swift#tempurl\fR.
|
||||
.IP \fBmethods\fR
|
||||
The methods allowed with Temp URLs. The default is 'GET HEAD PUT POST DELETE'.
|
||||
.IP \fBincoming_remove_headers\fR
|
||||
The headers to remove from incoming requests. Simply a whitespace delimited list of header names and names can optionally end with '*' to indicate a prefix match. incoming_allow_headers is a list of exceptions to these removals.
|
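As a sketch (the x-remove-* header name is hypothetical), a whitespace-delimited list using a prefix match could look like:

incoming_remove_headers = x-timestamp x-remove-*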
||||
.IP \fBincoming_allow_headers\fR
|
||||
|
@ -404,9 +587,8 @@ The headers allowed as exceptions to incoming_remove_headers. Simply a whitespac
|
|||
The headers to remove from outgoing responses. Simply a whitespace delimited list of header names and names can optionally end with '*' to indicate a prefix match. outgoing_allow_headers is a list of exceptions to these removals.
|
||||
.IP "\fBoutgoing_allow_headers\fR"
|
||||
The headers allowed as exceptions to outgoing_remove_headers. Simply a whitespace delimited list of header names and names can optionally end with '*' to indicate a prefix match.
|
||||
.IP "\fBset log_level\fR "
|
||||
.RE
|
||||
|
||||
.PD
|
||||
|
||||
|
||||
.RS 0
|
||||
|
@ -420,6 +602,7 @@ Note: Put formpost just before your auth filter(s) in the pipeline
|
|||
Entry point for paste.deploy for the formpost middleware. This is the reference to the installed python egg.
|
||||
This is normally \fBegg:swift#formpost\fR.
|
||||
.RE
|
||||
.PD
|
||||
|
||||
|
||||
|
||||
|
@ -434,12 +617,25 @@ Note: Just needs to be placed before the proxy-server in the pipeline.
|
|||
Entry point for paste.deploy for the name_check middleware. This is the reference to the installed python egg.
|
||||
This is normally \fBegg:swift#name_check\fR.
|
||||
.IP \fBforbidden_chars\fR
|
||||
Characters that will not be allowed in a name.
|
||||
Characters that will not be allowed in a name. The default is '"`<>.
|
||||
.IP \fBmaximum_length\fR
|
||||
Maximum number of characters that can be in the name.
|
||||
Maximum number of characters that can be in the name. The default is 255.
|
||||
.IP \fBforbidden_regexp\fR
|
||||
Python regular expressions of substrings that will not be allowed in a name.
|
||||
Python regular expressions of substrings that will not be allowed in a name. The default is /\./|/\.\./|/\.$|/\.\.$.
|
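A minimal sketch of the filter section using the defaults described above:

[filter:name_check]
use = egg:swift#name_check
forbidden_chars = '"`<>
maximum_length = 255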
||||
.RE
|
||||
.PD
|
||||
|
||||
|
||||
.RS 0
|
||||
.IP "\fB[filter:list-endpoints]\fR"
|
||||
.RS 3
|
||||
.IP \fBuse\fR
|
||||
Entry point for paste.deploy for the list_endpoints middleware. This is the reference to the installed python egg.
|
||||
This is normally \fBegg:swift#list_endpoints\fR.
|
||||
.IP \fBlist_endpoints_path\fR
|
||||
The default is '/endpoints/'.
|
||||
.RE
|
||||
.PD
|
||||
|
||||
|
||||
.RS 0
|
||||
|
@ -474,20 +670,249 @@ Default is localhost.
|
|||
Default is 8125.
|
||||
.IP \fBaccess_log_statsd_default_sample_rate\fR
|
||||
Default is 1.
|
||||
.IP \fBaccess_log_statsd_sample_rate_factor\fR
|
||||
The default is 1.
|
||||
.IP \fBaccess_log_statsd_metric_prefix\fR
|
||||
Default is "" (empty-string)
|
||||
.IP \fBaccess_log_headers\fR
|
||||
Default is False.
|
||||
.IP \fBaccess_log_headers_only\fR
|
||||
If access_log_headers is True and access_log_headers_only is set, only
these headers are logged. Multiple headers can be defined as a comma-separated
list, for example: access_log_headers_only = Host, X-Object-Meta-Mtime
|
||||
.IP \fBreveal_sensitive_prefix\fR
|
||||
By default, the X-Auth-Token is logged. To obscure the value,
|
||||
set reveal_sensitive_prefix to the number of characters to log.
|
||||
For example, if set to 12, only the first 12 characters of the
|
||||
token appear in the log. An unauthorized access of the log file
|
||||
won't allow unauthorized usage of the token. However, the first
|
||||
12 or so characters are unique enough that you can trace/debug
|
||||
token usage. Set to 0 to suppress the token completely (replaced
|
||||
by '...' in the log). The default is 16 chars.
|
||||
Note: reveal_sensitive_prefix will not affect the value logged with access_log_headers=True.
|
||||
.IP \fBlog_statsd_valid_http_methods\fR
|
||||
What HTTP methods are allowed for StatsD logging (comma-sep); request methods
|
||||
not in this list will have "BAD_METHOD" for the <verb> portion of the metric.
|
||||
Default is "GET,HEAD,POST,PUT,DELETE,COPY,OPTIONS".
|
||||
.RE
|
||||
|
||||
|
||||
.PD
|
||||
|
||||
|
||||
.RS 0
|
||||
.IP "\fB[filter:bulk]\fR"
|
||||
.RE
|
||||
|
||||
Note: Put before both ratelimit and auth in the pipeline.
|
||||
|
||||
.RS 3
|
||||
.IP \fBuse\fR
|
||||
Entry point for paste.deploy for the bulk middleware. This is the reference to the installed python egg.
|
||||
This is normally \fBegg:swift#bulk\fR.
|
||||
.IP \fBmax_containers_per_extraction\fR
|
||||
The default is 10000.
|
||||
.IP \fBmax_failed_extractions\fR
|
||||
The default is 1000.
|
||||
.IP \fBmax_deletes_per_request\fR
|
||||
The default is 10000.
|
||||
.IP \fBmax_failed_deletes\fR
|
||||
The default is 1000.
|
||||
|
||||
In order to keep a connection active during a potentially long bulk request,
|
||||
Swift may return whitespace prepended to the actual response body. This
|
||||
whitespace will be yielded no more than every yield_frequency seconds.
|
||||
The default is 10.
|
||||
.IP \fByield_frequency\fR
|
||||
|
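For illustration only (the counts below are hypothetical), a bulk delete response might therefore arrive as a few keep-alive spaces followed by the JSON summary:

(one space character emitted up to every yield_frequency seconds)
{"Number Deleted": 2, "Number Not Found": 0, "Response Status": "200 OK", "Errors": []}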
||||
.IP \fBdelete_container_retry_count\fR
|
||||
Note: This parameter is used during a bulk delete of objects and
|
||||
their container. This would frequently fail because it is very likely
|
||||
that all replicated objects have not been deleted by the time the middleware got a
|
||||
successful response. It can be configured the number of retries. And the
|
||||
number of seconds to wait between each retry will be 1.5**retry
|
||||
The default is 0.
|
||||
.RE
|
||||
.PD
|
||||
|
||||
|
||||
.RS 0
|
||||
.IP "\fB[filter:slo]\fR"
|
||||
.RE
|
||||
|
||||
Note: Put after auth and staticweb in the pipeline.
|
||||
|
||||
.RS 3
|
||||
.IP \fBuse\fR
|
||||
Entry point for paste.deploy for the slo middleware. This is the reference to the installed python egg.
|
||||
This is normally \fBegg:swift#slo\fR.
|
||||
.IP \fBmax_manifest_segments\fR
|
||||
The default is 1000.
|
||||
.IP \fBmax_manifest_size\fR
|
||||
The default is 2097152.
|
||||
.IP \fBmin_segment_size\fR
|
||||
The default is 1048576
|
||||
.IP \fBrate_limit_after_segment\fR
|
||||
Start rate-limiting object segments after the Nth segment of a segmented
|
||||
object. The default is 10 segments.
|
||||
.IP \fBrate_limit_segments_per_sec\fR
|
||||
Once segment rate-limiting kicks in for an object, limit segments served to N
|
||||
per second. The default is 1.
|
||||
.IP \fBmax_get_time\fR
|
||||
Time limit on GET requests (seconds). The default is 86400.
|
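As a sketch of how a static large object is created against this middleware (URL, token, and segment values are hypothetical; note the segment size matches the min_segment_size default above):

curl -X PUT "$STORAGE_URL/container/bigobj?multipart-manifest=put" \
     -H "X-Auth-Token: $TOKEN" \
     -d '[{"path": "/segments/seg1", "etag": "etagoftheobjectsegment", "size_bytes": 1048576}]'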
||||
.RE
|
||||
.PD
|
||||
|
||||
|
||||
.RS 0
|
||||
.IP "\fB[filter:dlo]\fR"
|
||||
.RE
|
||||
|
||||
Note: Put after auth and staticweb in the pipeline.
|
||||
If you don't put it in the pipeline, it will be inserted for you.
|
||||
|
||||
.RS 3
|
||||
.IP \fBuse\fR
|
||||
Entry point for paste.deploy for the dlo middleware. This is the reference to the installed python egg.
|
||||
This is normally \fBegg:swift#dlo\fR.
|
||||
.IP \fBrate_limit_after_segment\fR
|
||||
Start rate-limiting object segments after the Nth segment of a segmented
|
||||
object. The default is 10 segments.
|
||||
.IP \fBrate_limit_segments_per_sec\fR
|
||||
Once segment rate-limiting kicks in for an object, limit segments served to N
|
||||
per second. The default is 1.
|
||||
.IP \fBmax_get_time\fR
|
||||
Time limit on GET requests (seconds). The default is 86400.
|
||||
.RE
|
||||
.PD
|
||||
|
||||
|
||||
.RS 0
|
||||
.IP "\fB[filter:container-quotas]\fR"
|
||||
.RE
|
||||
|
||||
Note: Put after auth in the pipeline.
|
||||
|
||||
.RS 3
|
||||
.IP \fBuse\fR
|
||||
Entry point for paste.deploy for the container_quotas middleware. This is the reference to the installed python egg.
|
||||
This is normally \fBegg:swift#container_quotas\fR.
|
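As a usage sketch (container name and value are hypothetical), a byte quota is then set as container metadata by an authorized user:

swift post -m quota-bytes:10000000000 mycontainer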
||||
.RE
|
||||
.PD
|
||||
|
||||
|
||||
.RS 0
|
||||
.IP "\fB[filter:account-quotas]\fR"
|
||||
.RE
|
||||
|
||||
Note: Put after auth in the pipeline.
|
||||
|
||||
.RS 3
|
||||
.IP \fBuse\fR
|
||||
Entry point for paste.deploy for the account_quotas middleware. This is the reference to the installed python egg.
|
||||
This is normally \fBegg:swift#account_quotas\fR.
|
||||
.RE
|
||||
.PD
|
||||
|
||||
|
||||
.RS 0
|
||||
.IP "\fB[filter:gatekeeper]\fR"
|
||||
.RE
|
||||
|
||||
Note: if the gatekeeper middleware is not explicitly configured in the pipeline, the proxy server inserts it automatically near the head of the pipeline.
|
||||
|
||||
.RS 3
|
||||
.IP \fBuse\fR
|
||||
Entry point for paste.deploy for the gatekeeper middleware. This is the reference to the installed python egg.
|
||||
This is normally \fBegg:swift#gatekeeper\fR.
|
||||
.IP "\fBset log_name\fR"
|
||||
Label used when logging. The default is gatekeeper.
|
||||
.IP "\fBset log_facility\fR"
|
||||
Syslog log facility. The default is LOG_LOCAL0.
|
||||
.IP "\fBset log_level\fR "
|
||||
Logging level. The default is INFO.
|
||||
.IP "\fBset log_address\fR"
|
||||
Logging address. The default is /dev/log.
|
||||
.IP "\fBset log_headers\fR"
|
||||
Enables the ability to log request headers. The default is False.
|
||||
.RE
|
||||
.PD
|
||||
|
||||
|
||||
.RS 0
|
||||
.IP "\fB[filter:container_sync]\fR"
|
||||
.RE
|
||||
|
||||
Note: Put it before the auth middleware(s) in the pipeline.
|
||||
|
||||
.RS 3
|
||||
.IP \fBuse\fR
|
||||
Entry point for paste.deploy for the container_sync middleware. This is the reference to the installed python egg.
|
||||
This is normally \fBegg:swift#container_sync\fR.
|
||||
.IP \fBallow_full_urls\fR
|
||||
Set this to false if you want to disallow any full url values to be set for
|
||||
any new X-Container-Sync-To headers. This will keep any new full urls from
|
||||
coming in, but won't change any existing values already in the cluster.
|
||||
Updating those will have to be done manually, as knowing what the true realm
|
||||
endpoint should be cannot always be guessed. The default is true.
|
||||
.IP \fBcurrent\fR
|
||||
Set this to specify this cluster's //realm/cluster as "current" in /info.
|
||||
.RE
|
||||
.PD
|
||||
|
||||
|
||||
.RS 0
|
||||
.IP "\fB[filter:xprofile]\fR"
|
||||
.RE
|
||||
|
||||
Note: Put it at the beginning of the pipeline to profile all middleware. But it is safer to put this after healthcheck.
|
||||
|
||||
.RS 3
|
||||
.IP "\fBuse\fR"
|
||||
Entry point for paste.deploy for the xprofile middleware. This is the reference to the installed python egg.
|
||||
This is normally \fBegg:swift#xprofile\fR.
|
||||
.IP "\fBprofile_module\fR"
|
||||
This option enables you to switch between profilers, which should inherit from
the Python standard profiler. Currently the supported values are 'cProfile',
'eventlet.green.profile', etc.
|
||||
.IP "\fBlog_filename_prefix\fR"
|
||||
This prefix will be used to combine process ID and timestamp to name the
|
||||
profile data file. Make sure the executing user has permission to write
|
||||
into this path (missing path segments will be created, if necessary).
|
||||
If you enable profiling in more than one type of daemon, you must override
it with a unique value for each daemon. The default is
/var/log/swift/profile/account.profile.
|
||||
.IP "\fBdump_interval\fR"
|
||||
The profile data will be dumped to local disk based on the above naming rule
at this interval. The default is 5.0.
|
||||
.IP "\fBdump_timestamp\fR"
|
||||
Be careful: this option makes the profiler dump data into files named with a
timestamp, which means lots of files can pile up in the directory.
The default is false.
|
||||
.IP "\fBpath\fR"
|
||||
This is the path of the URL to access the mini web UI. The default is __profile__.
|
||||
.IP "\fBflush_at_shutdown\fR"
|
||||
Clear the data when the WSGI server shuts down. The default is false.
|
||||
.IP "\fBunwind\fR"
|
||||
Unwind the iterator of applications. Default is false.
|
||||
.RE
|
||||
.PD
|
||||
|
||||
|
||||
.RS 0
|
||||
.IP "\fB[filter:versioned_writes]\fR"
|
||||
.RE
|
||||
|
||||
Note: Put after slo, dlo in the pipeline.
|
||||
If you don't put it in the pipeline, it will be inserted automatically.
|
||||
|
||||
.RS 3
|
||||
.IP \fBuse\fR
|
||||
Entry point for paste.deploy for the versioned_writes middleware. This is the reference to the installed python egg.
|
||||
This is normally \fBegg:swift#versioned_writes\fR.
|
||||
.IP \fBallow_versioned_writes\fR
|
||||
Enables using versioned writes middleware and exposing configuration settings via HTTP GET /info.
|
||||
WARNING: Setting this option bypasses the "allow_versions" option
|
||||
in the container configuration file, which will be eventually
|
||||
deprecated. See documentation for more details.
|
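Once enabled (allow_versioned_writes = true), versioning is then switched on per container with the X-Versions-Location header; the container names below are hypothetical:

swift post -H "X-Versions-Location: versions" mycontainer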
||||
.RE
|
||||
.PD
|
||||
|
||||
|
||||
.SH APP SECTION
|
||||
|
@ -518,10 +943,19 @@ Chunk size to read from object servers. The default is 8192.
|
|||
Chunk size to read from clients. The default is 8192.
|
||||
.IP \fBnode_timeout\fR
|
||||
Request timeout to external services. The default is 10 seconds.
|
||||
.IP \fBclient_timeout\fR
|
||||
Timeout to read one chunk from a client. The default is 60 seconds.
|
||||
.IP \fBrecoverable_node_timeout\fR
|
||||
How long the proxy server will wait for an initial response and to read a
|
||||
chunk of data from the object servers while serving GET / HEAD requests.
|
||||
Timeouts from these requests can be recovered from, so setting this to
|
||||
something lower than node_timeout would provide quicker error recovery
|
||||
while allowing for a longer timeout for non-recoverable requests (PUTs).
|
||||
Defaults to node_timeout; it should be overridden if node_timeout is set to a
|
||||
high number to prevent client timeouts from firing before the proxy server
|
||||
has a chance to retry.
|
||||
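A minimal sketch of how these two timeouts might be combined in the proxy app section (the values are illustrative, not recommendations):

[app:proxy-server]
node_timeout = 30
recoverable_node_timeout = 5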
.IP \fBconn_timeout\fR
|
||||
Connection timeout to external services. The default is 0.5 seconds.
|
||||
.IP \fBpost_quorum_timeout\fR
|
||||
How long to wait for requests to finish after a quorum has been established. The default is 0.5 seconds.
|
||||
.IP \fBerror_suppression_interval\fR
|
||||
Time in seconds that must elapse since the last error for a node to
|
||||
be considered no longer error limited. The default is 60 seconds.
|
||||
|
@ -539,12 +973,63 @@ container sync won't be able to sync posts. The default is True.
|
|||
.IP \fBaccount_autocreate\fR
|
||||
If set to 'true' authorized accounts that do not yet exist within the Swift cluster
|
||||
will be automatically created. The default is set to false.
|
||||
.IP \fBrate_limit_after_segment\fR
|
||||
Start rate-limiting object segments after the Nth segment of a segmented
|
||||
object. The default is 10 segments.
|
||||
.IP \fBrate_limit_segments_per_sec\fR
|
||||
Once segment rate-limiting kicks in for an object, limit segments served to N
|
||||
per second. The default is 1.
|
||||
.IP \fBauto_create_account_prefix\fR
|
||||
Prefix used when automatically creating accounts. The default is '.'.
|
||||
.IP \fBmax_containers_per_account\fR
|
||||
If set to a positive value, trying to create a container when the account
|
||||
already has at least this maximum containers will result in a 403 Forbidden.
|
||||
Note: This is a soft limit, meaning a user might exceed the cap for up to
recheck_account_existence seconds before the 403s kick in.
|
||||
.IP \fBmax_containers_whitelist\fR
|
||||
This is a comma separated list of account hashes that ignore the max_containers_per_account cap.
|
||||
.IP \fBdeny_host_headers\fR
|
||||
Comma separated list of Host headers to which the proxy will deny requests. The default is empty.
|
||||
.IP \fBput_queue_depth\fR
|
||||
Depth of the proxy put queue. The default is 10.
|
||||
.IP \fBsorting_method\fR
|
||||
Storage nodes can be chosen at random (shuffle - default), by using timing
|
||||
measurements (timing), or by using an explicit match (affinity).
|
||||
Using timing measurements may allow for lower overall latency, while
|
||||
using affinity allows for finer control. In both the timing and
|
||||
affinity cases, equally-sorting nodes are still randomly chosen to
|
||||
spread load.
|
||||
The valid values for sorting_method are "affinity", "shuffle", and "timing".
|
||||
.IP \fBtiming_expiry\fR
|
||||
If the "timing" sorting_method is used, the timings will only be valid for
|
||||
the number of seconds configured by timing_expiry. The default is 300.
|
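For example, to prefer the fastest-responding nodes and keep measurements for five minutes (illustrative values):

sorting_method = timing
timing_expiry = 300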
||||
.IP \fBmax_large_object_get_time\fR
|
||||
The maximum time (seconds) that a large object connection is allowed to last. The default is 86400.
|
||||
.IP \fBrequest_node_count\fR
|
||||
Set to the number of nodes to contact for a normal request. You can use
|
||||
'* replicas' at the end to have it use the number given times the number of
|
||||
replicas for the ring being used for the request. The default is '2 * replicas'.
|
||||
.IP \fBread_affinity\fR
|
||||
Which backend servers to prefer on reads. Format is r<N> for region
|
||||
N or r<N>z<M> for region N, zone M. The value after the equals is
|
||||
the priority; lower numbers are higher priority.
|
||||
Default is empty, meaning no preference.
|
||||
Example: first read from region 1 zone 1, then region 1 zone 2, then anything in region 2, then everything else:
|
||||
read_affinity = r1z1=100, r1z2=200, r2=300
|
||||
.IP \fBwrite_affinity\fR
|
||||
Which backend servers to prefer on writes. Format is r<N> for region
|
||||
N or r<N>z<M> for region N, zone M. If this is set, then when
|
||||
handling an object PUT request, some number (see setting
|
||||
write_affinity_node_count) of local backend servers will be tried
|
||||
before any nonlocal ones. Default is empty, meaning no preference.
|
||||
Example: try to write to regions 1 and 2 before writing to any other
|
||||
nodes:
|
||||
write_affinity = r1, r2
|
||||
.IP \fBwrite_affinity_node_count\fR
|
||||
The number of local (as governed by the write_affinity setting)
|
||||
nodes to attempt to contact first, before any non-local ones. You
|
||||
can use '* replicas' at the end to have it use the number given
|
||||
times the number of replicas for the ring being used for the
|
||||
request. The default is '2 * replicas'.
|
||||
.IP \fBswift_owner_headers\fR
|
||||
These are the headers whose values will only be shown to swift_owners. The
|
||||
exact definition of a swift_owner is up to the auth system in use, but
|
||||
usually indicates administrative responsibilities.
|
||||
The default is 'x-container-read, x-container-write, x-container-sync-key, x-container-sync-to, x-account-meta-temp-url-key, x-account-meta-temp-url-key-2, x-container-meta-temp-url-key, x-container-meta-temp-url-key-2, x-account-access-control'.
|
||||
.RE
|
||||
.PD
|
||||
|
||||
|
|
|
@ -109,6 +109,8 @@ allows one to use the keywords such as "all", "main" and "rest" for the <server>
|
|||
.IP "-c N, --config-num=N \t send command to the Nth server only
|
||||
.IP "-k N, --kill-wait=N \t wait N seconds for processes to die (default 15)
|
||||
.IP "-r RUN_DIR, --run-dir=RUN_DIR directory where the pids will be stored (default /var/run/swift)
|
||||
.IP "--strict return non-zero status code if some config is missing. Default mode if server is explicitly named."
|
||||
.IP "--non-strict return zero status code even if some config is missing. Default mode if server is one of aliases `all`, `main` or `rest`."
|
||||
.PD
|
||||
.RE
|
||||
|
||||
|
|
|
@ -83,7 +83,9 @@ Get drive audit error stats
|
|||
.IP "\fB-T, --time\fR"
|
||||
Check time synchronization
|
||||
.IP "\fB--all\fR"
|
||||
Perform all checks. Equivalent to \-arudlqT \-\-md5
|
||||
Perform all checks. Equivalent to \-arudlqT
|
||||
\-\-md5 \-\-sockstat \-\-auditor \-\-updater \-\-expirer
|
||||
\-\-driveaudit \-\-validate\-servers
|
||||
.IP "\fB--region=REGION\fR"
|
||||
Only query servers in specified region
|
||||
.IP "\fB-z ZONE, --zone=ZONE\fR"
|
||||
|
|
|
@ -110,8 +110,8 @@ You can create scripts to create the account and container rings and rebalance.
|
|||
cd /etc/swift
|
||||
rm -f account.builder account.ring.gz backups/account.builder backups/account.ring.gz
|
||||
swift-ring-builder account.builder create 18 3 1
|
||||
swift-ring-builder account.builder add z1-<account-server-1>:6002/sdb1 1
|
||||
swift-ring-builder account.builder add z2-<account-server-2>:6002/sdb1 1
|
||||
swift-ring-builder account.builder add r1z1-<account-server-1>:6002/sdb1 1
|
||||
swift-ring-builder account.builder add r1z2-<account-server-2>:6002/sdb1 1
|
||||
swift-ring-builder account.builder rebalance
|
||||
|
||||
You need to replace the values of <account-server-1>,
|
||||
|
@ -121,7 +121,8 @@ You can create scripts to create the account and container rings and rebalance.
|
|||
6002, and have a storage device called "sdb1" (this is a directory
|
||||
name created under /drives when we setup the account server). The
|
||||
"z1", "z2", etc. designate zones, and you can choose whether you
|
||||
put devices in the same or different zones.
|
||||
put devices in the same or different zones. The "r1" designates
|
||||
the region, with different regions specified as "r1", "r2", etc.
|
||||
|
||||
2. Make the script file executable and run it to create the account ring file::
|
||||
|
||||
|
@ -588,7 +589,9 @@ This information can also be queried via the swift-recon command line utility::
|
|||
--md5 Get md5sum of servers ring and compare to local copy
|
||||
--sockstat Get cluster socket usage stats
|
||||
-T, --time Check time synchronization
|
||||
--all Perform all checks. Equal to -arudlqT --md5 --sockstat
|
||||
--all Perform all checks. Equal to
|
||||
-arudlqT --md5 --sockstat --auditor --updater
|
||||
--expirer --driveaudit --validate-servers
|
||||
-z ZONE, --zone=ZONE Only query servers in specified zone
|
||||
-t SECONDS, --timeout=SECONDS
|
||||
Time to wait for a response from a server
|
||||
|
|
|
@ -1,5 +1,3 @@
|
|||
.. _formpost:
|
||||
|
||||
====================
|
||||
Form POST middleware
|
||||
====================
|
||||
|
@ -19,9 +17,7 @@ URL middleware uses. For information about how to set these keys, see
|
|||
:ref:`secret_keys`.
|
||||
|
||||
For information about the form **POST** middleware configuration
|
||||
options, see `Form
|
||||
post <http://docs.openstack.org/havana/config-reference/content/object-storage-form-post.html>`__
|
||||
in the *OpenStack Configuration Reference*.
|
||||
options, see :ref:`formpost` in the *Source Documentation*.
|
||||
|
||||
Form POST format
|
||||
~~~~~~~~~~~~~~~~
|
||||
|
|
|
@ -293,10 +293,12 @@ a manifest object but a normal object with content same as what you would
|
|||
get on a **GET** request to original manifest object.
|
||||
|
||||
To duplicate a manifest object:
|
||||
|
||||
* Use the **GET** operation to read the value of ``X-Object-Manifest`` and
|
||||
use this value in the ``X-Object-Manifest`` request header in a **PUT**
|
||||
operation.
|
||||
* Alternatively, you can include *``?multipart-manifest=get``* query
|
||||
string in the **COPY** request.
|
||||
|
||||
This creates a new manifest object that shares the same set of segment
|
||||
objects as the original manifest object.
|
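A sketch of the **COPY** alternative (the URL variables and names below are hypothetical):

.. code::

    curl -X COPY "$publicURL/container/manifest?multipart-manifest=get" \
         -H "X-Auth-Token: $token" -H "Destination: container/manifest-copy"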
||||
|
|
|
@ -128,7 +128,24 @@ If you have a large number of containers or objects, you can use query
|
|||
parameters to page through large lists of containers or objects. Use the
|
||||
*``marker``*, *``limit``*, and *``end_marker``* query parameters to
|
||||
control how many items are returned in a list and where the list starts
|
||||
or ends.
|
||||
or ends. If you want to page through in reverse order, you can use the query
|
||||
parameter *``reverse``*, noting that your marker and end_marker values should be
switched when applied to a reverse listing. For example, for a list of objects
|
||||
``[a, b, c, d, e]`` the non-reversed could be:
|
||||
|
||||
.. code::
|
||||
|
||||
/v1/{account}/{container}/?marker=a&end_marker=d
|
||||
b
|
||||
c
|
||||
|
||||
However, when reversed marker and end_marker are applied to a reversed list:
|
||||
|
||||
.. code::
|
||||
|
||||
/v1/{account}/{container}/?marker=d&end_marker=a&reverse=on
|
||||
c
|
||||
b
|
||||
|
||||
Object Storage HTTP requests have the following default constraints.
|
||||
Your service provider might use different default values.
|
||||
|
|
|
@ -15,9 +15,7 @@ downloads the object directly from Object Storage, eliminating the need
|
|||
for the website to act as a proxy for the request.
|
||||
|
||||
Ask your cloud administrator to enable the temporary URL feature. For
|
||||
information, see `Temporary
|
||||
URL <http://docs.openstack.org/havana/config-reference/content/object-storage-tempurl.html>`__
|
||||
in the *OpenStack Configuration Reference*.
|
||||
information, see :ref:`tempurl` in the *Source Documentation*.
|
||||
|
||||
Note
|
||||
~~~~
|
||||
|
|
|
@ -23,12 +23,15 @@ Application Bindings
|
|||
* `SwiftBox <https://github.com/suniln/SwiftBox>`_ - C# library using RestSharp
|
||||
* `jclouds <http://jclouds.incubator.apache.org/documentation/quickstart/openstack/>`_ - Java library offering bindings for all OpenStack projects
|
||||
* `java-openstack-swift <https://github.com/dkocher/java-openstack-swift>`_ - Java bindings for OpenStack Swift
|
||||
* `swift_client <https://github.com/mrkamel/swift_client>`_ - Small but powerful Ruby client to interact with OpenStack Swift
|
||||
* `nightcrawler_swift <https://github.com/tulios/nightcrawler_swift>`_ - This Ruby gem teleports your assets to a OpenStack Swift bucket/container
|
||||
* `swift storage <https://rubygems.org/gems/swift-storage>`_ - Simple Openstack Swift storage client.
|
||||
|
||||
Authentication
|
||||
--------------
|
||||
|
||||
* `Keystone <https://github.com/openstack/keystone>`_ - Official Identity Service for OpenStack.
|
||||
* `Swauth <https://github.com/gholt/swauth>`_ - Older Swift authentication service that only requires Swift itself.
|
||||
* `Swauth <https://github.com/openstack/swauth>`_ - An alternative Swift authentication service that only requires Swift itself.
|
||||
* `Basicauth <https://github.com/CloudVPS/swift-basicauth>`_ - HTTP Basic authentication support (keystone backed).
|
||||
|
||||
|
||||
|
@ -60,7 +63,7 @@ Content Distribution Network Integration
|
|||
Alternative API
|
||||
---------------
|
||||
|
||||
* `Swift3 <https://github.com/stackforge/swift3>`_ - Amazon S3 API emulation.
|
||||
* `Swift3 <https://github.com/openstack/swift3>`_ - Amazon S3 API emulation.
|
||||
* `CDMI <https://github.com/osaddon/cdmi>`_ - CDMI support
|
||||
|
||||
|
||||
|
@ -81,8 +84,8 @@ Custom Logger Hooks
|
|||
|
||||
Storage Backends (DiskFile API implementations)
|
||||
-----------------------------------------------
|
||||
* `Swift-on-File <https://github.com/stackforge/swiftonfile>`_ - Enables objects created using Swift API to be accessed as files on a POSIX filesystem and vice versa.
|
||||
* `swift-ceph-backend <https://github.com/stackforge/swift-ceph-backend>`_ - Ceph RADOS object server implementation for Swift.
|
||||
* `Swift-on-File <https://github.com/openstack/swiftonfile>`_ - Enables objects created using Swift API to be accessed as files on a POSIX filesystem and vice versa.
|
||||
* `swift-ceph-backend <https://github.com/openstack/swift-ceph-backend>`_ - Ceph RADOS object server implementation for Swift.
|
||||
* `kinetic-swift <https://github.com/swiftstack/kinetic-swift>`_ - Seagate Kinetic Drive as backend for Swift
|
||||
* `swift-scality-backend <https://github.com/scality/ScalitySproxydSwift>`_ - Scality sproxyd object server implementation for Swift.
|
||||
|
||||
|
@ -95,7 +98,7 @@ Developer Tools
|
|||
* `SAIO Ansible playbook <https://github.com/thiagodasilva/swift-aio>`_ -
|
||||
Quickly setup a standard development environment using Vagrant and Ansible in
|
||||
a Fedora virtual machine (with built-in `Swift-on-File
|
||||
<https://github.com/stackforge/swiftonfile>`_ support).
|
||||
<https://github.com/openstack/swiftonfile>`_ support).
|
||||
|
||||
Other
|
||||
-----
|
||||
|
|
File diff suppressed because it is too large
Load Diff
|
@ -352,7 +352,7 @@ folks a start on their own code if they want to use repoze.what::
|
|||
self.ssl = \
|
||||
conf.get('ssl', 'false').lower() in ('true', 'on', '1', 'yes')
|
||||
self.auth_prefix = conf.get('prefix', '/')
|
||||
self.timeout = int(conf.get('node_timeout', 10))
|
||||
self.timeout = float(conf.get('node_timeout', 10))
|
||||
|
||||
def authenticate(self, env, identity):
|
||||
token = identity.get('token')
|
||||
|
@ -375,7 +375,7 @@ folks a start on their own code if they want to use repoze.what::
|
|||
expiration = float(resp.getheader('x-auth-ttl'))
|
||||
user = resp.getheader('x-auth-user')
|
||||
memcache_client.set(key, (time(), expiration, user),
|
||||
timeout=expiration)
|
||||
time=expiration)
|
||||
return user
|
||||
return None
|
||||
|
||||
|
@ -487,7 +487,8 @@ folks a start on their own code if they want to use repoze.what::
|
|||
Allowing CORS with Auth
|
||||
-----------------------
|
||||
|
||||
Cross Origin RequestS require that the auth system allow the OPTIONS method to
|
||||
pass through without a token. The preflight request will make an OPTIONS call
|
||||
against the object or container and will not work if the auth system stops it.
|
||||
Cross Origin Resource Sharing (CORS) require that the auth system allow the
|
||||
OPTIONS method to pass through without a token. The preflight request will
|
||||
make an OPTIONS call against the object or container and will not work if
|
||||
the auth system stops it.
|
||||
See TempAuth for an example of how OPTIONS requests are handled.
|
||||
|
|
|
@ -51,16 +51,16 @@ To execute the unit tests:
|
|||
|
||||
.. note::
|
||||
As of tox version 2.0.0, most environment variables are not automatically
|
||||
passed to the test environment. Swift's tox.ini overrides this default
|
||||
behavior so that variable names matching SWIFT_* and *_proxy will be passed,
|
||||
but you may need to run tox --recreate for this to take effect after
|
||||
upgrading from tox<2.0.0.
|
||||
passed to the test environment. Swift's `tox.ini` overrides this default
|
||||
behavior so that variable names matching ``SWIFT_*`` and ``*_proxy`` will be
|
||||
passed, but you may need to run `tox --recreate` for this to take effect
|
||||
after upgrading from tox<2.0.0.
|
||||
|
||||
Conversely, if you do not want those environment variables to be passed to
|
||||
the test environment then you will need to unset them before calling tox.
|
||||
|
||||
Also, if you ever encounter DistributionNotFound, try to use `tox --recreate`
|
||||
or remove the .tox directory to force tox to recreate the dependency list.
|
||||
or remove the `.tox` directory to force tox to recreate the dependency list.
|
||||
|
||||
The functional tests may be executed against a :doc:`development_saio` or
|
||||
other running Swift cluster using the command:
|
||||
|
|
|
@ -39,7 +39,7 @@ Installing dependencies
|
|||
sudo apt-get install curl gcc memcached rsync sqlite3 xfsprogs \
|
||||
git-core libffi-dev python-setuptools
|
||||
sudo apt-get install python-coverage python-dev python-nose \
|
||||
python-simplejson python-xattr python-eventlet \
|
||||
python-xattr python-eventlet \
|
||||
python-greenlet python-pastedeploy \
|
||||
python-netifaces python-pip python-dnspython \
|
||||
python-mock
|
||||
|
@ -50,14 +50,14 @@ Installing dependencies
|
|||
sudo yum install curl gcc memcached rsync sqlite xfsprogs git-core \
|
||||
libffi-devel xinetd python-setuptools \
|
||||
python-coverage python-devel python-nose \
|
||||
python-simplejson pyxattr python-eventlet \
|
||||
pyxattr python-eventlet \
|
||||
python-greenlet python-paste-deploy \
|
||||
python-netifaces python-pip python-dns \
|
||||
python-mock
|
||||
|
||||
Note: This installs necessary system dependencies and *most* of the python
|
||||
dependencies. Later in the process setuptools/distribute or pip will install
|
||||
and/or upgrade packages.
|
||||
and/or upgrade packages.
|
||||
|
||||
Next, choose either :ref:`partition-section` or :ref:`loopback-section`.
|
||||
|
||||
|
|
|
@ -6,6 +6,13 @@ Please refer to the latest official
|
|||
`Openstack Installation Guides <http://docs.openstack.org/#install-guides>`_
|
||||
for the most up-to-date documentation.
|
||||
|
||||
Object Storage installation guide for Openstack Liberty
|
||||
--------------------------------------------------------
|
||||
|
||||
* `openSUSE 13.2 and SUSE Linux Enterprise Server 12 <http://docs.openstack.org/liberty/install-guide-obs/swift.html>`_
|
||||
* `RHEL 7, CentOS 7 <http://docs.openstack.org/liberty/install-guide-rdo/swift.html>`_
|
||||
* `Ubuntu 14.04 <http://docs.openstack.org/liberty/install-guide-ubuntu/swift.html>`_
|
||||
|
||||
Object Storage installation guide for Openstack Kilo
|
||||
----------------------------------------------------
|
||||
|
||||
|
@ -26,11 +33,3 @@ Object Storage installation guide for Openstack Icehouse
|
|||
* `openSUSE and SUSE Linux Enterprise Server <http://docs.openstack.org/icehouse/install-guide/install/zypper/content/ch_swift.html>`_
|
||||
* `Red Hat Enterprise Linux, CentOS, and Fedora <http://docs.openstack.org/icehouse/install-guide/install/yum/content/ch_swift.html>`_
|
||||
* `Ubuntu 12.04/14.04 (LTS) <http://docs.openstack.org/icehouse/install-guide/install/apt/content/ch_swift.html>`_
|
||||
|
||||
Object Storage installation guide for Openstack Havana
|
||||
------------------------------------------------------
|
||||
|
||||
* `Debian 7.0 <http://docs.openstack.org/havana/install-guide/install/apt-debian/content/ch_swift.html>`_
|
||||
* `openSUSE and SUSE Linux Enterprise Server <http://docs.openstack.org/havana/install-guide/install/zypper/content/ch_swift.html>`_
|
||||
* `Red Hat Enterprise Linux, CentOS, and Fedora <http://docs.openstack.org/havana/install-guide/install/yum/content/ch_swift.html>`_
|
||||
* `Ubuntu 12.04 (LTS) <http://docs.openstack.org/havana/install-guide/install/apt/content/ch_swift.html>`_
|
||||
|
|
|
@ -254,9 +254,11 @@ This configuration works as follows:
|
|||
``admin`` or ``swiftoperator`` role(s). When validated, the service token
|
||||
gives the ``service`` role.
|
||||
* Swift interprets the above configuration as follows:
|
||||
|
||||
* Did the user token provide one of the roles listed in operator_roles?
|
||||
* Did the service token have the ``service`` role as described by the
|
||||
``SERVICE_service_roles`` options.
|
||||
|
||||
* If both conditions are met, the request is granted. Otherwise, Swift
|
||||
rejects the request.
|
||||
|
||||
|
|
|
@ -171,6 +171,7 @@ The sequence of events and actions are as follows:
|
|||
a copy of the <user-token>. In the X-Service-Token header, place your
|
||||
Service's token. If you use python-swiftclient you can achieve this
|
||||
by (see the sketch after this list):
|
||||
|
||||
* Putting the URL in the ``preauthurl`` parameter
|
||||
* Putting the <user-token> in ``preauthtoken`` parameter
|
||||
* Adding the X-Service-Token to the ``headers`` parameter
|
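A minimal sketch with python-swiftclient (the URL, token values, and object names are hypothetical):

.. code::

    from swiftclient.client import Connection

    # preauthurl carries the service account URL; preauthtoken carries the
    # user's token, while the Service's own token goes in X-Service-Token
    conn = Connection(preauthurl='https://swift.example.com/v1/SERVICE_1234',
                      preauthtoken='<user-token>')
    conn.put_object('mycontainer', 'obj', b'data',
                    headers={'X-Service-Token': '<service-token>'})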
||||
|
@ -251,7 +252,7 @@ However, if one Service is compromised, that Service can access
|
|||
data created by another Service. To prevent this, multiple Service Prefixes may
|
||||
be used. This also requires that the operator configure multiple service
|
||||
roles. For example, in a system that has Glance and Cinder, the following
|
||||
Swift configuration could be used:
|
||||
Swift configuration could be used::
|
||||
|
||||
[keystoneauth]
|
||||
reseller_prefix = AUTH_, IMAGE_, BLOCK_
|
||||
|
|
|
@ -2,20 +2,6 @@
|
|||
Erasure Code Support
|
||||
====================
|
||||
|
||||
|
||||
--------------------------
|
||||
Beta: Not production ready
|
||||
--------------------------
|
||||
The erasure code support in Swift is considered "beta" at this point.
|
||||
Most major functionality is included, but it has not been tested or validated
|
||||
at large scale. This feature relies on ssync for durability. Deployers are
|
||||
urged to do extensive testing and not deploy production data using an
|
||||
erasure code storage policy.
|
||||
|
||||
If any bugs are found during testing, please report them to
|
||||
https://bugs.launchpad.net/swift
|
||||
|
||||
|
||||
-------------------------------
|
||||
History and Theory of Operation
|
||||
-------------------------------
|
||||
|
|
|
@ -57,7 +57,7 @@ deployers. Each container has a new special immutable metadata element called
|
|||
the storage policy index. Note that internally, Swift relies on policy
|
||||
indexes and not policy names. Policy names exist for human readability and
|
||||
translation is managed in the proxy. When a container is created, one new
|
||||
optional header is supported to specify the policy name. If nothing is
|
||||
optional header is supported to specify the policy name. If no name is
|
||||
specified, the default policy is used (and if no other policies defined,
|
||||
Policy-0 is considered the default). We will be covering the difference
|
||||
between default and Policy-0 in the next section.
|
||||
|
@ -170,12 +170,13 @@ Storage Policies is a versatile feature intended to support both new and
|
|||
pre-existing clusters with the same level of flexibility. For that reason, we
|
||||
introduce the ``Policy-0`` concept which is not the same as the "default"
|
||||
policy. As you will see when we begin to configure policies, each policy has
|
||||
both a name (human friendly, configurable) as well as an index (or simply
|
||||
policy number). Swift reserves index 0 to map to the object ring that's
|
||||
present in all installations (e.g., ``/etc/swift/object.ring.gz``). You can
|
||||
name this policy anything you like, and if no policies are defined it will
|
||||
report itself as ``Policy-0``, however you cannot change the index as there must
|
||||
always be a policy with index 0.
|
||||
a single name and an arbitrary number of aliases (human friendly,
|
||||
configurable) as well as an index (or simply policy number). Swift reserves
|
||||
index 0 to map to the object ring that's present in all installations
|
||||
(e.g., ``/etc/swift/object.ring.gz``). You can name this policy anything you
|
||||
like, and if no policies are defined it will report itself as ``Policy-0``,
|
||||
however you cannot change the index as there must always be a policy with
|
||||
index 0.
|
||||
|
||||
Another important concept is the default policy which can be any policy
|
||||
in the cluster. The default policy is the policy that is automatically
|
||||
|
@ -273,6 +274,8 @@ file:
|
|||
* Policy names must contain only letters, digits or a dash
|
||||
* Policy names must be unique
|
||||
* The policy name 'Policy-0' can only be used for the policy with index 0
|
||||
* Multiple names can be assigned to one policy using aliases. All names
|
||||
must follow the Swift naming rules.
|
||||
* If any policies are defined, exactly one policy must be declared default
|
||||
* Deprecated policies cannot be declared the default
|
||||
* If no ``policy_type`` is provided, ``replication`` is the default value.
|
||||
|
@ -288,6 +291,7 @@ example configuration.::
|
|||
|
||||
[storage-policy:0]
|
||||
name = gold
|
||||
aliases = yellow, orange
|
||||
policy_type = replication
|
||||
default = yes
|
||||
|
||||
|
@ -301,8 +305,10 @@ information about the ``default`` and ``deprecated`` options.
|
|||
|
||||
There are some other considerations when managing policies:
|
||||
|
||||
* Policy names can be changed (but be sure that users are aware, aliases are
|
||||
not currently supported but could be implemented in custom middleware!)
|
||||
* Policy names can be changed.
|
||||
* Aliases are supported and can be added and removed. If the primary name
|
||||
of a policy is removed the next available alias will be adopted as the
|
||||
primary name. A policy must always have at least one name.
|
||||
* You cannot change the index of a policy once it has been created
|
||||
* The default policy can be changed at any time, by adding the
|
||||
default directive to the desired policy section
|
||||
|
@ -399,7 +405,7 @@ The module, :ref:`storage_policy`, is responsible for parsing the
|
|||
configured policies via class :class:`.StoragePolicyCollection`. This
|
||||
collection is made up of policies of class :class:`.StoragePolicy`. The
|
||||
collection class includes handy functions for getting to a policy either by
|
||||
name or by index , getting info about the policies, etc. There's also one
|
||||
name or by index, getting info about the policies, etc. There's also one
|
||||
very important function, :meth:`~.StoragePolicyCollection.get_object_ring`.
|
||||
Object rings are members of the :class:`.StoragePolicy` class and are
|
||||
actually not instantiated until the :meth:`~.StoragePolicy.load_ring`
|
||||
|
|
|
@ -26,6 +26,7 @@ to implement a usable set of policies.
|
|||
|
||||
[storage-policy:0]
|
||||
name = gold
|
||||
aliases = yellow, orange
|
||||
default = yes
|
||||
|
||||
[storage-policy:1]
|
||||
|
@ -82,7 +83,8 @@ Storage Policies effect placement of data in Swift.
|
|||
|
||||
You should see this: (only showing the policy output here)::
|
||||
|
||||
policies: [{'default': True, 'name': 'gold'}, {'name': 'silver'}]
|
||||
policies: [{'aliases': 'gold, yellow, orange', 'default': True,
|
||||
'name': 'gold'}, {'aliases': 'silver', 'name': 'silver'}]
|
||||
|
||||
3. Now create a container without specifying a policy, it will use the
|
||||
default, 'gold' and then put a test object in it (create the file ``file0.txt``
|
||||
|
|
|
@ -70,7 +70,7 @@ use = egg:swift#account
|
|||
# "replication_server" (this is the default). To only handle replication,
|
||||
# set to a True value (e.g. "True" or "1"). To handle only non-replication
|
||||
# verbs, set to "False". Unless you have a separate replication network, you
|
||||
# should not specify any value for "replication_server".
|
||||
# should not specify any value for "replication_server". Default is empty.
|
||||
# replication_server = false
|
||||
|
||||
[filter:healthcheck]
|
||||
|
@ -90,8 +90,19 @@ use = egg:swift#recon
|
|||
# log_level = INFO
|
||||
# log_address = /dev/log
|
||||
#
|
||||
# Maximum number of database rows that will be sync'd in a single HTTP
|
||||
# replication request. Databases with less than or equal to this number of
|
||||
# differing rows will always be sync'd using an HTTP replication request rather
|
||||
# than using rsync.
|
||||
# per_diff = 1000
|
||||
#
|
||||
# Maximum number of HTTP replication requests attempted on each replication
|
||||
# pass for any one container. This caps how long the replicator will spend
|
||||
# trying to sync a given database per pass so the other databases don't get
|
||||
# starved.
|
||||
# max_diffs = 100
|
||||
#
|
||||
# Number of replication workers to spawn.
|
||||
# concurrency = 8
|
||||
#
|
||||
# Time in seconds to wait between replication passes
|
||||
|
@ -126,8 +137,6 @@ use = egg:swift#recon
|
|||
# Will audit each account at most once per interval
|
||||
# interval = 1800
|
||||
#
|
||||
# log_facility = LOG_LOCAL0
|
||||
# log_level = INFO
|
||||
# accounts_per_second = 200
|
||||
# recon_cache_path = /var/cache/swift
|
||||
|
||||
|
|
|
@ -99,8 +99,19 @@ use = egg:swift#recon
|
|||
# log_level = INFO
|
||||
# log_address = /dev/log
|
||||
#
|
||||
# Maximum number of database rows that will be sync'd in a single HTTP
|
||||
# replication request. Databases with less than or equal to this number of
|
||||
# differing rows will always be sync'd using an HTTP replication request rather
|
||||
# than using rsync.
|
||||
# per_diff = 1000
|
||||
#
|
||||
# Maximum number of HTTP replication requests attempted on each replication
|
||||
# pass for any one container. This caps how long the replicator will spend
|
||||
# trying to sync a given database per pass so the other databases don't get
|
||||
# starved.
|
||||
# max_diffs = 100
|
||||
#
|
||||
# Number of replication workers to spawn.
|
||||
# concurrency = 8
|
||||
#
|
||||
# Time in seconds to wait between replication passes
|
||||
|
|
|
@ -1,11 +1,15 @@
|
|||
[drive-audit]
|
||||
# device_dir = /srv/node
|
||||
#
|
||||
# You can specify default log routing here if you want:
|
||||
# log_name = drive-audit
|
||||
# log_facility = LOG_LOCAL0
|
||||
# log_level = INFO
|
||||
# log_address = /dev/log
|
||||
# The following caps the length of log lines to the value given; no limit if
|
||||
# set to 0, the default.
|
||||
# log_max_line_length = 0
|
||||
#
|
||||
# minutes = 60
|
||||
# error_limit = 1
|
||||
# recon_cache_path = /var/cache/swift
|
||||
|
|
|
@ -172,10 +172,7 @@ use = egg:swift#recon
|
|||
# concurrency = 1
|
||||
# stats_interval = 300
|
||||
#
|
||||
# The sync method to use; default is rsync but you can use ssync to try the
|
||||
# EXPERIMENTAL all-swift-code-no-rsync-callouts method. Once ssync is verified
|
||||
# as having performance comparable to, or better than, rsync, we plan to
|
||||
# deprecate rsync so we can move on with more features for replication.
|
||||
# default is rsync, alternative is ssync
|
||||
# sync_method = rsync
|
||||
#
|
||||
# max duration of a partition rsync
|
||||
|
|
|
@ -77,8 +77,15 @@ bind_port = 8080
|
|||
# eventlet_debug = false
|
||||
|
||||
[pipeline:main]
|
||||
# This sample pipeline uses tempauth and is used for SAIO dev work and
|
||||
# testing. See below for a pipeline using keystone.
|
||||
pipeline = catch_errors gatekeeper healthcheck proxy-logging cache container_sync bulk tempurl ratelimit tempauth container-quotas account-quotas slo dlo versioned_writes proxy-logging proxy-server
|
||||
|
||||
# The following pipeline shows keystone integration. Comment out the one
|
||||
# above and uncomment this one. Additional steps for integrating keystone are
|
||||
# covered further below in the filter sections for authtoken and keystoneauth.
|
||||
#pipeline = catch_errors gatekeeper healthcheck proxy-logging cache container_sync bulk tempurl ratelimit authtoken keystoneauth container-quotas account-quotas slo dlo versioned_writes proxy-logging proxy-server
|
||||
|
||||
[app:proxy-server]
|
||||
use = egg:swift#proxy
|
||||
# You can override the default log routing for this app here:
|
||||
|
@ -277,9 +284,8 @@ user_test5_tester5 = testing5 service
|
|||
# refer to the keystone's documentation for details about the
|
||||
# different settings.
|
||||
#
|
||||
# You'll need to have as well the keystoneauth middleware enabled
|
||||
# and have it in your main pipeline so instead of having tempauth in
|
||||
# there you can change it to: authtoken keystoneauth
|
||||
# You'll also need to have the keystoneauth middleware enabled and have it in
|
||||
# your main pipeline, as show in the sample pipeline at the top of this file.
|
||||
#
|
||||
# [filter:authtoken]
|
||||
# paste.filter_factory = keystonemiddleware.auth_token:filter_factory
|
||||
|
@ -499,6 +505,12 @@ use = egg:swift#cname_lookup
|
|||
# Note: Put staticweb just after your auth filter(s) in the pipeline
|
||||
[filter:staticweb]
|
||||
use = egg:swift#staticweb
|
||||
# You can override the default log routing for this filter here:
|
||||
# set log_name = staticweb
|
||||
# set log_facility = LOG_LOCAL0
|
||||
# set log_level = INFO
|
||||
# set log_headers = false
|
||||
# set log_address = /dev/log
|
||||
|
||||
# Note: Put tempurl before dlo, slo and your auth filter(s) in the pipeline
|
||||
[filter:tempurl]
|
||||
|
|
|
@@ -21,7 +21,7 @@ swift_hash_path_prefix = changeme
# policy with index 0 will be declared the default. If multiple policies are
# defined you must define a policy with index 0 and you must specify a
# default. It is recommended you always define a section for
# storage-policy:0.
# storage-policy:0. Aliases are not required when defining a storage policy.
#
# A 'policy_type' argument is also supported but is not mandatory. Default
# policy type 'replication' is used when 'policy_type' is unspecified.

@@ -29,6 +29,7 @@ swift_hash_path_prefix = changeme
name = Policy-0
default = yes
#policy_type = replication
aliases = yellow, orange

# the following section would declare a policy called 'silver', the number of
# replicas will be determined by how the ring is built. In this example the

@@ -40,7 +41,10 @@ default = yes
# this config has specified it as the default. However if a legacy container
# (one created with a pre-policy version of swift) is accessed, it is known
# implicitly to be assigned to the policy with index 0 as opposed to the
# current default.
# current default. Note that even without specifying any aliases, a policy
# always has at least the default name stored in aliases because this field is
# used to contain all human readable names for a storage policy.
#
#[storage-policy:1]
#name = silver
#policy_type = replication

@@ -67,12 +71,13 @@ default = yes
# refer to Swift documentation for details on how to configure EC policies.
#
# The example 'deepfreeze10-4' policy defined below is a _sample_
# configuration with 10 'data' and 4 'parity' fragments. 'ec_type'
# defines the Erasure Coding scheme. 'jerasure_rs_vand' (Reed-Solomon
# Vandermonde) is used as an example below.
# configuration with an alias of 'df10-4' as well as 10 'data' and 4 'parity'
# fragments. 'ec_type' defines the Erasure Coding scheme.
# 'jerasure_rs_vand' (Reed-Solomon Vandermonde) is used as an example below.
#
#[storage-policy:2]
#name = deepfreeze10-4
#aliases = df10-4
#policy_type = erasure_coding
#ec_type = jerasure_rs_vand
#ec_num_data_fragments = 10
@@ -2,12 +2,12 @@
# of appearance. Changing the order has an impact on the overall integration
# process, which may cause wedges in the gate later.

dnspython>=1.9.4
dnspython>=1.12.0;python_version<'3.0'
dnspython3>=1.12.0;python_version>='3.0'
eventlet>=0.16.1,!=0.17.0
greenlet>=0.3.1
netifaces>=0.5,!=0.10.0,!=0.10.1
pastedeploy>=1.3.3
simplejson>=2.0.9
six>=1.9.0
xattr>=0.4
PyECLib==1.0.7 # BSD
PyECLib>=1.0.7 # BSD
@@ -366,7 +366,7 @@ class AccountBroker(DatabaseBroker):
            ''').fetchone())

    def list_containers_iter(self, limit, marker, end_marker, prefix,
                             delimiter):
                             delimiter, reverse=False):
        """
        Get a list of containers sorted by name starting at marker onward, up
        to limit entries. Entries will begin with the prefix and will not have

@@ -377,15 +377,21 @@ class AccountBroker(DatabaseBroker):
        :param end_marker: end marker query
        :param prefix: prefix query
        :param delimiter: delimiter for query
        :param reverse: reverse the result order.

        :returns: list of tuples of (name, object_count, bytes_used, 0)
        """
        delim_force_gte = False
        (marker, end_marker, prefix, delimiter) = utf8encode(
            marker, end_marker, prefix, delimiter)
        if reverse:
            # Reverse the markers if we are reversing the listing.
            marker, end_marker = end_marker, marker
        self._commit_puts_stale_ok()
        if delimiter and not prefix:
            prefix = ''
        if prefix:
            end_prefix = prefix[:-1] + chr(ord(prefix[-1]) + 1)
        orig_marker = marker
        with self.get() as conn:
            results = []

@@ -395,9 +401,13 @@ class AccountBroker(DatabaseBroker):
                    FROM container
                    WHERE """
                query_args = []
                if end_marker:
                if end_marker and (not prefix or end_marker < end_prefix):
                    query += ' name < ? AND'
                    query_args.append(end_marker)
                elif prefix:
                    query += ' name < ? AND'
                    query_args.append(end_prefix)

                if delim_force_gte:
                    query += ' name >= ? AND'
                    query_args.append(marker)

@@ -413,38 +423,40 @@ class AccountBroker(DatabaseBroker):
                    query += ' +deleted = 0'
                else:
                    query += ' deleted = 0'
                query += ' ORDER BY name LIMIT ?'
                query += ' ORDER BY name %s LIMIT ?' % \
                    ('DESC' if reverse else '')
                query_args.append(limit - len(results))
                curs = conn.execute(query, query_args)
                curs.row_factory = None

                if prefix is None:
                    # A delimiter without a specified prefix is ignored
                # Delimiters without a prefix is ignored, further if there
                # is no delimiter then we can simply return the result as
                # prefixes are now handled in the SQL statement.
                if prefix is None or not delimiter:
                    return [r for r in curs]
                if not delimiter:
                    if not prefix:
                        # It is possible to have a delimiter but no prefix
                        # specified. As above, the prefix will be set to the
                        # empty string, so avoid performing the extra work to
                        # check against an empty prefix.
                        return [r for r in curs]
                    else:
                        return [r for r in curs if r[0].startswith(prefix)]

                # We have a delimiter and a prefix (possibly empty string) to
                # handle
                rowcount = 0
                for row in curs:
                    rowcount += 1
                    marker = name = row[0]
                    if len(results) >= limit or not name.startswith(prefix):
                    name = row[0]
                    if reverse:
                        end_marker = name
                    else:
                        marker = name

                    if len(results) >= limit:
                        curs.close()
                        return results
                    end = name.find(delimiter, len(prefix))
                    if end > 0:
                        marker = name[:end] + chr(ord(delimiter) + 1)
                        # we want result to be inclusive of delim+1
                        delim_force_gte = True
                        if reverse:
                            end_marker = name[:end + 1]
                        else:
                            marker = name[:end] + chr(ord(delimiter) + 1)
                            # we want result to be inclusive of delim+1
                            delim_force_gte = True
                        dir_name = name[:end + 1]
                        if dir_name != orig_marker:
                            results.append([dir_name, 0, 0, 1])
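
For illustration, a minimal stand-alone sketch of the marker-swapping idea
behind the new ``reverse`` listings (this is not the broker code; names and
data here are made up)::

    # Swapping the markers keeps the filter logic identical;
    # only the iteration order changes.
    def list_names(names, limit, marker='', end_marker='', reverse=False):
        if reverse:
            marker, end_marker = end_marker, marker
        results = []
        for name in sorted(names, reverse=reverse):
            if marker and name <= marker:
                continue
            if end_marker and name >= end_marker:
                continue
            if len(results) >= limit:
                break
            results.append(name)
        return results

    names = ['apples', 'bananas', 'kiwis', 'pears']
    assert list_names(names, 2) == ['apples', 'bananas']
    assert list_names(names, 2, reverse=True) == ['pears', 'kiwis']
    # Resuming a descending listing after 'pears':
    assert list_names(names, 9, marker='pears', reverse=True) == \
        ['kiwis', 'bananas', 'apples']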
@@ -70,10 +70,10 @@ class AccountReaper(Daemon):
        self.account_ring = None
        self.container_ring = None
        self.object_ring = None
        self.node_timeout = int(conf.get('node_timeout', 10))
        self.node_timeout = float(conf.get('node_timeout', 10))
        self.conn_timeout = float(conf.get('conn_timeout', 0.5))
        self.myips = whataremyips(conf.get('bind_ip', '0.0.0.0'))
        self.bind_port = int(conf.get('bind_port', 0))
        self.bind_port = int(conf.get('bind_port', 6002))
        self.concurrency = int(conf.get('concurrency', 25))
        self.container_concurrency = self.object_concurrency = \
            sqrt(self.concurrency)

@@ -311,8 +311,8 @@ class AccountReaper(Daemon):
        delete_timestamp = Timestamp(info['delete_timestamp'])
        if self.stats_containers_remaining and \
                begin - float(delete_timestamp) >= self.reap_not_done_after:
            self.logger.warn(_('Account %s has not been reaped since %s') %
                             (account, delete_timestamp.isoformat))
            self.logger.warning(_('Account %s has not been reaped since %s') %
                                (account, delete_timestamp.isoformat))
            return True

    def reap_container(self, account, account_partition, account_nodes,
@@ -191,6 +191,7 @@ class AccountController(BaseStorageServer):
            return HTTPPreconditionFailed(body='Bad delimiter')
        limit = constraints.ACCOUNT_LISTING_LIMIT
        given_limit = get_param(req, 'limit')
        reverse = config_true_value(get_param(req, 'reverse'))
        if given_limit and given_limit.isdigit():
            limit = int(given_limit)
            if limit > constraints.ACCOUNT_LISTING_LIMIT:

@@ -211,7 +212,7 @@ class AccountController(BaseStorageServer):
            return self._deleted_response(broker, req, HTTPNotFound)
        return account_listing_response(account, req, out_content_type, broker,
                                        limit, marker, end_marker, prefix,
                                        delimiter)
                                        delimiter, reverse)

    @public
    @replication
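
On the wire, the new listing option is just a query parameter. A hedged
example against a SAIO-style endpoint using the third-party ``requests``
library (URL, account and token are placeholders)::

    import requests

    # Placeholders: adjust the endpoint, account and token for your cluster.
    url = 'http://127.0.0.1:8080/v1/AUTH_test'
    headers = {'X-Auth-Token': '<token>'}

    forward = requests.get(url, headers=headers,
                           params={'format': 'json'})
    backward = requests.get(url, headers=headers,
                            params={'format': 'json', 'reverse': 'true'})
    # The reversed listing should be the forward listing back-to-front.
    assert [c['name'] for c in backward.json()] == \
        [c['name'] for c in forward.json()][::-1]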
@@ -13,11 +13,12 @@
# See the License for the specific language governing permissions and
# limitations under the License.

import json
import time
from xml.sax import saxutils

from swift.common.swob import HTTPOk, HTTPNoContent
from swift.common.utils import json, Timestamp
from swift.common.utils import Timestamp
from swift.common.storage_policy import POLICIES

@@ -70,14 +71,14 @@ def get_response_headers(broker):

def account_listing_response(account, req, response_content_type, broker=None,
                             limit='', marker='', end_marker='', prefix='',
                             delimiter=''):
                             delimiter='', reverse=False):
    if broker is None:
        broker = FakeAccountBroker()

    resp_headers = get_response_headers(broker)

    account_list = broker.list_containers_iter(limit, marker, end_marker,
                                               prefix, delimiter)
                                               prefix, delimiter, reverse)
    if response_content_type == 'application/json':
        data = []
        for (name, object_count, bytes_used, is_subdir) in account_list:
@@ -978,7 +978,8 @@ class SwiftRecon(object):
                        order.')
        args.add_option('--all', action="store_true",
                        help="Perform all checks. Equal to \t\t\t-arudlqT "
                             "--md5 --sockstat --auditor --updater --expirer")
                             "--md5 --sockstat --auditor --updater --expirer "
                             "--driveaudit --validate-servers")
        args.add_option('--region', type="int",
                        help="Only query servers in specified region")
        args.add_option('--zone', '-z', type="int",

@@ -1018,22 +1019,21 @@ class SwiftRecon(object):
        if options.all:
            if self.server_type == 'object':
                self.async_check(hosts)
                self.replication_check(hosts)
                self.object_auditor_check(hosts)
                self.updater_check(hosts)
                self.expirer_check(hosts)
            elif self.server_type == 'container':
                self.replication_check(hosts)
                self.auditor_check(hosts)
                self.updater_check(hosts)
            elif self.server_type == 'account':
                self.replication_check(hosts)
                self.auditor_check(hosts)
                self.replication_check(hosts)
            self.umount_check(hosts)
            self.load_check(hosts)
            self.disk_usage(hosts, options.top, options.lowest,
                            options.human_readable)
            self.get_ringmd5(hosts, swift_dir)
            self.get_swiftconfmd5(hosts)
            self.quarantine_check(hosts)
            self.socket_usage(hosts)
            self.server_type_check(hosts)
@@ -306,18 +306,20 @@ def run_scenario(scenario):
        command_f(*command)

    rebalance_number = 1
    parts_moved, old_balance = rb.rebalance(seed=seed)
    parts_moved, old_balance, removed_devs = rb.rebalance(seed=seed)
    rb.pretend_min_part_hours_passed()
    print "\tRebalance 1: moved %d parts, balance is %.6f" % (
        parts_moved, old_balance)
    print "\tRebalance 1: moved %d parts, balance is %.6f, \
%d removed devs" % (
        parts_moved, old_balance, removed_devs)

    while True:
        rebalance_number += 1
        parts_moved, new_balance = rb.rebalance(seed=seed)
        parts_moved, new_balance, removed_devs = rb.rebalance(seed=seed)
        rb.pretend_min_part_hours_passed()
        print "\tRebalance %d: moved %d parts, balance is %.6f" % (
            rebalance_number, parts_moved, new_balance)
        if parts_moved == 0:
        print "\tRebalance %d: moved %d parts, balance is %.6f, \
%d removed devs" % (
            rebalance_number, parts_moved, new_balance, removed_devs)
        if parts_moved == 0 and removed_devs == 0:
            break
        if abs(new_balance - old_balance) < 1 and not (
                old_balance == builder.MAX_BALANCE and
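
Callers of ``rebalance()`` now unpack a third element counting removed
devices. A minimal sketch of the new calling convention (builder setup
elided, seed arbitrary)::

    # rb is a swift.common.ring.RingBuilder; setup elided.
    parts_moved, balance, removed_devs = rb.rebalance(seed=42)
    if parts_moved == 0 and removed_devs == 0:
        # Nothing moved and no pending device removals were flushed.
        print('nothing left to do')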
@@ -293,14 +293,14 @@ def _parse_set_info_values(argvish):
        devs = builder.search_devs(parse_search_value(search_value))
        change = {}
        ip = ''
        if len(change_value) and change_value[0].isdigit():
        if change_value and change_value[0].isdigit():
            i = 1
            while (i < len(change_value) and
                    change_value[i] in '0123456789.'):
                i += 1
            ip = change_value[:i]
            change_value = change_value[i:]
        elif len(change_value) and change_value[0] == '[':
        elif change_value and change_value.startswith('['):
            i = 1
            while i < len(change_value) and change_value[i] != ']':
                i += 1

@@ -318,14 +318,14 @@ def _parse_set_info_values(argvish):
        if change_value.startswith('R'):
            change_value = change_value[1:]
            replication_ip = ''
            if len(change_value) and change_value[0].isdigit():
            if change_value and change_value[0].isdigit():
                i = 1
                while (i < len(change_value) and
                        change_value[i] in '0123456789.'):
                    i += 1
                replication_ip = change_value[:i]
                change_value = change_value[i:]
            elif len(change_value) and change_value[0] == '[':
            elif change_value and change_value.startswith('['):
                i = 1
                while i < len(change_value) and change_value[i] != ']':
                    i += 1
@@ -421,6 +421,8 @@ swift-ring-builder <builder_file> create <part_power> <replicas>
        """
swift-ring-builder <builder_file>
    Shows information about the ring and the devices within.
    Flags:
        DEL - marked for removal and will be removed next rebalance.
        """
        print('%s, build version %d' % (builder_file, builder.version))
        regions = 0

@@ -446,28 +448,19 @@ swift-ring-builder <builder_file>
            print('The overload factor is %0.2f%% (%.6f)' % (
                builder.overload * 100, builder.overload))
        if builder.devs:
            balance_per_dev = builder._build_balance_per_dev()
            print('Devices: id region zone ip address port '
                  'replication ip replication port name '
                  'weight partitions balance meta')
            weighted_parts = builder.parts * builder.replicas / \
                sum(d['weight'] for d in builder.devs if d is not None)
            for dev in builder.devs:
                if dev is None:
                    continue
                if not dev['weight']:
                    if dev['parts']:
                        balance = MAX_BALANCE
                    else:
                        balance = 0
                else:
                    balance = 100.0 * dev['parts'] / \
                        (dev['weight'] * weighted_parts) - 100.0
                  'weight partitions balance flags meta')
            for dev in builder._iter_devs():
                flags = 'DEL' if dev in builder._remove_devs else ''
                print(' %5d %7d %5d %15s %5d %15s %17d %9s %6.02f '
                      '%10s %7.02f %s' %
                      '%10s %7.02f %5s %s' %
                      (dev['id'], dev['region'], dev['zone'], dev['ip'],
                       dev['port'], dev['replication_ip'],
                       dev['replication_port'], dev['device'], dev['weight'],
                       dev['parts'], balance, dev['meta']))
                       dev['parts'], balance_per_dev[dev['id']], flags,
                       dev['meta']))
        exit(EXIT_SUCCESS)

    def search():
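
The per-device balance that used to be computed inline is now supplied by
``_build_balance_per_dev()``. A worked example of the underlying formula,
with illustrative numbers::

    # Balance: how far a device's partition count deviates from its
    # fair, weight-proportional share (as a percentage).
    parts, replicas = 1024, 3          # ring-wide totals
    total_weight = 300.0               # sum of all device weights
    weighted_parts = parts * replicas / total_weight   # parts per weight unit

    dev_weight, dev_parts = 100.0, 1126
    balance = 100.0 * dev_parts / (dev_weight * weighted_parts) - 100.0
    print(round(balance, 2))  # ~9.96: this device holds ~10% over its share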
@@ -797,7 +790,7 @@ swift-ring-builder <builder_file> rebalance [options]
        devs_changed = builder.devs_changed
        try:
            last_balance = builder.get_balance()
            parts, balance = builder.rebalance(seed=get_seed(3))
            parts, balance, removed_devs = builder.rebalance(seed=get_seed(3))
        except exceptions.RingBuilderError as e:
            print('-' * 79)
            print("An error has occurred during ring validation. Common\n"

@@ -807,7 +800,7 @@ swift-ring-builder <builder_file> rebalance [options]
                  (e,))
            print('-' * 79)
            exit(EXIT_ERROR)
        if not (parts or options.force):
        if not (parts or options.force or removed_devs):
            print('No partitions could be reassigned.')
            print('Either none need to be or none can be due to '
                  'min_part_hours [%s].' % builder.min_part_hours)

@@ -921,6 +914,8 @@ swift-ring-builder <builder_file> dispersion <search_filter> [options]
                              verbose=options.verbose)
        print('Dispersion is %.06f, Balance is %.06f, Overload is %0.2f%%' % (
            builder.dispersion, builder.get_balance(), builder.overload * 100))
        print('Required overload is %.6f%%' % (
            builder.get_required_overload() * 100))
        if report['worst_tier']:
            status = EXIT_WARNING
            print('Worst tier is %.06f (%s)' % (report['max_dispersion'],
@@ -1031,10 +1026,22 @@ swift-ring-builder <ring_file> write_builder [min_part_hours]
        for parts in builder._replica2part2dev:
            for dev_id in parts:
                builder.devs[dev_id]['parts'] += 1
        builder._set_parts_wanted()
        builder.save(builder_file)

    def pretend_min_part_hours_passed():
        """
swift-ring-builder <builder_file> pretend_min_part_hours_passed
    Resets the clock on the last time a rebalance happened, thus
    circumventing the min_part_hours check.

    *****************************
    USE THIS WITH EXTREME CAUTION
    *****************************

    If you run this command and deploy rebalanced rings before a replication
    pass completes, you may introduce unavailability in your cluster. This
    has an end-user impact.
        """
        builder.pretend_min_part_hours_passed()
        builder.save(builder_file)
        exit(EXIT_SUCCESS)

@@ -1144,7 +1151,7 @@ def main(arguments=None):
        print(Commands.default.__doc__.strip())
        print()
        cmds = [c for c, f in Commands.__dict__.items()
                if f.__doc__ and c[0] != '_' and c != 'default']
                if f.__doc__ and not c.startswith('_') and c != 'default']
        cmds.sort()
        for cmd in cmds:
            print(Commands.__dict__[cmd].__doc__.strip())
@@ -17,6 +17,7 @@

from contextlib import contextmanager, closing
import hashlib
import json
import logging
import os
from uuid import uuid4

@@ -32,7 +33,7 @@ from eventlet import sleep, Timeout
import sqlite3

from swift.common.constraints import MAX_META_COUNT, MAX_META_OVERALL_SIZE
from swift.common.utils import json, Timestamp, renamer, \
from swift.common.utils import Timestamp, renamer, \
    mkdirs, lock_parent_directory, fallocate
from swift.common.exceptions import LockTimeout
from swift.common.swob import HTTPBadRequest
@@ -166,7 +166,7 @@ class Replicator(Daemon):
        self.max_diffs = int(conf.get('max_diffs') or 100)
        self.interval = int(conf.get('interval') or
                            conf.get('run_pause') or 30)
        self.node_timeout = int(conf.get('node_timeout', 10))
        self.node_timeout = float(conf.get('node_timeout', 10))
        self.conn_timeout = float(conf.get('conn_timeout', 0.5))
        self.rsync_compress = config_true_value(
            conf.get('rsync_compress', 'no'))

@@ -174,11 +174,12 @@ class Replicator(Daemon):
        if not self.rsync_module:
            self.rsync_module = '{replication_ip}::%s' % self.server_type
            if config_true_value(conf.get('vm_test_mode', 'no')):
                self.logger.warn('Option %(type)s-replicator/vm_test_mode is '
                                 'deprecated and will be removed in a future '
                                 'version. Update your configuration to use '
                                 'option %(type)s-replicator/rsync_module.'
                                 % {'type': self.server_type})
                self.logger.warning('Option %(type)s-replicator/vm_test_mode '
                                    'is deprecated and will be removed in a '
                                    'future version. Update your configuration'
                                    ' to use option %(type)s-replicator/'
                                    'rsync_module.'
                                    % {'type': self.server_type})
                self.rsync_module += '{replication_port}'
        self.reclaim_age = float(conf.get('reclaim_age', 86400 * 7))
        swift.common.db.DB_PREALLOCATION = \

@@ -434,8 +435,12 @@ class Replicator(Daemon):
            if self._in_sync(rinfo, info, broker, local_sync):
                return True
            # if the difference in rowids between the two differs by
            # more than 50%, rsync then do a remote merge.
            if rinfo['max_row'] / float(info['max_row']) < 0.5:
            # more than 50% and the difference is greater than per_diff,
            # rsync then do a remote merge.
            # NOTE: difference > per_diff stops us from dropping to rsync
            # on smaller containers, who have only a few rows to sync.
            if rinfo['max_row'] / float(info['max_row']) < 0.5 and \
                    info['max_row'] - rinfo['max_row'] > self.per_diff:
                self.stats['remote_merge'] += 1
                self.logger.increment('remote_merges')
                return self._rsync_db(broker, node, http, info['id'],

@@ -616,17 +621,19 @@ class Replicator(Daemon):
            self.logger.error(_('ERROR Failed to get my own IPs?'))
            return
        self._local_device_ids = set()
        found_local = False
        for node in self.ring.devs:
            if node and is_local_device(ips, self.port,
                                        node['replication_ip'],
                                        node['replication_port']):
                found_local = True
                if self.mount_check and not ismount(
                        os.path.join(self.root, node['device'])):
                    self._add_failure_stats(
                        [(failure_dev['replication_ip'],
                          failure_dev['device'])
                         for failure_dev in self.ring.devs if failure_dev])
                    self.logger.warn(
                    self.logger.warning(
                        _('Skipping %(device)s as it is not mounted') % node)
                    continue
                unlink_older_than(

@@ -636,6 +643,10 @@ class Replicator(Daemon):
                if os.path.isdir(datadir):
                    self._local_device_ids.add(node['id'])
                    dirs.append((datadir, node['id']))
        if not found_local:
            self.logger.error("Can't find itself %s with port %s in ring "
                              "file, not replicating",
                              ", ".join(ips), self.port)
        self.logger.info(_('Beginning replication run'))
        for part, object_file, node_id in roundrobin_datadirs(dirs):
            self.cpool.spawn_n(
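
A quick numeric check of the tightened rsync guard: both conditions must now
hold before falling back to rsync plus remote merge (numbers are
illustrative; 1000 is the usual ``per_diff`` default)::

    per_diff = 1000
    local_max_row, remote_max_row = 4000, 1500

    ratio_behind = remote_max_row / float(local_max_row) < 0.5   # True (0.375)
    big_gap = local_max_row - remote_max_row > per_diff          # True (2500)
    use_rsync = ratio_behind and big_gap                         # True

    # A tiny DB that is >50% behind no longer triggers rsync,
    # because the absolute gap (25 rows) is below per_diff:
    local_max_row, remote_max_row = 40, 15
    use_rsync = (remote_max_row / float(local_max_row) < 0.5 and
                 local_max_row - remote_max_row > per_diff)      # False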
@@ -18,6 +18,7 @@ Internal client library for making calls directly to the servers rather than
through the proxy.
"""

import json
import os
import socket
from time import time

@@ -34,11 +35,6 @@ from swift.common.http import HTTP_NO_CONTENT, HTTP_INSUFFICIENT_STORAGE, \
from swift.common.swob import HeaderKeyDict
from swift.common.utils import quote

try:
    import simplejson as json
except ImportError:
    import json


class DirectClientException(ClientException):

@@ -54,7 +50,7 @@ class DirectClientException(ClientException):


def _get_direct_account_container(path, stype, node, part,
                                  account, marker=None, limit=None,
                                  marker=None, limit=None,
                                  prefix=None, delimiter=None, conn_timeout=5,
                                  response_timeout=15):
    """Base class for get direct account and container.

@@ -117,7 +113,7 @@ def direct_get_account(node, part, account, marker=None, limit=None,
    """
    path = '/' + account
    return _get_direct_account_container(path, "Account", node, part,
                                         account, marker=marker,
                                         marker=marker,
                                         limit=limit, prefix=prefix,
                                         delimiter=delimiter,
                                         conn_timeout=conn_timeout,

@@ -193,7 +189,7 @@ def direct_get_container(node, part, account, container, marker=None,
    """
    path = '/%s/%s' % (account, container)
    return _get_direct_account_container(path, "Container", node,
                                         part, account, marker=marker,
                                         part, marker=marker,
                                         limit=limit, prefix=prefix,
                                         delimiter=delimiter,
                                         conn_timeout=conn_timeout,
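
The redundant ``account`` positional is dropped from the internal helper,
since the account is already encoded in ``path``; the public functions are
called exactly as before. A hedged example (the node dict and partition are
made up; real values come from the account ring)::

    from swift.common import direct_client

    # Illustrative node and partition; in practice use ring.get_nodes().
    node = {'ip': '10.0.0.1', 'port': 6002, 'device': 'sdb1',
            'replication_ip': '10.0.0.1', 'replication_port': 6002}
    part = 17

    headers, containers = direct_client.direct_get_account(
        node, part, 'AUTH_test', limit=100, prefix='images_')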
@@ -27,7 +27,7 @@ from time import gmtime, strftime, time
from zlib import compressobj

from swift.common.utils import quote
from swift.common.http import HTTP_NOT_FOUND
from swift.common.http import HTTP_NOT_FOUND, HTTP_MULTIPLE_CHOICES
from swift.common.swob import Request
from swift.common.wsgi import loadapp, pipeline_property


@@ -256,6 +256,8 @@ class InternalClient(object):
                (path, quote(marker), quote(end_marker)),
                {}, acceptable_statuses)
            if not resp.status_int == 200:
            if resp.status_int >= HTTP_MULTIPLE_CHOICES:
                ''.join(resp.app_iter)
                break
            data = json.loads(resp.body)
            if not data:
@@ -42,6 +42,8 @@ ALL_SERVERS = ['account-auditor', 'account-server', 'container-auditor',
MAIN_SERVERS = ['proxy-server', 'account-server', 'container-server',
                'object-server']
REST_SERVERS = [s for s in ALL_SERVERS if s not in MAIN_SERVERS]
# aliases mapping
ALIASES = {'all': ALL_SERVERS, 'main': MAIN_SERVERS, 'rest': REST_SERVERS}
GRACEFUL_SHUTDOWN_SERVERS = MAIN_SERVERS + ['auth-server']
START_ONCE_SERVERS = REST_SERVERS
# These are servers that match a type (account-*, container-*, object-*) but

@@ -173,18 +175,17 @@ class Manager(object):

    def __init__(self, servers, run_dir=RUN_DIR):
        self.server_names = set()
        self._default_strict = True
        for server in servers:
            if server == 'all':
                self.server_names.update(ALL_SERVERS)
            elif server == 'main':
                self.server_names.update(MAIN_SERVERS)
            elif server == 'rest':
                self.server_names.update(REST_SERVERS)
            if server in ALIASES:
                self.server_names.update(ALIASES[server])
                self._default_strict = False
            elif '*' in server:
                # convert glob to regex
                self.server_names.update([
                    s for s in ALL_SERVERS if
                    re.match(server.replace('*', '.*'), s)])
                self._default_strict = False
            else:
                self.server_names.add(server)

@@ -211,8 +212,17 @@ class Manager(object):
        setup_env()
        status = 0

        strict = kwargs.get('strict')
        # if strict not set explicitly
        if strict is None:
            strict = self._default_strict

        for server in self.servers:
            server.launch(**kwargs)
            status += 0 if server.launch(**kwargs) else 1

        if not strict:
            status = 0

        if not kwargs.get('daemon', True):
            for server in self.servers:
                try:
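
A sketch of how the new ``strict`` handling plays out for callers, assuming
the ``swift.common.manager`` API shown above: explicit server names default
to strict (failures surface in the exit status), while aliases and globs stay
best-effort unless the caller forces strictness::

    from swift.common.manager import Manager

    # Explicit name: strict by default, a failed launch is reported.
    status = Manager(['object-server']).start()

    # Alias: best-effort by default, so failures are masked (status 0)
    # unless strict is requested explicitly.
    status = Manager(['all']).start()
    status = Manager(['all']).start(strict=True)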
@@ -45,6 +45,7 @@ http://github.com/memcached/memcached/blob/1.4.2/doc/protocol.txt
"""

import six.moves.cPickle as pickle
import json
import logging
import time
from bisect import bisect

@@ -56,7 +57,6 @@ from eventlet.pools import Pool
from eventlet import Timeout
from six.moves import range

from swift.common.utils import json

DEFAULT_MEMCACHED_PORT = 11211


@@ -223,7 +223,7 @@ class MemcacheRing(object):
        """Returns a server connection to the pool."""
        self._client_cache[server].put((fp, sock))

    def set(self, key, value, serialize=True, timeout=0, time=0,
    def set(self, key, value, serialize=True, time=0,
            min_compress_len=0):
        """
        Set a key/value pair in memcache

@@ -233,22 +233,14 @@ class MemcacheRing(object):
        :param serialize: if True, value is serialized with JSON before sending
                          to memcache, or with pickle if configured to use
                          pickle instead of JSON (to avoid cache poisoning)
        :param timeout: ttl in memcache, this parameter is now deprecated. It
                        will be removed in next release of OpenStack,
                        use time parameter instead in the future
        :time: equivalent to timeout, this parameter is added to keep the
               signature compatible with python-memcached interface. This
               implementation will take this value and sign it to the
               parameter timeout
        :param time: the time to live
        :min_compress_len: minimum compress length, this parameter was added
                           to keep the signature compatible with
                           python-memcached interface. This implementation
                           ignores it.
        """
        key = md5hash(key)
        if timeout:
            logging.warn("parameter timeout has been deprecated, use time")
        timeout = sanitize_timeout(time or timeout)
        timeout = sanitize_timeout(time)
        flags = 0
        if serialize and self._allow_pickle:
            value = pickle.dumps(value, PICKLE_PROTOCOL)

@@ -302,7 +294,7 @@ class MemcacheRing(object):
        except (Exception, Timeout) as e:
            self._exception_occurred(server, e, sock=sock, fp=fp)

    def incr(self, key, delta=1, time=0, timeout=0):
    def incr(self, key, delta=1, time=0):
        """
        Increments a key which has a numeric value by delta.
        If the key can't be found, it's added as delta or 0 if delta < 0.

@@ -315,22 +307,16 @@ class MemcacheRing(object):
        :param key: key
        :param delta: amount to add to the value of key (or set as the value
                      if the key is not found) will be cast to an int
        :param time: the time to live. This parameter deprecates parameter
                     timeout. The addition of this parameter is to make the
                     interface consistent with set and set_multi methods
        :param timeout: ttl in memcache, deprecated, will be removed in future
                        OpenStack releases
        :param time: the time to live
        :returns: result of incrementing
        :raises MemcacheConnectionError:
        """
        if timeout:
            logging.warn("parameter timeout has been deprecated, use time")
        key = md5hash(key)
        command = 'incr'
        if delta < 0:
            command = 'decr'
        delta = str(abs(int(delta)))
        timeout = sanitize_timeout(time or timeout)
        timeout = sanitize_timeout(time)
        for (server, fp, sock) in self._get_conns(key):
            try:
                with Timeout(self._io_timeout):

@@ -358,7 +344,7 @@ class MemcacheRing(object):
                self._exception_occurred(server, e, sock=sock, fp=fp)
        raise MemcacheConnectionError("No Memcached connections succeeded.")

    def decr(self, key, delta=1, time=0, timeout=0):
    def decr(self, key, delta=1, time=0):
        """
        Decrements a key which has a numeric value by delta. Calls incr with
        -delta.

@@ -367,18 +353,11 @@ class MemcacheRing(object):
        :param delta: amount to subtract to the value of key (or set the
                      value to 0 if the key is not found) will be cast to
                      an int
        :param time: the time to live. This parameter depcates parameter
                     timeout. The addition of this parameter is to make the
                     interface consistent with set and set_multi methods
        :param timeout: ttl in memcache, deprecated, will be removed in future
                        OpenStack releases
        :param time: the time to live
        :returns: result of decrementing
        :raises MemcacheConnectionError:
        """
        if timeout:
            logging.warn("parameter timeout has been deprecated, use time")

        return self.incr(key, delta=-delta, time=(time or timeout))
        return self.incr(key, delta=-delta, time=time)

    def delete(self, key):
        """

@@ -398,8 +377,8 @@ class MemcacheRing(object):
        except (Exception, Timeout) as e:
            self._exception_occurred(server, e, sock=sock, fp=fp)

    def set_multi(self, mapping, server_key, serialize=True, timeout=0,
                  time=0, min_compress_len=0):
    def set_multi(self, mapping, server_key, serialize=True, time=0,
                  min_compress_len=0):
        """
        Sets multiple key/value pairs in memcache.


@@ -409,23 +388,14 @@ class MemcacheRing(object):
        :param serialize: if True, value is serialized with JSON before sending
                          to memcache, or with pickle if configured to use
                          pickle instead of JSON (to avoid cache poisoning)
        :param timeout: ttl for memcache. This parameter is now deprecated, it
                        will be removed in next release of OpenStack, use time
                        parameter instead in the future
        :time: equalvent to timeout, this parameter is added to keep the
               signature compatible with python-memcached interface. This
               implementation will take this value and sign it to parameter
               timeout
        :param time: the time to live
        :min_compress_len: minimum compress length, this parameter was added
                           to keep the signature compatible with
                           python-memcached interface. This implementation
                           ignores it
        """
        if timeout:
            logging.warn("parameter timeout has been deprecated, use time")

        server_key = md5hash(server_key)
        timeout = sanitize_timeout(time or timeout)
        timeout = sanitize_timeout(time)
        msg = ''
        for key, value in mapping.items():
            key = md5hash(key)
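
With the deprecated ``timeout`` keywords removed, TTLs are passed via
``time`` only. A hedged usage sketch (the memcached address is a
placeholder)::

    from swift.common.memcached import MemcacheRing

    mc = MemcacheRing(['127.0.0.1:11211'])
    mc.set('token/abc', {'user': 'tester'}, time=300)    # 5 minute TTL
    mc.incr('hits', delta=1, time=3600)
    mc.decr('hits', delta=1, time=3600)
    mc.set_multi({'a': 1, 'b': 2}, 'server_key', time=60)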
@@ -13,7 +13,9 @@
# See the License for the specific language governing permissions and
# limitations under the License.

from swift.common.utils import urlparse, json
import json

from swift.common.utils import urlparse


def clean_acl(name, value):

@@ -95,17 +97,17 @@ def clean_acl(name, value):
                values.append(raw_value)
                continue
            first, second = (v.strip() for v in raw_value.split(':', 1))
            if not first or first[0] != '.':
            if not first or not first.startswith('.'):
                values.append(raw_value)
            elif first in ('.r', '.ref', '.referer', '.referrer'):
                if 'write' in name:
                    raise ValueError('Referrers not allowed in write ACL: '
                                     '%s' % repr(raw_value))
                negate = False
                if second and second[0] == '-':
                if second and second.startswith('-'):
                    negate = True
                    second = second[1:].strip()
                if second and second != '*' and second[0] == '*':
                if second and second != '*' and second.startswith('*'):
                    second = second[1:].strip()
                if not second or second == '.':
                    raise ValueError('No host/domain value after referrer '

@@ -261,13 +263,13 @@ def referrer_allowed(referrer, referrer_acl):
    if referrer_acl:
        rhost = urlparse(referrer or '').hostname or 'unknown'
        for mhost in referrer_acl:
            if mhost[0] == '-':
            if mhost.startswith('-'):
                mhost = mhost[1:]
                if mhost == rhost or (mhost[0] == '.' and
                if mhost == rhost or (mhost.startswith('.') and
                                      rhost.endswith(mhost)):
                    allow = False
            elif mhost == '*' or mhost == rhost or \
                    (mhost[0] == '.' and rhost.endswith(mhost)):
                    (mhost.startswith('.') and rhost.endswith(mhost)):
                allow = True
    return allow
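
The ``startswith`` rewrites preserve behavior for non-empty strings and are
safer for empty ones (``mhost[0]`` raises ``IndexError`` on ``''``, while
``startswith`` simply returns False). For reference, how ``referrer_allowed``
reads an already-parsed referrer ACL::

    from swift.common.middleware.acl import referrer_allowed

    # '.example.com' allows any subdomain; a leading '-' denies a host.
    acl = ['.example.com', '-untrusted.example.com']
    assert referrer_allowed('http://docs.example.com/page', acl)
    assert not referrer_allowed('http://untrusted.example.com/', acl)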
@@ -13,6 +13,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.

import json
from six.moves.urllib.parse import quote, unquote
import tarfile
from xml.sax import saxutils

@@ -23,7 +24,7 @@ from swift.common.swob import Request, HTTPBadGateway, \
    HTTPCreated, HTTPBadRequest, HTTPNotFound, HTTPUnauthorized, HTTPOk, \
    HTTPPreconditionFailed, HTTPRequestEntityTooLarge, HTTPNotAcceptable, \
    HTTPLengthRequired, HTTPException, HTTPServerError, wsgify
from swift.common.utils import json, get_logger, register_swift_info
from swift.common.utils import get_logger, register_swift_info
from swift.common import constraints
from swift.common.http import HTTP_UNAUTHORIZED, HTTP_NOT_FOUND, HTTP_CONFLICT


@@ -32,7 +33,7 @@ class CreateContainerError(Exception):
    def __init__(self, msg, status_int, status):
        self.status_int = status_int
        self.status = status
        Exception.__init__(self, msg)
        super(CreateContainerError, self).__init__(msg)


ACCEPTABLE_FORMATS = ['text/plain', 'application/json', 'application/xml',

@@ -121,6 +122,67 @@ class Bulk(object):
    Only regular files will be uploaded. Empty directories, symlinks, etc will
    not be uploaded.

    Content Type:

    If the content-type header is set in the extract-archive call, Swift will
    assign that content-type to all the underlying files. The bulk middleware
    will extract the archive file and send the internal files using PUT
    operations using the same headers from the original request
    (e.g. auth-tokens, content-Type, etc.). Notice that any middleware call
    that follows the bulk middleware does not know if this was a bulk request
    or if these were individual requests sent by the user.

    In order to make Swift detect the content-type for the files based on the
    file extension, the content-type in the extract-archive call should not be
    set. Alternatively, it is possible to explicitly tell swift to detect the
    content type using this header:

    X-Detect-Content-Type:true

    For example:

    curl -X PUT http://127.0.0.1/v1/AUTH_acc/cont/$?extract-archive=tar -T
    backup.tar -H "Content-Type: application/x-tar" -H "X-Auth-Token: xxx"
    -H "X-Detect-Content-Type:true"

    Assigning Metadata:

    The tar file format (1) allows for UTF-8 key/value pairs to be associated
    with each file in an archive. If a file has extended attributes, then tar
    will store those as key/value pairs. The bulk middleware can read those
    extended attributes and convert them to Swift object metadata. Attributes
    starting with "user.meta" are converted to object metadata, and
    "user.mime_type" is converted to Content-Type.

    For example:

    setfattr -n user.mime_type -v "application/python-setup" setup.py
    setfattr -n user.meta.lunch -v "burger and fries" setup.py
    setfattr -n user.meta.dinner -v "baked ziti" setup.py
    setfattr -n user.stuff -v "whee" setup.py

    Will get translated to headers:

    Content-Type: application/python-setup
    X-Object-Meta-Lunch: burger and fries
    X-Object-Meta-Dinner: baked ziti

    The bulk middleware will handle xattrs stored by both GNU and BSD tar (2).
    Only xattrs user.mime_type and user.meta.* are processed. Other attributes
    are ignored.

    Notes:

    (1) The POSIX 1003.1-2001 (pax) format. The default format on GNU tar
    1.27.1 or later.

    (2) Even with pax-format tarballs, different encoders store xattrs slightly
    differently; for example, GNU tar stores the xattr "user.userattribute" as
    pax header "SCHILY.xattr.user.userattribute", while BSD tar (which uses
    libarchive) stores it as "LIBARCHIVE.xattr.user.userattribute".

    Response:

    The response from bulk operations functions differently from other swift
    responses. This is because a short request body sent from the client could
    result in many operations on the proxy server and precautions need to be
@@ -114,6 +114,7 @@ Here's an example using ``curl`` with tiny 1-byte segments::
        http://<storage_url>/container/myobject
"""

import json
import os

import six

@@ -126,7 +127,7 @@ from swift.common.exceptions import ListingIterError, SegmentError
from swift.common.http import is_success
from swift.common.swob import Request, Response, \
    HTTPRequestedRangeNotSatisfiable, HTTPBadRequest, HTTPConflict
from swift.common.utils import get_logger, json, \
from swift.common.utils import get_logger, \
    RateLimitedIterator, read_conf_dir, quote, close_if_possible, \
    closing_if_possible
from swift.common.request_helpers import SegmentedIterable

@@ -415,13 +416,12 @@ class DynamicLargeObject(object):
            return GetContext(self, self.logger).\
                handle_request(req, start_response)
        elif req.method == 'PUT':
            error_response = self.validate_x_object_manifest_header(
                req, start_response)
            error_response = self._validate_x_object_manifest_header(req)
            if error_response:
                return error_response(env, start_response)
        return self.app(env, start_response)

    def validate_x_object_manifest_header(self, req, start_response):
    def _validate_x_object_manifest_header(self, req):
        """
        Make sure that X-Object-Manifest is valid if present.
        """

@@ -433,7 +433,7 @@ class DynamicLargeObject(object):
        except ValueError:
            pass
        if not container or not prefix or '?' in value or '&' in value or \
                prefix[0] == '/':
                prefix.startswith('/'):
            return HTTPBadRequest(
                request=req,
                body=('X-Object-Manifest must be in the '
@@ -68,7 +68,7 @@ class DomainRemapMiddleware(object):
    def __init__(self, app, conf):
        self.app = app
        self.storage_domain = conf.get('storage_domain', 'example.com')
        if self.storage_domain and self.storage_domain[0] != '.':
        if self.storage_domain and not self.storage_domain.startswith('.'):
            self.storage_domain = '.' + self.storage_domain
        self.path_root = conf.get('path_root', 'v1').strip('/')
        prefixes = conf.get('reseller_prefixes', 'AUTH')
@@ -272,7 +272,7 @@ class FormPost(object):
                    hdrs['Content-Type'] or 'application/octet-stream'
                status, subheaders, message = \
                    self._perform_subrequest(env, attributes, fp, keys)
                if status[:1] != '2':
                if not status.startswith('2'):
                    break
            else:
                data = ''

@@ -337,7 +337,7 @@ class FormPost(object):
            del subenv['QUERY_STRING']
        subenv['HTTP_TRANSFER_ENCODING'] = 'chunked'
        subenv['wsgi.input'] = _CappedFileLikeObject(fp, max_file_size)
        if subenv['PATH_INFO'][-1] != '/' and \
        if not subenv['PATH_INFO'].endswith('/') and \
                subenv['PATH_INFO'].count('/') < 4:
            subenv['PATH_INFO'] += '/'
        subenv['PATH_INFO'] += attributes['filename'] or 'filename'
@@ -31,17 +31,17 @@ UNKNOWN_ID = '_unknown'
class KeystoneAuth(object):
    """Swift middleware to Keystone authorization system.

    In Swift's proxy-server.conf add this middleware to your pipeline::

        [pipeline:main]
        pipeline = catch_errors cache authtoken keystoneauth proxy-server

    Make sure you have the authtoken middleware before the
    keystoneauth middleware.
    In Swift's proxy-server.conf add this keystoneauth middleware and the
    authtoken middleware to your pipeline. Make sure you have the authtoken
    middleware before the keystoneauth middleware.

    The authtoken middleware will take care of validating the user and
    keystoneauth will authorize access.

    The sample proxy-server.conf shows a sample pipeline that uses keystone.

    :download:`proxy-server.conf-sample </../../etc/proxy-server.conf-sample>`

    The authtoken middleware is shipped with keystonemiddleware - it
    does not have any other dependencies than itself so you can either
    install it by copying the file directly in your python path or by

@@ -196,7 +196,7 @@ class KeystoneAuth(object):
            conf.get('allow_names_in_acls', 'true'))

    def __call__(self, environ, start_response):
        identity = self._keystone_identity(environ)
        env_identity = self._keystone_identity(environ)

        # Check if one of the middleware like tempurl or formpost have
        # set the swift.authorize_override environ and want to control the

@@ -207,14 +207,13 @@ class KeystoneAuth(object):
            self.logger.debug(msg)
            return self.app(environ, start_response)

        if identity:
            self.logger.debug('Using identity: %r', identity)
            environ['keystone.identity'] = identity
            environ['REMOTE_USER'] = identity.get('tenant')
            env_identity = self._integral_keystone_identity(environ)
        if env_identity:
            self.logger.debug('Using identity: %r', env_identity)
            environ['REMOTE_USER'] = env_identity.get('tenant')
            environ['keystone.identity'] = env_identity
            environ['swift.authorize'] = functools.partial(
                self.authorize, env_identity)
            user_roles = (r.lower() for r in identity.get('roles', []))
            user_roles = (r.lower() for r in env_identity.get('roles', []))
            if self.reseller_admin_role in user_roles:
                environ['reseller_request'] = True
        else:

@@ -238,26 +237,11 @@ class KeystoneAuth(object):

    def _keystone_identity(self, environ):
        """Extract the identity from the Keystone auth component."""
        # In next release, we would add user id in env['keystone.identity'] by
        # using _integral_keystone_identity to replace current
        # _keystone_identity. The purpose of keeping it in this release it for
        # back compatibility.
        if (environ.get('HTTP_X_IDENTITY_STATUS') != 'Confirmed'
                or environ.get(
                    'HTTP_X_SERVICE_IDENTITY_STATUS') not in (None, 'Confirmed')):
            return
        roles = list_from_csv(environ.get('HTTP_X_ROLES', ''))
        identity = {'user': environ.get('HTTP_X_USER_NAME'),
                    'tenant': (environ.get('HTTP_X_TENANT_ID'),
                               environ.get('HTTP_X_TENANT_NAME')),
                    'roles': roles}
        return identity

    def _integral_keystone_identity(self, environ):
        """Extract the identity from the Keystone auth component."""
        if environ.get('HTTP_X_IDENTITY_STATUS') != 'Confirmed':
            return
        roles = list_from_csv(environ.get('HTTP_X_ROLES', ''))
        service_roles = list_from_csv(environ.get('HTTP_X_SERVICE_ROLES', ''))
        identity = {'user': (environ.get('HTTP_X_USER_ID'),
                             environ.get('HTTP_X_USER_NAME')),

@@ -341,9 +325,9 @@ class KeystoneAuth(object):
            # unknown domain, update if req confirms domain
            new_id = req_id or ''
        elif req_has_id and sysmeta_id != req_id:
            self.logger.warn("Inconsistent project domain id: " +
                             "%s in token vs %s in account metadata."
                             % (req_id, sysmeta_id))
            self.logger.warning("Inconsistent project domain id: " +
                                "%s in token vs %s in account metadata."
                                % (req_id, sysmeta_id))

        if new_id is not None:
            req.headers[PROJECT_DOMAIN_ID_SYSMETA_HEADER] = new_id
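
For a confirmed token, the consolidated ``_keystone_identity`` now derives
roughly the following from the environ set by auth_token (all values here
are illustrative)::

    # Illustrative WSGI environ populated by the authtoken middleware:
    environ = {
        'HTTP_X_IDENTITY_STATUS': 'Confirmed',
        'HTTP_X_USER_ID': 'u123',
        'HTTP_X_USER_NAME': 'tester',
        'HTTP_X_TENANT_ID': 't456',
        'HTTP_X_TENANT_NAME': 'demo',
        'HTTP_X_ROLES': 'admin,member',
        'HTTP_X_SERVICE_ROLES': '',
    }
    # The merged method returns (id, name) pairs rather than bare names:
    identity = {
        'user': ('u123', 'tester'),
        'tenant': ('t456', 'demo'),
        'roles': ['admin', 'member'],
    }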
@@ -78,11 +78,12 @@ with this middleware enabled should not be open to an untrusted
environment (everyone can query the locality data using this middleware).
"""

import json

from six.moves.urllib.parse import quote, unquote

from swift.common.ring import Ring
from swift.common.utils import json, get_logger, split_path
from swift.common.utils import get_logger, split_path
from swift.common.swob import Request, Response
from swift.common.swob import HTTPBadRequest, HTTPMethodNotAllowed
from swift.common.storage_policy import POLICIES
@@ -79,12 +79,7 @@ class NameCheckMiddleware(object):
            self.logger.debug("name_check: self.forbidden_chars %s" %
                              self.forbidden_chars)

        for c in unquote(req.path):
            if c in self.forbidden_chars:
                return True
            else:
                pass
        return False
        return any((c in unquote(req.path)) for c in self.forbidden_chars)

    def check_length(self, req):
        '''

@@ -93,10 +88,7 @@ class NameCheckMiddleware(object):
        Returns False if the length is <= the maximum
        '''
        length = len(unquote(req.path))
        if length > self.maximum_length:
            return True
        else:
            return False
        return length > self.maximum_length

    def check_regexp(self, req):
        '''
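
The ``any()`` form short-circuits just like the removed loop; a quick
equivalence check::

    forbidden_chars = "'\"<>`"
    path = '/v1/AUTH_test/cont/bad<name>'

    # Old style: walk the path, test each character.
    old_style = False
    for c in path:
        if c in forbidden_chars:
            old_style = True
            break

    # New style: walk the forbidden set, test membership in the path.
    new_style = any((c in path) for c in forbidden_chars)
    assert old_style == new_style  # both True here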
@@ -14,6 +14,7 @@
# limitations under the License.

import errno
import json
import os
import time
from swift import gettext_ as _

@@ -21,7 +22,7 @@ from swift import gettext_ as _
from swift import __version__ as swiftver
from swift.common.storage_policy import POLICIES
from swift.common.swob import Request, Response
from swift.common.utils import get_logger, config_true_value, json, \
from swift.common.utils import get_logger, config_true_value, \
    SWIFT_CONF_FILE
from swift.common.constraints import check_mount
from resource import getpagesize
@@ -26,26 +26,31 @@ defined manifest of the object segments is used.
Uploading the Manifest
----------------------

After the user has uploaded the objects to be concatenated a manifest is
After the user has uploaded the objects to be concatenated, a manifest is
uploaded. The request must be a PUT with the query parameter::

    ?multipart-manifest=put

The body of this request will be an ordered list of files in
json data format. The data to be supplied for each segment is::
The body of this request will be an ordered list of segment descriptions in
JSON format. The data to be supplied for each segment is:

    path: the path to the segment object (not including account)
          /container/object_name
    etag: the etag given back when the segment object was PUT,
          or null
    size_bytes: the size of the complete segment object in
                bytes, or null
    range: (Optional) the range within the object to use as a
           segment. If omitted, the entire object is used.
=========== ========================================================
Key         Description
=========== ========================================================
path        the path to the segment object (not including account)
            /container/object_name
etag        the ETag given back when the segment object was PUT,
            or null
size_bytes  the size of the complete segment object in
            bytes, or null
range       (optional) the (inclusive) range within the object to
            use as a segment. If omitted, the entire object is used.
=========== ========================================================

The format of the list will be::
The format of the list will be:

.. code::

    json:
    [{"path": "/cont/object",
      "etag": "etagoftheobjectsegment",
      "size_bytes": 10485760,

@@ -84,6 +89,42 @@ segments of a SLO manifest can even be other SLO manifests. Treat them as any
other object i.e., use the Etag and Content-Length given on the PUT of the
sub-SLO in the manifest to the parent SLO.

-------------------
Range Specification
-------------------

Users now have the ability to specify ranges for SLO segments.
Users can now include an optional 'range' field in segment descriptions
to specify which bytes from the underlying object should be used for the
segment data. Only one range may be specified per segment.

.. note::

    The 'etag' and 'size_bytes' fields still describe the backing object as a
    whole.

If a user uploads this manifest:

.. code::

    [{"path": "/con/obj_seg_1", "etag": null, "size_bytes": 2097152,
      "range": "0-1048576"},
     {"path": "/con/obj_seg_2", "etag": null, "size_bytes": 2097152,
      "range": "512-1550000"},
     {"path": "/con/obj_seg_1", "etag": null, "size_bytes": 2097152,
      "range": "-2048"}]

The segment will consist of the first 1048576 bytes of /con/obj_seg_1,
followed by bytes 513 through 1550000 (inclusive) of /con/obj_seg_2, and
finally bytes 2095104 through 2097152 (i.e., the last 2048 bytes) of
/con/obj_seg_1.

.. note::

    The minimum sized range is min_segment_size, which by
    default is 1048576 (1MB).


-------------------------
Retrieving a Large Object
-------------------------

@@ -156,6 +197,7 @@ metadata which can be used for stats purposes.
from six.moves import range

from datetime import datetime
import json
import mimetypes
import re
import six

@@ -167,7 +209,7 @@ from swift.common.swob import Request, HTTPBadRequest, HTTPServerError, \
    HTTPOk, HTTPPreconditionFailed, HTTPException, HTTPNotFound, \
    HTTPUnauthorized, HTTPConflict, HTTPRequestedRangeNotSatisfiable,\
    Response, Range
from swift.common.utils import json, get_logger, config_true_value, \
from swift.common.utils import get_logger, config_true_value, \
    get_valid_utf8_str, override_bytes_from_content_type, split_path, \
    register_swift_info, RateLimitedIterator, quote, close_if_possible, \
    closing_if_possible

@@ -948,6 +990,7 @@ class StaticLargeObject(object):
        :params req: a swob.Request with an obj in path
        :returns: swob.Response whose app_iter set to Bulk.handle_delete_iter
        """
        req.headers['Content-Type'] = None  # Ignore content-type from client
        resp = HTTPOk(request=req)
        out_content_type = req.accept.best_match(ACCEPTABLE_FORMATS)
        if out_content_type:
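
A small stand-alone helper (not part of the middleware) showing how the
three example range strings above map to 0-indexed, inclusive byte offsets
of a 2097152-byte object::

    def resolve_range(spec, obj_size):
        """Map an SLO-style range string to (first_byte, last_byte)."""
        if spec.startswith('-'):
            # Suffix range: the last N bytes of the object.
            length = int(spec[1:])
            return obj_size - length, obj_size - 1
        first, last = spec.split('-')
        return int(first), int(last)

    size = 2097152
    print(resolve_range('0-1048576', size))    # (0, 1048576)
    print(resolve_range('512-1550000', size))  # (512, 1550000)
    print(resolve_range('-2048', size))        # (2095104, 2097151)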
@ -92,6 +92,7 @@ Example usage of this middleware via ``swift``:
|
|||
|
||||
Turn on listings::
|
||||
|
||||
swift post -r '.r:*,.rlistings' container
|
||||
swift post -m 'web-listings: true' container
|
||||
|
||||
Now you should see object listings for paths and pseudo paths that have no
|
||||
|
@ -121,8 +122,8 @@ import json
|
 import time

 from swift.common.utils import human_readable, split_path, config_true_value, \
-    quote, register_swift_info
-from swift.common.wsgi import make_pre_authed_env, WSGIContext
+    quote, register_swift_info, get_logger
+from swift.common.wsgi import make_env, WSGIContext
 from swift.common.http import is_success, is_redirection, HTTP_NOT_FOUND
 from swift.common.swob import Response, HTTPMovedPermanently, HTTPNotFound
 from swift.proxy.controllers.base import get_container_info

@@ -167,7 +168,7 @@ class _StaticWebContext(WSGIContext):
         save_response_status = self._response_status
         save_response_headers = self._response_headers
         save_response_exc_info = self._response_exc_info
-        resp = self._app_call(make_pre_authed_env(
+        resp = self._app_call(make_env(
             env, 'GET', '/%s/%s/%s/%s%s' % (
                 self.version, self.account, self.container,
                 self._get_status_int(), self._error),

@@ -236,7 +237,7 @@ class _StaticWebContext(WSGIContext):
             body += '  </body>\n</html>\n'
             resp = HTTPNotFound(body=body)(env, self._start_response)
             return self._error_response(resp, env, start_response)
-        tmp_env = make_pre_authed_env(
+        tmp_env = make_env(
             env, 'GET', '/%s/%s/%s' % (
                 self.version, self.account, self.container),
             self.agent, swift_source='SW')

@@ -349,7 +350,7 @@ class _StaticWebContext(WSGIContext):
             if config_true_value(env.get('HTTP_X_WEB_MODE', 'f')):
                 return HTTPNotFound()(env, start_response)
             return self.app(env, start_response)
-        if env['PATH_INFO'][-1] != '/':
+        if not env['PATH_INFO'].endswith('/'):
             resp = HTTPMovedPermanently(
                 location=(env['PATH_INFO'] + '/'))
             return resp(env, start_response)

@@ -414,13 +415,13 @@ class _StaticWebContext(WSGIContext):
         tmp_env['HTTP_USER_AGENT'] = \
             '%s StaticWeb' % env.get('HTTP_USER_AGENT')
         tmp_env['swift.source'] = 'SW'
-        if tmp_env['PATH_INFO'][-1] != '/':
+        if not tmp_env['PATH_INFO'].endswith('/'):
             tmp_env['PATH_INFO'] += '/'
         tmp_env['PATH_INFO'] += self._index
         resp = self._app_call(tmp_env)
         status_int = self._get_status_int()
         if is_success(status_int) or is_redirection(status_int):
-            if env['PATH_INFO'][-1] != '/':
+            if not env['PATH_INFO'].endswith('/'):
                 resp = HTTPMovedPermanently(
                     location=env['PATH_INFO'] + '/')
                 return resp(env, start_response)

@@ -428,8 +429,8 @@ class _StaticWebContext(WSGIContext):
                 self._response_exc_info)
             return resp
         if status_int == HTTP_NOT_FOUND:
-            if env['PATH_INFO'][-1] != '/':
-                tmp_env = make_pre_authed_env(
+            if not env['PATH_INFO'].endswith('/'):
+                tmp_env = make_env(
                     env, 'GET', '/%s/%s/%s' % (
                         self.version, self.account, self.container),
                     self.agent, swift_source='SW')

@@ -463,6 +464,7 @@ class StaticWeb(object):
         self.app = app
         #: The filter configuration dict.
         self.conf = conf
+        self.logger = get_logger(conf, log_route='staticweb')

     def __call__(self, env, start_response):
         """

@@ -472,6 +474,11 @@ class StaticWeb(object):
         :param start_response: The WSGI start_response hook.
         """
         env['staticweb.start_time'] = time.time()
+        if 'swift.authorize' not in env:
+            self.logger.warning(
+                'No authentication middleware authorized request yet. '
+                'Skipping staticweb')
+            return self.app(env, start_response)
         try:
             (version, account, container, obj) = \
                 split_path(env['PATH_INFO'], 2, 4, True)
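The staticweb hunks above replace ``make_pre_authed_env`` with ``make_env``, so index, listing and error-page subrequests now run under the caller's own authorization rather than a pre-authorized environment. A minimal sketch of the call shape, mirroring the positional argument order used in the hunks (the path and container here are invented)::

    from swift.common.wsgi import make_env

    # 'env' is the incoming WSGI environ; swift_source='SW' tags the
    # proxy logs generated by staticweb subrequests.
    tmp_env = make_env(env, 'GET', '/v1/AUTH_test/web-container',
                       '%(orig)s StaticWeb', swift_source='SW')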
@@ -72,16 +72,16 @@ class TempAuth(object):

     The reseller prefix specifies which parts of the account namespace this
     middleware is responsible for managing authentication and authorization.
-    By default, the prefix is AUTH so accounts and tokens are prefixed
-    by AUTH_. When a request's token and/or path start with AUTH_, this
+    By default, the prefix is 'AUTH' so accounts and tokens are prefixed
+    by 'AUTH\_'. When a request's token and/or path start with 'AUTH\_', this
     middleware knows it is responsible.

     We allow the reseller prefix to be a list. In tempauth, the first item
     in the list is used as the prefix for tokens and user groups. The
     other prefixes provide alternate accounts that users can access. For
     example, if the reseller prefix list is 'AUTH, OTHER', a user with
-    admin access to AUTH_account also has admin access to
-    OTHER_account.
+    admin access to 'AUTH_account' also has admin access to
+    'OTHER_account'.

     Required Group:
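For orientation (an illustration, not part of this diff), a hypothetical ``proxy-server.conf`` fragment using a reseller-prefix list as described above; user lines follow tempauth's ``user_<account>_<user> = <key> [group] ...`` convention::

    [filter:tempauth]
    use = egg:swift#tempauth
    reseller_prefix = AUTH, OTHER
    user_admin_admin = admin .admin .reseller_admin
    user_test_tester = testing .admin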
@@ -99,7 +99,7 @@ class TempAuth(object):
     is not processed.

     The X-Service-Token is useful when combined with multiple reseller prefix
-    items. In the following configuration, accounts prefixed SERVICE_
+    items. In the following configuration, accounts prefixed 'SERVICE\_'
     are only accessible if X-Auth-Token is from the end-user and
     X-Service-Token is from the ``glance`` user::

@@ -177,9 +177,9 @@ class TempAuth(object):
                 '"/auth/" (Non-empty auth prefix path '
                 'is required)' % self.auth_prefix)
             self.auth_prefix = '/auth/'
-        if self.auth_prefix[0] != '/':
+        if not self.auth_prefix.startswith('/'):
            self.auth_prefix = '/' + self.auth_prefix
-        if self.auth_prefix[-1] != '/':
+        if not self.auth_prefix.endswith('/'):
            self.auth_prefix += '/'
         self.token_life = int(conf.get('token_life', 86400))
         self.allow_overrides = config_true_value(

@@ -429,10 +429,12 @@ class TempAuth(object):
         try:
             acls = acls_from_account_info(info)
         except ValueError as e1:
-            self.logger.warn("Invalid ACL stored in metadata: %r" % e1)
+            self.logger.warning("Invalid ACL stored in metadata: %r" % e1)
             return None
         except NotImplementedError as e2:
-            self.logger.warn("ACL version exceeds middleware version: %r" % e2)
+            self.logger.warning(
+                "ACL version exceeds middleware version: %r"
+                % e2)
             return None
         return acls
@@ -44,14 +44,18 @@ If the user were to share the link with all his friends, or
 accidentally post it on a forum, etc. the direct access would be
 limited to the expiration time set when the website created the link.

-To create such temporary URLs, first an X-Account-Meta-Temp-URL-Key
+------------
+Client Usage
+------------
+
+To create such temporary URLs, first an ``X-Account-Meta-Temp-URL-Key``
 header must be set on the Swift account. Then, an HMAC-SHA1 (RFC 2104)
-signature is generated using the HTTP method to allow (GET, PUT,
-DELETE, etc.), the Unix timestamp the access should be allowed until,
+signature is generated using the HTTP method to allow (``GET``, ``PUT``,
+``DELETE``, etc.), the Unix timestamp the access should be allowed until,
 the full path to the object, and the key set on the account.

-For example, here is code generating the signature for a GET for 60
-seconds on /v1/AUTH_account/container/object::
+For example, here is code generating the signature for a ``GET`` for 60
+seconds on ``/v1/AUTH_account/container/object``::

     import hmac
     from hashlib import sha1

@@ -63,19 +67,20 @@ seconds on /v1/AUTH_account/container/object::
     hmac_body = '%s\\n%s\\n%s' % (method, expires, path)
     sig = hmac.new(key, hmac_body, sha1).hexdigest()

-Be certain to use the full path, from the /v1/ onward.
+Be certain to use the full path, from the ``/v1/`` onward.

-Let's say the sig ends up equaling
-da39a3ee5e6b4b0d3255bfef95601890afd80709 and expires ends up
-1323479485. Then, for example, the website could provide a link to::
+Let's say ``sig`` ends up equaling
+``da39a3ee5e6b4b0d3255bfef95601890afd80709`` and ``expires`` ends up
+``1323479485``. Then, for example, the website could provide a link to::

     https://swift-cluster.example.com/v1/AUTH_account/container/object?
     temp_url_sig=da39a3ee5e6b4b0d3255bfef95601890afd80709&
     temp_url_expires=1323479485

-Any alteration of the resource path or query arguments would result
-in 401 Unauthorized. Similarly, a PUT where GET was the allowed method
-would 401. HEAD is allowed if GET, PUT, or POST is allowed.
+Any alteration of the resource path or query arguments would result in
+``401 Unauthorized``. Similarly, a ``PUT`` where ``GET`` was the allowed method
+would be rejected with ``401 Unauthorized``. However, ``HEAD`` is allowed if
+``GET``, ``PUT``, or ``POST`` is allowed.

 Using this in combination with browser form post translation
 middleware could also allow direct-from-browser uploads to specific
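Pulling the docstring's pieces together, a runnable end-to-end sketch; the key value is hypothetical and the host is the docstring's example cluster::

    import hmac
    from hashlib import sha1
    from time import time

    method = 'GET'
    expires = int(time() + 60)  # allow access for 60 seconds
    path = '/v1/AUTH_account/container/object'
    key = 'mykey'  # hypothetical X-Account-Meta-Temp-URL-Key value
    hmac_body = '%s\n%s\n%s' % (method, expires, path)
    sig = hmac.new(key, hmac_body, sha1).hexdigest()
    url = ('https://swift-cluster.example.com%s'
           '?temp_url_sig=%s&temp_url_expires=%d' % (path, sig, expires))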
@@ -83,13 +88,13 @@ locations in Swift.

 TempURL supports both account and container level keys. Each allows up to two
 keys to be set, allowing key rotation without invalidating all existing
-temporary URLs. Account keys are specified by X-Account-Meta-Temp-URL-Key and
-X-Account-Meta-Temp-URL-Key-2, while container keys are specified by
-X-Container-Meta-Temp-URL-Key and X-Container-Meta-Temp-URL-Key-2.
+temporary URLs. Account keys are specified by ``X-Account-Meta-Temp-URL-Key``
+and ``X-Account-Meta-Temp-URL-Key-2``, while container keys are specified by
+``X-Container-Meta-Temp-URL-Key`` and ``X-Container-Meta-Temp-URL-Key-2``.
 Signatures are checked against account and container keys, if
 present.

-With GET TempURLs, a Content-Disposition header will be set on the
+With ``GET`` TempURLs, a ``Content-Disposition`` header will be set on the
 response so that browsers will interpret this as a file attachment to
 be saved. The filename chosen is based on the object name, but you
 can override this with a filename query parameter. Modifying the
@@ -100,13 +105,54 @@ above example::
     temp_url_expires=1323479485&filename=My+Test+File.pdf

 If you do not want the object to be downloaded, you can cause
-"Content-Disposition: inline" to be set on the response by adding the "inline"
-parameter to the query string, like so::
+``Content-Disposition: inline`` to be set on the response by adding the
+``inline`` parameter to the query string, like so::

     https://swift-cluster.example.com/v1/AUTH_account/container/object?
     temp_url_sig=da39a3ee5e6b4b0d3255bfef95601890afd80709&
     temp_url_expires=1323479485&inline

+---------------------
+Cluster Configuration
+---------------------
+
+This middleware understands the following configuration settings:
+
+``incoming_remove_headers``
+    A whitespace-delimited list of the headers to remove from
+    incoming requests. Names may optionally end with ``*`` to
+    indicate a prefix match. ``incoming_allow_headers`` is a
+    list of exceptions to these removals.
+
+    Default: ``x-timestamp``
+
+``incoming_allow_headers``
+    A whitespace-delimited list of the headers allowed as
+    exceptions to ``incoming_remove_headers``. Names may
+    optionally end with ``*`` to indicate a prefix match.
+
+    Default: None
+
+``outgoing_remove_headers``
+    A whitespace-delimited list of the headers to remove from
+    outgoing responses. Names may optionally end with ``*`` to
+    indicate a prefix match. ``outgoing_allow_headers`` is a
+    list of exceptions to these removals.
+
+    Default: ``x-object-meta-*``
+
+``outgoing_allow_headers``
+    A whitespace-delimited list of the headers allowed as
+    exceptions to ``outgoing_remove_headers``. Names may
+    optionally end with ``*`` to indicate a prefix match.
+
+    Default: ``x-object-meta-public-*``
+
+``methods``
+    A whitespace delimited list of request methods that are
+    allowed to be used with a temporary URL.
+
+    Default: ``GET HEAD PUT POST DELETE``
+
 """

 __all__ = ['TempURL', 'filter_factory',
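For illustration only, a hypothetical ``proxy-server.conf`` fragment exercising these settings, with the defaults written out explicitly::

    [filter:tempurl]
    use = egg:swift#tempurl
    methods = GET HEAD PUT POST DELETE
    incoming_remove_headers = x-timestamp
    outgoing_remove_headers = x-object-meta-*
    outgoing_allow_headers = x-object-meta-public-*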
@@ -123,7 +169,8 @@ from six.moves.urllib.parse import parse_qs
 from six.moves.urllib.parse import urlencode

 from swift.proxy.controllers.base import get_account_info, get_container_info
-from swift.common.swob import HeaderKeyDict, HTTPUnauthorized, HTTPBadRequest
+from swift.common.swob import HeaderKeyDict, header_to_environ_key, \
+    HTTPUnauthorized, HTTPBadRequest
 from swift.common.utils import split_path, get_valid_utf8_str, \
     register_swift_info, get_hmac, streq_const_time, quote

@@ -214,43 +261,6 @@ class TempURL(object):
     WSGI Middleware to grant temporary URLs specific access to Swift
     resources. See the overview for more information.

-    This middleware understands the following configuration settings::
-
-        incoming_remove_headers
-            The headers to remove from incoming requests. Simply a
-            whitespace delimited list of header names and names can
-            optionally end with '*' to indicate a prefix match.
-            incoming_allow_headers is a list of exceptions to these
-            removals.
-            Default: x-timestamp
-
-        incoming_allow_headers
-            The headers allowed as exceptions to
-            incoming_remove_headers. Simply a whitespace delimited
-            list of header names and names can optionally end with
-            '*' to indicate a prefix match.
-            Default: None
-
-        outgoing_remove_headers
-            The headers to remove from outgoing responses. Simply a
-            whitespace delimited list of header names and names can
-            optionally end with '*' to indicate a prefix match.
-            outgoing_allow_headers is a list of exceptions to these
-            removals.
-            Default: x-object-meta-*
-
-        outgoing_allow_headers
-            The headers allowed as exceptions to
-            outgoing_remove_headers. Simply a whitespace delimited
-            list of header names and names can optionally end with
-            '*' to indicate a prefix match.
-            Default: x-object-meta-public-*
-
-        methods
-            A whitespace delimited list of request methods that are
-            allowed to be used with a temporary URL.
-            Default: 'GET HEAD PUT POST DELETE'
-
     The proxy logs created for any subrequests made will have swift.source set
     to "TU".

@@ -259,69 +269,63 @@ class TempURL(object):
     :param conf: The configuration dict for the middleware.
     """

-    def __init__(self, app, conf,
-                 methods=('GET', 'HEAD', 'PUT', 'POST', 'DELETE')):
+    def __init__(self, app, conf):
         #: The next WSGI application/filter in the paste.deploy pipeline.
         self.app = app
         #: The filter configuration dict.
         self.conf = conf

-        #: The methods allowed with Temp URLs.
-        self.methods = methods
-
         self.disallowed_headers = set(
-            'HTTP_' + h.upper().replace('-', '_')
+            header_to_environ_key(h)
             for h in DISALLOWED_INCOMING_HEADERS.split())

-        headers = DEFAULT_INCOMING_REMOVE_HEADERS
-        if 'incoming_remove_headers' in conf:
-            headers = conf['incoming_remove_headers']
-        headers = \
-            ['HTTP_' + h.upper().replace('-', '_') for h in headers.split()]
+        headers = [header_to_environ_key(h)
+                   for h in conf.get('incoming_remove_headers',
+                                     DEFAULT_INCOMING_REMOVE_HEADERS.split())]
         #: Headers to remove from incoming requests. Uppercase WSGI env style,
         #: like `HTTP_X_PRIVATE`.
-        self.incoming_remove_headers = [h for h in headers if h[-1] != '*']
+        self.incoming_remove_headers = \
+            [h for h in headers if not h.endswith('*')]
         #: Header with match prefixes to remove from incoming requests.
         #: Uppercase WSGI env style, like `HTTP_X_SENSITIVE_*`.
         self.incoming_remove_headers_startswith = \
-            [h[:-1] for h in headers if h[-1] == '*']
+            [h[:-1] for h in headers if h.endswith('*')]

-        headers = DEFAULT_INCOMING_ALLOW_HEADERS
-        if 'incoming_allow_headers' in conf:
-            headers = conf['incoming_allow_headers']
-        headers = \
-            ['HTTP_' + h.upper().replace('-', '_') for h in headers.split()]
+        headers = [header_to_environ_key(h)
+                   for h in conf.get('incoming_allow_headers',
+                                     DEFAULT_INCOMING_ALLOW_HEADERS.split())]
         #: Headers to allow in incoming requests. Uppercase WSGI env style,
         #: like `HTTP_X_MATCHES_REMOVE_PREFIX_BUT_OKAY`.
-        self.incoming_allow_headers = [h for h in headers if h[-1] != '*']
+        self.incoming_allow_headers = \
+            [h for h in headers if not h.endswith('*')]
         #: Header with match prefixes to allow in incoming requests. Uppercase
         #: WSGI env style, like `HTTP_X_MATCHES_REMOVE_PREFIX_BUT_OKAY_*`.
         self.incoming_allow_headers_startswith = \
-            [h[:-1] for h in headers if h[-1] == '*']
+            [h[:-1] for h in headers if h.endswith('*')]

-        headers = DEFAULT_OUTGOING_REMOVE_HEADERS
-        if 'outgoing_remove_headers' in conf:
-            headers = conf['outgoing_remove_headers']
-        headers = [h.title() for h in headers.split()]
+        headers = [h.title()
+                   for h in conf.get('outgoing_remove_headers',
+                                     DEFAULT_OUTGOING_REMOVE_HEADERS.split())]
         #: Headers to remove from outgoing responses. Lowercase, like
         #: `x-account-meta-temp-url-key`.
-        self.outgoing_remove_headers = [h for h in headers if h[-1] != '*']
+        self.outgoing_remove_headers = \
+            [h for h in headers if not h.endswith('*')]
         #: Header with match prefixes to remove from outgoing responses.
         #: Lowercase, like `x-account-meta-private-*`.
         self.outgoing_remove_headers_startswith = \
-            [h[:-1] for h in headers if h[-1] == '*']
+            [h[:-1] for h in headers if h.endswith('*')]

-        headers = DEFAULT_OUTGOING_ALLOW_HEADERS
-        if 'outgoing_allow_headers' in conf:
-            headers = conf['outgoing_allow_headers']
-        headers = [h.title() for h in headers.split()]
+        headers = [h.title()
+                   for h in conf.get('outgoing_allow_headers',
+                                     DEFAULT_OUTGOING_ALLOW_HEADERS.split())]
         #: Headers to allow in outgoing responses. Lowercase, like
         #: `x-matches-remove-prefix-but-okay`.
-        self.outgoing_allow_headers = [h for h in headers if h[-1] != '*']
+        self.outgoing_allow_headers = \
+            [h for h in headers if not h.endswith('*')]
         #: Header with match prefixes to allow in outgoing responses.
         #: Lowercase, like `x-matches-remove-prefix-but-okay-*`.
         self.outgoing_allow_headers_startswith = \
-            [h[:-1] for h in headers if h[-1] == '*']
+            [h[:-1] for h in headers if h.endswith('*')]
         #: HTTP user agent to use for subrequests.
         self.agent = '%(orig)s TempURL'

@@ -371,7 +375,7 @@ class TempURL(object):
                 break
         if not is_valid_hmac:
             return self._invalid(env, start_response)
-        # disallowed headers prevent accidently allowing upload of a pointer
+        # disallowed headers prevent accidentally allowing upload of a pointer
         # to data that the PUT tempurl would not otherwise allow access for.
         # It should be safe to provide a GET tempurl for data that an
         # untrusted client just uploaded with a PUT tempurl.

@@ -434,7 +438,7 @@ class TempURL(object):
         :param env: The WSGI environment for the request.
         :returns: (Account str, container str) or (None, None).
         """
-        if env['REQUEST_METHOD'] in self.methods:
+        if env['REQUEST_METHOD'] in self.conf['methods']:
             try:
                 ver, acc, cont, obj = split_path(env['PATH_INFO'], 4, 4, True)
             except ValueError:

@@ -536,7 +540,7 @@ class TempURL(object):

     def _clean_disallowed_headers(self, env, start_response):
         """
-        Validate the absense of disallowed headers for "unsafe" operations.
+        Validate the absence of disallowed headers for "unsafe" operations.

         :returns: None for safe operations or swob.HTTPBadResponse if the
                   request includes disallowed headers.
@@ -607,7 +611,15 @@ def filter_factory(global_conf, **local_conf):
    conf = global_conf.copy()
    conf.update(local_conf)

-    methods = conf.get('methods', 'GET HEAD PUT POST DELETE').split()
-    register_swift_info('tempurl', methods=methods)
+    defaults = {
+        'methods': 'GET HEAD PUT POST DELETE',
+        'incoming_remove_headers': DEFAULT_INCOMING_REMOVE_HEADERS,
+        'incoming_allow_headers': DEFAULT_INCOMING_ALLOW_HEADERS,
+        'outgoing_remove_headers': DEFAULT_OUTGOING_REMOVE_HEADERS,
+        'outgoing_allow_headers': DEFAULT_OUTGOING_ALLOW_HEADERS,
+    }
+    info_conf = {k: conf.get(k, v).split() for k, v in defaults.items()}
+    register_swift_info('tempurl', **info_conf)
+    conf.update(info_conf)

-    return lambda app: TempURL(app, conf, methods=methods)
+    return lambda app: TempURL(app, conf)
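The effect is visible through the proxy's ``/info`` endpoint: where the old code advertised only ``methods``, the new ``register_swift_info`` call publishes every effective setting. Roughly what a client would now see under the ``tempurl`` key, assuming the defaults above (the JSON layout is illustrative)::

    {"tempurl": {"methods": ["GET", "HEAD", "PUT", "POST", "DELETE"],
                 "incoming_remove_headers": ["x-timestamp"],
                 "incoming_allow_headers": [],
                 "outgoing_remove_headers": ["x-object-meta-*"],
                 "outgoing_allow_headers": ["x-object-meta-public-*"]}}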
@@ -93,12 +93,14 @@ See a listing of the older versions of the object::
     http://<storage_url>/versions?prefix=008myobject/

 Now delete the current version of the object and see that the older version is
-gone::
+gone from 'versions' container and back in 'container' container::

     curl -i -XDELETE -H "X-Auth-Token: <token>" \
     http://<storage_url>/container/myobject
     curl -i -H "X-Auth-Token: <token>" \
     http://<storage_url>/versions?prefix=008myobject/
+    curl -i -XGET -H "X-Auth-Token: <token>" \
+    http://<storage_url>/container/myobject

 ---------------------------------------------------
 How to Disable Object Versioning in a Swift Cluster

@@ -113,10 +115,11 @@ Disable versioning from a container (x is any value except empty)::
     -H "X-Remove-Versions-Location: x" http://<storage_url>/container
 """

+import json
 import six
 from six.moves.urllib.parse import quote, unquote
 import time
-from swift.common.utils import get_logger, Timestamp, json, \
+from swift.common.utils import get_logger, Timestamp, \
     register_swift_info, config_true_value
 from swift.common.request_helpers import get_sys_meta_prefix
 from swift.common.wsgi import WSGIContext, make_pre_authed_request

@@ -138,11 +141,18 @@ class VersionedWritesContext(WSGIContext):
         WSGIContext.__init__(self, wsgi_app)
         self.logger = logger

-    def _listing_iter(self, account_name, lcontainer, lprefix, env):
-        for page in self._listing_pages_iter(account_name,
-                                             lcontainer, lprefix, env):
-            for item in page:
-                yield item
+    def _listing_iter(self, account_name, lcontainer, lprefix, req):
+        try:
+            for page in self._listing_pages_iter(account_name, lcontainer,
+                                                 lprefix, req.environ):
+                for item in page:
+                    yield item
+        except ListingIterNotFound:
+            pass
+        except HTTPPreconditionFailed:
+            raise HTTPPreconditionFailed(request=req)
+        except ListingIterError:
+            raise HTTPServerError(request=req)

     def _listing_pages_iter(self, account_name, lcontainer, lprefix, env):
         marker = ''

@@ -151,8 +161,8 @@ class VersionedWritesContext(WSGIContext):
                 env, method='GET', swift_source='VW',
                 path='/v1/%s/%s' % (account_name, lcontainer))
             lreq.environ['QUERY_STRING'] = \
-                'format=json&prefix=%s&marker=%s' % (quote(lprefix),
-                                                     quote(marker))
+                'format=json&prefix=%s&reverse=on&marker=%s' % (
+                    quote(lprefix), quote(marker))
             lresp = lreq.get_response(self.app)
             if not is_success(lresp.status_int):
                 if lresp.status_int == HTTP_NOT_FOUND:

@@ -244,31 +254,22 @@ class VersionedWritesContext(WSGIContext):
         lcontainer = object_versions.split('/')[0]
         prefix_len = '%03x' % len(object_name)
         lprefix = prefix_len + object_name + '/'
-        item_list = []
-        try:
-            for _item in self._listing_iter(account_name, lcontainer, lprefix,
-                                            req.environ):
-                item_list.append(_item)
-        except ListingIterNotFound:
-            pass
-        except HTTPPreconditionFailed:
-            return HTTPPreconditionFailed(request=req)
-        except ListingIterError:
-            return HTTPServerError(request=req)
-
-        if item_list:
-            # we're about to start making COPY requests - need to validate the
-            # write access to the versioned container
-            if 'swift.authorize' in req.environ:
-                container_info = get_container_info(
-                    req.environ, self.app)
-                req.acl = container_info.get('write_acl')
-                aresp = req.environ['swift.authorize'](req)
-                if aresp:
-                    return aresp
+        item_iter = self._listing_iter(account_name, lcontainer, lprefix, req)

-        while len(item_list) > 0:
-            previous_version = item_list.pop()
+        authed = False
+        for previous_version in item_iter:
+            if not authed:
+                # we're about to start making COPY requests - need to
+                # validate the write access to the versioned container
+                if 'swift.authorize' in req.environ:
+                    container_info = get_container_info(
+                        req.environ, self.app)
+                    req.acl = container_info.get('write_acl')
+                    aresp = req.environ['swift.authorize'](req)
+                    if aresp:
+                        return aresp
+                authed = True

             # there are older versions so copy the previous version to the
             # current object and delete the previous version
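The restructured loop defers authorization until the first item actually arrives from the now-lazy, reverse-ordered listing iterator, and performs it exactly once. A generic sketch of that pattern, detached from Swift's types::

    def process_versions(items, authorize, handle):
        # 'authorize' and 'handle' are hypothetical callables standing in
        # for the swift.authorize check and the COPY+DELETE work above.
        authed = False
        for item in items:
            if not authed:
                # Authorize lazily: only if the listing yields anything,
                # and only once for the whole loop.
                err = authorize()
                if err:
                    return err
                authed = True
            handle(item)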
@@ -307,8 +307,10 @@ class SegmentedIterable(object):
                     'ERROR: While processing manifest %s, '
                     'max LO GET time of %ds exceeded' %
                     (self.name, self.max_get_time))
-            # Make sure that the segment is a plain old object, not some
-            # flavor of large object, so that we can check its MD5.
+            # The "multipart-manifest=get" query param ensures that the
+            # segment is a plain old object, not some flavor of large
+            # object; therefore, its etag is its MD5sum and hence we can
+            # check it.
             path = seg_path + '?multipart-manifest=get'
             seg_req = make_subrequest(
                 self.req.environ, path=path, method='GET',

@@ -317,21 +319,35 @@ class SegmentedIterable(object):
                 agent=('%(orig)s ' + self.ua_suffix),
                 swift_source=self.swift_source)

+            seg_req_rangeval = None
             if first_byte != 0 or not go_to_end:
-                seg_req.headers['Range'] = "bytes=%s-%s" % (
+                seg_req_rangeval = "%s-%s" % (
                     first_byte, '' if go_to_end else last_byte)
+                seg_req.headers['Range'] = "bytes=" + seg_req_rangeval

             # We can only coalesce if paths match and we know the segment
             # size (so we can check that the ranges will be allowed)
             if pending_req and pending_req.path == seg_req.path and \
                     seg_size is not None:
-                new_range = '%s,%s' % (
-                    pending_req.headers.get('Range',
-                                            'bytes=0-%s' % (seg_size - 1)),
-                    seg_req.headers['Range'].split('bytes=')[1])
-                if Range(new_range).ranges_for_length(seg_size):
+
+                # Make a new Range object so that we don't goof up the
+                # existing one in case of invalid ranges. Note that a
+                # range set with too many individual byteranges is
+                # invalid, so we can combine N valid byteranges and 1
+                # valid byterange and get an invalid range set.
+                if pending_req.range:
+                    new_range_str = str(pending_req.range)
+                else:
+                    new_range_str = "bytes=0-%d" % (seg_size - 1)
+
+                if seg_req.range:
+                    new_range_str += "," + seg_req_rangeval
+                else:
+                    new_range_str += ",0-%d" % (seg_size - 1)
+
+                if Range(new_range_str).ranges_for_length(seg_size):
                     # Good news! We can coalesce the requests
-                    pending_req.headers['Range'] = new_range
+                    pending_req.headers['Range'] = new_range_str
                     continue
                 # else, Too many ranges, or too much backtracking, or ...
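A standalone sketch of the validity check that gates coalescing, using ``swob.Range`` exactly as the hunk does; the byteranges and segment size are invented::

    from swift.common.swob import Range

    seg_size = 1000
    combined = Range('bytes=0-499,500-999')
    # ranges_for_length() yields the satisfiable (start, stop) pairs for
    # an entity of this length; a falsy result means the combined range
    # set is not usable, so the subrequests must stay separate.
    if combined.ranges_for_length(seg_size):
        print('coalesce the two subrequests into one')
    else:
        print('send separate subrequests')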
File diff suppressed because it is too large
@@ -16,6 +16,7 @@
 import array
 import six.moves.cPickle as pickle
 import inspect
+import json
 from collections import defaultdict
 from gzip import GzipFile
 from os.path import getmtime

@@ -29,7 +30,7 @@ from tempfile import NamedTemporaryFile

 from six.moves import range

-from swift.common.utils import hash_path, validate_configuration, json
+from swift.common.utils import hash_path, validate_configuration
 from swift.common.ring.utils import tiers_for_dev

@@ -223,7 +223,7 @@ def is_valid_hostname(hostname):
     """
     if len(hostname) < 1 or len(hostname) > 255:
         return False
-    if hostname[-1] == ".":
+    if hostname.endswith('.'):
         # strip exactly one dot from the right, if present
         hostname = hostname[:-1]
     allowed = re.compile("(?!-)[A-Z\d-]{1,63}(?<!-)$", re.IGNORECASE)

@@ -328,13 +328,13 @@ def parse_search_value(search_value):
         search_value = search_value[i:]
     if search_value.startswith('-'):
         search_value = search_value[1:]
-    if len(search_value) and search_value[0].isdigit():
+    if search_value and search_value[0].isdigit():
         i = 1
         while i < len(search_value) and search_value[i] in '0123456789.':
             i += 1
         match['ip'] = search_value[:i]
         search_value = search_value[i:]
-    elif len(search_value) and search_value[0] == '[':
+    elif search_value and search_value.startswith('['):
         i = 1
         while i < len(search_value) and search_value[i] != ']':
             i += 1

@@ -356,14 +356,14 @@ def parse_search_value(search_value):
     # replication parameters
     if search_value.startswith('R'):
         search_value = search_value[1:]
-        if len(search_value) and search_value[0].isdigit():
+        if search_value and search_value[0].isdigit():
             i = 1
             while (i < len(search_value) and
                    search_value[i] in '0123456789.'):
                 i += 1
             match['replication_ip'] = search_value[:i]
             search_value = search_value[i:]
-        elif len(search_value) and search_value[0] == '[':
+        elif search_value and search_value.startswith('['):
             i = 1
             while i < len(search_value) and search_value[i] != ']':
                 i += 1
@@ -16,11 +16,9 @@ import os
 import string
 import textwrap
-import six

 from six.moves.configparser import ConfigParser

 from swift.common.utils import (
-    config_true_value, SWIFT_CONF_FILE, whataremyips)
+    config_true_value, SWIFT_CONF_FILE, whataremyips, list_from_csv)
 from swift.common.ring import Ring, RingData
 from swift.common.utils import quorum_size
 from swift.common.exceptions import RingValidationError

@@ -84,7 +82,6 @@ class BindPortsCache(object):


 class PolicyError(ValueError):
-
     def __init__(self, msg, index=None):
         if index is not None:
             msg += ', for index %r' % index

@@ -161,7 +158,7 @@ class BaseStoragePolicy(object):
     policy_type_to_policy_cls = {}

     def __init__(self, idx, name='', is_default=False, is_deprecated=False,
-                 object_ring=None):
+                 object_ring=None, aliases=''):
         # do not allow BaseStoragePolicy class to be instantiated directly
         if type(self) == BaseStoragePolicy:
             raise TypeError("Can't instantiate BaseStoragePolicy directly")

@@ -172,18 +169,17 @@ class BaseStoragePolicy(object):
             raise PolicyError('Invalid index', idx)
         if self.idx < 0:
             raise PolicyError('Invalid index', idx)
-        if not name:
+        self.alias_list = []
+        if not name or not self._validate_policy_name(name):
             raise PolicyError('Invalid name %r' % name, idx)
-        # this is defensively restrictive, but could be expanded in the future
-        if not all(c in VALID_CHARS for c in name):
-            raise PolicyError('Names are used as HTTP headers, and can not '
-                              'reliably contain any characters not in %r. '
-                              'Invalid name %r' % (VALID_CHARS, name))
-        if name.upper() == LEGACY_POLICY_NAME.upper() and self.idx != 0:
-            msg = 'The name %s is reserved for policy index 0. ' \
-                  'Invalid name %r' % (LEGACY_POLICY_NAME, name)
-            raise PolicyError(msg, idx)
-        self.name = name
+        self.alias_list.append(name)
+        if aliases:
+            names_list = list_from_csv(aliases)
+            for alias in names_list:
+                if alias == name:
+                    continue
+                self._validate_policy_name(alias)
+                self.alias_list.append(alias)
         self.is_deprecated = config_true_value(is_deprecated)
         self.is_default = config_true_value(is_default)
         if self.policy_type not in BaseStoragePolicy.policy_type_to_policy_cls:

@@ -191,9 +187,23 @@ class BaseStoragePolicy(object):
         if self.is_deprecated and self.is_default:
             raise PolicyError('Deprecated policy can not be default. '
                               'Invalid config', self.idx)
-
         self.ring_name = _get_policy_string('object', self.idx)
         self.object_ring = object_ring

+    @property
+    def name(self):
+        return self.alias_list[0]
+
+    @name.setter
+    def name_setter(self, name):
+        self._validate_policy_name(name)
+        self.alias_list[0] = name
+
+    @property
+    def aliases(self):
+        return ", ".join(self.alias_list)
+
     def __int__(self):
         return self.idx

@@ -203,8 +213,8 @@ class BaseStoragePolicy(object):
     def __repr__(self):
         return ("%s(%d, %r, is_default=%s, "
                 "is_deprecated=%s, policy_type=%r)") % \
-            (self.__class__.__name__, self.idx, self.name,
-             self.is_default, self.is_deprecated, self.policy_type)
+            (self.__class__.__name__, self.idx, self.alias_list,
+             self.is_default, self.is_deprecated, self.policy_type)

     @classmethod
     def register(cls, policy_type):

@@ -213,6 +223,7 @@ class BaseStoragePolicy(object):
         their StoragePolicy class. This will also set the policy_type
         attribute on the registered implementation.
         """
+
         def register_wrapper(policy_cls):
             if policy_type in cls.policy_type_to_policy_cls:
                 raise PolicyError(

@@ -222,6 +233,7 @@ class BaseStoragePolicy(object):
             cls.policy_type_to_policy_cls[policy_type] = policy_cls
             policy_cls.policy_type = policy_type
             return policy_cls
+
         return register_wrapper

     @classmethod

@@ -231,6 +243,7 @@ class BaseStoragePolicy(object):
         """
         return {
             'name': 'name',
+            'aliases': 'aliases',
             'policy_type': 'policy_type',
             'default': 'is_default',
             'deprecated': 'is_deprecated',

@@ -269,6 +282,77 @@ class BaseStoragePolicy(object):
         info.pop('policy_type')
         return info

+    def _validate_policy_name(self, name):
+        """
+        Helper function to determine the validity of a policy name. Used
+        to check policy names before setting them.
+
+        :param name: a name string for a single policy name.
+        :returns: true if the name is valid.
+        :raises: PolicyError if the policy name is invalid.
+        """
+        # this is defensively restrictive, but could be expanded in the future
+        if not all(c in VALID_CHARS for c in name):
+            raise PolicyError('Names are used as HTTP headers, and can not '
+                              'reliably contain any characters not in %r. '
+                              'Invalid name %r' % (VALID_CHARS, name))
+        if name.upper() == LEGACY_POLICY_NAME.upper() and self.idx != 0:
+            msg = 'The name %s is reserved for policy index 0. ' \
+                  'Invalid name %r' % (LEGACY_POLICY_NAME, name)
+            raise PolicyError(msg, self.idx)
+        if name.upper() in (existing_name.upper() for existing_name
+                            in self.alias_list):
+            msg = 'The name %s is already assigned to this policy.' % name
+            raise PolicyError(msg, self.idx)
+
+        return True
+
+    def add_name(self, name):
+        """
+        Adds an alias name to the storage policy. Shouldn't be called
+        directly from the storage policy but instead through the
+        storage policy collection class, so lookups by name resolve
+        correctly.
+
+        :param name: a new alias for the storage policy
+        """
+        if self._validate_policy_name(name):
+            self.alias_list.append(name)
+
+    def remove_name(self, name):
+        """
+        Removes an alias name from the storage policy. Shouldn't be called
+        directly from the storage policy but instead through the storage
+        policy collection class, so lookups by name resolve correctly. If
+        the name removed is the primary name then the next available alias
+        will be adopted as the new primary name.
+
+        :param name: a name assigned to the storage policy
+        """
+        if name not in self.alias_list:
+            raise PolicyError("%s is not a name assigned to policy %s"
+                              % (name, self.idx))
+        if len(self.alias_list) == 1:
+            raise PolicyError("Cannot remove only name %s from policy %s. "
+                              "Policies must have at least one name."
+                              % (name, self.idx))
+        else:
+            self.alias_list.remove(name)
+
+    def change_primary_name(self, name):
+        """
+        Changes the primary/default name of the policy to a specified name.
+
+        :param name: a string name to replace the current primary name.
+        """
+        if name == self.name:
+            return
+        elif name in self.alias_list:
+            self.remove_name(name)
+        else:
+            self._validate_policy_name(name)
+        self.alias_list.insert(0, name)
+
     def _validate_ring(self):
         """
         Hook, called when the ring is loaded. Can be used to
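A sketch of how these alias helpers compose; this is a hypothetical session, and ``StoragePolicy`` stands in for a concrete registered subclass that accepts the new ``aliases`` argument::

    policy = StoragePolicy(1, name='gold', aliases='fast, premium')
    policy.name      # 'gold' -- alias_list[0] is the primary name
    policy.aliases   # 'gold, fast, premium'
    policy.add_name('au')
    policy.change_primary_name('fast')
    policy.name      # now 'fast'; 'gold' remains as an alias
    policy.remove_name('premium')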
@@ -329,13 +413,15 @@ class ECStoragePolicy(BaseStoragePolicy):
     :func:`~swift.common.storage_policy.reload_storage_policies` to load
     POLICIES from ``swift.conf``.
     """
-    def __init__(self, idx, name='', is_default=False,
+
+    def __init__(self, idx, name='', aliases='', is_default=False,
                  is_deprecated=False, object_ring=None,
                  ec_segment_size=DEFAULT_EC_OBJECT_SEGMENT_SIZE,
                  ec_type=None, ec_ndata=None, ec_nparity=None):

         super(ECStoragePolicy, self).__init__(
-            idx, name, is_default, is_deprecated, object_ring)
+            idx=idx, name=name, aliases=aliases, is_default=is_default,
+            is_deprecated=is_deprecated, object_ring=object_ring)

         # Validate erasure_coding policy specific members
         # ec_type is one of the EC implementations supported by PyEClib

@@ -441,9 +527,9 @@ class ECStoragePolicy(BaseStoragePolicy):

     def __repr__(self):
         return ("%s, EC config(ec_type=%s, ec_segment_size=%d, "
-                "ec_ndata=%d, ec_nparity=%d)") % (
-            super(ECStoragePolicy, self).__repr__(), self.ec_type,
-            self.ec_segment_size, self.ec_ndata, self.ec_nparity)
+                "ec_ndata=%d, ec_nparity=%d)") % \
+            (super(ECStoragePolicy, self).__repr__(), self.ec_type,
+             self.ec_segment_size, self.ec_ndata, self.ec_nparity)

     @classmethod
     def _config_options_map(cls):

@@ -532,6 +618,7 @@ class StoragePolicyCollection(object):
     * Deprecated policies can not be declared the default

     """
+
     def __init__(self, pols):
         self.default = []
         self.by_name = {}

@@ -542,7 +629,8 @@ class StoragePolicyCollection(object):
         """
         Add pre-validated policies to internal indexes.
         """
-        self.by_name[policy.name.upper()] = policy
+        for name in policy.alias_list:
+            self.by_name[name.upper()] = policy
         self.by_index[int(policy)] = policy

     def __repr__(self):

@@ -570,9 +658,10 @@ class StoragePolicyCollection(object):
         if int(policy) in self.by_index:
             raise PolicyError('Duplicate index %s conflicts with %s' % (
                 policy, self.get_by_index(int(policy))))
-        if policy.name.upper() in self.by_name:
-            raise PolicyError('Duplicate name %s conflicts with %s' % (
-                policy, self.get_by_name(policy.name)))
+        for name in policy.alias_list:
+            if name.upper() in self.by_name:
+                raise PolicyError('Duplicate name %s conflicts with %s' % (
+                    policy, self.get_by_name(name)))
         if policy.is_default:
             if not self.default:
                 self.default = policy

@@ -667,6 +756,62 @@ class StoragePolicyCollection(object):
             policy_info.append(policy_entry)
         return policy_info

+    def add_policy_alias(self, policy_index, *aliases):
+        """
+        Adds a new name or names to a policy
+
+        :param policy_index: index of a policy in this policy collection.
+        :param *aliases: arbitrary number of string policy names to add.
+        """
+        policy = self.get_by_index(policy_index)
+        for alias in aliases:
+            if alias.upper() in self.by_name:
+                raise PolicyError('Duplicate name %s in use '
+                                  'by policy %s' % (alias,
+                                                    self.get_by_name(alias)))
+            else:
+                policy.add_name(alias)
+                self.by_name[alias.upper()] = policy
+
+    def remove_policy_alias(self, *aliases):
+        """
+        Removes a name or names from a policy. If the name removed is the
+        primary name then the next available alias will be adopted
+        as the new primary name.
+
+        :param *aliases: arbitrary number of existing policy names to remove.
+        """
+        for alias in aliases:
+            policy = self.get_by_name(alias)
+            if not policy:
+                raise PolicyError('No policy with name %s exists.' % alias)
+            if len(policy.alias_list) == 1:
+                raise PolicyError('Policy %s with name %s has only one name. '
+                                  'Policies must have at least one name.' % (
+                                      policy, alias))
+            else:
+                policy.remove_name(alias)
+                del self.by_name[alias.upper()]
+
+    def change_policy_primary_name(self, policy_index, new_name):
+        """
+        Changes the primary or default name of a policy. The new primary
+        name can be an alias that already belongs to the policy or a
+        completely new name.
+
+        :param policy_index: index of a policy in this policy collection.
+        :param new_name: a string name to set as the new default name.
+        """
+        policy = self.get_by_index(policy_index)
+        name_taken = self.get_by_name(new_name)
+        # if the name belongs to some other policy in the collection
+        if name_taken and name_taken != policy:
+            raise PolicyError('Other policy %s with name %s exists.' %
+                              (self.get_by_name(new_name).idx, new_name))
+        else:
+            policy.change_primary_name(new_name)
+            self.by_name[new_name.upper()] = policy
+

 def parse_storage_policies(conf):
     """
@@ -217,6 +217,15 @@ def _header_int_property(header):
         doc="Retrieve and set the %s header as an int" % header)


+def header_to_environ_key(header_name):
+    header_name = 'HTTP_' + header_name.replace('-', '_').upper()
+    if header_name == 'HTTP_CONTENT_LENGTH':
+        return 'CONTENT_LENGTH'
+    if header_name == 'HTTP_CONTENT_TYPE':
+        return 'CONTENT_TYPE'
+    return header_name
+
+
 class HeaderEnvironProxy(MutableMapping):
     """
     A dict-like object that proxies requests to a wsgi environ,
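The new module-level helper behaves exactly like the ``_normalize`` method it replaces below, including the two CGI special cases. For example::

    >>> header_to_environ_key('X-Object-Meta-Color')
    'HTTP_X_OBJECT_META_COLOR'
    >>> header_to_environ_key('Content-Length')
    'CONTENT_LENGTH'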
@@ -235,30 +244,22 @@ class HeaderEnvironProxy(MutableMapping):
     def __len__(self):
         return len(self.keys())

-    def _normalize(self, key):
-        key = 'HTTP_' + key.replace('-', '_').upper()
-        if key == 'HTTP_CONTENT_LENGTH':
-            return 'CONTENT_LENGTH'
-        if key == 'HTTP_CONTENT_TYPE':
-            return 'CONTENT_TYPE'
-        return key
-
     def __getitem__(self, key):
-        return self.environ[self._normalize(key)]
+        return self.environ[header_to_environ_key(key)]

     def __setitem__(self, key, value):
         if value is None:
-            self.environ.pop(self._normalize(key), None)
+            self.environ.pop(header_to_environ_key(key), None)
         elif isinstance(value, six.text_type):
-            self.environ[self._normalize(key)] = value.encode('utf-8')
+            self.environ[header_to_environ_key(key)] = value.encode('utf-8')
         else:
-            self.environ[self._normalize(key)] = str(value)
+            self.environ[header_to_environ_key(key)] = str(value)

     def __contains__(self, key):
-        return self._normalize(key) in self.environ
+        return header_to_environ_key(key) in self.environ

     def __delitem__(self, key):
-        del self.environ[self._normalize(key)]
+        del self.environ[header_to_environ_key(key)]

     def keys(self):
         keys = [key[5:].replace('_', '-').title()

@@ -541,14 +542,15 @@ class Range(object):

     def __str__(self):
         string = 'bytes='
-        for start, end in self.ranges:
+        for i, (start, end) in enumerate(self.ranges):
             if start is not None:
                 string += str(start)
             string += '-'
             if end is not None:
                 string += str(end)
-            string += ','
-        return string.rstrip(',')
+            if i < len(self.ranges) - 1:
+                string += ','
+        return string

     def ranges_for_length(self, length):
         """
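The rewritten ``__str__`` only emits a comma between ranges instead of appending one after every range and relying on ``rstrip``, which lets range sets round-trip cleanly through ``str()`` (as the SLO coalescing code above depends on). A quick check with invented byteranges::

    >>> from swift.common.swob import Range
    >>> str(Range('bytes=0-499,1000-'))
    'bytes=0-499,1000-'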
@@ -970,7 +972,7 @@ class Request(object):
         the path segment.
         """
         path_info = self.path_info
-        if not path_info or path_info[0] != '/':
+        if not path_info or not path_info.startswith('/'):
             return None
         try:
             slash_loc = path_info.index('/', 1)

@@ -1184,7 +1186,7 @@ class Response(object):
         """

         content_size = self.content_length
-        content_type = self.content_type
+        content_type = self.headers.get('content-type')
         self.content_type = ''.join(['multipart/byteranges;',
                                      'boundary=', self.boundary])

@@ -21,16 +21,15 @@ import errno
 import fcntl
 import grp
 import hmac
+import json
 import operator
 import os
 import pwd
 import re
 import sys
-import threading as stdlib_threading
 import time
 import uuid
 import functools
-import weakref
 import email.parser
 from hashlib import md5, sha1
 from random import random, shuffle

@@ -40,10 +39,6 @@ import ctypes.util
 from optparse import OptionParser

 from tempfile import mkstemp, NamedTemporaryFile
-try:
-    import simplejson as json
-except ImportError:
-    import json
 import glob
 import itertools
 import stat

@@ -63,7 +58,6 @@ import six
 from six.moves import cPickle as pickle
 from six.moves.configparser import (ConfigParser, NoSectionError,
                                     NoOptionError, RawConfigParser)
-from six.moves.queue import Queue, Empty
 from six.moves import range
 from six.moves.urllib.parse import ParseResult
 from six.moves.urllib.parse import quote as _quote

@@ -74,6 +68,11 @@ import swift.common.exceptions
 from swift.common.http import is_success, is_redirection, HTTP_NOT_FOUND, \
     HTTP_PRECONDITION_FAILED, HTTP_REQUESTED_RANGE_NOT_SATISFIABLE

+if six.PY3:
+    stdlib_queue = eventlet.patcher.original('queue')
+else:
+    stdlib_queue = eventlet.patcher.original('Queue')
+stdlib_threading = eventlet.patcher.original('threading')
+
 # logging doesn't import patched as cleanly as one would like
 from logging.handlers import SysLogHandler

@@ -249,7 +248,7 @@ def backward(f, blocksize=4096):
     f.seek(0, os.SEEK_END)
     if f.tell() == 0:
         return
-    last_row = ''
+    last_row = b''
     while f.tell() != 0:
         try:
             f.seek(-blocksize, os.SEEK_CUR)

@@ -258,7 +257,7 @@ def backward(f, blocksize=4096):
             f.seek(-blocksize, os.SEEK_CUR)
         block = f.read(blocksize)
         f.seek(-blocksize, os.SEEK_CUR)
-        rows = block.split('\n')
+        rows = block.split(b'\n')
         rows[-1] = rows[-1] + last_row
         while rows:
             last_row = rows.pop(-1)
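``backward()`` now operates on bytes, so callers must open the file in binary mode. A minimal usage sketch (the log path is invented)::

    from swift.common.utils import backward

    with open('/var/log/swift/example.log', 'rb') as f:
        for line in backward(f):
            # lines are yielded last-to-first, as bytes
            print(line)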
@@ -297,7 +296,7 @@ def config_auto_int_value(value, default):


 def append_underscore(prefix):
-    if prefix and prefix[-1] != '_':
+    if prefix and not prefix.endswith('_'):
         prefix += '_'
     return prefix

@@ -390,8 +389,8 @@ def load_libc_function(func_name, log_error=True,
         if fail_if_missing:
             raise
         if log_error:
-            logging.warn(_("Unable to locate %s in libc. Leaving as a "
-                           "no-op."), func_name)
+            logging.warning(_("Unable to locate %s in libc. Leaving as a "
+                              "no-op."), func_name)
         return noop_libc_function

@@ -425,7 +424,7 @@ def get_log_line(req, res, trans_time, additional_info):
     :param trans_time: the time the request took to complete, a float.
     :param additional_info: a string to log at the end of the line

-    :returns: a properly formated line for logging.
+    :returns: a properly formatted line for logging.
     """

     policy_index = get_policy_index(req.headers, res.headers)

@@ -440,7 +439,8 @@ def get_log_line(req, res, trans_time, additional_info):


 def get_trans_id_time(trans_id):
-    if len(trans_id) >= 34 and trans_id[:2] == 'tx' and trans_id[23] == '-':
+    if len(trans_id) >= 34 and \
+            trans_id.startswith('tx') and trans_id[23] == '-':
         try:
             return int(trans_id[24:34], 16)
         except ValueError:
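For reference, the layout this parser expects is ``tx`` plus 21 opaque characters, a ``-``, then a 10-digit hex timestamp (an optional suffix may follow). A made-up example::

    >>> get_trans_id_time('tx' + 'a' * 21 + '-0051f23dbd')
    1374830013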
@@ -580,8 +580,8 @@ class FallocateWrapper(object):
             if self.fallocate is not noop_libc_function:
                 break
         if self.fallocate is noop_libc_function:
-            logging.warn(_("Unable to locate fallocate, posix_fallocate in "
-                           "libc. Leaving as a no-op."))
+            logging.warning(_("Unable to locate fallocate, posix_fallocate in "
+                              "libc. Leaving as a no-op."))

     def __call__(self, fd, mode, offset, length):
         """The length parameter must be a ctypes.c_uint64."""

@@ -664,8 +664,8 @@ def fsync_dir(dirpath):
         if err.errno == errno.ENOTDIR:
             # Raise error if someone calls fsync_dir on a non-directory
             raise
-        logging.warn(_("Unable to perform fsync() on directory %s: %s"),
-                     dirpath, os.strerror(err.errno))
+        logging.warning(_("Unable to perform fsync() on directory %s: %s"),
+                        dirpath, os.strerror(err.errno))
     finally:
         if dirfd:
             os.close(dirfd)

@@ -686,9 +686,9 @@ def drop_buffer_cache(fd, offset, length):
     ret = _posix_fadvise(fd, ctypes.c_uint64(offset),
                          ctypes.c_uint64(length), 4)
     if ret != 0:
-        logging.warn("posix_fadvise64(%(fd)s, %(offset)s, %(length)s, 4) "
-                     "-> %(ret)s", {'fd': fd, 'offset': offset,
-                                    'length': length, 'ret': ret})
+        logging.warning("posix_fadvise64(%(fd)s, %(offset)s, %(length)s, 4) "
+                        "-> %(ret)s", {'fd': fd, 'offset': offset,
+                                       'length': length, 'ret': ret})


 NORMAL_FORMAT = "%016.05f"

@@ -832,6 +832,9 @@ class Timestamp(object):
             other = Timestamp(other)
         return cmp(self.internal, other.internal)

+    def __hash__(self):
+        return hash(self.internal)
+

 def normalize_timestamp(timestamp):
     """

@@ -1166,14 +1169,16 @@ class StatsdClient(object):
             parts.append('@%s' % (sample_rate,))
         else:
             return
+        if six.PY3:
+            parts = [part.encode('utf-8') for part in parts]
         # Ideally, we'd cache a sending socket in self, but that
         # results in a socket getting shared by multiple green threads.
         with closing(self._open_socket()) as sock:
             try:
-                return sock.sendto('|'.join(parts), self._target)
+                return sock.sendto(b'|'.join(parts), self._target)
             except IOError as err:
                 if self.logger:
-                    self.logger.warn(
+                    self.logger.warning(
                         'Error sending UDP message to %r: %s',
                         self._target, err)

@@ -1227,7 +1232,7 @@ def timing_stats(**dec_kwargs):
     swift's wsgi server controllers, based on response code.
     """
     def decorating_func(func):
-        method = func.func_name
+        method = func.__name__

         @functools.wraps(func)
         def _timing_stats(ctrl, *args, **kwargs):

@@ -1245,27 +1250,6 @@ def timing_stats(**dec_kwargs):
     return decorating_func


-class LoggingHandlerWeakRef(weakref.ref):
-    """
-    Like a weak reference, but passes through a couple methods that logging
-    handlers need.
-    """
-
-    def close(self):
-        referent = self()
-        try:
-            if referent:
-                referent.close()
-        except KeyError:
-            # This is to catch an issue with old py2.6 versions
-            pass
-
-    def flush(self):
-        referent = self()
-        if referent:
-            referent.flush()
-
-
 # double inheritance to support property with setter
 class LogAdapter(logging.LoggerAdapter, object):
     """
@@ -1279,7 +1263,6 @@ class LogAdapter(logging.LoggerAdapter, object):
     def __init__(self, logger, server):
         logging.LoggerAdapter.__init__(self, logger, {})
         self.server = server
-        setattr(self, 'warn', self.warning)

     @property
     def txn_id(self):

@@ -1334,13 +1317,10 @@ class LogAdapter(logging.LoggerAdapter, object):
         _junk, exc, _junk = sys.exc_info()
         call = self.error
         emsg = ''
-        if isinstance(exc, OSError):
+        if isinstance(exc, (OSError, socket.error)):
             if exc.errno in (errno.EIO, errno.ENOSPC):
                 emsg = str(exc)
-            else:
-                call = self._exception
-        elif isinstance(exc, socket.error):
-            if exc.errno == errno.ECONNREFUSED:
+            elif exc.errno == errno.ECONNREFUSED:
                 emsg = _('Connection refused')
             elif exc.errno == errno.EHOSTUNREACH:
                 emsg = _('Host unreachable')
@@ -1426,7 +1406,7 @@ class SwiftLogFormatter(logging.Formatter):
             record.exc_text = self.formatException(
                 record.exc_info).replace('\n', '#012')
         if record.exc_text:
-            if msg[-3:] != '#012':
+            if not msg.endswith('#012'):
                 msg = msg + '#012'
             msg = msg + record.exc_text

@@ -1565,31 +1545,6 @@ def get_logger(conf, name=None, log_to_console=False, log_route=None,
             print('Invalid custom handler format [%s]' % hook,
                   file=sys.stderr)

-    # Python 2.6 has the undesirable property of keeping references to all log
-    # handlers around forever in logging._handlers and logging._handlerList.
-    # Combine that with handlers that keep file descriptors, and you get an fd
-    # leak.
-    #
-    # And no, we can't share handlers; a SyslogHandler has a socket, and if
-    # two greenthreads end up logging at the same time, you could get message
-    # overlap that garbles the logs and makes eventlet complain.
-    #
-    # Python 2.7 uses weakrefs to avoid the leak, so let's do that too.
-    if sys.version_info[0] == 2 and sys.version_info[1] <= 6:
-        try:
-            logging._acquireLock()  # some thread-safety thing
-            for handler in adapted_logger.logger.handlers:
-                if handler in logging._handlers:
-                    wr = LoggingHandlerWeakRef(handler)
-                    del logging._handlers[handler]
-                    logging._handlers[wr] = 1
-            for i, handler_ref in enumerate(logging._handlerList):
-                if handler_ref is handler:
-                    logging._handlerList[i] = LoggingHandlerWeakRef(
-                        handler)
-        finally:
-            logging._releaseLock()
-
     return adapted_logger

@@ -1739,7 +1694,7 @@ def expand_ipv6(address):
 def whataremyips(bind_ip=None):
     """
     Get "our" IP addresses ("us" being the set of services configured by
-    one *.conf file). If our REST listens on a specific address, return it.
+    one `*.conf` file). If our REST listens on a specific address, return it.
     Otherwise, if listen on '0.0.0.0' or '::' return all addresses, including
     the loopback.

@@ -2085,6 +2040,9 @@ def search_tree(root, glob_match, ext='', exts=None, dir_ext=None):
     :param glob_match: glob to match in root, matching dirs are traversed with
                        os.walk
     :param ext: only files that end in ext will be returned
+    :param exts: a list of file extensions; only files that end in one of these
+                 extensions will be returned; if set this list overrides any
+                 extension specified using the 'ext' param.
     :param dir_ext: if present directories that end with dir_ext will not be
                     traversed and instead will be returned as a matched path

@@ -2333,7 +2291,7 @@ class GreenAsyncPile(object):
     def next(self):
         try:
             rv = self._responses.get_nowait()
-        except Empty:
+        except eventlet.queue.Empty:
             if self._inflight == 0:
                 raise StopIteration()
             rv = self._responses.get()

@@ -2984,8 +2942,8 @@ class ThreadPool(object):

     def __init__(self, nthreads=2):
         self.nthreads = nthreads
-        self._run_queue = Queue()
-        self._result_queue = Queue()
+        self._run_queue = stdlib_queue.Queue()
+        self._result_queue = stdlib_queue.Queue()
         self._threads = []
         self._alive = True

@@ -3010,7 +2968,7 @@ class ThreadPool(object):
         # multiple instances instantiated. Since the object server uses one
         # pool per disk, we have to reimplement this stuff.
         _raw_rpipe, self.wpipe = os.pipe()
-        self.rpipe = greenio.GreenPipe(_raw_rpipe, 'rb', bufsize=0)
+        self.rpipe = greenio.GreenPipe(_raw_rpipe, 'rb')

         for _junk in range(nthreads):
             thr = stdlib_threading.Thread(

@@ -3065,7 +3023,7 @@ class ThreadPool(object):
         while True:
             try:
                 ev, success, result = queue.get(block=False)
-            except Empty:
+            except stdlib_queue.Empty:
                 break

             try:

@@ -3078,15 +3036,15 @@ class ThreadPool(object):

     def run_in_thread(self, func, *args, **kwargs):
         """
-        Runs func(*args, **kwargs) in a thread. Blocks the current greenlet
+        Runs ``func(*args, **kwargs)`` in a thread. Blocks the current greenlet
         until results are available.

         Exceptions thrown will be reraised in the calling thread.

         If the threadpool was initialized with nthreads=0, it invokes
-        func(*args, **kwargs) directly, followed by eventlet.sleep() to ensure
-        the eventlet hub has a chance to execute. It is more likely the hub
-        will be invoked when queuing operations to an external thread.
+        ``func(*args, **kwargs)`` directly, followed by eventlet.sleep() to
+        ensure the eventlet hub has a chance to execute. It is more likely the
+        hub will be invoked when queuing operations to an external thread.

         :returns: result of calling func
         :raises: whatever func raises
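A usage sketch for ``run_in_thread``; the worker function is invented. It hands blocking I/O to a real OS thread while the calling greenlet waits, which is exactly why the queues above must be the unpatched stdlib ones::

    from swift.common.utils import ThreadPool

    def read_first_byte(path):
        # hypothetical blocking work, kept off the eventlet hub
        with open(path, 'rb') as f:
            return f.read(1)

    pool = ThreadPool(nthreads=2)
    byte = pool.run_in_thread(read_first_byte, '/etc/hosts')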
@ -3126,7 +3084,7 @@ class ThreadPool(object):
|
|||
|
||||
def force_run_in_thread(self, func, *args, **kwargs):
|
||||
"""
|
||||
Runs func(*args, **kwargs) in a thread. Blocks the current greenlet
|
||||
Runs ``func(*args, **kwargs)`` in a thread. Blocks the current greenlet
|
||||
until results are available.
|
||||
|
||||
Exceptions thrown will be reraised in the calling thread.
|
||||
|
@@ -3604,7 +3562,8 @@ def document_iters_to_http_response_body(ranges_iter, boundary, multipart,
         except StopIteration:
             pass
         else:
-            logger.warn("More than one part in a single-part response?")
+            logger.warning(
+                "More than one part in a single-part response?")

         return string_along(response_body_iter, ranges_iter, logger)
@@ -407,7 +407,8 @@ def run_server(conf, logger, sock, global_conf=None):
     wsgi.WRITE_TIMEOUT = int(conf.get('client_timeout') or 60)

     eventlet.hubs.use_hub(get_hub())
-    eventlet.patcher.monkey_patch(all=False, socket=True)
+    # NOTE(sileht): monkey-patching thread is required by python-keystoneclient
+    eventlet.patcher.monkey_patch(all=False, socket=True, thread=True)
     eventlet_debug = config_true_value(conf.get('eventlet_debug', 'no'))
     eventlet.debug.hub_exceptions(eventlet_debug)
     wsgi_logger = NullLogger()
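A runnable sketch of the patching the new line performs; `is_monkey_patched` is used here only to show the effect:

    import eventlet.patcher

    # green sockets plus green threading primitives, as in the hunk above
    eventlet.patcher.monkey_patch(all=False, socket=True, thread=True)
    print(eventlet.patcher.is_monkey_patched('thread'))  # True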
@@ -597,6 +598,8 @@ class PortPidState(object):

     def port_index_pairs(self):
         """
+        Returns current (port, server index) pairs.
+
         :returns: A set of (port, server_idx) tuples for currently-tracked
             ports, sockets, and PIDs.
         """
@@ -711,6 +714,8 @@ class ServersPerPortStrategy(object):

     def loop_timeout(self):
         """
+        Return timeout before checking for reloaded rings.
+
         :returns: The time to wait for a child to exit before checking for
             reloaded rings (new ports).
         """
@@ -1090,7 +1095,8 @@ def make_env(env, method=None, path=None, agent='Swift', query_string=None,
                  'HTTP_ORIGIN', 'HTTP_ACCESS_CONTROL_REQUEST_METHOD',
                  'SERVER_PROTOCOL', 'swift.cache', 'swift.source',
                  'swift.trans_id', 'swift.authorize_override',
-                 'swift.authorize', 'HTTP_X_USER_ID', 'HTTP_X_PROJECT_ID'):
+                 'swift.authorize', 'HTTP_X_USER_ID', 'HTTP_X_PROJECT_ID',
+                 'HTTP_REFERER'):
        if name in env:
            newenv[name] = env[name]
    if method:
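The loop above whitelists which environ keys survive into a subrequest. A standalone sketch of that copy pattern with an abbreviated, hypothetical key list (`PASSTHROUGH` is not a name from the source):

    PASSTHROUGH = ('REMOTE_USER', 'HTTP_X_TRANS_ID', 'HTTP_REFERER',
                   'swift.cache', 'swift.trans_id')

    def make_subrequest_env(env, method=None):
        newenv = {}
        for name in PASSTHROUGH:
            if name in env:
                newenv[name] = env[name]
        if method:
            newenv['REQUEST_METHOD'] = method
        return newenv

    env = {'REQUEST_METHOD': 'GET', 'HTTP_REFERER': 'http://example.com/'}
    print(make_subrequest_env(env, method='HEAD'))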
@@ -557,7 +557,7 @@ class ContainerBroker(DatabaseBroker):
             conn.commit()

     def list_objects_iter(self, limit, marker, end_marker, prefix, delimiter,
-                          path=None, storage_policy_index=0):
+                          path=None, storage_policy_index=0, reverse=False):
         """
         Get a list of objects sorted by name starting at marker onward, up
         to limit entries. Entries will begin with the prefix and will not
@@ -570,6 +570,7 @@ class ContainerBroker(DatabaseBroker):
         :param delimiter: delimiter for query
         :param path: if defined, will set the prefix and delimiter based on
                      the path
+        :param reverse: reverse the result order.

         :returns: list of tuples of (name, created_at, size, content_type,
                   etag)
@@ -578,6 +579,9 @@ class ContainerBroker(DatabaseBroker):
         (marker, end_marker, prefix, delimiter, path) = utf8encode(
             marker, end_marker, prefix, delimiter, path)
         self._commit_puts_stale_ok()
+        if reverse:
+            # Reverse the markers if we are reversing the listing.
+            marker, end_marker = end_marker, marker
         if path is not None:
             prefix = path
             if path:
@@ -585,6 +589,8 @@ class ContainerBroker(DatabaseBroker):
                 delimiter = '/'
         elif delimiter and not prefix:
             prefix = ''
+        if prefix:
+            end_prefix = prefix[:-1] + chr(ord(prefix[-1]) + 1)
         orig_marker = marker
         with self.get() as conn:
             results = []
@@ -592,9 +598,13 @@ class ContainerBroker(DatabaseBroker):
                 query = '''SELECT name, created_at, size, content_type, etag
                            FROM object WHERE'''
                 query_args = []
-                if end_marker:
+                if end_marker and (not prefix or end_marker < end_prefix):
                     query += ' name < ? AND'
                     query_args.append(end_marker)
+                elif prefix:
+                    query += ' name < ? AND'
+                    query_args.append(end_prefix)
+
                 if delim_force_gte:
                     query += ' name >= ? AND'
                     query_args.append(marker)
@@ -611,8 +621,8 @@ class ContainerBroker(DatabaseBroker):
                 else:
                     query += ' deleted = 0'
                 orig_tail_query = '''
-                    ORDER BY name LIMIT ?
-                '''
+                    ORDER BY name %s LIMIT ?
+                ''' % ('DESC' if reverse else '')
                 orig_tail_args = [limit - len(results)]
                 # storage policy filter
                 policy_tail_query = '''
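The tail query now interpolates the sort direction, so one statement serves both listing orders. A tiny sketch:

    def tail_query(reverse):
        # one statement, direction chosen at format time
        return 'ORDER BY name %s LIMIT ?' % ('DESC' if reverse else '')

    print(tail_query(False))  # ORDER BY name  LIMIT ?
    print(tail_query(True))   # ORDER BY name DESC LIMIT ?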
@@ -633,26 +643,24 @@ class ContainerBroker(DatabaseBroker):
                               tuple(query_args + tail_args))
                 curs.row_factory = None

-                if prefix is None:
-                    # A delimiter without a specified prefix is ignored
+                # Delimiters without a prefix is ignored, further if there
+                # is no delimiter then we can simply return the result as
+                # prefixes are now handled in the SQL statement.
+                if prefix is None or not delimiter:
                     return [r for r in curs]
-                if not delimiter:
-                    if not prefix:
-                        # It is possible to have a delimiter but no prefix
-                        # specified. As above, the prefix will be set to the
-                        # empty string, so avoid performing the extra work to
-                        # check against an empty prefix.
-                        return [r for r in curs]
-                    else:
-                        return [r for r in curs if r[0].startswith(prefix)]

+                # We have a delimiter and a prefix (possibly empty string) to
+                # handle
                 rowcount = 0
                 for row in curs:
                     rowcount += 1
-                    marker = name = row[0]
-                    if len(results) >= limit or not name.startswith(prefix):
+                    name = row[0]
+                    if reverse:
+                        end_marker = name
+                    else:
+                        marker = name
+
+                    if len(results) >= limit:
                         curs.close()
                         return results
                     end = name.find(delimiter, len(prefix))
@@ -660,13 +668,19 @@ class ContainerBroker(DatabaseBroker):
                     if name == path:
                         continue
                     if end >= 0 and len(name) > end + len(delimiter):
-                        marker = name[:end] + chr(ord(delimiter) + 1)
+                        if reverse:
+                            end_marker = name[:end + 1]
+                        else:
+                            marker = name[:end] + chr(ord(delimiter) + 1)
                         curs.close()
                         break
                     elif end > 0:
-                        marker = name[:end] + chr(ord(delimiter) + 1)
-                        # we want result to be inclusive of delim+1
-                        delim_force_gte = True
+                        if reverse:
+                            end_marker = name[:end + 1]
+                        else:
+                            marker = name[:end] + chr(ord(delimiter) + 1)
+                            # we want result to be inclusive of delim+1
+                            delim_force_gte = True
                         dir_name = name[:end + 1]
                         if dir_name != orig_marker:
                             results.append([dir_name, '0', 0, None, ''])
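Putting the pieces together, a self-contained sketch of the reversed listing: the caller's markers arrive in descending sense, get swapped, and the same WHERE clause runs with DESC ordering. The in-memory table merely stands in for the object table:

    import sqlite3

    conn = sqlite3.connect(':memory:')
    conn.execute('CREATE TABLE object (name TEXT)')
    conn.executemany('INSERT INTO object VALUES (?)',
                     [('a',), ('b',), ('c',), ('d',)])

    def listing(marker, end_marker, reverse, limit=10):
        if reverse:
            # same swap as the hunk above
            marker, end_marker = end_marker, marker
        query, args = 'SELECT name FROM object WHERE 1=1', []
        if marker:
            query += ' AND name > ?'
            args.append(marker)
        if end_marker:
            query += ' AND name < ?'
            args.append(end_marker)
        query += ' ORDER BY name %s LIMIT ?' % ('DESC' if reverse else '')
        args.append(limit)
        return [r[0] for r in conn.execute(query, args)]

    print(listing('a', 'd', reverse=False))  # ['b', 'c']
    print(listing('d', 'a', reverse=True))   # ['c', 'b']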
@@ -15,6 +15,7 @@

 import os
 import itertools
+import json
 import time
 from collections import defaultdict
 from eventlet import Timeout
@@ -28,7 +29,7 @@ from swift.common.storage_policy import POLICIES
 from swift.common.exceptions import DeviceUnavailable
 from swift.common.http import is_success
 from swift.common.db import DatabaseAlreadyExists
-from swift.common.utils import (json, Timestamp, hash_path,
+from swift.common.utils import (Timestamp, hash_path,
                                 storage_directory, quorum_size)
@@ -13,6 +13,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

+import json
 import os
 import time
 import traceback
@@ -30,7 +31,7 @@ from swift.common.request_helpers import get_param, get_listing_content_type, \
     split_and_validate_path, is_sys_or_user_meta
 from swift.common.utils import get_logger, hash_path, public, \
     Timestamp, storage_directory, validate_sync_to, \
-    config_true_value, json, timing_stats, replication, \
+    config_true_value, timing_stats, replication, \
     override_bytes_from_content_type, get_log_line
 from swift.common.constraints import check_mount, valid_timestamp, check_utf8
 from swift.common import constraints
@@ -86,7 +87,7 @@ class ContainerController(BaseStorageServer):
         self.log_requests = config_true_value(conf.get('log_requests', 'true'))
         self.root = conf.get('devices', '/srv/node')
         self.mount_check = config_true_value(conf.get('mount_check', 'true'))
-        self.node_timeout = int(conf.get('node_timeout', 3))
+        self.node_timeout = float(conf.get('node_timeout', 3))
         self.conn_timeout = float(conf.get('conn_timeout', 0.5))
         #: ContainerSyncCluster instance for validating sync-to values.
         self.realms_conf = ContainerSyncRealms(
@@ -452,6 +453,7 @@ class ContainerController(BaseStorageServer):
         end_marker = get_param(req, 'end_marker')
         limit = constraints.CONTAINER_LISTING_LIMIT
         given_limit = get_param(req, 'limit')
+        reverse = config_true_value(get_param(req, 'reverse'))
         if given_limit and given_limit.isdigit():
             limit = int(given_limit)
             if limit > constraints.CONTAINER_LISTING_LIMIT:
@@ -471,7 +473,7 @@ class ContainerController(BaseStorageServer):
             return HTTPNotFound(request=req, headers=resp_headers)
         container_list = broker.list_objects_iter(
             limit, marker, end_marker, prefix, delimiter, path,
-            storage_policy_index=info['storage_policy_index'])
+            storage_policy_index=info['storage_policy_index'], reverse=reverse)
         return self.create_listing(req, out_content_type, info, resp_headers,
                                    broker.metadata, container_list, container)
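Hedged usage sketch of the new query parameter: once the server change above is deployed, a container GET accepts `reverse` (host, account and token below are placeholders, and the `requests` library is an assumption, not what Swift itself uses):

    import requests  # assumption: requests is available in the test env

    resp = requests.get(
        'http://127.0.0.1:8080/v1/AUTH_test/mycontainer',
        params={'format': 'json', 'reverse': 'true'},
        headers={'X-Auth-Token': '<token>'})
    for obj in resp.json():
        print(obj['name'])  # names come back in descending order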
@@ -49,7 +49,7 @@ class ContainerUpdater(Daemon):
         self.account_ring = None
         self.concurrency = int(conf.get('concurrency', 4))
         self.slowdown = float(conf.get('slowdown', 0.01))
-        self.node_timeout = int(conf.get('node_timeout', 3))
+        self.node_timeout = float(conf.get('node_timeout', 3))
         self.conn_timeout = float(conf.get('conn_timeout', 0.5))
         self.no_changes = 0
         self.successes = 0
@@ -89,7 +89,7 @@ class ContainerUpdater(Daemon):
         for device in self._listdir(self.devices):
             dev_path = os.path.join(self.devices, device)
             if self.mount_check and not ismount(dev_path):
-                self.logger.warn(_('%s is not mounted'), device)
+                self.logger.warning(_('%s is not mounted'), device)
                 continue
             con_path = os.path.join(dev_path, DATADIR)
             if not os.path.exists(con_path):
@@ -10,14 +10,13 @@
 # Tom Cocozzello <tjcocozz@us.ibm.com>, 2015. #zanata
 msgid ""
 msgstr ""
-"Project-Id-Version: swift 2.4.1.dev48\n"
+"Project-Id-Version: swift 2.5.1.dev70\n"
 "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
-"POT-Creation-Date: 2015-09-28 06:27+0000\n"
+"POT-Creation-Date: 2015-10-23 06:34+0000\n"
 "PO-Revision-Date: 2015-08-11 11:22+0000\n"
 "Last-Translator: openstackjenkins <jenkins@openstack.org>\n"
 "Language: de\n"
-"Language-Team: German (http://www.transifex.com/openstack/swift/language/"
-"de/)\n"
+"Language-Team: German\n"
 "Plural-Forms: nplurals=2; plural=(n != 1)\n"
 "MIME-Version: 1.0\n"
 "Content-Type: text/plain; charset=utf-8\n"
@@ -8,14 +8,13 @@
 # Tom Cocozzello <tjcocozz@us.ibm.com>, 2015. #zanata
 msgid ""
 msgstr ""
-"Project-Id-Version: swift 2.4.1.dev48\n"
+"Project-Id-Version: swift 2.5.1.dev70\n"
 "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
-"POT-Creation-Date: 2015-09-28 06:27+0000\n"
+"POT-Creation-Date: 2015-10-23 06:34+0000\n"
 "PO-Revision-Date: 2015-09-09 05:36+0000\n"
 "Last-Translator: Carlos A. Muñoz <camunoz@redhat.com>\n"
 "Language: es\n"
-"Language-Team: Spanish (http://www.transifex.com/openstack/swift/language/"
-"es/)\n"
+"Language-Team: Spanish\n"
 "Plural-Forms: nplurals=2; plural=(n != 1)\n"
 "MIME-Version: 1.0\n"
 "Content-Type: text/plain; charset=utf-8\n"
@@ -8,14 +8,13 @@
 # Tom Cocozzello <tjcocozz@us.ibm.com>, 2015. #zanata
 msgid ""
 msgstr ""
-"Project-Id-Version: swift 2.4.1.dev48\n"
+"Project-Id-Version: swift 2.5.1.dev70\n"
 "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
-"POT-Creation-Date: 2015-09-28 06:27+0000\n"
+"POT-Creation-Date: 2015-10-23 06:34+0000\n"
 "PO-Revision-Date: 2015-08-11 11:22+0000\n"
 "Last-Translator: openstackjenkins <jenkins@openstack.org>\n"
 "Language: fr\n"
-"Language-Team: French (http://www.transifex.com/openstack/swift/language/"
-"fr/)\n"
+"Language-Team: French\n"
 "Plural-Forms: nplurals=2; plural=(n > 1)\n"
 "MIME-Version: 1.0\n"
 "Content-Type: text/plain; charset=utf-8\n"
@@ -7,14 +7,13 @@
 # Tom Cocozzello <tjcocozz@us.ibm.com>, 2015. #zanata
 msgid ""
 msgstr ""
-"Project-Id-Version: swift 2.4.1.dev48\n"
+"Project-Id-Version: swift 2.5.1.dev70\n"
 "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
-"POT-Creation-Date: 2015-09-28 06:27+0000\n"
+"POT-Creation-Date: 2015-10-23 06:34+0000\n"
 "PO-Revision-Date: 2015-08-11 11:22+0000\n"
 "Last-Translator: openstackjenkins <jenkins@openstack.org>\n"
 "Language: it\n"
-"Language-Team: Italian (http://www.transifex.com/openstack/swift/language/"
-"it/)\n"
+"Language-Team: Italian\n"
 "Plural-Forms: nplurals=2; plural=(n != 1)\n"
 "MIME-Version: 1.0\n"
 "Content-Type: text/plain; charset=utf-8\n"
@@ -9,14 +9,13 @@
 # Tom Cocozzello <tjcocozz@us.ibm.com>, 2015. #zanata
 msgid ""
 msgstr ""
-"Project-Id-Version: swift 2.4.1.dev48\n"
+"Project-Id-Version: swift 2.5.1.dev70\n"
 "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
-"POT-Creation-Date: 2015-09-28 06:27+0000\n"
+"POT-Creation-Date: 2015-10-23 06:34+0000\n"
 "PO-Revision-Date: 2015-09-26 09:26+0000\n"
 "Last-Translator: Akihiro Motoki <amotoki@gmail.com>\n"
 "Language: ja\n"
-"Language-Team: Japanese (http://www.transifex.com/openstack/swift/language/"
-"ja/)\n"
+"Language-Team: Japanese\n"
 "Plural-Forms: nplurals=1; plural=0\n"
 "MIME-Version: 1.0\n"
 "Content-Type: text/plain; charset=utf-8\n"
@@ -9,14 +9,13 @@
 # OpenStack Infra <zanata@openstack.org>, 2015. #zanata
 msgid ""
 msgstr ""
-"Project-Id-Version: swift 2.4.1.dev48\n"
+"Project-Id-Version: swift 2.5.1.dev70\n"
 "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
-"POT-Creation-Date: 2015-09-28 06:27+0000\n"
+"POT-Creation-Date: 2015-10-23 06:34+0000\n"
 "PO-Revision-Date: 2015-09-09 05:10+0000\n"
 "Last-Translator: Ying Chun Guo <daisy.ycguo@gmail.com>\n"
 "Language: ko_KR\n"
-"Language-Team: Korean (Korea) (http://www.transifex.com/openstack/swift/"
-"language/ko_KR/)\n"
+"Language-Team: Korean (South Korea)\n"
 "Plural-Forms: nplurals=1; plural=0\n"
 "MIME-Version: 1.0\n"
 "Content-Type: text/plain; charset=utf-8\n"
@@ -11,14 +11,13 @@
 # OpenStack Infra <zanata@openstack.org>, 2015. #zanata
 msgid ""
 msgstr ""
-"Project-Id-Version: swift 2.4.1.dev48\n"
+"Project-Id-Version: swift 2.5.1.dev70\n"
 "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
-"POT-Creation-Date: 2015-09-28 06:27+0000\n"
+"POT-Creation-Date: 2015-10-23 06:34+0000\n"
 "PO-Revision-Date: 2015-08-11 11:22+0000\n"
 "Last-Translator: openstackjenkins <jenkins@openstack.org>\n"
 "Language: pt_BR\n"
-"Language-Team: Portuguese (Brazil) (http://www.transifex.com/openstack/swift/"
-"language/pt_BR/)\n"
+"Language-Team: Portuguese (Brazil)\n"
 "Plural-Forms: nplurals=2; plural=(n > 1)\n"
 "MIME-Version: 1.0\n"
 "Content-Type: text/plain; charset=utf-8\n"
@@ -7,14 +7,13 @@
 # OpenStack Infra <zanata@openstack.org>, 2015. #zanata
 msgid ""
 msgstr ""
-"Project-Id-Version: swift 2.4.1.dev48\n"
+"Project-Id-Version: swift 2.5.1.dev70\n"
 "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
-"POT-Creation-Date: 2015-09-28 06:27+0000\n"
+"POT-Creation-Date: 2015-10-23 06:34+0000\n"
 "PO-Revision-Date: 2015-08-11 11:22+0000\n"
 "Last-Translator: openstackjenkins <jenkins@openstack.org>\n"
 "Language: ru\n"
-"Language-Team: Russian (http://www.transifex.com/openstack/swift/language/"
-"ru/)\n"
+"Language-Team: Russian\n"
 "Plural-Forms: nplurals=4; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n"
 "%10<=4 && (n%100<12 || n%100>14) ? 1 : n%10==0 || (n%10>=5 && n%10<=9) || (n"
 "%100>=11 && n%100<=14)? 2 : 3)\n"
@@ -1,19 +0,0 @@
-# Translations template for swift.
-# Copyright (C) 2015 ORGANIZATION
-# This file is distributed under the same license as the swift project.
-# FIRST AUTHOR <EMAIL@ADDRESS>, 2015.
-#
-#, fuzzy
-msgid ""
-msgstr ""
-"Project-Id-Version: swift 2.3.1.dev213\n"
-"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
-"POT-Creation-Date: 2015-07-29 06:35+0000\n"
-"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
-"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
-"Language-Team: LANGUAGE <LL@li.org>\n"
-"MIME-Version: 1.0\n"
-"Content-Type: text/plain; charset=utf-8\n"
-"Content-Transfer-Encoding: 8bit\n"
-"Generated-By: Babel 2.0\n"
@@ -1,19 +0,0 @@
-# Translations template for swift.
-# Copyright (C) 2015 ORGANIZATION
-# This file is distributed under the same license as the swift project.
-# FIRST AUTHOR <EMAIL@ADDRESS>, 2015.
-#
-#, fuzzy
-msgid ""
-msgstr ""
-"Project-Id-Version: swift 2.3.1.dev213\n"
-"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
-"POT-Creation-Date: 2015-07-29 06:35+0000\n"
-"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
-"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
-"Language-Team: LANGUAGE <LL@li.org>\n"
-"MIME-Version: 1.0\n"
-"Content-Type: text/plain; charset=utf-8\n"
-"Content-Transfer-Encoding: 8bit\n"
-"Generated-By: Babel 2.0\n"
@@ -1,19 +0,0 @@
-# Translations template for swift.
-# Copyright (C) 2015 ORGANIZATION
-# This file is distributed under the same license as the swift project.
-# FIRST AUTHOR <EMAIL@ADDRESS>, 2015.
-#
-#, fuzzy
-msgid ""
-msgstr ""
-"Project-Id-Version: swift 2.3.1.dev213\n"
-"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
-"POT-Creation-Date: 2015-07-29 06:35+0000\n"
-"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
-"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
-"Language-Team: LANGUAGE <LL@li.org>\n"
-"MIME-Version: 1.0\n"
-"Content-Type: text/plain; charset=utf-8\n"
-"Content-Transfer-Encoding: 8bit\n"
-"Generated-By: Babel 2.0\n"
@@ -1,19 +0,0 @@
-# Translations template for swift.
-# Copyright (C) 2015 ORGANIZATION
-# This file is distributed under the same license as the swift project.
-# FIRST AUTHOR <EMAIL@ADDRESS>, 2015.
-#
-#, fuzzy
-msgid ""
-msgstr ""
-"Project-Id-Version: swift 2.3.1.dev213\n"
-"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
-"POT-Creation-Date: 2015-07-29 06:35+0000\n"
-"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
-"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
-"Language-Team: LANGUAGE <LL@li.org>\n"
-"MIME-Version: 1.0\n"
-"Content-Type: text/plain; charset=utf-8\n"
-"Content-Transfer-Encoding: 8bit\n"
-"Generated-By: Babel 2.0\n"
@@ -7,14 +7,13 @@
 # OpenStack Infra <zanata@openstack.org>, 2015. #zanata
 msgid ""
 msgstr ""
-"Project-Id-Version: swift 2.4.1.dev48\n"
+"Project-Id-Version: swift 2.5.1.dev70\n"
 "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
-"POT-Creation-Date: 2015-09-28 06:27+0000\n"
+"POT-Creation-Date: 2015-10-23 06:34+0000\n"
 "PO-Revision-Date: 2015-09-04 07:42+0000\n"
 "Last-Translator: İşbaran Akçayır <isbaran@gmail.com>\n"
 "Language: tr_TR\n"
-"Language-Team: Turkish (Turkey) (http://www.transifex.com/openstack/swift/"
-"language/tr_TR/)\n"
+"Language-Team: Turkish (Turkey)\n"
 "Plural-Forms: nplurals=1; plural=0\n"
 "MIME-Version: 1.0\n"
 "Content-Type: text/plain; charset=utf-8\n"
@@ -8,14 +8,13 @@
 # OpenStack Infra <zanata@openstack.org>, 2015. #zanata
 msgid ""
 msgstr ""
-"Project-Id-Version: swift 2.4.1.dev48\n"
+"Project-Id-Version: swift 2.5.1.dev70\n"
 "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
-"POT-Creation-Date: 2015-09-28 06:27+0000\n"
+"POT-Creation-Date: 2015-10-23 06:34+0000\n"
 "PO-Revision-Date: 2015-08-11 11:22+0000\n"
 "Last-Translator: openstackjenkins <jenkins@openstack.org>\n"
 "Language: zh_Hans_CN\n"
-"Language-Team: Chinese (China) (http://www.transifex.com/openstack/swift/"
-"language/zh_CN/)\n"
+"Language-Team: Chinese (China)\n"
 "Plural-Forms: nplurals=1; plural=0\n"
 "MIME-Version: 1.0\n"
 "Content-Type: text/plain; charset=utf-8\n"
@@ -7,14 +7,13 @@
 # OpenStack Infra <zanata@openstack.org>, 2015. #zanata
 msgid ""
 msgstr ""
-"Project-Id-Version: swift 2.4.1.dev48\n"
+"Project-Id-Version: swift 2.5.1.dev70\n"
 "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
-"POT-Creation-Date: 2015-09-28 06:27+0000\n"
+"POT-Creation-Date: 2015-10-23 06:34+0000\n"
 "PO-Revision-Date: 2015-08-11 11:22+0000\n"
 "Last-Translator: openstackjenkins <jenkins@openstack.org>\n"
 "Language: zh_Hant_TW\n"
-"Language-Team: Chinese (Taiwan) (http://www.transifex.com/openstack/swift/"
-"language/zh_TW/)\n"
+"Language-Team: Chinese (Taiwan)\n"
 "Plural-Forms: nplurals=1; plural=0\n"
 "MIME-Version: 1.0\n"
 "Content-Type: text/plain; charset=utf-8\n"
@@ -13,6 +13,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

+import json
 import os
 import sys
 import time
@@ -24,7 +25,7 @@ from eventlet import Timeout

 from swift.obj import diskfile
 from swift.common.utils import get_logger, ratelimit_sleep, dump_recon_cache, \
-    list_from_csv, json, listdir
+    list_from_csv, listdir
 from swift.common.exceptions import DiskFileQuarantined, DiskFileNotExist
 from swift.common.daemon import Daemon
@@ -303,8 +303,8 @@ def object_audit_location_generator(devices, mount_check=True, logger=None,
                 base, policy = split_policy_string(dir_)
             except PolicyError as e:
                 if logger:
-                    logger.warn(_('Directory %r does not map '
-                                  'to a valid policy (%s)') % (dir_, e))
+                    logger.warning(_('Directory %r does not map '
+                                     'to a valid policy (%s)') % (dir_, e))
                 continue
             datadir_path = os.path.join(devices, device, dir_)
             partitions = listdir(datadir_path)
@@ -420,7 +420,7 @@ class BaseDiskFileManager(object):
         # If the operator wants zero-copy with splice() but we don't have the
         # requisite kernel support, complain so they can go fix it.
         if conf_wants_splice and not splice.available:
-            self.logger.warn(
+            self.logger.warning(
                 "Use of splice() requested (config says \"splice = %s\"), "
                 "but the system does not support it. "
                 "splice() will not be used." % conf.get('splice'))
@@ -434,8 +434,8 @@ class BaseDiskFileManager(object):
                 # AF_ALG support), we can't use zero-copy.
                 if err.errno != errno.EAFNOSUPPORT:
                     raise
-                self.logger.warn("MD5 sockets not supported. "
-                                 "splice() will not be used.")
+                self.logger.warning("MD5 sockets not supported. "
+                                    "splice() will not be used.")
             else:
                 self.use_splice = True
                 with open('/proc/sys/fs/pipe-max-size') as f:
@@ -447,7 +447,7 @@ class BaseDiskFileManager(object):
         Parse an on disk file name.

         :param filename: the data file name including extension
-        :returns: a dict, with keys for timestamp, and ext::
+        :returns: a dict, with keys for timestamp, and ext:

             * timestamp is a :class:`~swift.common.utils.Timestamp`
             * ext is a string, the file extension including the leading dot or
@@ -460,92 +460,175 @@ class BaseDiskFileManager(object):
         """
         raise NotImplementedError

-    def _gather_on_disk_file(self, filename, ext, context, frag_index=None,
-                             **kwargs):
+    def _process_ondisk_files(self, exts, results, **kwargs):
         """
-        Called by gather_ondisk_files() for each file in an object
-        datadir in reverse sorted order. If a file is considered part of a
-        valid on-disk file set it will be added to the context dict, keyed by
-        its extension. If a file is considered to be obsolete it will be added
-        to a list stored under the key 'obsolete' in the context dict.
+        Called by get_ondisk_files(). Should be over-ridden to implement
+        subclass specific handling of files.

-        :param filename: name of file to be accepted or not
-        :param ext: extension part of filename
-        :param context: a context dict that may have been populated by previous
-                        calls to this method
-        :returns: True if a valid file set has been found, False otherwise
+        :param exts: dict of lists of file info, keyed by extension
+        :param results: a dict that may be updated with results
         """
         raise NotImplementedError

-    def _verify_on_disk_files(self, accepted_files, **kwargs):
+    def _verify_ondisk_files(self, results, **kwargs):
         """
         Verify that the final combination of on disk files complies with the
         diskfile contract.

-        :param accepted_files: files that have been found and accepted
+        :param results: files that have been found and accepted
         :returns: True if the file combination is compliant, False otherwise
         """
-        raise NotImplementedError
+        data_file, meta_file, ts_file = tuple(
+            [results[key]
+             for key in ('data_file', 'meta_file', 'ts_file')])

-    def gather_ondisk_files(self, files, include_obsolete=False,
-                            verify=False, **kwargs):
+        return ((data_file is None and meta_file is None and ts_file is None)
+                or (ts_file is not None and data_file is None
+                    and meta_file is None)
+                or (data_file is not None and ts_file is None))
+
+    def _split_list(self, original_list, condition):
         """
-        Given a simple list of files names, iterate over them to determine the
-        files that constitute a valid object, and optionally determine the
-        files that are obsolete and could be deleted. Note that some files may
-        fall into neither category.
+        Split a list into two lists. The first list contains the first N items
+        of the original list, in their original order, where 0 < N <=
+        len(original list). The second list contains the remaining items of the
+        original list, in their original order.
+
+        The index, N, at which the original list is split is the index of the
+        first item in the list that does not satisfy the given condition. Note
+        that the original list should be appropriately sorted if the second
+        list is to contain no items that satisfy the given condition.
+
+        :param original_list: the list to be split.
+        :param condition: a single argument function that will be used to test
+                          for the list item to split on.
+        :return: a tuple of two lists.
+        """
+        for i, item in enumerate(original_list):
+            if not condition(item):
+                return original_list[:i], original_list[i:]
+        return original_list, []
+
+    def _split_gt_timestamp(self, file_info_list, timestamp):
+        """
+        Given a list of file info dicts, reverse sorted by timestamp, split the
+        list into two: items newer than timestamp, and items at same time or
+        older than timestamp.
+
+        :param file_info_list: a list of file_info dicts.
+        :param timestamp: a Timestamp.
+        :return: a tuple of two lists.
+        """
+        return self._split_list(
+            file_info_list, lambda x: x['timestamp'] > timestamp)
+
+    def _split_gte_timestamp(self, file_info_list, timestamp):
+        """
+        Given a list of file info dicts, reverse sorted by timestamp, split the
+        list into two: items newer than or at same time as the timestamp, and
+        items older than timestamp.
+
+        :param file_info_list: a list of file_info dicts.
+        :param timestamp: a Timestamp.
+        :return: a tuple of two lists.
+        """
+        return self._split_list(
+            file_info_list, lambda x: x['timestamp'] >= timestamp)
+
+    def get_ondisk_files(self, files, datadir, verify=True, **kwargs):
+        """
+        Given a simple list of files names, determine the files that constitute
+        a valid fileset i.e. a set of files that defines the state of an
+        object, and determine the files that are obsolete and could be deleted.
+        Note that some files may fall into neither category.
+
+        If a file is considered part of a valid fileset then its info dict will
+        be added to the results dict, keyed by <extension>_info. Any files that
+        are no longer required will have their info dicts added to a list
+        stored under the key 'obsolete'.
+
+        The results dict will always contain entries with keys 'ts_file',
+        'data_file' and 'meta_file'. Their values will be the fully qualified
+        path to a file of the corresponding type if there is such a file in the
+        valid fileset, or None.

         :param files: a list of file names.
-        :param include_obsolete: By default the iteration will stop when a
-                                 valid file set has been found. Setting this
-                                 argument to True will cause the iteration to
-                                 continue in order to find all obsolete files.
-        :returns: a dict that may contain: valid on disk files keyed by their
-                  filename extension; a list of obsolete files stored under the
-                  key 'obsolete'.
+        :param datadir: directory name files are from.
+        :param verify: if True verify that the ondisk file contract has not
+                       been violated, otherwise do not verify.
+        :returns: a dict that will contain keys:
+                    ts_file   -> path to a .ts file or None
+                    data_file -> path to a .data file or None
+                    meta_file -> path to a .meta file or None
+                  and may contain keys:
+                    ts_info   -> a file info dict for a .ts file
+                    data_info -> a file info dict for a .data file
+                    meta_info -> a file info dict for a .meta file
+                    obsolete  -> a list of file info dicts for obsolete files
         """
-        files.sort(reverse=True)
-        results = {}
+        # Build the exts data structure:
+        # exts is a dict that maps file extensions to a list of file_info
+        # dicts for the files having that extension. The file_info dicts are of
+        # the form returned by parse_on_disk_filename, with the filename added.
+        # Each list is sorted in reverse timestamp order.
+        #
+        # The exts dict will be modified during subsequent processing as files
+        # are removed to be discarded or ignored.
+        exts = defaultdict(list)
         for afile in files:
-            ts_file = results.get('.ts')
-            data_file = results.get('.data')
-            if not include_obsolete:
-                assert ts_file is None, "On-disk file search loop" \
-                    " continuing after tombstone, %s, encountered" % ts_file
-                assert data_file is None, "On-disk file search loop" \
-                    " continuing after data file, %s, encountered" % data_file
+            # Categorize files by extension
+            try:
+                file_info = self.parse_on_disk_filename(afile)
+                file_info['filename'] = afile
+                exts[file_info['ext']].append(file_info)
+            except DiskFileError as e:
+                self.logger.warning('Unexpected file %s: %s' %
+                                    (os.path.join(datadir or '', afile), e))
+        for ext in exts:
+            # For each extension sort files into reverse chronological order.
+            exts[ext] = sorted(
+                exts[ext], key=lambda info: info['timestamp'], reverse=True)

-            ext = splitext(afile)[1]
-            if self._gather_on_disk_file(
-                    afile, ext, results, **kwargs):
-                if not include_obsolete:
-                    break
+        # the results dict is used to collect results of file filtering
+        results = {}
+
+        # non-tombstones older than or equal to latest tombstone are obsolete
+        if exts.get('.ts'):
+            for ext in filter(lambda ext: ext != '.ts', exts.keys()):
+                exts[ext], older = self._split_gt_timestamp(
+                    exts[ext], exts['.ts'][0]['timestamp'])
+                results.setdefault('obsolete', []).extend(older)
+
+        # all but most recent .meta and .ts are obsolete
+        for ext in ('.meta', '.ts'):
+            if ext in exts:
+                results.setdefault('obsolete', []).extend(exts[ext][1:])
+                exts[ext] = exts[ext][:1]
+
+        # delegate to subclass handler
+        self._process_ondisk_files(exts, results, **kwargs)
+
+        # set final choice of files
+        if exts.get('.ts'):
+            results['ts_info'] = exts['.ts'][0]
+        if 'data_info' in results and exts.get('.meta'):
+            # only report a meta file if there is a data file
+            results['meta_info'] = exts['.meta'][0]
+
+        # set ts_file, data_file and meta_file with path to chosen file or None
+        for info_key in ('data_info', 'meta_info', 'ts_info'):
+            info = results.get(info_key)
+            key = info_key[:-5] + '_file'
+            results[key] = join(datadir, info['filename']) if info else None

         if verify:
-            assert self._verify_on_disk_files(
+            assert self._verify_ondisk_files(
                 results, **kwargs), \
                 "On-disk file search algorithm contract is broken: %s" \
-                % results.values()
+                % str(results)

         return results

-    def get_ondisk_files(self, files, datadir, **kwargs):
-        """
-        Given a simple list of files names, determine the files to use.
-
-        :param files: simple set of files as a python list
-        :param datadir: directory name files are from for convenience
-        :returns: dict of files to use having keys 'data_file', 'ts_file',
-                  'meta_file' and optionally other policy specific keys
-        """
-        file_info = self.gather_ondisk_files(files, verify=True, **kwargs)
-        for ext in ('.data', '.meta', '.ts'):
-            filename = file_info.get(ext)
-            key = '%s_file' % ext[1:]
-            file_info[key] = join(datadir, filename) if filename else None
-        return file_info
-
     def cleanup_ondisk_files(self, hsh_path, reclaim_age=ONE_WEEK, **kwargs):
         """
         Clean up on-disk files that are obsolete and gather the set of valid
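The split helpers added above underpin all of the new filtering. Their semantics are easy to demonstrate standalone, with plain dicts and floats standing in for file_info dicts and Timestamp instances:

    def split_list(original_list, condition):
        # same logic as _split_list above
        for i, item in enumerate(original_list):
            if not condition(item):
                return original_list[:i], original_list[i:]
        return original_list, []

    infos = [{'timestamp': 4.0}, {'timestamp': 3.0}, {'timestamp': 1.0}]
    newer, older = split_list(infos, lambda x: x['timestamp'] > 2.0)
    print(newer)  # [{'timestamp': 4.0}, {'timestamp': 3.0}]
    print(older)  # [{'timestamp': 1.0}]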
@@ -560,27 +643,24 @@ class BaseDiskFileManager(object):
                   key 'obsolete'; a list of files remaining in the directory,
                   reverse sorted, stored under the key 'files'.
         """
-        def is_reclaimable(filename):
-            timestamp = self.parse_on_disk_filename(filename)['timestamp']
+        def is_reclaimable(timestamp):
             return (time.time() - float(timestamp)) > reclaim_age

         files = listdir(hsh_path)
-        files.sort(reverse=True)
-        results = self.gather_ondisk_files(files, include_obsolete=True,
-                                           **kwargs)
-        # TODO ref to durables here
-        if '.durable' in results and not results.get('fragments'):
-            # a .durable with no .data is deleted as soon as it is found
-            results.setdefault('obsolete', []).append(results.pop('.durable'))
-        if '.ts' in results and is_reclaimable(results['.ts']):
-            results.setdefault('obsolete', []).append(results.pop('.ts'))
-        for filename in results.get('fragments_without_durable', []):
+        results = self.get_ondisk_files(
+            files, hsh_path, verify=False, **kwargs)
+        if 'ts_info' in results and is_reclaimable(
+                results['ts_info']['timestamp']):
+            remove_file(join(hsh_path, results['ts_info']['filename']))
+            files.remove(results.pop('ts_info')['filename'])
+        for file_info in results.get('possible_reclaim', []):
             # stray fragments are not deleted until reclaim-age
-            if is_reclaimable(filename):
-                results.setdefault('obsolete', []).append(filename)
-        for filename in results.get('obsolete', []):
-            remove_file(join(hsh_path, filename))
-            files.remove(filename)
+            if is_reclaimable(file_info['timestamp']):
+                results.setdefault('obsolete', []).append(file_info)
+        for file_info in results.get('obsolete', []):
+            remove_file(join(hsh_path, file_info['filename']))
+            files.remove(file_info['filename'])
         results['files'] = files
         return results
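The reclaim predicate now takes a timestamp rather than re-parsing a filename. A self-contained sketch of the same age test (ONE_WEEK mirrors the default named in the signature above):

    import time

    ONE_WEEK = 7 * 24 * 60 * 60

    def is_reclaimable(timestamp, reclaim_age=ONE_WEEK):
        # same age test as above: older than reclaim_age means removable
        return (time.time() - float(timestamp)) > reclaim_age

    print(is_reclaimable(time.time() - 2 * ONE_WEEK))  # True
    print(is_reclaimable(time.time()))                 # False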
@@ -895,8 +975,10 @@ class BaseDiskFileManager(object):
           be yielded.

         timestamps is a dict which may contain items mapping:
+
             ts_data -> timestamp of data or tombstone file,
             ts_meta -> timestamp of meta file, if one exists
+
         where timestamps are instances of
         :class:`~swift.common.utils.Timestamp`
         """
@@ -913,9 +995,9 @@ class BaseDiskFileManager(object):
             (os.path.join(partition_path, suffix), suffix)
             for suffix in suffixes)
         key_preference = (
-            ('ts_meta', '.meta'),
-            ('ts_data', '.data'),
-            ('ts_data', '.ts'),
+            ('ts_meta', 'meta_info'),
+            ('ts_data', 'data_info'),
+            ('ts_data', 'ts_info'),
         )
         for suffix_path, suffix in suffixes:
             for object_hash in self._listdir(suffix_path):
@@ -924,14 +1006,13 @@ class BaseDiskFileManager(object):
                 results = self.cleanup_ondisk_files(
                     object_path, self.reclaim_age, **kwargs)
                 timestamps = {}
-                for ts_key, ext in key_preference:
-                    if ext not in results:
+                for ts_key, info_key in key_preference:
+                    if info_key not in results:
                         continue
-                    timestamps[ts_key] = self.parse_on_disk_filename(
-                        results[ext])['timestamp']
+                    timestamps[ts_key] = results[info_key]['timestamp']
                 if 'ts_data' not in timestamps:
                     # file sets that do not include a .data or .ts
-                    # file can not be opened and therefore can not
+                    # file cannot be opened and therefore cannot
                     # be ssync'd
                     continue
                 yield (object_path, object_hash, timestamps)
@@ -1323,7 +1404,7 @@ class BaseDiskFileReader(object):
             self._quarantined_dir = self._threadpool.run_in_thread(
                 self.manager.quarantine_renamer, self._device_path,
                 self._data_file)
-            self._logger.warn("Quarantined object %s: %s" % (
+            self._logger.warning("Quarantined object %s: %s" % (
                 self._data_file, msg))
             self._logger.increment('quarantines')
             self._quarantine_hook(msg)
@@ -1428,6 +1509,7 @@ class BaseDiskFile(object):
         self._obj = None
         self._datadir = None
         self._tmpdir = join(device_path, get_tmp_dir(policy))
+        self._ondisk_info = None
         self._metadata = None
         self._datafile_metadata = None
         self._metafile_metadata = None
@@ -1477,6 +1559,26 @@ class BaseDiskFile(object):
             raise DiskFileNotOpen()
         return Timestamp(self._datafile_metadata.get('X-Timestamp'))

+    @property
+    def durable_timestamp(self):
+        """
+        Provides the timestamp of the newest data file found in the object
+        directory.
+
+        :return: A Timestamp instance, or None if no data file was found.
+        :raises DiskFileNotOpen: if the open() method has not been previously
+                                 called on this instance.
+        """
+        if self._ondisk_info is None:
+            raise DiskFileNotOpen()
+        if self._datafile_metadata:
+            return Timestamp(self._datafile_metadata.get('X-Timestamp'))
+        return None
+
+    @property
+    def fragments(self):
+        return None
+
     @classmethod
     def from_hash_dir(cls, mgr, hash_dir_path, device_path, partition, policy):
         return cls(mgr, device_path, None, partition, _datadir=hash_dir_path,
@@ -1522,8 +1624,8 @@ class BaseDiskFile(object):
             # The data directory does not exist, so the object cannot exist.
             files = []

-        # gather info about the valid files to us to open the DiskFile
-        file_info = self._get_ondisk_file(files)
+        # gather info about the valid files to use to open the DiskFile
+        file_info = self._get_ondisk_files(files)

         self._data_file = file_info.get('data_file')
         if not self._data_file:
@@ -1572,12 +1674,12 @@ class BaseDiskFile(object):
         """
         self._quarantined_dir = self._threadpool.run_in_thread(
             self.manager.quarantine_renamer, self._device_path, data_file)
-        self._logger.warn("Quarantined object %s: %s" % (
+        self._logger.warning("Quarantined object %s: %s" % (
             data_file, msg))
         self._logger.increment('quarantines')
         return DiskFileQuarantined(msg)

-    def _get_ondisk_file(self, files):
+    def _get_ondisk_files(self, files):
         """
         Determine the on-disk files to use.
@@ -1948,8 +2050,9 @@ class DiskFile(BaseDiskFile):
     reader_cls = DiskFileReader
     writer_cls = DiskFileWriter

-    def _get_ondisk_file(self, files):
-        return self.manager.get_ondisk_files(files, self._datadir)
+    def _get_ondisk_files(self, files):
+        self._ondisk_info = self.manager.get_ondisk_files(files, self._datadir)
+        return self._ondisk_info


 @DiskFileRouter.register(REPL_POLICY)
@@ -1961,93 +2064,48 @@ class DiskFileManager(BaseDiskFileManager):
         Returns the timestamp extracted .data file name.

         :param filename: the data file name including extension
-        :returns: a dict, with keys for timestamp, and ext::
+        :returns: a dict, with keys for timestamp, and ext:

             * timestamp is a :class:`~swift.common.utils.Timestamp`
             * ext is a string, the file extension including the leading dot or
-              the empty string if the filename has no extenstion.
+              the empty string if the filename has no extension.

         :raises DiskFileError: if any part of the filename is not able to be
                                validated.
         """
-        filename, ext = splitext(filename)
+        float_part, ext = splitext(filename)
+        try:
+            timestamp = Timestamp(float_part)
+        except ValueError:
+            raise DiskFileError('Invalid Timestamp value in filename %r'
+                                % filename)
         return {
-            'timestamp': Timestamp(filename),
+            'timestamp': timestamp,
             'ext': ext,
         }

-    def _gather_on_disk_file(self, filename, ext, context, frag_index=None,
-                             **kwargs):
+    def _process_ondisk_files(self, exts, results, **kwargs):
         """
-        Called by gather_ondisk_files() for each file in an object
-        datadir in reverse sorted order. If a file is considered part of a
-        valid on-disk file set it will be added to the context dict, keyed by
-        its extension. If a file is considered to be obsolete it will be added
-        to a list stored under the key 'obsolete' in the context dict.
+        Implement replication policy specific handling of .data files.

-        :param filename: name of file to be accepted or not
-        :param ext: extension part of filename
-        :param context: a context dict that may have been populated by previous
-                        calls to this method
-        :returns: True if a valid file set has been found, False otherwise
+        :param exts: dict of lists of file info, keyed by extension
+        :param results: a dict that may be updated with results
         """
+        if exts.get('.data'):
+            for ext in exts.keys():
+                if ext == '.data':
+                    # older .data's are obsolete
+                    exts[ext], obsolete = self._split_gte_timestamp(
+                        exts[ext], exts['.data'][0]['timestamp'])
+                else:
+                    # other files at same or older timestamp as most recent
+                    # data are obsolete
+                    exts[ext], obsolete = self._split_gt_timestamp(
+                        exts[ext], exts['.data'][0]['timestamp'])
+                results.setdefault('obsolete', []).extend(obsolete)

-        # if first file with given extension then add filename to context
-        # dict and return True
-        accept_first = lambda: context.setdefault(ext, filename) == filename
-        # add the filename to the list of obsolete files in context dict
-        discard = lambda: context.setdefault('obsolete', []).append(filename)
-        # set a flag in the context dict indicating that a valid fileset has
-        # been found
-        set_valid_fileset = lambda: context.setdefault('found_valid', True)
-        # return True if the valid fileset flag is set in the context dict
-        have_valid_fileset = lambda: context.get('found_valid')
-
-        if ext == '.data':
-            if have_valid_fileset():
-                # valid fileset means we must have a newer
-                # .data or .ts, so discard the older .data file
-                discard()
-            else:
-                accept_first()
-                set_valid_fileset()
-        elif ext == '.ts':
-            if have_valid_fileset() or not accept_first():
-                # newer .data or .ts already found so discard this
-                discard()
-            if not have_valid_fileset():
-                # remove any .meta that may have been previously found
-                context.pop('.meta', None)
-            set_valid_fileset()
-        elif ext == '.meta':
-            if have_valid_fileset() or not accept_first():
-                # newer .data, .durable or .ts already found so discard this
-                discard()
-        else:
-            # ignore unexpected files
-            pass
-        return have_valid_fileset()
-
-    def _verify_on_disk_files(self, accepted_files, **kwargs):
-        """
-        Verify that the final combination of on disk files complies with the
-        replicated diskfile contract.
-
-        :param accepted_files: files that have been found and accepted
-        :returns: True if the file combination is compliant, False otherwise
-        """
-        # mimic legacy behavior - .meta is ignored when .ts is found
-        if accepted_files.get('.ts'):
-            accepted_files.pop('.meta', None)
-
-        data_file, meta_file, ts_file, durable_file = tuple(
-            [accepted_files.get(ext)
-             for ext in ('.data', '.meta', '.ts', '.durable')])
-
-        return ((data_file is None and meta_file is None and ts_file is None)
-                or (ts_file is not None and data_file is None
-                    and meta_file is None)
-                or (data_file is not None and ts_file is None))
+            # set results
+            results['data_info'] = exts['.data'][0]

     def _hash_suffix(self, path, reclaim_age):
         """
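A standalone sketch of the replication-policy rule the new `_process_ondisk_files` implements: the newest .data wins, older .data files are obsolete at or below its timestamp, and other extensions are obsolete at or below it as well. The `split` helper here mimics `_split_list`:

    from collections import defaultdict

    def split(lst, cond):
        # mimic BaseDiskFileManager._split_list
        for i, item in enumerate(lst):
            if not cond(item):
                return lst[:i], lst[i:]
        return lst, []

    exts = defaultdict(list)
    exts['.data'] = [{'timestamp': 3}, {'timestamp': 2}]  # reverse sorted
    exts['.meta'] = [{'timestamp': 4}]                    # newer than the data
    results = {}
    newest = exts['.data'][0]['timestamp']
    for ext in list(exts):
        if ext == '.data':
            # keep only the newest .data; equal-or-older ones are obsolete
            exts[ext], obsolete = split(exts[ext],
                                        lambda x: x['timestamp'] >= newest)
        else:
            # other files at or before the newest data are obsolete
            exts[ext], obsolete = split(exts[ext],
                                        lambda x: x['timestamp'] > newest)
        results.setdefault('obsolete', []).extend(obsolete)
    results['data_info'] = exts['.data'][0]
    print(results)
    # {'obsolete': [{'timestamp': 2}], 'data_info': {'timestamp': 3}}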
@@ -2151,14 +2209,47 @@ class ECDiskFile(BaseDiskFile):
         if frag_index is not None:
             self._frag_index = self.manager.validate_fragment_index(frag_index)

-    def _get_ondisk_file(self, files):
+    @property
+    def durable_timestamp(self):
+        """
+        Provides the timestamp of the newest durable file found in the object
+        directory.
+
+        :return: A Timestamp instance, or None if no durable file was found.
+        :raises DiskFileNotOpen: if the open() method has not been previously
+                                 called on this instance.
+        """
+        if self._ondisk_info is None:
+            raise DiskFileNotOpen()
+        if self._ondisk_info.get('durable_frag_set'):
+            return self._ondisk_info['durable_frag_set'][0]['timestamp']
+        return None
+
+    @property
+    def fragments(self):
+        """
+        Provides information about all fragments that were found in the object
+        directory, including fragments without a matching durable file, and
+        including any fragment chosen to construct the opened diskfile.
+
+        :return: A dict mapping <Timestamp instance> -> <list of frag indexes>,
+                 or None if the diskfile has not been opened or no fragments
+                 were found.
+        """
+        if self._ondisk_info:
+            frag_sets = self._ondisk_info['frag_sets']
+            return dict([(ts, [info['frag_index'] for info in frag_set])
+                         for ts, frag_set in frag_sets.items()])
+
+    def _get_ondisk_files(self, files):
         """
         The only difference between this method and the replication policy
         DiskFile method is passing in the frag_index kwarg to our manager's
         get_ondisk_files method.
         """
-        return self.manager.get_ondisk_files(
+        self._ondisk_info = self.manager.get_ondisk_files(
             files, self._datadir, frag_index=self._frag_index)
+        return self._ondisk_info

     def purge(self, timestamp, frag_index):
         """
@@ -2241,20 +2332,24 @@ class ECDiskFileManager(BaseDiskFileManager):
           be stripped off to retrieve the timestamp.

         :param filename: the data file name including extension
-        :returns: a dict, with keys for timestamp, frag_index, and ext::
+        :returns: a dict, with keys for timestamp, frag_index, and ext:

             * timestamp is a :class:`~swift.common.utils.Timestamp`
             * frag_index is an int or None
             * ext is a string, the file extension including the leading dot or
-              the empty string if the filename has no extenstion.
+              the empty string if the filename has no extension.

         :raises DiskFileError: if any part of the filename is not able to be
                                validated.
         """
         frag_index = None
-        filename, ext = splitext(filename)
-        parts = filename.split('#', 1)
-        timestamp = parts[0]
+        float_frag, ext = splitext(filename)
+        parts = float_frag.split('#', 1)
+        try:
+            timestamp = Timestamp(parts[0])
+        except ValueError:
+            raise DiskFileError('Invalid Timestamp value in filename %r'
+                                % filename)
         if ext == '.data':
             # it is an error for an EC data file to not have a valid
             # fragment index
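The EC on-disk name scheme parsed above embeds the fragment index after a `#`. A standalone sketch, with a plain float standing in for the Timestamp class:

    from os.path import splitext

    def parse(filename):
        # mirrors the split performed above
        float_frag, ext = splitext(filename)
        parts = float_frag.split('#', 1)
        timestamp = float(parts[0])          # stand-in for Timestamp()
        frag_index = int(parts[1]) if len(parts) > 1 else None
        return {'timestamp': timestamp, 'frag_index': frag_index, 'ext': ext}

    print(parse('1446037089.75570#2.data'))
    # {'timestamp': 1446037089.7557, 'frag_index': 2, 'ext': '.data'}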
@ -2265,137 +2360,94 @@ class ECDiskFileManager(BaseDiskFileManager):
|
|||
pass
|
||||
frag_index = self.validate_fragment_index(frag_index)
|
||||
return {
|
||||
'timestamp': Timestamp(timestamp),
|
||||
'timestamp': timestamp,
|
||||
'frag_index': frag_index,
|
||||
'ext': ext,
|
||||
}
|
||||
|
||||
def is_obsolete(self, filename, other_filename):
|
||||
def _process_ondisk_files(self, exts, results, frag_index=None, **kwargs):
|
||||
"""
|
||||
Test if a given file is considered to be obsolete with respect to
|
||||
another file in an object storage dir.
|
||||
Implement EC policy specific handling of .data and .durable files.
|
||||
|
||||
Implements EC policy specific behavior when comparing files against a
|
||||
.durable file.
|
||||
|
||||
A simple string comparison would consider t2#1.data to be older than
|
||||
t2.durable (since t2#1.data < t2.durable). By stripping off the file
|
||||
extensions we get the desired behavior: t2#1 > t2 without compromising
|
||||
the detection of t1#1 < t2.
|
||||
|
||||
:param filename: a string representing an absolute filename
|
||||
:param other_filename: a string representing an absolute filename
|
||||
:returns: True if filename is considered obsolete, False otherwise.
|
||||
"""
|
||||
if other_filename.endswith('.durable'):
|
||||
return splitext(filename)[0] < splitext(other_filename)[0]
|
||||
return filename < other_filename
|
||||
|
||||
def _gather_on_disk_file(self, filename, ext, context, frag_index=None,
|
||||
**kwargs):
|
||||
"""
|
||||
Called by gather_ondisk_files() for each file in an object
|
||||
datadir in reverse sorted order. If a file is considered part of a
|
||||
valid on-disk file set it will be added to the context dict, keyed by
|
||||
its extension. If a file is considered to be obsolete it will be added
|
||||
to a list stored under the key 'obsolete' in the context dict.
|
||||
|
||||
:param filename: name of file to be accepted or not
|
||||
:param ext: extension part of filename
|
||||
:param context: a context dict that may have been populated by previous
|
||||
calls to this method
|
||||
:param exts: dict of lists of file info, keyed by extension
|
||||
:param results: a dict that may be updated with results
|
||||
:param frag_index: if set, search for a specific fragment index .data
|
||||
file, otherwise accept the first valid .data file.
|
||||
:returns: True if a valid file set has been found, False otherwise
|
||||
"""
|
||||
durable_info = None
|
||||
if exts.get('.durable'):
|
||||
durable_info = exts['.durable'][0]
|
||||
# Mark everything older than most recent .durable as obsolete
|
||||
# and remove from the exts dict.
|
||||
for ext in exts.keys():
|
||||
exts[ext], older = self._split_gte_timestamp(
|
||||
exts[ext], durable_info['timestamp'])
|
||||
results.setdefault('obsolete', []).extend(older)
|
||||
|
||||
# if first file with given extension then add filename to context
|
||||
# dict and return True
|
||||
        accept_first = lambda: context.setdefault(ext, filename) == filename
        # add the filename to the list of obsolete files in context dict
        discard = lambda: context.setdefault('obsolete', []).append(filename)
        # set a flag in the context dict indicating that a valid fileset has
        # been found
        set_valid_fileset = lambda: context.setdefault('found_valid', True)
        # return True if the valid fileset flag is set in the context dict
        have_valid_fileset = lambda: context.get('found_valid')

        # Split the list of .data files into sets of frags having the same
        # timestamp, identifying the durable and newest sets (if any) as we go.
        # To do this we can take advantage of the list of .data files being
        # reverse-time ordered. Keep the resulting per-timestamp frag sets in
        # a frag_sets dict mapping a Timestamp instance -> frag_set.
        all_frags = exts.get('.data')
        frag_sets = {}
        durable_frag_set = None
        while all_frags:
            frag_set, all_frags = self._split_gte_timestamp(
                all_frags, all_frags[0]['timestamp'])
            # sort the frag set into ascending frag_index order
            frag_set.sort(key=lambda info: info['frag_index'])
            timestamp = frag_set[0]['timestamp']
            frag_sets[timestamp] = frag_set
            if durable_info and durable_info['timestamp'] == timestamp:
                durable_frag_set = frag_set

        if context.get('.durable'):
            # a .durable file has been found
            if ext == '.data':
                if self.is_obsolete(filename, context.get('.durable')):
                    # this and remaining data files are older than durable
                    discard()
                    set_valid_fileset()
                else:
                    # accept the first .data file if it matches requested
                    # frag_index, or if no specific frag_index is requested
                    fi = self.parse_on_disk_filename(filename)['frag_index']
                    if frag_index is None or frag_index == int(fi):
                        accept_first()
                        set_valid_fileset()
                    # else: keep searching for a .data file to match frag_index
                    context.setdefault('fragments', []).append(filename)

        # Select a single chosen frag from the chosen frag_set, by either
        # matching against a specified frag_index or taking the highest index.
        chosen_frag = None
        if durable_frag_set:
            if frag_index is not None:
                # search the frag set to find the exact frag_index
                for info in durable_frag_set:
                    if info['frag_index'] == frag_index:
                        chosen_frag = info
                        break
            else:
                # there can no longer be a matching .data file so mark what has
                # been found so far as the valid fileset
                discard()
                set_valid_fileset()
        elif ext == '.data':
            # not yet found a .durable
            if have_valid_fileset():
                # valid fileset means we must have a newer
                # .ts, so discard the older .data file
                discard()
            else:
                # .data newer than a .durable or .ts, don't discard yet
                context.setdefault('fragments_without_durable', []).append(
                    filename)
        elif ext == '.ts':
            if have_valid_fileset() or not accept_first():
                # newer .data, .durable or .ts already found so discard this
                discard()
            if not have_valid_fileset():
                # remove any .meta that may have been previously found
                context.pop('.meta', None)
            set_valid_fileset()
        elif ext in ('.meta', '.durable'):
            if have_valid_fileset() or not accept_first():
                # newer .data, .durable or .ts already found so discard this
                discard()
        else:
            # ignore unexpected files
            pass
        return have_valid_fileset()
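Reviewer note: the new splitting loop relies only on the .data list arriving newest-first. A minimal standalone sketch of the same idea, where split_gte and the literal file infos are hypothetical stand-ins for _split_gte_timestamp and parse_on_disk_filename output:

    from collections import OrderedDict

    def split_gte(infos, timestamp):
        # mirror of what a _split_gte_timestamp style helper is assumed to
        # do: split a newest-first list into (items at `timestamp`, older)
        same = [i for i in infos if i['timestamp'] == timestamp]
        older = [i for i in infos if i['timestamp'] < timestamp]
        return same, older

    # hypothetical parsed .data file infos, newest first
    all_frags = [
        {'timestamp': 20, 'frag_index': 3},
        {'timestamp': 20, 'frag_index': 1},
        {'timestamp': 10, 'frag_index': 2},
    ]
    frag_sets = OrderedDict()
    while all_frags:
        frag_set, all_frags = split_gte(all_frags, all_frags[0]['timestamp'])
        # sort each per-timestamp set into ascending frag_index order
        frag_set.sort(key=lambda info: info['frag_index'])
        frag_sets[frag_set[0]['timestamp']] = frag_set

    print(frag_sets)  # maps 20 -> frag indexes [1, 3] and 10 -> [2]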
            else:
                chosen_frag = durable_frag_set[-1]

    def _verify_on_disk_files(self, accepted_files, frag_index=None, **kwargs):

        # If we successfully found a frag then set results
        if chosen_frag:
            results['data_info'] = chosen_frag
            results['durable_frag_set'] = durable_frag_set
            results['frag_sets'] = frag_sets

        # Mark any isolated .durable as obsolete
        if exts.get('.durable') and not durable_frag_set:
            results.setdefault('obsolete', []).extend(exts['.durable'])
            exts.pop('.durable')

        # Fragments *may* be ready for reclaim, unless they are durable or
        # at the timestamp we have just chosen for constructing the diskfile.
        for frag_set in frag_sets.values():
            if frag_set == durable_frag_set:
                continue
            results.setdefault('possible_reclaim', []).extend(frag_set)

    def _verify_ondisk_files(self, results, frag_index=None, **kwargs):
        """
        Verify that the final combination of on disk files complies with the
        erasure-coded diskfile contract.

        :param accepted_files: files that have been found and accepted
        :param results: files that have been found and accepted
        :param frag_index: specifies a specific fragment index .data file
        :returns: True if the file combination is compliant, False otherwise
        """
        if not accepted_files.get('.data'):
            # We may find only a .meta, which doesn't mean the on disk
            # contract is broken. So we clear it to comply with
            # superclass assertions.
            accepted_files.pop('.meta', None)

        data_file, meta_file, ts_file, durable_file = tuple(
            [accepted_files.get(ext)
             for ext in ('.data', '.meta', '.ts', '.durable')])

        return ((data_file is None or durable_file is not None)
                and (data_file is None and meta_file is None
                     and ts_file is None and durable_file is None)
                or (ts_file is not None and data_file is None
                    and meta_file is None and durable_file is None)
                or (data_file is not None and durable_file is not None
                    and ts_file is None)
                or (durable_file is not None and meta_file is None
                    and ts_file is None))
        if super(ECDiskFileManager, self)._verify_ondisk_files(
                results, **kwargs):
            have_data_file = results['data_file'] is not None
            have_durable = results.get('durable_frag_set') is not None
            return have_data_file == have_durable
        return False
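Reviewer note: the replacement verification reduces the EC contract to one invariant on top of the base class checks: a chosen .data file and a durable frag set must be present together or absent together. A minimal sketch of that final check, with the results dict shape assumed from the surrounding code:

    def ec_fileset_ok(results):
        # both present (a durable data file) or both absent (e.g. tombstone
        # only) is compliant; a mismatch is a broken EC fileset
        have_data_file = results.get('data_file') is not None
        have_durable = results.get('durable_frag_set') is not None
        return have_data_file == have_durable

    assert ec_fileset_ok({'data_file': None, 'durable_frag_set': None})
    assert not ec_fileset_ok({'data_file': 'x.data', 'durable_frag_set': None})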
    def _hash_suffix(self, path, reclaim_age):
        """

@@ -2410,12 +2462,12 @@ class ECDiskFileManager(BaseDiskFileManager):

        # here we flatten out the hashers hexdigest into a dictionary instead
        # of just returning the one hexdigest for the whole suffix
        def mapper(filename):
            info = self.parse_on_disk_filename(filename)
            fi = info['frag_index']
            if fi is None:
                return None, filename
            else:
                return fi, info['timestamp'].internal

            info = self.parse_on_disk_filename(filename)
            fi = info['frag_index']
            if fi is None:
                return None, filename
            else:
                return fi, info['timestamp'].internal

        hash_per_fi = self._hash_suffix_dir(path, mapper, reclaim_age)
        return dict((fi, md5.hexdigest()) for fi, md5 in hash_per_fi.items())
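Reviewer note: the mapper routes each filename into a per-fragment-index hash bucket, with a None bucket for files that carry no index (.ts, .meta, .durable), and the suffix hash becomes one hexdigest per frag index. A rough standalone sketch of that flattening, where the filename scheme and parser are simplified stand-ins for parse_on_disk_filename:

    from hashlib import md5

    def parse(filename):
        # hypothetical name scheme: '<ts>#<frag_index>.data' or '<ts>.ts'
        stem, ext = filename.rsplit('.', 1)
        if ext == 'data' and '#' in stem:
            ts, fi = stem.split('#')
            return int(fi), ts        # hash the timestamp under this index
        return None, filename         # no frag index: hash the whole name

    hash_per_fi = {}
    for name in ('1000#1.data', '1000#2.data', '900.durable'):
        fi, to_hash = parse(name)
        hash_per_fi.setdefault(fi, md5()).update(to_hash.encode('utf8'))

    # flatten the hashers into {frag_index: hexdigest}, as the method does
    print(dict((fi, h.hexdigest()) for fi, h in hash_per_fi.items()))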
@@ -254,6 +254,7 @@ class DiskFile(object):

        self._metadata = None
        self._fp = None
        self._filesystem = fs
        self.fragments = None

    def open(self):
        """

@@ -421,3 +422,5 @@ class DiskFile(object):

        return Timestamp(self._metadata.get('X-Timestamp'))

    data_timestamp = timestamp

    durable_timestamp = timestamp
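Reviewer note: assigning an existing property object to new class attributes, as done above, binds the same getter under several names. A tiny sketch of the pattern:

    class Stamped(object):
        def __init__(self, value):
            self._value = value

        @property
        def timestamp(self):
            return self._value

        # bind the same property object under more specific names
        data_timestamp = timestamp
        durable_timestamp = timestamp

    s = Stamped(1234)
    assert s.timestamp == s.data_timestamp == s.durable_timestamp == 1234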
@@ -819,8 +819,8 @@ class ObjectReconstructor(Daemon):

            dev_path = self._df_router[policy].get_dev_path(
                local_dev['device'])
            if not dev_path:
                self.logger.warn(_('%s is not mounted'),
                                 local_dev['device'])
                self.logger.warning(_('%s is not mounted'),
                                    local_dev['device'])
                continue
            obj_path = join(dev_path, data_dir)
            tmp_path = join(dev_path, get_tmp_dir(int(policy)))
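Reviewer note: this hunk, like the many that follow, is a mechanical rename. Logger.warn is an undocumented alias of Logger.warning and is deprecated in Python 3, so the tree is being moved to the canonical spelling; the two calls are otherwise interchangeable:

    import logging

    logging.basicConfig(level=logging.WARNING)
    logger = logging.getLogger('swift.obj.reconstructor')
    logger.warn('%s is not mounted', 'sdb1')     # deprecated alias
    logger.warning('%s is not mounted', 'sdb1')  # canonical spelling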
@@ -85,10 +85,11 @@ class ObjectReplicator(Daemon):

        if not self.rsync_module:
            self.rsync_module = '{replication_ip}::object'
            if config_true_value(conf.get('vm_test_mode', 'no')):
                self.logger.warn('Option object-replicator/vm_test_mode is '
                                 'deprecated and will be removed in a future '
                                 'version. Update your configuration to use '
                                 'option object-replicator/rsync_module.')
                self.logger.warning('Option object-replicator/vm_test_mode '
                                    'is deprecated and will be removed in a '
                                    'future version. Update your '
                                    'configuration to use option '
                                    'object-replicator/rsync_module.')
                self.rsync_module += '{replication_port}'
        self.http_timeout = int(conf.get('http_timeout', 60))
        self.lockup_timeout = int(conf.get('lockup_timeout', 1800))
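Reviewer note: rsync_module is a template expanded per device, so appending '{replication_port}' preserves the old vm_test_mode behaviour of one rsync module per port. A sketch of how such a template might be expanded; the device dict keys follow the ring's replication fields, and the plain format() call is illustrative rather than Swift's exact helper:

    dev = {'replication_ip': '127.0.0.1', 'replication_port': 6010}
    rsync_module = '{replication_ip}::object' + '{replication_port}'
    print(rsync_module.format(**dev))  # 127.0.0.1::object6010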
@@ -109,10 +110,10 @@ class ObjectReplicator(Daemon):

        self.handoff_delete = config_auto_int_value(
            conf.get('handoff_delete', 'auto'), 0)
        if any((self.handoff_delete, self.handoffs_first)):
            self.logger.warn('Handoff only mode is not intended for normal '
                             'operation, please disable handoffs_first and '
                             'handoff_delete before the next '
                             'normal rebalance')
            self.logger.warning('Handoff only mode is not intended for normal '
                                'operation, please disable handoffs_first and '
                                'handoff_delete before the next '
                                'normal rebalance')
        self._diskfile_mgr = DiskFileManager(conf, self.logger)

    def _zero_stats(self):
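Reviewer note: handoff_delete accepts either an integer or 'auto', and a config_auto_int_value style helper is assumed to fall back to the supplied default for 'auto' or unset values. A minimal sketch of that parsing under those assumptions:

    def auto_int(value, default):
        # hypothetical re-creation of a config_auto_int_value style helper
        if value is None or str(value).lower() == 'auto':
            return default
        return int(value)

    assert auto_int('auto', 0) == 0
    assert auto_int('2', 0) == 2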
@@ -566,6 +567,7 @@ class ObjectReplicator(Daemon):

            [(dev['replication_ip'], dev['device'])
             for dev in policy.object_ring.devs if dev])
        data_dir = get_data_dir(policy)
        found_local = False
        for local_dev in [dev for dev in policy.object_ring.devs
                          if (dev
                              and is_local_device(ips,

@@ -574,6 +576,7 @@ class ObjectReplicator(Daemon):

                                                  dev['replication_port'])
                              and (override_devices is None
                                   or dev['device'] in override_devices))]:
            found_local = True
            dev_path = join(self.devices_dir, local_dev['device'])
            obj_path = join(dev_path, data_dir)
            tmp_path = join(dev_path, get_tmp_dir(policy))

@@ -583,7 +586,8 @@ class ObjectReplicator(Daemon):

                                          failure_dev['device'])
                                         for failure_dev in policy.object_ring.devs
                                         if failure_dev])
                self.logger.warn(_('%s is not mounted'), local_dev['device'])
                self.logger.warning(
                    _('%s is not mounted'), local_dev['device'])
                continue
            unlink_older_than(tmp_path, time.time() - self.reclaim_age)
            if not os.path.exists(obj_path):

@@ -626,6 +630,10 @@ class ObjectReplicator(Daemon):

                                         for failure_dev in policy.object_ring.devs
                                         if failure_dev])
                continue
        if not found_local:
            self.logger.error("Can't find itself %s with port %s in ring "
                              "file, not replicating",
                              ", ".join(ips), self.port)
        return jobs

    def collect_jobs(self, override_devices=None, override_partitions=None,
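Reviewer note: found_local is only set while iterating ring devices that match one of this node's IPs and its replication port, so if the loop never matches, the daemon now logs an error instead of silently building no jobs. A condensed sketch of the locality test, with is_local simplified to an ip/port comparison:

    def is_local(my_ips, my_port, dev):
        # simplified locality test: we own the device's ip and port
        return (dev['replication_ip'] in my_ips
                and dev['replication_port'] == my_port)

    my_ips, my_port = ['10.0.0.5'], 6000
    devs = [{'replication_ip': '10.0.0.7', 'replication_port': 6000},
            None]  # rings can contain holes, hence the `if dev` guards
    found_local = any(is_local(my_ips, my_port, dev) for dev in devs if dev)
    if not found_local:
        print("Can't find itself %s with port %s in ring file, "
              "not replicating" % (', '.join(my_ips), my_port))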
@@ -695,7 +703,7 @@ class ObjectReplicator(Daemon):

                self._add_failure_stats([(failure_dev['replication_ip'],
                                          failure_dev['device'])
                                         for failure_dev in job['nodes']])
                self.logger.warn(_('%s is not mounted'), job['device'])
                self.logger.warning(_('%s is not mounted'), job['device'])
                continue
            if not self.check_ring(job['policy'].object_ring):
                self.logger.info(_("Ring change detected. Aborting "
@@ -895,7 +895,10 @@ class ObjectController(BaseStorageServer):

                container, obj, request, device,
                policy)
        if orig_timestamp < req_timestamp:
            disk_file.delete(req_timestamp)
            try:
                disk_file.delete(req_timestamp)
            except DiskFileNoSpace:
                return HTTPInsufficientStorage(drive=device, request=request)
            self.container_update(
                'DELETE', account, container, obj, request,
                HeaderKeyDict({'x-timestamp': req_timestamp.internal}),
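Reviewer note: wrapping the delete in try/except turns a full-drive condition into a 507 response instead of an unhandled exception, since writing the tombstone itself needs disk space. A schematic of the control flow, with the exception and status codes standing in for the real swift imports:

    class DiskFileNoSpace(Exception):
        pass

    class FullDiskFile(object):
        def delete(self, timestamp):
            # writing the .ts tombstone itself needs space, so it can fail
            raise DiskFileNoSpace()

    def delete_object(disk_file, req_timestamp):
        try:
            disk_file.delete(req_timestamp)
        except DiskFileNoSpace:
            return 507  # HTTPInsufficientStorage
        return 204      # HTTPNoContent

    assert delete_object(FullDiskFile(), 1234) == 507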
@@ -48,7 +48,7 @@ class ObjectUpdater(Daemon):

        self.container_ring = None
        self.concurrency = int(conf.get('concurrency', 1))
        self.slowdown = float(conf.get('slowdown', 0.01))
        self.node_timeout = int(conf.get('node_timeout', 10))
        self.node_timeout = float(conf.get('node_timeout', 10))
        self.conn_timeout = float(conf.get('conn_timeout', 0.5))
        self.successes = 0
        self.failures = 0
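Reviewer note: parsing node_timeout with float() rather than int() lets operators configure sub-second backend timeouts without changing the default. The difference in a few lines:

    conf = {'node_timeout': '0.5'}
    # the old int() parsing would raise ValueError on '0.5'
    node_timeout = float(conf.get('node_timeout', 10))
    assert node_timeout == 0.5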
@@ -84,7 +84,7 @@ class ObjectUpdater(Daemon):

            if self.mount_check and \
                    not ismount(os.path.join(self.devices, device)):
                self.logger.increment('errors')
                self.logger.warn(
                self.logger.warning(
                    _('Skipping %s as it is not mounted'), device)
                continue
            while len(pids) >= self.concurrency:

@@ -127,7 +127,7 @@ class ObjectUpdater(Daemon):

            if self.mount_check and \
                    not ismount(os.path.join(self.devices, device)):
                self.logger.increment('errors')
                self.logger.warn(
                self.logger.warning(
                    _('Skipping %s as it is not mounted'), device)
                continue
            self.object_sweep(os.path.join(self.devices, device))
@@ -159,8 +159,9 @@ class ObjectUpdater(Daemon):

            try:
                base, policy = split_policy_string(asyncdir)
            except PolicyError as e:
                self.logger.warn(_('Directory %r does not map '
                                   'to a valid policy (%s)') % (asyncdir, e))
                self.logger.warning(_('Directory %r does not map '
                                      'to a valid policy (%s)') %
                                    (asyncdir, e))
                continue
            for prefix in self._listdir(async_pending):
                prefix_path = os.path.join(async_pending, prefix)
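Reviewer note: split_policy_string is expected to map a directory like 'async_pending-1' back to its base name and storage policy, raising PolicyError for unknown indexes; the hunk itself only rewraps the warning it logs. A rough sketch of that parsing, where the name scheme and policy set are assumptions for illustration:

    class PolicyError(ValueError):
        pass

    KNOWN_POLICIES = {0, 1}  # hypothetical configured policy indexes

    def split_policy(dirname, base='async_pending'):
        if dirname == base:
            return base, 0                  # bare name: legacy policy 0
        prefix, _, idx = dirname.partition('-')
        if prefix != base or not idx.isdigit() \
                or int(idx) not in KNOWN_POLICIES:
            raise PolicyError('%r does not map to a valid policy' % dirname)
        return base, int(idx)

    assert split_policy('async_pending-1') == ('async_pending', 1)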
@@ -235,17 +235,17 @@ def cors_validation(func):

            # - headers provided by the user in
            #   x-container-meta-access-control-expose-headers
            if 'Access-Control-Expose-Headers' not in resp.headers:
                expose_headers = [
                expose_headers = set([
                    'cache-control', 'content-language', 'content-type',
                    'expires', 'last-modified', 'pragma', 'etag',
                    'x-timestamp', 'x-trans-id']
                    'x-timestamp', 'x-trans-id'])
                for header in resp.headers:
                    if header.startswith('X-Container-Meta') or \
                            header.startswith('X-Object-Meta'):
                        expose_headers.append(header.lower())
                        expose_headers.add(header.lower())
                if cors_info.get('expose_headers'):
                    expose_headers.extend(
                        [header_line.strip()
                    expose_headers = expose_headers.union(
                        [header_line.strip().lower()
                         for header_line in
                         cors_info['expose_headers'].split(' ')
                         if header_line.strip()])
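Reviewer note: switching expose_headers from a list to a set de-duplicates the defaults, the container/object metadata headers, and the operator-supplied values, and lower-casing the supplied names makes the merge case-insensitive. The same merge in isolation:

    expose_headers = set([
        'cache-control', 'content-language', 'content-type',
        'expires', 'last-modified', 'pragma', 'etag',
        'x-timestamp', 'x-trans-id'])
    expose_headers.add('x-object-meta-color')        # from response headers
    expose_headers = expose_headers.union(
        h.strip().lower() for h in 'Etag X-Timestamp'.split(' ')
        if h.strip())
    # duplicates collapse regardless of the case they arrived in
    print(', '.join(sorted(expose_headers)))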
@@ -317,6 +317,7 @@ def get_account_info(env, app, swift_source=None):

    This call bypasses auth. Success does not imply that the request has
    authorization to the account.

    :raises ValueError: when path can't be split(path, 2, 4)
    """
    (version, account, _junk, _junk) = \
@@ -336,9 +337,10 @@ def _get_cache_key(account, container):

    """
    Get the keys for both memcache (cache_key) and env (env_key)
    where info about accounts and containers is cached

    :param account: The name of the account
    :param container: The name of the container (or None if account)
    :returns a tuple of (cache_key, env_key)
    :returns: a tuple of (cache_key, env_key)
    """

    if container:
@@ -356,10 +358,11 @@ def _get_cache_key(account, container):

def get_object_env_key(account, container, obj):
    """
    Get the keys for env (env_key) where info about object is cached

    :param account: The name of the account
    :param container: The name of the container
    :param obj: The name of the object
    :returns a string env_key
    :returns: a string env_key
    """
    env_key = 'swift.object/%s/%s/%s' % (account,
                                         container, obj)
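Reviewer note: these docstring hunks add the trailing colon that Sphinx's info field list syntax requires; without it, ':returns ...' renders as plain text rather than a Returns field. The corrected form in context:

    def get_object_env_key(account, container, obj):
        """
        Get the env key where info about an object is cached.

        :param account: The name of the account
        :param container: The name of the container
        :param obj: The name of the object
        :returns: a string env_key
        """
        return 'swift.object/%s/%s/%s' % (account, container, obj)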
@@ -460,7 +463,7 @@ def _get_info_cache(app, env, account, container=None):

    :param app: the application object
    :param env: the environment used by the current request
    :returns the cached info or None if not cached
    :returns: the cached info or None if not cached
    """

    cache_key, env_key = _get_cache_key(account, container)
@@ -932,20 +935,19 @@ class ResumingGetter(object):

                        self.pop_range()
            except StopIteration:
                req.environ['swift.non_client_disconnect'] = True
                return

        except ChunkReadTimeout:
            self.app.exception_occurred(node[0], _('Object'),
                                        _('Trying to read during GET'))
            raise
        except ChunkWriteTimeout:
            self.app.logger.warn(
            self.app.logger.warning(
                _('Client did not read from proxy within %ss') %
                self.app.client_timeout)
            self.app.logger.increment('client_timeouts')
        except GeneratorExit:
            if not req.environ.get('swift.non_client_disconnect'):
                self.app.logger.warn(_('Client disconnected on read'))
                self.app.logger.warning(_('Client disconnected on read'))
        except Exception:
            self.app.logger.exception(_('Trying to send to client'))
            raise
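Reviewer note: ChunkWriteTimeout here signals that the client, not the backend, stalled while the proxy was writing response chunks, which is why it is logged and counted rather than re-raised. A schematic of that pattern; eventlet's Timeout is the real mechanism, and these classes are stand-ins:

    class ChunkWriteTimeout(Exception):
        pass

    def serve_chunks(chunks, write, logger):
        try:
            for chunk in chunks:
                write(chunk)  # assumed to raise ChunkWriteTimeout on a stall
        except ChunkWriteTimeout:
            # the client stopped reading; log and count it rather than
            # treating it as a proxy-side failure
            logger('Client did not read from proxy within %ss' % 60)

    def stalled_client(chunk):
        raise ChunkWriteTimeout()

    serve_chunks([b'x'], stalled_client, print)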
@@ -1283,7 +1285,7 @@ class Controller(object):

    def generate_request_headers(self, orig_req=None, additional=None,
                                 transfer=False):
        """
        Create a list of headers to be used in backend requets
        Create a list of headers to be used in backend requests

        :param orig_req: the original request sent by the client to the proxy
        :param additional: additional headers to send to the backend