Import upstream version 1.0.1

parent fe067413b1
commit c2f9d1e560
AUTHORS (new file, 21 lines)
@@ -0,0 +1,21 @@
Maintainer
----------
OpenStack, LLC.
IRC: #openstack on irc.freenode.net

Original Authors
----------------
Michael Barton
John Dickinson
Greg Holt
Greg Lange
Jay Payne
Will Reese
Chuck Thier

Contributors
------------
Chmouel Boudjnah
Ed Leafe
Conrad Weidenkeller
Monty Taylor
LICENSE (new file, 202 lines)
@@ -0,0 +1,202 @@

                                 Apache License
                           Version 2.0, January 2004
                        http://www.apache.org/licenses/

   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

   1. Definitions.

      "License" shall mean the terms and conditions for use, reproduction,
      and distribution as defined by Sections 1 through 9 of this document.

      "Licensor" shall mean the copyright owner or entity authorized by
      the copyright owner that is granting the License.

      "Legal Entity" shall mean the union of the acting entity and all
      other entities that control, are controlled by, or are under common
      control with that entity. For the purposes of this definition,
      "control" means (i) the power, direct or indirect, to cause the
      direction or management of such entity, whether by contract or
      otherwise, or (ii) ownership of fifty percent (50%) or more of the
      outstanding shares, or (iii) beneficial ownership of such entity.

      "You" (or "Your") shall mean an individual or Legal Entity
      exercising permissions granted by this License.

      "Source" form shall mean the preferred form for making modifications,
      including but not limited to software source code, documentation
      source, and configuration files.

      "Object" form shall mean any form resulting from mechanical
      transformation or translation of a Source form, including but
      not limited to compiled object code, generated documentation,
      and conversions to other media types.

      "Work" shall mean the work of authorship, whether in Source or
      Object form, made available under the License, as indicated by a
      copyright notice that is included in or attached to the work
      (an example is provided in the Appendix below).

      "Derivative Works" shall mean any work, whether in Source or Object
      form, that is based on (or derived from) the Work and for which the
      editorial revisions, annotations, elaborations, or other modifications
      represent, as a whole, an original work of authorship. For the purposes
      of this License, Derivative Works shall not include works that remain
      separable from, or merely link (or bind by name) to the interfaces of,
      the Work and Derivative Works thereof.

      "Contribution" shall mean any work of authorship, including
      the original version of the Work and any modifications or additions
      to that Work or Derivative Works thereof, that is intentionally
      submitted to Licensor for inclusion in the Work by the copyright owner
      or by an individual or Legal Entity authorized to submit on behalf of
      the copyright owner. For the purposes of this definition, "submitted"
      means any form of electronic, verbal, or written communication sent
      to the Licensor or its representatives, including but not limited to
      communication on electronic mailing lists, source code control systems,
      and issue tracking systems that are managed by, or on behalf of, the
      Licensor for the purpose of discussing and improving the Work, but
      excluding communication that is conspicuously marked or otherwise
      designated in writing by the copyright owner as "Not a Contribution."

      "Contributor" shall mean Licensor and any individual or Legal Entity
      on behalf of whom a Contribution has been received by Licensor and
      subsequently incorporated within the Work.

   2. Grant of Copyright License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      copyright license to reproduce, prepare Derivative Works of,
      publicly display, publicly perform, sublicense, and distribute the
      Work and such Derivative Works in Source or Object form.

   3. Grant of Patent License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      (except as stated in this section) patent license to make, have made,
      use, offer to sell, sell, import, and otherwise transfer the Work,
      where such license applies only to those patent claims licensable
      by such Contributor that are necessarily infringed by their
      Contribution(s) alone or by combination of their Contribution(s)
      with the Work to which such Contribution(s) was submitted. If You
      institute patent litigation against any entity (including a
      cross-claim or counterclaim in a lawsuit) alleging that the Work
      or a Contribution incorporated within the Work constitutes direct
      or contributory patent infringement, then any patent licenses
      granted to You under this License for that Work shall terminate
      as of the date such litigation is filed.

   4. Redistribution. You may reproduce and distribute copies of the
      Work or Derivative Works thereof in any medium, with or without
      modifications, and in Source or Object form, provided that You
      meet the following conditions:

      (a) You must give any other recipients of the Work or
          Derivative Works a copy of this License; and

      (b) You must cause any modified files to carry prominent notices
          stating that You changed the files; and

      (c) You must retain, in the Source form of any Derivative Works
          that You distribute, all copyright, patent, trademark, and
          attribution notices from the Source form of the Work,
          excluding those notices that do not pertain to any part of
          the Derivative Works; and

      (d) If the Work includes a "NOTICE" text file as part of its
          distribution, then any Derivative Works that You distribute must
          include a readable copy of the attribution notices contained
          within such NOTICE file, excluding those notices that do not
          pertain to any part of the Derivative Works, in at least one
          of the following places: within a NOTICE text file distributed
          as part of the Derivative Works; within the Source form or
          documentation, if provided along with the Derivative Works; or,
          within a display generated by the Derivative Works, if and
          wherever such third-party notices normally appear. The contents
          of the NOTICE file are for informational purposes only and
          do not modify the License. You may add Your own attribution
          notices within Derivative Works that You distribute, alongside
          or as an addendum to the NOTICE text from the Work, provided
          that such additional attribution notices cannot be construed
          as modifying the License.

      You may add Your own copyright statement to Your modifications and
      may provide additional or different license terms and conditions
      for use, reproduction, or distribution of Your modifications, or
      for any such Derivative Works as a whole, provided Your use,
      reproduction, and distribution of the Work otherwise complies with
      the conditions stated in this License.

   5. Submission of Contributions. Unless You explicitly state otherwise,
      any Contribution intentionally submitted for inclusion in the Work
      by You to the Licensor shall be under the terms and conditions of
      this License, without any additional terms or conditions.
      Notwithstanding the above, nothing herein shall supersede or modify
      the terms of any separate license agreement you may have executed
      with Licensor regarding such Contributions.

   6. Trademarks. This License does not grant permission to use the trade
      names, trademarks, service marks, or product names of the Licensor,
      except as required for reasonable and customary use in describing the
      origin of the Work and reproducing the content of the NOTICE file.

   7. Disclaimer of Warranty. Unless required by applicable law or
      agreed to in writing, Licensor provides the Work (and each
      Contributor provides its Contributions) on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied, including, without limitation, any warranties or conditions
      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
      PARTICULAR PURPOSE. You are solely responsible for determining the
      appropriateness of using or redistributing the Work and assume any
      risks associated with Your exercise of permissions under this License.

   8. Limitation of Liability. In no event and under no legal theory,
      whether in tort (including negligence), contract, or otherwise,
      unless required by applicable law (such as deliberate and grossly
      negligent acts) or agreed to in writing, shall any Contributor be
      liable to You for damages, including any direct, indirect, special,
      incidental, or consequential damages of any character arising as a
      result of this License or out of the use or inability to use the
      Work (including but not limited to damages for loss of goodwill,
      work stoppage, computer failure or malfunction, or any and all
      other commercial damages or losses), even if such Contributor
      has been advised of the possibility of such damages.

   9. Accepting Warranty or Additional Liability. While redistributing
      the Work or Derivative Works thereof, You may choose to offer,
      and charge a fee for, acceptance of support, warranty, indemnity,
      or other liability obligations and/or rights consistent with this
      License. However, in accepting such obligations, You may act only
      on Your own behalf and on Your sole responsibility, not on behalf
      of any other Contributor, and only if You agree to indemnify,
      defend, and hold each Contributor harmless for any liability
      incurred by, or claims asserted against, such Contributor by reason
      of your accepting any such warranty or additional liability.

   END OF TERMS AND CONDITIONS

   APPENDIX: How to apply the Apache License to your work.

      To apply the Apache License to your work, attach the following
      boilerplate notice, with the fields enclosed by brackets "[]"
      replaced with your own identifying information. (Don't include
      the brackets!) The text should be enclosed in the appropriate
      comment syntax for the file format. We also recommend that a
      file or class name and description of purpose be included on the
      same "printed page" as the copyright notice for easier
      identification within third-party archives.

   Copyright [yyyy] [name of copyright owner]

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
MANIFEST.in (new file, 4 lines)
@@ -0,0 +1,4 @@
include AUTHORS LICENSE
graft doc
graft etc
PKG-INFO (2 changes)
@@ -1,6 +1,6 @@
 Metadata-Version: 1.0
 Name: swift
-Version: 1.0.0-1
+Version: 1.0.1
 Summary: Swift
 Home-page: https://launchpad.net/swift
 Author: OpenStack, LLC.
README (2 changes)
@@ -4,7 +4,7 @@ Swift
 A distributed object store that was originally developed as the basis for
 Rackspace's Cloud Files.

-To build documentation run `make html` in the /doc folder, and then browse to
+To build documentation run `python setup.py build_sphinx`, and then browse to
 /doc/build/html/index.html.

 The best place to get started is the "SAIO - Swift All In One", which will walk
@@ -82,10 +82,15 @@ def do_start(server, once=False):
         pass
     try:
         if once:
-            os.execl('/usr/bin/swift-%s' % server, server,
+            os.execlp('swift-%s' % server, server,
                 ini_file, 'once')
         else:
-            os.execl('/usr/bin/swift-%s' % server, server, ini_file)
+            os.execlp('swift-%s' % server, server, ini_file)
     except OSError:
         print 'unable to launch %s' % server
     sys.exit(0)
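The point of the hunk above is that os.execlp resolves the program name against PATH instead of requiring the hard-coded /usr/bin/ prefix that os.execl needed, so the swift-* binaries can live anywhere on PATH (for example, under /usr/local/bin when installed from source). A minimal sketch of the idiom in the same Python 2 style as the scripts in this commit; the launch helper and its arguments are illustrative, not part of the commit:

    import os

    def launch(server, ini_file):
        # execlp searches PATH for 'swift-<server>'; on success the call
        # replaces the current process and never returns.
        try:
            os.execlp('swift-%s' % server, server, ini_file)
        except OSError:
            # raised when no matching executable is found on PATH
            print 'unable to launch %s' % server

    launch('object-server', '/etc/swift/object-server.conf')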
bin/swift-object-replicator (new executable file, 93 lines)
@@ -0,0 +1,93 @@
#!/usr/bin/python
# Copyright (c) 2010 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import sys
from ConfigParser import ConfigParser
import logging
import time

from eventlet import sleep, hubs
hubs.use_hub('poll')

from swift.obj.replicator import ObjectReplicator
from swift.common.utils import get_logger, drop_privileges, LoggerFileObject

TRUE_VALUES = set(('true', '1', 'yes', 'True', 'Yes'))

def read_configs(conf_file):
    c = ConfigParser()
    if not c.read(conf_file):
        print "Unable to read config file: %s" % conf_file
        sys.exit(1)
    conf = dict(c.items('object-server'))
    repl_conf = dict(c.items('object-replicator'))
    if not repl_conf:
        sys.exit()
    conf['replication_concurrency'] = repl_conf.get('concurrency', 1)
    conf['vm_test_mode'] = repl_conf.get('vm_test_mode', 'no')
    conf['daemonize'] = repl_conf.get('daemonize', 'yes')
    conf['run_pause'] = repl_conf.get('run_pause', '30')
    conf['log_facility'] = repl_conf.get('log_facility', 'LOG_LOCAL1')
    conf['log_level'] = repl_conf.get('log_level', 'INFO')
    conf['timeout'] = repl_conf.get('timeout', '5')
    conf['stats_interval'] = repl_conf.get('stats_interval', '3600')
    conf['reclaim_age'] = int(repl_conf.get('reclaim_age', 86400))

    return conf

if __name__ == '__main__':
    if len(sys.argv) < 2:
        print "Usage: object-replicator CONFIG_FILE [once]"
        sys.exit()
    try:
        conf = read_configs(sys.argv[1])
    except:
        print "Problem reading the config. Aborting object replication."
        sys.exit()
    once = len(sys.argv) > 2 and sys.argv[2] == 'once'
    logger = get_logger(conf, 'object-replicator')
    # log uncaught exceptions
    sys.excepthook = lambda *exc_info: \
        logger.critical('UNCAUGHT EXCEPTION', exc_info=exc_info)
    sys.stdout = sys.stderr = LoggerFileObject(logger)
    drop_privileges(conf.get('user', 'swift'))
    if not once and conf.get('daemonize', 'true') in TRUE_VALUES:
        logger.info("Starting object replicator in daemon mode.")
        # Run the replicator continually
        while True:
            start = time.time()
            logger.info("Starting object replication pass.")
            # Run the replicator
            replicator = ObjectReplicator(conf, logger)
            replicator.run()
            total = (time.time() - start)/60
            # Reload the config
            logger.info("Object replication complete. (%.02f minutes)" % total)
            conf = read_configs(sys.argv[1])
            if conf.get('daemonize', 'true') not in TRUE_VALUES:
                # Stop running
                logger.info("Daemon mode turned off in config, stopping.")
                break
            logger.debug('Replication sleeping for %s seconds.' %
                conf['run_pause'])
            sleep(int(conf['run_pause']))
    else:
        start = time.time()
        logger.info("Running object replicator in script mode.")
        replicator = ObjectReplicator(conf, logger)
        replicator.run()
        total = (time.time() - start)/60
        logger.info("Object replication complete. (%.02f minutes)" % total)
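For reference, read_configs() above expects a single INI file containing both an [object-server] and an [object-replicator] section, and flattens them into one dict. A minimal sketch of that layout and parsing, again in Python 2; the section and option names come from the script, but the values below are illustrative assumptions, not defaults defined by the commit:

    from ConfigParser import ConfigParser
    from StringIO import StringIO

    SAMPLE_CONF = """\
    [object-server]
    user = swift

    [object-replicator]
    concurrency = 1
    run_pause = 30
    daemonize = yes
    """

    c = ConfigParser()
    c.readfp(StringIO(SAMPLE_CONF.replace('    ', '')))
    # Same flattening as read_configs(): start from the server section,
    # then merge selected replicator options with fall-back defaults.
    conf = dict(c.items('object-server'))
    repl_conf = dict(c.items('object-replicator'))
    conf['run_pause'] = repl_conf.get('run_pause', '30')
    conf['daemonize'] = repl_conf.get('daemonize', 'yes')
    print conf['user'], conf['run_pause']  # swift 30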
@@ -30,10 +30,15 @@ if __name__ == '__main__':
         print "Unable to read config file."
         sys.exit(1)
     conf = dict(c.items('proxy-server'))
+    if c.has_section('auth-server'):
+        auth_conf = dict(c.items('auth-server'))
+    else:
+        auth_conf = {}
     swift_dir = conf.get('swift_dir', '/etc/swift')
-    c = ConfigParser()
-    c.read(os.path.join(swift_dir, 'auth-server.conf'))
-    auth_conf = dict(c.items('auth-server'))
+    m, c = auth_conf.get('class',
+        'swift.common.auth.DevAuthMiddleware').rsplit('.', 1)
+    m = __import__(m, fromlist=[c])
+    authware = m.__dict__[c]
     memcache = MemcacheRing([s.strip() for s in
         conf.get('memcache_servers', '127.0.0.1:11211').split(',')
@@ -41,5 +46,5 @@ if __name__ == '__main__':
     logger = get_logger(conf, 'proxy')
     app = Application(conf, memcache, logger)
     # Wrap the app with auth
-    app = DevAuthMiddleware(app, auth_conf, memcache, logger)
+    app = authware(app, auth_conf, memcache, logger)
     run_wsgi(app, conf, logger=logger, default_port=80)
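The rewritten proxy startup above loads the auth middleware class from a dotted path in the config (the class option under [auth-server], defaulting to swift.common.auth.DevAuthMiddleware) rather than hard-coding DevAuthMiddleware. The rsplit/__import__ idiom it relies on is a standard way to turn such a string into a class object; a standalone sketch, using logging.Logger only as a stand-in target class:

    def load_class(dotted_path):
        # 'a.b.ClassName' -> module path 'a.b', attribute 'ClassName'
        module_name, class_name = dotted_path.rsplit('.', 1)
        # A non-empty fromlist makes __import__ return the leaf module
        # ('a.b') rather than the top-level package ('a').
        module = __import__(module_name, fromlist=[class_name])
        return getattr(module, class_name)

    Logger = load_class('logging.Logger')
    print Logger  # <class 'logging.Logger'>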
doc/Makefile (new file, 105 lines)
@@ -0,0 +1,105 @@
# Copyright (c) 2010 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Makefile for Sphinx documentation
#

# You can set these variables from the command line.
SPHINXOPTS    =
SPHINXBUILD   = sphinx-build
PAPER         =
BUILDDIR      = build
export PYTHONPATH = ../

# Internal variables.
PAPEROPT_a4     = -D latex_paper_size=a4
PAPEROPT_letter = -D latex_paper_size=letter
ALLSPHINXOPTS   = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source

.PHONY: help clean html dirhtml pickle json htmlhelp qthelp latex changes linkcheck doctest

help:
	@echo "Please use \`make <target>' where <target> is one of"
	@echo "  html      to make standalone HTML files"
	@echo "  dirhtml   to make HTML files named index.html in directories"
	@echo "  pickle    to make pickle files"
	@echo "  json      to make JSON files"
	@echo "  htmlhelp  to make HTML files and a HTML help project"
	@echo "  qthelp    to make HTML files and a qthelp project"
	@echo "  latex     to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
	@echo "  changes   to make an overview of all changed/added/deprecated items"
	@echo "  linkcheck to check all external links for integrity"
	@echo "  doctest   to run all doctests embedded in the documentation (if enabled)"

clean:
	-rm -rf $(BUILDDIR)/*

html:
	$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
	@echo
	@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."

dirhtml:
	$(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
	@echo
	@echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."

pickle:
	$(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
	@echo
	@echo "Build finished; now you can process the pickle files."

json:
	$(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
	@echo
	@echo "Build finished; now you can process the JSON files."

htmlhelp:
	$(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
	@echo
	@echo "Build finished; now you can run HTML Help Workshop with the" \
	      ".hhp project file in $(BUILDDIR)/htmlhelp."

qthelp:
	$(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
	@echo
	@echo "Build finished; now you can run "qcollectiongenerator" with the" \
	      ".qhcp project file in $(BUILDDIR)/qthelp, like this:"
	@echo "# qcollectiongenerator $(BUILDDIR)/qthelp/Swift.qhcp"
	@echo "To view the help file:"
	@echo "# assistant -collectionFile $(BUILDDIR)/qthelp/Swift.qhc"

latex:
	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
	@echo
	@echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
	@echo "Run \`make all-pdf' or \`make all-ps' in that directory to" \
	      "run these through (pdf)latex."

changes:
	$(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
	@echo
	@echo "The overview file is in $(BUILDDIR)/changes."

linkcheck:
	$(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
	@echo
	@echo "Link check complete; look for any errors in the above output " \
	      "or in $(BUILDDIR)/linkcheck/output.txt."

doctest:
	$(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
	@echo "Testing of doctests in the sources finished, look at the " \
	      "results in $(BUILDDIR)/doctest/output.txt."
doc/source/_static/basic.css (new file, 416 lines)
@@ -0,0 +1,416 @@
/**
 * Sphinx stylesheet -- basic theme
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */

/* -- main layout ----------------------------------------------------------- */

div.clearer {
    clear: both;
}

/* -- relbar ---------------------------------------------------------------- */

div.related {
    width: 100%;
    font-size: 90%;
}

div.related h3 {
    display: none;
}

div.related ul {
    margin: 0;
    padding: 0 0 0 10px;
    list-style: none;
}

div.related li {
    display: inline;
}

div.related li.right {
    float: right;
    margin-right: 5px;
}

/* -- sidebar --------------------------------------------------------------- */

div.sphinxsidebarwrapper {
    padding: 10px 5px 0 10px;
}

div.sphinxsidebar {
    float: left;
    width: 230px;
    margin-left: -100%;
    font-size: 90%;
}

div.sphinxsidebar ul {
    list-style: none;
}

div.sphinxsidebar ul ul,
div.sphinxsidebar ul.want-points {
    margin-left: 20px;
    list-style: square;
}

div.sphinxsidebar ul ul {
    margin-top: 0;
    margin-bottom: 0;
}

div.sphinxsidebar form {
    margin-top: 10px;
}

div.sphinxsidebar input {
    border: 1px solid #98dbcc;
    font-family: sans-serif;
    font-size: 1em;
}

img {
    border: 0;
}

/* -- search page ----------------------------------------------------------- */

ul.search {
    margin: 10px 0 0 20px;
    padding: 0;
}

ul.search li {
    padding: 5px 0 5px 20px;
    background-image: url(file.png);
    background-repeat: no-repeat;
    background-position: 0 7px;
}

ul.search li a {
    font-weight: bold;
}

ul.search li div.context {
    color: #888;
    margin: 2px 0 0 30px;
    text-align: left;
}

ul.keywordmatches li.goodmatch a {
    font-weight: bold;
}

/* -- index page ------------------------------------------------------------ */

table.contentstable {
    width: 90%;
}

table.contentstable p.biglink {
    line-height: 150%;
}

a.biglink {
    font-size: 1.3em;
}

span.linkdescr {
    font-style: italic;
    padding-top: 5px;
    font-size: 90%;
}

/* -- general index --------------------------------------------------------- */

table.indextable td {
    text-align: left;
    vertical-align: top;
}

table.indextable dl, table.indextable dd {
    margin-top: 0;
    margin-bottom: 0;
}

table.indextable tr.pcap {
    height: 10px;
}

table.indextable tr.cap {
    margin-top: 10px;
    background-color: #f2f2f2;
}

img.toggler {
    margin-right: 3px;
    margin-top: 3px;
    cursor: pointer;
}

/* -- general body styles --------------------------------------------------- */

a.headerlink {
    visibility: hidden;
}

h1:hover > a.headerlink,
h2:hover > a.headerlink,
h3:hover > a.headerlink,
h4:hover > a.headerlink,
h5:hover > a.headerlink,
h6:hover > a.headerlink,
dt:hover > a.headerlink {
    visibility: visible;
}

div.body p.caption {
    text-align: inherit;
}

div.body td {
    text-align: left;
}

.field-list ul {
    padding-left: 1em;
}

.first {
}

p.rubric {
    margin-top: 30px;
    font-weight: bold;
}

/* -- sidebars -------------------------------------------------------------- */

div.sidebar {
    margin: 0 0 0.5em 1em;
    border: 1px solid #ddb;
    padding: 7px 7px 0 7px;
    background-color: #ffe;
    width: 40%;
    float: right;
}

p.sidebar-title {
    font-weight: bold;
}

/* -- topics ---------------------------------------------------------------- */

div.topic {
    border: 1px solid #ccc;
    padding: 7px 7px 0 7px;
    margin: 10px 0 10px 0;
}

p.topic-title {
    font-size: 1.1em;
    font-weight: bold;
    margin-top: 10px;
}

/* -- admonitions ----------------------------------------------------------- */

div.admonition {
    margin-top: 10px;
    margin-bottom: 10px;
    padding: 7px;
}

div.admonition dt {
    font-weight: bold;
}

div.admonition dl {
    margin-bottom: 0;
}

p.admonition-title {
    margin: 0px 10px 5px 0px;
    font-weight: bold;
}

div.body p.centered {
    text-align: center;
    margin-top: 25px;
}

/* -- tables ---------------------------------------------------------------- */

table.docutils {
    border: 0;
    border-collapse: collapse;
}

table.docutils td, table.docutils th {
    padding: 1px 8px 1px 0;
    border-top: 0;
    border-left: 0;
    border-right: 0;
    border-bottom: 1px solid #aaa;
}

table.field-list td, table.field-list th {
    border: 0 !important;
}

table.footnote td, table.footnote th {
    border: 0 !important;
}

th {
    text-align: left;
    padding-right: 5px;
}

/* -- other body styles ----------------------------------------------------- */

dl {
    margin-bottom: 15px;
}

dd p {
    margin-top: 0px;
}

dd ul, dd table {
    margin-bottom: 10px;
}

dd {
    margin-top: 3px;
    margin-bottom: 10px;
    margin-left: 30px;
}

dt:target, .highlight {
    background-color: #fbe54e;
}

dl.glossary dt {
    font-weight: bold;
    font-size: 1.1em;
}

.field-list ul {
    margin: 0;
    padding-left: 1em;
}

.field-list p {
    margin: 0;
}

.refcount {
    color: #060;
}

.optional {
    font-size: 1.3em;
}

.versionmodified {
    font-style: italic;
}

.system-message {
    background-color: #fda;
    padding: 5px;
    border: 3px solid red;
}

.footnote:target {
    background-color: #ffa
}

.line-block {
    display: block;
    margin-top: 1em;
    margin-bottom: 1em;
}

.line-block .line-block {
    margin-top: 0;
    margin-bottom: 0;
    margin-left: 1.5em;
}

/* -- code displays --------------------------------------------------------- */

pre {
    overflow: auto;
}

td.linenos pre {
    padding: 5px 0px;
    border: 0;
    background-color: transparent;
    color: #aaa;
}

table.highlighttable {
    margin-left: 0.5em;
}

table.highlighttable td {
    padding: 0 0.5em 0 0.5em;
}

tt.descname {
    background-color: transparent;
    font-weight: bold;
    font-size: 1.2em;
}

tt.descclassname {
    background-color: transparent;
}

tt.xref, a tt {
    background-color: transparent;
    font-weight: bold;
}

h1 tt, h2 tt, h3 tt, h4 tt, h5 tt, h6 tt {
    background-color: transparent;
}

/* -- math display ---------------------------------------------------------- */

img.math {
    vertical-align: middle;
}

div.body div.math p {
    text-align: center;
}

span.eqno {
    float: right;
}

/* -- printout stylesheet --------------------------------------------------- */

@media print {
    div.document,
    div.documentwrapper,
    div.bodywrapper {
        margin: 0 !important;
        width: 100%;
    }

    div.sphinxsidebar,
    div.related,
    div.footer,
    #top-link {
        display: none;
    }
}
doc/source/_static/default.css (new file, 230 lines)
@@ -0,0 +1,230 @@
/**
 * Sphinx stylesheet -- default theme
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */

@import url("basic.css");

/* -- page layout ----------------------------------------------------------- */

body {
    font-family: sans-serif;
    font-size: 100%;
    background-color: #11303d;
    color: #000;
    margin: 0;
    padding: 0;
}

div.document {
    background-color: #1c4e63;
}

div.documentwrapper {
    float: left;
    width: 100%;
}

div.bodywrapper {
    margin: 0 0 0 230px;
}

div.body {
    background-color: #ffffff;
    color: #000000;
    padding: 0 20px 30px 20px;
}

div.footer {
    color: #ffffff;
    width: 100%;
    padding: 9px 0 9px 0;
    text-align: center;
    font-size: 75%;
}

div.footer a {
    color: #ffffff;
    text-decoration: underline;
}

div.related {
    background-color: #133f52;
    line-height: 30px;
    color: #ffffff;
}

div.related a {
    color: #ffffff;
}

div.sphinxsidebar {
}

div.sphinxsidebar h3 {
    font-family: 'Trebuchet MS', sans-serif;
    color: #ffffff;
    font-size: 1.4em;
    font-weight: normal;
    margin: 0;
    padding: 0;
}

div.sphinxsidebar h3 a {
    color: #ffffff;
}

div.sphinxsidebar h4 {
    font-family: 'Trebuchet MS', sans-serif;
    color: #ffffff;
    font-size: 1.3em;
    font-weight: normal;
    margin: 5px 0 0 0;
    padding: 0;
}

div.sphinxsidebar p {
    color: #ffffff;
}

div.sphinxsidebar p.topless {
    margin: 5px 10px 10px 10px;
}

div.sphinxsidebar ul {
    margin: 10px;
    padding: 0;
    color: #ffffff;
}

div.sphinxsidebar a {
    color: #98dbcc;
}

div.sphinxsidebar input {
    border: 1px solid #98dbcc;
    font-family: sans-serif;
    font-size: 1em;
}

/* -- body styles ----------------------------------------------------------- */

a {
    color: #355f7c;
    text-decoration: none;
}

a:hover {
    text-decoration: underline;
}

div.body p, div.body dd, div.body li {
    text-align: left;
    line-height: 130%;
}

div.body h1,
div.body h2,
div.body h3,
div.body h4,
div.body h5,
div.body h6 {
    font-family: 'Trebuchet MS', sans-serif;
    background-color: #f2f2f2;
    font-weight: normal;
    color: #20435c;
    border-bottom: 1px solid #ccc;
    margin: 20px -20px 10px -20px;
    padding: 3px 0 3px 10px;
}

div.body h1 { margin-top: 0; font-size: 200%; }
div.body h2 { font-size: 160%; }
div.body h3 { font-size: 140%; }
div.body h4 { font-size: 120%; }
div.body h5 { font-size: 110%; }
div.body h6 { font-size: 100%; }

a.headerlink {
    color: #c60f0f;
    font-size: 0.8em;
    padding: 0 4px 0 4px;
    text-decoration: none;
}

a.headerlink:hover {
    background-color: #c60f0f;
    color: white;
}

div.body p, div.body dd, div.body li {
    text-align: left;
    line-height: 130%;
}

div.admonition p.admonition-title + p {
    display: inline;
}

div.admonition p {
    margin-bottom: 5px;
}

div.admonition pre {
    margin-bottom: 5px;
}

div.admonition ul, div.admonition ol {
    margin-bottom: 5px;
}

div.note {
    background-color: #eee;
    border: 1px solid #ccc;
}

div.seealso {
    background-color: #ffc;
    border: 1px solid #ff6;
}

div.topic {
    background-color: #eee;
}

div.warning {
    background-color: #ffe4e4;
    border: 1px solid #f66;
}

p.admonition-title {
    display: inline;
}

p.admonition-title:after {
    content: ":";
}

pre {
    padding: 5px;
    background-color: #eeffcc;
    color: #333333;
    line-height: 120%;
    border: 1px solid #ac9;
    border-left: none;
    border-right: none;
}

tt {
    background-color: #ecf0f3;
    padding: 0 1px 0 1px;
    font-size: 0.95em;
}

.warning tt {
    background: #efc2c2;
}

.note tt {
    background: #d6d6d6;
}
doc/source/account.rst (new file, 36 lines)
@@ -0,0 +1,36 @@
.. _account:

*******
Account
*******

.. _account-server:

Account Server
==============

.. automodule:: swift.account.server
    :members:
    :undoc-members:
    :show-inheritance:

.. _account-auditor:

Account Auditor
===============

.. automodule:: swift.account.auditor
    :members:
    :undoc-members:
    :show-inheritance:

.. _account-reaper:

Account Reaper
==============

.. automodule:: swift.account.reaper
    :members:
    :undoc-members:
    :show-inheritance:
doc/source/auth.rst (new file, 15 lines)
@@ -0,0 +1,15 @@
.. _auth:

*************************
Developer's Authorization
*************************

.. _auth-server:

Auth Server
===========

.. automodule:: swift.auth.server
    :members:
    :undoc-members:
    :show-inheritance:
doc/source/conf.py (new file, 209 lines)
@@ -0,0 +1,209 @@
# -*- coding: utf-8 -*-
# Copyright (c) 2010 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

#
# Swift documentation build configuration file, created by
# sphinx-quickstart on Tue May 18 13:50:15 2010.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.

import sys, os

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.append(os.path.abspath('.'))

# -- General configuration -----------------------------------------------------

# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc']

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix of source filenames.
source_suffix = '.rst'

# The encoding of source files.
#source_encoding = 'utf-8'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'Swift'
copyright = u'2010, OpenStack, LLC.'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0'
# The full version, including alpha/beta/rc tags.
release = '1.0.1'

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'

# List of documents that shouldn't be included in the build.
#unused_docs = []

# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = []

# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []


# -- Options for HTML output ---------------------------------------------------

# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'default'

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}

# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []

# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None

# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None

# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}

# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}

# If false, no module index is generated.
#html_use_modindex = True

# If false, no index is generated.
#html_use_index = True

# If true, the index is split into individual pages for each letter.
#html_split_index = False

# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''

# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''

# Output file base name for HTML help builder.
htmlhelp_basename = 'Swiftdoc'


# -- Options for LaTeX output --------------------------------------------------

# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'

# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
  ('index', 'Swift.tex', u'Swift Documentation',
   u'Swift Team', 'manual'),
]

# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False

# Additional stuff for the LaTeX preamble.
#latex_preamble = ''

# Documents to append as an appendix to all manuals.
#latex_appendices = []

# If false, no module index is generated.
#latex_use_modindex = True
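One thing worth noting: conf.py leaves its sys.path hint commented out, so sphinx.ext.autodoc can only import the swift package because doc/Makefile above exports PYTHONPATH = ../ relative to the doc directory. The equivalent in-file approach, following conf.py's own commented hint, would be a one-liner like this (a sketch under the assumption that conf.py executes from doc/source, two levels below the repository root; not part of the commit):

    import os, sys

    # Make the top-level swift package importable for sphinx.ext.autodoc.
    sys.path.append(os.path.abspath('../..'))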
doc/source/container.rst (new file, 36 lines)
@@ -0,0 +1,36 @@
.. _Container:

*********
Container
*********

.. _container-server:

Container Server
================

.. automodule:: swift.container.server
    :members:
    :undoc-members:
    :show-inheritance:

.. _container-updater:

Container Updater
=================

.. automodule:: swift.container.updater
    :members:
    :undoc-members:
    :show-inheritance:

.. _container-auditor:

Container Auditor
=================

.. automodule:: swift.container.auditor
    :members:
    :undoc-members:
    :show-inheritance:
doc/source/db.rst (new file, 25 lines)
@@ -0,0 +1,25 @@
.. _account_and_container_db:

***************************
Account DB and Container DB
***************************

.. _db:

DB
==

.. automodule:: swift.common.db
    :members:
    :undoc-members:
    :show-inheritance:

.. _db-replicator:

DB replicator
=============

.. automodule:: swift.common.db_replicator
    :members:
    :undoc-members:
    :show-inheritance:
doc/source/development_guidelines.rst (new file, 54 lines)
@@ -0,0 +1,54 @@
======================
Development Guidelines
======================

-----------------
Coding Guidelines
-----------------

For the most part we try to follow PEP 8 guidelines which can be viewed
here: http://www.python.org/dev/peps/pep-0008/

There is a useful pep8 command line tool for checking files for pep8
compliance which can be installed with ``easy_install pep8``.

------------------------
Documentation Guidelines
------------------------

The documentation in docstrings should follow the PEP 257 conventions
(as mentioned in the PEP 8 guidelines).

More specifically:

1. Triple quotes should be used for all docstrings.
2. If the docstring is simple and fits on one line, then just use
   one line.
3. For docstrings that take multiple lines, there should be a newline
   after the opening quotes, and before the closing quotes.
4. Sphinx is used to build documentation, so use the restructured text
   markup to designate parameters, return values, etc. Documentation on
   the sphinx specific markup can be found here:
   http://sphinx.pocoo.org/markup/index.html

---------------------
License and Copyright
---------------------

Every source file should have the following copyright and license statement at
the top::

    # Copyright (c) 2010 OpenStack, LLC.
    #
    # Licensed under the Apache License, Version 2.0 (the "License");
    # you may not use this file except in compliance with the License.
    # You may obtain a copy of the License at
    #
    #    http://www.apache.org/licenses/LICENSE-2.0
    #
    # Unless required by applicable law or agreed to in writing, software
    # distributed under the License is distributed on an "AS IS" BASIS,
    # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
    # implied.
    # See the License for the specific language governing permissions and
    # limitations under the License.
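Taken together, rules 1 through 4 of those docstring guidelines look like this in practice; a short sketch with hypothetical functions, not code from the tree:

    def is_affirmative(value):
        """Return True if value is a common affirmative string."""
        return value in ('true', '1', 'yes', 'True', 'Yes')

    def build_path(account, container=None):
        """
        Build the storage path for an account or container.

        :param account: account name
        :param container: optional container name
        :returns: the path as a string, e.g. '/myaccount/mycontainer'
        """
        path = '/' + account
        if container:
            path += '/' + container
        return path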
469 doc/source/development_saio.rst Normal file
@ -0,0 +1,469 @@

=======================
SAIO - Swift All In One
=======================

------------------------------------
Instructions for setting up a dev VM
------------------------------------

This documents setting up a virtual machine for doing Swift development. The
virtual machine will emulate running a four node Swift cluster. It assumes
you're using *VMware Fusion 3* on *Mac OS X Snow Leopard*, but should give a
good idea what to do in other environments.

* Get the *Ubuntu 10.04 LTS (Lucid Lynx)* server image from:
  http://cdimage.ubuntu.com/releases/10.04/release/ubuntu-10.04-dvd-amd64.iso
* Create a guest virtual machine:

  #. `Continue without disc`
  #. `Use operating system installation disc image file`, pick the .iso
     from above.
  #. Select `Linux` and `Ubuntu 64-bit`.
  #. Fill in the *Linux Easy Install* details.
  #. `Customize Settings`, name the image whatever you want
     (`SAIO` for instance.)
  #. When the `Settings` window comes up, select `Hard Disk`, create an
     extra disk (the defaults are fine).
  #. Start the virtual machine up and wait for the easy install to
     finish.

* As root on the guest (you'll have to log in as you, then `sudo su -`):

  #. `apt-get install python-software-properties`
  #. `add-apt-repository ppa:swift-core/ppa`
  #. `apt-get update`
  #. `apt-get install curl gcc bzr memcached python-configobj
     python-coverage python-dev python-nose python-setuptools python-simplejson
     python-xattr sqlite3 xfsprogs python-webob python-eventlet
     python-greenlet`
  #. Install anything else you want, like screen, ssh, vim, etc.
  #. `fdisk /dev/sdb` (set up a single partition)
  #. `mkfs.xfs -i size=1024 /dev/sdb1`
  #. `mkdir /mnt/sdb1`
  #. Edit `/etc/fstab` and add
     `/dev/sdb1 /mnt/sdb1 xfs noatime,nodiratime,nobarrier,logbufs=8 0 0`
  #. `mount /mnt/sdb1`
  #. `mkdir /mnt/sdb1/1 /mnt/sdb1/2 /mnt/sdb1/3 /mnt/sdb1/4 /mnt/sdb1/test`
  #. `chown <your-user-name>:<your-group-name> /mnt/sdb1/*`
  #. `mkdir /srv`
  #. `for x in {1..4}; do ln -s /mnt/sdb1/$x /srv/$x; done`
  #. `mkdir -p /etc/swift/object-server /etc/swift/container-server /etc/swift/account-server /srv/1/node/sdb1 /srv/2/node/sdb2 /srv/3/node/sdb3 /srv/4/node/sdb4 /var/run/swift`
  #. `chown -R <your-user-name>:<your-group-name> /etc/swift /srv/[1-4]/ /var/run/swift` -- **Make sure to include the trailing slash after /srv/[1-4]/**
  #. Add to `/etc/rc.local` (before the `exit 0`)::

        mkdir /var/run/swift
        chown <your-user-name>:<your-user-name> /var/run/swift

  #. Create /etc/rsyncd.conf::

        uid = <Your user name>
        gid = <Your group name>
        log file = /var/log/rsyncd.log
        pid file = /var/run/rsyncd.pid

        [account6012]
        max connections = 25
        path = /srv/1/node/
        read only = false
        lock file = /var/lock/account6012.lock

        [account6022]
        max connections = 25
        path = /srv/2/node/
        read only = false
        lock file = /var/lock/account6022.lock

        [account6032]
        max connections = 25
        path = /srv/3/node/
        read only = false
        lock file = /var/lock/account6032.lock

        [account6042]
        max connections = 25
        path = /srv/4/node/
        read only = false
        lock file = /var/lock/account6042.lock

        [container6011]
        max connections = 25
        path = /srv/1/node/
        read only = false
        lock file = /var/lock/container6011.lock

        [container6021]
        max connections = 25
        path = /srv/2/node/
        read only = false
        lock file = /var/lock/container6021.lock

        [container6031]
        max connections = 25
        path = /srv/3/node/
        read only = false
        lock file = /var/lock/container6031.lock

        [container6041]
        max connections = 25
        path = /srv/4/node/
        read only = false
        lock file = /var/lock/container6041.lock

        [object6010]
        max connections = 25
        path = /srv/1/node/
        read only = false
        lock file = /var/lock/object6010.lock

        [object6020]
        max connections = 25
        path = /srv/2/node/
        read only = false
        lock file = /var/lock/object6020.lock

        [object6030]
        max connections = 25
        path = /srv/3/node/
        read only = false
        lock file = /var/lock/object6030.lock

        [object6040]
        max connections = 25
        path = /srv/4/node/
        read only = false
        lock file = /var/lock/object6040.lock

  #. Edit the following line in /etc/default/rsync::

        RSYNC_ENABLE=true

  #. `service rsync restart`

* As you on the guest:

  #. `mkdir ~/bin`
  #. Create `~/.bazaar/bazaar.conf`::

        [DEFAULT]
        email = Your Name <your-email-address>

  #. If you are using launchpad to get the code or make changes, run
     `bzr launchpad-login <launchpad_id>`
  #. Create the swift repo with `bzr init-repo swift`
  #. Check out your bzr branch of swift, for example:
     `cd ~/swift; bzr branch lp:swift trunk`
  #. `cd ~/swift/trunk; sudo python setup.py develop`
  #. Edit `~/.bashrc` and add to the end::

        export PATH_TO_TEST_XFS=/mnt/sdb1/test
        export SWIFT_TEST_CONFIG_FILE=/etc/swift/func_test.conf
        export PATH=${PATH}:~/bin

  #. `. ~/.bashrc`
  #. Create `/etc/swift/auth-server.conf`::

        [auth-server]
        default_cluster_url = http://127.0.0.1:8080/v1
        user = <your-user-name>

  #. Create `/etc/swift/proxy-server.conf`::

        [proxy-server]
        bind_port = 8080
        user = <your-user-name>

  #. Create `/etc/swift/account-server/1.conf`::

        [account-server]
        devices = /srv/1/node
        mount_check = false
        bind_port = 6012
        user = <your-user-name>

        [account-replicator]
        vm_test_mode = yes

        [account-auditor]

        [account-reaper]

  #. Create `/etc/swift/account-server/2.conf`::

        [account-server]
        devices = /srv/2/node
        mount_check = false
        bind_port = 6022
        user = <your-user-name>

        [account-replicator]
        vm_test_mode = yes

        [account-auditor]

        [account-reaper]

  #. Create `/etc/swift/account-server/3.conf`::

        [account-server]
        devices = /srv/3/node
        mount_check = false
        bind_port = 6032
        user = <your-user-name>

        [account-replicator]
        vm_test_mode = yes

        [account-auditor]

        [account-reaper]

  #. Create `/etc/swift/account-server/4.conf`::

        [account-server]
        devices = /srv/4/node
        mount_check = false
        bind_port = 6042
        user = <your-user-name>

        [account-replicator]
        vm_test_mode = yes

        [account-auditor]

        [account-reaper]

  #. Create `/etc/swift/container-server/1.conf`::

        [container-server]
        devices = /srv/1/node
        mount_check = false
        bind_port = 6011
        user = <your-user-name>

        [container-replicator]
        vm_test_mode = yes

        [container-updater]

        [container-auditor]

  #. Create `/etc/swift/container-server/2.conf`::

        [container-server]
        devices = /srv/2/node
        mount_check = false
        bind_port = 6021
        user = <your-user-name>

        [container-replicator]
        vm_test_mode = yes

        [container-updater]

        [container-auditor]

  #. Create `/etc/swift/container-server/3.conf`::

        [container-server]
        devices = /srv/3/node
        mount_check = false
        bind_port = 6031
        user = <your-user-name>

        [container-replicator]
        vm_test_mode = yes

        [container-updater]

        [container-auditor]

  #. Create `/etc/swift/container-server/4.conf`::

        [container-server]
        devices = /srv/4/node
        mount_check = false
        bind_port = 6041
        user = <your-user-name>

        [container-replicator]
        vm_test_mode = yes

        [container-updater]

        [container-auditor]

  #. Create `/etc/swift/object-server/1.conf`::

        [object-server]
        devices = /srv/1/node
        mount_check = false
        bind_port = 6010
        user = <your-user-name>

        [object-replicator]
        vm_test_mode = yes

        [object-updater]

        [object-auditor]

  #. Create `/etc/swift/object-server/2.conf`::

        [object-server]
        devices = /srv/2/node
        mount_check = false
        bind_port = 6020
        user = <your-user-name>

        [object-replicator]
        vm_test_mode = yes

        [object-updater]

        [object-auditor]

  #. Create `/etc/swift/object-server/3.conf`::

        [object-server]
        devices = /srv/3/node
        mount_check = false
        bind_port = 6030
        user = <your-user-name>

        [object-replicator]
        vm_test_mode = yes

        [object-updater]

        [object-auditor]

  #. Create `/etc/swift/object-server/4.conf`::

        [object-server]
        devices = /srv/4/node
        mount_check = false
        bind_port = 6040
        user = <your-user-name>

        [object-replicator]
        vm_test_mode = yes

        [object-updater]

        [object-auditor]

  #. Create `~/bin/resetswift`::

        #!/bin/bash

        swift-init all stop
        sleep 5
        sudo umount /mnt/sdb1
        sudo mkfs.xfs -f -i size=1024 /dev/sdb1
        sudo mount /mnt/sdb1
        sudo mkdir /mnt/sdb1/1 /mnt/sdb1/2 /mnt/sdb1/3 /mnt/sdb1/4 /mnt/sdb1/test
        sudo chown <your-user-name>:<your-group-name> /mnt/sdb1/*
        mkdir -p /srv/1/node/sdb1 /srv/2/node/sdb2 /srv/3/node/sdb3 /srv/4/node/sdb4
        sudo rm -f /var/log/debug /var/log/messages /var/log/rsyncd.log /var/log/syslog
        sudo service rsyslog restart
        sudo service memcached restart

  #. Create `~/bin/remakerings`::

        #!/bin/bash

        cd /etc/swift

        rm *.builder *.ring.gz backups/*.builder backups/*.ring.gz

        swift-ring-builder object.builder create 18 3 1
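        # create's arguments, in order: a partition power of 18, 3 replicas,
        # and a min_part_hours of 1 (see the ring overview)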
        swift-ring-builder object.builder add z1-127.0.0.1:6010/sdb1 1
        swift-ring-builder object.builder add z2-127.0.0.1:6020/sdb2 1
        swift-ring-builder object.builder add z3-127.0.0.1:6030/sdb3 1
        swift-ring-builder object.builder add z4-127.0.0.1:6040/sdb4 1
        swift-ring-builder object.builder rebalance
        swift-ring-builder container.builder create 18 3 1
        swift-ring-builder container.builder add z1-127.0.0.1:6011/sdb1 1
        swift-ring-builder container.builder add z2-127.0.0.1:6021/sdb2 1
        swift-ring-builder container.builder add z3-127.0.0.1:6031/sdb3 1
        swift-ring-builder container.builder add z4-127.0.0.1:6041/sdb4 1
        swift-ring-builder container.builder rebalance
        swift-ring-builder account.builder create 18 3 1
        swift-ring-builder account.builder add z1-127.0.0.1:6012/sdb1 1
        swift-ring-builder account.builder add z2-127.0.0.1:6022/sdb2 1
        swift-ring-builder account.builder add z3-127.0.0.1:6032/sdb3 1
        swift-ring-builder account.builder add z4-127.0.0.1:6042/sdb4 1
        swift-ring-builder account.builder rebalance

  #. Create `~/bin/startmain`::

        #!/bin/bash

        swift-init auth-server start
        swift-init proxy-server start
        swift-init account-server start
        swift-init container-server start
        swift-init object-server start

  #. Create `~/bin/startrest`::

        #!/bin/bash

        swift-auth-recreate-accounts
        swift-init object-updater start
        swift-init container-updater start
        swift-init object-replicator start
        swift-init container-replicator start
        swift-init account-replicator start
        swift-init object-auditor start
        swift-init container-auditor start
        swift-init account-auditor start
        swift-init account-reaper start

  #. `chmod +x ~/bin/*`
  #. `remakerings`
  #. `cd ~/swift/trunk; ./.unittests`
  #. `startmain` (The ``Unable to increase file descriptor limit. Running as non-root?`` warnings are expected and ok.)
  #. `swift-auth-create-account test tester testing`
  #. Get an `X-Storage-Url` and `X-Auth-Token`: ``curl -v -H 'X-Storage-User: test:tester' -H 'X-Storage-Pass: testing' http://127.0.0.1:11000/v1.0``
  #. Check that you can GET the account: ``curl -v -H 'X-Auth-Token: <token-from-x-auth-token-above>' <url-from-x-storage-url-above>``
  #. Check that `st` works: `st -A http://127.0.0.1:11000/v1.0 -U test:tester -K testing stat`
  #. Create `/etc/swift/func_test.conf`::

        auth_host = 127.0.0.1
        auth_port = 11000
        auth_ssl = no

        account = test
        username = tester
        password = testing

        collate = C

  #. `cd ~/swift/trunk; ./.functests`
  #. `cd ~/swift/trunk; ./.probetests` (Note for future reference: probe tests
     will reset your environment)

If you plan to work on documentation (and who doesn't?!):

  #. `sudo apt-get install python-sphinx`
  #. `python setup.py build_sphinx`

----------------
Debugging Issues
----------------

If all doesn't go as planned, and tests fail, or you can't auth, or something doesn't work, here are some good starting places to look for issues:

#. Everything is logged to /var/log/syslog, so that is a good first place to
   look for errors (most likely Python tracebacks).
#. Make sure all of the server processes are running. For the base
   functionality, the Proxy, Account, Container, Object and Auth servers
   should be running.
#. If one of the servers is not running, and no errors are logged to syslog,
   it may be useful to try to start the server manually, for example:
   `swift-object-server /etc/swift/object-server/1.conf` will start the
   object server. If there are problems not showing up in syslog,
   then you will likely see the traceback on startup.
48 doc/source/index.rst Normal file
@ -0,0 +1,48 @@

.. Swift documentation master file, created by
   sphinx-quickstart on Tue May 18 13:50:15 2010.
   You can adapt this file completely to your liking, but it should at least
   contain the root `toctree` directive.

Welcome to Swift's documentation!
=================================

Overview:

.. toctree::
    :maxdepth: 1

    overview_ring
    overview_reaper
    overview_auth
    overview_replication

Development:

.. toctree::
    :maxdepth: 1

    development_guidelines
    development_saio

Source:

.. toctree::
    :maxdepth: 2

    ring
    proxy
    account
    container
    db
    object
    auth
    misc

Indices and tables
==================

* :ref:`genindex`
* :ref:`modindex`
* :ref:`search`
99 doc/source/misc.rst Normal file
@ -0,0 +1,99 @@

.. _misc:

****
Misc
****

.. _exceptions:

Exceptions
==========

.. automodule:: swift.common.exceptions
    :members:
    :undoc-members:
    :show-inheritance:

.. _constraints:

Constraints
===========

.. automodule:: swift.common.constraints
    :members:
    :undoc-members:
    :show-inheritance:

.. _utils:

Utils
=====

.. automodule:: swift.common.utils
    :members:
    :show-inheritance:

.. _common_auth:

Auth
====

.. automodule:: swift.common.auth
    :members:
    :show-inheritance:

.. _wsgi:

WSGI
====

.. automodule:: swift.common.wsgi
    :members:
    :show-inheritance:

.. _client:

Client
======

.. automodule:: swift.common.client
    :members:
    :undoc-members:
    :show-inheritance:

.. _direct_client:

Direct Client
=============

.. automodule:: swift.common.direct_client
    :members:
    :undoc-members:
    :show-inheritance:

.. _buffered_http:

Buffered HTTP
=============

.. automodule:: swift.common.bufferedhttp
    :members:
    :show-inheritance:

.. _healthcheck:

Healthcheck
===========

.. automodule:: swift.common.healthcheck
    :members:
    :show-inheritance:

.. _memcached:

MemCacheD
=========

.. automodule:: swift.common.memcached
    :members:
    :show-inheritance:
46 doc/source/object.rst Normal file
@ -0,0 +1,46 @@

.. _object:

******
Object
******

.. _object-server:

Object Server
=============

.. automodule:: swift.obj.server
    :members:
    :undoc-members:
    :show-inheritance:

.. _object-replicator:

Object Replicator
=================

.. automodule:: swift.obj.replicator
    :members:
    :undoc-members:
    :show-inheritance:

.. _object-updater:

Object Updater
==============

.. automodule:: swift.obj.updater
    :members:
    :undoc-members:
    :show-inheritance:

.. _object-auditor:

Object Auditor
==============

.. automodule:: swift.obj.auditor
    :members:
    :undoc-members:
    :show-inheritance:
47 doc/source/overview_auth.rst Normal file
@ -0,0 +1,47 @@

===============
The Auth System
===============

The auth system for Swift is based on the auth system from an existing
architecture -- actually from a few existing auth systems -- and is therefore a
bit disjointed. The distilled points about it are:

* The authentication/authorization part is outside Swift itself.
* The user of Swift passes in an auth token with each request.
* Swift validates each token with the external auth system and caches the
  result.
* The token does not change from request to request, but does expire.

The token can be passed into Swift using the X-Auth-Token or the
X-Storage-Token header. Both have the same format: just a simple string
representing the token. Some external systems use UUID tokens, some an MD5 hash
of something unique, some use "something else", but the salient point is that
the token is a string which can be sent as-is back to the auth system for
validation.
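
For illustration, here is a minimal sketch of that flow using only the Python
standard library (Python 2, as Swift targets at this version). The host, port,
and credentials are the SAIO development values from these docs, not anything
an external auth system is required to use::

    import httplib

    # Ask the auth system for a token and a storage URL.
    conn = httplib.HTTPConnection('127.0.0.1', 11000)
    conn.request('GET', '/v1.0', headers={
        'X-Storage-User': 'test:tester', 'X-Storage-Pass': 'testing'})
    resp = conn.getresponse()
    token = resp.getheader('x-auth-token')
    storage_url = resp.getheader('x-storage-url')

    # Every subsequent Swift request just replays the same token.
    parts = storage_url.split('/', 3)      # ['http:', '', host, account path]
    conn = httplib.HTTPConnection(parts[2])
    conn.request('GET', '/' + parts[3], headers={'X-Auth-Token': token})
    print conn.getresponse().status        # 2xx once the account exists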

The validation call is, for historical reasons, an XMLRPC call. There are two
types of auth systems, type 0 and type 1. With type 0, the XMLRPC call is given
the token and the Swift account name (also known as the account hash because
it's usually of the format <reseller>_<hash>). With type 1, the call is given
the container name and HTTP method as well as the token and account hash. Both
types are also given a service login and password recorded in Swift's
resellers.conf. For a valid token, both auth system types respond with a
session TTL and overall expiration in seconds from now. Swift does not honor
the session TTL but will cache the token up to the expiration time. Tokens can
be purged through a call to Swift's services server.

How the user gets the token to use with Swift is up to the reseller software
itself. For instance, with Cloud Files the user has a starting URL to an auth
system. The user starts a session by sending a ReST request to that auth system
to receive the auth token, a URL to the Swift system, and a URL to the CDN
system.

------------------
History and Future
------------------

What's established in Swift for authentication/authorization has history from
before Swift, so that won't be recorded here. It was minimally integrated with
Swift to meet project deadlines, but in the near future Swift should have a
pluggable auth/reseller system to support the above as well as other
architectures.
64 doc/source/overview_reaper.rst Normal file
@ -0,0 +1,64 @@

==================
The Account Reaper
==================

The Account Reaper removes data from deleted accounts in the background.

An account is marked for deletion by a reseller through the services server's
remove_storage_account XMLRPC call. This simply puts the value DELETED into the
status column of the account_stat table in the account database (and replicas),
indicating the data for the account should be deleted later. There is no set
retention time and no undelete; it is assumed the reseller will implement such
features and only call remove_storage_account once it is truly desired the
account's data be removed.

The account reaper runs on each account server and scans the server
occasionally for account databases marked for deletion. It will only trigger on
accounts that server is the primary node for, so that multiple account servers
aren't all trying to do the same work at the same time. Using multiple servers
to delete one account might improve deletion speed, but requires coordination
so they aren't duplicating effort. Speed really isn't as much of a concern with
data deletion, and large accounts aren't deleted that often.

The deletion process for an account itself is pretty straightforward. For each
container in the account, each object is deleted and then the container is
deleted. Any deletion requests that fail won't stop the overall process, but
will cause the overall process to fail eventually (for example, if an object
delete times out, the container won't be able to be deleted later and therefore
the account won't be deleted either). The overall process continues even on a
failure so that it doesn't get hung up reclaiming cluster space because of one
troublesome spot. The account reaper will keep trying to delete an account
until it eventually becomes empty, at which point the database reclaim process
within the db_replicator will eventually remove the database files.
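
Schematically, a single reap pass over one account looks something like the
sketch below. The helper names are invented for illustration only; the real
reaper works through the ring rather than objects like these::

    def reap_account(account):
        # Failures are tolerated; anything left behind is retried on a
        # later pass, since the account stays marked DELETED until empty.
        for container in account.list_containers():
            for obj in container.list_objects():
                try_delete(obj)
            try_delete(container)   # fails if any object delete failed
        try_delete(account)         # fails until all containers are gone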

-------
History
-------

At first, a simple approach of deleting an account through completely external
calls was considered as it required no changes to the system. All data would
simply be deleted in the same way the actual user would, through the public
ReST API. However, the downside was that it would use proxy resources and log
everything when it didn't really need to. Also, it would likely need a
dedicated server or two, just for issuing the delete requests.

A completely bottom-up approach was also considered, where the object and
container servers would occasionally scan the data they held and check if the
account was deleted, removing the data if so. The upside was the speed of
reclamation with no impact on the proxies or logging, but the downside was that
nearly 100% of the scanning would result in no action, creating a lot of I/O
load for no reason.

A more container server centric approach was also considered, where the account
server would mark all the containers for deletion and the container servers
would delete the objects in each container and then themselves. This has the
benefit of still speedy reclamation for accounts with a lot of containers, but
has the downside of a pretty big load spike. The process could be slowed down
to alleviate the load spike possibility, but then the benefit of speedy
reclamation is lost and what's left is just a more complex process. Also,
scanning all the containers for those marked for deletion when the majority
wouldn't be seemed wasteful. The db_replicator could do this work while
performing its replication scan, but it would have to spawn and track deletion
processes, which seemed needlessly complex.

In the end, an account server centric approach seemed best, as described above.
40 doc/source/overview_replication.rst Normal file
@ -0,0 +1,40 @@

===========
Replication
===========

Since each replica in swift functions independently, and clients generally require only a simple majority of nodes responding to consider an operation successful, transient failures like network partitions can quickly cause replicas to diverge. These differences are eventually reconciled by asynchronous, peer-to-peer replicator processes. The replicator processes traverse their local filesystems, concurrently performing operations in a manner that balances load across physical disks.

Replication uses a push model, with records and files generally only being copied from local to remote replicas. This is important because data on the node may not belong there (as in the case of handoffs and ring changes), and a replicator can't know what data exists elsewhere in the cluster that it should pull in. It's the duty of any node that contains data to ensure that data gets to where it belongs. Replica placement is handled by the ring.

Every deleted record or file in the system is marked by a tombstone, so that deletions can be replicated alongside creations. These tombstones are cleaned up by the replication process after a period of time referred to as the consistency window, which is related to replication duration and how long transient failures can remove a node from the cluster. Tombstone cleanup must be tied to replication to reach replica convergence.

If a replicator detects that a remote drive has failed, it will use the ring's "get_more_nodes" interface to choose an alternate node to synchronize with. The replicator can generally maintain desired levels of replication in the face of hardware failures, though some replicas may not be in an immediately usable location.

Replication is an area of active development, and likely rife with potential improvements to speed and correctness.

There are two major classes of replicator: the db replicator, which replicates accounts and containers, and the object replicator, which replicates object data.

--------------
DB Replication
--------------

The first step performed by db replication is a low-cost hash comparison to find out whether or not two replicas already match. Under normal operation, this check is able to verify that most databases in the system are already synchronized very quickly. If the hashes differ, the replicator brings the databases in sync by sharing records added since the last sync point.

This sync point is a high water mark noting the last record at which two databases were known to be in sync, and is stored in each database as a tuple of the remote database id and record id. Database ids are unique amongst all replicas of the database, and record ids are monotonically increasing integers. After all new records have been pushed to the remote database, the entire sync table of the local database is pushed, so the remote database knows it's now in sync with everyone the local database has previously synchronized with.
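
As a conceptual sketch only (the local/remote objects and method names here
are invented, not Swift's actual API), the check described above amounts to::

    def replicate_db(local, remote):
        if local.hash() == remote.hash():
            return                                 # fast path: already in sync
        point = local.get_sync_point(remote.id)    # last shared record id
        for record in local.records_since(point):
            remote.merge(record)                   # push only newer records
        remote.merge_syncs(local.sync_table())     # share sync history too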

If a replica is found to be missing entirely, the whole local database file is transmitted to the peer using rsync(1) and vested with a new unique id.

In practice, DB replication can process hundreds of databases per concurrency setting per second (up to the number of available CPUs or disks) and is bound by the number of DB transactions that must be performed.

------------------
Object Replication
------------------

The initial implementation of object replication simply performed an rsync to push data from a local partition to all remote servers it was expected to exist on. While this performed adequately at small scale, replication times skyrocketed once directory structures could no longer be held in RAM. We now use a modification of this scheme in which a hash of the contents for each suffix directory is saved to a per-partition hashes file. The hash for a suffix directory is invalidated when the contents of that suffix directory are modified.

The object replication process reads in these hash files, calculating any invalidated hashes. It then transmits the hashes to each remote server that should hold the partition, and only suffix directories with differing hashes on the remote server are rsynced. After pushing files to the remote server, the replication process notifies it to recalculate hashes for the rsynced suffix directories.
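
The per-suffix hash computation can be illustrated with a hedged sketch like
the following; the directory layout is as described above, but this function
is an illustration, not the actual swift.obj.replicator code::

    import hashlib
    import os

    def suffix_hash(suffix_dir):
        """
        Hash the file names under one suffix directory.

        Any change to the directory's contents changes this digest, which
        is what lets replicas be compared without walking every file.
        """
        md5 = hashlib.md5()
        for hash_dir in sorted(os.listdir(suffix_dir)):
            for name in sorted(os.listdir(os.path.join(suffix_dir, hash_dir))):
                md5.update(name)
        return md5.hexdigest()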

Performance of object replication is generally bound by the number of uncached directories it has to traverse, usually as a result of invalidated suffix directory hashes. Using write volume and partition counts from our running systems, it was designed so that around 2% of the hash space on a normal node will be invalidated per day, which has experimentally given us acceptable replication speeds.
234 doc/source/overview_ring.rst Normal file
@ -0,0 +1,234 @@

=========
The Rings
=========

The rings determine where data should reside in the cluster. There is a
separate ring for account databases, container databases, and individual
objects, but each ring works in the same way. These rings are externally
managed, in that the server processes themselves do not modify the rings; they
are instead given new rings modified by other tools.

The ring uses a configurable number of bits from a path's MD5 hash as a
partition index that designates a device. The number of bits kept from the hash
is known as the partition power, and 2 to the partition power indicates the
partition count. Partitioning the full MD5 hash ring allows other parts of the
cluster to work in batches of items at once, which ends up either more
efficient or at least less complex than working with each item separately or
the entire cluster all at once.
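
For example, using the partition power of 18 from the SAIO instructions::

    >>> partition_power = 18
    >>> 2 ** partition_power    # partitions in the ring
    262144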

Another configurable value is the replica count, which indicates how many of
the partition->device assignments comprise a single ring. For a given partition
number, each replica's device will not be in the same zone as any other
replica's device. Zones can be used to group devices based on physical
locations, power separations, network separations, or any other attribute that
would lessen multiple replicas being unavailable at the same time.

------------
Ring Builder
------------

The rings are built and managed manually by a utility called the ring-builder.
The ring-builder assigns partitions to devices and writes an optimized Python
structure to a gzipped, pickled file on disk for shipping out to the servers.
The server processes just check the modification time of the file occasionally
and reload their in-memory copies of the ring structure as needed. Because of
how the ring-builder manages changes to the ring, using a slightly older ring
usually just means one of the three replicas for a subset of the partitions
will be incorrect, which can be easily worked around.

The ring-builder also keeps its own builder file with the ring information and
additional data required to build future rings. It is very important to keep
multiple backup copies of these builder files. One option is to copy the
builder files out to every server while copying the ring files themselves.
Another is to upload the builder files into the cluster itself. Complete loss
of a builder file will mean creating a new ring from scratch; nearly all
partitions will end up assigned to different devices, and therefore nearly all
data stored will have to be replicated to new locations. So, recovery from a
builder file loss is possible, but data will definitely be unreachable for an
extended time.

-------------------
Ring Data Structure
-------------------

The ring data structure consists of three top level fields: a list of devices
in the cluster, a list of lists of device ids indicating partition to device
assignments, and an integer indicating the number of bits to shift an MD5 hash
to calculate the partition for the hash.

***************
List of Devices
***************

The list of devices is known internally to the Ring class as devs. Each item in
the list of devices is a dictionary with the following keys:

====== ======= ==============================================================
id     integer The index into the list of devices.
zone   integer The zone the device resides in.
weight float   The relative weight of the device in comparison to other
               devices. This usually corresponds directly to the amount of
               disk space the device has compared to other devices. For
               instance a device with 1 terabyte of space might have a weight
               of 100.0 and another device with 2 terabytes of space might
               have a weight of 200.0. This weight can also be used to bring
               back into balance a device that has ended up with more or less
               data than desired over time. A good average weight of 100.0
               allows flexibility in lowering the weight later if necessary.
ip     string  The IP address of the server containing the device.
port   int     The TCP port the listening server process uses that serves
               requests for the device.
device string  The on disk name of the device on the server.
               For example: sdb1
meta   string  A general-use field for storing additional information for the
               device. This information isn't used directly by the server
               processes, but can be useful in debugging. For example, the
               date and time of installation and hardware manufacturer could
               be stored here.
====== ======= ==============================================================

Note: The list of devices may contain holes, or indexes set to None, for
devices that have been removed from the cluster. Generally, device ids are not
reused. Also, some devices may be temporarily disabled by setting their weight
to 0.0. To obtain a list of active devices (for uptime polling, for example)
the Python code would look like: ``devices = [device for device in self.devs if
device and device['weight']]``

*************************
Partition Assignment List
*************************

This is a list of array('I') of device ids. The outermost list contains an
array('I') for each replica. Each array('I') has a length equal to the
partition count for the ring. Each integer in the array('I') is an index into
the above list of devices. The partition list is known internally to the Ring
class as _replica2part2dev_id.

So, to create a list of device dictionaries assigned to a partition, the Python
code would look like: ``devices = [self.devs[part2dev_id[partition]] for
part2dev_id in self._replica2part2dev_id]``

array('I') is used for memory conservation as there may be millions of
partitions.

*********************
Partition Shift Value
*********************

The partition shift value is known internally to the Ring class as _part_shift.
This value is used to shift an MD5 hash to calculate the partition on which the
data for that hash should reside. Only the top four bytes of the hash are used
in this process. For example, to compute the partition for the path
/account/container/object the Python code might look like: ``partition =
unpack_from('>I', md5('/account/container/object').digest())[0] >>
self._part_shift``
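
Put together as a small runnable example (Python 2; a partition power of 18
gives a shift of 32 - 18 = 14, since only four bytes of the hash are kept)::

    from hashlib import md5
    from struct import unpack_from

    part_shift = 32 - 18
    path = '/account/container/object'
    partition = unpack_from('>I', md5(path).digest())[0] >> part_shift
    # partition is now an int in the range [0, 2**18)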

-----------------
Building the Ring
-----------------

The initial building of the ring first calculates the number of partitions that
should ideally be assigned to each device based on the device's weight. For
example, if the partition power is 20, the ring will have 1,048,576 partitions.
If there are 1,000 devices of equal weight they will each desire 1,048.576
partitions. The devices are then sorted by the number of partitions they desire
and kept in order throughout the initialization process.

Then, the ring builder assigns each partition's replica to the device that
desires the most partitions at that point, with the restriction that the device
is not in the same zone as any other replica for that partition. Once assigned,
the device's desired partition count is decremented and it is moved to its new
sorted location in the list of devices, and the process continues.
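
A toy version of that initial assignment (a sketch only -- the real
RingBuilder tracks much more state) might use a heap keyed on desired
partition count::

    import heapq

    def initial_assignment(partition_count, replica_count, devs):
        # devs: dicts with 'id', 'zone', and a precomputed 'desired' count;
        # assumes at least replica_count distinct zones. The heap is keyed
        # on negated desire so the most-wanting device pops first.
        heap = [(-dev['desired'], dev['id']) for dev in devs]
        heapq.heapify(heap)
        zone_of = dict((dev['id'], dev['zone']) for dev in devs)
        replica2part2dev = [[None] * partition_count
                            for _ in xrange(replica_count)]
        for part in xrange(partition_count):
            used_zones = set()
            for replica in xrange(replica_count):
                skipped = []
                neg_desired, dev_id = heapq.heappop(heap)
                while zone_of[dev_id] in used_zones:
                    skipped.append((neg_desired, dev_id))
                    neg_desired, dev_id = heapq.heappop(heap)
                for item in skipped:
                    heapq.heappush(heap, item)
                replica2part2dev[replica][part] = dev_id
                used_zones.add(zone_of[dev_id])
                # one partition assigned, so this device desires one fewer
                heapq.heappush(heap, (neg_desired + 1, dev_id))
        return replica2part2dev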

When building a new ring based on an old ring, the desired number of partitions
each device wants is recalculated. Next, the partitions to be reassigned are
gathered up. Any removed devices have all their assigned partitions unassigned
and added to the gathered list. Any devices that have more partitions than they
now desire have random partitions unassigned from them and added to the
gathered list. Lastly, the gathered partitions are then reassigned to devices
using a similar method as in the initial assignment described above.

Whenever a partition has a replica reassigned, the time of the reassignment is
recorded. This is taken into account when gathering partitions to reassign so
that no partition is moved twice in a configurable amount of time. This
configurable amount of time is known internally to the RingBuilder class as
min_part_hours. This restriction is ignored for replicas of partitions on
devices that have been removed, as removing a device only happens on device
failure and there's no choice but to make a reassignment.
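
In code terms, the gathering check reduces to something like this sketch
(names invented; the real builder stores the reassignment times itself)::

    import time

    def may_gather(last_moved, min_part_hours, device_removed=False):
        # A removed device leaves no choice: its replicas must move.
        if device_removed:
            return True
        return time.time() - last_moved >= min_part_hours * 3600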

The above processes don't always perfectly rebalance a ring due to the random
nature of gathering partitions for reassignment. To help reach a more balanced
ring, the rebalance process is repeated until nearly perfect (less than 1% off)
or until the balance doesn't improve by at least 1% (indicating we probably
can't get perfect balance due to wildly imbalanced zones or too many partitions
recently moved).

-------
History
-------

The ring code went through many iterations before arriving at what it is now,
and while it has been stable for a while, the algorithm may be tweaked or
perhaps even fundamentally changed if new ideas emerge. This section describes
the ideas previously attempted and explains why they were discarded.

A "live ring" option was considered where each server could maintain its own
copy of the ring and the servers would use a gossip protocol to communicate the
changes they made. This was discarded as too complex and error prone to code
correctly in the project time span available. One bug could easily gossip bad
data out to the entire cluster and be difficult to recover from. Having an
externally managed ring simplifies the process, allows full validation of data
before it's shipped out to the servers, and guarantees each server is using a
ring from the same timeline. It also means that the servers themselves aren't
spending a lot of resources maintaining rings.

A couple of "ring server" options were considered. One was where all ring
lookups would be done by calling a service on a separate server or set of
servers, but this was discarded due to the latency involved. Another was much
like the current process but where servers could submit change requests to the
ring server to have a new ring built and shipped back out to the servers. This
was discarded due to project time constraints and because ring changes are
currently infrequent enough that manual control was sufficient. However, lack
of quick automatic ring changes did mean that other parts of the system had to
be coded to handle devices being unavailable for a period of hours until
someone could manually update the ring.

The current ring process has each replica of a partition independently assigned
to a device. A version of the ring that used a third of the memory was tried,
where the first replica of a partition was directly assigned and the other two
were determined by "walking" the ring until finding additional devices in other
zones. This was discarded because control was lost as to how many replicas for
a given partition moved at once. Keeping each replica independent allows for
moving only one partition replica within a given time window (except due to
device failures). Using the additional memory was deemed a good tradeoff for
moving data around the cluster much less often.

Another ring design was tried where the partition to device assignments weren't
stored in a big list in memory but instead each device was assigned a set of
hashes, or anchors. The partition would be determined from the data item's hash
and the nearest device anchors would determine where the replicas should be
stored. However, to get reasonable distribution of data each device had to have
a lot of anchors, and walking through those anchors to find replicas started to
add up. In the end, the memory savings wasn't that great and more processing
power was used, so the idea was discarded.

A completely non-partitioned ring was also tried but discarded as the
partitioning helps many other parts of the system, especially replication.
Replication can be attempted and retried in a partition batch with the other
replicas rather than each data item independently attempted and retried. Hashes
of directory structures can be calculated and compared with other replicas to
reduce directory walking and network traffic.

Partitioning and independently assigning partition replicas also allowed for
the best balanced cluster. The best of the other strategies tended to give
+-10% variance on device balance with devices of equal weight and +-15% with
devices of varying weights. The current strategy allows us to get +-3% and +-8%
respectively.

Various hashing algorithms were tried. SHA offers better security, but the ring
doesn't need to be cryptographically secure and SHA is slower. Murmur was much
faster, but MD5 was built-in and hash computation is a small percentage of the
overall request handling time. In all, once it was decided the servers wouldn't
be maintaining the rings themselves anyway and only doing hash lookups, MD5 was
chosen for its general availability, good distribution, and adequate speed.
15 doc/source/proxy.rst Normal file
@ -0,0 +1,15 @@

.. _proxy:

*****
Proxy
*****

.. _proxy-server:

Proxy Server
============

.. automodule:: swift.proxy.server
    :members:
    :undoc-members:
    :show-inheritance:
25 doc/source/ring.rst Normal file
@ -0,0 +1,25 @@

.. _consistent_hashing_ring:

********************************
Partitioned Consistent Hash Ring
********************************

.. _ring:

Ring
====

.. automodule:: swift.common.ring.ring
    :members:
    :undoc-members:
    :show-inheritance:

.. _ring-builder:

Ring Builder
============

.. automodule:: swift.common.ring.builder
    :members:
    :undoc-members:
    :show-inheritance:
51 etc/account-server.conf-sample Normal file
@ -0,0 +1,51 @@

[account-server]
# swift_dir = /etc/swift
# devices = /srv/node
# mount_check = true
# bind_ip = 0.0.0.0
# bind_port = 6002
# workers = 1
# log_facility = LOG_LOCAL0
# log_level = INFO
# user = swift

[account-replicator]
# log_facility = LOG_LOCAL0
# log_level = INFO
# per_diff = 1000
# concurrency = 8
# run_pause = 30
# How long without an error before a node's error count is reset. This will
# also be how long before a node is reenabled after suppression is triggered.
# error_suppression_interval = 60
# How many errors can accumulate before a node is temporarily ignored.
# error_suppression_limit = 10
# node_timeout = 10
# conn_timeout = 0.5
# The replicator also performs reclamation
# reclaim_age = 86400

[account-stats]
# cf_account = AUTH_7abbc116-8a07-4b63-819d-02715d3e0f31
# container_name = account_stats
# proxy_server_conf = /etc/swift/proxy-server.conf
# log_facility = LOG_LOCAL0
# log_level = INFO

[account-auditor]
# Will audit, at most, 1 account per device per interval
# interval = 1800
# Maximum containers randomly picked for a given account audit
# max_container_count = 100
# node_timeout = 10
# conn_timeout = 0.5
# log_facility = LOG_LOCAL0
# log_level = INFO

[account-reaper]
# concurrency = 25
# interval = 3600
# node_timeout = 10
# conn_timeout = 0.5
# log_facility = LOG_LOCAL0
# log_level = INFO
15 etc/auth-server.conf-sample Normal file
@ -0,0 +1,15 @@

[auth-server]
# swift_dir = /etc/swift
# bind_ip = 0.0.0.0
# bind_port = 11000
# log_facility = LOG_LOCAL0
# log_level = INFO
# workers = 1
# reseller_prefix = AUTH
# default_cluster_url = http://127.0.0.1:9000/v1
# token_life = 86400
# log_headers = False
# cert_file = Default is no cert; format is path like /etc/swift/auth.crt
# key_file = Default is no key; format is path like /etc/swift/auth.key
# node_timeout = 10
user = swift
43 etc/container-server.conf-sample Normal file
@ -0,0 +1,43 @@

[container-server]
# swift_dir = /etc/swift
# devices = /srv/node
# mount_check = true
# bind_ip = 0.0.0.0
# bind_port = 6001
# workers = 1
# log_facility = LOG_LOCAL0
# log_level = INFO
# user = swift
# node_timeout = 3
# conn_timeout = 0.5

[container-replicator]
# log_facility = LOG_LOCAL0
# log_level = INFO
# per_diff = 1000
# concurrency = 8
# run_pause = 30
# node_timeout = 10
# conn_timeout = 0.5
# The replicator also performs reclamation
# reclaim_age = 604800

[container-updater]
# interval = 300
# concurrency = 4
# node_timeout = 3
# conn_timeout = 0.5
# slowdown will sleep that amount between containers
# slowdown = 0.01
# log_facility = LOG_LOCAL0
# log_level = INFO

[container-auditor]
# Will audit, at most, 1 container per device per interval
# interval = 1800
# Maximum objects randomly picked for a given container audit
# max_object_count = 100
# node_timeout = 10
# conn_timeout = 0.5
# log_facility = LOG_LOCAL0
# log_level = INFO
6 etc/drive-audit.conf-sample Normal file
@ -0,0 +1,6 @@

[drive-audit]
# device_dir = /srv/node
# log_facility = LOG_LOCAL0
# log_level = INFO
# minutes = 60
# error_limit = 1
46 etc/object-server.conf-sample Normal file
@ -0,0 +1,46 @@

[object-server]
# swift_dir = /etc/swift
# devices = /srv/node
# mount_check = true
# bind_ip = 0.0.0.0
# bind_port = 6000
# workers = 1
# log_facility = LOG_LOCAL0
# log_level = INFO
# log_requests = True
# user = swift
# node_timeout = 3
# conn_timeout = 0.5
# network_chunk_size = 8192
# disk_chunk_size = 32768
# max_upload_time = 86400
# slow = 1

[object-replicator]
# log_facility = LOG_LOCAL0
# log_level = INFO
# daemonize = on
# run_pause = 30
# concurrency = 1
# timeout = 300
# stats_interval = 3600
# The replicator also performs reclamation
# reclaim_age = 604800

[object-updater]
# interval = 300
# concurrency = 1
# node_timeout = 10
# conn_timeout = 0.5
# slowdown will sleep that amount between objects
# slowdown = 0.01
# log_facility = LOG_LOCAL0
# log_level = INFO

[object-auditor]
# Will audit, at most, 1 object per device per interval
# interval = 1800
# node_timeout = 10
# conn_timeout = 0.5
# log_facility = LOG_LOCAL0
# log_level = INFO
41
etc/proxy-server.conf-sample
Normal file
@ -0,0 +1,41 @@
[proxy-server]
# bind_ip = 0.0.0.0
# bind_port = 80
# cert_file = /etc/swift/proxy.crt
# key_file = /etc/swift/proxy.key
# swift_dir = /etc/swift
# log_facility = LOG_LOCAL0
# log_level = INFO
# log_headers = False
# workers = 1
# user = swift
# recheck_account_existence = 60
# recheck_container_existence = 60
# object_chunk_size = 8192
# container_chunk_size = 8192
# account_chunk_size = 8192
# client_chunk_size = 8192
# Default for memcache_servers is below, but you can specify multiple servers
# with the format: 10.1.2.3:11211,10.1.2.4:11211
# memcache_servers = 127.0.0.1:11211
# node_timeout = 10
# client_timeout = 60
# conn_timeout = 0.5
# How long without an error before a node's error count is reset. This will
# also be how long before a node is reenabled after suppression is triggered.
# error_suppression_interval = 60
# How many errors can accumulate before a node is temporarily ignored.
# error_suppression_limit = 10
# How many ops per second to one container (as a float)
# rate_limit = 20000.0
# How many ops per second for account-level operations
# account_rate_limit = 200.0
# rate_limit_account_whitelist = acct1,acct2,etc
# rate_limit_account_blacklist = acct3,acct4,etc
# container_put_lock_timeout = 5

# [auth-server]
# class = swift.common.auth.DevAuthMiddleware
# ip = 127.0.0.1
# port = 11000
# node_timeout = 10
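
A hedged sketch (illustrative, not from this commit) of how the comma-separated memcache_servers value documented above could be split into (host, port) pairs:

    value = '10.1.2.3:11211,10.1.2.4:11211'
    servers = [s.strip() for s in value.split(',') if s.strip()]
    pairs = [(host, int(port))
             for host, port in (s.rsplit(':', 1) for s in servers)]
    # -> [('10.1.2.3', 11211), ('10.1.2.4', 11211)]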
19
etc/rsyncd.conf-sample
Normal file
@ -0,0 +1,19 @@
uid = swift
gid = swift
log file = /var/log/rsyncd.log
pid file = /var/run/rsyncd.pid

[account]
max connections = 2
path = /srv/node
read only = false

[container]
max connections = 4
path = /srv/node
read only = false

[object]
max connections = 8
path = /srv/node
read only = false
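
All three rsync modules point at the same /srv/node tree; the split exists so each service can carry its own connection limit. As an assumption for illustration (the exact remote path layout is not shown in this diff), a replicator would reach a remote device through the module name, roughly:

    # e.g. 10.0.0.2::object/sda1 maps to /srv/node/sda1 on the remote host
    rsync_module = '%s::object/%s' % (node['ip'], node['device'])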
12
etc/stats.conf-sample
Normal file
@ -0,0 +1,12 @@
[stats]
auth_url = http://saio:11000/auth
auth_user = test:tester
auth_key = testing
# swift_dir = /etc/swift
# dispersion_coverage = 1
# container_put_count = 1000
# object_put_count = 1000
# big_container_count = 1000000
# retries = 5
# concurrency = 50
# csv_output = /etc/swift/stats.csv
10
setup.cfg
Normal file
@ -0,0 +1,10 @@
[build_sphinx]
all_files = 1
build-dir = doc/build
source-dir = doc/source

[egg_info]
tag_build =
tag_date = 0
tag_svn_revision = 0
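
With Sphinx installed, the [build_sphinx] section lets the documentation build run through setuptools itself; a typical invocation against the doc/source tree listed in SOURCES.txt below would be:

    python setup.py build_sphinx   # writes the rendered docs under doc/build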
42
setup.py
@ -14,16 +14,20 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-from distutils.core import setup
+from setuptools import setup, find_packages
+
+name='swift'
+version='1.0.1'
+
 setup(
-    name='swift',
-    version='1.0.0-1',
+    name=name,
+    version=version,
     description='Swift',
     license='Apache License (2.0)',
     author='OpenStack, LLC.',
     url='https://launchpad.net/swift',
-    packages=['swift', 'swift.common'],
+    packages=find_packages(exclude=['test','bin']),
+    test_suite = 'nose.collector',
     classifiers=[
         'Development Status :: 4 - Beta',
         'License :: OSI Approved :: Apache Software License',
@ -31,18 +35,20 @@ setup(
         'Programming Language :: Python :: 2.6',
         'Environment :: No Input/Output (Daemon)',
         ],
-    scripts=['bin/st.py', 'bin/swift-account-auditor.py',
-             'bin/swift-account-audit.py', 'bin/swift-account-reaper.py',
-             'bin/swift-account-replicator.py', 'bin/swift-account-server.py',
-             'bin/swift-auth-create-account.py',
-             'bin/swift-auth-recreate-accounts.py', 'bin/swift-auth-server.py',
-             'bin/swift-container-auditor.py',
-             'bin/swift-container-replicator.py',
-             'bin/swift-container-server.py', 'bin/swift-container-updater.py',
-             'bin/swift-drive-audit.py', 'bin/swift-get-nodes.py',
-             'bin/swift-init.py', 'bin/swift-object-auditor.py',
-             'bin/swift-object-info.py', 'bin/swift-object-server.py',
-             'bin/swift-object-updater.py', 'bin/swift-proxy-server.py',
-             'bin/swift-ring-builder.py', 'bin/swift-stats-populate.py',
-             'bin/swift-stats-report.py']
+    scripts=['bin/st', 'bin/swift-account-auditor',
+             'bin/swift-account-audit', 'bin/swift-account-reaper',
+             'bin/swift-account-replicator', 'bin/swift-account-server',
+             'bin/swift-auth-create-account',
+             'bin/swift-auth-recreate-accounts', 'bin/swift-auth-server',
+             'bin/swift-container-auditor',
+             'bin/swift-container-replicator',
+             'bin/swift-container-server', 'bin/swift-container-updater',
+             'bin/swift-drive-audit', 'bin/swift-get-nodes',
+             'bin/swift-init', 'bin/swift-object-auditor',
+             'bin/swift-object-info',
+             'bin/swift-object-replicator',
+             'bin/swift-object-server',
+             'bin/swift-object-updater', 'bin/swift-proxy-server',
+             'bin/swift-ring-builder', 'bin/swift-stats-populate',
+             'bin/swift-stats-report']
 )
15
swift.egg-info/PKG-INFO
Normal file
@ -0,0 +1,15 @@
Metadata-Version: 1.0
Name: swift
Version: 1.0.1
Summary: Swift
Home-page: https://launchpad.net/swift
Author: OpenStack, LLC.
Author-email: UNKNOWN
License: Apache License (2.0)
Description: UNKNOWN
Platform: UNKNOWN
Classifier: Development Status :: 4 - Beta
Classifier: License :: OSI Approved :: Apache Software License
Classifier: Operating System :: POSIX :: Linux
Classifier: Programming Language :: Python :: 2.6
Classifier: Environment :: No Input/Output (Daemon)
130
swift.egg-info/SOURCES.txt
Normal file
@ -0,0 +1,130 @@
AUTHORS
LICENSE
MANIFEST.in
README
setup.cfg
setup.py
bin/st
bin/swift-account-audit
bin/swift-account-auditor
bin/swift-account-reaper
bin/swift-account-replicator
bin/swift-account-server
bin/swift-auth-create-account
bin/swift-auth-recreate-accounts
bin/swift-auth-server
bin/swift-container-auditor
bin/swift-container-replicator
bin/swift-container-server
bin/swift-container-updater
bin/swift-drive-audit
bin/swift-get-nodes
bin/swift-init
bin/swift-object-auditor
bin/swift-object-info
bin/swift-object-replicator
bin/swift-object-server
bin/swift-object-updater
bin/swift-proxy-server
bin/swift-ring-builder
bin/swift-stats-populate
bin/swift-stats-report
doc/Makefile
doc/source/account.rst
doc/source/auth.rst
doc/source/conf.py
doc/source/container.rst
doc/source/db.rst
doc/source/development_guidelines.rst
doc/source/development_saio.rst
doc/source/index.rst
doc/source/misc.rst
doc/source/object.rst
doc/source/overview_auth.rst
doc/source/overview_reaper.rst
doc/source/overview_replication.rst
doc/source/overview_ring.rst
doc/source/proxy.rst
doc/source/ring.rst
doc/source/_static/basic.css
doc/source/_static/default.css
etc/account-server.conf-sample
etc/auth-server.conf-sample
etc/container-server.conf-sample
etc/drive-audit.conf-sample
etc/object-server.conf-sample
etc/proxy-server.conf-sample
etc/rsyncd.conf-sample
etc/stats.conf-sample
swift/__init__.py
swift.egg-info/PKG-INFO
swift.egg-info/SOURCES.txt
swift.egg-info/dependency_links.txt
swift.egg-info/top_level.txt
swift/account/__init__.py
swift/account/auditor.py
swift/account/reaper.py
swift/account/server.py
swift/auth/__init__.py
swift/auth/server.py
swift/common/__init__.py
swift/common/auth.py
swift/common/bufferedhttp.py
swift/common/client.py
swift/common/constraints.py
swift/common/db.py
swift/common/db_replicator.py
swift/common/direct_client.py
swift/common/exceptions.py
swift/common/healthcheck.py
swift/common/memcached.py
swift/common/utils.py
swift/common/wsgi.py
swift/common/ring/__init__.py
swift/common/ring/builder.py
swift/common/ring/ring.py
swift/container/__init__.py
swift/container/auditor.py
swift/container/server.py
swift/container/updater.py
swift/obj/__init__.py
swift/obj/auditor.py
swift/obj/replicator.py
swift/obj/server.py
swift/obj/updater.py
swift/proxy/__init__.py
swift/proxy/server.py
test/unit/__init__.py
test/unit/account/__init__.py
test/unit/account/test_auditor.py
test/unit/account/test_reaper.py
test/unit/account/test_server.py
test/unit/auth/__init__.py
test/unit/auth/test_server.py
test/unit/common/__init__.py
test/unit/common/test_auth.py
test/unit/common/test_bufferedhttp.py
test/unit/common/test_client.py
test/unit/common/test_constraints.py
test/unit/common/test_db.py
test/unit/common/test_db_replicator.py
test/unit/common/test_direct_client.py
test/unit/common/test_exceptions.py
test/unit/common/test_healthcheck.py
test/unit/common/test_memcached.py
test/unit/common/test_utils.py
test/unit/common/test_wsgi.py
test/unit/common/ring/__init__.py
test/unit/common/ring/test_builder.py
test/unit/common/ring/test_ring.py
test/unit/container/__init__.py
test/unit/container/test_auditor.py
test/unit/container/test_server.py
test/unit/container/test_updater.py
test/unit/obj/__init__.py
test/unit/obj/test_auditor.py
test/unit/obj/test_replicator.py
test/unit/obj/test_server.py
test/unit/obj/test_updater.py
test/unit/proxy/__init__.py
test/unit/proxy/test_server.py
1
swift.egg-info/dependency_links.txt
Normal file
@ -0,0 +1 @@
2
swift.egg-info/top_level.txt
Normal file
@ -0,0 +1,2 @@
test
swift
0
swift/account/__init__.py
Normal file
194
swift/account/auditor.py
Normal file
@ -0,0 +1,194 @@
# Copyright (c) 2010 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import socket
import time
from random import choice, random
from urllib import quote

from eventlet import Timeout

from swift.account import server as account_server
from swift.common.db import AccountBroker
from swift.common.bufferedhttp import http_connect
from swift.common.exceptions import ConnectionTimeout
from swift.common.ring import Ring
from swift.common.utils import get_logger


class AuditException(Exception):
    pass


class AccountAuditor(object):
    """Audit accounts."""

    def __init__(self, server_conf, auditor_conf):
        self.logger = get_logger(auditor_conf, 'account-auditor')
        self.devices = server_conf.get('devices', '/srv/node')
        self.mount_check = server_conf.get('mount_check', 'true').lower() in \
            ('true', 't', '1', 'on', 'yes', 'y')
        self.interval = int(auditor_conf.get('interval', 1800))
        swift_dir = server_conf.get('swift_dir', '/etc/swift')
        self.container_ring_path = os.path.join(swift_dir, 'container.ring.gz')
        self.container_ring = None
        self.node_timeout = int(auditor_conf.get('node_timeout', 10))
        self.conn_timeout = float(auditor_conf.get('conn_timeout', 0.5))
        self.max_container_count = \
            int(auditor_conf.get('max_container_count', 100))
        self.container_passes = 0
        self.container_failures = 0
        self.container_errors = 0

    def get_container_ring(self):
        """
        Get the container ring. Load the ring if necessary.

        :returns: container ring
        """
        if not self.container_ring:
            self.logger.debug(
                'Loading container ring from %s' % self.container_ring_path)
            self.container_ring = Ring(self.container_ring_path)
        return self.container_ring

    def audit_forever(self):  # pragma: no cover
        """Run the account audit until stopped."""
        reported = time.time()
        time.sleep(random() * self.interval)
        while True:
            begin = time.time()
            pids = []
            for device in os.listdir(self.devices):
                if self.mount_check and not \
                        os.path.ismount(os.path.join(self.devices, device)):
                    self.logger.debug(
                        'Skipping %s as it is not mounted' % device)
                    continue
                self.account_audit(device)
            if time.time() - reported >= 3600:  # once an hour
                self.logger.info(
                    'Since %s: Remote audits with containers: %s passed '
                    'audit, %s failed audit, %s errors' %
                    (time.ctime(reported), self.container_passes,
                     self.container_failures, self.container_errors))
                reported = time.time()
                self.container_passes = 0
                self.container_failures = 0
                self.container_errors = 0
            elapsed = time.time() - begin
            if elapsed < self.interval:
                time.sleep(self.interval - elapsed)

    def audit_once(self):
        """Run the account audit once."""
        self.logger.info('Begin account audit "once" mode')
        begin = time.time()
        for device in os.listdir(self.devices):
            if self.mount_check and \
                    not os.path.ismount(os.path.join(self.devices, device)):
                self.logger.debug(
                    'Skipping %s as it is not mounted' % device)
                continue
            self.account_audit(device)
        elapsed = time.time() - begin
        self.logger.info(
            'Account audit "once" mode completed: %.02fs' % elapsed)

    def account_audit(self, device):
        """
        Audit any accounts found on the device.

        :param device: device to audit
        """
        datadir = os.path.join(self.devices, device, account_server.DATADIR)
        if not os.path.exists(datadir):
            return
        broker = None
        partition = None
        attempts = 100
        while not broker and attempts:
            attempts -= 1
            try:
                partition = choice(os.listdir(datadir))
                fpath = os.path.join(datadir, partition)
                if not os.path.isdir(fpath):
                    continue
                suffix = choice(os.listdir(fpath))
                fpath = os.path.join(fpath, suffix)
                if not os.path.isdir(fpath):
                    continue
                hsh = choice(os.listdir(fpath))
                fpath = os.path.join(fpath, hsh)
                if not os.path.isdir(fpath):
                    continue
            except IndexError:
                continue
            for fname in sorted(os.listdir(fpath), reverse=True):
                if fname.endswith('.db'):
                    broker = AccountBroker(os.path.join(fpath, fname))
                    if broker.is_deleted():
                        broker = None
                    break
        if not broker:
            return
        info = broker.get_info()
        for container in broker.get_random_containers(
                max_count=self.max_container_count):
            found = False
            results = []
            part, nodes = \
                self.get_container_ring().get_nodes(info['account'], container)
            for node in nodes:
                try:
                    with ConnectionTimeout(self.conn_timeout):
                        conn = http_connect(node['ip'], node['port'],
                                node['device'], part, 'HEAD',
                                '/%s/%s' % (info['account'], container))
                    with Timeout(self.node_timeout):
                        resp = conn.getresponse()
                        body = resp.read()
                    if 200 <= resp.status <= 299:
                        found = True
                        break
                    else:
                        results.append('%s:%s/%s %s %s' % (node['ip'],
                            node['port'], node['device'], resp.status,
                            resp.reason))
                except socket.error, err:
                    results.append('%s:%s/%s Socket Error: %s' % (node['ip'],
                        node['port'], node['device'], err))
                except ConnectionTimeout:
                    results.append(
                        '%(ip)s:%(port)s/%(device)s ConnectionTimeout' % node)
                except Timeout:
                    results.append('%(ip)s:%(port)s/%(device)s Timeout' % node)
                except Exception, err:
                    self.logger.exception('ERROR With remote server '
                        '%(ip)s:%(port)s/%(device)s' % node)
                    results.append('%s:%s/%s Exception: %s' % (node['ip'],
                        node['port'], node['device'], err))
            if found:
                self.container_passes += 1
                self.logger.debug('Audit passed for /%s %s container %s' %
                    (info['account'], broker.db_file, container))
            else:
                self.container_errors += 1
                self.logger.error('ERROR Could not find container /%s/%s '
                    'referenced by %s on any of the primary container '
                    'servers it should be on: %s' % (info['account'],
                    container, broker.db_file, results))
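
A minimal driving sketch for the auditor above (assumed wiring for illustration; in practice bin/swift-account-auditor builds these dicts from account-server.conf, and a ring and device tree must exist for a pass to do anything):

    server_conf = {'devices': '/srv/node', 'mount_check': 'false',
                   'swift_dir': '/etc/swift'}
    auditor_conf = {'interval': '1800', 'max_container_count': '100'}
    auditor = AccountAuditor(server_conf, auditor_conf)
    auditor.audit_once()   # one pass; audit_forever() loops on the interval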
407
swift/account/reaper.py
Normal file
@ -0,0 +1,407 @@
# Copyright (c) 2010 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import random
from logging import DEBUG
from math import sqrt
from time import time

from eventlet import GreenPool, sleep

from swift.account.server import DATADIR
from swift.common.db import AccountBroker
from swift.common.direct_client import ClientException, \
    direct_delete_container, direct_delete_object, direct_get_container
from swift.common.ring import Ring
from swift.common.utils import get_logger, whataremyips


class AccountReaper(object):
    """
    Removes data from status=DELETED accounts. These are accounts that have
    been asked to be removed by the reseller via the services
    remove_storage_account XMLRPC call.

    The account is not deleted immediately by the services call, but instead
    the account is simply marked for deletion by setting the status column in
    the account_stat table of the account database. This account reaper scans
    for such accounts and removes the data in the background. The background
    deletion process will occur on the primary account server for the account.

    :param server_conf: The [account-server] dictionary of the account server
                        configuration file
    :param reaper_conf: The [account-reaper] dictionary of the account server
                        configuration file

    See the etc/account-server.conf-sample for information on the possible
    configuration parameters.
    """

    log_name = 'account-reaper'

    def __init__(self, server_conf, reaper_conf):
        self.logger = get_logger(reaper_conf, self.log_name)
        self.devices = server_conf.get('devices', '/srv/node')
        self.mount_check = server_conf.get('mount_check', 'true').lower() in \
            ('true', 't', '1', 'on', 'yes', 'y')
        self.interval = int(reaper_conf.get('interval', 3600))
        swift_dir = server_conf.get('swift_dir', '/etc/swift')
        self.account_ring_path = os.path.join(swift_dir, 'account.ring.gz')
        self.container_ring_path = os.path.join(swift_dir, 'container.ring.gz')
        self.object_ring_path = os.path.join(swift_dir, 'object.ring.gz')
        self.account_ring = None
        self.container_ring = None
        self.object_ring = None
        self.node_timeout = int(reaper_conf.get('node_timeout', 10))
        self.conn_timeout = float(reaper_conf.get('conn_timeout', 0.5))
        self.myips = whataremyips()
        self.concurrency = int(reaper_conf.get('concurrency', 25))
        self.container_concurrency = self.object_concurrency = \
            sqrt(self.concurrency)
        self.container_pool = GreenPool(size=self.container_concurrency)

    def get_account_ring(self):
        """ The account :class:`swift.common.ring.Ring` for the cluster. """
        if not self.account_ring:
            self.logger.debug(
                'Loading account ring from %s' % self.account_ring_path)
            self.account_ring = Ring(self.account_ring_path)
        return self.account_ring

    def get_container_ring(self):
        """ The container :class:`swift.common.ring.Ring` for the cluster. """
        if not self.container_ring:
            self.logger.debug(
                'Loading container ring from %s' % self.container_ring_path)
            self.container_ring = Ring(self.container_ring_path)
        return self.container_ring

    def get_object_ring(self):
        """ The object :class:`swift.common.ring.Ring` for the cluster. """
        if not self.object_ring:
            self.logger.debug(
                'Loading object ring from %s' % self.object_ring_path)
            self.object_ring = Ring(self.object_ring_path)
        return self.object_ring

    def reap_forever(self):
        """
        Main entry point when running the reaper in its normal daemon mode.
        This repeatedly calls :func:`reap_once` no quicker than the
        configuration interval.
        """
        self.logger.debug('Daemon started.')
        sleep(random.random() * self.interval)
        while True:
            begin = time()
            self.reap_once()
            elapsed = time() - begin
            if elapsed < self.interval:
                sleep(self.interval - elapsed)

    def reap_once(self):
        """
        Main entry point when running the reaper in 'once' mode, where it will
        do a single pass over all accounts on the server. This is called
        repeatedly by :func:`reap_forever`. This will call :func:`reap_device`
        once for each device on the server.
        """
        self.logger.debug('Begin devices pass: %s' % self.devices)
        begin = time()
        for device in os.listdir(self.devices):
            if self.mount_check and \
                    not os.path.ismount(os.path.join(self.devices, device)):
                self.logger.debug(
                    'Skipping %s as it is not mounted' % device)
                continue
            self.reap_device(device)
        elapsed = time() - begin
        self.logger.info('Devices pass completed: %.02fs' % elapsed)

    def reap_device(self, device):
        """
        Called once per pass for each device on the server. This will scan the
        accounts directory for the device, looking for partitions this device
        is the primary for, then looking for account databases that are marked
        status=DELETED and still have containers and calling
        :func:`reap_account`. Account databases marked status=DELETED that no
        longer have containers will eventually be permanently removed by the
        reclaim process within the account replicator (see
        :mod:`swift.db_replicator`).

        :param device: The device to look for accounts to be deleted.
        """
        datadir = os.path.join(self.devices, device, DATADIR)
        if not os.path.exists(datadir):
            return
        for partition in os.listdir(datadir):
            partition_path = os.path.join(datadir, partition)
            if not partition.isdigit():
                continue
            nodes = self.get_account_ring().get_part_nodes(int(partition))
            if nodes[0]['ip'] not in self.myips or \
                    not os.path.isdir(partition_path):
                continue
            for suffix in os.listdir(partition_path):
                suffix_path = os.path.join(partition_path, suffix)
                if not os.path.isdir(suffix_path):
                    continue
                for hsh in os.listdir(suffix_path):
                    hsh_path = os.path.join(suffix_path, hsh)
                    if not os.path.isdir(hsh_path):
                        continue
                    for fname in sorted(os.listdir(hsh_path), reverse=True):
                        if fname.endswith('.ts'):
                            break
                        elif fname.endswith('.db'):
                            broker = \
                                AccountBroker(os.path.join(hsh_path, fname))
                            if broker.is_status_deleted() and \
                                    not broker.empty():
                                self.reap_account(broker, partition, nodes)

    def reap_account(self, broker, partition, nodes):
        """
        Called once per pass for each account this server is the primary for
        and attempts to delete the data for the given account. The reaper will
        only delete one account at any given time. It will call
        :func:`reap_container` up to sqrt(self.concurrency) times concurrently
        while reaping the account.

        If there is any exception while deleting a single container, the
        process will continue for any other containers and the failed
        containers will be tried again the next time this function is called
        with the same parameters.

        If there is any exception while listing the containers for deletion,
        the process will stop (but will obviously be tried again the next time
        this function is called with the same parameters). This isn't likely
        since the listing comes from the local database.

        After the process completes (successfully or not), statistics about
        what was accomplished will be logged.

        This function returns nothing and should raise no exception but only
        update various self.stats_* values for what occurs.

        :param broker: The AccountBroker for the account to delete.
        :param partition: The partition in the account ring the account is on.
        :param nodes: The primary node dicts for the account to delete.

        * See also: :class:`swift.common.db.AccountBroker` for the broker class.
        * See also: :func:`swift.common.ring.Ring.get_nodes` for a description
          of the node dicts.
        """
        begin = time()
        account = broker.get_info()['account']
        self.logger.info('Beginning pass on account %s' % account)
        self.stats_return_codes = {}
        self.stats_containers_deleted = 0
        self.stats_objects_deleted = 0
        self.stats_containers_remaining = 0
        self.stats_objects_remaining = 0
        self.stats_containers_possibly_remaining = 0
        self.stats_objects_possibly_remaining = 0
        try:
            marker = ''
            while True:
                containers = \
                    list(broker.list_containers_iter(1000, marker, None, None))
                if not containers:
                    break
                try:
                    for (container, _, _, _) in containers:
                        self.container_pool.spawn(self.reap_container, account,
                            partition, nodes, container)
                    self.container_pool.waitall()
                except Exception:
                    self.logger.exception(
                        'Exception with containers for account %s' % account)
                marker = containers[-1][0]
            log = 'Completed pass on account %s' % account
        except Exception:
            self.logger.exception(
                'Exception with account %s' % account)
            log = 'Incomplete pass on account %s' % account
        if self.stats_containers_deleted:
            log += ', %s containers deleted' % self.stats_containers_deleted
        if self.stats_objects_deleted:
            log += ', %s objects deleted' % self.stats_objects_deleted
        if self.stats_containers_remaining:
            log += ', %s containers remaining' % self.stats_containers_remaining
        if self.stats_objects_remaining:
            log += ', %s objects remaining' % self.stats_objects_remaining
        if self.stats_containers_possibly_remaining:
            log += ', %s containers possibly remaining' % \
                self.stats_containers_possibly_remaining
        if self.stats_objects_possibly_remaining:
            log += ', %s objects possibly remaining' % \
                self.stats_objects_possibly_remaining
        if self.stats_return_codes:
            log += ', return codes: '
            for code in sorted(self.stats_return_codes.keys()):
                log += '%s %sxxs, ' % (self.stats_return_codes[code], code)
            log = log[:-2]
        log += ', elapsed: %.02fs' % (time() - begin)
        self.logger.info(log)

    def reap_container(self, account, account_partition, account_nodes,
                       container):
        """
        Deletes the data and the container itself for the given container. This
        will call :func:`reap_object` up to sqrt(self.concurrency) times
        concurrently for the objects in the container.

        If there is any exception while deleting a single object, the process
        will continue for any other objects in the container and the failed
        objects will be tried again the next time this function is called with
        the same parameters.

        If there is any exception while listing the objects for deletion, the
        process will stop (but will obviously be tried again the next time this
        function is called with the same parameters). This is a possibility
        since the listing comes from querying just the primary remote container
        server.

        Once deletion has been attempted for all objects, the container itself
        will be attempted to be deleted by sending a delete request to all
        container nodes. The format of the delete request is such that each
        container server will update a corresponding account server, removing
        the container from the account's listing.

        This function returns nothing and should raise no exception but only
        update various self.stats_* values for what occurs.

        :param account: The name of the account for the container.
        :param account_partition: The partition for the account on the account
                                  ring.
        :param account_nodes: The primary node dicts for the account.
        :param container: The name of the container to delete.

        * See also: :func:`swift.common.ring.Ring.get_nodes` for a description
          of the account node dicts.
        """
        account_nodes = list(account_nodes)
        part, nodes = self.get_container_ring().get_nodes(account, container)
        node = nodes[-1]
        pool = GreenPool(size=self.object_concurrency)
        marker = ''
        while True:
            objects = None
            try:
                objects = direct_get_container(node, part, account, container,
                    marker=marker, conn_timeout=self.conn_timeout,
                    response_timeout=self.node_timeout)
                self.stats_return_codes[2] = \
                    self.stats_return_codes.get(2, 0) + 1
            except ClientException, err:
                if self.logger.getEffectiveLevel() <= DEBUG:
                    self.logger.exception(
                        'Exception with %(ip)s:%(port)s/%(device)s' % node)
                self.stats_return_codes[err.http_status / 100] = \
                    self.stats_return_codes.get(err.http_status / 100, 0) + 1
            if not objects:
                break
            try:
                for obj in objects:
                    if isinstance(obj['name'], unicode):
                        obj['name'] = obj['name'].encode('utf8')
                    pool.spawn(self.reap_object, account, container, part,
                        nodes, obj['name'])
                pool.waitall()
            except Exception:
                self.logger.exception('Exception with objects for container '
                    '%s for account %s' % (container, account))
            marker = objects[-1]['name']
        successes = 0
        failures = 0
        for node in nodes:
            anode = account_nodes.pop()
            try:
                direct_delete_container(node, part, account, container,
                    conn_timeout=self.conn_timeout,
                    response_timeout=self.node_timeout,
                    headers={'X-Account-Host': '%(ip)s:%(port)s' % anode,
                             'X-Account-Partition': str(account_partition),
                             'X-Account-Device': anode['device'],
                             'X-Account-Override-Deleted': 'yes'})
                successes += 1
                self.stats_return_codes[2] = \
                    self.stats_return_codes.get(2, 0) + 1
            except ClientException, err:
                if self.logger.getEffectiveLevel() <= DEBUG:
                    self.logger.exception(
                        'Exception with %(ip)s:%(port)s/%(device)s' % node)
                failures += 1
                self.stats_return_codes[err.http_status / 100] = \
                    self.stats_return_codes.get(err.http_status / 100, 0) + 1
        if successes > failures:
            self.stats_containers_deleted += 1
        elif not successes:
            self.stats_containers_remaining += 1
        else:
            self.stats_containers_possibly_remaining += 1

    def reap_object(self, account, container, container_partition,
                    container_nodes, obj):
        """
        Deletes the given object by issuing a delete request to each node for
        the object. The format of the delete request is such that each object
        server will update a corresponding container server, removing the
        object from the container's listing.

        This function returns nothing and should raise no exception but only
        update various self.stats_* values for what occurs.

        :param account: The name of the account for the object.
        :param container: The name of the container for the object.
        :param container_partition: The partition for the container on the
                                    container ring.
        :param container_nodes: The primary node dicts for the container.
        :param obj: The name of the object to delete.

        * See also: :func:`swift.common.ring.Ring.get_nodes` for a description
          of the container node dicts.
        """
        container_nodes = list(container_nodes)
        part, nodes = self.get_object_ring().get_nodes(account, container, obj)
        successes = 0
        failures = 0
        for node in nodes:
            cnode = container_nodes.pop()
            try:
                direct_delete_object(node, part, account, container, obj,
                    conn_timeout=self.conn_timeout,
                    response_timeout=self.node_timeout,
                    headers={'X-Container-Host': '%(ip)s:%(port)s' % cnode,
                             'X-Container-Partition': str(container_partition),
                             'X-Container-Device': cnode['device']})
                successes += 1
                self.stats_return_codes[2] = \
                    self.stats_return_codes.get(2, 0) + 1
            except ClientException, err:
                if self.logger.getEffectiveLevel() <= DEBUG:
                    self.logger.exception(
                        'Exception with %(ip)s:%(port)s/%(device)s' % node)
                failures += 1
                self.stats_return_codes[err.http_status / 100] = \
                    self.stats_return_codes.get(err.http_status / 100, 0) + 1
        if successes > failures:
            self.stats_objects_deleted += 1
        elif not successes:
            self.stats_objects_remaining += 1
        else:
            self.stats_objects_possibly_remaining += 1
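
Worth spelling out from __init__ above: the single concurrency setting is split across the two nesting levels, so container reaping and the object deletions inside each container each get sqrt(concurrency) greenthreads, keeping the total object-level fan-out near the configured value. A quick worked check:

    from math import sqrt
    concurrency = 25               # the [account-reaper] default above
    per_pool = sqrt(concurrency)   # 5.0: containers in flight, and objects
                                   # in flight per container, ~25 overall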
295
swift/account/server.py
Normal file
@ -0,0 +1,295 @@
# Copyright (c) 2010 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import with_statement
import errno
import os
import time
import traceback
from datetime import datetime
from urllib import unquote
from swift.common.utils import get_logger

import sqlite3
from webob import Request, Response
from webob.exc import HTTPAccepted, HTTPBadRequest, HTTPConflict, \
    HTTPCreated, HTTPForbidden, HTTPInternalServerError, \
    HTTPMethodNotAllowed, HTTPNoContent, HTTPNotFound, HTTPPreconditionFailed
import simplejson
from xml.sax import saxutils

from swift.common import ACCOUNT_LISTING_LIMIT
from swift.common.db import AccountBroker
from swift.common.exceptions import MessageTimeout
from swift.common.utils import get_param, split_path, storage_directory, \
    hash_path
from swift.common.constraints import check_mount, check_float, \
    check_xml_encodable
from swift.common.healthcheck import healthcheck
from swift.common.db_replicator import ReplicatorRpc


DATADIR = 'accounts'


class AccountController(object):
    """WSGI controller for the account server."""
    log_name = 'account'

    def __init__(self, conf):
        self.logger = get_logger(conf, self.log_name)
        self.root = conf.get('devices', '/srv/node')
        self.mount_check = conf.get('mount_check', 'true').lower() in \
            ('true', 't', '1', 'on', 'yes', 'y')
        self.replicator_rpc = \
            ReplicatorRpc(self.root, DATADIR, AccountBroker, self.mount_check)

    def _get_account_broker(self, drive, part, account):
        hsh = hash_path(account)
        db_dir = storage_directory(DATADIR, part, hsh)
        db_path = os.path.join(self.root, drive, db_dir, hsh + '.db')
        return AccountBroker(db_path, account=account, logger=self.logger)

    def DELETE(self, req):
        """Handle HTTP DELETE request."""
        try:
            drive, part, account = split_path(unquote(req.path), 3)
        except ValueError, err:
            return HTTPBadRequest(body=str(err), content_type='text/plain',
                                  request=req)
        if self.mount_check and not check_mount(self.root, drive):
            return Response(status='507 %s is not mounted' % drive)
        if 'x-timestamp' not in req.headers or \
                not check_float(req.headers['x-timestamp']):
            return HTTPBadRequest(body='Missing timestamp', request=req,
                                  content_type='text/plain')
        broker = self._get_account_broker(drive, part, account)
        if broker.is_deleted():
            return HTTPNotFound(request=req)
        broker.delete_db(req.headers['x-timestamp'])
        return HTTPNoContent(request=req)

    def PUT(self, req):
        """Handle HTTP PUT request."""
        drive, part, account, container = split_path(unquote(req.path), 3, 4)
        if self.mount_check and not check_mount(self.root, drive):
            return Response(status='507 %s is not mounted' % drive)
        broker = self._get_account_broker(drive, part, account)
        if container:   # put account container
            if 'x-cf-trans-id' in req.headers:
                broker.pending_timeout = 3
            if req.headers.get('x-account-override-deleted', 'no').lower() != \
                    'yes' and broker.is_deleted():
                return HTTPNotFound(request=req)
            broker.put_container(container, req.headers['x-put-timestamp'],
                req.headers['x-delete-timestamp'],
                req.headers['x-object-count'],
                req.headers['x-bytes-used'])
            if req.headers['x-delete-timestamp'] > \
                    req.headers['x-put-timestamp']:
                return HTTPNoContent(request=req)
            else:
                return HTTPCreated(request=req)
        else:   # put account
            if not os.path.exists(broker.db_file):
                broker.initialize(req.headers['x-timestamp'])
                return HTTPCreated(request=req)
            elif broker.is_status_deleted():
                return HTTPForbidden(request=req, body='Recently deleted')
            else:
                broker.update_put_timestamp(req.headers['x-timestamp'])
                return HTTPAccepted(request=req)

    def HEAD(self, req):
        """Handle HTTP HEAD request."""
        # TODO: Refactor: The account server used to provide an 'account and
        # container existence check all-in-one' call by doing a HEAD with a
        # container path. However, container existence is now checked with the
        # container servers directly so this is no longer needed. We should
        # refactor out the container existence check here and retest
        # everything.
        try:
            drive, part, account, container = split_path(unquote(req.path), 3, 4)
        except ValueError, err:
            return HTTPBadRequest(body=str(err), content_type='text/plain',
                                  request=req)
        if self.mount_check and not check_mount(self.root, drive):
            return Response(status='507 %s is not mounted' % drive)
        broker = self._get_account_broker(drive, part, account)
        if not container:
            broker.pending_timeout = 0.1
            broker.stale_reads_ok = True
        if broker.is_deleted():
            return HTTPNotFound(request=req)
        info = broker.get_info()
        headers = {
            'X-Account-Container-Count': info['container_count'],
            'X-Account-Object-Count': info['object_count'],
            'X-Account-Bytes-Used': info['bytes_used'],
            'X-Timestamp': info['created_at'],
            'X-PUT-Timestamp': info['put_timestamp'],
        }
        if container:
            container_ts = broker.get_container_timestamp(container)
            if container_ts is not None:
                headers['X-Container-Timestamp'] = container_ts
        return HTTPNoContent(request=req, headers=headers)

    def GET(self, req):
        """Handle HTTP GET request."""
        try:
            drive, part, account = split_path(unquote(req.path), 3)
        except ValueError, err:
            return HTTPBadRequest(body=str(err), content_type='text/plain',
                                  request=req)
        if self.mount_check and not check_mount(self.root, drive):
            return Response(status='507 %s is not mounted' % drive)
        broker = self._get_account_broker(drive, part, account)
        broker.pending_timeout = 0.1
        broker.stale_reads_ok = True
        if broker.is_deleted():
            return HTTPNotFound(request=req)
        info = broker.get_info()
        resp_headers = {
            'X-Account-Container-Count': info['container_count'],
            'X-Account-Object-Count': info['object_count'],
            'X-Account-Bytes-Used': info['bytes_used'],
            'X-Timestamp': info['created_at'],
            'X-PUT-Timestamp': info['put_timestamp']
        }
        try:
            prefix = get_param(req, 'prefix')
            delimiter = get_param(req, 'delimiter')
            if delimiter and (len(delimiter) > 1 or ord(delimiter) > 254):
                # delimiters can be made more flexible later
                return HTTPPreconditionFailed(body='Bad delimiter')
            limit = ACCOUNT_LISTING_LIMIT
            given_limit = get_param(req, 'limit')
            if given_limit and given_limit.isdigit():
                limit = int(given_limit)
                if limit > ACCOUNT_LISTING_LIMIT:
                    return HTTPPreconditionFailed(request=req,
                        body='Maximum limit is %d' % ACCOUNT_LISTING_LIMIT)
            marker = get_param(req, 'marker', '')
            query_format = get_param(req, 'format')
        except UnicodeDecodeError, err:
            return HTTPBadRequest(body='parameters not utf8',
                                  content_type='text/plain', request=req)
        header_format = req.accept.first_match(['text/plain',
                                                'application/json',
                                                'application/xml'])
        format = query_format if query_format else header_format
        if format.startswith('application/'):
            format = format[12:]
        account_list = broker.list_containers_iter(limit, marker, prefix,
                                                   delimiter)
        if format == 'json':
            out_content_type = 'application/json'
            json_pattern = ['"name":%s', '"count":%s', '"bytes":%s']
            json_pattern = '{' + ','.join(json_pattern) + '}'
            json_out = []
            for (name, object_count, bytes_used, is_subdir) in account_list:
                name = simplejson.dumps(name)
                if is_subdir:
                    json_out.append('{"subdir":%s}' % name)
                else:
                    json_out.append(json_pattern %
                                    (name, object_count, bytes_used))
            account_list = '[' + ','.join(json_out) + ']'
        elif format == 'xml':
            out_content_type = 'application/xml'
            output_list = ['<?xml version="1.0" encoding="UTF-8"?>',
                           '<account name="%s">' % account]
            for (name, object_count, bytes_used, is_subdir) in account_list:
                name = saxutils.escape(name)
                if is_subdir:
                    output_list.append('<subdir name="%s" />' % name)
                else:
                    item = '<container><name>%s</name><count>%s</count>' \
                           '<bytes>%s</bytes></container>' % \
                           (name, object_count, bytes_used)
                    output_list.append(item)
            output_list.append('</account>')
            account_list = '\n'.join(output_list)
        else:
            if not account_list:
                return HTTPNoContent(request=req, headers=resp_headers)
            out_content_type = 'text/plain'
            account_list = '\n'.join(r[0] for r in account_list) + '\n'
        ret = Response(body=account_list, request=req, headers=resp_headers)
        ret.content_type = out_content_type
        ret.charset = 'utf8'
        return ret

    def POST(self, req):
        """
        Handle HTTP POST request.
        Handler for RPC calls for account replication.
        """
        try:
            post_args = split_path(unquote(req.path), 3)
        except ValueError, err:
            return HTTPBadRequest(body=str(err), content_type='text/plain',
                                  request=req)
        drive, partition, hash = post_args
        if self.mount_check and not check_mount(self.root, drive):
            return Response(status='507 %s is not mounted' % drive)
        try:
            args = simplejson.load(req.body_file)
        except ValueError, err:
            return HTTPBadRequest(body=str(err), content_type='text/plain')
        ret = self.replicator_rpc.dispatch(post_args, args)
        ret.request = req
        return ret

    def __call__(self, env, start_response):
        start_time = time.time()
        req = Request(env)
        if req.path_info == '/healthcheck':
            return healthcheck(req)(env, start_response)
        elif not check_xml_encodable(req.path_info):
            res = HTTPPreconditionFailed(body='Invalid UTF8')
        else:
            try:
                if hasattr(self, req.method):
                    res = getattr(self, req.method)(req)
                else:
                    res = HTTPMethodNotAllowed()
            except:
                self.logger.exception('ERROR __call__ error with %s %s '
                    'transaction %s' % (env.get('REQUEST_METHOD', '-'),
                    env.get('PATH_INFO', '-'), env.get('HTTP_X_CF_TRANS_ID',
                    '-')))
                res = HTTPInternalServerError(body=traceback.format_exc())
        trans_time = '%.4f' % (time.time() - start_time)
        additional_info = ''
        if res.headers.get('x-container-timestamp') is not None:
            additional_info += 'x-container-timestamp: %s' % \
                res.headers['x-container-timestamp']
        log_message = '%s - - [%s] "%s %s" %s %s "%s" "%s" "%s" %s "%s"' % (
            req.remote_addr,
            time.strftime('%d/%b/%Y:%H:%M:%S +0000', time.gmtime()),
            req.method, req.path,
            res.status.split()[0], res.content_length or '-',
            req.headers.get('x-cf-trans-id', '-'),
            req.referer or '-', req.user_agent or '-',
            trans_time,
            additional_info)
        if req.method.upper() == 'POST':
            self.logger.debug(log_message)
        else:
            self.logger.info(log_message)
        return res(env, start_response)
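
Because AccountController is a plain WSGI callable, it can be exercised directly with webob in the style of the unit tests; a sketch (assumed request values, and the sda1 device plus an account DB must already exist for a real 2xx):

    from webob import Request

    controller = AccountController({'devices': '/srv/node',
                                    'mount_check': 'false'})
    # HEAD /<drive>/<partition>/<account>
    req = Request.blank('/sda1/0/a', environ={'REQUEST_METHOD': 'HEAD'})
    resp = req.get_response(controller)
    print resp.status, resp.headers.get('X-Account-Container-Count')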
0
swift/auth/__init__.py
Normal file
503
swift/auth/server.py
Normal file
@ -0,0 +1,503 @@
|
|||||||
|
# Copyright (c) 2010 OpenStack, LLC.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||||
|
# implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
|
||||||
|
from __future__ import with_statement
|
||||||
|
import errno
|
||||||
|
import os
|
||||||
|
import socket
|
||||||
|
from contextlib import contextmanager
|
||||||
|
from time import gmtime, strftime, time
|
||||||
|
from urllib import unquote, quote
|
||||||
|
from uuid import uuid4
|
||||||
|
|
||||||
|
from webob import Request, Response
|
||||||
|
from webob.exc import HTTPBadRequest, HTTPNoContent, HTTPUnauthorized, \
|
||||||
|
HTTPServiceUnavailable, HTTPNotFound
|
||||||
|
|
||||||
|
from swift.common.bufferedhttp import http_connect
|
||||||
|
from swift.common.db import DatabaseConnectionError, get_db_connection
|
||||||
|
from swift.common.ring import Ring
|
||||||
|
from swift.common.utils import get_logger, normalize_timestamp, split_path
|
||||||
|
|
||||||
|
|
||||||
|
class AuthController(object):
    """
    Sample implementation of an authorization server for development work. This
    server only implements the basic functionality and isn't written for high
    availability or to scale to thousands (or even hundreds) of requests per
    second. It is mainly for use by developers working on the rest of the
    system.

    The design of the auth system was restricted by a couple of existing
    systems.

    This implementation stores an account name, user name, and password (in
    plain text!) as well as a corresponding Swift cluster url and account hash.
    One existing auth system used account, user, and password whereas another
    used just account and an "API key". Here, we support both systems with
    their various, sometimes colliding headers.

    The most common use case is by the end user:

    * The user makes a ReST call to the auth server requesting a token and url
      to use to access the Swift cluster.
    * The auth system validates the user info and returns a token and url for
      the user to use with the Swift cluster.
    * The user makes a ReST call to the Swift cluster using the url given with
      the token as the X-Auth-Token header.
    * The Swift cluster makes a ReST call to the auth server to validate the
      token for the given account hash, caching the result for future requests
      up to the expiration the auth server returns.
    * The auth server validates the token / account hash given and returns the
      expiration for the token.
    * The Swift cluster completes the user's request.

    Another use case is creating a new account:

    * The developer makes a ReST call to create a new account.
    * The auth server makes ReST calls to the Swift cluster's account servers
      to create a new account on its end.
    * The auth server records the information in its database.

    A last use case is recreating existing accounts; this is really only useful
    on a development system when the drives are reformatted quite often but
    the auth server's database is retained:

    * A developer makes a ReST call to have the existing accounts recreated.
    * For each account in its database, the auth server makes ReST calls to
      the Swift cluster's account servers to create a specific account on its
      end.

    :param conf: The [auth-server] dictionary of the auth server configuration
                 file
    :param ring: Overrides loading the account ring from a file; useful for
                 testing.

    See the etc/auth-server.conf-sample for information on the possible
    configuration parameters.
    """

    log_name = 'auth'

    def __init__(self, conf, ring=None):
        self.logger = get_logger(conf, self.log_name)
        self.swift_dir = conf.get('swift_dir', '/etc/swift')
        self.default_cluster_url = \
            conf.get('default_cluster_url', 'http://127.0.0.1:9000/v1')
        self.token_life = int(conf.get('token_life', 86400))
        self.log_headers = conf.get('log_headers') == 'True'
        if ring:
            self.account_ring = ring
        else:
            self.account_ring = \
                Ring(os.path.join(self.swift_dir, 'account.ring.gz'))
        self.db_file = os.path.join(self.swift_dir, 'auth.db')
        self.conn = get_db_connection(self.db_file, okay_to_create=True)
        self.conn.execute('''CREATE TABLE IF NOT EXISTS account (
                account TEXT, url TEXT, cfaccount TEXT,
                user TEXT, password TEXT)''')
        self.conn.execute('''CREATE INDEX IF NOT EXISTS ix_account_account
                ON account (account)''')
        self.conn.execute('''CREATE TABLE IF NOT EXISTS token (
                cfaccount TEXT, token TEXT, created FLOAT)''')
        self.conn.execute('''CREATE INDEX IF NOT EXISTS ix_token_cfaccount
                ON token (cfaccount)''')
        self.conn.execute('''CREATE INDEX IF NOT EXISTS ix_token_created
                ON token (created)''')
        self.conn.commit()

    def add_storage_account(self, account_name=''):
        """
        Creates an account within the Swift cluster by making a ReST call to
        each of the responsible account servers.

        :param account_name: The desired name for the account; if omitted a
                             UUID4 will be used.
        :returns: False upon failure, otherwise the name of the account
                  within the Swift cluster.
        """
        begin = time()
        orig_account_name = account_name
        if not account_name:
            account_name = str(uuid4())
        partition, nodes = self.account_ring.get_nodes(account_name)
        headers = {'X-Timestamp': normalize_timestamp(time()),
                   'x-cf-trans-id': 'tx' + str(uuid4())}
        statuses = []
        for node in nodes:
            try:
                conn = None
                conn = http_connect(node['ip'], node['port'], node['device'],
                        partition, 'PUT', '/' + account_name, headers)
                source = conn.getresponse()
                statuses.append(source.status)
                if source.status >= 500:
                    self.logger.error('ERROR With account server %s:%s/%s: '
                        'Response %s %s: %s' %
                        (node['ip'], node['port'], node['device'],
                         source.status, source.reason, source.read(1024)))
                conn = None
            except BaseException, err:
                log_call = self.logger.exception
                msg = 'ERROR With account server ' \
                      '%(ip)s:%(port)s/%(device)s (will retry later): ' % node
                if isinstance(err, socket.error):
                    if err[0] == errno.ECONNREFUSED:
                        log_call = self.logger.error
                        msg += 'Connection refused'
                    elif err[0] == errno.EHOSTUNREACH:
                        log_call = self.logger.error
                        msg += 'Host unreachable'
                log_call(msg)
        rv = False
        if len([s for s in statuses if (200 <= s < 300)]) > len(nodes) / 2:
            rv = account_name
        return rv
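
    # Note the success rule above: strictly more than half of the responsible
    # account servers (e.g. at least 2 of 3) must return a 2xx status before
    # the newly named account is reported as created.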

    @contextmanager
    def get_conn(self):
        """
        Returns a DB API connection instance to the auth server's SQLite
        database. This is a contextmanager call to be used with the 'with'
        statement. It takes no parameters.
        """
        if not self.conn:
            # We go ahead and make another db connection even if this is a
            # reentry call; just in case we had an error that caused self.conn
            # to become None. Even if we make an extra conn, we'll only keep
            # one after the 'with' block.
            self.conn = get_db_connection(self.db_file)
        conn = self.conn
        self.conn = None
        try:
            yield conn
            conn.rollback()
            self.conn = conn
        except Exception, err:
            try:
                conn.close()
            except:
                pass
            self.conn = get_db_connection(self.db_file)
            raise err

    def purge_old_tokens(self):
        """
        Removes tokens that have expired from the auth server's database. This
        is called by :func:`validate_token` and :func:`GET` to help keep the
        database clean.
        """
        with self.get_conn() as conn:
            conn.execute('DELETE FROM token WHERE created < ?',
                         (time() - self.token_life,))
            conn.commit()

    def validate_token(self, token, account_hash):
        """
        Tests if the given token is a valid token

        :param token: The token to validate
        :param account_hash: The account hash the token is being used with
        :returns: TTL if valid, False otherwise
        """
        begin = time()
        self.purge_old_tokens()
        rv = False
        with self.get_conn() as conn:
            row = conn.execute('''
                SELECT created FROM token
                WHERE cfaccount = ? AND token = ?''',
                (account_hash, token)).fetchone()
            if row is not None:
                created = row[0]
                if time() - created >= self.token_life:
                    conn.execute('''
                        DELETE FROM token
                        WHERE cfaccount = ? AND token = ?''',
                        (account_hash, token))
                    conn.commit()
                else:
                    rv = self.token_life - (time() - created)
        self.logger.info('validate_token(%s, %s, _, _) = %s [%.02f]' %
                         (repr(token), repr(account_hash), repr(rv),
                          time() - begin))
        return rv
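
    # The Swift side caches successful validations, as described in the class
    # docstring; a hypothetical caller might do:
    #     ttl = auth.validate_token(token, account_hash)
    #     if ttl:
    #         cache.set('auth/%s/%s' % (account_hash, token), ttl, timeout=ttl)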

    def create_account(self, new_account, new_user, new_password):
        """
        Handles the create_account call for developers, used to request
        an account be created both on a Swift cluster and in the auth server
        database.

        This will make ReST requests to the Swift cluster's account servers
        to have an account created on its side. The resulting account hash
        along with the URL to use to access the account, the account name, the
        user name, and the password is recorded in the auth server's database.
        The url is constructed now and stored separately to support changing
        the configuration file's default_cluster_url for directing new accounts
        to a different Swift cluster while still supporting old accounts going
        to the Swift clusters they were created on.

        :param new_account: The name for the new account
        :param new_user: The name for the new user
        :param new_password: The password for the new account

        :returns: False if the create fails, storage url if successful
        """
        begin = time()
        if not all((new_account, new_user, new_password)):
            return False
        account_hash = self.add_storage_account()
        if not account_hash:
            self.logger.info(
                'FAILED create_account(%s, %s, _,) [%.02f]' %
                (repr(new_account), repr(new_user), time() - begin))
            return False
        url = self.default_cluster_url.rstrip('/') + '/' + account_hash
        with self.get_conn() as conn:
            conn.execute('''INSERT INTO account
                (account, url, cfaccount, user, password)
                VALUES (?, ?, ?, ?, ?)''',
                (new_account, url, account_hash, new_user, new_password))
            conn.commit()
        self.logger.info(
            'SUCCESS create_account(%s, %s, _) = %s [%.02f]' %
            (repr(new_account), repr(new_user), repr(url), time() - begin))
        return url

    def recreate_accounts(self):
        """
        Recreates the accounts from the existing auth database in the Swift
        cluster. This is useful on a development system when the drives are
        reformatted quite often but the auth server's database is retained.

        :returns: A string indicating accounts and failures
        """
        begin = time()
        with self.get_conn() as conn:
            account_hashes = [r[0] for r in
                conn.execute('SELECT cfaccount FROM account').fetchall()]
        failures = []
        for i, account_hash in enumerate(account_hashes):
            if not self.add_storage_account(account_hash):
                failures.append(account_hash)
        rv = '%d accounts, failures %s' % (len(account_hashes), repr(failures))
        self.logger.info('recreate_accounts(_, _) = %s [%.02f]' %
                         (rv, time() - begin))
        return rv

    def handle_token(self, request):
        """
        Handles ReST requests from Swift to validate tokens

        Valid URL paths:

        * GET /token/<account-hash>/<token>

        If the HTTP request returns with a 204, then the token is valid,
        and the TTL of the token will be available in the X-Auth-Ttl header.

        :param request: webob.Request object
        """
        try:
            _, account_hash, token = split_path(request.path, minsegs=3)
        except ValueError:
            return HTTPBadRequest()
        ttl = self.validate_token(token, account_hash)
        if not ttl:
            return HTTPNotFound()
        return HTTPNoContent(headers={'x-auth-ttl': ttl})

    def handle_account_create(self, request):
        """
        Handles ReST requests from developers to have an account created.

        Valid URL paths:

        * PUT /account/<account-name>/<user-name> - create the account

        Valid headers:

        * X-Auth-Key: <password> (Only required when creating an account)

        If the HTTP request returns with a 204, then the account was created,
        and the storage url will be available in the X-Storage-Url header.

        :param request: webob.Request object
        """
        try:
            _, account_name, user_name = split_path(request.path, minsegs=3)
        except ValueError:
            return HTTPBadRequest()
        if 'X-Auth-Key' not in request.headers:
            return HTTPBadRequest('X-Auth-Key is required')
        password = request.headers['x-auth-key']
        storage_url = self.create_account(account_name, user_name, password)
        if not storage_url:
            return HTTPServiceUnavailable()
        return HTTPNoContent(headers={'x-storage-url': storage_url})
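
    # A hypothetical invocation of the handler above from a shell:
    #     curl -X PUT -H 'X-Auth-Key: secret' \
    #         http://127.0.0.1:11000/account/myaccount/myuser
    # A 204 reply carries the new account's storage url in X-Storage-Url.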

    def handle_account_recreate(self, request):
        """
        Handles ReST requests from developers to have accounts in the Auth
        system recreated in Swift. I know this is bad ReST style, but this
        isn't production right? :)

        Valid URL paths:

        * POST /recreate_accounts

        :param request: webob.Request object
        """
        result = self.recreate_accounts()
        return Response(result, 200, request=request)

    def handle_auth(self, request):
        """
        Handles ReST requests from end users for a Swift cluster url and auth
        token. This can handle all the various headers and formats that
        existing auth systems used, so it's a bit of a chameleon.

        Valid URL paths:

        * GET /v1/<account-name>/auth
        * GET /auth
        * GET /v1.0

        Valid headers:

        * X-Auth-User: <account-name>:<user-name>
        * X-Auth-Key: <password>
        * X-Storage-User: [<account-name>:]<user-name>
          The [<account-name>:] is only optional here if the
          /v1/<account-name>/auth path is used.
        * X-Storage-Pass: <password>

        The (currently) preferred method is to use the /v1.0 path and the
        X-Auth-User and X-Auth-Key headers.

        :param request: A webob.Request instance.
        """
        pathsegs = \
            split_path(request.path, minsegs=1, maxsegs=3, rest_with_last=True)
        if pathsegs[0] == 'v1' and pathsegs[2] == 'auth':
            account = pathsegs[1]
            user = request.headers.get('x-storage-user')
            if not user:
                user = request.headers.get('x-auth-user')
                if not user or ':' not in user:
                    return HTTPUnauthorized()
                account2, user = user.split(':', 1)
                if account != account2:
                    return HTTPUnauthorized()
            password = request.headers.get('x-storage-pass')
            if not password:
                password = request.headers.get('x-auth-key')
        elif pathsegs[0] in ('auth', 'v1.0'):
            user = request.headers.get('x-auth-user')
            if not user:
                user = request.headers.get('x-storage-user')
            if not user or ':' not in user:
                return HTTPUnauthorized()
            account, user = user.split(':', 1)
            password = request.headers.get('x-auth-key')
            if not password:
                password = request.headers.get('x-storage-pass')
        else:
            return HTTPBadRequest()
        if not all((account, user, password)):
            return HTTPUnauthorized()
        self.purge_old_tokens()
        with self.get_conn() as conn:
            row = conn.execute('''
                SELECT cfaccount, url FROM account
                WHERE account = ? AND user = ? AND password = ?''',
                (account, user, password)).fetchone()
            if row is None:
                return HTTPUnauthorized()
            cfaccount = row[0]
            url = row[1]
            row = conn.execute('SELECT token FROM token WHERE cfaccount = ?',
                               (cfaccount,)).fetchone()
            if row:
                token = row[0]
            else:
                token = 'tk' + str(uuid4())
                conn.execute('''
                    INSERT INTO token (cfaccount, token, created)
                    VALUES (?, ?, ?)''',
                    (cfaccount, token, time()))
                conn.commit()
            return HTTPNoContent(headers={'x-auth-token': token,
                                          'x-storage-token': token,
                                          'x-storage-url': url})

    def handleREST(self, env, start_response):
        """
        Handles routing of ReST requests. This handler also logs all requests.

        :param env: WSGI environment
        :param start_response: WSGI start_response function
        """
        req = Request(env)
        logged_headers = None
        if self.log_headers:
            logged_headers = '\n'.join('%s: %s' % (k, v)
                for k, v in req.headers.items()).replace('"', "#042")
        start_time = time()
        # Figure out how to handle the request
        try:
            if req.method == 'GET' and (req.path.startswith('/v1') or
                    req.path.startswith('/auth')):
                handler = self.handle_auth
            elif req.method == 'GET' and req.path.startswith('/token/'):
                handler = self.handle_token
            elif req.method == 'PUT' and req.path.startswith('/account/'):
                handler = self.handle_account_create
            elif req.method == 'POST' and \
                    req.path == '/recreate_accounts':
                handler = self.handle_account_recreate
            else:
                return HTTPBadRequest(request=req)(env, start_response)
            response = handler(req)
        except:
            self.logger.exception('ERROR Unhandled exception in ReST request')
            return HTTPServiceUnavailable(request=req)(env, start_response)
        trans_time = '%.4f' % (time() - start_time)
        if not response.content_length and response.app_iter and \
                hasattr(response.app_iter, '__len__'):
            response.content_length = sum(map(len, response.app_iter))
        the_request = '%s %s' % (req.method, quote(unquote(req.path)))
        if req.query_string:
            the_request = the_request + '?' + req.query_string
        the_request += ' ' + req.environ['SERVER_PROTOCOL']
        client = req.headers.get('x-cluster-client-ip')
        if not client and 'x-forwarded-for' in req.headers:
            client = req.headers['x-forwarded-for'].split(',')[0].strip()
        if not client:
            client = req.remote_addr
        self.logger.info(
            '%s - - [%s] "%s" %s %s "%s" "%s" - - - - - - - - - "-" "%s" '
            '"%s" %s' % (
                client,
                strftime('%d/%b/%Y:%H:%M:%S +0000', gmtime()),
                the_request,
                response.status_int,
                response.content_length or '-',
                req.referer or '-',
                req.user_agent or '-',
                req.remote_addr,
                logged_headers or '-',
                trans_time))
        return response(env, start_response)

    def __call__(self, env, start_response):
        """ Used by the eventlet.wsgi.server """
        return self.handleREST(env, start_response)
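
The end-user flow described in the AuthController docstring can be exercised
directly against a running dev auth server. The sketch below is illustrative
and not part of the commit; the host, port, and credentials are assumptions,
and Python 2 httplib is used to match the era of this code.

import httplib

conn = httplib.HTTPConnection('127.0.0.1', 11000)
conn.request('GET', '/v1.0', '',
             {'X-Auth-User': 'test:tester', 'X-Auth-Key': 'testing'})
resp = conn.getresponse()
if resp.status == 204:
    # handle_auth answers 204 with the token and cluster url in headers.
    print 'token:', resp.getheader('x-auth-token')
    print 'storage url:', resp.getheader('x-storage-url')
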
@@ -34,8 +34,8 @@ class DevAuthMiddleware(object):
         self.memcache_client = memcache_client
         self.logger = logger
         self.conf = conf
-        self.auth_host = conf.get('bind_ip', '127.0.0.1')
-        self.auth_port = int(conf.get('bind_port', 11000))
+        self.auth_host = conf.get('ip', '127.0.0.1')
+        self.auth_port = int(conf.get('port', 11000))
         self.timeout = int(conf.get('node_timeout', 10))

     def __call__(self, env, start_response):

2
swift/common/ring/__init__.py
Normal file
@@ -0,0 +1,2 @@
from ring import RingData, Ring
from builder import RingBuilder

460
swift/common/ring/builder.py
Normal file
@@ -0,0 +1,460 @@
# Copyright (c) 2010 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from array import array
from bisect import bisect
from random import randint
from time import time

from swift.common.ring import RingData


class RingBuilder(object):
    """
    Used to build swift.common.RingData instances to be written to disk and
    used with swift.common.ring.Ring instances. See bin/ring-builder.py for
    example usage.

    The instance variable devs_changed indicates if the device information has
    changed since the last balancing. This can be used by tools to know whether
    a rebalance request is an isolated request or due to added, changed, or
    removed devices.

    :param part_power: number of partitions = 2**part_power
    :param replicas: number of replicas for each partition
    :param min_part_hours: minimum number of hours between partition changes
    """

    def __init__(self, part_power, replicas, min_part_hours):
        self.part_power = part_power
        self.replicas = replicas
        self.min_part_hours = min_part_hours
        self.parts = 2 ** self.part_power
        self.devs = []
        self.devs_changed = False
        self.version = 0

        # _replica2part2dev maps from replica number to partition number to
        # device id. So, for a three replica, 2**23 ring, it's an array of
        # three 2**23 arrays of device ids (unsigned shorts). This can work a
        # bit faster than the 2**23 array of triplet arrays of device ids in
        # many circumstances. Making one big 2**23 * 3 array didn't seem to
        # have any speed change; though you're welcome to try it again (it was
        # a while ago, code-wise, when I last tried it).
        self._replica2part2dev = None

        # _last_part_moves is a 2**23 array of unsigned bytes representing the
        # number of hours since a given partition was last moved. This is used
        # to guarantee we don't move a partition twice within a given number of
        # hours (24 is my usual test). Removing a device or setting its weight
        # to 0 overrides this behavior as it's assumed those actions are done
        # because of device failure.
        # _last_part_moves_epoch indicates the time the offsets in
        # _last_part_moves are based on.
        self._last_part_moves_epoch = None
        self._last_part_moves = None

        self._last_part_gather_start = 0
        self._remove_devs = []
        self._ring = None

    def change_min_part_hours(self, min_part_hours):
        """
        Changes the value used to decide if a given partition can be moved
        again. This restriction is to give the overall system enough time to
        settle a partition to its new location before moving it to yet another
        location. While no data would be lost if a partition is moved several
        times quickly, it could make that data unreachable for a short period
        of time.

        This should be set to at least the average full partition replication
        time. Starting it at 24 hours and then lowering it to what the
        replicator reports as the longest partition cycle is best.

        :param min_part_hours: new value for min_part_hours
        """
        self.min_part_hours = min_part_hours

    def get_ring(self):
        """
        Get the ring, or more specifically, the swift.common.ring.RingData.
        This ring data is the minimum required for use of the ring. The ring
        builder itself keeps additional data such as when partitions were last
        moved.
        """
        if not self._ring:
            devs = [None] * len(self.devs)
            for dev in self.devs:
                if dev is None:
                    continue
                devs[dev['id']] = dict((k, v) for k, v in dev.items()
                                       if k not in ('parts', 'parts_wanted'))
            self._ring = \
                RingData([array('H', p2d) for p2d in self._replica2part2dev],
                         devs, 32 - self.part_power)
        return self._ring

    def add_dev(self, dev):
        """
        Add a device to the ring. This device dict should have a minimum of the
        following keys:

        ======  ===============================================================
        id      unique integer identifier amongst devices
        weight  a float of the relative weight of this device as compared to
                others; this indicates how many partitions the builder will try
                to assign to this device
        zone    integer indicating which zone the device is in; a given
                partition will not be assigned to multiple devices within the
                same zone
        ip      the ip address of the device
        port    the tcp port of the device
        device  the device's name on disk (sdb1, for example)
        meta    general use 'extra' field; for example: the online date, the
                hardware description
        ======  ===============================================================

        .. note::
            This will not rebalance the ring immediately as you may want to
            make multiple changes for a single rebalance.

        :param dev: device dict
        """
        if dev['id'] < len(self.devs) and self.devs[dev['id']] is not None:
            raise Exception('Duplicate device id: %d' % dev['id'])
        while dev['id'] >= len(self.devs):
            self.devs.append(None)
        dev['weight'] = float(dev['weight'])
        dev['parts'] = 0
        self.devs[dev['id']] = dev
        self._set_parts_wanted()
        self.devs_changed = True
        self.version += 1

    def set_dev_weight(self, dev_id, weight):
        """
        Set the weight of a device. This should be called rather than just
        altering the weight key in the device dict directly, as the builder
        will need to rebuild some internal state to reflect the change.

        .. note::
            This will not rebalance the ring immediately as you may want to
            make multiple changes for a single rebalance.

        :param dev_id: device id
        :param weight: new weight for device
        """
        self.devs[dev_id]['weight'] = weight
        self._set_parts_wanted()
        self.devs_changed = True
        self.version += 1

    def remove_dev(self, dev_id):
        """
        Remove a device from the ring.

        .. note::
            This will not rebalance the ring immediately as you may want to
            make multiple changes for a single rebalance.

        :param dev_id: device id
        """
        dev = self.devs[dev_id]
        dev['weight'] = 0
        self._remove_devs.append(dev)
        self._set_parts_wanted()
        self.devs_changed = True
        self.version += 1

    def rebalance(self):
        """
        Rebalance the ring.

        This is the main work function of the builder, as it will assign and
        reassign partitions to devices in the ring based on weights, distinct
        zones, recent reassignments, etc.

        The process doesn't always perfectly assign partitions (that'd take a
        lot more analysis and therefore a lot more time -- I had code that did
        that before). Because of this, it keeps rebalancing until the device
        skew (number of partitions a device wants compared to what it has) gets
        below 1% or doesn't change by more than 1% (only happens with a ring
        that can't be balanced no matter what -- like with 3 zones of differing
        weights with replicas set to 3).
        """
        self._ring = None
        if self._last_part_moves_epoch is None:
            self._initial_balance()
            self.devs_changed = False
            return self.parts, self.get_balance()
        retval = 0
        self._update_last_part_moves()
        last_balance = 0
        while True:
            reassign_parts = self._gather_reassign_parts()
            self._reassign_parts(reassign_parts)
            retval += len(reassign_parts)
            while self._remove_devs:
                self.devs[self._remove_devs.pop()['id']] = None
            balance = self.get_balance()
            if balance < 1 or abs(last_balance - balance) < 1 or \
                    retval == self.parts:
                break
            last_balance = balance
        self.devs_changed = False
        self.version += 1
        return retval, balance

    def validate(self, stats=False):
        """
        Validate the ring.

        This is a safety function to try to catch any bugs in the building
        process. It ensures partitions have been assigned to distinct zones,
        aren't doubly assigned, etc. It can also optionally check the even
        distribution of partitions across devices.

        :param stats: if True, check distribution of partitions across devices
        :returns: if stats is True, a tuple of (device usage, worst stat), else
                  (None, None)
        :raises Exception: problem was found with the ring.
        """
        if sum(d['parts'] for d in self.devs if d is not None) != \
                self.parts * self.replicas:
            raise Exception(
                'All partitions are not double accounted for: %d != %d' %
                (sum(d['parts'] for d in self.devs if d is not None),
                 self.parts * self.replicas))
        if stats:
            dev_usage = array('I', (0 for _ in xrange(len(self.devs))))
        for part in xrange(self.parts):
            zones = {}
            for replica in xrange(self.replicas):
                dev_id = self._replica2part2dev[replica][part]
                if stats:
                    dev_usage[dev_id] += 1
                zone = self.devs[dev_id]['zone']
                if zone in zones:
                    raise Exception(
                        'Partition %d not in %d distinct zones. '
                        'Zones were: %s' %
                        (part, self.replicas,
                         [self.devs[self._replica2part2dev[r][part]]['zone']
                          for r in xrange(self.replicas)]))
                zones[zone] = True
        if stats:
            weighted_parts = self.parts * self.replicas / \
                sum(d['weight'] for d in self.devs if d is not None)
            worst = 0
            for dev in self.devs:
                if dev is None:
                    continue
                if not dev['weight']:
                    if dev_usage[dev['id']]:
                        worst = 999.99
                        break
                    continue
                skew = abs(100.0 * dev_usage[dev['id']] /
                           (dev['weight'] * weighted_parts) - 100.0)
                if skew > worst:
                    worst = skew
            return dev_usage, worst
        return None, None

    def get_balance(self):
        """
        Get the balance of the ring. The balance value is the highest
        percentage off the desired amount of partitions a given device wants.
        For instance, if the "worst" device wants (based on its relative weight
        and its zone's relative weight) 123 partitions and it has 124
        partitions, the balance value would be 0.81 (1 extra / 123 wanted * 100
        for percentage).

        :returns: balance of the ring
        """
        weighted_parts = self.parts * self.replicas / \
            sum(d['weight'] for d in self.devs if d is not None)
        balance = 0
        for dev in self.devs:
            if dev is None:
                continue
            if not dev['weight']:
                if dev['parts']:
                    balance = 999.99
                    break
                continue
            dev_balance = abs(100.0 * dev['parts'] /
                              (dev['weight'] * weighted_parts) - 100.0)
            if dev_balance > balance:
                balance = dev_balance
        return balance

    def pretend_min_part_hours_passed(self):
        """
        Override min_part_hours by marking all partitions as having been moved
        255 hours ago. This can be used to force a full rebalance on the next
        call to rebalance.
        """
        for part in xrange(self.parts):
            self._last_part_moves[part] = 0xff

    def _set_parts_wanted(self):
        """
        Sets the parts_wanted key for each of the devices to the number of
        partitions the device wants based on its relative weight. This key is
        used to sort the devices according to "most wanted" during rebalancing
        to best distribute partitions.
        """
        weighted_parts = self.parts * self.replicas / \
            sum(d['weight'] for d in self.devs if d is not None)
        for dev in self.devs:
            if dev is not None:
                if not dev['weight']:
                    dev['parts_wanted'] = self.parts * -2
                else:
                    dev['parts_wanted'] = \
                        int(weighted_parts * dev['weight']) - dev['parts']

    def _initial_balance(self):
        """
        Initial partition assignment is treated separately from rebalancing an
        existing ring. Initial assignment is performed by ordering all the
        devices by how many partitions they still want (and kept in order
        during the process). The partitions are then iterated through,
        assigning them to the next "most wanted" device, with distinct zone
        restrictions.
        """
        for dev in self.devs:
            dev['sort_key'] = \
                '%08x.%04x' % (dev['parts_wanted'], randint(0, 0xffff))
        available_devs = sorted((d for d in self.devs if d is not None),
                                key=lambda x: x['sort_key'])
        self._replica2part2dev = [array('H') for _ in xrange(self.replicas)]
        for _ in xrange(self.parts):
            other_zones = array('H')
            for replica in xrange(self.replicas):
                index = len(available_devs) - 1
                while available_devs[index]['zone'] in other_zones:
                    index -= 1
                dev = available_devs.pop(index)
                self._replica2part2dev[replica].append(dev['id'])
                dev['parts_wanted'] -= 1
                dev['parts'] += 1
                dev['sort_key'] = \
                    '%08x.%04x' % (dev['parts_wanted'], randint(0, 0xffff))
                index = 0
                end = len(available_devs)
                while index < end:
                    mid = (index + end) // 2
                    if dev['sort_key'] < available_devs[mid]['sort_key']:
                        end = mid
                    else:
                        index = mid + 1
                available_devs.insert(index, dev)
                other_zones.append(dev['zone'])
        self._last_part_moves = array('B', (0 for _ in xrange(self.parts)))
        self._last_part_moves_epoch = int(time())
        for dev in self.devs:
            del dev['sort_key']

    def _update_last_part_moves(self):
        """
        Updates how many hours ago each partition was moved based on the
        current time. The builder won't move a partition that has been moved
        more recently than min_part_hours.
        """
        elapsed_hours = int(time() - self._last_part_moves_epoch) / 3600
        for part in xrange(self.parts):
            self._last_part_moves[part] = \
                min(self._last_part_moves[part] + elapsed_hours, 0xff)
        self._last_part_moves_epoch = int(time())

    def _gather_reassign_parts(self):
        """
        Returns an array('I') of partitions to be reassigned by gathering them
        from removed devices and overweight devices.
        """
        reassign_parts = array('I')
        if self._remove_devs:
            dev_ids = [d['id'] for d in self._remove_devs if d['parts']]
            if dev_ids:
                for replica in xrange(self.replicas):
                    part2dev = self._replica2part2dev[replica]
                    for part in xrange(self.parts):
                        if part2dev[part] in dev_ids:
                            part2dev[part] = 0xffff
                            self._last_part_moves[part] = 0
                            reassign_parts.append(part)
        start = self._last_part_gather_start / 4 + randint(0, self.parts / 2)
        self._last_part_gather_start = start
        for replica in xrange(self.replicas):
            part2dev = self._replica2part2dev[replica]
            for half in (xrange(start, self.parts), xrange(0, start)):
                for part in half:
                    if self._last_part_moves[part] < self.min_part_hours:
                        continue
                    dev = self.devs[part2dev[part]]
                    if dev['parts_wanted'] < 0:
                        part2dev[part] = 0xffff
                        self._last_part_moves[part] = 0
                        dev['parts_wanted'] += 1
                        dev['parts'] -= 1
                        reassign_parts.append(part)
        return reassign_parts

    def _reassign_parts(self, reassign_parts):
        """
        For an existing ring data set, partitions are reassigned similarly to
        the initial assignment. The devices are ordered by how many partitions
        they still want and kept in that order throughout the process. The
        gathered partitions are iterated through, assigning them to devices
        according to the "most wanted" and distinct zone restrictions.
        """
        for dev in self.devs:
            if dev is not None:
                dev['sort_key'] = '%08x.%04x' % (self.parts +
                    dev['parts_wanted'], randint(0, 0xffff))
        available_devs = \
            sorted((d for d in self.devs if d is not None and d['weight']),
                   key=lambda x: x['sort_key'])
        for part in reassign_parts:
            other_zones = array('H')
            replace = None
            for replica in xrange(self.replicas):
                if self._replica2part2dev[replica][part] == 0xffff:
                    replace = replica
                else:
                    other_zones.append(self.devs[
                        self._replica2part2dev[replica][part]]['zone'])
            index = len(available_devs) - 1
            while available_devs[index]['zone'] in other_zones:
                index -= 1
            dev = available_devs.pop(index)
            self._replica2part2dev[replace][part] = dev['id']
            dev['parts_wanted'] -= 1
            dev['parts'] += 1
            dev['sort_key'] = '%08x.%04x' % (self.parts + dev['parts_wanted'],
                                             randint(0, 0xffff))
            index = 0
            end = len(available_devs)
            while index < end:
                mid = (index + end) // 2
                if dev['sort_key'] < available_devs[mid]['sort_key']:
                    end = mid
                else:
                    index = mid + 1
            available_devs.insert(index, dev)
        for dev in self.devs:
            if dev is not None:
                del dev['sort_key']
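
To see the builder end to end, the sketch below (illustrative, not part of
the commit) builds a small 2**16-partition, 3-replica ring across four
single-device zones and serializes the RingData the same way the Ring class
defined next in ring.py expects to load it. The device details and output
path are assumptions.

import cPickle as pickle
from gzip import GzipFile

from swift.common.ring import RingBuilder

builder = RingBuilder(16, 3, 1)
for dev_id in xrange(4):
    # One device per zone so every partition can land in 3 distinct zones.
    builder.add_dev({'id': dev_id, 'zone': dev_id, 'weight': 100.0,
                     'ip': '127.0.0.1', 'port': 6002 + dev_id,
                     'device': 'sdb%d' % dev_id, 'meta': ''})
parts_moved, balance = builder.rebalance()
builder.validate(stats=True)  # raises if the assignment is inconsistent
pickle.dump(builder.get_ring(), GzipFile('account.ring.gz', 'wb'),
            protocol=2)
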
141
swift/common/ring/ring.py
Normal file
@@ -0,0 +1,141 @@
# Copyright (c) 2010 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import cPickle as pickle
from gzip import GzipFile
from hashlib import md5
from os.path import getmtime
from struct import unpack_from
from time import time
from swift.common.utils import hash_path


class RingData(object):
    """Partitioned consistent hashing ring data (used for serialization)."""

    def __init__(self, replica2part2dev_id, devs, part_shift):
        self.devs = devs
        self._replica2part2dev_id = replica2part2dev_id
        self._part_shift = part_shift


class Ring(object):
    """
    Partitioned consistent hashing ring.

    :param pickle_gz_path: path to ring file
    :param reload_time: time interval in seconds to check for a ring change
    """

    def __init__(self, pickle_gz_path, reload_time=15):
        self.pickle_gz_path = pickle_gz_path
        self.reload_time = reload_time
        self._reload(force=True)

    def _reload(self, force=False):
        self._rtime = time() + self.reload_time
        if force or self.has_changed():
            ring_data = pickle.load(GzipFile(self.pickle_gz_path, 'rb'))
            self._mtime = getmtime(self.pickle_gz_path)
            self.devs = ring_data.devs
            self.zone2devs = {}
            for dev in self.devs:
                if not dev:
                    continue
                if dev['zone'] in self.zone2devs:
                    self.zone2devs[dev['zone']].append(dev)
                else:
                    self.zone2devs[dev['zone']] = [dev]
            self._replica2part2dev_id = ring_data._replica2part2dev_id
            self._part_shift = ring_data._part_shift

    @property
    def replica_count(self):
        """Number of replicas used in the ring."""
        return len(self._replica2part2dev_id)

    @property
    def partition_count(self):
        """Number of partitions in the ring."""
        return len(self._replica2part2dev_id[0])

    def has_changed(self):
        """
        Check to see if the ring on disk is different than the current one in
        memory.

        :returns: True if the ring on disk has changed, False otherwise
        """
        return getmtime(self.pickle_gz_path) != self._mtime

    def get_part_nodes(self, part):
        """
        Get the nodes that are responsible for the partition.

        :param part: partition to get nodes for
        :returns: list of node dicts

        See :func:`get_nodes` for a description of the node dicts.
        """
        if time() > self._rtime:
            self._reload()
        return [self.devs[r[part]] for r in self._replica2part2dev_id]

    def get_nodes(self, account, container=None, obj=None):
        """
        Get the partition and nodes for an account/container/object.

        :param account: account name
        :param container: container name
        :param obj: object name
        :returns: a tuple of (partition, list of node dicts)

        Each node dict will have at least the following keys:

        ======  ===============================================================
        id      unique integer identifier amongst devices
        weight  a float of the relative weight of this device as compared to
                others; this indicates how many partitions the builder will try
                to assign to this device
        zone    integer indicating which zone the device is in; a given
                partition will not be assigned to multiple devices within the
                same zone
        ip      the ip address of the device
        port    the tcp port of the device
        device  the device's name on disk (sdb1, for example)
        meta    general use 'extra' field; for example: the online date, the
                hardware description
        ======  ===============================================================
        """
        key = hash_path(account, container, obj, raw_digest=True)
        if time() > self._rtime:
            self._reload()
        part = unpack_from('>I', key)[0] >> self._part_shift
        return part, [self.devs[r[part]] for r in self._replica2part2dev_id]
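
    # Partition math illustration (values hypothetical): for a ring built
    # with part_power 16, _part_shift is 32 - 16 = 16, so the top 16 bits
    # of the first four raw digest bytes of the hashed path pick one of
    # the 2**16 partitions.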

    def get_more_nodes(self, part):
        """
        Generator to get extra nodes for a partition for hinted handoff.

        :param part: partition to get handoff nodes for
        :returns: generator of node dicts

        See :func:`get_nodes` for a description of the node dicts.
        """
        if time() > self._rtime:
            self._reload()
        zones = sorted(self.zone2devs.keys())
        for part2dev_id in self._replica2part2dev_id:
            zones.remove(self.devs[part2dev_id[part]]['zone'])
        while zones:
            zone = zones.pop(part % len(zones))
            yield self.zone2devs[zone][part % len(self.zone2devs[zone])]
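
A matching consumption sketch (illustrative, not part of the commit): load
the ring file written in the builder example above and look up the primary
and handoff nodes for an object; the names are assumptions.

from swift.common.ring import Ring

ring = Ring('account.ring.gz')
part, nodes = ring.get_nodes('someaccount', 'somecontainer', 'someobject')
for node in nodes:
    print 'primary: %(ip)s:%(port)s/%(device)s' % node
for node in ring.get_more_nodes(part):
    print 'handoff: %(ip)s:%(port)s/%(device)s' % node
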
0
swift/container/__init__.py
Normal file
271
swift/container/auditor.py
Normal file
@@ -0,0 +1,271 @@
# Copyright (c) 2010 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import socket
import time
from random import choice, random
from urllib import quote

from eventlet import Timeout

from swift.container import server as container_server
from swift.common.db import ContainerBroker
from swift.common.bufferedhttp import http_connect
from swift.common.exceptions import ConnectionTimeout
from swift.common.ring import Ring
from swift.common.utils import get_logger


class AuditException(Exception):
    pass


class ContainerAuditor(object):
    """Audit containers."""

    def __init__(self, server_conf, auditor_conf):
        self.logger = get_logger(auditor_conf, 'container-auditor')
        self.devices = server_conf.get('devices', '/srv/node')
        self.mount_check = server_conf.get('mount_check', 'true').lower() in \
            ('true', 't', '1', 'on', 'yes', 'y')
        self.interval = int(auditor_conf.get('interval', 1800))
        swift_dir = server_conf.get('swift_dir', '/etc/swift')
        self.account_ring_path = os.path.join(swift_dir, 'account.ring.gz')
        self.account_ring = None
        self.object_ring_path = os.path.join(swift_dir, 'object.ring.gz')
        self.object_ring = None
        self.node_timeout = int(auditor_conf.get('node_timeout', 10))
        self.conn_timeout = float(auditor_conf.get('conn_timeout', 0.5))
        self.max_object_count = int(auditor_conf.get('max_object_count', 100))
        self.account_passes = 0
        self.account_failures = 0
        self.account_errors = 0
        self.object_passes = 0
        self.object_failures = 0
        self.object_errors = 0

    def get_account_ring(self):
        """
        Get the account ring. Loads the ring if necessary.

        :returns: account ring
        """
        if not self.account_ring:
            self.logger.debug(
                'Loading account ring from %s' % self.account_ring_path)
            self.account_ring = Ring(self.account_ring_path)
        return self.account_ring

    def get_object_ring(self):
        """
        Get the object ring. Loads the ring if necessary.

        :returns: object ring
        """
        if not self.object_ring:
            self.logger.debug(
                'Loading object ring from %s' % self.object_ring_path)
            self.object_ring = Ring(self.object_ring_path)
        return self.object_ring

    def audit_forever(self):  # pragma: no cover
        """Run the container audit until stopped."""
        reported = time.time()
        time.sleep(random() * self.interval)
        while True:
            begin = time.time()
            pids = []
            for device in os.listdir(self.devices):
                if self.mount_check and not \
                        os.path.ismount(os.path.join(self.devices, device)):
                    self.logger.debug(
                        'Skipping %s as it is not mounted' % device)
                    continue
                self.container_audit(device)
            if time.time() - reported >= 3600:  # once an hour
                self.logger.info(
                    'Since %s: Remote audits with accounts: %s passed audit, '
                    '%s failed audit, %s errors Remote audits with objects: '
                    '%s passed audit, %s failed audit, %s errors' %
                    (time.ctime(reported), self.account_passes,
                     self.account_failures, self.account_errors,
                     self.object_passes, self.object_failures,
                     self.object_errors))
                reported = time.time()
                self.account_passes = 0
                self.account_failures = 0
                self.account_errors = 0
                self.object_passes = 0
                self.object_failures = 0
                self.object_errors = 0
            elapsed = time.time() - begin
            if elapsed < self.interval:
                time.sleep(self.interval - elapsed)

    def audit_once(self):
        """Run the container audit once."""
        self.logger.info('Begin container audit "once" mode')
        begin = time.time()
        for device in os.listdir(self.devices):
            if self.mount_check and \
                    not os.path.ismount(os.path.join(self.devices, device)):
                self.logger.debug(
                    'Skipping %s as it is not mounted' % device)
                continue
            self.container_audit(device)
        elapsed = time.time() - begin
        self.logger.info(
            'Container audit "once" mode completed: %.02fs' % elapsed)
def container_audit(self, device):
|
||||||
|
"""
|
||||||
|
Audit any containers found on the device
|
||||||
|
|
||||||
|
:param device: device to audit
|
||||||
|
"""
|
||||||
|
datadir = os.path.join(self.devices, device, container_server.DATADIR)
|
||||||
|
if not os.path.exists(datadir):
|
||||||
|
return
|
||||||
|
broker = None
|
||||||
|
partition = None
|
||||||
|
attempts = 100
|
||||||
|
while not broker and attempts:
|
||||||
|
attempts -= 1
|
||||||
|
try:
|
||||||
|
partition = choice(os.listdir(datadir))
|
||||||
|
fpath = os.path.join(datadir, partition)
|
||||||
|
if not os.path.isdir(fpath):
|
||||||
|
continue
|
||||||
|
suffix = choice(os.listdir(fpath))
|
||||||
|
fpath = os.path.join(fpath, suffix)
|
||||||
|
if not os.path.isdir(fpath):
|
||||||
|
continue
|
||||||
|
hsh = choice(os.listdir(fpath))
|
||||||
|
fpath = os.path.join(fpath, hsh)
|
||||||
|
if not os.path.isdir(fpath):
|
||||||
|
continue
|
||||||
|
except IndexError:
|
||||||
|
continue
|
||||||
|
for fname in sorted(os.listdir(fpath), reverse=True):
|
||||||
|
if fname.endswith('.db'):
|
||||||
|
broker = ContainerBroker(os.path.join(fpath, fname))
|
||||||
|
if broker.is_deleted():
|
||||||
|
broker = None
|
||||||
|
break
|
||||||
|
if not broker:
|
||||||
|
return
|
||||||
|
info = broker.get_info()
|
||||||
|
found = False
|
||||||
|
good_response = False
|
||||||
|
results = []
|
||||||
|
        part, nodes = self.get_account_ring().get_nodes(info['account'])
        for node in nodes:
            try:
                with ConnectionTimeout(self.conn_timeout):
                    conn = http_connect(node['ip'], node['port'],
                            node['device'], part, 'GET',
                            '/%s' % info['account'],
                            query_string='prefix=%s' %
                            quote(info['container']))
                with Timeout(self.node_timeout):
                    resp = conn.getresponse()
                    body = resp.read()
                if 200 <= resp.status <= 299:
                    good_response = True
                    for cname in body.split('\n'):
                        if cname == info['container']:
                            found = True
                            break
                    if found:
                        break
                    else:
                        results.append('%s:%s/%s %s %s = %s' % (node['ip'],
                            node['port'], node['device'], resp.status,
                            resp.reason, repr(body)))
                else:
                    results.append('%s:%s/%s %s %s' %
                        (node['ip'], node['port'], node['device'],
                        resp.status, resp.reason))
            except socket.error, err:
                results.append('%s:%s/%s Socket Error: %s' % (node['ip'],
                    node['port'], node['device'], err))
            except ConnectionTimeout:
                results.append('%(ip)s:%(port)s/%(device)s ConnectionTimeout' %
                               node)
            except Timeout:
                results.append('%(ip)s:%(port)s/%(device)s Timeout' % node)
            except Exception, err:
                self.logger.exception('ERROR With remote server '
                                      '%(ip)s:%(port)s/%(device)s' % node)
                results.append('%s:%s/%s Exception: %s' % (node['ip'],
                    node['port'], node['device'], err))
        if found:
            self.account_passes += 1
            self.logger.debug('Audit passed for /%s/%s %s' % (info['account'],
                info['container'], broker.db_file))
        else:
            if good_response:
                self.account_failures += 1
            else:
                self.account_errors += 1
            self.logger.error('ERROR Could not find container /%s/%s %s on '
                'any of the primary account servers it should be on: %s' %
                (info['account'], info['container'], broker.db_file, results))
        for obj in broker.get_random_objects(max_count=self.max_object_count):
            found = False
            results = []
            part, nodes = self.get_object_ring().get_nodes(info['account'],
                info['container'], obj)
            for node in nodes:
                try:
                    with ConnectionTimeout(self.conn_timeout):
                        conn = http_connect(node['ip'], node['port'],
                                node['device'], part, 'HEAD',
                                '/%s/%s/%s' %
                                (info['account'], info['container'], obj))
                    with Timeout(self.node_timeout):
                        resp = conn.getresponse()
                        body = resp.read()
                    if 200 <= resp.status <= 299:
                        found = True
                        break
                    else:
                        results.append('%s:%s/%s %s %s' % (node['ip'],
                            node['port'], node['device'], resp.status,
                            resp.reason))
                except socket.error, err:
                    results.append('%s:%s/%s Socket Error: %s' % (node['ip'],
                        node['port'], node['device'], err))
                except ConnectionTimeout:
                    results.append('%(ip)s:%(port)s/%(device)s ConnectionTimeout' %
                                   node)
                except Timeout:
                    results.append('%(ip)s:%(port)s/%(device)s Timeout' % node)
                except Exception, err:
                    self.logger.exception('ERROR With remote server '
                                          '%(ip)s:%(port)s/%(device)s' % node)
                    results.append('%s:%s/%s Exception: %s' % (node['ip'],
                        node['port'], node['device'], err))
            if found:
                self.object_passes += 1
                self.logger.debug('Audit passed for /%s/%s %s object %s' %
                    (info['account'], info['container'], broker.db_file, obj))
            else:
                self.object_errors += 1
                self.logger.error('ERROR Could not find object /%s/%s/%s '
                    'referenced by %s on any of the primary object '
                    'servers it should be on: %s' % (info['account'],
                    info['container'], obj, broker.db_file, results))
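The audit loop above polls every primary node and records a per-node diagnostic string, declaring success as soon as one listing confirms the entry. A standalone sketch of that poll-until-found pattern, where fetch_listing(node) is a hypothetical helper (not from the Swift code) returning (status, body) in place of the http_connect round trip:

def container_listed_on_primaries(nodes, container, fetch_listing):
    """Poll primary nodes until one account listing names the container.

    fetch_listing(node) is a hypothetical helper returning (status, body);
    a real caller would wrap http_connect the way the auditor above does.
    """
    results = []
    for node in nodes:
        try:
            status, body = fetch_listing(node)
        except Exception, err:
            results.append('%s:%s/%s %s' % (node['ip'], node['port'],
                                            node['device'], err))
            continue
        if 200 <= status <= 299 and container in body.split('\n'):
            # one confirming listing is enough; stop polling
            return True, results
        results.append('%s:%s/%s %s' % (node['ip'], node['port'],
                                        node['device'], status))
    return False, results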
383
swift/container/server.py
Normal file
@ -0,0 +1,383 @@
# Copyright (c) 2010 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import with_statement
import errno
import os
import socket
import time
import traceback
from urllib import unquote
from xml.sax import saxutils
from datetime import datetime

import simplejson
from eventlet.timeout import Timeout
from eventlet import TimeoutError
from webob import Request, Response
from webob.exc import HTTPAccepted, HTTPBadRequest, HTTPConflict, \
    HTTPCreated, HTTPException, HTTPInternalServerError, HTTPNoContent, \
    HTTPNotFound, HTTPPreconditionFailed, HTTPMethodNotAllowed

from swift.common import CONTAINER_LISTING_LIMIT
from swift.common.db import ContainerBroker
from swift.common.utils import get_logger, get_param, hash_path, \
    storage_directory, split_path, mkdirs
from swift.common.constraints import check_mount, check_float, \
    check_xml_encodable
from swift.common.bufferedhttp import http_connect
from swift.common.healthcheck import healthcheck
from swift.common.exceptions import ConnectionTimeout, MessageTimeout
from swift.common.db_replicator import ReplicatorRpc

DATADIR = 'containers'


class ContainerController(object):
    """WSGI Controller for the container server."""

    log_name = 'container'

    def __init__(self, conf):
        self.logger = get_logger(conf, self.log_name)
        self.root = conf.get('devices', '/srv/node/')
        self.mount_check = conf.get('mount_check', 'true').lower() in \
            ('true', 't', '1', 'on', 'yes', 'y')
        self.node_timeout = int(conf.get('node_timeout', 3))
        self.conn_timeout = float(conf.get('conn_timeout', 0.5))
        self.replicator_rpc = ReplicatorRpc(self.root, DATADIR,
            ContainerBroker, self.mount_check)

    def _get_container_broker(self, drive, part, account, container):
        """
        Get a DB broker for the container.

        :param drive: drive that holds the container
        :param part: partition the container is in
        :param account: account name
        :param container: container name
        :returns: ContainerBroker object
        """
        hsh = hash_path(account, container)
        db_dir = storage_directory(DATADIR, part, hsh)
        db_path = os.path.join(self.root, drive, db_dir, hsh + '.db')
        return ContainerBroker(db_path, account=account, container=container,
                               logger=self.logger)

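    # Illustrative note (hypothetical values, not in the original file):
    # with drive 'sdb1', part '1234' and hash_path('AUTH_test', 'photos')
    # yielding some hash <hsh>, the broker DB computed above lives at
    # roughly:
    #   /srv/node/sdb1/containers/1234/<suffix of hsh>/<hsh>/<hsh>.db
    # where storage_directory() supplies the datadir/partition/suffix/hash
    # layout.
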
    def account_update(self, req, account, container, broker):
        """
        Update the account server with latest container info.

        :param req: webob.Request object
        :param account: account name
        :param container: container name
        :param broker: container DB broker object
        :returns: if the account request returns a 404 error code,
                  HTTPNotFound response object, otherwise None.
        """
        account_host = req.headers.get('X-Account-Host')
        account_partition = req.headers.get('X-Account-Partition')
        account_device = req.headers.get('X-Account-Device')
        if all([account_host, account_partition, account_device]):
            account_ip, account_port = account_host.split(':')
            new_path = '/' + '/'.join([account, container])
            info = broker.get_info()
            account_headers = {'x-put-timestamp': info['put_timestamp'],
                'x-delete-timestamp': info['delete_timestamp'],
                'x-object-count': info['object_count'],
                'x-bytes-used': info['bytes_used'],
                'x-cf-trans-id': req.headers.get('X-Cf-Trans-Id', '-')}
            if req.headers.get('x-account-override-deleted', 'no').lower() == \
                    'yes':
                account_headers['x-account-override-deleted'] = 'yes'
            try:
                with ConnectionTimeout(self.conn_timeout):
                    conn = http_connect(account_ip, account_port,
                        account_device, account_partition, 'PUT', new_path,
                        account_headers)
                with Timeout(self.node_timeout):
                    account_response = conn.getresponse()
                    account_response.read()
                    if account_response.status == 404:
                        return HTTPNotFound(request=req)
                    elif account_response.status < 200 or \
                            account_response.status > 299:
                        self.logger.error('ERROR Account update failed '
                            'with %s:%s/%s transaction %s (will retry '
                            'later): Response %s %s' % (account_ip,
                            account_port, account_device,
                            req.headers.get('x-cf-trans-id'),
                            account_response.status,
                            account_response.reason))
            except:
                self.logger.exception('ERROR account update failed with '
                    '%s:%s/%s transaction %s (will retry later)' %
                    (account_ip, account_port, account_device,
                     req.headers.get('x-cf-trans-id', '-')))
        return None

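    # Illustrative sketch (the header names are read above; the values and
    # the proxy behavior are assumptions, not shown in this file): a caller
    # such as the proxy is expected to stamp the request with the account
    # location so account_update() knows where to send the stats, e.g.:
    #   X-Account-Host: 10.0.0.1:6002
    #   X-Account-Partition: 1234
    #   X-Account-Device: sdb1
    # If any of the three headers is missing, account_update() is a no-op.
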
    def DELETE(self, req):
        """Handle HTTP DELETE request."""
        try:
            drive, part, account, container, obj = split_path(
                unquote(req.path), 4, 5, True)
        except ValueError, err:
            return HTTPBadRequest(body=str(err), content_type='text/plain',
                                  request=req)
        if 'x-timestamp' not in req.headers or \
                not check_float(req.headers['x-timestamp']):
            return HTTPBadRequest(body='Missing timestamp', request=req,
                                  content_type='text/plain')
        if self.mount_check and not check_mount(self.root, drive):
            return Response(status='507 %s is not mounted' % drive)
        broker = self._get_container_broker(drive, part, account, container)
        if not os.path.exists(broker.db_file):
            return HTTPNotFound()
        if obj:     # delete object
            broker.delete_object(obj, req.headers.get('x-timestamp'))
            return HTTPNoContent(request=req)
        else:
            # delete container
            if not broker.empty():
                return HTTPConflict(request=req)
            existed = float(broker.get_info()['put_timestamp']) and \
                      not broker.is_deleted()
            broker.delete_db(req.headers['X-Timestamp'])
            if not broker.is_deleted():
                return HTTPConflict(request=req)
            resp = self.account_update(req, account, container, broker)
            if resp:
                return resp
            if existed:
                return HTTPNoContent(request=req)
            return HTTPAccepted(request=req)

    def PUT(self, req):
        """Handle HTTP PUT request."""
        try:
            drive, part, account, container, obj = split_path(
                unquote(req.path), 4, 5, True)
        except ValueError, err:
            return HTTPBadRequest(body=str(err), content_type='text/plain',
                                  request=req)
        if 'x-timestamp' not in req.headers or \
                not check_float(req.headers['x-timestamp']):
            return HTTPBadRequest(body='Missing timestamp', request=req,
                                  content_type='text/plain')
        if self.mount_check and not check_mount(self.root, drive):
            return Response(status='507 %s is not mounted' % drive)
        broker = self._get_container_broker(drive, part, account, container)
        if obj:     # put container object
            if not os.path.exists(broker.db_file):
                return HTTPNotFound()
            broker.put_object(obj, req.headers['x-timestamp'],
                int(req.headers['x-size']), req.headers['x-content-type'],
                req.headers['x-etag'])
            return HTTPCreated(request=req)
        else:   # put container
            if not os.path.exists(broker.db_file):
                broker.initialize(req.headers['x-timestamp'])
                created = True
            else:
                created = broker.is_deleted()
                broker.update_put_timestamp(req.headers['x-timestamp'])
                if broker.is_deleted():
                    return HTTPConflict(request=req)
            resp = self.account_update(req, account, container, broker)
            if resp:
                return resp
            if created:
                return HTTPCreated(request=req)
            else:
                return HTTPAccepted(request=req)

    def HEAD(self, req):
        """Handle HTTP HEAD request."""
        try:
            drive, part, account, container, obj = split_path(
                unquote(req.path), 4, 5, True)
        except ValueError, err:
            return HTTPBadRequest(body=str(err), content_type='text/plain',
                                  request=req)
        if self.mount_check and not check_mount(self.root, drive):
            return Response(status='507 %s is not mounted' % drive)
        broker = self._get_container_broker(drive, part, account, container)
        broker.pending_timeout = 0.1
        broker.stale_reads_ok = True
        if broker.is_deleted():
            return HTTPNotFound(request=req)
        info = broker.get_info()
        headers = {
            'X-Container-Object-Count': info['object_count'],
            'X-Container-Bytes-Used': info['bytes_used'],
            'X-Timestamp': info['created_at'],
            'X-PUT-Timestamp': info['put_timestamp'],
        }
        return HTTPNoContent(request=req, headers=headers)

    def GET(self, req):
        """Handle HTTP GET request."""
        try:
            drive, part, account, container, obj = split_path(
                unquote(req.path), 4, 5, True)
        except ValueError, err:
            return HTTPBadRequest(body=str(err), content_type='text/plain',
                                  request=req)
        if self.mount_check and not check_mount(self.root, drive):
            return Response(status='507 %s is not mounted' % drive)
        broker = self._get_container_broker(drive, part, account, container)
        broker.pending_timeout = 0.1
        broker.stale_reads_ok = True
        if broker.is_deleted():
            return HTTPNotFound(request=req)
        info = broker.get_info()
        resp_headers = {
            'X-Container-Object-Count': info['object_count'],
            'X-Container-Bytes-Used': info['bytes_used'],
            'X-Timestamp': info['created_at'],
            'X-PUT-Timestamp': info['put_timestamp'],
        }
        try:
            path = get_param(req, 'path')
            prefix = get_param(req, 'prefix')
            delimiter = get_param(req, 'delimiter')
            if delimiter and (len(delimiter) > 1 or ord(delimiter) > 254):
                # delimiters can be made more flexible later
                return HTTPPreconditionFailed(body='Bad delimiter')
            marker = get_param(req, 'marker', '')
            limit = CONTAINER_LISTING_LIMIT
            given_limit = get_param(req, 'limit')
            if given_limit and given_limit.isdigit():
                limit = int(given_limit)
                if limit > CONTAINER_LISTING_LIMIT:
                    return HTTPPreconditionFailed(request=req,
                        body='Maximum limit is %d' % CONTAINER_LISTING_LIMIT)
            query_format = get_param(req, 'format')
        except UnicodeDecodeError, err:
            return HTTPBadRequest(body='parameters not utf8',
                                  content_type='text/plain', request=req)
        header_format = req.accept.first_match(['text/plain',
                                                'application/json',
                                                'application/xml'])
        format = query_format if query_format else header_format
        if format.startswith('application/'):
            format = format[12:]
        container_list = broker.list_objects_iter(limit, marker, prefix,
                                                  delimiter, path)
        if format == 'json':
            out_content_type = 'application/json'
            json_pattern = ['"name":%s', '"hash":"%s"', '"bytes":%s',
                            '"content_type":%s, "last_modified":"%s"']
            json_pattern = '{' + ','.join(json_pattern) + '}'
            json_out = []
            for (name, created_at, size, content_type, etag) in container_list:
                # escape name and format date here
                name = simplejson.dumps(name)
                created_at = datetime.utcfromtimestamp(
                    float(created_at)).isoformat()
                if content_type is None:
                    json_out.append('{"subdir":%s}' % name)
                else:
                    content_type = simplejson.dumps(content_type)
                    json_out.append(json_pattern % (name,
                                                    etag,
                                                    size,
                                                    content_type,
                                                    created_at))
            container_list = '[' + ','.join(json_out) + ']'
        elif format == 'xml':
            out_content_type = 'application/xml'
            xml_output = []
            for (name, created_at, size, content_type, etag) in container_list:
                # escape name and format date here
                name = saxutils.escape(name)
                created_at = datetime.utcfromtimestamp(
                    float(created_at)).isoformat()
                if content_type is None:
                    xml_output.append('<subdir name="%s" />' % name)
                else:
                    content_type = saxutils.escape(content_type)
                    xml_output.append('<object><name>%s</name><hash>%s</hash>'\
                        '<bytes>%d</bytes><content_type>%s</content_type>'\
                        '<last_modified>%s</last_modified></object>' % \
                        (name, etag, size, content_type, created_at))
            container_list = ''.join([
                '<?xml version="1.0" encoding="UTF-8"?>\n',
                '<container name=%s>' % saxutils.quoteattr(container),
                ''.join(xml_output), '</container>'])
        else:
            if not container_list:
                return HTTPNoContent(request=req, headers=resp_headers)
            out_content_type = 'text/plain'
            container_list = '\n'.join(r[0] for r in container_list) + '\n'
        ret = Response(body=container_list, request=req, headers=resp_headers)
        ret.content_type = out_content_type
        ret.charset = 'utf8'
        return ret

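    # Illustrative sketch (hypothetical request, not in the original file):
    #   GET /sdb1/1234/AUTH_test/photos?format=json&limit=2
    # would produce a body shaped roughly like:
    #   [{"name":"a.jpg", "hash":"...", "bytes":123,
    #     "content_type":"image/jpeg", "last_modified":"2010-..."}, ...]
    # while the default text/plain format is just one object name per line.
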
    def POST(self, req):
        """
        Handle HTTP POST request (json-encoded RPC calls for replication.)
        """
        try:
            post_args = split_path(unquote(req.path), 3)
        except ValueError, err:
            return HTTPBadRequest(body=str(err), content_type='text/plain',
                                  request=req)
        drive, partition, hash = post_args
        if self.mount_check and not check_mount(self.root, drive):
            return Response(status='507 %s is not mounted' % drive)
        try:
            args = simplejson.load(req.body_file)
        except ValueError, err:
            return HTTPBadRequest(body=str(err), content_type='text/plain')
        ret = self.replicator_rpc.dispatch(post_args, args)
        ret.request = req
        return ret

    def __call__(self, env, start_response):
        start_time = time.time()
        req = Request(env)
        if req.path_info == '/healthcheck':
            return healthcheck(req)(env, start_response)
        elif not check_xml_encodable(req.path_info):
            res = HTTPPreconditionFailed(body='Invalid UTF8')
        else:
            try:
                if hasattr(self, req.method):
                    res = getattr(self, req.method)(req)
                else:
                    res = HTTPMethodNotAllowed()
            except:
                self.logger.exception('ERROR __call__ error with %s %s '
                    'transaction %s' % (env.get('REQUEST_METHOD', '-'),
                    env.get('PATH_INFO', '-'), env.get('HTTP_X_CF_TRANS_ID',
                    '-')))
                res = HTTPInternalServerError(body=traceback.format_exc())
        trans_time = '%.4f' % (time.time() - start_time)
        log_message = '%s - - [%s] "%s %s" %s %s "%s" "%s" "%s" %s' % (
            req.remote_addr,
            time.strftime('%d/%b/%Y:%H:%M:%S +0000',
                          time.gmtime()),
            req.method, req.path,
            res.status.split()[0], res.content_length or '-',
            req.headers.get('x-cf-trans-id', '-'),
            req.referer or '-', req.user_agent or '-',
            trans_time)
        if req.method.upper() == 'POST':
            self.logger.debug(log_message)
        else:
            self.logger.info(log_message)
        return res(env, start_response)
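The controller's __call__ above dispatches to a method named after the HTTP verb via getattr. A minimal standalone sketch of the same WSGI dispatch pattern, with hypothetical names (TinyController is illustrative, not from Swift):

from webob import Request, Response
from webob.exc import HTTPMethodNotAllowed

class TinyController(object):
    """Dispatch a WSGI request to a GET()/PUT()/... method by HTTP verb."""

    def GET(self, req):
        return Response(body='hello\n')

    def __call__(self, env, start_response):
        req = Request(env)
        # Only dispatch to verbs we actually define; anything else is a 405.
        handler = getattr(self, req.method, None)
        resp = handler(req) if handler else HTTPMethodNotAllowed()
        return resp(env, start_response)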
232
swift/container/updater.py
Normal file
@ -0,0 +1,232 @@
# Copyright (c) 2010 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import errno
import logging
import os
import signal
import socket
import sys
import time
from random import random, shuffle

from eventlet import spawn, patcher, Timeout

from swift.container.server import DATADIR
from swift.common.bufferedhttp import http_connect
from swift.common.db import ContainerBroker
from swift.common.exceptions import ConnectionTimeout
from swift.common.ring import Ring
from swift.common.utils import get_logger, whataremyips


class ContainerUpdater(object):
    """Update container information in account listings."""

    def __init__(self, server_conf, updater_conf):
        self.logger = get_logger(updater_conf, 'container-updater')
        self.devices = server_conf.get('devices', '/srv/node')
        self.mount_check = server_conf.get('mount_check', 'true').lower() in \
            ('true', 't', '1', 'on', 'yes', 'y')
        swift_dir = server_conf.get('swift_dir', '/etc/swift')
        self.interval = int(updater_conf.get('interval', 300))
        self.account_ring_path = os.path.join(swift_dir, 'account.ring.gz')
        self.account_ring = None
        self.concurrency = int(updater_conf.get('concurrency', 4))
        self.slowdown = float(updater_conf.get('slowdown', 0.01))
        self.node_timeout = int(updater_conf.get('node_timeout', 3))
        self.conn_timeout = float(updater_conf.get('conn_timeout', 0.5))
        self.no_changes = 0
        self.successes = 0
        self.failures = 0

    def get_account_ring(self):
        """Get the account ring.  Load it if it hasn't been yet."""
        if not self.account_ring:
            self.logger.debug(
                'Loading account ring from %s' % self.account_ring_path)
            self.account_ring = Ring(self.account_ring_path)
        return self.account_ring

    def get_paths(self):
        """
        Get paths to all of the partitions on each drive to be processed.

        :returns: a list of paths
        """
        paths = []
        ips = whataremyips()
        for device in os.listdir(self.devices):
            dev_path = os.path.join(self.devices, device)
            if self.mount_check and not os.path.ismount(dev_path):
                self.logger.warn('%s is not mounted' % device)
                continue
            con_path = os.path.join(dev_path, DATADIR)
            if not os.path.exists(con_path):
                continue
            for partition in os.listdir(con_path):
                paths.append(os.path.join(con_path, partition))
        shuffle(paths)
        return paths

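    # Illustrative note (hypothetical layout, not in the original file):
    # with devices '/srv/node' holding a mounted drive 'sdb1', get_paths()
    # returns shuffled partition directories such as:
    #   ['/srv/node/sdb1/containers/1234',
    #    '/srv/node/sdb1/containers/5678', ...]
    # Shuffling spreads work so concurrent sweeps don't walk in lockstep.
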
    def update_forever(self):    # pragma: no cover
        """
        Run the updater continuously.
        """
        time.sleep(random() * self.interval)
        while True:
            self.logger.info('Begin container update sweep')
            begin = time.time()
            pids = []
            # read from account ring to ensure it's fresh
            self.get_account_ring().get_nodes('')
            for path in self.get_paths():
                while len(pids) >= self.concurrency:
                    pids.remove(os.wait()[0])
                pid = os.fork()
                if pid:
                    pids.append(pid)
                else:
                    signal.signal(signal.SIGTERM, signal.SIG_DFL)
                    patcher.monkey_patch(all=False, socket=True)
                    self.no_changes = 0
                    self.successes = 0
                    self.failures = 0
                    forkbegin = time.time()
                    self.container_sweep(path)
                    elapsed = time.time() - forkbegin
                    self.logger.debug(
                        'Container update sweep of %s completed: '
                        '%.02fs, %s successes, %s failures, %s with no changes'
                        % (path, elapsed, self.successes, self.failures,
                           self.no_changes))
                    sys.exit()
            while pids:
                pids.remove(os.wait()[0])
            elapsed = time.time() - begin
            self.logger.info('Container update sweep completed: %.02fs' %
                             elapsed)
            if elapsed < self.interval:
                time.sleep(self.interval - elapsed)

    def update_once_single_threaded(self):
        """
        Run the updater once.
        """
        patcher.monkey_patch(all=False, socket=True)
        self.logger.info('Begin container update single threaded sweep')
        begin = time.time()
        self.no_changes = 0
        self.successes = 0
        self.failures = 0
        for path in self.get_paths():
            self.container_sweep(path)
        elapsed = time.time() - begin
        self.logger.info('Container update single threaded sweep completed: '
            '%.02fs, %s successes, %s failures, %s with no changes' %
            (elapsed, self.successes, self.failures, self.no_changes))

    def container_sweep(self, path):
        """
        Walk the path looking for container DBs and process them.

        :param path: path to walk
        """
        for root, dirs, files in os.walk(path):
            for file in files:
                if file.endswith('.db'):
                    self.process_container(os.path.join(root, file))
                    time.sleep(self.slowdown)

    def process_container(self, dbfile):
        """
        Process a container, and update the information in the account.

        :param dbfile: container DB to process
        """
        broker = ContainerBroker(dbfile, logger=self.logger)
        info = broker.get_info()
        # Don't send updates if the container was auto-created since it
        # definitely doesn't have up to date statistics.
        if float(info['put_timestamp']) <= 0:
            return
        if info['put_timestamp'] > info['reported_put_timestamp'] or \
                info['delete_timestamp'] > info['reported_delete_timestamp'] \
                or info['object_count'] != info['reported_object_count'] or \
                info['bytes_used'] != info['reported_bytes_used']:
            container = '/%s/%s' % (info['account'], info['container'])
            part, nodes = self.get_account_ring().get_nodes(info['account'])
            events = [spawn(self.container_report, node, part, container,
                            info['put_timestamp'], info['delete_timestamp'],
                            info['object_count'], info['bytes_used'])
                      for node in nodes]
            successes = 0
            failures = 0
            for event in events:
                if 200 <= event.wait() < 300:
                    successes += 1
                else:
                    failures += 1
            if successes > failures:
                self.successes += 1
                self.logger.debug(
                    'Update report sent for %s %s' % (container, dbfile))
                broker.reported(info['put_timestamp'],
                                info['delete_timestamp'], info['object_count'],
                                info['bytes_used'])
            else:
                self.failures += 1
                self.logger.debug(
                    'Update report failed for %s %s' % (container, dbfile))
        else:
            self.no_changes += 1

    def container_report(self, node, part, container, put_timestamp,
                         delete_timestamp, count, bytes):
        """
        Report container info to an account server.

        :param node: node dictionary from the account ring
        :param part: partition the account is on
        :param container: container name
        :param put_timestamp: put timestamp
        :param delete_timestamp: delete timestamp
        :param count: object count in the container
        :param bytes: bytes used in the container
        """
        with ConnectionTimeout(self.conn_timeout):
            try:
                conn = http_connect(
                    node['ip'], node['port'], node['device'], part,
                    'PUT', container,
                    headers={'X-Put-Timestamp': put_timestamp,
                             'X-Delete-Timestamp': delete_timestamp,
                             'X-Object-Count': count,
                             'X-Bytes-Used': bytes,
                             'X-Account-Override-Deleted': 'yes'})
            except:
                self.logger.exception('ERROR account update failed with '
                    '%(ip)s:%(port)s/%(device)s (will retry later)' % node)
                return 500
        with Timeout(self.node_timeout):
            try:
                resp = conn.getresponse()
                resp.read()
                return resp.status
            except:
                if self.logger.getEffectiveLevel() <= logging.DEBUG:
                    self.logger.exception(
                        'Exception with %(ip)s:%(port)s/%(device)s' % node)
                return 500
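process_container only marks a report as delivered when a majority of account replicas acknowledge it. A standalone sketch of that quorum rule, with hypothetical ack lists (not from the Swift code):

def report_accepted(statuses):
    """Return True when a majority of replica responses are 2xx.

    statuses: iterable of HTTP status codes, one per account replica.
    """
    statuses = list(statuses)
    successes = sum(1 for s in statuses if 200 <= s < 300)
    failures = len(statuses) - successes
    return successes > failures

# e.g. two of three replicas accepted the update: the report sticks and
# broker.reported() records the stats as delivered.
assert report_accepted([201, 201, 500])
assert not report_accepted([500, 404, 201])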
0
swift/obj/__init__.py
Normal file
233
swift/obj/auditor.py
Normal file
@ -0,0 +1,233 @@
# Copyright (c) 2010 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import cPickle as pickle
import os
import socket
import sys
import time
from hashlib import md5
from random import choice, random
from urllib import quote

from eventlet import Timeout

from swift.obj import server as object_server
from swift.common.bufferedhttp import http_connect
from swift.common.exceptions import ConnectionTimeout, AuditException
from swift.common.ring import Ring
from swift.common.utils import get_logger, renamer


class ObjectAuditor(object):
    """Audit objects."""

    def __init__(self, server_conf, auditor_conf):
        self.logger = get_logger(auditor_conf, 'object-auditor')
        self.devices = server_conf.get('devices', '/srv/node')
        self.mount_check = server_conf.get('mount_check', 'true').lower() in \
            ('true', 't', '1', 'on', 'yes', 'y')
        self.interval = int(auditor_conf.get('interval', 1800))
        swift_dir = server_conf.get('swift_dir', '/etc/swift')
        self.container_ring_path = os.path.join(swift_dir, 'container.ring.gz')
        self.container_ring = None
        self.node_timeout = int(auditor_conf.get('node_timeout', 10))
        self.conn_timeout = float(auditor_conf.get('conn_timeout', 0.5))
        self.passes = 0
        self.quarantines = 0
        self.errors = 0
        self.container_passes = 0
        self.container_failures = 0
        self.container_errors = 0

    def get_container_ring(self):
        """
        Get the container ring, loading it if necessary.

        :returns: container ring
        """
        if not self.container_ring:
            self.logger.debug(
                'Loading container ring from %s' % self.container_ring_path)
            self.container_ring = Ring(self.container_ring_path)
        return self.container_ring

    def audit_forever(self):    # pragma: no cover
        """Run the object audit until stopped."""
        reported = time.time()
        time.sleep(random() * self.interval)
        while True:
            begin = time.time()
            # read from container ring to ensure it's fresh
            self.get_container_ring().get_nodes('')
            for device in os.listdir(self.devices):
                if self.mount_check and not \
                        os.path.ismount(os.path.join(self.devices, device)):
                    self.logger.debug(
                        'Skipping %s as it is not mounted' % device)
                    continue
                self.object_audit(device)
            if time.time() - reported >= 3600:  # once an hour
                self.logger.info(
                    'Since %s: Locally: %d passed audit, %d quarantined, %d '
                    'errors Remote audits with containers: %s passed audit, '
                    '%s failed audit, %s errors' %
                    (time.ctime(reported), self.passes, self.quarantines,
                     self.errors, self.container_passes,
                     self.container_failures, self.container_errors))
                reported = time.time()
                self.passes = 0
                self.quarantines = 0
                self.errors = 0
                self.container_passes = 0
                self.container_failures = 0
                self.container_errors = 0
            elapsed = time.time() - begin
            if elapsed < self.interval:
                time.sleep(self.interval - elapsed)

    def audit_once(self):
        """Run the object audit once."""
        self.logger.info('Begin object audit "once" mode')
        begin = time.time()
        for device in os.listdir(self.devices):
            if self.mount_check and \
                    not os.path.ismount(os.path.join(self.devices, device)):
                self.logger.debug(
                    'Skipping %s as it is not mounted' % device)
                continue
            self.object_audit(device)
        elapsed = time.time() - begin
        self.logger.info(
            'Object audit "once" mode completed: %.02fs' % elapsed)

    def object_audit(self, device):
        """Walk the device, and audit any objects found."""
        datadir = os.path.join(self.devices, device, object_server.DATADIR)
        if not os.path.exists(datadir):
            return
        name = None
        partition = None
        attempts = 100
        while not name and attempts:
            attempts -= 1
            try:
                partition = choice(os.listdir(datadir))
                fpath = os.path.join(datadir, partition)
                if not os.path.isdir(fpath):
                    continue
                suffix = choice(os.listdir(fpath))
                fpath = os.path.join(fpath, suffix)
                if not os.path.isdir(fpath):
                    continue
                hsh = choice(os.listdir(fpath))
                fpath = os.path.join(fpath, hsh)
                if not os.path.isdir(fpath):
                    continue
            except IndexError:
                continue
            for fname in sorted(os.listdir(fpath), reverse=True):
                if fname.endswith('.ts'):
                    break
                if fname.endswith('.data'):
                    name = object_server.read_metadata(
                        os.path.join(fpath, fname))['name']
                    break
        if not name:
            return
        _, account, container, obj = name.split('/', 3)
        df = object_server.DiskFile(self.devices, device, partition, account,
                                    container, obj, keep_data_fp=True)
        try:
            if os.path.getsize(df.data_file) != \
                    int(df.metadata['Content-Length']):
                raise AuditException('Content-Length of %s does not match '
                    'file size of %s' % (int(df.metadata['Content-Length']),
                                         os.path.getsize(df.data_file)))
            etag = md5()
            for chunk in df:
                etag.update(chunk)
            etag = etag.hexdigest()
            if etag != df.metadata['ETag']:
                raise AuditException("ETag of %s does not match file's md5 of "
                    "%s" % (df.metadata['ETag'], etag))
        except AuditException, err:
            self.quarantines += 1
            self.logger.error('ERROR Object %s failed audit and will be '
                'quarantined: %s' % (df.datadir, err))
            renamer(df.datadir, os.path.join(self.devices, device,
                'quarantined', 'objects', os.path.basename(df.datadir)))
            return
        except:
            self.errors += 1
            self.logger.exception('ERROR Trying to audit %s' % df.datadir)
            return
        self.passes += 1
        found = False
        good_response = False
        results = []
        part, nodes = self.get_container_ring().get_nodes(account, container)
        for node in nodes:
            try:
                with ConnectionTimeout(self.conn_timeout):
                    conn = http_connect(node['ip'], node['port'],
                            node['device'], part, 'GET',
                            '/%s/%s' % (account, container),
                            query_string='prefix=%s' % quote(obj))
                with Timeout(self.node_timeout):
                    resp = conn.getresponse()
                    body = resp.read()
                if 200 <= resp.status <= 299:
                    good_response = True
                    for oname in body.split('\n'):
                        if oname == obj:
                            found = True
                            break
                    if found:
                        break
                    else:
                        results.append('%s:%s/%s %s %s = %s' % (node['ip'],
                            node['port'], node['device'], resp.status,
                            resp.reason, repr(body)))
                else:
                    results.append('%s:%s/%s %s %s' %
                        (node['ip'], node['port'], node['device'],
                        resp.status, resp.reason))
            except socket.error, err:
                results.append('%s:%s/%s Socket Error: %s' % (node['ip'],
                    node['port'], node['device'], err))
            except ConnectionTimeout:
                results.append('%(ip)s:%(port)s/%(device)s ConnectionTimeout' %
                               node)
            except Timeout:
                results.append('%(ip)s:%(port)s/%(device)s Timeout' % node)
            except Exception, err:
                self.logger.exception('ERROR With remote server '
                                      '%(ip)s:%(port)s/%(device)s' % node)
                results.append('%s:%s/%s Exception: %s' % (node['ip'],
                    node['port'], node['device'], err))
        if found:
            self.container_passes += 1
            self.logger.debug('Audit passed for %s %s' % (name, df.datadir))
        else:
            if good_response:
                self.container_failures += 1
            else:
                self.container_errors += 1
            self.logger.error('ERROR Could not find object %s %s on any of '
                'the primary container servers it should be on: %s' % (name,
                df.datadir, results))
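The auditor's integrity check is an md5 of the on-disk bytes compared against the stored ETag. A minimal standalone sketch of the same check (the path and chunk size are hypothetical, not taken from this file):

from hashlib import md5

def compute_etag(path, chunk_size=65536):
    """md5 hexdigest of a file's contents, read in chunks as the audit does."""
    hasher = md5()
    with open(path, 'rb') as fp:
        chunk = fp.read(chunk_size)
        while chunk:
            hasher.update(chunk)
            chunk = fp.read(chunk_size)
    return hasher.hexdigest()

# An object would be quarantined when compute_etag(data_file) differs from
# the 'ETag' value stored in its metadata.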
501
swift/obj/replicator.py
Normal file
@ -0,0 +1,501 @@
# Copyright (c) 2010 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os, sys
from os.path import isdir, join
from ConfigParser import ConfigParser
import random
import shutil
import time
import logging
import hashlib
import itertools
import cPickle as pickle

import eventlet
from eventlet import GreenPool, tpool, Timeout, sleep
from eventlet.green import subprocess
from eventlet.support.greenlets import GreenletExit

from swift.common.ring import Ring
from swift.common.utils import whataremyips, unlink_older_than, lock_path, \
    renamer, compute_eta
from swift.common.bufferedhttp import http_connect


REPLICAS = 3
MAX_HANDOFFS = 5
PICKLE_PROTOCOL = 2
ONE_WEEK = 604800
HASH_FILE = 'hashes.pkl'


def hash_suffix(path, reclaim_age):
    """
    Performs reclamation and returns an md5 of all (remaining) files.

    :param reclaim_age: age in seconds at which to remove tombstones
    """
    md5 = hashlib.md5()
    for hsh in sorted(os.listdir(path)):
        hsh_path = join(path, hsh)
        files = os.listdir(hsh_path)
        if len(files) == 1:
            if files[0].endswith('.ts'):
                # remove tombstones older than reclaim_age
                ts = files[0].rsplit('.', 1)[0]
                if (time.time() - float(ts)) > reclaim_age:
                    os.unlink(join(hsh_path, files[0]))
                    files.remove(files[0])
        elif files:
            files.sort(reverse=True)
            meta = data = tomb = None
            for filename in files:
                if not meta and filename.endswith('.meta'):
                    meta = filename
                if not data and filename.endswith('.data'):
                    data = filename
                if not tomb and filename.endswith('.ts'):
                    tomb = filename
                if (filename < tomb or       # any file older than tomb
                    filename < data or       # any file older than data
                    (filename.endswith('.meta') and
                     filename < meta)):      # old meta
                    os.unlink(join(hsh_path, filename))
                    files.remove(filename)
        if not files:
            os.rmdir(hsh_path)
        for filename in files:
            md5.update(filename)
    try:
        os.rmdir(path)
    except OSError:
        pass
    return md5.hexdigest()

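# Illustrative note (hypothetical contents, not in the original file): the
# suffix hash is the md5 of the surviving filenames, fed in sorted-hash
# order.  With contents such as
#   <suffix>/d41d.../1287900000.00000.data
#   <suffix>/ffee.../1287900012.00000.ts
# two replicas agree on the suffix hash exactly when they hold the same set
# of file names, which is what makes the cheap hash comparison in update()
# below a safe substitute for walking the tree.
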
def recalculate_hashes(partition_dir, suffixes, reclaim_age=ONE_WEEK):
    """
    Recalculates hashes for the given suffixes in the partition and updates
    them in the partition's hashes file.

    :param partition_dir: directory of the partition in which to recalculate
    :param suffixes: list of suffixes to recalculate
    :param reclaim_age: age in seconds at which tombstones should be removed
    """
    def tpool_listdir(partition_dir):
        return dict(((suff, None) for suff in os.listdir(partition_dir)
                     if len(suff) == 3 and isdir(join(partition_dir, suff))))
    hashes_file = join(partition_dir, HASH_FILE)
    with lock_path(partition_dir):
        try:
            with open(hashes_file, 'rb') as fp:
                hashes = pickle.load(fp)
        except Exception:
            hashes = tpool.execute(tpool_listdir, partition_dir)
        for suffix in suffixes:
            suffix_dir = join(partition_dir, suffix)
            if os.path.exists(suffix_dir):
                hashes[suffix] = hash_suffix(suffix_dir, reclaim_age)
            elif suffix in hashes:
                del hashes[suffix]
        with open(hashes_file + '.tmp', 'wb') as fp:
            pickle.dump(hashes, fp, PICKLE_PROTOCOL)
        renamer(hashes_file + '.tmp', hashes_file)


def invalidate_hash(suffix_dir):
    """
    Invalidates the hash for a suffix_dir in the partition's hashes file.

    :param suffix_dir: absolute path to suffix dir whose hash needs
                       invalidating
    """
    suffix = os.path.basename(suffix_dir)
    partition_dir = os.path.dirname(suffix_dir)
    hashes_file = join(partition_dir, HASH_FILE)
    with lock_path(partition_dir):
        try:
            with open(hashes_file, 'rb') as fp:
                hashes = pickle.load(fp)
            if suffix in hashes and not hashes[suffix]:
                return
        except Exception:
            return
        hashes[suffix] = None
        with open(hashes_file + '.tmp', 'wb') as fp:
            pickle.dump(hashes, fp, PICKLE_PROTOCOL)
        renamer(hashes_file + '.tmp', hashes_file)


def get_hashes(partition_dir, do_listdir=True, reclaim_age=ONE_WEEK):
    """
    Get a list of hashes for the suffix dir.  do_listdir causes it to mistrust
    the hash cache for suffix existence at the (unexpectedly high) cost of a
    listdir.  reclaim_age is just passed on to hash_suffix.

    :param partition_dir: absolute path of partition to get hashes for
    :param do_listdir: force existence check for all hashes in the partition
    :param reclaim_age: age at which to remove tombstones

    :returns: tuple of (number of suffix dirs hashed, dictionary of hashes)
    """
    def tpool_listdir(hashes, partition_dir):
        return dict(((suff, hashes.get(suff, None))
                     for suff in os.listdir(partition_dir)
                     if len(suff) == 3 and isdir(join(partition_dir, suff))))
    hashed = 0
    hashes_file = join(partition_dir, HASH_FILE)
    with lock_path(partition_dir):
        modified = False
        hashes = {}
        try:
            with open(hashes_file, 'rb') as fp:
                hashes = pickle.load(fp)
        except Exception:
            do_listdir = True
        if do_listdir:
            hashes = tpool.execute(tpool_listdir, hashes, partition_dir)
            modified = True
        for suffix, hash_ in hashes.items():
            if not hash_:
                suffix_dir = join(partition_dir, suffix)
                if os.path.exists(suffix_dir):
                    try:
                        hashes[suffix] = hash_suffix(suffix_dir, reclaim_age)
                        hashed += 1
                    except OSError:
                        logging.exception('Error hashing suffix')
                        hashes[suffix] = None
                else:
                    del hashes[suffix]
                modified = True
                sleep()
        if modified:
            with open(hashes_file + '.tmp', 'wb') as fp:
                pickle.dump(hashes, fp, PICKLE_PROTOCOL)
            renamer(hashes_file + '.tmp', hashes_file)
        return hashed, hashes

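# Illustrative note (hypothetical values, not in the original file): the
# hashes.pkl maintained above maps three-character suffix directory names
# to their cached md5, with None marking an invalidated entry, e.g.:
#   {'a83': 'd3b07384d113edec49eaa6238ad5ff00', '0f1': None}
# Entries set to None by invalidate_hash() are rehashed by get_hashes() on
# the next pass.
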
class ObjectReplicator(object):
    """
    Replicate objects.

    Encapsulates most logic and data needed by the object replication process.
    Each call to .run() performs one replication pass.  It's up to the caller
    to do this in a loop.
    """

    def __init__(self, conf, logger):
        """
        :param conf: configuration object obtained from ConfigParser
        :param logger: logging object
        """
        self.conf = conf
        self.logger = logger
        self.devices_dir = conf.get('devices', '/srv/node')
        self.mount_check = conf.get('mount_check', 'true').lower() in \
            ('true', 't', '1', 'on', 'yes', 'y')
        self.vm_test_mode = conf.get(
            'vm_test_mode', 'no').lower() in ('yes', 'true', 'on', '1')
        self.swift_dir = conf.get('swift_dir', '/etc/swift')
        self.port = int(conf.get('bind_port', 6000))
        self.concurrency = int(conf.get('replication_concurrency', 1))
        self.timeout = conf['timeout']
        self.stats_interval = int(conf['stats_interval'])
        self.object_ring = Ring(join(self.swift_dir, 'object.ring.gz'))
        self.ring_check_interval = int(conf.get('ring_check_interval', 15))
        self.next_check = time.time() + self.ring_check_interval
        self.reclaim_age = int(conf.get('reclaim_age', 86400 * 7))
        self.partition_times = []

    def _rsync(self, args):
        """
        Execute the rsync binary to replicate a partition.

        :returns: a tuple of (rsync exit code, rsync standard output)
        """
        start_time = time.time()
        ret_val = None
        proc = None
        try:
            with Timeout(120):
                proc = subprocess.Popen(args, stdout=subprocess.PIPE,
                                        stderr=subprocess.STDOUT)
                results = proc.stdout.read()
                ret_val = proc.wait()
        finally:
            # Guard against the Timeout firing before Popen returned, in
            # which case proc was never assigned.
            if ret_val is None and proc:
                proc.kill()
        total_time = time.time() - start_time
        if results:
            for result in results.split('\n'):
                if result == '':
                    continue
                if result.startswith('cd+'):
                    continue
                self.logger.info(result)
            self.logger.info(
                "Sync of %s at %s complete (%.03f) [%d]" % (
                args[-2], args[-1], total_time, ret_val))
        else:
            self.logger.debug(
                "Sync of %s at %s complete (%.03f) [%d]" % (
                args[-2], args[-1], total_time, ret_val))
        if ret_val:
            self.logger.error('Bad rsync return code: %d' % ret_val)
        return ret_val, results

    def rsync(self, node, job, suffixes):
        """
        Synchronize local suffix directories from a partition with a remote
        node.

        :param node: the "dev" entry for the remote node to sync with
        :param job: information about the partition being synced
        :param suffixes: a list of suffixes which need to be pushed

        :returns: boolean indicating success or failure
        """
        if not os.path.exists(job['path']):
            return False
        args = [
            'rsync',
            '--recursive',
            '--whole-file',
            '--human-readable',
            '--xattrs',
            '--itemize-changes',
            '--ignore-existing',
            '--timeout=%s' % self.timeout,
            '--contimeout=%s' % self.timeout,
        ]
        if self.vm_test_mode:
            rsync_module = '%s::object%s' % (node['ip'], node['port'])
        else:
            rsync_module = '%s::object' % node['ip']
        had_any = False
        for suffix in suffixes:
            spath = join(job['path'], suffix)
            if os.path.exists(spath):
                args.append(spath)
                had_any = True
        if not had_any:
            return False
        args.append(join(rsync_module, node['device'],
                    'objects', job['partition']))
        ret_val, results = self._rsync(args)
        return ret_val == 0

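    # Illustrative sketch (hypothetical values, not in the original file):
    # for a peer at ip 10.0.0.2 with device 'sdb1' and partition '1234',
    # the argv built above comes out roughly as:
    #   rsync --recursive --whole-file --human-readable --xattrs
    #         --itemize-changes --ignore-existing --timeout=... --contimeout=...
    #         /srv/node/sda1/objects/1234/a83 10.0.0.2::object/sdb1/objects/1234
    # i.e. whole suffix directories are pushed into the peer's rsync module.
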
def check_ring(self):
|
||||||
|
"""
|
||||||
|
Check to see if the ring has been updated
|
||||||
|
|
||||||
|
:returns: boolean indicating whether or not the ring has changed
|
||||||
|
"""
|
||||||
|
if time.time() > self.next_check:
|
||||||
|
self.next_check = time.time() + self.ring_check_interval
|
||||||
|
if self.object_ring.has_changed():
|
||||||
|
return False
|
||||||
|
return True
|
||||||
|
|
||||||
|
def update_deleted(self, job):
|
||||||
|
"""
|
||||||
|
High-level method that replicates a single partition that doesn't belong
|
||||||
|
on this node.
|
||||||
|
|
||||||
|
:param job: a dict containing info about the partition to be replicated
|
||||||
|
"""
|
||||||
|
def tpool_get_suffixes(path):
|
||||||
|
return [suff for suff in os.listdir(path)
|
||||||
|
if len(suff) == 3 and isdir(join(path, suff))]
|
||||||
|
self.replication_count += 1
|
||||||
|
begin = time.time()
|
||||||
|
try:
|
||||||
|
responses = []
|
||||||
|
suffixes = tpool.execute(tpool_get_suffixes, job['path'])
|
||||||
|
if suffixes:
|
||||||
|
for node in job['nodes']:
|
||||||
|
success = self.rsync(node, job, suffixes)
|
||||||
|
if success:
|
||||||
|
with Timeout(60):
|
||||||
|
http_connect(node['ip'], node['port'],
|
||||||
|
node['device'], job['partition'], 'REPLICATE',
|
||||||
|
'/' + '-'.join(suffixes),
|
||||||
|
headers={'Content-Length': '0'}
|
||||||
|
).getresponse().read()
|
||||||
|
responses.append(success)
|
||||||
|
if not suffixes or (len(responses) == REPLICAS and all(responses)):
|
||||||
|
self.logger.info("Removing partition: %s" % job['path'])
|
||||||
|
tpool.execute(shutil.rmtree, job['path'], ignore_errors=True)
|
||||||
|
except (Exception, Timeout):
|
||||||
|
self.logger.exception("Error syncing handoff partition")
|
||||||
|
finally:
|
||||||
|
self.partition_times.append(time.time() - begin)
|
||||||
|
|
||||||
|
def update(self, job):
|
||||||
|
"""
|
||||||
|
High-level method that replicates a single partition.
|
||||||
|
|
||||||
|
:param job: a dict containing info about the partition to be replicated
|
||||||
|
"""
|
||||||
|
self.replication_count += 1
|
||||||
|
begin = time.time()
|
||||||
|
try:
|
||||||
|
hashed, local_hash = get_hashes(job['path'],
|
||||||
|
do_listdir=(self.replication_count % 10) == 0,
|
||||||
|
reclaim_age=self.reclaim_age)
|
||||||
|
self.suffix_hash += hashed
|
||||||
|
successes = 0
|
||||||
|
nodes = itertools.chain(job['nodes'],
|
||||||
|
self.object_ring.get_more_nodes(int(job['partition'])))
|
||||||
|
while successes < (REPLICAS - 1):
|
||||||
|
node = next(nodes)
|
||||||
|
try:
|
||||||
|
with Timeout(60):
|
||||||
|
resp = http_connect(node['ip'], node['port'],
|
||||||
|
node['device'], job['partition'], 'REPLICATE',
|
||||||
|
'', headers={'Content-Length': '0'}
|
||||||
|
).getresponse()
|
||||||
|
if resp.status != 200:
|
||||||
|
self.logger.error("Invalid response %s from %s" %
|
||||||
|
(resp.status, node['ip']))
|
||||||
|
continue
|
||||||
|
remote_hash = pickle.loads(resp.read())
|
||||||
|
del resp
|
||||||
|
successes += 1
|
||||||
|
suffixes = [suffix for suffix in local_hash
|
||||||
|
if local_hash[suffix] != remote_hash.get(suffix, -1)]
|
||||||
|
if not suffixes:
|
||||||
|
continue
|
||||||
|
success = self.rsync(node, job, suffixes)
|
||||||
|
recalculate_hashes(job['path'], suffixes,
|
||||||
|
reclaim_age=self.reclaim_age)
|
||||||
|
with Timeout(60):
|
||||||
|
http_connect(node['ip'], node['port'],
|
||||||
|
node['device'], job['partition'], 'REPLICATE',
|
||||||
|
'/' + '-'.join(suffixes),
|
||||||
|
headers={'Content-Length': '0'}
|
||||||
|
).getresponse().read()
|
||||||
|
self.suffix_sync += len(suffixes)
|
||||||
|
except (Exception, Timeout):
|
||||||
|
logging.exception("Error syncing with node: %s" % node)
|
||||||
|
self.suffix_count += len(local_hash)
|
||||||
|
except (Exception, Timeout):
|
||||||
|
self.logger.exception("Error syncing partition")
|
||||||
|
finally:
|
||||||
|
self.partition_times.append(time.time() - begin)
|
||||||
|
|
||||||
|
def stats_line(self):
|
||||||
|
"""
|
||||||
|
Logs various stats for the currently running replication pass.
|
||||||
|
"""
|
||||||
|
if self.replication_count:
|
||||||
|
rate = self.replication_count / (time.time() - self.start)
|
||||||
|
left = int((self.job_count - self.replication_count) / rate)
|
||||||
|
self.logger.info("%d/%d (%.2f%%) partitions replicated in %.2f seconds (%.2f/sec, %s remaining)"
|
||||||
|
% (self.replication_count, self.job_count,
|
||||||
|
self.replication_count * 100.0 / self.job_count,
|
||||||
|
time.time() - self.start, rate,
|
||||||
|
'%d%s' % compute_eta(self.start, self.replication_count, self.job_count)))
|
||||||
|
if self.suffix_count:
|
||||||
|
self.logger.info("%d suffixes checked - %.2f%% hashed, %.2f%% synced" %
|
||||||
|
(self.suffix_count,
|
||||||
|
(self.suffix_hash * 100.0) / self.suffix_count,
|
||||||
|
(self.suffix_sync * 100.0) / self.suffix_count))
|
||||||
|
self.partition_times.sort()
|
||||||
|
self.logger.info("Partition times: max %.4fs, min %.4fs, med %.4fs"
|
||||||
|
% (self.partition_times[-1], self.partition_times[0],
|
||||||
|
self.partition_times[len(self.partition_times) // 2]))
|
||||||
|
else:
|
||||||
|
self.logger.info("Nothing replicated for %s seconds." % (time.time() - self.start))
|
||||||
|
|
||||||
|
def kill_coros(self):
|
||||||
|
"""Utility function that kills all coroutines currently running."""
|
||||||
|
for coro in list(self.run_pool.coroutines_running):
|
||||||
|
try:
|
||||||
|
coro.kill(GreenletExit)
|
||||||
|
except GreenletExit:
|
||||||
|
pass
|
||||||
|
|
||||||
|
def heartbeat(self):
|
||||||
|
"""
|
||||||
|
Loop that runs in the background during replication. It periodically
|
||||||
|
logs progress and attempts to detect lockups, killing any running
|
||||||
|
coroutines if the replicator hasn't made progress since last hearbeat.
|
||||||
|
"""
|
||||||
|
while True:
|
||||||
|
if self.replication_count == self.last_replication_count:
|
||||||
|
self.logger.error("Lockup detected.. killing live coros.")
|
||||||
|
self.kill_coros()
|
||||||
|
self.last_replication_count = self.replication_count
|
||||||
|
eventlet.sleep(300)
|
||||||
|
self.stats_line()
|
||||||
|
|
    def run(self):
        """Run a replication pass"""
        self.start = time.time()
        self.suffix_count = 0
        self.suffix_sync = 0
        self.suffix_hash = 0
        self.replication_count = 0
        self.last_replication_count = -1
        self.partition_times = []
        jobs = []
        stats = eventlet.spawn(self.heartbeat)
        try:
            ips = whataremyips()
            self.run_pool = GreenPool(size=self.concurrency)
            for local_dev in [
                    dev for dev in self.object_ring.devs
                    if dev and dev['ip'] in ips and dev['port'] == self.port]:
                dev_path = join(self.devices_dir, local_dev['device'])
                obj_path = join(dev_path, 'objects')
                tmp_path = join(dev_path, 'tmp')
                if self.mount_check and not os.path.ismount(dev_path):
                    self.logger.warn('%s is not mounted' % local_dev['device'])
                    continue
                unlink_older_than(tmp_path, time.time() - self.reclaim_age)
                if not os.path.exists(obj_path):
                    continue
                for partition in os.listdir(obj_path):
                    try:
                        nodes = [node for node in
                            self.object_ring.get_part_nodes(int(partition))
                            if node['id'] != local_dev['id']]
                        jobs.append(dict(path=join(obj_path, partition),
                            nodes=nodes, delete=len(nodes) > 2,
                            partition=partition))
                    except ValueError:
                        continue
            random.shuffle(jobs)
            # Partitions that need to be deleted take priority
            jobs.sort(key=lambda job: not job['delete'])
            self.job_count = len(jobs)
            for job in jobs:
                if not self.check_ring():
                    self.logger.info(
                        "Ring change detected. Aborting current "
                        "replication pass.")
                    return
                if job['delete']:
                    self.run_pool.spawn(self.update_deleted, job)
                else:
                    self.run_pool.spawn(self.update, job)
            with Timeout(120):
                self.run_pool.waitall()
        except (Exception, Timeout):
            self.logger.exception("Exception while replicating")
            self.kill_coros()
        self.stats_line()
        stats.kill()
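    # Small illustrative note (not part of the import): the job ordering in
    # run() works because key=lambda job: not job['delete'] maps delete jobs
    # to False, and False sorts before True, so partitions flagged for
    # deletion are handled first.
    #
    #   jobs = [{'partition': '1', 'delete': False},
    #           {'partition': '2', 'delete': True}]
    #   jobs.sort(key=lambda job: not job['delete'])
    #   assert [j['partition'] for j in jobs] == ['2', '1']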
599
swift/obj/server.py
Normal file
@ -0,0 +1,599 @@
# Copyright (c) 2010 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

""" Object Server for Swift """

from __future__ import with_statement
import cPickle as pickle
import errno
import os
import socket
import time
import traceback
from datetime import datetime
from hashlib import md5
from tempfile import mkstemp
from urllib import unquote
from contextlib import contextmanager

from webob import Request, Response, UTC
from webob.exc import HTTPAccepted, HTTPBadRequest, HTTPCreated, \
    HTTPInternalServerError, HTTPLengthRequired, HTTPNoContent, HTTPNotFound, \
    HTTPNotImplemented, HTTPNotModified, HTTPPreconditionFailed, \
    HTTPRequestTimeout, HTTPUnprocessableEntity, HTTPMethodNotAllowed
from xattr import getxattr, setxattr
from eventlet import sleep, Timeout

from swift.common.exceptions import MessageTimeout
from swift.common.utils import mkdirs, normalize_timestamp, \
    storage_directory, hash_path, get_logger, renamer, fallocate, \
    split_path, drop_buffer_cache
from swift.common.healthcheck import healthcheck
from swift.common.bufferedhttp import http_connect
from swift.common.constraints import check_object_creation, check_mount, \
    check_float, check_xml_encodable
from swift.common.exceptions import ConnectionTimeout
from swift.obj.replicator import get_hashes, invalidate_hash, \
    recalculate_hashes


DATADIR = 'objects'
ASYNCDIR = 'async_pending'
PICKLE_PROTOCOL = 2
METADATA_KEY = 'user.swift.metadata'
MAX_OBJECT_NAME_LENGTH = 1024

def read_metadata(fd):
    """
    Helper function to read the pickled metadata from an object file.

    :param fd: file descriptor to load the metadata from

    :returns: dictionary of metadata
    """
    metadata = ''
    key = 0
    try:
        while True:
            metadata += getxattr(fd, '%s%s' % (METADATA_KEY, (key or '')))
            key += 1
    except IOError:
        pass
    return pickle.loads(metadata)


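def _example_metadata_roundtrip(path='/tmp/example'):
    # Illustrative sketch, not part of the upstream import: metadata is
    # pickled and spread across numbered xattr keys in 254-byte chunks
    # (mirroring DiskFile.put below); read_metadata reassembles the chunks
    # until getxattr raises IOError. The path argument is hypothetical and
    # must live on an xattr-capable filesystem.
    fd = os.open(path, os.O_RDWR | os.O_CREAT)
    try:
        metastr = pickle.dumps({'X-Timestamp': '1'}, PICKLE_PROTOCOL)
        key = 0
        while metastr:
            setxattr(fd, '%s%s' % (METADATA_KEY, key or ''), metastr[:254])
            metastr = metastr[254:]
            key += 1
        return read_metadata(fd)
    finally:
        os.close(fd)

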
class DiskFile(object):
    """
    Manage object files on disk.

    :param path: path to devices on the node
    :param device: device name
    :param partition: partition on the device the object lives in
    :param account: account name for the object
    :param container: container name for the object
    :param obj: object name for the object
    :param keep_data_fp: if True, don't close the fp, otherwise close it
    :param disk_chunk_size: size of chunks on file reads
    """
    def __init__(self, path, device, partition, account, container, obj,
                 keep_data_fp=False, disk_chunk_size=65536):
        self.disk_chunk_size = disk_chunk_size
        self.name = '/' + '/'.join((account, container, obj))
        name_hash = hash_path(account, container, obj)
        self.datadir = os.path.join(path, device,
                    storage_directory(DATADIR, partition, name_hash))
        self.tmpdir = os.path.join(path, device, 'tmp')
        self.metadata = {}
        self.meta_file = None
        self.data_file = None
        if not os.path.exists(self.datadir):
            return
        files = sorted(os.listdir(self.datadir), reverse=True)
        for file in files:
            if file.endswith('.ts'):
                self.data_file = self.meta_file = None
                self.metadata = {'deleted': True}
                return
            if file.endswith('.meta') and not self.meta_file:
                self.meta_file = os.path.join(self.datadir, file)
            if file.endswith('.data') and not self.data_file:
                self.data_file = os.path.join(self.datadir, file)
                break
        if not self.data_file:
            return
        self.fp = open(self.data_file, 'rb')
        self.metadata = read_metadata(self.fp)
        if not keep_data_fp:
            self.close()
        if self.meta_file:
            with open(self.meta_file) as mfp:
                for key in self.metadata.keys():
                    if key.lower() not in ('content-type', 'content-encoding',
                                'deleted', 'content-length', 'etag'):
                        del self.metadata[key]
                self.metadata.update(read_metadata(mfp))

    def __iter__(self):
        """Returns an iterator over the data file."""
        try:
            dropped_cache = 0
            read = 0
            while True:
                chunk = self.fp.read(self.disk_chunk_size)
                if chunk:
                    read += len(chunk)
                    if read - dropped_cache > (1024 * 1024):
                        drop_buffer_cache(self.fp.fileno(), dropped_cache,
                            read - dropped_cache)
                        dropped_cache = read
                    yield chunk
                else:
                    drop_buffer_cache(self.fp.fileno(), dropped_cache,
                        read - dropped_cache)
                    break
        finally:
            self.close()

    def app_iter_range(self, start, stop):
        """Returns an iterator over the data file for range (start, stop)"""
        if start:
            self.fp.seek(start)
        if stop is not None:
            length = stop - start
        else:
            length = None
        for chunk in self:
            if length is not None:
                length -= len(chunk)
                if length < 0:
                    # Chop off the extra:
                    yield chunk[:length]
                    break
            yield chunk

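    # Illustrative note (not part of the import): when length goes negative
    # in app_iter_range, chunk[:length] uses Python's negative-index slicing
    # to drop exactly the overshoot, e.g. a 10-byte chunk with length == -3
    # yields the first 7 bytes:
    #
    #   chunk, length = '0123456789', -3
    #   assert chunk[:length] == '0123456'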
    def close(self):
        """Close the file."""
        if self.fp:
            self.fp.close()
            self.fp = None

    def is_deleted(self):
        """
        Check if the file is deleted.

        :returns: True if the file doesn't exist or has been flagged as
                  deleted.
        """
        return not self.data_file or 'deleted' in self.metadata

    @contextmanager
    def mkstemp(self):
        """Contextmanager to make a temporary file."""
        if not os.path.exists(self.tmpdir):
            mkdirs(self.tmpdir)
        fd, tmppath = mkstemp(dir=self.tmpdir)
        try:
            yield fd, tmppath
        finally:
            try:
                os.close(fd)
            except OSError:
                pass
            try:
                os.unlink(tmppath)
            except OSError:
                pass

    def put(self, fd, tmppath, metadata, extension='.data'):
        """
        Finalize writing the file on disk, and renames it from the temp file
        to the real location. This should be called after the data has been
        written to the temp file.

        :param fd: file descriptor of the temp file
        :param tmppath: path to the temporary file being used
        :param metadata: dictionary of metadata to be written
        :param extension: extension to be used when making the file
        """
        metadata['name'] = self.name
        timestamp = normalize_timestamp(metadata['X-Timestamp'])
        metastr = pickle.dumps(metadata, PICKLE_PROTOCOL)
        key = 0
        while metastr:
            setxattr(fd, '%s%s' % (METADATA_KEY, key or ''), metastr[:254])
            metastr = metastr[254:]
            key += 1
        if 'Content-Length' in metadata:
            drop_buffer_cache(fd, 0, int(metadata['Content-Length']))
        os.fsync(fd)
        invalidate_hash(os.path.dirname(self.datadir))
        renamer(tmppath, os.path.join(self.datadir, timestamp + extension))
        self.metadata = metadata

    def unlinkold(self, timestamp):
        """
        Remove any older versions of the object file. Any file that has an
        older timestamp than timestamp will be deleted.

        :param timestamp: timestamp to compare with each file
        """
        timestamp = normalize_timestamp(timestamp)
        for fname in os.listdir(self.datadir):
            if fname < timestamp:
                try:
                    os.unlink(os.path.join(self.datadir, fname))
                except OSError, err:    # pragma: no cover
                    if err.errno != errno.ENOENT:
                        raise


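# Illustrative note (not part of the import): on-disk files are named by
# normalized timestamps plus an extension, and normalize_timestamp pads to a
# fixed width ('%016.05f'-style), so the plain string comparison
# fname < timestamp in unlinkold orders the same way the numeric values do:
#
#   assert '0000001000.00000.data' < '0000002000.00000'

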
class ObjectController(object):
    """Implements the WSGI application for the Swift Object Server."""

    log_name = 'object'

    def __init__(self, conf):
        """
        Creates a new WSGI application for the Swift Object Server. An
        example configuration is given at
        <source-dir>/etc/object-server.conf-sample or
        /etc/swift/object-server.conf-sample.
        """
        self.logger = get_logger(conf, self.log_name)
        self.devices = conf.get('devices', '/srv/node/')
        self.mount_check = conf.get('mount_check', 'true').lower() in \
            ('true', 't', '1', 'on', 'yes', 'y')
        self.node_timeout = int(conf.get('node_timeout', 3))
        self.conn_timeout = float(conf.get('conn_timeout', 0.5))
        self.disk_chunk_size = int(conf.get('disk_chunk_size', 65536))
        self.network_chunk_size = int(conf.get('network_chunk_size', 65536))
        self.log_requests = conf.get('log_requests', 't')[:1].lower() == 't'
        self.max_upload_time = int(conf.get('max_upload_time', 86400))
        self.slow = int(conf.get('slow', 0))
        self.chunks_per_sync = int(conf.get('chunks_per_sync', 8000))

    def container_update(self, op, account, container, obj, headers_in,
                         headers_out, objdevice):
        """
        Update the container when objects are updated.

        :param op: operation performed (ex: 'PUT', or 'DELETE')
        :param account: account name for the object
        :param container: container name for the object
        :param obj: object name
        :param headers_in: dictionary of headers from the original request
        :param headers_out: dictionary of headers to send in the container
                            request
        :param objdevice: device name that the object is in
        """
        host = headers_in.get('X-Container-Host', None)
        partition = headers_in.get('X-Container-Partition', None)
        contdevice = headers_in.get('X-Container-Device', None)
        if not all([host, partition, contdevice]):
            return
        full_path = '/%s/%s/%s' % (account, container, obj)
        try:
            with ConnectionTimeout(self.conn_timeout):
                ip, port = host.split(':')
                conn = http_connect(ip, port, contdevice, partition, op,
                        full_path, headers_out)
            with Timeout(self.node_timeout):
                response = conn.getresponse()
                response.read()
                if 200 <= response.status < 300:
                    return
                else:
                    self.logger.error('ERROR Container update failed (saving '
                        'for async update later): %d response from %s:%s/%s' %
                        (response.status, ip, port, contdevice))
        except:
            self.logger.exception('ERROR container update failed with '
                '%s:%s/%s transaction %s (saving for async update later)' %
                (ip, port, contdevice, headers_in.get('x-cf-trans-id', '-')))
        async_dir = os.path.join(self.devices, objdevice, ASYNCDIR)
        fd, tmppath = mkstemp(dir=os.path.join(self.devices, objdevice,
                                               'tmp'))
        with os.fdopen(fd, 'wb') as fo:
            pickle.dump({'op': op, 'account': account, 'container': container,
                'obj': obj, 'headers': headers_out}, fo)
            fo.flush()
            os.fsync(fd)
        ohash = hash_path(account, container, obj)
        renamer(tmppath, os.path.join(async_dir, ohash[-3:], ohash + '-' +
            normalize_timestamp(headers_out['x-timestamp'])))

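    # A minimal sketch (not part of the import) of the async-pending naming
    # scheme the object-updater relies on; the account/container/object names
    # here are hypothetical:
    #
    #   ohash = hash_path('a', 'c', 'o')
    #   path = os.path.join('async_pending', ohash[-3:],
    #                       ohash + '-' + normalize_timestamp(time.time()))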
    def POST(self, request):
        """Handle HTTP POST requests for the Swift Object Server."""
        try:
            device, partition, account, container, obj = \
                split_path(unquote(request.path), 5, 5, True)
        except ValueError, err:
            return HTTPBadRequest(body=str(err), request=request,
                        content_type='text/plain')
        if 'x-timestamp' not in request.headers or \
                    not check_float(request.headers['x-timestamp']):
            return HTTPBadRequest(body='Missing timestamp', request=request,
                        content_type='text/plain')
        if self.mount_check and not check_mount(self.devices, device):
            return Response(status='507 %s is not mounted' % device)
        file = DiskFile(self.devices, device, partition, account, container,
                        obj, disk_chunk_size=self.disk_chunk_size)
        deleted = file.is_deleted()
        if deleted:
            response_class = HTTPNotFound
        else:
            response_class = HTTPAccepted
        old_metadata = file.metadata
        metadata = {'X-Timestamp': request.headers['x-timestamp']}
        metadata.update(val for val in request.headers.iteritems()
                if val[0].lower().startswith('x-object-meta-'))
        with file.mkstemp() as (fd, tmppath):
            file.put(fd, tmppath, metadata, extension='.meta')
        return response_class(request=request)

    def PUT(self, request):
        """Handle HTTP PUT requests for the Swift Object Server."""
        try:
            device, partition, account, container, obj = \
                split_path(unquote(request.path), 5, 5, True)
        except ValueError, err:
            return HTTPBadRequest(body=str(err), request=request,
                        content_type='text/plain')
        if self.mount_check and not check_mount(self.devices, device):
            return Response(status='507 %s is not mounted' % device)
        if 'x-timestamp' not in request.headers or \
                    not check_float(request.headers['x-timestamp']):
            return HTTPBadRequest(body='Missing timestamp', request=request,
                        content_type='text/plain')
        error_response = check_object_creation(request, obj)
        if error_response:
            return error_response
        file = DiskFile(self.devices, device, partition, account, container,
                        obj, disk_chunk_size=self.disk_chunk_size)
        upload_expiration = time.time() + self.max_upload_time
        etag = md5()
        upload_size = 0
        with file.mkstemp() as (fd, tmppath):
            if 'content-length' in request.headers:
                fallocate(fd, int(request.headers['content-length']))
            chunk_count = 0
            dropped_cache = 0
            for chunk in iter(lambda: request.body_file.read(
                        self.network_chunk_size), ''):
                upload_size += len(chunk)
                if time.time() > upload_expiration:
                    return HTTPRequestTimeout(request=request)
                etag.update(chunk)
                while chunk:
                    written = os.write(fd, chunk)
                    chunk = chunk[written:]
                chunk_count += 1
                # For large files sync every 512MB (by default) written
                if chunk_count % self.chunks_per_sync == 0:
                    os.fdatasync(fd)
                    drop_buffer_cache(fd, dropped_cache,
                        upload_size - dropped_cache)
                    dropped_cache = upload_size

            if 'content-length' in request.headers and \
                    int(request.headers['content-length']) != upload_size:
                return Response(status='499 Client Disconnect')
            etag = etag.hexdigest()
            if 'etag' in request.headers and \
                    request.headers['etag'].lower() != etag:
                return HTTPUnprocessableEntity(request=request)
            metadata = {
                'X-Timestamp': request.headers['x-timestamp'],
                'Content-Type': request.headers['content-type'],
                'ETag': etag,
                'Content-Length': str(os.fstat(fd).st_size),
            }
            metadata.update(val for val in request.headers.iteritems()
                    if val[0].lower().startswith('x-object-meta-') and
                    len(val[0]) > 14)
            if 'content-encoding' in request.headers:
                metadata['Content-Encoding'] = \
                    request.headers['Content-Encoding']
            file.put(fd, tmppath, metadata)
        file.unlinkold(metadata['X-Timestamp'])
        self.container_update('PUT', account, container, obj, request.headers,
            {'x-size': file.metadata['Content-Length'],
             'x-content-type': file.metadata['Content-Type'],
             'x-timestamp': file.metadata['X-Timestamp'],
             'x-etag': file.metadata['ETag'],
             'x-cf-trans-id': request.headers.get('x-cf-trans-id', '-')},
            device)
        resp = HTTPCreated(request=request, etag=etag)
        return resp

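    # Quick arithmetic behind the "sync every 512MB" comment in PUT above
    # (not part of the import): with the default network_chunk_size of 65536
    # bytes and chunks_per_sync of 8000, fdatasync fires every
    # 65536 * 8000 = 524288000 bytes, i.e. exactly 500MiB; the comment's
    # "512MB" is a round figure.
    #
    #   assert 65536 * 8000 == 500 * 1024 * 1024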
    def GET(self, request):
        """Handle HTTP GET requests for the Swift Object Server."""
        try:
            device, partition, account, container, obj = \
                split_path(unquote(request.path), 5, 5, True)
        except ValueError, err:
            return HTTPBadRequest(body=str(err), request=request,
                        content_type='text/plain')
        if self.mount_check and not check_mount(self.devices, device):
            return Response(status='507 %s is not mounted' % device)
        file = DiskFile(self.devices, device, partition, account, container,
                obj, keep_data_fp=True, disk_chunk_size=self.disk_chunk_size)
        if file.is_deleted():
            if request.headers.get('if-match') == '*':
                return HTTPPreconditionFailed(request=request)
            else:
                return HTTPNotFound(request=request)
        if request.headers.get('if-match') not in (None, '*') and \
                file.metadata['ETag'] not in request.if_match:
            file.close()
            return HTTPPreconditionFailed(request=request)
        if request.headers.get('if-none-match') is not None:
            if file.metadata['ETag'] in request.if_none_match:
                resp = HTTPNotModified(request=request)
                resp.etag = file.metadata['ETag']
                file.close()
                return resp
        try:
            if_unmodified_since = request.if_unmodified_since
        except (OverflowError, ValueError):
            # catches timestamps before the epoch
            return HTTPPreconditionFailed(request=request)
        if if_unmodified_since and \
                datetime.fromtimestamp(float(file.metadata['X-Timestamp']),
                UTC) > if_unmodified_since:
            file.close()
            return HTTPPreconditionFailed(request=request)
        try:
            if_modified_since = request.if_modified_since
        except (OverflowError, ValueError):
            # catches timestamps before the epoch
            return HTTPPreconditionFailed(request=request)
        if if_modified_since and \
                datetime.fromtimestamp(float(file.metadata['X-Timestamp']),
                UTC) < if_modified_since:
            file.close()
            return HTTPNotModified(request=request)
        response = Response(content_type=file.metadata.get('Content-Type',
                'application/octet-stream'), app_iter=file,
                request=request, conditional_response=True)
        for key, value in file.metadata.iteritems():
            if key.lower().startswith('x-object-meta-'):
                response.headers[key] = value
        response.etag = file.metadata['ETag']
        response.last_modified = float(file.metadata['X-Timestamp'])
        response.content_length = int(file.metadata['Content-Length'])
        if 'Content-Encoding' in file.metadata:
            response.content_encoding = file.metadata['Content-Encoding']
        return request.get_response(response)

    def HEAD(self, request):
        """Handle HTTP HEAD requests for the Swift Object Server."""
        try:
            device, partition, account, container, obj = \
                split_path(unquote(request.path), 5, 5, True)
        except ValueError, err:
            resp = HTTPBadRequest(request=request)
            resp.content_type = 'text/plain'
            resp.body = str(err)
            return resp
        if self.mount_check and not check_mount(self.devices, device):
            return Response(status='507 %s is not mounted' % device)
        file = DiskFile(self.devices, device, partition, account, container,
                        obj, disk_chunk_size=self.disk_chunk_size)
        if file.is_deleted():
            return HTTPNotFound(request=request)
        response = Response(content_type=file.metadata['Content-Type'],
                            request=request, conditional_response=True)
        for key, value in file.metadata.iteritems():
            if key.lower().startswith('x-object-meta-'):
                response.headers[key] = value
        response.etag = file.metadata['ETag']
        response.last_modified = float(file.metadata['X-Timestamp'])
        response.content_length = int(file.metadata['Content-Length'])
        if 'Content-Encoding' in file.metadata:
            response.content_encoding = file.metadata['Content-Encoding']
        return response

    def DELETE(self, request):
        """Handle HTTP DELETE requests for the Swift Object Server."""
        try:
            device, partition, account, container, obj = \
                split_path(unquote(request.path), 5, 5, True)
        except ValueError, e:
            return HTTPBadRequest(body=str(e), request=request,
                        content_type='text/plain')
        if 'x-timestamp' not in request.headers or \
                    not check_float(request.headers['x-timestamp']):
            return HTTPBadRequest(body='Missing timestamp', request=request,
                        content_type='text/plain')
        if self.mount_check and not check_mount(self.devices, device):
            return Response(status='507 %s is not mounted' % device)
        response_class = HTTPNoContent
        file = DiskFile(self.devices, device, partition, account, container,
                        obj, disk_chunk_size=self.disk_chunk_size)
        if file.is_deleted():
            response_class = HTTPNotFound
        metadata = {
            'X-Timestamp': request.headers['X-Timestamp'], 'deleted': True,
        }
        with file.mkstemp() as (fd, tmppath):
            file.put(fd, tmppath, metadata, extension='.ts')
        file.unlinkold(metadata['X-Timestamp'])
        self.container_update('DELETE', account, container, obj,
            request.headers, {'x-timestamp': metadata['X-Timestamp'],
            'x-cf-trans-id': request.headers.get('x-cf-trans-id', '-')},
            device)
        resp = response_class(request=request)
        return resp

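    # Illustrative note (not part of the import): DELETE does not remove the
    # object outright; it writes a zero-byte '.ts' tombstone whose timestamp
    # wins over any older '.data' file, which is how DiskFile.__init__ above
    # decides is_deleted(). A hypothetical datadir after a delete:
    #
    #   0000000002.00000.ts      <- tombstone, newest, object reads as gone
    #   0000000001.00000.data    <- removed by unlinkold()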
    def REPLICATE(self, request):
        """
        Handle REPLICATE requests for the Swift Object Server. This is used
        by the object replicator to get hashes for directories.
        """
        device, partition, suffix = split_path(
            unquote(request.path), 2, 3, True)
        if self.mount_check and not check_mount(self.devices, device):
            return Response(status='507 %s is not mounted' % device)
        if suffix:
            recalculate_hashes(os.path.join(self.devices, device,
                DATADIR, partition), suffix.split('-'))
            return Response()
        path = os.path.join(self.devices, device, DATADIR, partition)
        if not os.path.exists(path):
            mkdirs(path)
        _, hashes = get_hashes(path, do_listdir=False)
        return Response(body=pickle.dumps(hashes))

    def __call__(self, env, start_response):
        """WSGI Application entry point for the Swift Object Server."""
        start_time = time.time()
        req = Request(env)
        if req.path_info == '/healthcheck':
            return healthcheck(req)(env, start_response)
        elif not check_xml_encodable(req.path_info):
            res = HTTPPreconditionFailed(body='Invalid UTF8')
        else:
            try:
                if hasattr(self, req.method):
                    res = getattr(self, req.method)(req)
                else:
                    res = HTTPMethodNotAllowed()
            except:
                self.logger.exception('ERROR __call__ error with %s %s '
                    'transaction %s' % (env.get('REQUEST_METHOD', '-'),
                    env.get('PATH_INFO', '-'), env.get('HTTP_X_CF_TRANS_ID',
                    '-')))
                res = HTTPInternalServerError(body=traceback.format_exc())
        trans_time = time.time() - start_time
        if self.log_requests:
            log_line = '%s - - [%s] "%s %s" %s %s "%s" "%s" "%s" %.4f' % (
                req.remote_addr,
                time.strftime('%d/%b/%Y:%H:%M:%S +0000',
                              time.gmtime()),
                req.method, req.path, res.status.split()[0],
                res.content_length or '-', req.referer or '-',
                req.headers.get('x-cf-trans-id', '-'),
                req.user_agent or '-',
                trans_time)
            if req.method == 'REPLICATE':
                self.logger.debug(log_line)
            else:
                self.logger.info(log_line)
        if req.method in ('PUT', 'DELETE'):
            slow = self.slow - trans_time
            if slow > 0:
                sleep(slow)
        return res(env, start_response)
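# A minimal usage sketch (not part of the import): the controller is a plain
# WSGI callable, so it can be served directly, e.g. with eventlet's wsgi
# server; the config values and bind port below are hypothetical.
#
#   from eventlet import wsgi, listen
#   app = ObjectController({'devices': '/srv/node', 'mount_check': 'false'})
#   wsgi.server(listen(('', 6000)), app)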
197
swift/obj/updater.py
Normal file
@ -0,0 +1,197 @@
# Copyright (c) 2010 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import cPickle as pickle
import errno
import logging
import os
import signal
import sys
import time
from random import random

from eventlet import patcher, Timeout

from swift.common.bufferedhttp import http_connect
from swift.common.exceptions import ConnectionTimeout
from swift.common.ring import Ring
from swift.common.utils import get_logger, renamer
from swift.common.db_replicator import ReplConnection
from swift.obj.server import ASYNCDIR


class ObjectUpdater(object):
    """Update object information in container listings."""

    def __init__(self, server_conf, updater_conf):
        self.logger = get_logger(updater_conf, 'object-updater')
        self.devices = server_conf.get('devices', '/srv/node')
        self.mount_check = server_conf.get('mount_check', 'true').lower() in \
            ('true', 't', '1', 'on', 'yes', 'y')
        swift_dir = server_conf.get('swift_dir', '/etc/swift')
        self.interval = int(updater_conf.get('interval', 300))
        self.container_ring_path = os.path.join(swift_dir,
                                                'container.ring.gz')
        self.container_ring = None
        self.concurrency = int(updater_conf.get('concurrency', 1))
        self.slowdown = float(updater_conf.get('slowdown', 0.01))
        self.node_timeout = int(updater_conf.get('node_timeout', 10))
        self.conn_timeout = float(updater_conf.get('conn_timeout', 0.5))
        self.successes = 0
        self.failures = 0

    def get_container_ring(self):
        """Get the container ring. Load it, if it hasn't been yet."""
        if not self.container_ring:
            self.logger.debug(
                'Loading container ring from %s' % self.container_ring_path)
            self.container_ring = Ring(self.container_ring_path)
        return self.container_ring

    def update_forever(self):    # pragma: no cover
        """Run the updater continuously."""
        time.sleep(random() * self.interval)
        while True:
            self.logger.info('Begin object update sweep')
            begin = time.time()
            pids = []
            # read from container ring to ensure it's fresh
            self.get_container_ring().get_nodes('')
            for device in os.listdir(self.devices):
                if self.mount_check and not \
                        os.path.ismount(os.path.join(self.devices, device)):
                    self.logger.warn(
                        'Skipping %s as it is not mounted' % device)
                    continue
                while len(pids) >= self.concurrency:
                    pids.remove(os.wait()[0])
                pid = os.fork()
                if pid:
                    pids.append(pid)
                else:
                    signal.signal(signal.SIGTERM, signal.SIG_DFL)
                    patcher.monkey_patch(all=False, socket=True)
                    self.successes = 0
                    self.failures = 0
                    forkbegin = time.time()
                    self.object_sweep(os.path.join(self.devices, device))
                    elapsed = time.time() - forkbegin
                    self.logger.info('Object update sweep of %s completed: '
                        '%.02fs, %s successes, %s failures' %
                        (device, elapsed, self.successes, self.failures))
                    sys.exit()
            while pids:
                pids.remove(os.wait()[0])
            elapsed = time.time() - begin
            self.logger.info('Object update sweep completed: %.02fs' %
                             elapsed)
            if elapsed < self.interval:
                time.sleep(self.interval - elapsed)

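    # Illustrative note (not part of the import): the fork loop above is a
    # simple process pool; os.wait() returns (pid, status), so
    # pids.remove(os.wait()[0]) blocks until any child exits and then frees
    # a slot. A stripped-down version of the same throttle:
    #
    #   pids = []
    #   for work in range(10):
    #       while len(pids) >= 2:          # at most two children at once
    #           pids.remove(os.wait()[0])
    #       pid = os.fork()
    #       if pid:
    #           pids.append(pid)
    #       else:
    #           sys.exit()                 # child: do the work, then exit
    #   while pids:
    #       pids.remove(os.wait()[0])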
    def update_once_single_threaded(self):
        """Run the updater once"""
        self.logger.info('Begin object update single threaded sweep')
        begin = time.time()
        self.successes = 0
        self.failures = 0
        for device in os.listdir(self.devices):
            if self.mount_check and \
                    not os.path.ismount(os.path.join(self.devices, device)):
                self.logger.warn(
                    'Skipping %s as it is not mounted' % device)
                continue
            self.object_sweep(os.path.join(self.devices, device))
        elapsed = time.time() - begin
        self.logger.info('Object update single threaded sweep completed: '
            '%.02fs, %s successes, %s failures' %
            (elapsed, self.successes, self.failures))

    def object_sweep(self, device):
        """
        If there are async pendings on the device, walk each one and update.

        :param device: path to device
        """
        async_pending = os.path.join(device, ASYNCDIR)
        if not os.path.isdir(async_pending):
            return
        for prefix in os.listdir(async_pending):
            prefix_path = os.path.join(async_pending, prefix)
            if not os.path.isdir(prefix_path):
                continue
            for update in os.listdir(prefix_path):
                update_path = os.path.join(prefix_path, update)
                if not os.path.isfile(update_path):
                    continue
                self.process_object_update(update_path, device)
                time.sleep(self.slowdown)
            try:
                os.rmdir(prefix_path)
            except OSError:
                pass

    def process_object_update(self, update_path, device):
        """
        Process the object information to be updated and update.

        :param update_path: path to pickled object update file
        :param device: path to device
        """
        try:
            update = pickle.load(open(update_path, 'rb'))
        except Exception, err:
            self.logger.exception(
                'ERROR Pickle problem, quarantining %s' % update_path)
            renamer(update_path, os.path.join(device,
                'quarantined', 'objects', os.path.basename(update_path)))
            return
        part, nodes = self.get_container_ring().get_nodes(
            update['account'], update['container'])
        obj = '/%s/%s/%s' % \
            (update['account'], update['container'], update['obj'])
        success = True
        for node in nodes:
            status = self.object_update(node, part, update['op'], obj,
                update['headers'])
            if not (200 <= status < 300) and status != 404:
                success = False
        if success:
            self.successes += 1
            self.logger.debug('Update sent for %s %s' % (obj, update_path))
            os.unlink(update_path)
        else:
            self.failures += 1
            self.logger.debug('Update failed for %s %s' % (obj, update_path))

    def object_update(self, node, part, op, obj, headers):
        """
        Perform the object update to the container.

        :param node: node dictionary from the container ring
        :param part: partition that holds the container
        :param op: operation performed (ex: 'POST' or 'DELETE')
        :param obj: object name being updated
        :param headers: headers to send with the update
        """
        try:
            with ConnectionTimeout(self.conn_timeout):
                conn = http_connect(node['ip'], node['port'], node['device'],
                    part, op, obj, headers)
            with Timeout(self.node_timeout):
                resp = conn.getresponse()
                resp.read()
                return resp.status
        except:
            self.logger.exception('ERROR with remote server '
                '%(ip)s:%(port)s/%(device)s' % node)
            return 500
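    # Illustrative note (not part of the import): the success test in
    # process_object_update above accepts any 2xx and, deliberately, 404
    # (the container database may simply not exist on that node yet), so
    # only genuine errors keep the async pending file around for retry:
    #
    #   ok = lambda status: (200 <= status < 300) or status == 404
    #   assert ok(204) and ok(404) and not ok(500)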
0
swift/proxy/__init__.py
Normal file
1190
swift/proxy/server.py
Normal file
File diff suppressed because it is too large
24
test/unit/__init__.py
Normal file
@ -0,0 +1,24 @@
""" Swift tests """

from eventlet.green import socket


def readuntil2crlfs(fd):
    rv = ''
    lc = ''
    crlfs = 0
    while crlfs < 2:
        c = fd.read(1)
        rv = rv + c
        if c == '\r' and lc != '\n':
            crlfs = 0
        if lc == '\r' and c == '\n':
            crlfs += 1
        lc = c
    return rv


def connect_tcp(hostport):
    rv = socket.socket()
    rv.connect(hostport)
    return rv
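# A minimal usage sketch (not part of the import): readuntil2crlfs consumes
# a file-like object one byte at a time until it has seen the blank line
# that terminates HTTP headers (two consecutive CRLFs); the sample response
# below is hypothetical.
#
#   from StringIO import StringIO
#   data = StringIO('HTTP/1.1 200 OK\r\nContent-Length: 0\r\n\r\nbody')
#   headers = readuntil2crlfs(data)
#   assert headers.endswith('\r\n\r\n') and 'body' not in headers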
0
test/unit/account/__init__.py
Normal file
28
test/unit/account/test_auditor.py
Normal file
@ -0,0 +1,28 @@
# Copyright (c) 2010 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# TODO: Tests

import unittest
from swift.account import auditor


class TestAuditor(unittest.TestCase):

    def test_placeholder(self):
        pass


if __name__ == '__main__':
    unittest.main()
28
test/unit/account/test_reaper.py
Normal file
@ -0,0 +1,28 @@
# Copyright (c) 2010 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# TODO: Tests

import unittest
from swift.account import reaper


class TestReaper(unittest.TestCase):

    def test_placeholder(self):
        pass


if __name__ == '__main__':
    unittest.main()
889
test/unit/account/test_server.py
Normal file
@ -0,0 +1,889 @@
# Copyright (c) 2010 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import errno
import os
import unittest
from shutil import rmtree
from StringIO import StringIO

import simplejson
import xml.dom.minidom
from webob import Request

from swift.account.server import AccountController, ACCOUNT_LISTING_LIMIT
from swift.common.utils import normalize_timestamp


class TestAccountController(unittest.TestCase):
    """ Test swift.account_server.AccountController """
    def setUp(self):
        """ Set up for testing swift.account_server.AccountController """
        self.testdir = os.path.join(os.path.dirname(__file__),
                                    'account_server')
        self.controller = AccountController(
            {'devices': self.testdir, 'mount_check': 'false'})

    def tearDown(self):
        """ Tear down for testing swift.account_server.AccountController """
        try:
            rmtree(self.testdir)
        except OSError, err:
            if err.errno != errno.ENOENT:
                raise

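    # Illustrative note (not part of the import): webob's Request.blank
    # builds a synthetic WSGI request from a path and environ dict, so the
    # tests below drive the controller methods directly without a running
    # server, e.g.:
    #
    #   req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'HEAD'})
    #   resp = self.controller.HEAD(req)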
    def test_DELETE_not_found(self):
        req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'DELETE',
            'HTTP_X_TIMESTAMP': '0'})
        resp = self.controller.DELETE(req)
        self.assertEquals(resp.status_int, 404)

    def test_DELETE_empty(self):
        req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT',
            'HTTP_X_TIMESTAMP': '0'})
        self.controller.PUT(req)
        req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'DELETE',
            'HTTP_X_TIMESTAMP': '1'})
        resp = self.controller.DELETE(req)
        self.assertEquals(resp.status_int, 204)

    def test_DELETE_not_empty(self):
        req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT',
            'HTTP_X_TIMESTAMP': '0'})
        self.controller.PUT(req)
        req = Request.blank('/sda1/p/a/c1', environ={'REQUEST_METHOD': 'PUT'},
            headers={'X-Put-Timestamp': '1',
                     'X-Delete-Timestamp': '0',
                     'X-Object-Count': '0',
                     'X-Bytes-Used': '0'})
        self.controller.PUT(req)
        req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'DELETE',
            'HTTP_X_TIMESTAMP': '1'})
        resp = self.controller.DELETE(req)
        # We now allow deleting non-empty accounts
        self.assertEquals(resp.status_int, 204)

    def test_DELETE_now_empty(self):
        req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT',
            'HTTP_X_TIMESTAMP': '0'})
        self.controller.PUT(req)
        req = Request.blank('/sda1/p/a/c1', environ={'REQUEST_METHOD': 'PUT'},
            headers={'X-Put-Timestamp': '1',
                     'X-Delete-Timestamp': '0',
                     'X-Object-Count': '0',
                     'X-Bytes-Used': '0',
                     'X-Timestamp': normalize_timestamp(0)})
        self.controller.PUT(req)
        req = Request.blank('/sda1/p/a/c1',
            environ={'REQUEST_METHOD': 'PUT'},
            headers={'X-Put-Timestamp': '1',
                     'X-Delete-Timestamp': '2',
                     'X-Object-Count': '0',
                     'X-Bytes-Used': '0',
                     'X-Timestamp': normalize_timestamp(0)})
        resp = self.controller.PUT(req)
        self.assertEquals(resp.status_int, 204)
        req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'DELETE',
            'HTTP_X_TIMESTAMP': '1'})
        resp = self.controller.DELETE(req)
        self.assertEquals(resp.status_int, 204)

    def test_HEAD_not_found(self):
        req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'HEAD'})
        resp = self.controller.HEAD(req)
        self.assertEquals(resp.status_int, 404)

    def test_HEAD_empty_account(self):
        req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT',
            'HTTP_X_TIMESTAMP': '0'})
        self.controller.PUT(req)
        req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'HEAD'})
        resp = self.controller.HEAD(req)
        self.assertEquals(resp.status_int, 204)
        self.assertEquals(resp.headers['x-account-container-count'], 0)
        self.assertEquals(resp.headers['x-account-object-count'], 0)
        self.assertEquals(resp.headers['x-account-bytes-used'], 0)

    def test_HEAD_with_containers(self):
        req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT'},
            headers={'X-Timestamp': '0'})
        self.controller.PUT(req)
        req = Request.blank('/sda1/p/a/c1', environ={'REQUEST_METHOD': 'PUT'},
            headers={'X-Put-Timestamp': '1',
                     'X-Delete-Timestamp': '0',
                     'X-Object-Count': '0',
                     'X-Bytes-Used': '0',
                     'X-Timestamp': normalize_timestamp(0)})
        self.controller.PUT(req)
        req = Request.blank('/sda1/p/a/c2', environ={'REQUEST_METHOD': 'PUT'},
            headers={'X-Put-Timestamp': '2',
                     'X-Delete-Timestamp': '0',
                     'X-Object-Count': '0',
                     'X-Bytes-Used': '0',
                     'X-Timestamp': normalize_timestamp(0)})
        self.controller.PUT(req)
        req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'HEAD'})
        resp = self.controller.HEAD(req)
        self.assertEquals(resp.status_int, 204)
        self.assertEquals(resp.headers['x-account-container-count'], 2)
        self.assertEquals(resp.headers['x-account-object-count'], 0)
        self.assertEquals(resp.headers['x-account-bytes-used'], 0)
        req = Request.blank('/sda1/p/a/c1', environ={'REQUEST_METHOD': 'PUT'},
            headers={'X-Put-Timestamp': '1',
                     'X-Delete-Timestamp': '0',
                     'X-Object-Count': '1',
                     'X-Bytes-Used': '2',
                     'X-Timestamp': normalize_timestamp(0)})
        self.controller.PUT(req)
        req = Request.blank('/sda1/p/a/c2', environ={'REQUEST_METHOD': 'PUT'},
            headers={'X-Put-Timestamp': '2',
                     'X-Delete-Timestamp': '0',
                     'X-Object-Count': '3',
                     'X-Bytes-Used': '4',
                     'X-Timestamp': normalize_timestamp(0)})
        self.controller.PUT(req)
        req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'HEAD',
            'HTTP_X_TIMESTAMP': '5'})
        resp = self.controller.HEAD(req)
        self.assertEquals(resp.status_int, 204)
        self.assertEquals(resp.headers['x-account-container-count'], 2)
        self.assertEquals(resp.headers['x-account-object-count'], 4)
        self.assertEquals(resp.headers['x-account-bytes-used'], 6)

    def test_PUT_not_found(self):
        req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT'},
            headers={'X-PUT-Timestamp': normalize_timestamp(1),
                     'X-DELETE-Timestamp': normalize_timestamp(0),
                     'X-Object-Count': '1',
                     'X-Bytes-Used': '1',
                     'X-Timestamp': normalize_timestamp(0)})
        resp = self.controller.PUT(req)
        self.assertEquals(resp.status_int, 404)

    def test_PUT(self):
        req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT',
            'HTTP_X_TIMESTAMP': '0'})
        resp = self.controller.PUT(req)
        self.assertEquals(resp.status_int, 201)
        req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT',
            'HTTP_X_TIMESTAMP': '1'})
        resp = self.controller.PUT(req)
        self.assertEquals(resp.status_int, 202)

    def test_PUT_after_DELETE(self):
        req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT'},
            headers={'X-Timestamp': normalize_timestamp(1)})
        resp = self.controller.PUT(req)
        self.assertEquals(resp.status_int, 201)
        req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'DELETE'},
            headers={'X-Timestamp': normalize_timestamp(1)})
        resp = self.controller.DELETE(req)
        self.assertEquals(resp.status_int, 204)
        req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT'},
            headers={'X-Timestamp': normalize_timestamp(2)})
        resp = self.controller.PUT(req)
        self.assertEquals(resp.status_int, 403)
        self.assertEquals(resp.body, 'Recently deleted')

    def test_GET_not_found_plain(self):
        req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'GET'})
        resp = self.controller.GET(req)
        self.assertEquals(resp.status_int, 404)

    def test_GET_not_found_json(self):
        req = Request.blank('/sda1/p/a?format=json',
            environ={'REQUEST_METHOD': 'GET'})
        resp = self.controller.GET(req)
        self.assertEquals(resp.status_int, 404)

    def test_GET_not_found_xml(self):
        req = Request.blank('/sda1/p/a?format=xml',
            environ={'REQUEST_METHOD': 'GET'})
        resp = self.controller.GET(req)
        self.assertEquals(resp.status_int, 404)

    def test_GET_empty_account_plain(self):
        req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT',
            'HTTP_X_TIMESTAMP': '0'})
        self.controller.PUT(req)
        req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'GET'})
        resp = self.controller.GET(req)
        self.assertEquals(resp.status_int, 204)

    def test_GET_empty_account_json(self):
        req = Request.blank('/sda1/p/a?format=json',
            environ={'REQUEST_METHOD': 'PUT', 'HTTP_X_TIMESTAMP': '0'})
        self.controller.PUT(req)
        req = Request.blank('/sda1/p/a?format=json',
            environ={'REQUEST_METHOD': 'GET'})
        resp = self.controller.GET(req)
        self.assertEquals(resp.status_int, 200)

    def test_GET_empty_account_xml(self):
        req = Request.blank('/sda1/p/a?format=xml',
            environ={'REQUEST_METHOD': 'PUT', 'HTTP_X_TIMESTAMP': '0'})
        self.controller.PUT(req)
        req = Request.blank('/sda1/p/a?format=xml',
            environ={'REQUEST_METHOD': 'GET'})
        resp = self.controller.GET(req)
        self.assertEquals(resp.status_int, 200)

    def test_GET_over_limit(self):
        req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT',
            'HTTP_X_TIMESTAMP': '0'})
        self.controller.PUT(req)
        req = Request.blank('/sda1/p/a?limit=%d' %
            (ACCOUNT_LISTING_LIMIT + 1), environ={'REQUEST_METHOD': 'GET'})
        resp = self.controller.GET(req)
        self.assertEquals(resp.status_int, 412)

    def test_GET_with_containers_plain(self):
        req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT',
            'HTTP_X_TIMESTAMP': '0'})
        self.controller.PUT(req)
        req = Request.blank('/sda1/p/a/c1', environ={'REQUEST_METHOD': 'PUT'},
            headers={'X-Put-Timestamp': '1',
                     'X-Delete-Timestamp': '0',
                     'X-Object-Count': '0',
                     'X-Bytes-Used': '0',
                     'X-Timestamp': normalize_timestamp(0)})
        self.controller.PUT(req)
        req = Request.blank('/sda1/p/a/c2', environ={'REQUEST_METHOD': 'PUT'},
            headers={'X-Put-Timestamp': '2',
                     'X-Delete-Timestamp': '0',
                     'X-Object-Count': '0',
                     'X-Bytes-Used': '0',
                     'X-Timestamp': normalize_timestamp(0)})
        self.controller.PUT(req)
        req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'GET'})
        resp = self.controller.GET(req)
        self.assertEquals(resp.status_int, 200)
        self.assertEquals(resp.body.strip().split('\n'), ['c1', 'c2'])
        req = Request.blank('/sda1/p/a/c1', environ={'REQUEST_METHOD': 'PUT'},
            headers={'X-Put-Timestamp': '1',
                     'X-Delete-Timestamp': '0',
                     'X-Object-Count': '1',
                     'X-Bytes-Used': '2',
                     'X-Timestamp': normalize_timestamp(0)})
        self.controller.PUT(req)
        req = Request.blank('/sda1/p/a/c2', environ={'REQUEST_METHOD': 'PUT'},
            headers={'X-Put-Timestamp': '2',
                     'X-Delete-Timestamp': '0',
                     'X-Object-Count': '3',
                     'X-Bytes-Used': '4',
                     'X-Timestamp': normalize_timestamp(0)})
        self.controller.PUT(req)
        req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'GET'})
        resp = self.controller.GET(req)
        self.assertEquals(resp.status_int, 200)
        self.assertEquals(resp.body.strip().split('\n'), ['c1', 'c2'])
        self.assertEquals(resp.content_type, 'text/plain')

    def test_GET_with_containers_json(self):
        req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT',
            'HTTP_X_TIMESTAMP': '0'})
        self.controller.PUT(req)
        req = Request.blank('/sda1/p/a/c1', environ={'REQUEST_METHOD': 'PUT'},
            headers={'X-Put-Timestamp': '1',
                     'X-Delete-Timestamp': '0',
                     'X-Object-Count': '0',
                     'X-Bytes-Used': '0',
                     'X-Timestamp': normalize_timestamp(0)})
        self.controller.PUT(req)
        req = Request.blank('/sda1/p/a/c2', environ={'REQUEST_METHOD': 'PUT'},
            headers={'X-Put-Timestamp': '2',
                     'X-Delete-Timestamp': '0',
                     'X-Object-Count': '0',
                     'X-Bytes-Used': '0',
                     'X-Timestamp': normalize_timestamp(0)})
        self.controller.PUT(req)
        req = Request.blank('/sda1/p/a?format=json',
            environ={'REQUEST_METHOD': 'GET'})
        resp = self.controller.GET(req)
        self.assertEquals(resp.status_int, 200)
        self.assertEquals(simplejson.loads(resp.body),
            [{'count': 0, 'bytes': 0, 'name': 'c1'},
             {'count': 0, 'bytes': 0, 'name': 'c2'}])
        req = Request.blank('/sda1/p/a/c1', environ={'REQUEST_METHOD': 'PUT'},
            headers={'X-Put-Timestamp': '1',
                     'X-Delete-Timestamp': '0',
                     'X-Object-Count': '1',
                     'X-Bytes-Used': '2',
                     'X-Timestamp': normalize_timestamp(0)})
        self.controller.PUT(req)
        req = Request.blank('/sda1/p/a/c2', environ={'REQUEST_METHOD': 'PUT'},
            headers={'X-Put-Timestamp': '2',
                     'X-Delete-Timestamp': '0',
                     'X-Object-Count': '3',
                     'X-Bytes-Used': '4',
                     'X-Timestamp': normalize_timestamp(0)})
        self.controller.PUT(req)
        req = Request.blank('/sda1/p/a?format=json',
            environ={'REQUEST_METHOD': 'GET'})
        resp = self.controller.GET(req)
        self.assertEquals(resp.status_int, 200)
        self.assertEquals(simplejson.loads(resp.body),
            [{'count': 1, 'bytes': 2, 'name': 'c1'},
             {'count': 3, 'bytes': 4, 'name': 'c2'}])
        self.assertEquals(resp.content_type, 'application/json')

    def test_GET_with_containers_xml(self):
        req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT',
            'HTTP_X_TIMESTAMP': '0'})
        self.controller.PUT(req)
        req = Request.blank('/sda1/p/a/c1', environ={'REQUEST_METHOD': 'PUT'},
            headers={'X-Put-Timestamp': '1',
                     'X-Delete-Timestamp': '0',
                     'X-Object-Count': '0',
                     'X-Bytes-Used': '0',
                     'X-Timestamp': normalize_timestamp(0)})
        self.controller.PUT(req)
        req = Request.blank('/sda1/p/a/c2', environ={'REQUEST_METHOD': 'PUT'},
            headers={'X-Put-Timestamp': '2',
                     'X-Delete-Timestamp': '0',
                     'X-Object-Count': '0',
                     'X-Bytes-Used': '0',
                     'X-Timestamp': normalize_timestamp(0)})
        self.controller.PUT(req)
        req = Request.blank('/sda1/p/a?format=xml',
            environ={'REQUEST_METHOD': 'GET'})
        resp = self.controller.GET(req)
        self.assertEquals(resp.content_type, 'application/xml')
        self.assertEquals(resp.status_int, 200)
        dom = xml.dom.minidom.parseString(resp.body)
        self.assertEquals(dom.firstChild.nodeName, 'account')
        listing = \
            [n for n in dom.firstChild.childNodes if n.nodeName != '#text']
        self.assertEquals(len(listing), 2)
        self.assertEquals(listing[0].nodeName, 'container')
        container = \
            [n for n in listing[0].childNodes if n.nodeName != '#text']
        self.assertEquals(sorted([n.nodeName for n in container]),
                          ['bytes', 'count', 'name'])
        node = [n for n in container if n.nodeName == 'name'][0]
        self.assertEquals(node.firstChild.nodeValue, 'c1')
        node = [n for n in container if n.nodeName == 'count'][0]
        self.assertEquals(node.firstChild.nodeValue, '0')
        node = [n for n in container if n.nodeName == 'bytes'][0]
        self.assertEquals(node.firstChild.nodeValue, '0')
        self.assertEquals(listing[-1].nodeName, 'container')
        container = \
            [n for n in listing[-1].childNodes if n.nodeName != '#text']
        self.assertEquals(sorted([n.nodeName for n in container]),
                          ['bytes', 'count', 'name'])
        node = [n for n in container if n.nodeName == 'name'][0]
        self.assertEquals(node.firstChild.nodeValue, 'c2')
        node = [n for n in container if n.nodeName == 'count'][0]
        self.assertEquals(node.firstChild.nodeValue, '0')
        node = [n for n in container if n.nodeName == 'bytes'][0]
        self.assertEquals(node.firstChild.nodeValue, '0')
        req = Request.blank('/sda1/p/a/c1', environ={'REQUEST_METHOD': 'PUT'},
            headers={'X-Put-Timestamp': '1',
                     'X-Delete-Timestamp': '0',
                     'X-Object-Count': '1',
                     'X-Bytes-Used': '2',
                     'X-Timestamp': normalize_timestamp(0)})
        self.controller.PUT(req)
        req = Request.blank('/sda1/p/a/c2', environ={'REQUEST_METHOD': 'PUT'},
            headers={'X-Put-Timestamp': '2',
                     'X-Delete-Timestamp': '0',
                     'X-Object-Count': '3',
                     'X-Bytes-Used': '4',
                     'X-Timestamp': normalize_timestamp(0)})
        self.controller.PUT(req)
        req = Request.blank('/sda1/p/a?format=xml',
            environ={'REQUEST_METHOD': 'GET'})
        resp = self.controller.GET(req)
        self.assertEquals(resp.status_int, 200)
        dom = xml.dom.minidom.parseString(resp.body)
        self.assertEquals(dom.firstChild.nodeName, 'account')
        listing = \
            [n for n in dom.firstChild.childNodes if n.nodeName != '#text']
        self.assertEquals(len(listing), 2)
        self.assertEquals(listing[0].nodeName, 'container')
        container = \
            [n for n in listing[0].childNodes if n.nodeName != '#text']
        self.assertEquals(sorted([n.nodeName for n in container]),
                          ['bytes', 'count', 'name'])
        node = [n for n in container if n.nodeName == 'name'][0]
        self.assertEquals(node.firstChild.nodeValue, 'c1')
        node = [n for n in container if n.nodeName == 'count'][0]
        self.assertEquals(node.firstChild.nodeValue, '1')
        node = [n for n in container if n.nodeName == 'bytes'][0]
        self.assertEquals(node.firstChild.nodeValue, '2')
|
||||||
|
self.assertEquals(listing[-1].nodeName, 'container')
|
||||||
|
container = [n for n in listing[-1].childNodes if n.nodeName != '#text']
|
||||||
|
self.assertEquals(sorted([n.nodeName for n in container]),
|
||||||
|
['bytes', 'count', 'name'])
|
||||||
|
node = [n for n in container if n.nodeName == 'name'][0]
|
||||||
|
self.assertEquals(node.firstChild.nodeValue, 'c2')
|
||||||
|
node = [n for n in container if n.nodeName == 'count'][0]
|
||||||
|
self.assertEquals(node.firstChild.nodeValue, '3')
|
||||||
|
node = [n for n in container if n.nodeName == 'bytes'][0]
|
||||||
|
self.assertEquals(node.firstChild.nodeValue, '4')
|
||||||
|
|
||||||
|
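    # Paging: ?limit=N caps the number of containers returned, and
    # ?marker=name resumes the listing just after the named container,
    # e.g. GET /sda1/p/a?limit=3&marker=c2 -> ['c3', 'c4'].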
    def test_GET_limit_marker_plain(self):
        req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT',
                                                  'HTTP_X_TIMESTAMP': '0'})
        self.controller.PUT(req)
        for c in xrange(5):
            req = Request.blank('/sda1/p/a/c%d' % c,
                                environ={'REQUEST_METHOD': 'PUT'},
                                headers={'X-Put-Timestamp': str(c + 1),
                                         'X-Delete-Timestamp': '0',
                                         'X-Object-Count': '2',
                                         'X-Bytes-Used': '3',
                                         'X-Timestamp': normalize_timestamp(0)})
            self.controller.PUT(req)
        req = Request.blank('/sda1/p/a?limit=3',
                            environ={'REQUEST_METHOD': 'GET'})
        resp = self.controller.GET(req)
        self.assertEquals(resp.status_int, 200)
        self.assertEquals(resp.body.strip().split('\n'), ['c0', 'c1', 'c2'])
        req = Request.blank('/sda1/p/a?limit=3&marker=c2',
                            environ={'REQUEST_METHOD': 'GET'})
        resp = self.controller.GET(req)
        self.assertEquals(resp.status_int, 200)
        self.assertEquals(resp.body.strip().split('\n'), ['c3', 'c4'])

    def test_GET_limit_marker_json(self):
        req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT',
                                                  'HTTP_X_TIMESTAMP': '0'})
        self.controller.PUT(req)
        for c in xrange(5):
            req = Request.blank('/sda1/p/a/c%d' % c,
                                environ={'REQUEST_METHOD': 'PUT'},
                                headers={'X-Put-Timestamp': str(c + 1),
                                         'X-Delete-Timestamp': '0',
                                         'X-Object-Count': '2',
                                         'X-Bytes-Used': '3',
                                         'X-Timestamp': normalize_timestamp(0)})
            self.controller.PUT(req)
        req = Request.blank('/sda1/p/a?limit=3&format=json',
                            environ={'REQUEST_METHOD': 'GET'})
        resp = self.controller.GET(req)
        self.assertEquals(resp.status_int, 200)
        self.assertEquals(simplejson.loads(resp.body),
                          [{'count': 2, 'bytes': 3, 'name': 'c0'},
                           {'count': 2, 'bytes': 3, 'name': 'c1'},
                           {'count': 2, 'bytes': 3, 'name': 'c2'}])
        req = Request.blank('/sda1/p/a?limit=3&marker=c2&format=json',
                            environ={'REQUEST_METHOD': 'GET'})
        resp = self.controller.GET(req)
        self.assertEquals(resp.status_int, 200)
        self.assertEquals(simplejson.loads(resp.body),
                          [{'count': 2, 'bytes': 3, 'name': 'c3'},
                           {'count': 2, 'bytes': 3, 'name': 'c4'}])

    def test_GET_limit_marker_xml(self):
        req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT',
                                                  'HTTP_X_TIMESTAMP': '0'})
        self.controller.PUT(req)
        for c in xrange(5):
            req = Request.blank('/sda1/p/a/c%d' % c,
                                environ={'REQUEST_METHOD': 'PUT'},
                                headers={'X-Put-Timestamp': str(c + 1),
                                         'X-Delete-Timestamp': '0',
                                         'X-Object-Count': '2',
                                         'X-Bytes-Used': '3',
                                         'X-Timestamp': normalize_timestamp(c)})
            self.controller.PUT(req)
        req = Request.blank('/sda1/p/a?limit=3&format=xml',
                            environ={'REQUEST_METHOD': 'GET'})
        resp = self.controller.GET(req)
        self.assertEquals(resp.status_int, 200)
        dom = xml.dom.minidom.parseString(resp.body)
        self.assertEquals(dom.firstChild.nodeName, 'account')
        listing = \
            [n for n in dom.firstChild.childNodes if n.nodeName != '#text']
        self.assertEquals(len(listing), 3)
        self.assertEquals(listing[0].nodeName, 'container')
        container = [n for n in listing[0].childNodes if n.nodeName != '#text']
        self.assertEquals(sorted([n.nodeName for n in container]),
                          ['bytes', 'count', 'name'])
        node = [n for n in container if n.nodeName == 'name'][0]
        self.assertEquals(node.firstChild.nodeValue, 'c0')
        node = [n for n in container if n.nodeName == 'count'][0]
        self.assertEquals(node.firstChild.nodeValue, '2')
        node = [n for n in container if n.nodeName == 'bytes'][0]
        self.assertEquals(node.firstChild.nodeValue, '3')
        self.assertEquals(listing[-1].nodeName, 'container')
        container = [n for n in listing[-1].childNodes if n.nodeName != '#text']
        self.assertEquals(sorted([n.nodeName for n in container]),
                          ['bytes', 'count', 'name'])
        node = [n for n in container if n.nodeName == 'name'][0]
        self.assertEquals(node.firstChild.nodeValue, 'c2')
        node = [n for n in container if n.nodeName == 'count'][0]
        self.assertEquals(node.firstChild.nodeValue, '2')
        node = [n for n in container if n.nodeName == 'bytes'][0]
        self.assertEquals(node.firstChild.nodeValue, '3')
        req = Request.blank('/sda1/p/a?limit=3&marker=c2&format=xml',
                            environ={'REQUEST_METHOD': 'GET'})
        resp = self.controller.GET(req)
        self.assertEquals(resp.status_int, 200)
        dom = xml.dom.minidom.parseString(resp.body)
        self.assertEquals(dom.firstChild.nodeName, 'account')
        listing = \
            [n for n in dom.firstChild.childNodes if n.nodeName != '#text']
        self.assertEquals(len(listing), 2)
        self.assertEquals(listing[0].nodeName, 'container')
        container = [n for n in listing[0].childNodes if n.nodeName != '#text']
        self.assertEquals(sorted([n.nodeName for n in container]),
                          ['bytes', 'count', 'name'])
        node = [n for n in container if n.nodeName == 'name'][0]
        self.assertEquals(node.firstChild.nodeValue, 'c3')
        node = [n for n in container if n.nodeName == 'count'][0]
        self.assertEquals(node.firstChild.nodeValue, '2')
        node = [n for n in container if n.nodeName == 'bytes'][0]
        self.assertEquals(node.firstChild.nodeValue, '3')
        self.assertEquals(listing[-1].nodeName, 'container')
        container = [n for n in listing[-1].childNodes if n.nodeName != '#text']
        self.assertEquals(sorted([n.nodeName for n in container]),
                          ['bytes', 'count', 'name'])
        node = [n for n in container if n.nodeName == 'name'][0]
        self.assertEquals(node.firstChild.nodeValue, 'c4')
        node = [n for n in container if n.nodeName == 'count'][0]
        self.assertEquals(node.firstChild.nodeValue, '2')
        node = [n for n in container if n.nodeName == 'bytes'][0]
        self.assertEquals(node.firstChild.nodeValue, '3')

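    # Content negotiation: with no ?format= override, the Accept header
    # picks the listing format -- */* stays plain text, application/* and
    # application/json yield JSON, and application/xml yields XML; an
    # explicit ?format= beats Accept (see test_GET_accept_conflicting).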
    def test_GET_accept_wildcard(self):
        req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT',
                                                  'HTTP_X_TIMESTAMP': '0'})
        self.controller.PUT(req)
        req = Request.blank('/sda1/p/a/c1', environ={'REQUEST_METHOD': 'PUT'},
                            headers={'X-Put-Timestamp': '1',
                                     'X-Delete-Timestamp': '0',
                                     'X-Object-Count': '0',
                                     'X-Bytes-Used': '0',
                                     'X-Timestamp': normalize_timestamp(0)})
        self.controller.PUT(req)
        req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'GET'})
        req.accept = '*/*'
        resp = self.controller.GET(req)
        self.assertEquals(resp.status_int, 200)
        self.assertEquals(resp.body, 'c1\n')

    def test_GET_accept_application_wildcard(self):
        req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT',
                                                  'HTTP_X_TIMESTAMP': '0'})
        self.controller.PUT(req)
        req = Request.blank('/sda1/p/a/c1', environ={'REQUEST_METHOD': 'PUT'},
                            headers={'X-Put-Timestamp': '1',
                                     'X-Delete-Timestamp': '0',
                                     'X-Object-Count': '0',
                                     'X-Bytes-Used': '0',
                                     'X-Timestamp': normalize_timestamp(0)})
        resp = self.controller.PUT(req)
        req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'GET'})
        req.accept = 'application/*'
        resp = self.controller.GET(req)
        self.assertEquals(resp.status_int, 200)
        self.assertEquals(len(simplejson.loads(resp.body)), 1)

    def test_GET_accept_json(self):
        req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT',
                                                  'HTTP_X_TIMESTAMP': '0'})
        self.controller.PUT(req)
        req = Request.blank('/sda1/p/a/c1', environ={'REQUEST_METHOD': 'PUT'},
                            headers={'X-Put-Timestamp': '1',
                                     'X-Delete-Timestamp': '0',
                                     'X-Object-Count': '0',
                                     'X-Bytes-Used': '0',
                                     'X-Timestamp': normalize_timestamp(0)})
        self.controller.PUT(req)
        req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'GET'})
        req.accept = 'application/json'
        resp = self.controller.GET(req)
        self.assertEquals(resp.status_int, 200)
        self.assertEquals(len(simplejson.loads(resp.body)), 1)

    def test_GET_accept_xml(self):
        req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT',
                                                  'HTTP_X_TIMESTAMP': '0'})
        self.controller.PUT(req)
        req = Request.blank('/sda1/p/a/c1', environ={'REQUEST_METHOD': 'PUT'},
                            headers={'X-Put-Timestamp': '1',
                                     'X-Delete-Timestamp': '0',
                                     'X-Object-Count': '0',
                                     'X-Bytes-Used': '0',
                                     'X-Timestamp': normalize_timestamp(0)})
        self.controller.PUT(req)
        req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'GET'})
        req.accept = 'application/xml'
        resp = self.controller.GET(req)
        self.assertEquals(resp.status_int, 200)
        dom = xml.dom.minidom.parseString(resp.body)
        self.assertEquals(dom.firstChild.nodeName, 'account')
        listing = \
            [n for n in dom.firstChild.childNodes if n.nodeName != '#text']
        self.assertEquals(len(listing), 1)

    def test_GET_accept_conflicting(self):
        req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT',
                                                  'HTTP_X_TIMESTAMP': '0'})
        self.controller.PUT(req)
        req = Request.blank('/sda1/p/a/c1', environ={'REQUEST_METHOD': 'PUT'},
                            headers={'X-Put-Timestamp': '1',
                                     'X-Delete-Timestamp': '0',
                                     'X-Object-Count': '0',
                                     'X-Bytes-Used': '0',
                                     'X-Timestamp': normalize_timestamp(0)})
        self.controller.PUT(req)
        req = Request.blank('/sda1/p/a?format=plain',
                            environ={'REQUEST_METHOD': 'GET'})
        req.accept = 'application/json'
        resp = self.controller.GET(req)
        self.assertEquals(resp.status_int, 200)
        self.assertEquals(resp.body, 'c1\n')

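    # Prefix/delimiter listings: a delimiter rolls names up to the first
    # delimiter character past the prefix, so sub.0, sub.0.0, sub.0.1, ...
    # collapse to the single rolled-up entry sub. (or sub.0. under
    # prefix=sub.); JSON and XML mark such entries as "subdir" rather
    # than "container".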
    def test_GET_prefix_delimeter_plain(self):
        req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT',
                                                  'HTTP_X_TIMESTAMP': '0'})
        resp = self.controller.PUT(req)
        for first in range(3):
            req = Request.blank('/sda1/p/a/sub.%s' % first,
                                environ={'REQUEST_METHOD': 'PUT'},
                                headers={'X-Put-Timestamp': '1',
                                         'X-Delete-Timestamp': '0',
                                         'X-Object-Count': '0',
                                         'X-Bytes-Used': '0',
                                         'X-Timestamp': normalize_timestamp(0)})
            self.controller.PUT(req)
            for second in range(3):
                req = Request.blank('/sda1/p/a/sub.%s.%s' % (first, second),
                                    environ={'REQUEST_METHOD': 'PUT'},
                                    headers={'X-Put-Timestamp': '1',
                                             'X-Delete-Timestamp': '0',
                                             'X-Object-Count': '0',
                                             'X-Bytes-Used': '0',
                                             'X-Timestamp': normalize_timestamp(0)})
                self.controller.PUT(req)
        req = Request.blank('/sda1/p/a?delimiter=.',
                            environ={'REQUEST_METHOD': 'GET'})
        resp = self.controller.GET(req)
        self.assertEquals(resp.status_int, 200)
        self.assertEquals(resp.body.strip().split('\n'), ['sub.'])
        req = Request.blank('/sda1/p/a?prefix=sub.&delimiter=.',
                            environ={'REQUEST_METHOD': 'GET'})
        resp = self.controller.GET(req)
        self.assertEquals(resp.status_int, 200)
        self.assertEquals(resp.body.strip().split('\n'),
            ['sub.0', 'sub.0.', 'sub.1', 'sub.1.', 'sub.2', 'sub.2.'])
        req = Request.blank('/sda1/p/a?prefix=sub.1.&delimiter=.',
                            environ={'REQUEST_METHOD': 'GET'})
        resp = self.controller.GET(req)
        self.assertEquals(resp.status_int, 200)
        self.assertEquals(resp.body.strip().split('\n'),
            ['sub.1.0', 'sub.1.1', 'sub.1.2'])

    def test_GET_prefix_delimeter_json(self):
        req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT',
                                                  'HTTP_X_TIMESTAMP': '0'})
        resp = self.controller.PUT(req)
        for first in range(3):
            req = Request.blank('/sda1/p/a/sub.%s' % first,
                                environ={'REQUEST_METHOD': 'PUT'},
                                headers={'X-Put-Timestamp': '1',
                                         'X-Delete-Timestamp': '0',
                                         'X-Object-Count': '0',
                                         'X-Bytes-Used': '0',
                                         'X-Timestamp': normalize_timestamp(0)})
            self.controller.PUT(req)
            for second in range(3):
                req = Request.blank('/sda1/p/a/sub.%s.%s' % (first, second),
                                    environ={'REQUEST_METHOD': 'PUT'},
                                    headers={'X-Put-Timestamp': '1',
                                             'X-Delete-Timestamp': '0',
                                             'X-Object-Count': '0',
                                             'X-Bytes-Used': '0',
                                             'X-Timestamp': normalize_timestamp(0)})
                self.controller.PUT(req)
        req = Request.blank('/sda1/p/a?delimiter=.&format=json',
                            environ={'REQUEST_METHOD': 'GET'})
        resp = self.controller.GET(req)
        self.assertEquals(resp.status_int, 200)
        self.assertEquals([n.get('name', 's:' + n.get('subdir', 'error'))
                           for n in simplejson.loads(resp.body)], ['s:sub.'])
        req = Request.blank('/sda1/p/a?prefix=sub.&delimiter=.&format=json',
                            environ={'REQUEST_METHOD': 'GET'})
        resp = self.controller.GET(req)
        self.assertEquals(resp.status_int, 200)
        self.assertEquals([n.get('name', 's:' + n.get('subdir', 'error'))
                           for n in simplejson.loads(resp.body)],
            ['sub.0', 's:sub.0.', 'sub.1', 's:sub.1.', 'sub.2', 's:sub.2.'])
        req = Request.blank('/sda1/p/a?prefix=sub.1.&delimiter=.&format=json',
                            environ={'REQUEST_METHOD': 'GET'})
        resp = self.controller.GET(req)
        self.assertEquals(resp.status_int, 200)
        self.assertEquals([n.get('name', 's:' + n.get('subdir', 'error'))
                           for n in simplejson.loads(resp.body)],
            ['sub.1.0', 'sub.1.1', 'sub.1.2'])

    def test_GET_prefix_delimeter_xml(self):
        req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT',
                                                  'HTTP_X_TIMESTAMP': '0'})
        resp = self.controller.PUT(req)
        for first in range(3):
            req = Request.blank('/sda1/p/a/sub.%s' % first,
                                environ={'REQUEST_METHOD': 'PUT'},
                                headers={'X-Put-Timestamp': '1',
                                         'X-Delete-Timestamp': '0',
                                         'X-Object-Count': '0',
                                         'X-Bytes-Used': '0',
                                         'X-Timestamp': normalize_timestamp(0)})
            self.controller.PUT(req)
            for second in range(3):
                req = Request.blank('/sda1/p/a/sub.%s.%s' % (first, second),
                                    environ={'REQUEST_METHOD': 'PUT'},
                                    headers={'X-Put-Timestamp': '1',
                                             'X-Delete-Timestamp': '0',
                                             'X-Object-Count': '0',
                                             'X-Bytes-Used': '0',
                                             'X-Timestamp': normalize_timestamp(0)})
                self.controller.PUT(req)
        req = Request.blank('/sda1/p/a?delimiter=.&format=xml',
                            environ={'REQUEST_METHOD': 'GET'})
        resp = self.controller.GET(req)
        self.assertEquals(resp.status_int, 200)
        dom = xml.dom.minidom.parseString(resp.body)
        listing = []
        for node1 in dom.firstChild.childNodes:
            if node1.nodeName == 'subdir':
                listing.append('s:' + node1.attributes['name'].value)
            elif node1.nodeName == 'container':
                for node2 in node1.childNodes:
                    if node2.nodeName == 'name':
                        listing.append(node2.firstChild.nodeValue)
        self.assertEquals(listing, ['s:sub.'])
        req = Request.blank('/sda1/p/a?prefix=sub.&delimiter=.&format=xml',
                            environ={'REQUEST_METHOD': 'GET'})
        resp = self.controller.GET(req)
        self.assertEquals(resp.status_int, 200)
        dom = xml.dom.minidom.parseString(resp.body)
        listing = []
        for node1 in dom.firstChild.childNodes:
            if node1.nodeName == 'subdir':
                listing.append('s:' + node1.attributes['name'].value)
            elif node1.nodeName == 'container':
                for node2 in node1.childNodes:
                    if node2.nodeName == 'name':
                        listing.append(node2.firstChild.nodeValue)
        self.assertEquals(listing,
            ['sub.0', 's:sub.0.', 'sub.1', 's:sub.1.', 'sub.2', 's:sub.2.'])
        req = Request.blank('/sda1/p/a?prefix=sub.1.&delimiter=.&format=xml',
                            environ={'REQUEST_METHOD': 'GET'})
        resp = self.controller.GET(req)
        self.assertEquals(resp.status_int, 200)
        dom = xml.dom.minidom.parseString(resp.body)
        listing = []
        for node1 in dom.firstChild.childNodes:
            if node1.nodeName == 'subdir':
                listing.append('s:' + node1.attributes['name'].value)
            elif node1.nodeName == 'container':
                for node2 in node1.childNodes:
                    if node2.nodeName == 'name':
                        listing.append(node2.firstChild.nodeValue)
        self.assertEquals(listing, ['sub.1.0', 'sub.1.1', 'sub.1.2'])

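    # The remaining tests drive the controller as a plain WSGI app via
    # __call__ with a hand-built environ: /healthcheck answers 200, a
    # GET for a missing account 404s, and a malformed path 400s.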
    def test_healthcheck(self):
        inbuf = StringIO()
        errbuf = StringIO()
        outbuf = StringIO()

        def start_response(*args):
            """ Sends args to outbuf """
            outbuf.writelines(args)

        self.controller.__call__({'REQUEST_METHOD': 'GET',
                                  'SCRIPT_NAME': '',
                                  'PATH_INFO': '/healthcheck',
                                  'SERVER_NAME': '127.0.0.1',
                                  'SERVER_PORT': '8080',
                                  'SERVER_PROTOCOL': 'HTTP/1.0',
                                  'CONTENT_LENGTH': '0',
                                  'wsgi.version': (1, 0),
                                  'wsgi.url_scheme': 'http',
                                  'wsgi.input': inbuf,
                                  'wsgi.errors': errbuf,
                                  'wsgi.multithread': False,
                                  'wsgi.multiprocess': False,
                                  'wsgi.run_once': False},
                                 start_response)
        self.assertEquals(errbuf.getvalue(), '')
        self.assertEquals(outbuf.getvalue()[:4], '200 ')

    def test_through_call(self):
        inbuf = StringIO()
        errbuf = StringIO()
        outbuf = StringIO()
        def start_response(*args):
            outbuf.writelines(args)
        self.controller.__call__({'REQUEST_METHOD': 'GET',
                                  'SCRIPT_NAME': '',
                                  'PATH_INFO': '/sda1/p/a',
                                  'SERVER_NAME': '127.0.0.1',
                                  'SERVER_PORT': '8080',
                                  'SERVER_PROTOCOL': 'HTTP/1.0',
                                  'CONTENT_LENGTH': '0',
                                  'wsgi.version': (1, 0),
                                  'wsgi.url_scheme': 'http',
                                  'wsgi.input': inbuf,
                                  'wsgi.errors': errbuf,
                                  'wsgi.multithread': False,
                                  'wsgi.multiprocess': False,
                                  'wsgi.run_once': False},
                                 start_response)
        self.assertEquals(errbuf.getvalue(), '')
        self.assertEquals(outbuf.getvalue()[:4], '404 ')

    def test_through_call_invalid_path(self):
        inbuf = StringIO()
        errbuf = StringIO()
        outbuf = StringIO()
        def start_response(*args):
            outbuf.writelines(args)
        self.controller.__call__({'REQUEST_METHOD': 'GET',
                                  'SCRIPT_NAME': '',
                                  'PATH_INFO': '/bob',
                                  'SERVER_NAME': '127.0.0.1',
                                  'SERVER_PORT': '8080',
                                  'SERVER_PROTOCOL': 'HTTP/1.0',
                                  'CONTENT_LENGTH': '0',
                                  'wsgi.version': (1, 0),
                                  'wsgi.url_scheme': 'http',
                                  'wsgi.input': inbuf,
                                  'wsgi.errors': errbuf,
                                  'wsgi.multithread': False,
                                  'wsgi.multiprocess': False,
                                  'wsgi.run_once': False},
                                 start_response)
        self.assertEquals(errbuf.getvalue(), '')
        self.assertEquals(outbuf.getvalue()[:4], '400 ')

    def test_params_utf8(self):
        self.controller.PUT(Request.blank('/sda1/p/a',
                            headers={'X-Timestamp': normalize_timestamp(1)},
                            environ={'REQUEST_METHOD': 'PUT'}))
        for param in ('delimiter', 'format', 'limit', 'marker', 'prefix'):
            req = Request.blank('/sda1/p/a?%s=\xce' % param,
                                environ={'REQUEST_METHOD': 'GET'})
            resp = self.controller.GET(req)
            self.assertEquals(resp.status_int, 400)
            req = Request.blank('/sda1/p/a?%s=\xce\xa9' % param,
                                environ={'REQUEST_METHOD': 'GET'})
            resp = self.controller.GET(req)
            self.assert_(resp.status_int in (204, 412), resp.status_int)


if __name__ == '__main__':
    unittest.main()
0
test/unit/auth/__init__.py
Normal file
599
test/unit/auth/test_server.py
Normal file
@ -0,0 +1,599 @@
# Copyright (c) 2010 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import with_statement
import unittest
import os
from shutil import rmtree
from StringIO import StringIO
from time import sleep  # used by fake_http_connect's 'slow' path below
from uuid import uuid4
from logging import StreamHandler

from webob import Request

from swift.auth import server as auth_server
from swift.common.db import DatabaseConnectionError
from swift.common.utils import get_logger


class TestException(Exception):
    pass


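# fake_http_connect stands in for the auth server's outbound HTTP calls:
# each connection pops the next status code from code_iter, and the
# optional 'slow' and 'raise_exc' kwargs simulate a slow or failing
# backend.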
def fake_http_connect(*code_iter, **kwargs):
    class FakeConn(object):
        def __init__(self, status):
            self.status = status
            self.reason = 'Fake'
            self.host = '1.2.3.4'
            self.port = '1234'
        def getresponse(self):
            if 'slow' in kwargs:
                sleep(0.2)
            if 'raise_exc' in kwargs:
                raise kwargs['raise_exc']
            return self
        def getheaders(self):
            return {'x-account-bytes-used': '20'}
        def read(self, amt=None):
            return ''
        def getheader(self, name):
            return self.getheaders().get(name.lower())
    code_iter = iter(code_iter)
    def connect(*args, **ckwargs):
        if 'give_content_type' in kwargs:
            # check and read the same 'content-type' header key
            if len(args) >= 7 and 'content-type' in args[6]:
                kwargs['give_content_type'](args[6]['content-type'])
            else:
                kwargs['give_content_type']('')
        return FakeConn(code_iter.next())
    return connect


class FakeRing(object):
    def get_nodes(self, path):
        return 1, [{'ip': '10.0.0.%s' % x, 'port': 1000+x, 'device': 'sda'}
                   for x in xrange(3)]


class TestAuthServer(unittest.TestCase):

    def setUp(self):
        self.testdir = os.path.join(os.path.dirname(__file__),
                                    'auth_server')
        rmtree(self.testdir, ignore_errors=1)
        os.mkdir(self.testdir)
        self.conf = {'swift_dir': self.testdir}
        self.controller = auth_server.AuthController(self.conf, FakeRing())

    def tearDown(self):
        rmtree(self.testdir, ignore_errors=1)

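    # get_conn() hands out the controller's database connection as a
    # context manager; the test below checks that an exception inside
    # the block propagates cleanly and that reentrant use yields a
    # distinct connection.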
    def test_get_conn(self):
        with self.controller.get_conn() as conn:
            pass
        exc = False
        try:
            with self.controller.get_conn() as conn:
                raise TestException('test')
        except TestException:
            exc = True
        self.assert_(exc)
        # We allow reentrant calls for the auth-server
        with self.controller.get_conn() as conn1:
            exc = False
            try:
                with self.controller.get_conn() as conn2:
                    self.assert_(conn1 is not conn2)
            except DatabaseConnectionError:
                exc = True
            self.assert_(not exc)
        self.controller.conn = None
        with self.controller.get_conn() as conn:
            self.assert_(conn is not None)

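    # validate_token() returns the token's remaining time-to-live for a
    # good (token, cfaccount) pair and False otherwise; expiry is driven
    # through auth_server.time, which test_validate_token_expired
    # monkeypatches.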
    def test_validate_token_non_existant_token(self):
        auth_server.http_connect = fake_http_connect(201, 201, 201)
        cfaccount = self.controller.create_account(
            'test', 'tester', 'testing',).split('/')[-1]
        res = self.controller.handle_auth(Request.blank('/v1/test/auth',
            environ={'REQUEST_METHOD': 'GET'},
            headers={'X-Storage-User': 'tester',
                     'X-Storage-Pass': 'testing'}))
        token = res.headers['x-storage-token']
        self.assertEquals(self.controller.validate_token(token + 'bad',
            cfaccount), False)

    def test_validate_token_non_existant_cfaccount(self):
        auth_server.http_connect = fake_http_connect(201, 201, 201)
        cfaccount = self.controller.create_account(
            'test', 'tester', 'testing').split('/')[-1]
        res = self.controller.handle_auth(Request.blank('/v1/test/auth',
            environ={'REQUEST_METHOD': 'GET'},
            headers={'X-Storage-User': 'tester',
                     'X-Storage-Pass': 'testing'}))
        token = res.headers['x-storage-token']
        self.assertEquals(self.controller.validate_token(token,
            cfaccount + 'bad'), False)

    def test_validate_token_good(self):
        auth_server.http_connect = fake_http_connect(201, 201, 201)
        cfaccount = self.controller.create_account(
            'test', 'tester', 'testing',).split('/')[-1]
        res = self.controller.handle_auth(Request.blank('/v1/test/auth',
            environ={'REQUEST_METHOD': 'GET'},
            headers={'X-Storage-User': 'tester',
                     'X-Storage-Pass': 'testing'}))
        token = res.headers['x-storage-token']
        ttl = self.controller.validate_token(token, cfaccount)
        self.assert_(ttl > 0, repr(ttl))

    def test_validate_token_expired(self):
        orig_time = auth_server.time
        try:
            auth_server.time = lambda: 1
            auth_server.http_connect = fake_http_connect(201, 201, 201)
            cfaccount = self.controller.create_account('test', 'tester',
                            'testing').split('/')[-1]
            res = self.controller.handle_auth(Request.blank('/v1/test/auth',
                environ={'REQUEST_METHOD': 'GET'},
                headers={'X-Storage-User': 'tester',
                         'X-Storage-Pass': 'testing'}))
            token = res.headers['x-storage-token']
            ttl = self.controller.validate_token(
                token, cfaccount)
            self.assert_(ttl > 0, repr(ttl))
            auth_server.time = lambda: 1 + self.controller.token_life
            self.assertEquals(self.controller.validate_token(
                token, cfaccount), False)
        finally:
            auth_server.time = orig_time

    def test_create_account_no_new_account(self):
        auth_server.http_connect = fake_http_connect(201, 201, 201)
        result = self.controller.create_account('', 'tester', 'testing')
        self.assertFalse(result)

    def test_create_account_no_new_user(self):
        auth_server.http_connect = fake_http_connect(201, 201, 201)
        result = self.controller.create_account('test', '', 'testing')
        self.assertFalse(result)

    def test_create_account_no_new_password(self):
        auth_server.http_connect = fake_http_connect(201, 201, 201)
        result = self.controller.create_account('test', 'tester', '')
        self.assertFalse(result)

    def test_create_account_good(self):
        auth_server.http_connect = fake_http_connect(201, 201, 201)
        url = self.controller.create_account('test', 'tester', 'testing')
        self.assert_(url)
        self.assertEquals('/'.join(url.split('/')[:-1]),
            self.controller.default_cluster_url.rstrip('/'), repr(url))

    def test_recreate_accounts_none(self):
        auth_server.http_connect = fake_http_connect(201, 201, 201)
        rv = self.controller.recreate_accounts()
        self.assertEquals(rv.split()[0], '0', repr(rv))
        self.assertEquals(rv.split()[-1], '[]', repr(rv))

    def test_recreate_accounts_one(self):
        auth_server.http_connect = fake_http_connect(201, 201, 201)
        self.controller.create_account('test', 'tester', 'testing')
        auth_server.http_connect = fake_http_connect(201, 201, 201)
        rv = self.controller.recreate_accounts()
        self.assertEquals(rv.split()[0], '1', repr(rv))
        self.assertEquals(rv.split()[-1], '[]', repr(rv))

    def test_recreate_accounts_several(self):
        auth_server.http_connect = fake_http_connect(201, 201, 201)
        self.controller.create_account('test1', 'tester', 'testing')
        auth_server.http_connect = fake_http_connect(201, 201, 201)
        self.controller.create_account('test2', 'tester', 'testing')
        auth_server.http_connect = fake_http_connect(201, 201, 201)
        self.controller.create_account('test3', 'tester', 'testing')
        auth_server.http_connect = fake_http_connect(201, 201, 201)
        self.controller.create_account('test4', 'tester', 'testing')
        auth_server.http_connect = fake_http_connect(201, 201, 201,
                                                     201, 201, 201,
                                                     201, 201, 201,
                                                     201, 201, 201)
        rv = self.controller.recreate_accounts()
        self.assertEquals(rv.split()[0], '4', repr(rv))
        self.assertEquals(rv.split()[-1], '[]', repr(rv))

    def test_recreate_accounts_one_fail(self):
        auth_server.http_connect = fake_http_connect(201, 201, 201)
        url = self.controller.create_account('test', 'tester', 'testing')
        cfaccount = url.split('/')[-1]
        auth_server.http_connect = fake_http_connect(500, 500, 500)
        rv = self.controller.recreate_accounts()
        self.assertEquals(rv.split()[0], '1', repr(rv))
        self.assertEquals(rv.split()[-1], '[%s]' % repr(cfaccount),
                          repr(rv))

    def test_recreate_accounts_several_fail(self):
        auth_server.http_connect = fake_http_connect(201, 201, 201)
        url = self.controller.create_account('test1', 'tester', 'testing')
        cfaccounts = [url.split('/')[-1]]
        auth_server.http_connect = fake_http_connect(201, 201, 201)
        url = self.controller.create_account('test2', 'tester', 'testing')
        cfaccounts.append(url.split('/')[-1])
        auth_server.http_connect = fake_http_connect(201, 201, 201)
        url = self.controller.create_account('test3', 'tester', 'testing')
        cfaccounts.append(url.split('/')[-1])
        auth_server.http_connect = fake_http_connect(201, 201, 201)
        url = self.controller.create_account('test4', 'tester', 'testing')
        cfaccounts.append(url.split('/')[-1])
        auth_server.http_connect = fake_http_connect(500, 500, 500,
                                                     500, 500, 500,
                                                     500, 500, 500,
                                                     500, 500, 500)
        rv = self.controller.recreate_accounts()
        self.assertEquals(rv.split()[0], '4', repr(rv))
        failed = rv.split('[', 1)[-1][:-1].split(', ')
        self.assertEquals(failed, [repr(a) for a in cfaccounts])

    def test_recreate_accounts_several_fail_some(self):
        auth_server.http_connect = fake_http_connect(201, 201, 201)
        url = self.controller.create_account('test1', 'tester', 'testing')
        cfaccounts = [url.split('/')[-1]]
        auth_server.http_connect = fake_http_connect(201, 201, 201)
        url = self.controller.create_account('test2', 'tester', 'testing')
        cfaccounts.append(url.split('/')[-1])
        auth_server.http_connect = fake_http_connect(201, 201, 201)
        url = self.controller.create_account('test3', 'tester', 'testing')
        cfaccounts.append(url.split('/')[-1])
        auth_server.http_connect = fake_http_connect(201, 201, 201)
        url = self.controller.create_account('test4', 'tester', 'testing')
        cfaccounts.append(url.split('/')[-1])
        auth_server.http_connect = fake_http_connect(500, 500, 500,
                                                     201, 201, 201,
                                                     500, 500, 500,
                                                     201, 201, 201)
        rv = self.controller.recreate_accounts()
        self.assertEquals(rv.split()[0], '4', repr(rv))
        failed = rv.split('[', 1)[-1][:-1].split(', ')
        expected = []
        for i, value in enumerate(cfaccounts):
            if not i % 2:
                expected.append(repr(value))
        self.assertEquals(failed, expected)

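    # handle_auth() accepts two header conventions: X-Storage-User and
    # X-Storage-Pass against /v1/<account>/auth, and the Mosso-style
    # X-Auth-User ('account:user') with X-Auth-Key against /auth; any
    # mismatched account, user, or password must come back 401.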
    def test_auth_bad_path(self):
        self.assertRaises(ValueError, self.controller.handle_auth,
            Request.blank('', environ={'REQUEST_METHOD': 'GET'}))
        res = self.controller.handle_auth(Request.blank('/bad',
            environ={'REQUEST_METHOD': 'GET'}))
        self.assertEquals(res.status_int, 400)

    def test_auth_SOSO_missing_headers(self):
        auth_server.http_connect = fake_http_connect(201, 201, 201)
        cfaccount = self.controller.create_account(
            'test', 'tester', 'testing').split('/')[-1]
        res = self.controller.handle_auth(Request.blank('/v1/test/auth',
            environ={'REQUEST_METHOD': 'GET'},
            headers={'X-Storage-Pass': 'testing'}))
        self.assertEquals(res.status_int, 401)
        res = self.controller.handle_auth(Request.blank('/v1/test/auth',
            environ={'REQUEST_METHOD': 'GET'}))
        self.assertEquals(res.status_int, 401)
        res = self.controller.handle_auth(Request.blank('/v1/test/auth',
            environ={'REQUEST_METHOD': 'GET'},
            headers={'X-Storage-User': 'tester'}))
        self.assertEquals(res.status_int, 401)

    def test_auth_SOSO_bad_account(self):
        auth_server.http_connect = fake_http_connect(201, 201, 201)
        cfaccount = self.controller.create_account(
            'test', 'tester', 'testing').split('/')[-1]
        res = self.controller.handle_auth(Request.blank('/v1/testbad/auth',
            environ={'REQUEST_METHOD': 'GET'},
            headers={'X-Storage-User': 'tester',
                     'X-Storage-Pass': 'testing'}))
        self.assertEquals(res.status_int, 401)
        res = self.controller.handle_auth(Request.blank('/v1//auth',
            environ={'REQUEST_METHOD': 'GET'},
            headers={'X-Storage-User': 'tester',
                     'X-Storage-Pass': 'testing'}))
        self.assertEquals(res.status_int, 401)

    def test_auth_SOSO_bad_user(self):
        auth_server.http_connect = fake_http_connect(201, 201, 201)
        cfaccount = self.controller.create_account(
            'test', 'tester', 'testing').split('/')[-1]
        res = self.controller.handle_auth(Request.blank('/v1/test/auth',
            environ={'REQUEST_METHOD': 'GET'},
            headers={'X-Storage-User': 'testerbad',
                     'X-Storage-Pass': 'testing'}))
        self.assertEquals(res.status_int, 401)
        res = self.controller.handle_auth(Request.blank('/v1/test/auth',
            environ={'REQUEST_METHOD': 'GET'},
            headers={'X-Storage-User': '',
                     'X-Storage-Pass': 'testing'}))
        self.assertEquals(res.status_int, 401)

    def test_auth_SOSO_bad_password(self):
        auth_server.http_connect = fake_http_connect(201, 201, 201)
        cfaccount = self.controller.create_account(
            'test', 'tester', 'testing').split('/')[-1]
        res = self.controller.handle_auth(Request.blank('/v1/test/auth',
            environ={'REQUEST_METHOD': 'GET'},
            headers={'X-Storage-User': 'tester',
                     'X-Storage-Pass': 'testingbad'}))
        self.assertEquals(res.status_int, 401)
        res = self.controller.handle_auth(Request.blank('/v1/test/auth',
            environ={'REQUEST_METHOD': 'GET'},
            headers={'X-Storage-User': 'tester',
                     'X-Storage-Pass': ''}))
        self.assertEquals(res.status_int, 401)

    def test_auth_SOSO_good(self):
        auth_server.http_connect = fake_http_connect(201, 201, 201)
        cfaccount = self.controller.create_account(
            'test', 'tester', 'testing').split('/')[-1]
        res = self.controller.handle_auth(Request.blank('/v1/test/auth',
            environ={'REQUEST_METHOD': 'GET'},
            headers={'X-Storage-User': 'tester',
                     'X-Storage-Pass': 'testing'}))
        token = res.headers['x-storage-token']
        ttl = self.controller.validate_token(token, cfaccount)
        self.assert_(ttl > 0, repr(ttl))

    def test_auth_SOSO_good_Mosso_headers(self):
        auth_server.http_connect = fake_http_connect(201, 201, 201)
        cfaccount = self.controller.create_account(
            'test', 'tester', 'testing').split('/')[-1]
        res = self.controller.handle_auth(Request.blank('/v1/test/auth',
            environ={'REQUEST_METHOD': 'GET'},
            headers={'X-Auth-User': 'test:tester',
                     'X-Auth-Key': 'testing'}))
        token = res.headers['x-storage-token']
        ttl = self.controller.validate_token(token, cfaccount)
        self.assert_(ttl > 0, repr(ttl))

    def test_auth_SOSO_bad_Mosso_headers(self):
        auth_server.http_connect = fake_http_connect(201, 201, 201)
        cfaccount = self.controller.create_account(
            'test', 'tester', 'testing',).split('/')[-1]
        res = self.controller.handle_auth(Request.blank('/v1/test/auth',
            environ={'REQUEST_METHOD': 'GET'},
            headers={'X-Auth-User': 'test2:tester',
                     'X-Auth-Key': 'testing'}))
        self.assertEquals(res.status_int, 401)
        res = self.controller.handle_auth(Request.blank('/v1/test/auth',
            environ={'REQUEST_METHOD': 'GET'},
            headers={'X-Auth-User': ':tester',
                     'X-Auth-Key': 'testing'}))
        self.assertEquals(res.status_int, 401)
        res = self.controller.handle_auth(Request.blank('/v1/test/auth',
            environ={'REQUEST_METHOD': 'GET'},
            headers={'X-Auth-User': 'test:',
                     'X-Auth-Key': 'testing'}))
        self.assertEquals(res.status_int, 401)

    def test_auth_Mosso_missing_headers(self):
        auth_server.http_connect = fake_http_connect(201, 201, 201)
        cfaccount = self.controller.create_account(
            'test', 'tester', 'testing').split('/')[-1]
        res = self.controller.handle_auth(Request.blank('/auth',
            environ={'REQUEST_METHOD': 'GET'}))
        self.assertEquals(res.status_int, 401)
        res = self.controller.handle_auth(Request.blank('/auth',
            environ={'REQUEST_METHOD': 'GET'},
            headers={'X-Auth-Key': 'testing'}))
        self.assertEquals(res.status_int, 401)
        res = self.controller.handle_auth(Request.blank('/auth',
            environ={'REQUEST_METHOD': 'GET'},
            headers={'X-Auth-User': 'test:tester'}))
        self.assertEquals(res.status_int, 401)

    def test_auth_Mosso_bad_header_format(self):
        auth_server.http_connect = fake_http_connect(201, 201, 201)
        cfaccount = self.controller.create_account(
            'test', 'tester', 'testing').split('/')[-1]
        res = self.controller.handle_auth(Request.blank('/auth',
            environ={'REQUEST_METHOD': 'GET'},
            headers={'X-Auth-User': 'badformat',
                     'X-Auth-Key': 'testing'}))
        self.assertEquals(res.status_int, 401)
        res = self.controller.handle_auth(Request.blank('/auth',
            environ={'REQUEST_METHOD': 'GET'},
            headers={'X-Auth-User': '',
                     'X-Auth-Key': 'testing'}))
        self.assertEquals(res.status_int, 401)

    def test_auth_Mosso_bad_account(self):
        auth_server.http_connect = fake_http_connect(201, 201, 201)
        cfaccount = self.controller.create_account(
            'test', 'tester', 'testing').split('/')[-1]
        res = self.controller.handle_auth(Request.blank('/auth',
            environ={'REQUEST_METHOD': 'GET'},
            headers={'X-Auth-User': 'testbad:tester',
                     'X-Auth-Key': 'testing'}))
        self.assertEquals(res.status_int, 401)
        res = self.controller.handle_auth(Request.blank('/auth',
            environ={'REQUEST_METHOD': 'GET'},
            headers={'X-Auth-User': ':tester',
                     'X-Auth-Key': 'testing'}))
        self.assertEquals(res.status_int, 401)

    def test_auth_Mosso_bad_user(self):
        auth_server.http_connect = fake_http_connect(201, 201, 201)
        cfaccount = self.controller.create_account(
            'test', 'tester', 'testing').split('/')[-1]
        res = self.controller.handle_auth(Request.blank('/auth',
            environ={'REQUEST_METHOD': 'GET'},
            headers={'X-Auth-User': 'test:testerbad',
                     'X-Auth-Key': 'testing'}))
        self.assertEquals(res.status_int, 401)
        res = self.controller.handle_auth(Request.blank('/auth',
            environ={'REQUEST_METHOD': 'GET'},
            headers={'X-Auth-User': 'test:',
                     'X-Auth-Key': 'testing'}))
        self.assertEquals(res.status_int, 401)

    def test_auth_Mosso_bad_password(self):
        auth_server.http_connect = fake_http_connect(201, 201, 201)
        cfaccount = self.controller.create_account(
            'test', 'tester', 'testing').split('/')[-1]
        res = self.controller.handle_auth(Request.blank('/auth',
            environ={'REQUEST_METHOD': 'GET'},
            headers={'X-Auth-User': 'test:tester',
                     'X-Auth-Key': 'testingbad'}))
        self.assertEquals(res.status_int, 401)
        res = self.controller.handle_auth(Request.blank('/auth',
            environ={'REQUEST_METHOD': 'GET'},
            headers={'X-Auth-User': 'test:tester',
                     'X-Auth-Key': ''}))
        self.assertEquals(res.status_int, 401)

    def test_auth_Mosso_good(self):
        auth_server.http_connect = fake_http_connect(201, 201, 201)
        cfaccount = self.controller.create_account(
            'test', 'tester', 'testing').split('/')[-1]
        res = self.controller.handle_auth(Request.blank('/auth',
            environ={'REQUEST_METHOD': 'GET'},
            headers={'X-Auth-User': 'test:tester',
                     'X-Auth-Key': 'testing'}))
        token = res.headers['x-storage-token']
        ttl = self.controller.validate_token(token, cfaccount)
        self.assert_(ttl > 0, repr(ttl))

    def test_auth_Mosso_good_SOSO_header_names(self):
        auth_server.http_connect = fake_http_connect(201, 201, 201)
        cfaccount = self.controller.create_account(
            'test', 'tester', 'testing').split('/')[-1]
        res = self.controller.handle_auth(Request.blank('/auth',
            environ={'REQUEST_METHOD': 'GET'},
            headers={'X-Storage-User': 'test:tester',
                     'X-Storage-Pass': 'testing'}))
        token = res.headers['x-storage-token']
        ttl = self.controller.validate_token(token, cfaccount)
        self.assert_(ttl > 0, repr(ttl))

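    # The log-format assertions below splice out the timestamp and the
    # trailing elapsed-time field (replacing them with fixed values)
    # before comparing, since both vary from run to run.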
    def test_basic_logging(self):
        log = StringIO()
        log_handler = StreamHandler(log)
        logger = get_logger(self.conf, 'auth')
        logger.logger.addHandler(log_handler)
        try:
            auth_server.http_connect = fake_http_connect(201, 201, 201)
            url = self.controller.create_account('test', 'tester', 'testing')
            self.assertEquals(log.getvalue().rsplit(' ', 1)[0],
                "auth SUCCESS create_account('test', 'tester', _) = %s" %
                repr(url))
            log.truncate(0)
            def start_response(*args):
                pass
            self.controller.handleREST({'REQUEST_METHOD': 'GET',
                                        'SCRIPT_NAME': '',
                                        'PATH_INFO': '/v1/test/auth',
                                        'QUERY_STRING': 'test=True',
                                        'SERVER_NAME': '127.0.0.1',
                                        'SERVER_PORT': '8080',
                                        'SERVER_PROTOCOL': 'HTTP/1.0',
                                        'CONTENT_LENGTH': '0',
                                        'wsgi.version': (1, 0),
                                        'wsgi.url_scheme': 'http',
                                        'wsgi.input': StringIO(),
                                        'wsgi.errors': StringIO(),
                                        'wsgi.multithread': False,
                                        'wsgi.multiprocess': False,
                                        'wsgi.run_once': False,
                                        'HTTP_X_FORWARDED_FOR': 'testhost',
                                        'HTTP_X_STORAGE_USER': 'tester',
                                        'HTTP_X_STORAGE_PASS': 'testing'},
                                       start_response)
            logsegs = log.getvalue().split(' [', 1)
            logsegs[1:] = logsegs[1].split('] ', 1)
            logsegs[1] = '[01/Jan/2001:01:02:03 +0000]'
            logsegs[2:] = logsegs[2].split(' ')
            logsegs[-1] = '0.1234'
            self.assertEquals(' '.join(logsegs), 'auth testhost - - '
                '[01/Jan/2001:01:02:03 +0000] "GET /v1/test/auth?test=True '
                'HTTP/1.0" 204 - "-" "-" - - - - - - - - - "-" "None" "-" '
                '0.1234')
            self.controller.log_headers = True
            log.truncate(0)
            self.controller.handleREST({'REQUEST_METHOD': 'GET',
                                        'SCRIPT_NAME': '',
                                        'PATH_INFO': '/v1/test/auth',
                                        'SERVER_NAME': '127.0.0.1',
                                        'SERVER_PORT': '8080',
                                        'SERVER_PROTOCOL': 'HTTP/1.0',
                                        'CONTENT_LENGTH': '0',
                                        'wsgi.version': (1, 0),
                                        'wsgi.url_scheme': 'http',
                                        'wsgi.input': StringIO(),
                                        'wsgi.errors': StringIO(),
                                        'wsgi.multithread': False,
                                        'wsgi.multiprocess': False,
                                        'wsgi.run_once': False,
                                        'HTTP_X_STORAGE_USER': 'tester',
                                        'HTTP_X_STORAGE_PASS': 'testing'},
                                       start_response)
            logsegs = log.getvalue().split(' [', 1)
            logsegs[1:] = logsegs[1].split('] ', 1)
            logsegs[1] = '[01/Jan/2001:01:02:03 +0000]'
            logsegs[2:] = logsegs[2].split(' ')
            logsegs[-1] = '0.1234'
            self.assertEquals(' '.join(logsegs), 'auth None - - [01/Jan/2001:'
                '01:02:03 +0000] "GET /v1/test/auth HTTP/1.0" 204 - "-" "-" - '
                '- - - - - - - - "-" "None" "Content-Length: 0\n'
                'X-Storage-User: tester\nX-Storage-Pass: testing" 0.1234')
        finally:
            logger.logger.handlers.remove(log_handler)

    def test_unhandled_exceptions(self):
        def request_causing_exception(*args, **kwargs):
            pass
        def start_response(*args):
            pass
        orig_Request = auth_server.Request
        log = StringIO()
        log_handler = StreamHandler(log)
        logger = get_logger(self.conf, 'auth')
        logger.logger.addHandler(log_handler)
        try:
            auth_server.Request = request_causing_exception
            self.controller.handleREST({'REQUEST_METHOD': 'GET',
                                        'SCRIPT_NAME': '',
                                        'PATH_INFO': '/v1/test/auth',
                                        'SERVER_NAME': '127.0.0.1',
                                        'SERVER_PORT': '8080',
                                        'SERVER_PROTOCOL': 'HTTP/1.0',
                                        'CONTENT_LENGTH': '0',
                                        'wsgi.version': (1, 0),
                                        'wsgi.url_scheme': 'http',
                                        'wsgi.input': StringIO(),
                                        'wsgi.errors': StringIO(),
                                        'wsgi.multithread': False,
                                        'wsgi.multiprocess': False,
                                        'wsgi.run_once': False,
                                        'HTTP_X_STORAGE_USER': 'tester',
                                        'HTTP_X_STORAGE_PASS': 'testing'},
                                       start_response)
            self.assert_(log.getvalue().startswith(
                'auth ERROR Unhandled exception in ReST request'),
                log.getvalue())
            log.truncate(0)
        finally:
            auth_server.Request = orig_Request
            logger.logger.handlers.remove(log_handler)


if __name__ == '__main__':
    unittest.main()
0
test/unit/common/__init__.py
Normal file
0
test/unit/common/ring/__init__.py
Normal file
245
test/unit/common/ring/test_builder.py
Normal file
@ -0,0 +1,245 @@
# Copyright (c) 2010 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import unittest
from shutil import rmtree

from swift.common.ring import RingBuilder, RingData
from swift.common import ring

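# RingBuilder(8, 3, 1) builds a ring with part_power=8 (2**8 = 256
# partitions), 3 replicas, and a 1-hour min_part_hours move limit; a
# rebalance spreads the 256 * 3 = 768 replica assignments across devices
# in proportion to weight (e.g. four equal devices get 192 each).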
class TestRingBuilder(unittest.TestCase):

    def setUp(self):
        self.testdir = os.path.join(os.path.dirname(__file__),
                                    'ring_builder')
        rmtree(self.testdir, ignore_errors=1)
        os.mkdir(self.testdir)

    def tearDown(self):
        rmtree(self.testdir, ignore_errors=1)

    def test_init(self):
        rb = ring.RingBuilder(8, 3, 1)
        self.assertEquals(rb.part_power, 8)
        self.assertEquals(rb.replicas, 3)
        self.assertEquals(rb.min_part_hours, 1)
        self.assertEquals(rb.parts, 2**8)
        self.assertEquals(rb.devs, [])
        self.assertEquals(rb.devs_changed, False)
        self.assertEquals(rb.version, 0)

    def test_get_ring(self):
        rb = ring.RingBuilder(8, 3, 1)
        self.assertRaises(Exception, rb.get_ring)
        rb.add_dev({'id': 0, 'zone': 0, 'weight': 1, 'ip': '127.0.0.1',
                    'port': 10000, 'device': 'sda1'})
        rb.add_dev({'id': 1, 'zone': 1, 'weight': 1, 'ip': '127.0.0.1',
                    'port': 10001, 'device': 'sda1'})
        rb.add_dev({'id': 2, 'zone': 2, 'weight': 1, 'ip': '127.0.0.1',
                    'port': 10002, 'device': 'sda1'})
        rb.add_dev({'id': 3, 'zone': 1, 'weight': 1, 'ip': '127.0.0.1',
                    'port': 10004, 'device': 'sda1'})
        rb.remove_dev(1)
        rb.rebalance()
        r = rb.get_ring()
        self.assert_(isinstance(r, ring.RingData))
        r2 = rb.get_ring()
        self.assert_(r is r2)
        rb.rebalance()
        r3 = rb.get_ring()
        self.assert_(r3 is not r2)
        r4 = rb.get_ring()
        self.assert_(r3 is r4)

    def test_add_dev(self):
        rb = ring.RingBuilder(8, 3, 1)
        dev = \
            {'id': 0, 'zone': 0, 'weight': 1, 'ip': '127.0.0.1', 'port': 10000}
        rb.add_dev(dev)
        self.assertRaises(Exception, rb.add_dev, dev)

def test_set_dev_weight(self):
|
||||||
|
rb = ring.RingBuilder(8, 3, 1)
|
||||||
|
rb.add_dev({'id': 0, 'zone': 0, 'weight': 0.5, 'ip': '127.0.0.1',
|
||||||
|
'port': 10000, 'device': 'sda1'})
|
||||||
|
rb.add_dev({'id': 1, 'zone': 0, 'weight': 0.5, 'ip': '127.0.0.1',
|
||||||
|
'port': 10001, 'device': 'sda1'})
|
||||||
|
rb.add_dev({'id': 2, 'zone': 1, 'weight': 1, 'ip': '127.0.0.1',
|
||||||
|
'port': 10002, 'device': 'sda1'})
|
||||||
|
rb.add_dev({'id': 3, 'zone': 2, 'weight': 1, 'ip': '127.0.0.1',
|
||||||
|
'port': 10003, 'device': 'sda1'})
|
||||||
|
rb.rebalance()
|
||||||
|
r = rb.get_ring()
|
||||||
|
counts = {}
|
||||||
|
for part2dev_id in r._replica2part2dev_id:
|
||||||
|
for dev_id in part2dev_id:
|
||||||
|
counts[dev_id] = counts.get(dev_id, 0) + 1
|
||||||
|
self.assertEquals(counts, {0: 128, 1: 128, 2: 256, 3: 256})
|
||||||
|
rb.set_dev_weight(0, 0.75)
|
||||||
|
rb.set_dev_weight(1, 0.25)
|
||||||
|
rb.pretend_min_part_hours_passed()
|
||||||
|
rb.rebalance()
|
||||||
|
r = rb.get_ring()
|
||||||
|
counts = {}
|
||||||
|
for part2dev_id in r._replica2part2dev_id:
|
||||||
|
for dev_id in part2dev_id:
|
||||||
|
counts[dev_id] = counts.get(dev_id, 0) + 1
|
||||||
|
self.assertEquals(counts, {0: 192, 1: 64, 2: 256, 3: 256})
|
||||||
|
|
||||||
|
def test_remove_dev(self):
|
||||||
|
rb = ring.RingBuilder(8, 3, 1)
|
||||||
|
rb.add_dev({'id': 0, 'zone': 0, 'weight': 1, 'ip': '127.0.0.1',
|
||||||
|
'port': 10000, 'device': 'sda1'})
|
||||||
|
rb.add_dev({'id': 1, 'zone': 1, 'weight': 1, 'ip': '127.0.0.1',
|
||||||
|
'port': 10001, 'device': 'sda1'})
|
||||||
|
rb.add_dev({'id': 2, 'zone': 2, 'weight': 1, 'ip': '127.0.0.1',
|
||||||
|
'port': 10002, 'device': 'sda1'})
|
||||||
|
rb.add_dev({'id': 3, 'zone': 3, 'weight': 1, 'ip': '127.0.0.1',
|
||||||
|
'port': 10003, 'device': 'sda1'})
|
||||||
|
rb.rebalance()
|
||||||
|
r = rb.get_ring()
|
||||||
|
counts = {}
|
||||||
|
for part2dev_id in r._replica2part2dev_id:
|
||||||
|
for dev_id in part2dev_id:
|
||||||
|
counts[dev_id] = counts.get(dev_id, 0) + 1
|
||||||
|
self.assertEquals(counts, {0: 192, 1: 192, 2: 192, 3: 192})
|
||||||
|
rb.remove_dev(1)
|
||||||
|
rb.pretend_min_part_hours_passed()
|
||||||
|
rb.rebalance()
|
||||||
|
r = rb.get_ring()
|
||||||
|
counts = {}
|
||||||
|
for part2dev_id in r._replica2part2dev_id:
|
||||||
|
for dev_id in part2dev_id:
|
||||||
|
counts[dev_id] = counts.get(dev_id, 0) + 1
|
||||||
|
self.assertEquals(counts, {0: 256, 2: 256, 3: 256})
|
||||||
|
|
||||||
|
def test_rerebalance(self):
|
||||||
|
rb = ring.RingBuilder(8, 3, 1)
|
||||||
|
rb.add_dev({'id': 0, 'zone': 0, 'weight': 1, 'ip': '127.0.0.1',
|
||||||
|
'port': 10000, 'device': 'sda1'})
|
||||||
|
rb.add_dev({'id': 1, 'zone': 1, 'weight': 1, 'ip': '127.0.0.1',
|
||||||
|
'port': 10001, 'device': 'sda1'})
|
||||||
|
rb.add_dev({'id': 2, 'zone': 2, 'weight': 1, 'ip': '127.0.0.1',
|
||||||
|
'port': 10002, 'device': 'sda1'})
|
||||||
|
rb.rebalance()
|
||||||
|
r = rb.get_ring()
|
||||||
|
counts = {}
|
||||||
|
for part2dev_id in r._replica2part2dev_id:
|
||||||
|
for dev_id in part2dev_id:
|
||||||
|
counts[dev_id] = counts.get(dev_id, 0) + 1
|
||||||
|
self.assertEquals(counts, {0: 256, 1: 256, 2: 256})
|
||||||
|
rb.add_dev({'id': 3, 'zone': 3, 'weight': 1, 'ip': '127.0.0.1',
|
||||||
|
'port': 10003, 'device': 'sda1'})
|
||||||
|
rb.pretend_min_part_hours_passed()
|
||||||
|
rb.rebalance()
|
||||||
|
r = rb.get_ring()
|
||||||
|
counts = {}
|
||||||
|
for part2dev_id in r._replica2part2dev_id:
|
||||||
|
for dev_id in part2dev_id:
|
||||||
|
counts[dev_id] = counts.get(dev_id, 0) + 1
|
||||||
|
self.assertEquals(counts, {0: 192, 1: 192, 2: 192, 3: 192})
|
||||||
|
rb.set_dev_weight(3, 100)
|
||||||
|
rb.rebalance()
|
||||||
|
r = rb.get_ring()
|
||||||
|
counts = {}
|
||||||
|
for part2dev_id in r._replica2part2dev_id:
|
||||||
|
for dev_id in part2dev_id:
|
||||||
|
counts[dev_id] = counts.get(dev_id, 0) + 1
|
||||||
|
self.assertEquals(counts[3], 256)
|
||||||
|
|
||||||
|
def test_validate(self):
|
||||||
|
rb = ring.RingBuilder(8, 3, 1)
|
||||||
|
rb.add_dev({'id': 0, 'zone': 0, 'weight': 1, 'ip': '127.0.0.1',
|
||||||
|
'port': 10000, 'device': 'sda1'})
|
||||||
|
rb.add_dev({'id': 1, 'zone': 1, 'weight': 1, 'ip': '127.0.0.1',
|
||||||
|
'port': 10001, 'device': 'sda1'})
|
||||||
|
rb.add_dev({'id': 2, 'zone': 2, 'weight': 2, 'ip': '127.0.0.1',
|
||||||
|
'port': 10002, 'device': 'sda1'})
|
||||||
|
rb.add_dev({'id': 3, 'zone': 3, 'weight': 2, 'ip': '127.0.0.1',
|
||||||
|
'port': 10003, 'device': 'sda1'})
|
||||||
|
rb.rebalance()
|
||||||
|
r = rb.get_ring()
|
||||||
|
counts = {}
|
||||||
|
for part2dev_id in r._replica2part2dev_id:
|
||||||
|
for dev_id in part2dev_id:
|
||||||
|
counts[dev_id] = counts.get(dev_id, 0) + 1
|
||||||
|
self.assertEquals(counts, {0: 128, 1: 128, 2: 256, 3: 256})
|
||||||
|
|
||||||
|
dev_usage, worst = rb.validate()
|
||||||
|
self.assert_(dev_usage is None)
|
||||||
|
self.assert_(worst is None)
|
||||||
|
|
||||||
|
dev_usage, worst = rb.validate(stats=True)
|
||||||
|
self.assertEquals(list(dev_usage), [128, 128, 256, 256])
|
||||||
|
self.assertEquals(int(worst), 0)
|
||||||
|
|
||||||
|
rb.set_dev_weight(2, 0)
|
||||||
|
rb.rebalance()
|
||||||
|
self.assertEquals(rb.validate(stats=True)[1], 999.99)
|
||||||
|
|
||||||
|
# Test not all partitions doubly accounted for
|
||||||
|
rb.devs[1]['parts'] -= 1
|
||||||
|
self.assertRaises(Exception, rb.validate)
|
||||||
|
rb.devs[1]['parts'] += 1
|
||||||
|
|
||||||
|
# Test duplicate device for partition
|
||||||
|
orig_dev_id = rb._replica2part2dev[0][0]
|
||||||
|
rb._replica2part2dev[0][0] = rb._replica2part2dev[1][0]
|
||||||
|
self.assertRaises(Exception, rb.validate)
|
||||||
|
rb._replica2part2dev[0][0] = orig_dev_id
|
||||||
|
|
||||||
|
# Test duplicate zone for partition
|
||||||
|
rb.add_dev({'id': 5, 'zone': 0, 'weight': 2, 'ip': '127.0.0.1',
|
||||||
|
'port': 10005, 'device': 'sda1'})
|
||||||
|
rb.pretend_min_part_hours_passed()
|
||||||
|
rb.rebalance()
|
||||||
|
rb.validate()
|
||||||
|
orig_replica = orig_partition = orig_device = None
|
||||||
|
for part2dev in rb._replica2part2dev:
|
||||||
|
for p in xrange(2**8):
|
||||||
|
if part2dev[p] == 5:
|
||||||
|
for r in xrange(len(rb._replica2part2dev)):
|
||||||
|
if rb._replica2part2dev[r][p] != 5:
|
||||||
|
orig_replica = r
|
||||||
|
orig_partition = p
|
||||||
|
orig_device = rb._replica2part2dev[r][p]
|
||||||
|
rb._replica2part2dev[r][p] = 0
|
||||||
|
break
|
||||||
|
if orig_replica is not None:
|
||||||
|
break
|
||||||
|
if orig_replica is not None:
|
||||||
|
break
|
||||||
|
self.assertRaises(Exception, rb.validate)
|
||||||
|
rb._replica2part2dev[orig_replica][orig_partition] = orig_device
|
||||||
|
|
||||||
|
# Tests that validate can handle 'holes' in .devs
|
||||||
|
rb.remove_dev(2)
|
||||||
|
rb.pretend_min_part_hours_passed()
|
||||||
|
rb.rebalance()
|
||||||
|
rb.validate(stats=True)
|
||||||
|
|
||||||
|
# Validate that zero weight devices with no partitions don't count on
|
||||||
|
# the 'worst' value.
|
||||||
|
self.assertNotEquals(rb.validate(stats=True)[1], 999.99)
|
||||||
|
rb.add_dev({'id': 4, 'zone': 0, 'weight': 0, 'ip': '127.0.0.1',
|
||||||
|
'port': 10004, 'device': 'sda1'})
|
||||||
|
rb.pretend_min_part_hours_passed()
|
||||||
|
rb.rebalance()
|
||||||
|
self.assertNotEquals(rb.validate(stats=True)[1], 999.99)
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == '__main__':
|
||||||
|
unittest.main()
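Taken together, the tests above walk the full builder lifecycle: construct, add devices, rebalance, snapshot, validate. As a quick orientation, here is a minimal standalone sketch of that same sequence, using only calls the tests exercise; the device values are illustrative, not a recommended layout.

# Minimal sketch of the RingBuilder workflow exercised above; device
# values are illustrative placeholders.
from swift.common import ring

rb = ring.RingBuilder(8, 3, 1)  # part_power=8, replicas=3, min_part_hours=1
for dev_id in xrange(4):
    rb.add_dev({'id': dev_id, 'zone': dev_id, 'weight': 1,
                'ip': '127.0.0.1', 'port': 10000 + dev_id, 'device': 'sda1'})
rb.rebalance()                  # assign all 2**8 partitions to devices
ring_data = rb.get_ring()       # a RingData snapshot; cached until the next
                                # rebalance, as test_get_ring verifies
dev_usage, worst = rb.validate(stats=True)  # per-device partition counts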
204
test/unit/common/ring/test_ring.py
Normal file
@@ -0,0 +1,204 @@
# Copyright (c) 2010 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import cPickle as pickle
import os
import unittest
from gzip import GzipFile
from shutil import rmtree
from time import sleep, time

from swift.common import ring, utils


class TestRingData(unittest.TestCase):

    def test_attrs(self):
        r2p2d = [[0, 1, 0, 1], [0, 1, 0, 1]]
        d = [{'id': 0, 'zone': 0}, {'id': 1, 'zone': 1}]
        s = 30
        rd = ring.RingData(r2p2d, d, s)
        self.assertEquals(rd._replica2part2dev_id, r2p2d)
        self.assertEquals(rd.devs, d)
        self.assertEquals(rd._part_shift, s)

    def test_pickleable(self):
        rd = ring.RingData([[0, 1, 0, 1], [0, 1, 0, 1]],
                           [{'id': 0, 'zone': 0}, {'id': 1, 'zone': 1}], 30)
        for p in xrange(pickle.HIGHEST_PROTOCOL):
            pickle.loads(pickle.dumps(rd, protocol=p))


class TestRing(unittest.TestCase):

    def setUp(self):
        utils.HASH_PATH_SUFFIX = 'endcap'
        self.testdir = os.path.join(os.path.dirname(__file__), 'ring')
        rmtree(self.testdir, ignore_errors=1)
        os.mkdir(self.testdir)
        self.testgz = os.path.join(self.testdir, 'ring.gz')
        self.intended_replica2part2dev_id = [[0, 2, 0, 2], [2, 0, 2, 0]]
        self.intended_devs = [{'id': 0, 'zone': 0}, None, {'id': 2, 'zone': 2}]
        self.intended_part_shift = 30
        self.intended_reload_time = 15
        pickle.dump(ring.RingData(self.intended_replica2part2dev_id,
                    self.intended_devs, self.intended_part_shift),
                    GzipFile(self.testgz, 'wb'))
        self.ring = \
            ring.Ring(self.testgz, reload_time=self.intended_reload_time)

    def tearDown(self):
        rmtree(self.testdir, ignore_errors=1)

    def test_creation(self):
        self.assertEquals(self.ring._replica2part2dev_id,
                          self.intended_replica2part2dev_id)
        self.assertEquals(self.ring._part_shift, self.intended_part_shift)
        self.assertEquals(self.ring.devs, self.intended_devs)
        self.assertEquals(self.ring.reload_time, self.intended_reload_time)
        self.assertEquals(self.ring.pickle_gz_path, self.testgz)

    def test_has_changed(self):
        self.assertEquals(self.ring.has_changed(), False)
        os.utime(self.testgz, (time() + 60, time() + 60))
        self.assertEquals(self.ring.has_changed(), True)

    def test_reload(self):
        os.utime(self.testgz, (time() - 300, time() - 300))
        self.ring = ring.Ring(self.testgz, reload_time=0.001)
        orig_mtime = self.ring._mtime
        self.assertEquals(len(self.ring.devs), 3)
        self.intended_devs.append({'id': 3, 'zone': 3})
        pickle.dump(ring.RingData(self.intended_replica2part2dev_id,
                    self.intended_devs, self.intended_part_shift),
                    GzipFile(self.testgz, 'wb'))
        sleep(0.1)
        self.ring.get_nodes('a')
        self.assertEquals(len(self.ring.devs), 4)
        self.assertNotEquals(self.ring._mtime, orig_mtime)

        os.utime(self.testgz, (time() - 300, time() - 300))
        self.ring = ring.Ring(self.testgz, reload_time=0.001)
        orig_mtime = self.ring._mtime
        self.assertEquals(len(self.ring.devs), 4)
        self.intended_devs.append({'id': 4, 'zone': 4})
        pickle.dump(ring.RingData(self.intended_replica2part2dev_id,
                    self.intended_devs, self.intended_part_shift),
                    GzipFile(self.testgz, 'wb'))
        sleep(0.1)
        self.ring.get_part_nodes(0)
        self.assertEquals(len(self.ring.devs), 5)
        self.assertNotEquals(self.ring._mtime, orig_mtime)

        os.utime(self.testgz, (time() - 300, time() - 300))
        self.ring = \
            ring.Ring(self.testgz, reload_time=0.001)
        orig_mtime = self.ring._mtime
        part, nodes = self.ring.get_nodes('a')
        self.assertEquals(len(self.ring.devs), 5)
        self.intended_devs.append({'id': 5, 'zone': 5})
        pickle.dump(ring.RingData(self.intended_replica2part2dev_id,
                    self.intended_devs, self.intended_part_shift),
                    GzipFile(self.testgz, 'wb'))
        sleep(0.1)
        self.ring.get_more_nodes(part).next()
        self.assertEquals(len(self.ring.devs), 6)
        self.assertNotEquals(self.ring._mtime, orig_mtime)

    def test_get_part_nodes(self):
        part, nodes = self.ring.get_nodes('a')
        self.assertEquals(nodes, self.ring.get_part_nodes(part))

    def test_get_nodes(self):
        # Yes, these tests are deliberately very fragile. We want to make sure
        # that if someone changes the results the ring produces, they know it.
        self.assertRaises(TypeError, self.ring.get_nodes)
        part, nodes = self.ring.get_nodes('a')
        self.assertEquals(part, 0)
        self.assertEquals(nodes, [{'id': 0, 'zone': 0}, {'id': 2, 'zone': 2}])
        part, nodes = self.ring.get_nodes('a1')
        self.assertEquals(part, 0)
        self.assertEquals(nodes, [{'id': 0, 'zone': 0}, {'id': 2, 'zone': 2}])
        part, nodes = self.ring.get_nodes('a4')
        self.assertEquals(part, 1)
        self.assertEquals(nodes, [{'id': 2, 'zone': 2}, {'id': 0, 'zone': 0}])
        part, nodes = self.ring.get_nodes('aa')
        self.assertEquals(part, 1)
        self.assertEquals(nodes, [{'id': 2, 'zone': 2}, {'id': 0, 'zone': 0}])

        part, nodes = self.ring.get_nodes('a', 'c1')
        self.assertEquals(part, 0)
        self.assertEquals(nodes, [{'id': 0, 'zone': 0}, {'id': 2, 'zone': 2}])
        part, nodes = self.ring.get_nodes('a', 'c0')
        self.assertEquals(part, 3)
        self.assertEquals(nodes, [{'id': 2, 'zone': 2}, {'id': 0, 'zone': 0}])
        part, nodes = self.ring.get_nodes('a', 'c3')
        self.assertEquals(part, 2)
        self.assertEquals(nodes, [{'id': 0, 'zone': 0}, {'id': 2, 'zone': 2}])
        part, nodes = self.ring.get_nodes('a', 'c2')
        self.assertEquals(part, 2)
        self.assertEquals(nodes, [{'id': 0, 'zone': 0}, {'id': 2, 'zone': 2}])

        part, nodes = self.ring.get_nodes('a', 'c', 'o1')
        self.assertEquals(part, 1)
        self.assertEquals(nodes, [{'id': 2, 'zone': 2}, {'id': 0, 'zone': 0}])
        part, nodes = self.ring.get_nodes('a', 'c', 'o5')
        self.assertEquals(part, 0)
        self.assertEquals(nodes, [{'id': 0, 'zone': 0}, {'id': 2, 'zone': 2}])
        part, nodes = self.ring.get_nodes('a', 'c', 'o0')
        self.assertEquals(part, 0)
        self.assertEquals(nodes, [{'id': 0, 'zone': 0}, {'id': 2, 'zone': 2}])
        part, nodes = self.ring.get_nodes('a', 'c', 'o2')
        self.assertEquals(part, 2)
        self.assertEquals(nodes, [{'id': 0, 'zone': 0}, {'id': 2, 'zone': 2}])

    def test_get_more_nodes(self):
        # Yes, these tests are deliberately very fragile. We want to make sure
        # that if someone changes the results the ring produces, they know it.
        part, nodes = self.ring.get_nodes('a', 'c', 'o2')
        self.assertEquals(part, 2)
        self.assertEquals(nodes, [{'id': 0, 'zone': 0}, {'id': 2, 'zone': 2}])
        nodes = list(self.ring.get_more_nodes(part))
        self.assertEquals(nodes, [])

        self.ring.devs.append({'id': 3, 'zone': 0})
        self.ring.zone2devs[0].append(self.ring.devs[3])
        part, nodes = self.ring.get_nodes('a', 'c', 'o2')
        self.assertEquals(part, 2)
        self.assertEquals(nodes, [{'id': 0, 'zone': 0}, {'id': 2, 'zone': 2}])
        nodes = list(self.ring.get_more_nodes(part))
        self.assertEquals(nodes, [])

        self.ring.zone2devs[self.ring.devs[3]['zone']].remove(self.ring.devs[3])
        self.ring.devs[3]['zone'] = 3
        self.ring.zone2devs[3] = [self.ring.devs[3]]
        part, nodes = self.ring.get_nodes('a', 'c', 'o2')
        self.assertEquals(part, 2)
        self.assertEquals(nodes, [{'id': 0, 'zone': 0}, {'id': 2, 'zone': 2}])
        nodes = list(self.ring.get_more_nodes(part))
        self.assertEquals(nodes, [{'id': 3, 'zone': 3}])

        self.ring.devs.append(None)
        self.ring.devs.append({'id': 5, 'zone': 5})
        self.ring.zone2devs[5] = [self.ring.devs[5]]
        part, nodes = self.ring.get_nodes('a', 'c', 'o2')
        self.assertEquals(part, 2)
        self.assertEquals(nodes, [{'id': 0, 'zone': 0}, {'id': 2, 'zone': 2}])
        nodes = list(self.ring.get_more_nodes(part))
        self.assertEquals(nodes, [{'id': 3, 'zone': 3}, {'id': 5, 'zone': 5}])


if __name__ == '__main__':
    unittest.main()
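On the read side, a corresponding sketch of consuming a serialized ring, mirroring what setUp writes and TestRing exercises. The path is a placeholder, and the ip/port/device fields assume devs populated the way RingBuilder.add_dev populates them; the tiny test ring above carries only id and zone.

# Sketch of ring lookup, mirroring TestRing; the path is a placeholder.
from swift.common import ring

r = ring.Ring('/etc/swift/object.ring.gz', reload_time=15)
part, nodes = r.get_nodes('account', 'container', 'object')
for node in nodes:                     # primary replicas for the partition
    print node['ip'], node['port'], node['device']
for node in r.get_more_nodes(part):    # handoff devices in other zones
    print 'handoff:', node['id']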
177
test/unit/common/test_auth.py
Normal file
@@ -0,0 +1,177 @@
# Copyright (c) 2010 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import with_statement
import logging
import os
import sys
import unittest
from contextlib import contextmanager

import eventlet
from webob import Request

from swift.common import auth

# mocks
logging.getLogger().addHandler(logging.StreamHandler(sys.stdout))


class FakeMemcache(object):

    def __init__(self):
        self.store = {}

    def get(self, key):
        return self.store.get(key)

    def set(self, key, value, timeout=0):
        self.store[key] = value
        return True

    def incr(self, key, timeout=0):
        self.store[key] = self.store.setdefault(key, 0) + 1
        return self.store[key]

    @contextmanager
    def soft_lock(self, key, timeout=0, retries=5):
        yield True

    def delete(self, key):
        try:
            del self.store[key]
        except:
            pass
        return True


def mock_http_connect(response, headers=None, with_exc=False):

    class FakeConn(object):

        def __init__(self, status, headers, with_exc):
            self.status = status
            self.reason = 'Fake'
            self.host = '1.2.3.4'
            self.port = '1234'
            self.with_exc = with_exc
            self.headers = headers
            if self.headers is None:
                self.headers = {}

        def getresponse(self):
            if self.with_exc:
                raise Exception('test')
            return self

        def getheader(self, header):
            return self.headers[header]

        def read(self, amt=None):
            return ''

        def close(self):
            return

    return lambda *args, **kwargs: FakeConn(response, headers, with_exc)


class Logger(object):

    def __init__(self):
        self.error_value = None
        self.exception_value = None

    def error(self, msg, *args, **kwargs):
        self.error_value = (msg, args, kwargs)

    def exception(self, msg, *args, **kwargs):
        _, exc, _ = sys.exc_info()
        self.exception_value = (msg,
            '%s %s' % (exc.__class__.__name__, str(exc)), args, kwargs)


# tests

class FakeApp(object):

    def __call__(self, env, start_response):
        return "OK"


def start_response(*args):
    pass


class TestAuth(unittest.TestCase):

    def setUp(self):
        self.test_auth = auth.DevAuthMiddleware(
            FakeApp(), {}, FakeMemcache(), Logger())

    def test_auth_fail(self):
        old_http_connect = auth.http_connect
        try:
            auth.http_connect = mock_http_connect(404)
            self.assertFalse(self.test_auth.auth('a', 't'))
        finally:
            auth.http_connect = old_http_connect

    def test_auth_success(self):
        old_http_connect = auth.http_connect
        try:
            auth.http_connect = mock_http_connect(204, {'x-auth-ttl': '1234'})
            self.assertTrue(self.test_auth.auth('a', 't'))
        finally:
            auth.http_connect = old_http_connect

    def test_auth_memcache(self):
        old_http_connect = auth.http_connect
        try:
            auth.http_connect = mock_http_connect(204, {'x-auth-ttl': '1234'})
            self.assertTrue(self.test_auth.auth('a', 't'))
            auth.http_connect = mock_http_connect(404)
            # Should still be in memcache
            self.assertTrue(self.test_auth.auth('a', 't'))
        finally:
            auth.http_connect = old_http_connect

    def test_middleware_success(self):
        old_http_connect = auth.http_connect
        try:
            auth.http_connect = mock_http_connect(204, {'x-auth-ttl': '1234'})
            req = Request.blank('/v/a/c/o', headers={'x-auth-token': 't'})
            resp = self.test_auth(req.environ, start_response)
            self.assertEquals(resp, 'OK')
        finally:
            auth.http_connect = old_http_connect

    def test_middleware_no_header(self):
        old_http_connect = auth.http_connect
        try:
            auth.http_connect = mock_http_connect(204, {'x-auth-ttl': '1234'})
            req = Request.blank('/v/a/c/o')
            resp = self.test_auth(req.environ, start_response)
            self.assertEquals(resp, ['Missing Auth Token'])
        finally:
            auth.http_connect = old_http_connect

    def test_middleware_storage_token(self):
        old_http_connect = auth.http_connect
        try:
            auth.http_connect = mock_http_connect(204, {'x-auth-ttl': '1234'})
            req = Request.blank('/v/a/c/o', headers={'x-storage-token': 't'})
            resp = self.test_auth(req.environ, start_response)
            self.assertEquals(resp, 'OK')
        finally:
            auth.http_connect = old_http_connect

    def test_middleware_only_version(self):
        old_http_connect = auth.http_connect
        try:
            auth.http_connect = mock_http_connect(204, {'x-auth-ttl': '1234'})
            req = Request.blank('/v', headers={'x-auth-token': 't'})
            resp = self.test_auth(req.environ, start_response)
            self.assertEquals(resp, ['Bad URL'])
        finally:
            auth.http_connect = old_http_connect


if __name__ == '__main__':
    unittest.main()
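Every test here wraps the same save/patch/restore idiom around auth.http_connect, which lets any upstream status and header combination be simulated without network I/O. Pulled out on its own, and reusing the fakes defined above, the pattern is:

# The save/patch/restore idiom used throughout TestAuth, reusing the fakes
# defined above (mock_http_connect, FakeApp, FakeMemcache, Logger).
from webob import Request
from swift.common import auth

old_http_connect = auth.http_connect          # save the real implementation
try:
    # Simulate a 204 token-validation response from the auth server.
    auth.http_connect = mock_http_connect(204, {'x-auth-ttl': '1234'})
    test_auth = auth.DevAuthMiddleware(
        FakeApp(), {}, FakeMemcache(), Logger())
    req = Request.blank('/v/a/c/o', headers={'x-auth-token': 't'})
    assert test_auth(req.environ, start_response) == 'OK'
finally:
    auth.http_connect = old_http_connect     # always restore the real one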
73
test/unit/common/test_bufferedhttp.py
Normal file
@@ -0,0 +1,73 @@
# Copyright (c) 2010 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

from eventlet import spawn, TimeoutError, listen
from eventlet.timeout import Timeout

from swift.common import bufferedhttp


class TestBufferedHTTP(unittest.TestCase):

    def test_http_connect(self):
        bindsock = listen(('127.0.0.1', 0))

        def accept(expected_par):
            try:
                with Timeout(3):
                    sock, addr = bindsock.accept()
                    fp = sock.makefile()
                    fp.write('HTTP/1.1 200 OK\r\nContent-Length: 8\r\n\r\n'
                             'RESPONSE')
                    fp.flush()
                    self.assertEquals(fp.readline(),
                        'PUT /dev/%s/path/..%%25/?omg&no=%%7f HTTP/1.1\r\n' %
                        expected_par)
                    headers = {}
                    line = fp.readline()
                    while line and line != '\r\n':
                        headers[line.split(':')[0].lower()] = \
                            line.split(':')[1].strip()
                        line = fp.readline()
                    self.assertEquals(headers['content-length'], '7')
                    self.assertEquals(headers['x-header'], 'value')
                    self.assertEquals(fp.readline(), 'REQUEST\r\n')
            except BaseException, err:
                return err
            return None

        for par in ('par', 1357):
            event = spawn(accept, par)
            try:
                with Timeout(3):
                    conn = bufferedhttp.http_connect('127.0.0.1',
                        bindsock.getsockname()[1], 'dev', par, 'PUT',
                        '/path/..%/', {'content-length': 7, 'x-header':
                        'value'}, query_string='omg&no=%7f')
                    conn.send('REQUEST\r\n')
                    resp = conn.getresponse()
                    body = resp.read()
                    conn.close()
                    self.assertEquals(resp.status, 200)
                    self.assertEquals(resp.reason, 'OK')
                    self.assertEquals(body, 'RESPONSE')
            finally:
                err = event.wait()
                if err:
                    raise Exception(err)


if __name__ == '__main__':
    unittest.main()
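The accept() coroutine above doubles as a record of the request line http_connect emits, so the test also documents the argument order: address, port, device, partition, method, path, headers, query string. A condensed sketch of the client half alone, with the node address as a placeholder since a real listener is assumed:

# Client half of the exchange above; host and port are placeholders for a
# node that is actually listening.
from swift.common import bufferedhttp

conn = bufferedhttp.http_connect(
    '127.0.0.1', 6000,            # node address (placeholder)
    'sda1', 1357,                 # device and partition (ints are fine)
    'PUT', '/a/c/o',              # method and path below the partition
    {'content-length': 7, 'x-header': 'value'},
    query_string='omg&no=%7f')
conn.send('CONTENT')              # 7 bytes, matching content-length
resp = conn.getresponse()
print resp.status, resp.reason, resp.read()
conn.close()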
28
test/unit/common/test_client.py
Normal file
@@ -0,0 +1,28 @@
# Copyright (c) 2010 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# TODO: Tests

import unittest
from swift.common import client


class TestClient(unittest.TestCase):

    def test_placeholder(self):
        pass


if __name__ == '__main__':
    unittest.main()
142
test/unit/common/test_constraints.py
Normal file
@@ -0,0 +1,142 @@
# Copyright (c) 2010 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

from webob import Request
from webob.exc import HTTPBadRequest, HTTPLengthRequired, \
    HTTPRequestEntityTooLarge

from swift.common import constraints


class TestConstraints(unittest.TestCase):

    def test_check_metadata_empty(self):
        headers = {}
        self.assertEquals(constraints.check_metadata(Request.blank('/',
            headers=headers)), None)

    def test_check_metadata_good(self):
        headers = {'X-Object-Meta-Name': 'Value'}
        self.assertEquals(constraints.check_metadata(Request.blank('/',
            headers=headers)), None)

    def test_check_metadata_empty_name(self):
        headers = {'X-Object-Meta-': 'Value'}
        self.assert_(constraints.check_metadata(Request.blank('/',
            headers=headers)), HTTPBadRequest)

    def test_check_metadata_name_length(self):
        name = 'a' * constraints.MAX_META_NAME_LENGTH
        headers = {'X-Object-Meta-%s' % name: 'v'}
        self.assertEquals(constraints.check_metadata(Request.blank('/',
            headers=headers)), None)
        name = 'a' * (constraints.MAX_META_NAME_LENGTH + 1)
        headers = {'X-Object-Meta-%s' % name: 'v'}
        self.assert_(isinstance(constraints.check_metadata(Request.blank('/',
            headers=headers)), HTTPBadRequest))

    def test_check_metadata_value_length(self):
        value = 'a' * constraints.MAX_META_VALUE_LENGTH
        headers = {'X-Object-Meta-Name': value}
        self.assertEquals(constraints.check_metadata(Request.blank('/',
            headers=headers)), None)
        value = 'a' * (constraints.MAX_META_VALUE_LENGTH + 1)
        headers = {'X-Object-Meta-Name': value}
        self.assert_(isinstance(constraints.check_metadata(Request.blank('/',
            headers=headers)), HTTPBadRequest))

    def test_check_metadata_count(self):
        headers = {}
        for x in xrange(constraints.MAX_META_COUNT):
            headers['X-Object-Meta-%d' % x] = 'v'
        self.assertEquals(constraints.check_metadata(Request.blank('/',
            headers=headers)), None)
        headers['X-Object-Meta-Too-Many'] = 'v'
        self.assert_(isinstance(constraints.check_metadata(Request.blank('/',
            headers=headers)), HTTPBadRequest))

    def test_check_metadata_size(self):
        headers = {}
        size = 0
        chunk = constraints.MAX_META_NAME_LENGTH + \
            constraints.MAX_META_VALUE_LENGTH
        x = 0
        while size + chunk < constraints.MAX_META_OVERALL_SIZE:
            headers['X-Object-Meta-%04d%s' %
                (x, 'a' * (constraints.MAX_META_NAME_LENGTH - 4))] = \
                'v' * constraints.MAX_META_VALUE_LENGTH
            size += chunk
            x += 1
        self.assertEquals(constraints.check_metadata(Request.blank('/',
            headers=headers)), None)
        headers['X-Object-Meta-9999%s' %
            ('a' * (constraints.MAX_META_NAME_LENGTH - 4))] = \
            'v' * constraints.MAX_META_VALUE_LENGTH
        self.assert_(isinstance(constraints.check_metadata(Request.blank('/',
            headers=headers)), HTTPBadRequest))

    def test_check_object_creation_content_length(self):
        headers = {'Content-Length': str(constraints.MAX_FILE_SIZE),
                   'Content-Type': 'text/plain'}
        self.assertEquals(constraints.check_object_creation(Request.blank('/',
            headers=headers), 'object_name'), None)
        headers = {'Content-Length': str(constraints.MAX_FILE_SIZE + 1),
                   'Content-Type': 'text/plain'}
        self.assert_(isinstance(constraints.check_object_creation(
            Request.blank('/', headers=headers), 'object_name'),
            HTTPRequestEntityTooLarge))
        headers = {'Transfer-Encoding': 'chunked',
                   'Content-Type': 'text/plain'}
        self.assertEquals(constraints.check_object_creation(Request.blank('/',
            headers=headers), 'object_name'), None)
        headers = {'Content-Type': 'text/plain'}
        self.assert_(isinstance(constraints.check_object_creation(
            Request.blank('/', headers=headers), 'object_name'),
            HTTPLengthRequired))

    def test_check_object_creation_name_length(self):
        headers = {'Transfer-Encoding': 'chunked',
                   'Content-Type': 'text/plain'}
        name = 'o' * constraints.MAX_OBJECT_NAME_LENGTH
        self.assertEquals(constraints.check_object_creation(Request.blank('/',
            headers=headers), name), None)
        name = 'o' * (constraints.MAX_OBJECT_NAME_LENGTH + 1)
        self.assert_(isinstance(constraints.check_object_creation(
            Request.blank('/', headers=headers), name),
            HTTPBadRequest))

    def test_check_object_creation_content_type(self):
        headers = {'Transfer-Encoding': 'chunked',
                   'Content-Type': 'text/plain'}
        self.assertEquals(constraints.check_object_creation(Request.blank('/',
            headers=headers), 'object_name'), None)
        headers = {'Transfer-Encoding': 'chunked'}
        self.assert_(isinstance(constraints.check_object_creation(
            Request.blank('/', headers=headers), 'object_name'),
            HTTPBadRequest))

    def test_check_object_creation_bad_content_type(self):
        headers = {'Transfer-Encoding': 'chunked',
                   'Content-Type': '\xff\xff'}
        resp = constraints.check_object_creation(
            Request.blank('/', headers=headers), 'object_name')
        self.assert_(isinstance(resp, HTTPBadRequest))
        self.assert_('Content-Type' in resp.body)


if __name__ == '__main__':
    unittest.main()
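Both checkers share one calling convention, which the assertions above pin down: None means the request passes, and anything else is a ready-to-send webob error response. A hypothetical handler fragment using that convention:

# Hypothetical handler fragment; a non-None return from a checker is
# itself the error response to hand back.
from swift.common import constraints

def handle_object_put(req, obj_name):
    error_response = constraints.check_object_creation(req, obj_name)
    if error_response:
        return error_response   # HTTPBadRequest, HTTPLengthRequired, etc.
    error_response = constraints.check_metadata(req)
    if error_response:
        return error_response
    # ...proceed with storing the object...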
1579
test/unit/common/test_db.py
Normal file
File diff suppressed because it is too large
Some files were not shown because too many files have changed in this diff.