Add Storage Policy support to Object Server

Objects now have a storage policy index associated with them as well;
this is determined by their filesystem path. Like before, objects in
policy 0 are in /srv/node/$disk/objects; this provides compatibility
on upgrade. (Recall that policy 0 is given to all existing data when a
cluster is upgraded.) Objects in policy 1 are in
/srv/node/$disk/objects-1, objects in policy 2 are in
/srv/node/$disk/objects-2, and so on.

 * The 'quarantined' dir already contained an 'objects' subdir; now
   'objects-N' subdirs will also be created at the same level

This commit does not address replicators, auditors, or updaters except
where method signatures changed. They'll still work if your cluster
has only one storage policy, though.

DocImpact
Implements: blueprint storage-policies
Change-Id: I459f3ed97df516cb0c9294477c28729c30f48e09
This commit is contained in:
Clay Gerrard
2014-05-27 01:17:13 -07:00
parent 4321bb0af6
commit 3824ff3df7
18 changed files with 970 additions and 275 deletions

View File

@@ -30,6 +30,7 @@ from swift.common.exceptions import ListingIterError, SegmentError
from swift.common.http import is_success, HTTP_SERVICE_UNAVAILABLE
from swift.common.swob import HTTPBadRequest, HTTPNotAcceptable
from swift.common.utils import split_path, validate_device_partition
from swift.common.storage_policy import POLICY_INDEX
from swift.common.wsgi import make_subrequest
@@ -78,12 +79,34 @@ def get_listing_content_type(req):
return out_content_type
def get_name_and_placement(request, minsegs=1, maxsegs=None,
                           rest_with_last=False):
    """
    Utility function to split and validate the request path and
    storage_policy_index.  The storage_policy_index is extracted from
    the headers of the request and converted to an integer, and then the
    args are passed through to :meth:`split_and_validate_path`.

    :param request: a swob Request object
    :param minsegs: minimum number of path segments (passed through)
    :param maxsegs: maximum number of path segments (passed through)
    :param rest_with_last: if True, trailing data is part of the last
                           segment (passed through)

    :returns: a list, result of :meth:`split_and_validate_path` with
              storage_policy_index appended on the end
    :raises: HTTPBadRequest
    """
    # Default to policy 0 (the legacy/upgrade-compat policy) when the
    # header is absent.
    policy_idx = request.headers.get(POLICY_INDEX, '0')
    try:
        policy_idx = int(policy_idx)
    except ValueError:
        # A non-integer policy index is a client error; without this
        # guard the bare int() would bubble a ValueError up as a 500,
        # contradicting the documented HTTPBadRequest contract.
        raise HTTPBadRequest(
            body='Invalid %s %r' % (POLICY_INDEX, policy_idx),
            request=request, content_type='text/plain')
    results = split_and_validate_path(request, minsegs=minsegs,
                                      maxsegs=maxsegs,
                                      rest_with_last=rest_with_last)
    results.append(policy_idx)
    return results
def split_and_validate_path(request, minsegs=1, maxsegs=None,
rest_with_last=False):
"""
Utility function to split and validate the request path.
:returns: result of split_path if everything's okay
:returns: result of :meth:`~swift.common.utils.split_path` if
everything's okay
:raises: HTTPBadRequest if something's not okay
"""
try:

View File

@@ -58,7 +58,8 @@ from swift.common.exceptions import DiskFileQuarantined, DiskFileNotExist, \
DiskFileDeleted, DiskFileError, DiskFileNotOpen, PathNotDir, \
ReplicationLockTimeout, DiskFileExpired
from swift.common.swob import multi_range_iterator
from swift.common.storage_policy import get_policy_string, POLICIES
from functools import partial
PICKLE_PROTOCOL = 2
ONE_WEEK = 604800
@@ -67,8 +68,12 @@ METADATA_KEY = 'user.swift.metadata'
# These are system-set metadata keys that cannot be changed with a POST.
# They should be lowercase.
DATAFILE_SYSTEM_META = set('content-length content-type deleted etag'.split())
DATADIR = 'objects'
ASYNCDIR = 'async_pending'
DATADIR_BASE = 'objects'
ASYNCDIR_BASE = 'async_pending'
TMP_BASE = 'tmp'
get_data_dir = partial(get_policy_string, DATADIR_BASE)
get_async_dir = partial(get_policy_string, ASYNCDIR_BASE)
get_tmp_dir = partial(get_policy_string, TMP_BASE)
def read_metadata(fd):
@@ -105,6 +110,37 @@ def write_metadata(fd, metadata):
key += 1
def extract_policy_index(obj_path):
    """
    Extracts the policy index for an object (based on the name of the objects
    directory) given the device-relative path to the object. Returns 0 in
    the event that the path is malformed in some way.

    The device-relative path is everything after the mount point; for example:

    /srv/node/d42/objects-5/179/
        485dc017205a81df3af616d917c90179/1401811134.873649.data

    would have device-relative path:

    objects-5/179/485dc017205a81df3af616d917c90179/1401811134.873649.data

    :param obj_path: device-relative path of an object
    :returns: storage policy index
    """
    policy_idx = 0
    try:
        # Locate the datadir component ('objects' or 'objects-N') by the
        # first occurrence of the base name anywhere in the path; the
        # directory name is everything up to the next '/'.
        obj_portion = obj_path[obj_path.index(DATADIR_BASE):]
        obj_dirname = obj_portion[:obj_portion.index('/')]
    except Exception:
        # No 'objects' component (or nothing after it): malformed path,
        # fall back to policy 0.
        return policy_idx
    if '-' in obj_dirname:
        # 'objects-N' style: N is the candidate policy index (still a
        # string at this point).
        base, policy_idx = obj_dirname.split('-', 1)
        # An index not known to POLICIES falls back to 0.
        # NOTE(review): a non-numeric suffix (e.g. 'objects-t') appears
        # to propagate a ValueError out of get_by_index/int() rather
        # than returning 0 -- confirm that is the intended contract.
        if POLICIES.get_by_index(policy_idx) is None:
            policy_idx = 0
    return int(policy_idx)
def quarantine_renamer(device_path, corrupted_file_path):
"""
In the case that a file is corrupted, move it to a quarantined
@@ -118,7 +154,9 @@ def quarantine_renamer(device_path, corrupted_file_path):
exceptions from rename
"""
from_dir = dirname(corrupted_file_path)
to_dir = join(device_path, 'quarantined', 'objects', basename(from_dir))
to_dir = join(device_path, 'quarantined',
get_data_dir(extract_policy_index(corrupted_file_path)),
basename(from_dir))
invalidate_hash(dirname(from_dir))
try:
renamer(from_dir, to_dir)
@@ -385,7 +423,7 @@ def object_audit_location_generator(devices, mount_check=True, logger=None,
logger.debug(
_('Skipping %s as it is not mounted'), device)
continue
datadir_path = os.path.join(devices, device, DATADIR)
datadir_path = os.path.join(devices, device, DATADIR_BASE)
partitions = listdir(datadir_path)
for partition in partitions:
part_path = os.path.join(datadir_path, partition)
@@ -495,7 +533,7 @@ class DiskFileManager(object):
def pickle_async_update(self, device, account, container, obj, data,
timestamp):
device_path = self.construct_dev_path(device)
async_dir = os.path.join(device_path, ASYNCDIR)
async_dir = os.path.join(device_path, ASYNCDIR_BASE)
ohash = hash_path(account, container, obj)
self.threadpools[device].run_in_thread(
write_pickle,
@@ -506,12 +544,13 @@ class DiskFileManager(object):
self.logger.increment('async_pendings')
def get_diskfile(self, device, partition, account, container, obj,
**kwargs):
policy_idx=0, **kwargs):
dev_path = self.get_dev_path(device)
if not dev_path:
raise DiskFileDeviceUnavailable()
return DiskFile(self, dev_path, self.threadpools[device],
partition, account, container, obj, **kwargs)
partition, account, container, obj,
policy_idx=policy_idx, **kwargs)
def object_audit_location_generator(self, device_dirs=None):
return object_audit_location_generator(self.devices, self.mount_check,
@@ -537,7 +576,7 @@ class DiskFileManager(object):
if not dev_path:
raise DiskFileDeviceUnavailable()
object_path = os.path.join(
dev_path, DATADIR, partition, object_hash[-3:], object_hash)
dev_path, DATADIR_BASE, partition, object_hash[-3:], object_hash)
try:
filenames = hash_cleanup_listdir(object_path, self.reclaim_age)
except OSError as err:
@@ -569,7 +608,7 @@ class DiskFileManager(object):
dev_path = self.get_dev_path(device)
if not dev_path:
raise DiskFileDeviceUnavailable()
partition_path = os.path.join(dev_path, DATADIR, partition)
partition_path = os.path.join(dev_path, DATADIR_BASE, partition)
if not os.path.exists(partition_path):
mkdirs(partition_path)
suffixes = suffix.split('-') if suffix else []
@@ -595,7 +634,7 @@ class DiskFileManager(object):
dev_path = self.get_dev_path(device)
if not dev_path:
raise DiskFileDeviceUnavailable()
partition_path = os.path.join(dev_path, DATADIR, partition)
partition_path = os.path.join(dev_path, DATADIR_BASE, partition)
for suffix in self._listdir(partition_path):
if len(suffix) != 3:
continue
@@ -620,7 +659,7 @@ class DiskFileManager(object):
if suffixes is None:
suffixes = self.yield_suffixes(device, partition)
else:
partition_path = os.path.join(dev_path, DATADIR, partition)
partition_path = os.path.join(dev_path, DATADIR_BASE, partition)
suffixes = (
(os.path.join(partition_path, suffix), suffix)
for suffix in suffixes)
@@ -937,10 +976,13 @@ class DiskFile(object):
:param account: account name for the object
:param container: container name for the object
:param obj: object name for the object
:param _datadir: override the full datadir otherwise constructed here
:param policy_idx: used to get the data dir when constructing it here
"""
def __init__(self, mgr, device_path, threadpool, partition,
account=None, container=None, obj=None, _datadir=None):
account=None, container=None, obj=None, _datadir=None,
policy_idx=0):
self._mgr = mgr
self._device_path = device_path
self._threadpool = threadpool or ThreadPool(nthreads=0)
@@ -954,7 +996,8 @@ class DiskFile(object):
self._obj = obj
name_hash = hash_path(account, container, obj)
self._datadir = join(
device_path, storage_directory(DATADIR, partition, name_hash))
device_path, storage_directory(get_data_dir(policy_idx),
partition, name_hash))
else:
# gets populated when we read the metadata
self._name = None
@@ -973,7 +1016,8 @@ class DiskFile(object):
else:
name_hash = hash_path(account, container, obj)
self._datadir = join(
device_path, storage_directory(DATADIR, partition, name_hash))
device_path, storage_directory(get_data_dir(policy_idx),
partition, name_hash))
@property
def account(self):

View File

@@ -38,7 +38,8 @@ from swift.common.exceptions import ConnectionTimeout, DiskFileQuarantined, \
DiskFileDeviceUnavailable, DiskFileExpired, ChunkReadTimeout
from swift.obj import ssync_receiver
from swift.common.http import is_success
from swift.common.request_helpers import split_and_validate_path, is_user_meta
from swift.common.request_helpers import get_name_and_placement, \
is_user_meta, split_and_validate_path
from swift.common.swob import HTTPAccepted, HTTPBadRequest, HTTPCreated, \
HTTPInternalServerError, HTTPNoContent, HTTPNotFound, \
HTTPPreconditionFailed, HTTPRequestTimeout, HTTPUnprocessableEntity, \
@@ -139,7 +140,7 @@ class ObjectController(object):
conf.get('replication_failure_ratio') or 1.0)
def get_diskfile(self, device, partition, account, container, obj,
**kwargs):
policy_idx, **kwargs):
"""
Utility method for instantiating a DiskFile object supporting a given
REST API.
@@ -149,7 +150,8 @@ class ObjectController(object):
behavior.
"""
return self._diskfile_mgr.get_diskfile(
device, partition, account, container, obj, **kwargs)
device, partition, account, container, obj,
policy_idx=policy_idx, **kwargs)
def async_update(self, op, account, container, obj, host, partition,
contdevice, headers_out, objdevice):
@@ -315,9 +317,8 @@ class ObjectController(object):
@timing_stats()
def POST(self, request):
"""Handle HTTP POST requests for the Swift Object Server."""
device, partition, account, container, obj = \
split_and_validate_path(request, 5, 5, True)
device, partition, account, container, obj, policy_idx = \
get_name_and_placement(request, 5, 5, True)
if 'x-timestamp' not in request.headers or \
not check_float(request.headers['x-timestamp']):
return HTTPBadRequest(body='Missing timestamp', request=request,
@@ -328,7 +329,8 @@ class ObjectController(object):
content_type='text/plain')
try:
disk_file = self.get_diskfile(
device, partition, account, container, obj)
device, partition, account, container, obj,
policy_idx=policy_idx)
except DiskFileDeviceUnavailable:
return HTTPInsufficientStorage(drive=device, request=request)
try:
@@ -360,9 +362,8 @@ class ObjectController(object):
@timing_stats()
def PUT(self, request):
"""Handle HTTP PUT requests for the Swift Object Server."""
device, partition, account, container, obj = \
split_and_validate_path(request, 5, 5, True)
device, partition, account, container, obj, policy_idx = \
get_name_and_placement(request, 5, 5, True)
if 'x-timestamp' not in request.headers or \
not check_float(request.headers['x-timestamp']):
return HTTPBadRequest(body='Missing timestamp', request=request,
@@ -381,7 +382,8 @@ class ObjectController(object):
content_type='text/plain')
try:
disk_file = self.get_diskfile(
device, partition, account, container, obj)
device, partition, account, container, obj,
policy_idx=policy_idx)
except DiskFileDeviceUnavailable:
return HTTPInsufficientStorage(drive=device, request=request)
try:
@@ -455,8 +457,8 @@ class ObjectController(object):
if orig_delete_at != new_delete_at:
if new_delete_at:
self.delete_at_update(
'PUT', new_delete_at, account, container, obj,
request, device)
'PUT', new_delete_at, account, container, obj, request,
device)
if orig_delete_at:
self.delete_at_update(
'DELETE', orig_delete_at, account, container, obj,
@@ -475,14 +477,15 @@ class ObjectController(object):
@timing_stats()
def GET(self, request):
"""Handle HTTP GET requests for the Swift Object Server."""
device, partition, account, container, obj = \
split_and_validate_path(request, 5, 5, True)
device, partition, account, container, obj, policy_idx = \
get_name_and_placement(request, 5, 5, True)
keep_cache = self.keep_cache_private or (
'X-Auth-Token' not in request.headers and
'X-Storage-Token' not in request.headers)
try:
disk_file = self.get_diskfile(
device, partition, account, container, obj)
device, partition, account, container, obj,
policy_idx=policy_idx)
except DiskFileDeviceUnavailable:
return HTTPInsufficientStorage(drive=device, request=request)
try:
@@ -521,11 +524,12 @@ class ObjectController(object):
@timing_stats(sample_rate=0.8)
def HEAD(self, request):
"""Handle HTTP HEAD requests for the Swift Object Server."""
device, partition, account, container, obj = \
split_and_validate_path(request, 5, 5, True)
device, partition, account, container, obj, policy_idx = \
get_name_and_placement(request, 5, 5, True)
try:
disk_file = self.get_diskfile(
device, partition, account, container, obj)
device, partition, account, container, obj,
policy_idx=policy_idx)
except DiskFileDeviceUnavailable:
return HTTPInsufficientStorage(drive=device, request=request)
try:
@@ -555,15 +559,16 @@ class ObjectController(object):
@timing_stats()
def DELETE(self, request):
"""Handle HTTP DELETE requests for the Swift Object Server."""
device, partition, account, container, obj = \
split_and_validate_path(request, 5, 5, True)
device, partition, account, container, obj, policy_idx = \
get_name_and_placement(request, 5, 5, True)
if 'x-timestamp' not in request.headers or \
not check_float(request.headers['x-timestamp']):
return HTTPBadRequest(body='Missing timestamp', request=request,
content_type='text/plain')
try:
disk_file = self.get_diskfile(
device, partition, account, container, obj)
device, partition, account, container, obj,
policy_idx=policy_idx)
except DiskFileDeviceUnavailable:
return HTTPInsufficientStorage(drive=device, request=request)
try:

View File

@@ -29,7 +29,7 @@ from swift.common.ring import Ring
from swift.common.utils import get_logger, renamer, write_pickle, \
dump_recon_cache, config_true_value, ismount
from swift.common.daemon import Daemon
from swift.obj.diskfile import ASYNCDIR
from swift.obj.diskfile import ASYNCDIR_BASE
from swift.common.http import is_success, HTTP_NOT_FOUND, \
HTTP_INTERNAL_SERVER_ERROR
@@ -137,7 +137,7 @@ class ObjectUpdater(Daemon):
:param device: path to device
"""
start_time = time.time()
async_pending = os.path.join(device, ASYNCDIR)
async_pending = os.path.join(device, ASYNCDIR_BASE)
if not os.path.isdir(async_pending):
return
for prefix in os.listdir(async_pending):

View File

@@ -162,6 +162,7 @@ def headers_to_container_info(headers, status_int=HTTP_OK):
'object_count': headers.get('x-container-object-count'),
'bytes': headers.get('x-container-bytes-used'),
'versions': headers.get('x-versions-location'),
'storage_policy': headers.get(POLICY_INDEX.lower(), '0'),
'cors': {
'allow_origin': meta.get('access-control-allow-origin'),
'expose_headers': meta.get('access-control-expose-headers'),

View File

@@ -56,6 +56,7 @@ from swift.common.swob import HTTPAccepted, HTTPBadRequest, HTTPNotFound, \
HTTPPreconditionFailed, HTTPRequestEntityTooLarge, HTTPRequestTimeout, \
HTTPServerError, HTTPServiceUnavailable, Request, \
HTTPClientDisconnect, HTTPNotImplemented
from swift.common.storage_policy import POLICY_INDEX
from swift.common.request_helpers import is_user_meta
@@ -195,15 +196,18 @@ class ObjectController(Controller):
container_info = self.container_info(
self.account_name, self.container_name, req)
req.acl = container_info['read_acl']
policy_idx = container_info['storage_policy']
obj_ring = self.app.get_object_ring(policy_idx)
# pass the policy index to storage nodes via req header
req.headers[POLICY_INDEX] = policy_idx
if 'swift.authorize' in req.environ:
aresp = req.environ['swift.authorize'](req)
if aresp:
return aresp
partition = self.app.object_ring.get_part(
partition = obj_ring.get_part(
self.account_name, self.container_name, self.object_name)
resp = self.GETorHEAD_base(
req, _('Object'), self.app.object_ring, partition,
req, _('Object'), obj_ring, partition,
req.swift_entity_path)
if ';' in resp.headers.get('content-type', ''):
@@ -297,7 +301,11 @@ class ObjectController(Controller):
self.app.expiring_objects_account, delete_at_container)
else:
delete_at_container = delete_at_part = delete_at_nodes = None
partition, nodes = self.app.object_ring.get_nodes(
policy_idx = container_info['storage_policy']
obj_ring = self.app.get_object_ring(policy_idx)
# pass the policy index to storage nodes via req header
req.headers[POLICY_INDEX] = policy_idx
partition, nodes = obj_ring.get_nodes(
self.account_name, self.container_name, self.object_name)
req.headers['X-Timestamp'] = normalize_timestamp(time.time())
@@ -305,7 +313,7 @@ class ObjectController(Controller):
req, len(nodes), container_partition, containers,
delete_at_container, delete_at_part, delete_at_nodes)
resp = self.make_requests(req, self.app.object_ring, partition,
resp = self.make_requests(req, obj_ring, partition,
'POST', req.swift_entity_path, headers)
return resp
@@ -448,6 +456,10 @@ class ObjectController(Controller):
body='If-None-Match only supports *')
container_info = self.container_info(
self.account_name, self.container_name, req)
policy_idx = container_info['storage_policy']
obj_ring = self.app.get_object_ring(policy_idx)
# pass the policy index to storage nodes via req header
req.headers[POLICY_INDEX] = policy_idx
container_partition = container_info['partition']
containers = container_info['nodes']
req.acl = container_info['write_acl']
@@ -478,16 +490,19 @@ class ObjectController(Controller):
body='Non-integer X-Delete-After')
req.headers['x-delete-at'] = normalize_delete_at_timestamp(
time.time() + x_delete_after)
partition, nodes = self.app.object_ring.get_nodes(
partition, nodes = obj_ring.get_nodes(
self.account_name, self.container_name, self.object_name)
# do a HEAD request for container sync and checking object versions
if 'x-timestamp' in req.headers or \
(object_versions and not
req.environ.get('swift_versioned_copy')):
hreq = Request.blank(req.path_info, headers={'X-Newest': 'True'},
# make sure proxy-server uses the right policy index
_headers = {POLICY_INDEX: req.headers[POLICY_INDEX],
'X-Newest': 'True'}
hreq = Request.blank(req.path_info, headers=_headers,
environ={'REQUEST_METHOD': 'HEAD'})
hresp = self.GETorHEAD_base(
hreq, _('Object'), self.app.object_ring, partition,
hreq, _('Object'), obj_ring, partition,
hreq.swift_entity_path)
# Used by container sync feature
if 'x-timestamp' in req.headers:
@@ -642,7 +657,7 @@ class ObjectController(Controller):
delete_at_container = delete_at_part = delete_at_nodes = None
node_iter = GreenthreadSafeIterator(
self.iter_nodes_local_first(self.app.object_ring, partition))
self.iter_nodes_local_first(obj_ring, partition))
pile = GreenPile(len(nodes))
te = req.headers.get('transfer-encoding', '')
chunked = ('chunked' in te)
@@ -756,6 +771,10 @@ class ObjectController(Controller):
"""HTTP DELETE request handler."""
container_info = self.container_info(
self.account_name, self.container_name, req)
policy_idx = container_info['storage_policy']
obj_ring = self.app.get_object_ring(policy_idx)
# pass the policy index to storage nodes via req header
req.headers[POLICY_INDEX] = policy_idx
container_partition = container_info['partition']
containers = container_info['nodes']
req.acl = container_info['write_acl']
@@ -809,6 +828,10 @@ class ObjectController(Controller):
new_del_req = Request.blank(copy_path, environ=req.environ)
container_info = self.container_info(
self.account_name, self.container_name, req)
policy_idx = container_info['storage_policy']
obj_ring = self.app.get_object_ring(policy_idx)
# pass the policy index to storage nodes via req header
new_del_req.headers[POLICY_INDEX] = policy_idx
container_partition = container_info['partition']
containers = container_info['nodes']
new_del_req.acl = container_info['write_acl']
@@ -823,7 +846,7 @@ class ObjectController(Controller):
return aresp
if not containers:
return HTTPNotFound(request=req)
partition, nodes = self.app.object_ring.get_nodes(
partition, nodes = obj_ring.get_nodes(
self.account_name, self.container_name, self.object_name)
# Used by container sync feature
if 'x-timestamp' in req.headers:
@@ -840,7 +863,7 @@ class ObjectController(Controller):
headers = self._backend_requests(
req, len(nodes), container_partition, containers)
resp = self.make_requests(req, self.app.object_ring,
resp = self.make_requests(req, obj_ring,
partition, 'DELETE', req.swift_entity_path,
headers)
return resp

View File

@@ -25,13 +25,13 @@ from eventlet import Timeout
from swift import __canonical_version__ as swift_version
from swift.common import constraints
from swift.common.storage_policy import POLICIES
from swift.common.ring import Ring
from swift.common.utils import cache_from_env, get_logger, \
get_remote_client, split_path, config_true_value, generate_trans_id, \
affinity_key_function, affinity_locality_predicate, list_from_csv, \
register_swift_info
from swift.common.constraints import check_utf8
from swift.common.storage_policy import POLICIES
from swift.proxy.controllers import AccountController, ObjectController, \
ContainerController, InfoController
from swift.common.swob import HTTPBadRequest, HTTPForbidden, \
@@ -68,7 +68,7 @@ class Application(object):
"""WSGI application for the proxy server."""
def __init__(self, conf, memcache=None, logger=None, account_ring=None,
container_ring=None, object_ring=None):
container_ring=None):
if conf is None:
conf = {}
if logger is None:
@@ -77,6 +77,7 @@ class Application(object):
self.logger = logger
swift_dir = conf.get('swift_dir', '/etc/swift')
self.swift_dir = swift_dir
self.node_timeout = int(conf.get('node_timeout', 10))
self.recoverable_node_timeout = int(
conf.get('recoverable_node_timeout', self.node_timeout))
@@ -99,11 +100,13 @@ class Application(object):
config_true_value(conf.get('allow_account_management', 'no'))
self.object_post_as_copy = \
config_true_value(conf.get('object_post_as_copy', 'true'))
self.object_ring = object_ring or Ring(swift_dir, ring_name='object')
self.container_ring = container_ring or Ring(swift_dir,
ring_name='container')
self.account_ring = account_ring or Ring(swift_dir,
ring_name='account')
# ensure rings are loaded for all configured storage policies
for policy in POLICIES:
policy.load_ring(swift_dir)
self.memcache = memcache
mimetypes.init(mimetypes.knownfiles +
[os.path.join(swift_dir, 'mime.types')])
@@ -220,6 +223,16 @@ class Application(object):
"read_affinity setting will have no effect." %
self.sorting_method)
def get_object_ring(self, policy_idx):
    """
    Look up the ring that serves requests for a given storage policy.

    :param policy_idx: policy index as defined in swift.conf
    :returns: appropriate ring object
    """
    # Ring loading/caching is delegated to the policy collection.
    ring = POLICIES.get_object_ring(policy_idx, self.swift_dir)
    return ring
def get_controller(self, path):
"""
Get the controller to handle a request.

View File

@@ -37,6 +37,8 @@ import swift.proxy.server
from swift.common.swob import Request
from swift.common import wsgi, utils
from swift.common.storage_policy import StoragePolicy, \
StoragePolicyCollection
from test.unit import temptree, write_fake_ring
@@ -46,7 +48,14 @@ from paste.deploy import loadwsgi
def _fake_rings(tmpdir):
    """Write fake account/container rings plus one object ring per policy."""
    for server in ('account', 'container'):
        write_fake_ring(os.path.join(tmpdir, server + '.ring.gz'))
    # Some storage-policy-specific fake rings (policy 0's ring_name is
    # presumably 'object', covering the legacy ring file).
    policies = StoragePolicyCollection([StoragePolicy(0, 'zero'),
                                        StoragePolicy(1, 'one',
                                                      is_default=True)])
    for pol in policies:
        write_fake_ring(os.path.join(tmpdir, pol.ring_name + '.ring.gz'))
class TestWSGI(unittest.TestCase):

View File

@@ -25,7 +25,7 @@ from tempfile import mkdtemp
from test.unit import FakeLogger
from swift.obj import auditor
from swift.obj.diskfile import DiskFile, write_metadata, invalidate_hash, \
DATADIR, DiskFileManager, AuditLocation
DATADIR_BASE, DiskFileManager, AuditLocation
from swift.common.utils import hash_path, mkdirs, normalize_timestamp, \
storage_directory
@@ -339,7 +339,7 @@ class TestAuditor(unittest.TestCase):
name_hash = hash_path('a', 'c', 'o')
dir_path = os.path.join(
self.devices, 'sda',
storage_directory(DATADIR, '0', name_hash))
storage_directory(DATADIR_BASE, '0', name_hash))
ts_file_path = os.path.join(dir_path, '99999.ts')
if not os.path.exists(dir_path):
mkdirs(dir_path)

View File

@@ -33,7 +33,7 @@ from contextlib import closing, nested
from gzip import GzipFile
from eventlet import tpool
from test.unit import FakeLogger, mock as unit_mock, temptree
from test.unit import FakeLogger, mock as unit_mock, temptree, patch_policies
from swift.obj import diskfile
from swift.common import utils
@@ -43,6 +43,15 @@ from swift.common.exceptions import DiskFileNotExist, DiskFileQuarantined, \
DiskFileDeviceUnavailable, DiskFileDeleted, DiskFileNotOpen, \
DiskFileError, ReplicationLockTimeout, PathNotDir, DiskFileCollision, \
DiskFileExpired, SwiftException, DiskFileNoSpace
from swift.common.storage_policy import StoragePolicy, POLICIES, \
get_policy_string
from functools import partial
get_data_dir = partial(get_policy_string, diskfile.DATADIR_BASE)
get_tmp_dir = partial(get_policy_string, diskfile.TMP_BASE)
_mocked_policies = [StoragePolicy(0, 'zero', False),
StoragePolicy(1, 'one', True)]
def _create_test_ring(path):
@@ -101,23 +110,69 @@ class TestDiskFileModuleMethods(unittest.TestCase):
def tearDown(self):
rmtree(self.testdir, ignore_errors=1)
def _create_diskfile(self):
def _create_diskfile(self, policy_idx=0):
return self.df_mgr.get_diskfile(self.existing_device,
'0', 'a', 'c', 'o')
'0', 'a', 'c', 'o',
policy_idx)
@patch_policies(_mocked_policies)
def test_extract_policy_index(self):
# good path names
pn = 'objects/0/606/1984527ed7ef6247c78606/1401379842.14643.data'
self.assertEqual(diskfile.extract_policy_index(pn), 0)
pn = 'objects-1/0/606/198452b6ef6247c78606/1401379842.14643.data'
self.assertEqual(diskfile.extract_policy_index(pn), 1)
good_path = '/srv/node/sda1/objects-1/1/abc/def/1234.data'
self.assertEquals(1, diskfile.extract_policy_index(good_path))
good_path = '/srv/node/sda1/objects/1/abc/def/1234.data'
self.assertEquals(0, diskfile.extract_policy_index(good_path))
# short paths still ok
path = '/srv/node/sda1/objects/1/1234.data'
self.assertEqual(diskfile.extract_policy_index(path), 0)
path = '/srv/node/sda1/objects-1/1/1234.data'
self.assertEqual(diskfile.extract_policy_index(path), 1)
# leading slash, just in case
pn = '/objects/0/606/1984527ed7ef6247c78606/1401379842.14643.data'
self.assertEqual(diskfile.extract_policy_index(pn), 0)
pn = '/objects-1/0/606/198452b6ef6247c78606/1401379842.14643.data'
self.assertEqual(diskfile.extract_policy_index(pn), 1)
# bad policy index
pn = 'objects-2/0/606/198427efcff042c78606/1401379842.14643.data'
self.assertEqual(diskfile.extract_policy_index(pn), 0)
bad_path = '/srv/node/sda1/objects-t/1/abc/def/1234.data'
self.assertRaises(ValueError,
diskfile.extract_policy_index, bad_path)
# malformed path (no objects dir or nothing at all)
pn = 'XXXX/0/606/1984527ed42b6ef6247c78606/1401379842.14643.data'
self.assertEqual(diskfile.extract_policy_index(pn), 0)
self.assertEqual(diskfile.extract_policy_index(''), 0)
# no datadir base in path
bad_path = '/srv/node/sda1/foo-1/1/abc/def/1234.data'
self.assertEqual(diskfile.extract_policy_index(bad_path), 0)
bad_path = '/srv/node/sda1/obj1/1/abc/def/1234.data'
self.assertEqual(diskfile.extract_policy_index(bad_path), 0)
@patch_policies(_mocked_policies)
def test_quarantine_renamer(self):
# we use this for convenience, not really about a diskfile layout
df = self._create_diskfile()
mkdirs(df._datadir)
exp_dir = os.path.join(self.devices, 'quarantined', 'objects',
os.path.basename(df._datadir))
qbit = os.path.join(df._datadir, 'qbit')
with open(qbit, 'w') as f:
f.write('abc')
to_dir = diskfile.quarantine_renamer(self.devices, qbit)
self.assertEqual(to_dir, exp_dir)
self.assertRaises(OSError, diskfile.quarantine_renamer, self.devices,
qbit)
for policy in POLICIES:
# we use this for convenience, not really about a diskfile layout
df = self._create_diskfile(policy_idx=policy.idx)
mkdirs(df._datadir)
exp_dir = os.path.join(self.devices, 'quarantined',
get_data_dir(policy.idx),
os.path.basename(df._datadir))
qbit = os.path.join(df._datadir, 'qbit')
with open(qbit, 'w') as f:
f.write('abc')
to_dir = diskfile.quarantine_renamer(self.devices, qbit)
self.assertEqual(to_dir, exp_dir)
self.assertRaises(OSError, diskfile.quarantine_renamer,
self.devices, qbit)
def test_hash_suffix_enoent(self):
self.assertRaises(PathNotDir, diskfile.hash_suffix,
@@ -130,6 +185,35 @@ class TestDiskFileModuleMethods(unittest.TestCase):
self.assertRaises(OSError, diskfile.hash_suffix,
os.path.join(self.testdir, "doesnotexist"), 101)
@patch_policies(_mocked_policies)
def test_get_data_dir(self):
    """get_data_dir: 'objects' for policy 0, 'objects-N' otherwise."""
    self.assertEquals(diskfile.DATADIR_BASE, diskfile.get_data_dir(0))
    self.assertEquals(diskfile.DATADIR_BASE + "-1",
                      diskfile.get_data_dir(1))
    # Non-numeric and unknown policy indexes are rejected.
    for bad_idx in ('junk', 99):
        self.assertRaises(ValueError, diskfile.get_data_dir, bad_idx)
@patch_policies(_mocked_policies)
def test_get_async_dir(self):
    """get_async_dir: 'async_pending' for policy 0, suffixed otherwise."""
    self.assertEquals(diskfile.ASYNCDIR_BASE, diskfile.get_async_dir(0))
    self.assertEquals(diskfile.ASYNCDIR_BASE + "-1",
                      diskfile.get_async_dir(1))
    # Non-numeric and unknown policy indexes are rejected.
    for bad_idx in ('junk', 99):
        self.assertRaises(ValueError, diskfile.get_async_dir, bad_idx)
@patch_policies(_mocked_policies)
def test_get_tmp_dir(self):
    """get_tmp_dir: 'tmp' for policy 0, 'tmp-N' otherwise."""
    self.assertEquals(diskfile.TMP_BASE, diskfile.get_tmp_dir(0))
    self.assertEquals(diskfile.TMP_BASE + "-1",
                      diskfile.get_tmp_dir(1))
    # Non-numeric and unknown policy indexes are rejected.
    for bad_idx in ('junk', 99):
        self.assertRaises(ValueError, diskfile.get_tmp_dir, bad_idx)
def test_hash_suffix_hash_dir_is_file_quarantine(self):
df = self._create_diskfile()
mkdirs(os.path.dirname(df._datadir))
@@ -733,7 +817,7 @@ class TestDiskFileManager(unittest.TestCase):
dp = self.df_mgr.construct_dev_path(self.existing_device1)
ohash = diskfile.hash_path('a', 'c', 'o')
wp.assert_called_with({'a': 1, 'b': 2},
os.path.join(dp, diskfile.ASYNCDIR,
os.path.join(dp, diskfile.ASYNCDIR_BASE,
ohash[-3:], ohash + '-' + ts),
os.path.join(dp, 'tmp'))
self.df_mgr.logger.increment.assert_called_with('async_pendings')
@@ -857,9 +941,10 @@ class TestDiskFile(unittest.TestCase):
pickle.dumps(metadata, diskfile.PICKLE_PROTOCOL))
def _simple_get_diskfile(self, partition='0', account='a', container='c',
obj='o'):
obj='o', policy_idx=0):
return self.df_mgr.get_diskfile(self.existing_device,
partition, account, container, obj)
partition, account, container, obj,
policy_idx)
def _create_test_file(self, data, timestamp=None, metadata=None,
account='a', container='c', obj='o'):

View File

@@ -34,14 +34,15 @@ from eventlet import sleep, spawn, wsgi, listen, Timeout, tpool
from nose import SkipTest
from test.unit import FakeLogger, debug_logger
from test.unit import connect_tcp, readuntil2crlfs
from test.unit import connect_tcp, readuntil2crlfs, patch_policies
from swift.obj import server as object_server
from swift.obj import diskfile
from swift.common import utils
from swift.common import utils, storage_policy
from swift.common.utils import hash_path, mkdirs, normalize_timestamp, \
NullLogger, storage_directory, public, replication
from swift.common import constraints
from swift.common.swob import Request, HeaderKeyDict
from swift.common.storage_policy import POLICY_INDEX, POLICIES
from swift.common.exceptions import DiskFileDeviceUnavailable
@@ -545,8 +546,8 @@ class TestObjectController(unittest.TestCase):
self.assertEquals(resp.status_int, 201)
objfile = os.path.join(
self.testdir, 'sda1',
storage_directory(diskfile.DATADIR, 'p',
hash_path('a', 'c', 'o')),
storage_directory(diskfile.get_data_dir(0),
'p', hash_path('a', 'c', 'o')),
timestamp + '.data')
self.assert_(os.path.isfile(objfile))
self.assertEquals(open(objfile).read(), 'VERIFY')
@@ -578,7 +579,7 @@ class TestObjectController(unittest.TestCase):
self.assertEquals(resp.status_int, 201)
objfile = os.path.join(
self.testdir, 'sda1',
storage_directory(diskfile.DATADIR, 'p',
storage_directory(diskfile.get_data_dir(0), 'p',
hash_path('a', 'c', 'o')),
timestamp + '.data')
self.assert_(os.path.isfile(objfile))
@@ -613,7 +614,7 @@ class TestObjectController(unittest.TestCase):
self.assertEqual(resp.status_int, 201)
objfile = os.path.join(
self.testdir, 'sda1',
storage_directory(diskfile.DATADIR, 'p',
storage_directory(diskfile.get_data_dir(0), 'p',
hash_path('a', 'c', 'o')),
timestamp + '.data')
self.assertTrue(os.path.isfile(objfile))
@@ -687,7 +688,7 @@ class TestObjectController(unittest.TestCase):
self.assertEquals(resp.status_int, 201)
objfile = os.path.join(
self.testdir, 'sda1',
storage_directory(diskfile.DATADIR, 'p',
storage_directory(diskfile.get_data_dir(0), 'p',
hash_path('a', 'c', 'o')),
timestamp + '.data')
self.assert_(os.path.isfile(objfile))
@@ -833,7 +834,7 @@ class TestObjectController(unittest.TestCase):
objfile = os.path.join(
self.testdir, 'sda1',
storage_directory(diskfile.DATADIR, 'p',
storage_directory(diskfile.get_data_dir(0), 'p',
hash_path('a', 'c', 'o')),
timestamp + '.data')
os.unlink(objfile)
@@ -957,7 +958,7 @@ class TestObjectController(unittest.TestCase):
objfile = os.path.join(
self.testdir, 'sda1',
storage_directory(diskfile.DATADIR, 'p',
storage_directory(diskfile.get_data_dir(0), 'p',
hash_path('a', 'c', 'o')),
timestamp + '.data')
os.unlink(objfile)
@@ -1252,7 +1253,7 @@ class TestObjectController(unittest.TestCase):
req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'GET'},
headers={'If-Modified-Since': since})
resp = self.object_controller.GET(req)
resp = req.get_response(self.object_controller)
self.assertEquals(resp.status_int, 304)
timestamp = normalize_timestamp(int(time()))
@@ -1391,7 +1392,7 @@ class TestObjectController(unittest.TestCase):
req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'GET'},
headers={'If-Unmodified-Since': since})
resp = self.object_controller.GET(req)
resp = req.get_response(self.object_controller)
self.assertEquals(resp.status_int, 200)
def test_HEAD_if_unmodified_since(self):
@@ -1568,7 +1569,7 @@ class TestObjectController(unittest.TestCase):
self.assertEquals(resp.status_int, 404)
ts_1000_file = os.path.join(
self.testdir, 'sda1',
storage_directory(diskfile.DATADIR, 'p',
storage_directory(diskfile.get_data_dir(0), 'p',
hash_path('a', 'c', 'o')),
timestamp + '.ts')
self.assertTrue(os.path.isfile(ts_1000_file))
@@ -1584,7 +1585,7 @@ class TestObjectController(unittest.TestCase):
self.assertEquals(resp.status_int, 404)
ts_999_file = os.path.join(
self.testdir, 'sda1',
storage_directory(diskfile.DATADIR, 'p',
storage_directory(diskfile.get_data_dir(0), 'p',
hash_path('a', 'c', 'o')),
timestamp + '.ts')
self.assertFalse(os.path.isfile(ts_999_file))
@@ -1604,7 +1605,7 @@ class TestObjectController(unittest.TestCase):
# There should now be 1000 ts and a 1001 data file.
data_1002_file = os.path.join(
self.testdir, 'sda1',
storage_directory(diskfile.DATADIR, 'p',
storage_directory(diskfile.get_data_dir(0), 'p',
hash_path('a', 'c', 'o')),
timestamp + '.data')
self.assertTrue(os.path.isfile(data_1002_file))
@@ -1619,7 +1620,7 @@ class TestObjectController(unittest.TestCase):
self.assertEquals(resp.status_int, 409)
ts_1001_file = os.path.join(
self.testdir, 'sda1',
storage_directory(diskfile.DATADIR, 'p',
storage_directory(diskfile.get_data_dir(0), 'p',
hash_path('a', 'c', 'o')),
timestamp + '.ts')
self.assertFalse(os.path.isfile(ts_1001_file))
@@ -1634,7 +1635,7 @@ class TestObjectController(unittest.TestCase):
self.assertEquals(resp.status_int, 204)
ts_1003_file = os.path.join(
self.testdir, 'sda1',
storage_directory(diskfile.DATADIR, 'p',
storage_directory(diskfile.get_data_dir(0), 'p',
hash_path('a', 'c', 'o')),
timestamp + '.ts')
self.assertTrue(os.path.isfile(ts_1003_file))
@@ -1673,7 +1674,7 @@ class TestObjectController(unittest.TestCase):
self.assertEquals(resp.status_int, 409)
objfile = os.path.join(
self.testdir, 'sda1',
storage_directory(diskfile.DATADIR, 'p',
storage_directory(diskfile.get_data_dir(0), 'p',
hash_path('a', 'c', 'o')),
timestamp + '.ts')
self.assertFalse(os.path.isfile(objfile))
@@ -1693,7 +1694,7 @@ class TestObjectController(unittest.TestCase):
self.assertEquals(resp.status_int, 204)
objfile = os.path.join(
self.testdir, 'sda1',
storage_directory(diskfile.DATADIR, 'p',
storage_directory(diskfile.get_data_dir(0), 'p',
hash_path('a', 'c', 'o')),
timestamp + '.ts')
self.assert_(os.path.isfile(objfile))
@@ -1713,7 +1714,7 @@ class TestObjectController(unittest.TestCase):
self.assertEquals(resp.status_int, 404)
objfile = os.path.join(
self.testdir, 'sda1',
storage_directory(diskfile.DATADIR, 'p',
storage_directory(diskfile.get_data_dir(0), 'p',
hash_path('a', 'c', 'o')),
timestamp + '.ts')
self.assert_(os.path.isfile(objfile))
@@ -1732,7 +1733,7 @@ class TestObjectController(unittest.TestCase):
self.assertEquals(resp.status_int, 404)
objfile = os.path.join(
self.testdir, 'sda1',
storage_directory(diskfile.DATADIR, 'p',
storage_directory(diskfile.get_data_dir(0), 'p',
hash_path('a', 'c', 'o')),
timestamp + '.ts')
self.assertFalse(os.path.isfile(objfile))
@@ -2119,6 +2120,9 @@ class TestObjectController(unittest.TestCase):
'x-timestamp': '1', 'x-out': 'set',
'user-agent': 'obj-server %s' % os.getpid()}])
@patch_policies([storage_policy.StoragePolicy(0, 'zero', True),
storage_policy.StoragePolicy(1, 'one'),
storage_policy.StoragePolicy(37, 'fantastico')])
def test_updating_multiple_delete_at_container_servers(self):
self.object_controller.expiring_objects_account = 'exp'
self.object_controller.expiring_objects_container_divisor = 60
@@ -2164,12 +2168,9 @@ class TestObjectController(unittest.TestCase):
'X-Delete-At-Partition': '6237',
'X-Delete-At-Device': 'sdp,sdq'})
orig_http_connect = object_server.http_connect
try:
object_server.http_connect = fake_http_connect
with mock.patch.object(object_server, 'http_connect',
fake_http_connect):
resp = req.get_response(self.object_controller)
finally:
object_server.http_connect = orig_http_connect
self.assertEqual(resp.status_int, 201)
@@ -2228,6 +2229,9 @@ class TestObjectController(unittest.TestCase):
'user-agent': 'obj-server %d' % os.getpid(),
'x-trans-id': '-'})})
@patch_policies([storage_policy.StoragePolicy(0, 'zero', True),
storage_policy.StoragePolicy(1, 'one'),
storage_policy.StoragePolicy(26, 'twice-thirteen')])
def test_updating_multiple_container_servers(self):
http_connect_args = []
@@ -2261,16 +2265,14 @@ class TestObjectController(unittest.TestCase):
headers={'X-Timestamp': '12345',
'Content-Type': 'application/burrito',
'Content-Length': '0',
'X-Storage-Policy-Index': '26',
'X-Container-Partition': '20',
'X-Container-Host': '1.2.3.4:5, 6.7.8.9:10',
'X-Container-Device': 'sdb1, sdf1'})
orig_http_connect = object_server.http_connect
try:
object_server.http_connect = fake_http_connect
self.object_controller.PUT(req)
finally:
object_server.http_connect = orig_http_connect
with mock.patch.object(object_server, 'http_connect',
fake_http_connect):
req.get_response(self.object_controller)
http_connect_args.sort(key=operator.itemgetter('ipaddr'))
@@ -2701,7 +2703,7 @@ class TestObjectController(unittest.TestCase):
'Content-Length': '4',
'Content-Type': 'application/octet-stream'})
req.body = 'TEST'
resp = self.object_controller.PUT(req)
resp = req.get_response(self.object_controller)
self.assertEquals(resp.status_int, 201)
self.assertEquals(given_args, [])
@@ -2711,7 +2713,7 @@ class TestObjectController(unittest.TestCase):
environ={'REQUEST_METHOD': 'POST'},
headers={'X-Timestamp': normalize_timestamp(time()),
'Content-Type': 'application/x-test'})
resp = self.object_controller.POST(req)
resp = req.get_response(self.object_controller)
self.assertEquals(resp.status_int, 202)
self.assertEquals(given_args, [])
@@ -2724,12 +2726,12 @@ class TestObjectController(unittest.TestCase):
headers={'X-Timestamp': timestamp1,
'Content-Type': 'application/x-test',
'X-Delete-At': delete_at_timestamp1})
resp = self.object_controller.POST(req)
resp = req.get_response(self.object_controller)
self.assertEquals(resp.status_int, 202)
self.assertEquals(
given_args, [
'PUT', int(delete_at_timestamp1), 'a', 'c', 'o',
req, 'sda1'])
given_args[5], 'sda1'])
while given_args:
given_args.pop()
@@ -2743,14 +2745,14 @@ class TestObjectController(unittest.TestCase):
headers={'X-Timestamp': timestamp2,
'Content-Type': 'application/x-test',
'X-Delete-At': delete_at_timestamp2})
resp = self.object_controller.POST(req)
resp = req.get_response(self.object_controller)
self.assertEquals(resp.status_int, 202)
self.assertEquals(
given_args, [
'PUT', int(delete_at_timestamp2), 'a', 'c', 'o',
req, 'sda1',
given_args[5], 'sda1',
'DELETE', int(delete_at_timestamp1), 'a', 'c', 'o',
req, 'sda1'])
given_args[5], 'sda1'])
def test_PUT_calls_delete_at(self):
given_args = []
@@ -2766,7 +2768,7 @@ class TestObjectController(unittest.TestCase):
'Content-Length': '4',
'Content-Type': 'application/octet-stream'})
req.body = 'TEST'
resp = self.object_controller.PUT(req)
resp = req.get_response(self.object_controller)
self.assertEquals(resp.status_int, 201)
self.assertEquals(given_args, [])
@@ -2780,12 +2782,12 @@ class TestObjectController(unittest.TestCase):
'Content-Type': 'application/octet-stream',
'X-Delete-At': delete_at_timestamp1})
req.body = 'TEST'
resp = self.object_controller.PUT(req)
resp = req.get_response(self.object_controller)
self.assertEquals(resp.status_int, 201)
self.assertEquals(
given_args, [
'PUT', int(delete_at_timestamp1), 'a', 'c', 'o',
req, 'sda1'])
given_args[5], 'sda1'])
while given_args:
given_args.pop()
@@ -2801,14 +2803,14 @@ class TestObjectController(unittest.TestCase):
'Content-Type': 'application/octet-stream',
'X-Delete-At': delete_at_timestamp2})
req.body = 'TEST'
resp = self.object_controller.PUT(req)
resp = req.get_response(self.object_controller)
self.assertEquals(resp.status_int, 201)
self.assertEquals(
given_args, [
'PUT', int(delete_at_timestamp2), 'a', 'c', 'o',
req, 'sda1',
given_args[5], 'sda1',
'DELETE', int(delete_at_timestamp1), 'a', 'c', 'o',
req, 'sda1'])
given_args[5], 'sda1'])
def test_GET_but_expired(self):
test_time = time() + 10000
@@ -3057,7 +3059,7 @@ class TestObjectController(unittest.TestCase):
self.assertEquals(resp.body, 'TEST')
objfile = os.path.join(
self.testdir, 'sda1',
storage_directory(diskfile.DATADIR, 'p',
storage_directory(diskfile.get_data_dir(0), 'p',
hash_path('a', 'c', 'o')),
test_timestamp + '.data')
self.assert_(os.path.isfile(objfile))
@@ -3177,7 +3179,6 @@ class TestObjectController(unittest.TestCase):
given_args.extend(args)
self.object_controller.delete_at_update = fake_delete_at_update
timestamp1 = normalize_timestamp(time())
delete_at_timestamp1 = int(time() + 1000)
delete_at_container1 = str(
@@ -3192,11 +3193,11 @@ class TestObjectController(unittest.TestCase):
'X-Delete-At': str(delete_at_timestamp1),
'X-Delete-At-Container': delete_at_container1})
req.body = 'TEST'
resp = self.object_controller.PUT(req)
resp = req.get_response(self.object_controller)
self.assertEquals(resp.status_int, 201)
self.assertEquals(given_args, [
'PUT', int(delete_at_timestamp1), 'a', 'c', 'o',
req, 'sda1'])
given_args[5], 'sda1'])
while given_args:
given_args.pop()
@@ -3208,11 +3209,11 @@ class TestObjectController(unittest.TestCase):
environ={'REQUEST_METHOD': 'DELETE'},
headers={'X-Timestamp': timestamp2,
'Content-Type': 'application/octet-stream'})
resp = self.object_controller.DELETE(req)
resp = req.get_response(self.object_controller)
self.assertEquals(resp.status_int, 204)
self.assertEquals(given_args, [
'DELETE', int(delete_at_timestamp1), 'a', 'c', 'o',
req, 'sda1'])
given_args[5], 'sda1'])
def test_PUT_delete_at_in_past(self):
req = Request.blank(
@@ -3439,7 +3440,8 @@ class TestObjectController(unittest.TestCase):
'wsgi.run_once': False}
method_res = mock.MagicMock()
mock_method = public(lambda x: mock.MagicMock(return_value=method_res))
mock_method = public(lambda x:
mock.MagicMock(return_value=method_res))
with mock.patch.object(self.object_controller, method,
new=mock_method):
response = self.object_controller.__call__(env, start_response)
@@ -3643,6 +3645,41 @@ class TestObjectController(unittest.TestCase):
[(('1.2.3.4 - - [01/Jan/1970:02:46:41 +0000] "HEAD /sda1/p/a/c/o" '
'404 - "-" "-" "-" 2.0000 "-"',), {})])
@patch_policies([storage_policy.StoragePolicy(0, 'zero', True),
storage_policy.StoragePolicy(1, 'one', False)])
def test_dynamic_datadir(self):
timestamp = normalize_timestamp(time())
req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': timestamp,
'Content-Type': 'application/x-test',
'Foo': 'fooheader',
'Baz': 'bazheader',
POLICY_INDEX: 1,
'X-Object-Meta-1': 'One',
'X-Object-Meta-Two': 'Two'})
req.body = 'VERIFY'
object_dir = self.testdir + "/sda1/objects-1"
self.assertFalse(os.path.isdir(object_dir))
resp = req.get_response(self.object_controller)
self.assertEquals(resp.status_int, 201)
self.assertTrue(os.path.isdir(object_dir))
# make sure no idx in header uses policy 0 data_dir
req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': timestamp,
'Content-Type': 'application/x-test',
'Foo': 'fooheader',
'Baz': 'bazheader',
'X-Object-Meta-1': 'One',
'X-Object-Meta-Two': 'Two'})
req.body = 'VERIFY'
object_dir = self.testdir + "/sda1/objects"
self.assertFalse(os.path.isdir(object_dir))
with mock.patch.object(POLICIES, 'get_by_index',
lambda _: True):
resp = req.get_response(self.object_controller)
self.assertEquals(resp.status_int, 201)
self.assertTrue(os.path.isdir(object_dir))
if __name__ == '__main__':
unittest.main()

View File

@@ -454,7 +454,7 @@ class TestReceiver(unittest.TestCase):
def test_MISSING_CHECK_have_one_exact(self):
object_dir = utils.storage_directory(
os.path.join(self.testdir, 'sda1', diskfile.DATADIR),
os.path.join(self.testdir, 'sda1', diskfile.DATADIR_BASE),
'1', self.hash1)
utils.mkdirs(object_dir)
fp = open(os.path.join(object_dir, self.ts1 + '.data'), 'w+')
@@ -485,7 +485,7 @@ class TestReceiver(unittest.TestCase):
def test_MISSING_CHECK_have_one_newer(self):
object_dir = utils.storage_directory(
os.path.join(self.testdir, 'sda1', diskfile.DATADIR),
os.path.join(self.testdir, 'sda1', diskfile.DATADIR_BASE),
'1', self.hash1)
utils.mkdirs(object_dir)
newer_ts1 = utils.normalize_timestamp(float(self.ts1) + 1)
@@ -518,7 +518,7 @@ class TestReceiver(unittest.TestCase):
def test_MISSING_CHECK_have_one_older(self):
object_dir = utils.storage_directory(
os.path.join(self.testdir, 'sda1', diskfile.DATADIR),
os.path.join(self.testdir, 'sda1', diskfile.DATADIR_BASE),
'1', self.hash1)
utils.mkdirs(object_dir)
older_ts1 = utils.normalize_timestamp(float(self.ts1) - 1)

View File

@@ -27,7 +27,7 @@ from distutils.dir_util import mkpath
from eventlet import spawn, Timeout, listen
from swift.obj import updater as object_updater
from swift.obj.diskfile import ASYNCDIR
from swift.obj.diskfile import ASYNCDIR_BASE
from swift.common.ring import RingData
from swift.common import utils
from swift.common.utils import hash_path, normalize_timestamp, mkdirs, \
@@ -80,11 +80,11 @@ class TestObjectUpdater(unittest.TestCase):
self.assert_(cu.get_container_ring() is not None)
def test_object_sweep(self):
prefix_dir = os.path.join(self.sda1, ASYNCDIR, 'abc')
prefix_dir = os.path.join(self.sda1, ASYNCDIR_BASE, 'abc')
mkpath(prefix_dir)
# A non-directory where directory is expected should just be skipped...
not_a_dir_path = os.path.join(self.sda1, ASYNCDIR, 'not_a_dir')
not_a_dir_path = os.path.join(self.sda1, ASYNCDIR_BASE, 'not_a_dir')
with open(not_a_dir_path, 'w'):
pass
@@ -134,7 +134,7 @@ class TestObjectUpdater(unittest.TestCase):
'concurrency': '1',
'node_timeout': '15'})
cu.run_once()
async_dir = os.path.join(self.sda1, ASYNCDIR)
async_dir = os.path.join(self.sda1, ASYNCDIR_BASE)
os.mkdir(async_dir)
cu.run_once()
self.assert_(os.path.exists(async_dir))
@@ -171,7 +171,7 @@ class TestObjectUpdater(unittest.TestCase):
'concurrency': '1',
'node_timeout': '15'})
cu.run_once()
async_dir = os.path.join(self.sda1, ASYNCDIR)
async_dir = os.path.join(self.sda1, ASYNCDIR_BASE)
os.mkdir(async_dir)
cu.run_once()
self.assert_(os.path.exists(async_dir))

View File

@@ -22,16 +22,19 @@ from swift.proxy import server as proxy_server
from swift.proxy.controllers.base import headers_to_account_info
from swift.common import constraints
from test.unit import fake_http_connect, FakeRing, FakeMemcache
from swift.common.storage_policy import StoragePolicy
from swift.common.request_helpers import get_sys_meta_prefix
import swift.proxy.controllers.base
from test.unit import patch_policies
@patch_policies([StoragePolicy(0, 'zero', True, object_ring=FakeRing())])
class TestAccountController(unittest.TestCase):
def setUp(self):
self.app = proxy_server.Application(None, FakeMemcache(),
account_ring=FakeRing(),
container_ring=FakeRing(),
object_ring=FakeRing())
self.app = proxy_server.Application(
None, FakeMemcache(),
account_ring=FakeRing(), container_ring=FakeRing())
def test_account_info_in_response_env(self):
controller = proxy_server.AccountController(self.app, 'AUTH_bob')

View File

@@ -22,10 +22,13 @@ from swift.proxy.controllers.base import headers_to_container_info, \
Controller, GetOrHeadHandler
from swift.common.swob import Request, HTTPException, HeaderKeyDict
from swift.common.utils import split_path
from swift.common.storage_policy import StoragePolicy
from test.unit import fake_http_connect, FakeRing, FakeMemcache
from swift.proxy import server as proxy_server
from swift.common.request_helpers import get_sys_meta_prefix
from test.unit import patch_policies
FakeResponse_status_int = 201
@@ -81,12 +84,12 @@ class FakeCache(object):
return self.val
@patch_policies([StoragePolicy(0, 'zero', True, object_ring=FakeRing())])
class TestFuncs(unittest.TestCase):
def setUp(self):
self.app = proxy_server.Application(None, FakeMemcache(),
account_ring=FakeRing(),
container_ring=FakeRing(),
object_ring=FakeRing)
container_ring=FakeRing())
def test_GETorHEAD_base(self):
base = Controller(self.app)

View File

@@ -33,8 +33,7 @@ class TestContainerController(TestRingBase):
TestRingBase.setUp(self)
self.app = proxy_server.Application(None, FakeMemcache(),
account_ring=FakeRing(),
container_ring=FakeRing(),
object_ring=FakeRing())
container_ring=FakeRing())
def test_container_info_in_response_env(self):
controller = proxy_server.ContainerController(self.app, 'a', 'c')

View File

@@ -23,6 +23,9 @@ import swift
from swift.proxy import server as proxy_server
from swift.common.swob import HTTPException
from test.unit import FakeRing, FakeMemcache, fake_http_connect, debug_logger
from swift.common.storage_policy import StoragePolicy
from test.unit import patch_policies
@contextmanager
@@ -40,23 +43,25 @@ def set_http_connect(*args, **kwargs):
swift.proxy.controllers.container.http_connect = old_connect
@patch_policies([StoragePolicy(0, 'zero', True,
object_ring=FakeRing(max_more_nodes=9))])
class TestObjControllerWriteAffinity(unittest.TestCase):
def setUp(self):
self.app = proxy_server.Application(
None, FakeMemcache(), account_ring=FakeRing(),
container_ring=FakeRing(), object_ring=FakeRing(max_more_nodes=9))
self.app.request_node_count = lambda replicas: 10000000
container_ring=FakeRing(), logger=debug_logger())
self.app.request_node_count = lambda ring: 10000000
self.app.sort_nodes = lambda l: l # stop shuffling the primary nodes
def test_iter_nodes_local_first_noops_when_no_affinity(self):
controller = proxy_server.ObjectController(self.app, 'a', 'c', 'o')
self.app.write_affinity_is_local_fn = None
all_nodes = self.app.object_ring.get_part_nodes(1)
all_nodes.extend(self.app.object_ring.get_more_nodes(1))
object_ring = self.app.get_object_ring(None)
all_nodes = object_ring.get_part_nodes(1)
all_nodes.extend(object_ring.get_more_nodes(1))
local_first_nodes = list(controller.iter_nodes_local_first(
self.app.object_ring, 1))
object_ring, 1))
self.maxDiff = None
@@ -68,11 +73,12 @@ class TestObjControllerWriteAffinity(unittest.TestCase):
lambda node: node['region'] == 1)
self.app.write_affinity_node_count = lambda ring: 4
all_nodes = self.app.object_ring.get_part_nodes(1)
all_nodes.extend(self.app.object_ring.get_more_nodes(1))
object_ring = self.app.get_object_ring(None)
all_nodes = object_ring.get_part_nodes(1)
all_nodes.extend(object_ring.get_more_nodes(1))
local_first_nodes = list(controller.iter_nodes_local_first(
self.app.object_ring, 1))
object_ring, 1))
# the local nodes move up in the ordering
self.assertEqual([1, 1, 1, 1],
@@ -90,14 +96,14 @@ class TestObjControllerWriteAffinity(unittest.TestCase):
self.assertTrue(res is None)
@patch_policies([StoragePolicy(0, 'zero', True, object_ring=FakeRing())])
class TestObjController(unittest.TestCase):
def setUp(self):
logger = debug_logger('proxy-server')
logger.thread_locals = ('txn1', '127.0.0.2')
self.app = proxy_server.Application(
None, FakeMemcache(), account_ring=FakeRing(),
container_ring=FakeRing(), object_ring=FakeRing(),
logger=logger)
container_ring=FakeRing(), logger=logger)
self.controller = proxy_server.ObjectController(self.app,
'a', 'c', 'o')
self.controller.container_info = mock.MagicMock(return_value={
@@ -109,6 +115,7 @@ class TestObjController(unittest.TestCase):
],
'write_acl': None,
'read_acl': None,
'storage_policy': None,
'sync_key': None,
'versions': None})
@@ -155,23 +162,35 @@ class TestObjController(unittest.TestCase):
resp = self.controller.DELETE(req)
self.assertEquals(resp.status_int, 204)
def test_POST_simple(self):
def test_POST_as_COPY_simple(self):
req = swift.common.swob.Request.blank('/v1/a/c/o')
with set_http_connect(200, 200, 200, 201, 201, 201):
with set_http_connect(200, 200, 200, 201, 201, 201) as fake_conn:
resp = self.controller.POST(req)
self.assertRaises(StopIteration, fake_conn.code_iter.next)
self.assertEquals(resp.status_int, 202)
def test_COPY_simple(self):
req = swift.common.swob.Request.blank('/v1/a/c/o')
with set_http_connect(200, 200, 200, 201, 201, 201):
resp = self.controller.POST(req)
self.assertEquals(resp.status_int, 202)
req = swift.common.swob.Request.blank(
'/v1/a/c/o', headers={'Content-Length': 0,
'Destination': 'c/o-copy'})
with set_http_connect(200, 200, 200, 201, 201, 201) as fake_conn:
resp = self.controller.COPY(req)
self.assertRaises(StopIteration, fake_conn.code_iter.next)
self.assertEquals(resp.status_int, 201)
def test_HEAD_simple(self):
req = swift.common.swob.Request.blank('/v1/a/c/o')
with set_http_connect(200, 200, 200, 201, 201, 201):
resp = self.controller.POST(req)
self.assertEquals(resp.status_int, 202)
with set_http_connect(200):
resp = self.controller.HEAD(req)
self.assertEquals(resp.status_int, 200)
def test_HEAD_x_newest(self):
req = swift.common.swob.Request.blank('/v1/a/c/o',
headers={'X-Newest': 'true'})
with set_http_connect(200, 200, 200) as fake_conn:
resp = self.controller.HEAD(req)
self.assertRaises(StopIteration, fake_conn.code_iter.next)
self.assertEquals(resp.status_int, 200)
def test_PUT_log_info(self):
# mock out enough to get to the area of the code we want to test

File diff suppressed because it is too large Load Diff