Merge "proxy: remove x-backend-record-type=shard in object listing"
Commit 7a3124d82d
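Summary, as read from the diff below: the proxy used to default the backend record-type header to 'auto' and then pass whatever the container server returned (including X-Backend-Record-Type: shard) straight through to the client on object listings. The proxy now remembers the record type the client originally asked for and strips the header from the response unless the client explicitly requested 'object' or 'shard' records. A minimal sketch of the new control flow, paraphrasing the proxy change below (illustrative, not the verbatim patch):

    # proxy-side normalization of X-Backend-Record-Type
    orig_record_type = req.headers.get('X-Backend-Record-Type', '').lower()
    if orig_record_type in ('object', 'shard'):
        record_type = orig_record_type   # honour an explicit request
    else:
        record_type = 'auto'             # absent or unrecognized value
        req.headers['X-Backend-Record-Type'] = 'auto'
    ...
    # after the backend GET (and any recursion into the shards):
    if orig_record_type not in ('object', 'shard'):
        resp.headers.pop('X-Backend-Record-Type', None)  # internal detail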
swift/proxy/controllers/container.py

@@ -342,10 +342,15 @@ class ContainerController(Controller):
         # the setter must be called for new params to update the query string.
         params = req.params
         params['format'] = 'json'
-        # x-backend-record-type may be sent via internal client e.g. from
-        # the sharder or in probe tests
-        record_type = req.headers.get('X-Backend-Record-Type', '').lower()
-        if not record_type:
+        # x-backend-record-type may be sent via internal client e.g. from the
+        # sharder, or by the proxy itself when making a recursive request, or
+        # in probe tests. If the header is present then the only values that
+        # the proxy respects are 'object' or 'shard'. However, the proxy may
+        # use the value 'auto' when making requests to container server.
+        orig_record_type = req.headers.get('X-Backend-Record-Type', '').lower()
+        if orig_record_type in ('object', 'shard'):
+            record_type = orig_record_type
+        else:
             record_type = 'auto'
             req.headers['X-Backend-Record-Type'] = 'auto'
             params['states'] = 'listing'
@@ -391,6 +396,9 @@ class ContainerController(Controller):
                 resp_record_type.lower() == 'shard')):
             resp = self._get_from_shards(req, resp)
 
+        if orig_record_type not in ('object', 'shard'):
+            resp.headers.pop('X-Backend-Record-Type', None)
+
         if not config_true_value(
                 resp.headers.get('X-Backend-Cached-Results')):
             # Cache container metadata. We just made a request to a storage
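The probe-test changes that follow pin down the client-visible contract: a GET with no X-Backend-Record-Type (or an unrecognized value such as 'banana') returns an object listing with the header stripped, while an explicit 'object' or 'shard' value is echoed back. A hedged usage sketch, reusing the InternalClient.make_request call shape from the helpers below (paths illustrative):

    # no explicit record type: the proxy strips the backend header
    resp = internal_client.make_request(
        'GET', path + '?format=json', {}, [200])
    assert 'X-Backend-Record-Type' not in resp.headers

    # explicit 'shard': the header is echoed back with the shard ranges
    resp = internal_client.make_request(
        'GET', path + '?format=json',
        {'X-Backend-Record-Type': 'shard'}, [200])
    assert resp.headers['X-Backend-Record-Type'] == 'shard'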
test/probe/test_sharder.py

@@ -24,11 +24,12 @@ import six
 from six.moves.urllib.parse import quote
 
 from swift.common import direct_client, utils
+from swift.common.header_key_dict import HeaderKeyDict
 from swift.common.internal_client import UnexpectedResponse
 from swift.common.manager import Manager
 from swift.common.memcached import MemcacheRing
 from swift.common.utils import ShardRange, parse_db_filename, get_db_files, \
-    quorum_size, config_true_value, Timestamp, md5
+    quorum_size, config_true_value, Timestamp, md5, Namespace
 from swift.container.backend import ContainerBroker, UNSHARDED, SHARDING, \
     SHARDED
 from swift.container.sharder import CleavingContext, ContainerSharder
@@ -173,16 +174,35 @@ class BaseTestContainerSharding(ReplProbeTest):
         else:
             conn.delete_object(self.container_name, obj)
 
-    def get_container_shard_ranges(self, account=None, container=None,
-                                   include_deleted=False):
+    def get_container_listing(self, account=None, container=None,
+                              headers=None, params=None):
         account = account if account else self.account
         container = container if container else self.container_to_shard
         path = self.internal_client.make_path(account, container)
-        headers = {'X-Backend-Record-Type': 'shard'}
-        if include_deleted:
-            headers['X-Backend-Include-Deleted'] = 'true'
-        resp = self.internal_client.make_request(
-            'GET', path + '?format=json', headers, [200])
+        headers = headers or {}
+        return self.internal_client.make_request(
+            'GET', path + '?format=json', headers, [200], params=params)
+
+    def get_container_objects(self, account=None, container=None,
+                              headers=None, params=None):
+        headers = HeaderKeyDict(headers) if headers else {}
+        resp = self.get_container_listing(account, container, headers,
+                                          params=params)
+        req_record_type = headers.get('X-Backend-Record-Type')
+        resp_record_type = resp.headers.get('X-Backend-Record-Type')
+        if req_record_type and req_record_type.lower() == 'object':
+            self.assertEqual('object', resp_record_type)
+        else:
+            self.assertIsNone(resp_record_type)
+        return json.loads(resp.body)
+
+    def get_container_shard_ranges(self, account=None, container=None,
+                                   headers=None, params=None):
+        headers = dict(headers) if headers else {}
+        headers.update({'X-Backend-Record-Type': 'shard'})
+        resp = self.get_container_listing(account, container, headers,
+                                          params=params)
+        self.assertEqual('shard', resp.headers.get('X-Backend-Record-Type'))
         return [ShardRange.from_dict(sr) for sr in json.loads(resp.body)]
 
     def direct_get_container_shard_ranges(self, account=None, container=None,
@@ -386,6 +406,10 @@ class BaseTestContainerSharding(ReplProbeTest):
                 expected_state, headers['X-Backend-Sharding-State'])
             return [ShardRange.from_dict(sr) for sr in shard_ranges]
 
+    def assert_container_states(self, expected_state, num_shard_ranges):
+        for node in self.brain.nodes:
+            self.assert_container_state(node, expected_state,
+                                        num_shard_ranges)
+
     def assert_subprocess_success(self, cmd_args):
         try:
             return subprocess.check_output(cmd_args, stderr=subprocess.STDOUT)
@@ -434,6 +458,15 @@ class BaseTestContainerSharding(ReplProbeTest):
         return self.run_custom_daemon(ContainerSharder, 'container-sharder',
                                       conf_index, custom_conf, **kwargs)
 
+    def sharders_once_non_auto(self, **kwargs):
+        # inhibit auto_sharding regardless of the config setting
+        additional_args = kwargs.get('additional_args', [])
+        if not isinstance(additional_args, list):
+            additional_args = [additional_args]
+        additional_args.append('--no-auto-shard')
+        kwargs['additional_args'] = additional_args
+        self.sharders.once(**kwargs)
+
 
 class BaseAutoContainerSharding(BaseTestContainerSharding):
 
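Note: sharders_once_non_auto replicates the sharders_once helper that TestManagedContainerSharding used to define (removed later in this diff), hoisted into the base class so that any test, not just the managed-sharding ones, can run the sharder daemons with auto-sharding inhibited, e.g.:

    self.sharders_once_non_auto(
        additional_args='--partitions=%s' % self.brain.part)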
@@ -553,8 +586,7 @@ class TestContainerShardingNonUTF8(BaseAutoContainerSharding):
                 number=n, additional_args='--partitions=%s' % self.brain.part)
 
         # sanity check shard range states
-        for node in self.brain.nodes:
-            self.assert_container_state(node, 'sharding', 4)
+        self.assert_container_states('sharding', 4)
         shard_ranges = self.get_container_shard_ranges()
         self.assertLengthEqual(shard_ranges, 4)
         self.assert_shard_range_state(ShardRange.CLEAVED, shard_ranges[:2])
@@ -584,8 +616,7 @@ class TestContainerShardingNonUTF8(BaseAutoContainerSharding):
 
         # run all the sharders again and the last two shard ranges get cleaved
         self.sharders.once(additional_args='--partitions=%s' % self.brain.part)
-        for node in self.brain.nodes:
-            self.assert_container_state(node, 'sharded', 4)
+        self.assert_container_states('sharded', 4)
         shard_ranges = self.get_container_shard_ranges()
         self.assert_shard_range_state(ShardRange.ACTIVE, shard_ranges)
 
@@ -839,8 +870,7 @@ class TestContainerShardingObjectVersioning(BaseAutoContainerSharding):
                 number=n, additional_args='--partitions=%s' % self.brain.part)
 
         # sanity check shard range states
-        for node in self.brain.nodes:
-            self.assert_container_state(node, 'sharding', 4)
+        self.assert_container_states('sharding', 4)
         shard_ranges = self.get_container_shard_ranges()
         self.assertLengthEqual(shard_ranges, 4)
         self.assert_shard_range_state(ShardRange.CLEAVED, shard_ranges[:2])
@@ -869,8 +899,7 @@ class TestContainerShardingObjectVersioning(BaseAutoContainerSharding):
 
         # run all the sharders again and the last two shard ranges get cleaved
         self.sharders.once(additional_args='--partitions=%s' % self.brain.part)
-        for node in self.brain.nodes:
-            self.assert_container_state(node, 'sharded', 4)
+        self.assert_container_states('sharded', 4)
         shard_ranges = self.get_container_shard_ranges()
         self.assert_shard_range_state(ShardRange.ACTIVE, shard_ranges)
 
@@ -1918,8 +1947,7 @@ class TestContainerSharding(BaseAutoContainerSharding):
         self.sharders.once(
             number=n, additional_args='--partitions=%s' % self.brain.part)
         # sanity checks
-        for node in self.brain.nodes:
-            self.assert_container_state(node, 'sharded', 2)
+        self.assert_container_states('sharded', 2)
         self.assert_container_delete_fails()
         self.assert_container_has_shard_sysmeta()
         self.assert_container_post_ok('sharded')
@@ -2255,8 +2283,7 @@ class TestContainerSharding(BaseAutoContainerSharding):
         self.sharders.once(
             number=n, additional_args='--partitions=%s' % self.brain.part)
         # sanity checks
-        for node in self.brain.nodes:
-            self.assert_container_state(node, 'sharded', 2)
+        self.assert_container_states('sharded', 2)
         self.assert_container_delete_fails()
         self.assert_container_has_shard_sysmeta()
         self.assert_container_post_ok('sharded')
@@ -2373,8 +2400,7 @@ class TestContainerSharding(BaseAutoContainerSharding):
         self.sharders.once(
             number=n, additional_args='--partitions=%s' % self.brain.part)
         # sanity checks
-        for node in self.brain.nodes:
-            self.assert_container_state(node, 'sharded', 2)
+        self.assert_container_states('sharded', 2)
         self.assert_container_delete_fails()
         self.assert_container_has_shard_sysmeta()
         self.assert_container_post_ok('sharded')
@@ -2506,8 +2532,7 @@ class TestContainerSharding(BaseAutoContainerSharding):
         self.sharders.once(
             number=n, additional_args='--partitions=%s' % self.brain.part)
         # sanity checks
-        for node in self.brain.nodes:
-            self.assert_container_state(node, 'sharded', 2)
+        self.assert_container_states('sharded', 2)
         self.assert_container_delete_fails()
         self.assert_container_has_shard_sysmeta()
         self.assert_container_post_ok('sharded')
@@ -2620,8 +2645,7 @@ class TestContainerSharding(BaseAutoContainerSharding):
         self.sharders.once(
             number=n, additional_args='--partitions=%s' % self.brain.part)
         # sanity checks
-        for node in self.brain.nodes:
-            self.assert_container_state(node, 'sharded', 2)
+        self.assert_container_states('sharded', 2)
         self.assert_container_delete_fails()
         self.assert_container_has_shard_sysmeta()
         self.assert_container_post_ok('sharded')
@@ -2860,8 +2884,7 @@ class TestContainerSharding(BaseAutoContainerSharding):
         for node in self.brain.nodes[1:]:
             self.assert_container_state(node, 'sharding', 3)
         self.sharders.once()
-        for node in self.brain.nodes:
-            self.assert_container_state(node, 'sharded', 3)
+        self.assert_container_states('sharded', 3)
 
         self.assert_container_listing(obj_names)
 
@@ -2900,6 +2923,137 @@ class TestContainerSharding(BaseAutoContainerSharding):
         self.assertEqual(0, int(metadata.get('x-account-bytes-used')))
 
 
+class TestShardedAPI(BaseTestContainerSharding):
+    def _assert_namespace_equivalence(
+            self, namespaces_list, other_namespaces_list):
+        # verify given lists are equivalent when cast to Namespaces
+        self.assertEqual(len(namespaces_list), len(other_namespaces_list))
+        self.assertEqual(
+            [Namespace(sr.name, sr.lower, sr.upper)
+             for sr in namespaces_list],
+            [Namespace(sr.name, sr.lower, sr.upper)
+             for sr in other_namespaces_list])
+
+    def test_GET(self):
+        all_obj_names = self._make_object_names(10)
+        self.put_objects(all_obj_names)
+
+        # unsharded container
+        objs = self.get_container_objects()
+        self.assertEqual(all_obj_names, [obj['name'] for obj in objs])
+
+        objs = self.get_container_objects(
+            headers={'X-Backend-Record-Type': 'auto'})
+        self.assertEqual(all_obj_names, [obj['name'] for obj in objs])
+
+        objs = self.get_container_objects(
+            headers={'X-Backend-Record-Type': 'object'})
+        self.assertEqual(all_obj_names, [obj['name'] for obj in objs])
+
+        objs = self.get_container_objects(
+            headers={'X-Backend-Record-Type': 'banana'})
+        self.assertEqual(all_obj_names, [obj['name'] for obj in objs])
+
+        shard_ranges = self.get_container_shard_ranges()
+        self.assertFalse(shard_ranges)
+
+        # Shard the container
+        client.post_container(self.url, self.admin_token, self.container_name,
+                              headers={'X-Container-Sharding': 'on'})
+        self.assert_subprocess_success([
+            'swift-manage-shard-ranges',
+            self.get_db_file(self.brain.part, self.brain.nodes[0]),
+            'find_and_replace', '5', '--enable', '--minimum-shard-size', '5'])
+        self.replicators.once()
+        # "Run container-sharder on all nodes to shard the container."
+        # first pass cleaves 2 shards
+        self.sharders_once_non_auto(
+            additional_args='--partitions=%s' % self.brain.part)
+        # sanity check
+        self.assert_container_states('sharded', 2)
+
+        orig_shard_ranges = self.get_container_shard_ranges()
+        self.assertEqual(2, len(orig_shard_ranges))
+
+        # the container is sharded so *all* shard ranges should satisfy
+        # updating and listing state aliases
+        shard_ranges = self.get_container_shard_ranges(
+            params={'states': 'updating'})
+        self._assert_namespace_equivalence(orig_shard_ranges, shard_ranges)
+
+        # XXX the states=listing param provokes the proxy to cache the backend
+        # values and then respond to the client with the cached *namespaces* !!
+        # shard_ranges = self.get_container_shard_ranges(
+        #     params={'states': 'listing'})
+        # self._assert_namespace_equivalence(orig_shard_ranges, shard_ranges)
+
+        # XXX ditto...
+        # shard_ranges = self.get_container_shard_ranges(
+        #     headers={'X-Newest': 'true'},
+        #     params={'states': 'listing'})
+        # self._assert_namespace_equivalence(orig_shard_ranges, shard_ranges)
+
+        # this is what the sharder requests...
+        shard_ranges = self.get_container_shard_ranges(
+            headers={'X-Newest': 'true'},
+            params={'states': 'auditing'})
+        own_ns = Namespace('%s/%s' % (self.account, self.container_name),
+                           lower='', upper='')
+        self._assert_namespace_equivalence(orig_shard_ranges + [own_ns],
+                                           shard_ranges)
+
+        shard_ranges = self.get_container_shard_ranges(
+            params={'includes': all_obj_names[1]})
+        self._assert_namespace_equivalence(orig_shard_ranges[:1], shard_ranges)
+
+        shard_ranges = self.get_container_shard_ranges(
+            # override 'includes'
+            headers={'X-Backend-Override-Shard-Name-Filter': 'sharded'},
+            params={'includes': all_obj_names[1]})
+        self._assert_namespace_equivalence(orig_shard_ranges, shard_ranges)
+
+        shard_ranges = self.get_container_shard_ranges(
+            params={'end_marker': all_obj_names[1]})
+        self._assert_namespace_equivalence(orig_shard_ranges[:1], shard_ranges)
+
+        shard_ranges = self.get_container_shard_ranges(
+            # override 'end_marker'
+            headers={'X-Backend-Override-Shard-Name-Filter': 'sharded'},
+            params={'end_marker': all_obj_names[1]})
+        self._assert_namespace_equivalence(orig_shard_ranges, shard_ranges)
+
+        shard_ranges = self.get_container_shard_ranges(
+            params={'reverse': 'true'})
+        self._assert_namespace_equivalence(list(reversed(orig_shard_ranges)),
+                                           shard_ranges)
+
+        shard_ranges = self.get_container_shard_ranges(
+            # override 'reverse'
+            headers={'X-Backend-Override-Shard-Name-Filter': 'sharded'},
+            params={'reverse': 'true'})
+        self._assert_namespace_equivalence(orig_shard_ranges, shard_ranges)
+
+        objs = self.get_container_objects()
+        self.assertEqual(all_obj_names, [obj['name'] for obj in objs])
+
+        objs = self.get_container_objects(
+            headers={'X-Newest': 'true'})
+        self.assertEqual(all_obj_names, [obj['name'] for obj in objs])
+
+        objs = self.get_container_objects(
+            headers={'X-Backend-Record-Type': 'auto'})
+        self.assertEqual(all_obj_names, [obj['name'] for obj in objs])
+
+        objs = self.get_container_objects(
+            headers={'X-Backend-Record-Type': 'banana'})
+        self.assertEqual(all_obj_names, [obj['name'] for obj in objs])
+
+        # note: explicitly asking for the root object rows, but it has None
+        objs = self.get_container_objects(
+            headers={'X-Backend-Record-Type': 'object'})
+        self.assertEqual([], objs)
+
+
 class TestContainerShardingMoreUTF8(TestContainerSharding):
     def _make_object_names(self, number):
         # override default with names that include non-ascii chars
@@ -2925,16 +3079,7 @@ class TestContainerShardingMoreUTF8(TestContainerSharding):
 
 
 class TestManagedContainerSharding(BaseTestContainerSharding):
-    '''Test sharding using swift-manage-shard-ranges'''
-
-    def sharders_once(self, **kwargs):
-        # inhibit auto_sharding regardless of the config setting
-        additional_args = kwargs.get('additional_args', [])
-        if not isinstance(additional_args, list):
-            additional_args = [additional_args]
-        additional_args.append('--no-auto-shard')
-        kwargs['additional_args'] = additional_args
-        self.sharders.once(**kwargs)
+    """Test sharding using swift-manage-shard-ranges"""
 
     def test_manage_shard_ranges(self):
         obj_names = self._make_object_names(10)
@@ -2948,8 +3093,9 @@ class TestManagedContainerSharding(BaseTestContainerSharding):
 
         # sanity check: we don't have nearly enough objects for this to shard
         # automatically
-        self.sharders_once(number=self.brain.node_numbers[0],
-                           additional_args='--partitions=%s' % self.brain.part)
+        self.sharders_once_non_auto(
+            number=self.brain.node_numbers[0],
+            additional_args='--partitions=%s' % self.brain.part)
         self.assert_container_state(self.brain.nodes[0], 'unsharded', 0)
 
         self.assert_subprocess_success([
@@ -2962,7 +3108,8 @@ class TestManagedContainerSharding(BaseTestContainerSharding):
         self.replicators.once()
         # "Run container-sharder on all nodes to shard the container."
         # first pass cleaves 2 shards
-        self.sharders_once(additional_args='--partitions=%s' % self.brain.part)
+        self.sharders_once_non_auto(
+            additional_args='--partitions=%s' % self.brain.part)
         self.assert_container_state(self.brain.nodes[0], 'sharding', 3)
         self.assert_container_state(self.brain.nodes[1], 'sharding', 3)
         shard_ranges = self.assert_container_state(
@@ -2972,7 +3119,8 @@ class TestManagedContainerSharding(BaseTestContainerSharding):
         # make the un-cleaved shard update the root container...
         self.assertEqual([3, 3, 4], [sr.object_count for sr in shard_ranges])
         shard_part, nodes = self.get_part_and_node_numbers(shard_ranges[2])
-        self.sharders_once(additional_args='--partitions=%s' % shard_part)
+        self.sharders_once_non_auto(
+            additional_args='--partitions=%s' % shard_part)
         shard_ranges = self.assert_container_state(
             self.brain.nodes[2], 'sharding', 3)
         # ...it does not report zero-stats despite being empty, because it has
@@ -2980,7 +3128,8 @@ class TestManagedContainerSharding(BaseTestContainerSharding):
         self.assertEqual([3, 3, 4], [sr.object_count for sr in shard_ranges])
 
         # second pass cleaves final shard
-        self.sharders_once(additional_args='--partitions=%s' % self.brain.part)
+        self.sharders_once_non_auto(
+            additional_args='--partitions=%s' % self.brain.part)
 
         # Everybody's settled
         self.assert_container_state(self.brain.nodes[0], 'sharded', 3)
@@ -3006,11 +3155,11 @@ class TestManagedContainerSharding(BaseTestContainerSharding):
         self.assert_container_state(self.brain.nodes[0], 'unsharded', 4)
         self.replicators.once()
         # run sharders twice to cleave all 4 shard ranges
-        self.sharders_once(additional_args='--partitions=%s' % self.brain.part)
-        self.sharders_once(additional_args='--partitions=%s' % self.brain.part)
-        self.assert_container_state(self.brain.nodes[0], 'sharded', 4)
-        self.assert_container_state(self.brain.nodes[1], 'sharded', 4)
-        self.assert_container_state(self.brain.nodes[2], 'sharded', 4)
+        self.sharders_once_non_auto(
+            additional_args='--partitions=%s' % self.brain.part)
+        self.sharders_once_non_auto(
+            additional_args='--partitions=%s' % self.brain.part)
+        self.assert_container_states('sharded', 4)
         self.assert_container_listing(obj_names)
 
         # now compact some ranges; use --max-shrinking to allow 2 shrinking
@@ -3025,7 +3174,7 @@ class TestManagedContainerSharding(BaseTestContainerSharding):
         self.assertEqual([ShardRange.SHRINKING] * 2 + [ShardRange.ACTIVE] * 2,
                          [sr.state for sr in shard_ranges])
         self.replicators.once()
-        self.sharders_once()
+        self.sharders_once_non_auto()
         # check there's now just 2 remaining shard ranges
         shard_ranges = self.assert_container_state(
             self.brain.nodes[0], 'sharded', 2)
@@ -3051,7 +3200,7 @@ class TestManagedContainerSharding(BaseTestContainerSharding):
         self.assertEqual([ShardRange.SHRINKING] * 2,
                          [sr.state for sr in shard_ranges])
         self.replicators.once()
-        self.sharders_once()
+        self.sharders_once_non_auto()
         self.assert_container_state(self.brain.nodes[0], 'collapsed', 0)
         self.assert_container_listing(obj_names, req_hdrs={'X-Newest': 'True'})
 
@@ -3100,8 +3249,9 @@ class TestManagedContainerSharding(BaseTestContainerSharding):
         # proceeds to cleave shard 0.0, but after 0.0 cleaving stalls because
         # next in iteration is shard range 1.0 in FOUND state from the other
         # replica that it cannot yet cleave.
-        self.sharders_once(number=self.brain.node_numbers[0],
-                           additional_args='--partitions=%s' % self.brain.part)
+        self.sharders_once_non_auto(
+            number=self.brain.node_numbers[0],
+            additional_args='--partitions=%s' % self.brain.part)
 
         # On first pass the second replica passes audit (it has its own found
         # ranges and the first replica's created shard ranges but none in the
@@ -3109,8 +3259,9 @@ class TestManagedContainerSharding(BaseTestContainerSharding):
         # with the other replicas. All of the 7 shard ranges on this replica
         # are now in CREATED state so it proceeds to cleave the first two shard
         # ranges, 0.1 and 1.0.
-        self.sharders_once(number=self.brain.node_numbers[1],
-                           additional_args='--partitions=%s' % self.brain.part)
+        self.sharders_once_non_auto(
+            number=self.brain.node_numbers[1],
+            additional_args='--partitions=%s' % self.brain.part)
         self.replicators.once()
 
         # Uh-oh
@@ -3119,9 +3270,11 @@ class TestManagedContainerSharding(BaseTestContainerSharding):
         # There's a race: the third replica may be sharding, may be unsharded
 
         # Try it again a few times
-        self.sharders_once(additional_args='--partitions=%s' % self.brain.part)
+        self.sharders_once_non_auto(
+            additional_args='--partitions=%s' % self.brain.part)
         self.replicators.once()
-        self.sharders_once(additional_args='--partitions=%s' % self.brain.part)
+        self.sharders_once_non_auto(
+            additional_args='--partitions=%s' % self.brain.part)
 
         # It's not really fixing itself... the sharder audit will detect
         # overlapping ranges which prevents cleaving proceeding; expect the
@@ -3182,8 +3335,8 @@ class TestManagedContainerSharding(BaseTestContainerSharding):
         # and the third replica may have cleaved no shards. We therefore need
         # two more passes of the sharder to get to a predictable state where
         # all replicas have cleaved all three 0.* shards.
-        self.sharders_once()
-        self.sharders_once()
+        self.sharders_once_non_auto()
+        self.sharders_once_non_auto()
 
         # now we expect all replicas to have just the three 1.* shards, with
         # the 0.* shards all deleted
@@ -3230,7 +3383,7 @@ class TestManagedContainerSharding(BaseTestContainerSharding):
         # because of the older epoch_0 in its db filename will now start to
         # shard again with a newer epoch_1 db, and will start to re-cleave the
         # 3 active shards, albeit with zero objects to cleave.
-        self.sharders_once()
+        self.sharders_once_non_auto()
         for node in (0, 1, 2):
             with annotate_failure('node %s' % node):
                 broker = self.get_broker(self.brain.part,
@@ -3274,7 +3427,7 @@ class TestManagedContainerSharding(BaseTestContainerSharding):
         # to exist on the same node as the root because the roots cleaving
         # process doesn't think that it created the shard db and will therefore
         # replicate it as per a normal cleave.
-        self.sharders_once()
+        self.sharders_once_non_auto()
         for node in (0, 1, 2):
             with annotate_failure('node %s' % node):
                 broker = self.get_broker(self.brain.part,
@@ -3311,11 +3464,13 @@ class TestManagedContainerSharding(BaseTestContainerSharding):
             'find_and_replace', '4', '--enable'])
         self.replicators.once()
         # cleave first two shards
-        self.sharders_once(additional_args='--partitions=%s' % self.brain.part)
+        self.sharders_once_non_auto(
+            additional_args='--partitions=%s' % self.brain.part)
         # cleave third shard
-        self.sharders_once(additional_args='--partitions=%s' % self.brain.part)
+        self.sharders_once_non_auto(
+            additional_args='--partitions=%s' % self.brain.part)
         # ensure all shards learn their ACTIVE state from root
-        self.sharders_once()
+        self.sharders_once_non_auto()
         for node in (0, 1, 2):
             with annotate_failure('node %d' % node):
                 shard_ranges = self.assert_container_state(
@@ -3368,9 +3523,12 @@ class TestManagedContainerSharding(BaseTestContainerSharding):
         self.replicators.once()
 
         # try hard to shard the shard...
-        self.sharders_once(additional_args='--partitions=%s' % shard_1_part)
-        self.sharders_once(additional_args='--partitions=%s' % shard_1_part)
-        self.sharders_once(additional_args='--partitions=%s' % shard_1_part)
+        self.sharders_once_non_auto(
+            additional_args='--partitions=%s' % shard_1_part)
+        self.sharders_once_non_auto(
+            additional_args='--partitions=%s' % shard_1_part)
+        self.sharders_once_non_auto(
+            additional_args='--partitions=%s' % shard_1_part)
         # sharding hasn't completed and there's overlaps in the shard and root:
         # the sub-shards will have been cleaved in the order listed above, but
         # sub-shards (10 -12) and one of (12 - 14) will be overlooked because
@@ -3403,8 +3561,8 @@ class TestManagedContainerSharding(BaseTestContainerSharding):
             ['swift-manage-shard-ranges', db_file, 'repair', '--yes',
              '--min-shard-age', '0'])
         self.replicators.once()
-        self.sharders_once()
-        self.sharders_once()
+        self.sharders_once_non_auto()
+        self.sharders_once_non_auto()
 
         # check root now has just 5 shard ranges
         root_shard_ranges = self.get_container_shard_ranges()
@@ -3416,7 +3574,7 @@ class TestManagedContainerSharding(BaseTestContainerSharding):
         # because the sub-shards report their state to the root; we cannot make
         # assertions about shrunk states in shard_1's shard range table)
         root_shard_ranges = self.get_container_shard_ranges(
-            include_deleted=True)
+            headers={'X-Backend-Include-Deleted': 'true'})
         self.assertEqual(10, len(root_shard_ranges), root_shard_ranges)
         shrunk_shard_ranges = [sr for sr in root_shard_ranges
                                if sr.state == ShardRange.SHRUNK]
@@ -3452,13 +3610,12 @@ class TestManagedContainerSharding(BaseTestContainerSharding):
             'find_and_replace', '2', '--enable'])
         self.container_replicators.once(
             additional_args='--partitions=%s' % self.brain.part)
-        for node in self.brain.nodes:
-            self.assert_container_state(node, 'unsharded', 2)
-        self.sharders_once(additional_args='--partitions=%s' % self.brain.part)
+        self.assert_container_states('unsharded', 2)
+        self.sharders_once_non_auto(
+            additional_args='--partitions=%s' % self.brain.part)
         # get shards to update state from parent...
-        self.sharders_once()
-        for node in self.brain.nodes:
-            self.assert_container_state(node, 'sharded', 2)
+        self.sharders_once_non_auto()
+        self.assert_container_states('sharded', 2)
 
         # sanity check, all is well
         msg = self.assert_subprocess_success([
@@ -3483,9 +3640,10 @@ class TestManagedContainerSharding(BaseTestContainerSharding):
             self.assert_container_state(
                 node, 'unsharded', 2, account=shard_ranges[0].account,
                 container=shard_ranges[0].container, part=shard_part)
-        self.sharders_once(additional_args='--partitions=%s' % shard_part)
+        self.sharders_once_non_auto(
+            additional_args='--partitions=%s' % shard_part)
         # get shards to update state from parent...
-        self.sharders_once()
+        self.sharders_once_non_auto()
         for node in exclude_nodes(shard_nodes, self.brain.nodes[0]):
             self.assert_container_state(
                 node, 'sharded', 2, account=shard_ranges[0].account,
@@ -3528,7 +3686,8 @@ class TestManagedContainerSharding(BaseTestContainerSharding):
         # TODO: how can we work around this?
         self.assertNotEqual(sub_shard_part, shard_part,
                             'You were unlucky, try again')
-        self.sharders_once(additional_args='--partitions=%s' % sub_shard_part)
+        self.sharders_once_non_auto(
+            additional_args='--partitions=%s' % sub_shard_part)
 
         # now root node 0 has the original shards plus one of the sub-shards
         # but all are active :(
@@ -3558,8 +3717,8 @@ class TestManagedContainerSharding(BaseTestContainerSharding):
              for sr in root_brokers[0].get_shard_ranges(include_deleted=True)])
 
         # the transient overlap is 'fixed' in subsequent sharder cycles...
-        self.sharders_once()
-        self.sharders_once()
+        self.sharders_once_non_auto()
+        self.sharders_once_non_auto()
         self.container_replicators.once()
 
         for broker in root_brokers:
@@ -3593,13 +3752,12 @@ class TestManagedContainerSharding(BaseTestContainerSharding):
             'find_and_replace', '2', '--enable'])
         self.container_replicators.once(
             additional_args='--partitions=%s' % self.brain.part)
-        for node in self.brain.nodes:
-            self.assert_container_state(node, 'unsharded', 4)
-        self.sharders_once(additional_args='--partitions=%s' % self.brain.part)
+        self.assert_container_states('unsharded', 4)
+        self.sharders_once_non_auto(
+            additional_args='--partitions=%s' % self.brain.part)
         # get shards to update state from parent...
-        self.sharders_once()
-        for node in self.brain.nodes:
-            self.assert_container_state(node, 'sharded', 4)
+        self.sharders_once_non_auto()
+        self.assert_container_states('sharded', 4)
 
         # sanity check, all is well
         msg = self.assert_subprocess_success([
@@ -3635,8 +3793,8 @@ class TestManagedContainerSharding(BaseTestContainerSharding):
             '--yes'])
         self.assertIn(b'Repairs necessary to fill gaps.', msg)
 
-        self.sharders_once()
-        self.sharders_once()
+        self.sharders_once_non_auto()
+        self.sharders_once_non_auto()
         self.container_replicators.once()
 
         # yay! we fixed the gap (without creating an overlap)
@@ -3662,7 +3820,7 @@ class TestManagedContainerSharding(BaseTestContainerSharding):
         new_objs = [obj_names[4] + 'a']
         self.put_objects(new_objs)
         # get root stats up to date
-        self.sharders_once()
+        self.sharders_once_non_auto()
         # new object is in listing but old objects in the gap have been lost -
         # don't delete shard ranges!
         self.assert_container_listing(obj_names[:4] + new_objs + obj_names[6:])
@@ -3702,7 +3860,7 @@ class TestManagedContainerSharding(BaseTestContainerSharding):
         # Run container-sharder to shard the 2 primary replicas that did
         # receive the object PUTs
         for num in self.brain.primary_numbers:
-            self.sharders_once(
+            self.sharders_once_non_auto(
                 number=num,
                 additional_args='--partitions=%s' % self.brain.part)
 
@@ -3711,7 +3869,7 @@ class TestManagedContainerSharding(BaseTestContainerSharding):
         self.delete_objects(obj_names)
         # deal with DELETE's being misplaced in root db's...
         for num in self.brain.primary_numbers:
-            self.sharders_once(
+            self.sharders_once_non_auto(
                 number=num,
                 additional_args='--partitions=%s' % self.brain.part)
 
@@ -3738,7 +3896,7 @@ class TestManagedContainerSharding(BaseTestContainerSharding):
 
         # now shard the final DB
         for num in self.brain.handoff_numbers:
-            self.sharders_once(
+            self.sharders_once_non_auto(
                 number=num,
                 additional_args='--partitions=%s' % self.brain.part)
 
@@ -3806,7 +3964,7 @@ class TestManagedContainerSharding(BaseTestContainerSharding):
         # Run container-sharder to shard the 2 primary replicas that did
         # receive the object PUTs
        for num in self.brain.primary_numbers:
-            self.sharders_once(
+            self.sharders_once_non_auto(
                 number=num,
                 additional_args='--partitions=%s' % self.brain.part)
 
@@ -3828,7 +3986,7 @@ class TestManagedContainerSharding(BaseTestContainerSharding):
 
         # now shard the final DB
         for num in self.brain.handoff_numbers:
-            self.sharders_once(
+            self.sharders_once_non_auto(
                 number=num,
                 additional_args='--partitions=%s' % self.brain.part)
         shard_ranges = self.assert_container_state(
@@ -3876,14 +4034,13 @@ class TestManagedContainerSharding(BaseTestContainerSharding):
         # Run container-replicator to replicate them to other nodes.
         self.container_replicators.once(
             additional_args='--partitions=%s' % self.brain.part)
-        for node in self.brain.nodes:
-            self.assert_container_state(node, 'unsharded', 2)
+        self.assert_container_states('unsharded', 2)
         # Run container-sharder on all nodes to shard the container.
-        self.sharders_once(additional_args='--partitions=%s' % self.brain.part)
+        self.sharders_once_non_auto(
+            additional_args='--partitions=%s' % self.brain.part)
         # get shards to update state from parent...
-        self.sharders_once()
-        for node in self.brain.nodes:
-            self.assert_container_state(node, 'sharded', 2)
+        self.sharders_once_non_auto()
+        self.assert_container_states('sharded', 2)
 
         # shard first child shard into 2 grand-child-shards.
         c_shard_ranges = self.get_container_shard_ranges()
@@ -3912,14 +4069,14 @@ class TestManagedContainerSharding(BaseTestContainerSharding):
         c_shard_dir = os.path.dirname(c_shard_brokers[2].db_file)
         c_shard_tmp_dir = c_shard_dir + ".tmp"
         os.rename(c_shard_dir, c_shard_tmp_dir)
-        self.sharders_once(additional_args='--partitions=%s' %
-                           child_shard_part)
+        self.sharders_once_non_auto(additional_args='--partitions=%s' %
+                                    child_shard_part)
         for node in c_shard_nodes[:2]:
             self.assert_container_state(
                 node, 'sharded', 2, account=c_shard_ranges[0].account,
                 container=c_shard_ranges[0].container, part=child_shard_part)
         # get updates done...
-        self.sharders_once()
+        self.sharders_once_non_auto()
 
         # shard first grand-child shard into 2 grand-grand-child-shards.
         gc_shard_ranges = self.get_container_shard_ranges(
@@ -3936,12 +4093,12 @@ class TestManagedContainerSharding(BaseTestContainerSharding):
             gc_shard_ranges[0].account, gc_shard_ranges[0].container)
         self.container_replicators.once(
             additional_args='--partitions=%s' % grandchild_shard_part)
-        self.sharders_once(additional_args='--partitions=%s' %
-                           grandchild_shard_part)
+        self.sharders_once_non_auto(additional_args='--partitions=%s' %
+                                    grandchild_shard_part)
 
         # get shards to update state from parent...
-        self.sharders_once()
-        self.sharders_once()
+        self.sharders_once_non_auto()
+        self.sharders_once_non_auto()
         self.container_replicators.once(
             additional_args='--partitions=%s' % child_shard_part)
 
@@ -3966,13 +4123,13 @@ class TestManagedContainerSharding(BaseTestContainerSharding):
         # now, finally, run the sharder on the child that is still waiting to
         # shard. It will get 2 great-grandchild ranges from root to replace
         # deleted grandchild.
-        self.sharders_once(
+        self.sharders_once_non_auto(
             additional_args=['--partitions=%s' %
                              child_shard_part, '--devices=%s' %
                              c_shard_nodes[2]['device']])
         # batch size is 2 but this replicas has 3 shard ranges so we need two
         # runs of the sharder
-        self.sharders_once(
+        self.sharders_once_non_auto(
             additional_args=['--partitions=%s' %
                              child_shard_part, '--devices=%s' %
                              c_shard_nodes[2]['device']])
test/unit/container/test_sharder.py

@@ -14,7 +14,7 @@
 # limitations under the License.
 import json
 import random
-from argparse import Namespace
+import argparse
 
 import eventlet
 import os
@@ -47,7 +47,7 @@ from swift.container.sharder import ContainerSharder, sharding_enabled, \
     update_own_shard_range_stats
 from swift.common.utils import ShardRange, Timestamp, hash_path, \
     encode_timestamps, parse_db_filename, quorum_size, Everything, md5, \
-    ShardName
+    ShardName, Namespace
 from test import annotate_failure
 
 from test.debug_logger import debug_logger
@@ -1261,6 +1261,10 @@ class TestSharder(BaseTestSharder):
         do_test('')
         do_test(json.dumps({}))
         do_test(json.dumps([{'account': 'a', 'container': 'c'}]))
+        do_test(json.dumps([dict(Namespace('a/c', 'l', 'u'))]))
+        sr_dict = dict(ShardRange('a/c', next(self.ts_iter), 'l', 'u'))
+        sr_dict.pop('object_count')
+        do_test(json.dumps([sr_dict]))
 
     def test_fetch_shard_ranges_ok(self):
         def do_test(mock_resp_body, params):
@@ -9696,11 +9700,11 @@ class TestContainerSharderConf(unittest.TestCase):
         # given namespace
         def assert_bad(conf):
             with self.assertRaises(ValueError):
-                ContainerSharderConf.validate_conf(Namespace(**conf))
+                ContainerSharderConf.validate_conf(argparse.Namespace(**conf))
 
         def assert_ok(conf):
             try:
-                ContainerSharderConf.validate_conf(Namespace(**conf))
+                ContainerSharderConf.validate_conf(argparse.Namespace(**conf))
             except ValueError as err:
                 self.fail('Unexpected ValueError: %s' % err)
 
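Aside on the import shuffle at the top of this file: swift.common.utils now provides its own Namespace class (an account/container name with lower and upper bounds), which these tests import alongside ShardRange; to avoid shadowing it, the stdlib helper is used via the qualified argparse.Namespace. A small illustration (attribute values hypothetical):

    import argparse
    from swift.common.utils import Namespace

    ns = Namespace('a/c', lower='', upper='')    # a swift namespace range
    conf = argparse.Namespace(shard_container_threshold=1000000)  # attr bag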
test/unit/proxy/controllers/test_container.py

@@ -486,7 +486,7 @@ class TestContainerController(TestRingBase):
     def _check_GET_shard_listing(self, mock_responses, expected_objects,
                                  expected_requests, query_string='',
                                  reverse=False, expected_status=200,
-                                 memcache=False):
+                                 memcache=False, req_headers=None):
         # mock_responses is a list of tuples (status, json body, headers)
         # expected objects is a list of dicts
         # expected_requests is a list of tuples (path, hdrs dict, params dict)
@@ -506,6 +506,8 @@ class TestContainerController(TestRingBase):
                              for resp in mock_responses])
         exp_headers = [resp[2] for resp in mock_responses]
         request = Request.blank(container_path)
+        if req_headers:
+            request.headers.update(req_headers)
         if memcache:
             # memcache exists, which causes backend to ignore constraints and
             # reverse params for shard range GETs
@@ -566,6 +568,7 @@ class TestContainerController(TestRingBase):
                          int(resp.headers['X-Container-Object-Count']))
         self.assertEqual(exp_sharding_state,
                          resp.headers['X-Backend-Sharding-State'])
+        self.assertNotIn('X-Backend-Record-Type', resp.headers)
         for k, v in root_resp_hdrs.items():
             if k.lower().startswith('x-container-meta'):
                 self.assertEqual(v, resp.headers[k])
@@ -662,6 +665,18 @@ class TestContainerController(TestRingBase):
         self.check_response(resp, root_resp_hdrs,
                             expected_objects=expected_objects)
 
+        resp = self._check_GET_shard_listing(
+            mock_responses, expected_objects, expected_requests,
+            req_headers={'X-Backend-Record-Type': 'auto'})
+        self.check_response(resp, root_resp_hdrs,
+                            expected_objects=expected_objects)
+
+        resp = self._check_GET_shard_listing(
+            mock_responses, expected_objects, expected_requests,
+            req_headers={'X-Backend-Record-Type': 'banana'})
+        self.check_response(resp, root_resp_hdrs,
+                            expected_objects=expected_objects)
+
         # GET all objects - sharding, final shard range points back to root
         root_range = ShardRange('a/c', Timestamp.now(), 'pie', '')
         mock_responses = [
@@ -1049,6 +1064,18 @@ class TestContainerController(TestRingBase):
         self.check_response(resp, root_resp_hdrs,
                             expected_objects=expected_objects)
 
+        resp = self._check_GET_shard_listing(
+            mock_responses, expected_objects, expected_requests, memcache=True,
+            req_headers={'X-Backend-Record-Type': 'auto'})
+        self.check_response(resp, root_resp_hdrs,
+                            expected_objects=expected_objects)
+
+        resp = self._check_GET_shard_listing(
+            mock_responses, expected_objects, expected_requests, memcache=True,
+            req_headers={'X-Backend-Record-Type': 'banana'})
+        self.check_response(resp, root_resp_hdrs,
+                            expected_objects=expected_objects)
+
         # GET all objects - sharding, final shard range points back to root
         root_range = ShardRange('a/c', Timestamp.now(), 'pie', '')
         mock_responses = [
@@ -2730,9 +2757,20 @@ class TestContainerController(TestRingBase):
             'X-Container-Bytes-Used': '12',
             'X-Backend-Storage-Policy-Index': '0'}
 
-    def _do_test_caching(self, record_type, exp_recheck_listing):
+    def _do_test_caching(self, record_type, exp_recheck_listing,
+                         exp_record_type):
         # this test gets shard ranges into cache and then reads from cache
         sharding_state = 'sharded'
+        exp_resp_headers = {
+            'X-Backend-Recheck-Container-Existence': '60',
+            'X-Backend-Sharding-State': sharding_state}
+        if exp_record_type:
+            exp_resp_headers['X-Backend-Record-Type'] = exp_record_type
+        exp_cache_resp_headers = {
+            'X-Backend-Cached-Results': 'true',
+            'X-Backend-Sharding-State': sharding_state}
+        if exp_record_type:
+            exp_cache_resp_headers['X-Backend-Record-Type'] = exp_record_type
         self.memcache.delete_all()
         # container is sharded but proxy does not have that state cached;
         # expect a backend request and expect shard ranges to be cached
@@ -2749,10 +2787,7 @@ class TestContainerController(TestRingBase):
             req, backend_req,
             extra_hdrs={'X-Backend-Record-Type': record_type,
                         'X-Backend-Override-Shard-Name-Filter': 'sharded'})
-        self._check_response(resp, self.ns_dicts, {
-            'X-Backend-Recheck-Container-Existence': '60',
-            'X-Backend-Record-Type': 'shard',
-            'X-Backend-Sharding-State': sharding_state})
+        self._check_response(resp, self.ns_dicts, exp_resp_headers)
 
         cache_key = 'shard-listing-v2/a/c'
         self.assertEqual(
@@ -2789,10 +2824,7 @@ class TestContainerController(TestRingBase):
             req, backend_req,
             extra_hdrs={'X-Backend-Record-Type': record_type,
                         'X-Backend-Override-Shard-Name-Filter': 'sharded'})
-        self._check_response(resp, self.ns_dicts, {
-            'X-Backend-Recheck-Container-Existence': '60',
-            'X-Backend-Record-Type': 'shard',
-            'X-Backend-Sharding-State': sharding_state})
+        self._check_response(resp, self.ns_dicts, exp_resp_headers)
         self.assertEqual(
             [mock.call.get('container/a/c'),
              mock.call.get(cache_key, raise_on_error=True),
@@ -2819,10 +2851,7 @@ class TestContainerController(TestRingBase):
         req = self._build_request({'X-Backend-Record-Type': record_type},
                                   {'states': 'listing'}, {})
         resp = req.get_response(self.app)
-        self._check_response(resp, self.ns_dicts, {
-            'X-Backend-Cached-Results': 'true',
-            'X-Backend-Record-Type': 'shard',
-            'X-Backend-Sharding-State': sharding_state})
+        self._check_response(resp, self.ns_dicts, exp_cache_resp_headers)
         self.assertEqual(
             [mock.call.get('container/a/c'),
              mock.call.get(cache_key, raise_on_error=True)],
@@ -2853,10 +2882,7 @@ class TestContainerController(TestRingBase):
             req, backend_req,
             extra_hdrs={'X-Backend-Record-Type': record_type,
                         'X-Backend-Override-Shard-Name-Filter': 'sharded'})
-        self._check_response(resp, self.ns_dicts, {
-            'X-Backend-Recheck-Container-Existence': '60',
-            'X-Backend-Record-Type': 'shard',
-            'X-Backend-Sharding-State': sharding_state})
+        self._check_response(resp, self.ns_dicts, exp_resp_headers)
         self.assertEqual(
             [mock.call.get('container/a/c'),
              mock.call.set(cache_key, self.ns_bound_list.bounds,
@@ -2882,10 +2908,7 @@ class TestContainerController(TestRingBase):
                                   {'states': 'listing'}, {})
         with mock.patch('random.random', return_value=0.11):
             resp = req.get_response(self.app)
-        self._check_response(resp, self.ns_dicts, {
-            'X-Backend-Cached-Results': 'true',
-            'X-Backend-Record-Type': 'shard',
-            'X-Backend-Sharding-State': sharding_state})
+        self._check_response(resp, self.ns_dicts, exp_cache_resp_headers)
         self.assertEqual(
             [mock.call.get('container/a/c'),
              mock.call.get(cache_key, raise_on_error=True)],
@@ -2909,10 +2932,7 @@ class TestContainerController(TestRingBase):
             infocache=req.environ['swift.infocache'])
         with mock.patch('random.random', return_value=0.11):
             resp = req.get_response(self.app)
-        self._check_response(resp, self.ns_dicts, {
-            'X-Backend-Cached-Results': 'true',
-            'X-Backend-Record-Type': 'shard',
-            'X-Backend-Sharding-State': sharding_state})
+        self._check_response(resp, self.ns_dicts, exp_cache_resp_headers)
         self.assertEqual([], self.memcache.calls)
         self.assertIn('swift.infocache', req.environ)
         self.assertIn(cache_key, req.environ['swift.infocache'])
@@ -3025,10 +3045,10 @@ class TestContainerController(TestRingBase):
     def test_GET_shard_ranges(self):
         self._setup_shard_range_stubs()
         # expect shard ranges cache time to be default value of 600
-        self._do_test_caching('shard', 600)
+        self._do_test_caching('shard', 600, 'shard')
         # expect shard ranges cache time to be configured value of 120
         self.app.recheck_listing_shard_ranges = 120
-        self._do_test_caching('shard', 120)
+        self._do_test_caching('shard', 120, 'shard')
 
     def mock_get_from_shards(self, req, resp):
         # for the purposes of these tests we override _get_from_shards so
@@ -3042,7 +3062,7 @@ class TestContainerController(TestRingBase):
                         'ContainerController._get_from_shards',
                         mock_get_from_shards):
             self.app.recheck_listing_shard_ranges = 600
-            self._do_test_caching('auto', 600)
+            self._do_test_caching('auto', 600, exp_record_type=None)
 
     def test_GET_shard_ranges_404_response(self):
         # pre-warm cache with container info but not shard ranges so that the
@@ -3169,6 +3189,8 @@ class TestContainerController(TestRingBase):
         def mock_get_from_shards(self, req, resp):
             return resp
 
+        # request with record-type=auto does not expect record-type in response
+        del exp_hdrs['X-Backend-Record-Type']
         with mock.patch('swift.proxy.controllers.container.'
                         'ContainerController._get_from_shards',
                         mock_get_from_shards):
@@ -3264,6 +3286,8 @@ class TestContainerController(TestRingBase):
         def mock_get_from_shards(self, req, resp):
             return resp
 
+        # request with record-type=auto does not expect record-type in response
+        del exp_hdrs['X-Backend-Record-Type']
        with mock.patch('swift.proxy.controllers.container.'
                        'ContainerController._get_from_shards',
                        mock_get_from_shards):