Merge "tests: Use subTest"

Authored by Zuul on 2025-05-15 07:40:39 +00:00; committed by Gerrit Code Review
commit 4e2d08041a
12 changed files with 140 additions and 172 deletions

View File

@@ -56,6 +56,7 @@ pyparsing==2.2.0
pyperclip==1.6.0
pytest==4.6.11
pytest-cov==2.12.1
pytest-subtests==0.3.0
python-keystoneclient==3.19.0
python-mimeparse==1.6.0
python-subunit==1.2.0

View File

@@ -6,6 +6,7 @@
hacking>=2.0,<7.1.0 # Apache-2.0
coverage>=5.2.1 # Apache-2.0
pytest>=4.6.11 # MIT
pytest-subtests>=0.3.0 # MIT
pytest-cov>=2.12.1 # MIT
stestr>=2.0.0 # Apache-2.0
python-swiftclient>=3.2.0
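
Note: pytest-subtests is the plugin that adds unittest subTest() support to pytest; without it, plain pytest does not report each failing subTest block as a separate result. A minimal sketch of the behaviour the plugin enables (hypothetical test, not part of this change):

import unittest


class TestParity(unittest.TestCase):
    def test_even(self):
        # With pytest-subtests installed, i=1 and i=3 are reported as two
        # separate subtest failures, and the loop keeps running past the
        # first failing iteration.
        for i in range(4):
            with self.subTest(i=i):
                self.assertEqual(0, i % 2)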

View File

@@ -14,7 +14,6 @@
# limitations under the License.
import sys
from contextlib import contextmanager
import os
@@ -87,29 +86,6 @@ def listen_zero():
return sock
@contextmanager
def annotate_failure(msg):
"""
Catch AssertionError and annotate it with a message. Useful when making
assertions in a loop where the message can indicate the loop index or
richer context about the failure.
:param msg: A message to be prefixed to the AssertionError message.
"""
try:
yield
except AssertionError as err:
if err.args:
msg = '%s Failed with %s' % (msg, err.args[0])
err.args = (msg, ) + err.args[1:]
raise err
else:
# workaround for some IDEs raising custom AssertionErrors
raise AssertionError(
'%s Failed with %s' % (msg, err)
).with_traceback(err.__traceback__) from err.__cause__
class BaseTestCase(unittest.TestCase):
def _assertDictContainsSubset(self, subset, dictionary, msg=None):
"""Checks whether dictionary is a superset of subset."""

View File

@@ -14,7 +14,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
from contextlib import contextmanager
import unittest
import uuid
import random
@@ -122,28 +121,22 @@ class TestReconstructorRebuild(ECProbeTest):
for (node, exc) in failures]))
return frag_headers, frag_etags
@contextmanager
def _annotate_failure_with_scenario(self, failed, non_durable):
try:
yield
except (AssertionError, ClientException) as err:
self.fail(
'Scenario with failed nodes: %r, non-durable nodes: %r\n'
' failed with:\n%s' %
([self._format_node(self.onodes[n]) for n in failed],
[self._format_node(self.onodes[n]) for n in non_durable], err)
)
def _test_rebuild_scenario(self, failed, non_durable,
reconstructor_cycles):
# helper method to test a scenario with some nodes missing their
# fragment and some nodes having non-durable fragments
with self._annotate_failure_with_scenario(failed, non_durable):
with self.subTest(
failed=[self._format_node(self.onodes[n]) for n in failed],
non_durable=[self._format_node(self.onodes[n])
for n in non_durable]):
self.break_nodes(self.onodes, self.opart, failed, non_durable)
# make sure we can still GET the object and it is correct; the
# proxy is doing decode on remaining fragments to get the obj
with self._annotate_failure_with_scenario(failed, non_durable):
with self.subTest(
failed=[self._format_node(self.onodes[n]) for n in failed],
non_durable=[self._format_node(self.onodes[n])
for n in non_durable]):
headers, etag = self.proxy_get()
self.assertEqual(self.etag, etag)
for key in self.headers_post:
@@ -159,7 +152,10 @@ class TestReconstructorRebuild(ECProbeTest):
self.reconstructor.once()
# check GET via proxy returns expected data and metadata
with self._annotate_failure_with_scenario(failed, non_durable):
with self.subTest(
failed=[self._format_node(self.onodes[n]) for n in failed],
non_durable=[self._format_node(self.onodes[n])
for n in non_durable]):
headers, etag = self.proxy_get()
self.assertEqual(self.etag, etag)
for key in self.headers_post:
@@ -168,7 +164,10 @@ class TestReconstructorRebuild(ECProbeTest):
self.assertEqual(self.headers_post[key],
wsgi_to_str(headers[wsgi_key]))
# check all frags are intact, durable and have expected metadata
with self._annotate_failure_with_scenario(failed, non_durable):
with self.subTest(
failed=[self._format_node(self.onodes[n]) for n in failed],
non_durable=[self._format_node(self.onodes[n])
for n in non_durable]):
frag_headers, frag_etags = self._assert_all_nodes_have_frag()
self.assertEqual(self.frag_etags, frag_etags)
# self._frag_headers include X-Backend-Durable-Timestamp so this
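
Note: the scenario description that _annotate_failure_with_scenario used to build into the exception message now travels as subTest keyword arguments, which unittest echoes in the failure header. subTest also records non-assertion exceptions (such as the ClientException the old helper caught) as errors and continues, so every scenario still gets reported. A sketch with made-up values:

import unittest


class TestScenarioReporting(unittest.TestCase):
    def test_scenarios(self):
        # A failing subtest is reported with its parameters, e.g.:
        #   FAIL: test_scenarios (...) (failed=[0, 1], non_durable=[2])
        for failed, non_durable in [([0], []), ([0, 1], [2])]:
            with self.subTest(failed=failed, non_durable=non_durable):
                self.assertLessEqual(len(failed) + len(non_durable), 3)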

View File

@@ -38,7 +38,6 @@ from swiftclient import client, get_auth, ClientException
from swift.proxy.controllers.base import get_cache_key
from swift.proxy.controllers.obj import num_container_updates
from test import annotate_failure
from test.debug_logger import debug_logger
from test.probe import PROXY_BASE_URL
from test.probe.brain import BrainSplitter
@@ -313,7 +312,7 @@ class BaseTestContainerSharding(ReplProbeTest):
for d in shard_ranges)
self.assertLengthEqual(actual_shard_ranges, expected_number)
if expected_number:
with annotate_failure('Ranges %s.' % actual_shard_ranges):
with self.subTest(shard_ranges=actual_shard_ranges):
self.assertEqual(first_lower, actual_shard_ranges[0].lower_str)
for x, y in zip(actual_shard_ranges, actual_shard_ranges[1:]):
self.assertEqual(x.upper, y.lower)
@@ -399,7 +398,7 @@ class BaseTestContainerSharding(ReplProbeTest):
def assert_container_has_shard_sysmeta(self):
node_headers = self.direct_head_container()
for node_id, headers in node_headers.items():
with annotate_failure('%s in %s' % (node_id, node_headers.keys())):
with self.subTest(node_id=node_id, nodes=node_headers.keys()):
for k, v in headers.items():
if k.lower().startswith('x-container-sysmeta-shard'):
break
@@ -1085,7 +1084,7 @@ class TestContainerSharding(BaseAutoContainerSharding):
root_shard_ranges = self.direct_get_container_shard_ranges()
for node, (hdrs, root_shards) in root_shard_ranges.items():
self.assertLengthEqual(root_shards, 2)
with annotate_failure('node %s. ' % node):
with self.subTest(node=node):
self.assertEqual(
[ShardRange.ACTIVE] * 2,
[sr['state'] for sr in root_shards])
@@ -1122,7 +1121,7 @@ class TestContainerSharding(BaseAutoContainerSharding):
self.assertLengthEqual(found_for_shard['shard_dbs'], 2)
for db_file in found_for_shard['shard_dbs'][:2]:
broker = ContainerBroker(db_file)
with annotate_failure('shard db file %s. ' % db_file):
with self.subTest(db_file=db_file):
self.assertIs(False, broker.is_root_container())
self.assertEqual('sharding', broker.get_db_state())
self.assertEqual(
@@ -1193,7 +1192,7 @@ class TestContainerSharding(BaseAutoContainerSharding):
self.assertLengthEqual(found_for_shard['normal_dbs'], 3)
for db_file in found_for_shard['shard_dbs']:
broker = ContainerBroker(db_file)
with annotate_failure('shard db file %s. ' % db_file):
with self.subTest(db_file=db_file):
self.assertIs(False, broker.is_root_container())
self.assertEqual('sharding', broker.get_db_state())
self.assertEqual(
@@ -1216,7 +1215,7 @@ class TestContainerSharding(BaseAutoContainerSharding):
self.assertLengthEqual(found_for_sub_shard['normal_dbs'], 3)
for db_file in found_for_sub_shard['normal_dbs']:
broker = ContainerBroker(db_file)
with annotate_failure('sub shard db file %s. ' % db_file):
with self.subTest(db_file=db_file):
self.assertIs(False, broker.is_root_container())
self.assertEqual('unsharded', broker.get_db_state())
self.assertEqual(
@@ -1227,7 +1226,7 @@ class TestContainerSharding(BaseAutoContainerSharding):
root_shard_ranges = self.direct_get_container_shard_ranges()
for node, (hdrs, root_shards) in root_shard_ranges.items():
self.assertLengthEqual(root_shards, 5)
with annotate_failure('node %s. ' % node):
with self.subTest(node=node):
# shard ranges are sorted by upper, state, lower, so expect:
# sub-shards, orig shard 0, orig shard 1
self.assertEqual(
@@ -1254,7 +1253,7 @@ class TestContainerSharding(BaseAutoContainerSharding):
shard_listings = self.direct_get_container(shard_shards[0].account,
shard_shards[0].container)
for node, (hdrs, listing) in shard_listings.items():
with annotate_failure(node):
with self.subTest(node=node):
self.assertIn('alpha', [o['name'] for o in listing])
self.assert_container_listing(['alpha'] + more_obj_names + obj_names)
# Run sharders again so things settle.
@@ -1265,7 +1264,7 @@ class TestContainerSharding(BaseAutoContainerSharding):
# check original first shard range shards
for db_file in found_for_shard['shard_dbs']:
broker = ContainerBroker(db_file)
with annotate_failure('shard db file %s. ' % db_file):
with self.subTest(db_file=db_file):
self.assertIs(False, broker.is_root_container())
self.assertEqual('sharded', broker.get_db_state())
self.assertEqual(
@@ -1283,7 +1282,7 @@ class TestContainerSharding(BaseAutoContainerSharding):
for node, (hdrs, root_shards) in root_shard_ranges.items():
# old first shard range should have been deleted
self.assertLengthEqual(root_shards, 4)
with annotate_failure('node %s. ' % node):
with self.subTest(node=node):
self.assertEqual(
[ShardRange.ACTIVE] * 4,
[sr['state'] for sr in root_shards])
@@ -1727,7 +1726,7 @@ class TestContainerSharding(BaseAutoContainerSharding):
object_counts = []
bytes_used = []
for node_id, node_data in node_data.items():
with annotate_failure('Node id %s.' % node_id):
with self.subTest(node_id=node_id):
check_node_data(
node_data, exp_shard_hdrs, exp_obj_count,
expected_shards, exp_sharded_root_range)
@@ -1811,7 +1810,7 @@ class TestContainerSharding(BaseAutoContainerSharding):
root_nodes_data = self.direct_get_container_shard_ranges()
self.assertEqual(3, len(root_nodes_data))
for node_id, node_data in root_nodes_data.items():
with annotate_failure('Node id %s.' % node_id):
with self.subTest(node_id=node_id):
check_node_data(node_data, exp_hdrs, exp_obj_count, 2)
# run updaters to update .sharded account; shard containers have
@@ -1875,7 +1874,7 @@ class TestContainerSharding(BaseAutoContainerSharding):
num_obj_replicas, self.policy.quorum)
expected_num_pendings = min(expected_num_updates, num_obj_replicas)
# sanity check
with annotate_failure('policy %s. ' % self.policy):
with self.subTest(policy=self.policy):
self.assertLengthEqual(async_pendings, expected_num_pendings)
# root object count is not updated...
@@ -1884,7 +1883,7 @@ class TestContainerSharding(BaseAutoContainerSharding):
root_nodes_data = self.direct_get_container_shard_ranges()
self.assertEqual(3, len(root_nodes_data))
for node_id, node_data in root_nodes_data.items():
with annotate_failure('Node id %s.' % node_id):
with self.subTest(node_id=node_id):
check_node_data(node_data, exp_hdrs, exp_obj_count, 2)
range_data = node_data[1]
self.assert_shard_range_lists_equal(
@@ -1919,7 +1918,7 @@ class TestContainerSharding(BaseAutoContainerSharding):
self.assertEqual(3, len(root_nodes_data))
exp_hdrs['X-Container-Object-Count'] = str(exp_obj_count)
for node_id, node_data in root_nodes_data.items():
with annotate_failure('Node id %s.' % node_id):
with self.subTest(node_id=node_id):
# NB now only *one* shard range in root
check_node_data(node_data, exp_hdrs, exp_obj_count, 1)
@@ -1947,7 +1946,7 @@ class TestContainerSharding(BaseAutoContainerSharding):
part, nodes = self.brain.ring.get_nodes(
donor.account, donor.container)
for node in nodes:
with annotate_failure(node):
with self.subTest(node=node):
broker = self.get_broker(
part, node, donor.account, donor.container)
own_sr = broker.get_own_shard_range()
@@ -2006,7 +2005,7 @@ class TestContainerSharding(BaseAutoContainerSharding):
# just the alpha object
'X-Container-Object-Count': '1'}
for node_id, node_data in root_nodes_data.items():
with annotate_failure('Node id %s.' % node_id):
with self.subTest(node_id=node_id):
# NB now no shard ranges in root
check_node_data(node_data, exp_hdrs, 0, 0)
@@ -2161,8 +2160,7 @@ class TestContainerSharding(BaseAutoContainerSharding):
# the 'alpha' object is NOT replicated to the two sharded nodes
for node in self.brain.nodes[:2]:
broker = self.get_broker(self.brain.part, node)
with annotate_failure(
'Node id %s in %s' % (node['id'], self.brain.nodes[:2])):
with self.subTest(node=node['id'], nodes=self.brain.nodes[:2]):
self.assertFalse(broker.get_objects())
self.assert_container_state(node, 'sharding', 3)
self.brain.servers.stop(number=node_numbers[2])
@@ -2172,7 +2170,7 @@ class TestContainerSharding(BaseAutoContainerSharding):
self.brain.servers.start(number=node_numbers[2])
node_data = self.direct_get_container_shard_ranges()
for node, (hdrs, shard_ranges) in node_data.items():
with annotate_failure(node):
with self.subTest(node=node):
self.assert_shard_ranges_contiguous(3, shard_ranges)
# complete cleaving third shard range on first two nodes
@@ -2213,8 +2211,7 @@ class TestContainerSharding(BaseAutoContainerSharding):
for node in self.brain.nodes[:2]:
broker = self.get_broker(self.brain.part, node)
info = broker.get_info()
with annotate_failure(
'Node id %s in %s' % (node['id'], self.brain.nodes[:2])):
with self.subTest(node=node['id'], nodes=self.brain.nodes[:2]):
self.assertEqual(len(obj_names), info['object_count'])
self.assertFalse(broker.get_objects())
@@ -2232,8 +2229,7 @@ class TestContainerSharding(BaseAutoContainerSharding):
# the 'alpha' object is NOT replicated to the two sharded nodes
for node in self.brain.nodes[:2]:
broker = self.get_broker(self.brain.part, node)
with annotate_failure(
'Node id %s in %s' % (node['id'], self.brain.nodes[:2])):
with self.subTest(node=node['id'], nodes=self.brain.nodes[:2]):
self.assertFalse(broker.get_objects())
self.assert_container_state(node, 'sharded', 2)
self.brain.servers.stop(number=node_numbers[2])
@@ -2243,7 +2239,7 @@ class TestContainerSharding(BaseAutoContainerSharding):
self.brain.servers.start(number=node_numbers[2])
node_data = self.direct_get_container_shard_ranges()
for node, (hdrs, shard_ranges) in node_data.items():
with annotate_failure(node):
with self.subTest(node=node):
self.assert_shard_ranges_contiguous(2, shard_ranges)
# run the sharder on the third server, alpha object is included in
@@ -2965,8 +2961,7 @@ class TestContainerSharding(BaseAutoContainerSharding):
self.assert_container_state(node, 'unsharded', 3)
node_data = self.direct_get_container_shard_ranges()
for node_id, (hdrs, shard_ranges) in node_data.items():
with annotate_failure(
'node id %s from %s' % (node_id, node_data.keys)):
with self.subTest(node_id=node_id, nodes=node_data.keys()):
self.assert_shard_range_state(ShardRange.ACTIVE, shard_ranges)
# check handoff cleaved all objects before it was deleted - stop all
@@ -3304,8 +3299,8 @@ class TestManagedContainerSharding(BaseTestContainerSharding):
# root container own shard range should still be SHARDED
for i, node in enumerate(self.brain.nodes):
with annotate_failure('node[%d]' % i):
broker = self.get_broker(self.brain.part, self.brain.nodes[0])
with self.subTest(i=i):
broker = self.get_broker(self.brain.part, node)
self.assertEqual(ShardRange.SHARDED,
broker.get_own_shard_range().state)
@@ -3326,8 +3321,8 @@ class TestManagedContainerSharding(BaseTestContainerSharding):
# root container own shard range should now be ACTIVE
for i, node in enumerate(self.brain.nodes):
with annotate_failure('node[%d]' % i):
broker = self.get_broker(self.brain.part, self.brain.nodes[0])
with self.subTest(i=i):
broker = self.get_broker(self.brain.part, node)
self.assertEqual(ShardRange.ACTIVE,
broker.get_own_shard_range().state)
@@ -3437,7 +3432,7 @@ class TestManagedContainerSharding(BaseTestContainerSharding):
self.assert_container_listing(obj_names)
# check the unwanted shards did shrink away...
for shard_range in shard_ranges_0:
with annotate_failure(shard_range):
with self.subTest(shard_range=shard_range):
found_for_shard = self.categorize_container_dir_content(
shard_range.account, shard_range.container)
self.assertLengthEqual(found_for_shard['shard_dbs'], 3)
@@ -3468,7 +3463,7 @@ class TestManagedContainerSharding(BaseTestContainerSharding):
for sr in shard_ranges_1],
key=ShardRange.sort_key)
for node in (0, 1, 2):
with annotate_failure('node %s' % node):
with self.subTest(node=node):
broker = self.get_broker(self.brain.part,
self.brain.nodes[node])
brokers[node] = broker
@@ -3505,7 +3500,7 @@ class TestManagedContainerSharding(BaseTestContainerSharding):
# 3 active shards, albeit with zero objects to cleave.
self.sharders_once_non_auto()
for node in (0, 1, 2):
with annotate_failure('node %s' % node):
with self.subTest(node=node):
broker = self.get_broker(self.brain.part,
self.brain.nodes[node])
brokers[node] = broker
@@ -3527,7 +3522,7 @@ class TestManagedContainerSharding(BaseTestContainerSharding):
# the shard is updated from a root) may have happened before all roots
# have had their shard ranges transitioned to ACTIVE.
for shard_range in shard_ranges_1:
with annotate_failure(shard_range):
with self.subTest(shard_range=shard_range):
found_for_shard = self.categorize_container_dir_content(
shard_range.account, shard_range.container)
self.assertLengthEqual(found_for_shard['normal_dbs'], 3)
@@ -3549,7 +3544,7 @@ class TestManagedContainerSharding(BaseTestContainerSharding):
# replicate it as per a normal cleave.
self.sharders_once_non_auto()
for node in (0, 1, 2):
with annotate_failure('node %s' % node):
with self.subTest(node=node):
broker = self.get_broker(self.brain.part,
self.brain.nodes[node])
brokers[node] = broker
@@ -3592,7 +3587,7 @@ class TestManagedContainerSharding(BaseTestContainerSharding):
# ensure all shards learn their ACTIVE state from root
self.sharders_once_non_auto()
for node in (0, 1, 2):
with annotate_failure('node %d' % node):
with self.subTest(node=node):
shard_ranges = self.assert_container_state(
self.brain.nodes[node], 'sharded', 3)
for sr in shard_ranges:
@@ -4022,9 +4017,8 @@ class TestManagedContainerSharding(BaseTestContainerSharding):
# all DBs should now be sharded and still deleted
for node in self.brain.nodes:
with annotate_failure(
'node %s in %s'
% (node['index'], [n['index'] for n in self.brain.nodes])):
with self.subTest(node_index=node['index'],
nodes=[n['index'] for n in self.brain.nodes]):
self.assert_container_state(node, 'sharded', 2,
override_deleted=True)
broker = self.get_broker(self.brain.part, node,
@@ -4118,9 +4112,8 @@ class TestManagedContainerSharding(BaseTestContainerSharding):
# all DBs should now be sharded and NOT deleted
for node in self.brain.nodes:
with annotate_failure(
'node %s in %s'
% (node['index'], [n['index'] for n in self.brain.nodes])):
with self.subTest(node_index=node['index'],
nodes=[n['index'] for n in self.brain.nodes]):
broker = self.get_broker(self.brain.part, node,
self.account, self.container_name)
self.assertEqual(SHARDED, broker.get_db_state())

View File

@@ -24,7 +24,6 @@ from unittest import mock
from swift.common.utils import config
from io import StringIO
from test import annotate_failure
from test.unit import temptree
@@ -240,7 +239,7 @@ class TestUtilsConfig(unittest.TestCase):
for bad in ('1.1', 1.1, 'auto', 'bad',
'2.5 * replicas', 'two * replicas'):
with annotate_failure(bad):
with self.subTest(option=bad):
with self.assertRaises(ValueError):
config.config_request_node_count_value(bad)
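
Note: nesting assertRaises inside subTest keeps the whole list of bad values under test even if one of them unexpectedly passes validation. A minimal sketch, with a hypothetical validator standing in for config_request_node_count_value:

import unittest


def parse_count(value):
    # Hypothetical stand-in for the real validator.
    return int(value)


class TestValidation(unittest.TestCase):
    def test_rejects_bad_values(self):
        for bad in ('1.1', 'auto', 'bad'):
            # A value that failed to raise would show up as its own
            # subtest failure instead of masking the remaining cases.
            with self.subTest(option=bad):
                with self.assertRaises(ValueError):
                    parse_count(bad)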

View File

@@ -46,7 +46,6 @@ from swift.common.storage_policy import POLICIES
from unittest import mock
from test import annotate_failure
from test.debug_logger import debug_logger
from test.unit import (patch_policies, with_tempdir, make_timestamp_iter,
mock_timestamp_now)
@@ -2772,19 +2771,19 @@ class TestContainerBroker(test_db.TestDbBase):
# selected rows
for since_row in range(10):
actual = broker.get_objects(since_row=since_row)
with annotate_failure(since_row):
with self.subTest(since_row=since_row):
self.assertEqual(obj_names[since_row:],
[o['name'] for o in actual])
for since_row in range(10, 20):
actual = broker.get_objects(since_row=since_row)
with annotate_failure(since_row):
with self.subTest(since_row=since_row):
self.assertEqual(obj_names[10:],
[o['name'] for o in actual])
for since_row in range(20, len(obj_names) + 1):
actual = broker.get_objects(since_row=since_row)
with annotate_failure(since_row):
with self.subTest(since_row=since_row):
self.assertEqual(obj_names[since_row - 10:],
[o['name'] for o in actual])

View File

@@ -32,7 +32,6 @@ from swift.container.reconciler import (
from swift.common.utils import Timestamp, encode_timestamps, ShardRange, \
get_db_files, make_db_file_path, MD5_OF_EMPTY_STRING
from swift.common.storage_policy import POLICIES
from test import annotate_failure
from test.debug_logger import debug_logger
from test.unit.common import test_db_replicator
@@ -1552,7 +1551,7 @@ class TestReplicatorSync(test_db_replicator.TestReplicatorSync):
(default_osr_newer, osr_with_epoch, False, False, True),
)
for i, params in enumerate(tests):
with annotate_failure((i, params)):
with self.subTest(i=i, params=params):
do_test(*params)
def test_sync_shard_ranges(self):

View File

@@ -47,7 +47,7 @@ from test.unit import fake_http_connect, mock_check_drive
from swift.common.storage_policy import (POLICIES, StoragePolicy)
from swift.common.request_helpers import get_sys_meta_prefix, get_reserved_name
from test import listen_zero, annotate_failure
from test import listen_zero
from test.unit import patch_policies, make_timestamp_iter, mock_timestamp_now
@@ -4289,7 +4289,7 @@ class TestContainerController(unittest.TestCase):
sr_happy.update_state(state,
state_timestamp=next(self.ts)))
self._put_shard_range(sr_happy)
with annotate_failure(state):
with self.subTest(state=state):
obj_name = 'grumpy%s' % state
if state in redirect_states:
assert_redirected(obj_name, sr_happy, headers=headers)
@@ -4314,7 +4314,7 @@
sr_happy.update_state(state,
state_timestamp=next(self.ts)))
self._put_shard_range(sr_happy)
with annotate_failure(state):
with self.subTest(state=state):
obj_name = 'grumpier%s' % state
if state in redirect_states:
assert_redirected(obj_name, sr_happy, headers=headers)
@@ -4353,7 +4353,7 @@
sr_happy.update_state(state,
state_timestamp=next(self.ts)))
self._put_shard_range(sr_happy)
with annotate_failure(state):
with self.subTest(state=state):
obj_name = 'dopey%s' % state
if state in redirect_states:
assert_redirected(obj_name, sr_happy, headers=headers)
@@ -4391,7 +4391,7 @@
sr_happy.update_state(state,
state_timestamp=next(self.ts)))
self._put_shard_range(sr_happy)
with annotate_failure(state):
with self.subTest(state=state):
obj_name = 'grumpiest%s' % state
if state in redirect_states:
assert_redirected(obj_name, sr_happy, headers=headers)

View File

@@ -46,7 +46,6 @@ from swift.container.sharder import ContainerSharder, sharding_enabled, \
from swift.common.utils import ShardRange, Timestamp, hash_path, \
encode_timestamps, parse_db_filename, quorum_size, Everything, md5, \
ShardName, Namespace
from test import annotate_failure
from test.debug_logger import debug_logger
from test.unit import FakeRing, make_timestamp_iter, unlink_files, \
@@ -1562,7 +1561,7 @@ class TestSharder(BaseTestSharder):
self.assertEqual(UNSHARDED, broker.get_db_state())
sharder._replicate_object.assert_not_called()
for db in expected_shard_dbs:
with annotate_failure(db):
with self.subTest(db=db):
self.assertFalse(os.path.exists(db))
# run cleave - all shard ranges in found state, nothing happens
@@ -1585,10 +1584,10 @@
self.assertEqual(SHARDING, broker.get_db_state())
sharder._replicate_object.assert_not_called()
for db in expected_shard_dbs:
with annotate_failure(db):
with self.subTest(db=db):
self.assertFalse(os.path.exists(db))
for shard_range in broker.get_shard_ranges():
with annotate_failure(shard_range):
with self.subTest(shard_range=shard_range):
self.assertEqual(ShardRange.FOUND, shard_range.state)
# move first shard range to created state, first shard range is cleaved
@@ -1630,10 +1629,10 @@
self._check_objects(objects[:2], expected_shard_dbs[0])
# other shard ranges should be unchanged
for i in range(1, len(shard_ranges)):
with annotate_failure(i):
with self.subTest(i=i):
self.assertFalse(os.path.exists(expected_shard_dbs[i]))
for i in range(1, len(updated_shard_ranges)):
with annotate_failure(i):
with self.subTest(i=i):
self.assertEqual(dict(shard_ranges[i]),
dict(updated_shard_ranges[i]))
@@ -1682,7 +1681,7 @@
updated_shard_ranges = broker.get_shard_ranges()
self.assertEqual(4, len(updated_shard_ranges))
for i in range(1, len(updated_shard_ranges)):
with annotate_failure(i):
with self.subTest(i=i):
self.assertEqual(dict(shard_ranges[i]),
dict(updated_shard_ranges[i]))
context = CleavingContext.load(broker)
@@ -1743,7 +1742,7 @@
shard_ranges[2].object_count = 1
shard_ranges[2].state = ShardRange.CLEAVED
for i in range(0, 3):
with annotate_failure(i):
with self.subTest(i=i):
self._check_shard_range(
shard_ranges[i], updated_shard_ranges[i])
self._check_objects(objects[2:5], expected_shard_dbs[1])
@@ -1751,10 +1750,10 @@
# other shard ranges should be unchanged
self.assertFalse(os.path.exists(expected_shard_dbs[0]))
for i, db in enumerate(expected_shard_dbs[3:], 3):
with annotate_failure(i):
with self.subTest(i=i):
self.assertFalse(os.path.exists(db))
for i, updated_shard_range in enumerate(updated_shard_ranges[3:], 3):
with annotate_failure(i):
with self.subTest(i=i):
self.assertEqual(dict(shard_ranges[i]),
dict(updated_shard_range))
context = CleavingContext.load(broker)
@@ -1804,18 +1803,18 @@
shard_ranges[3].object_count = 1
shard_ranges[3].state = ShardRange.CLEAVED
for i in range(0, 4):
with annotate_failure(i):
with self.subTest(i=i):
self._check_shard_range(
shard_ranges[i], updated_shard_ranges[i])
# NB includes the deleted object
self._check_objects(objects[6:8], expected_shard_dbs[3])
# other shard ranges should be unchanged
for i, db in enumerate(expected_shard_dbs[:3]):
with annotate_failure(i):
with self.subTest(i=i):
self.assertFalse(os.path.exists(db))
self.assertFalse(os.path.exists(expected_shard_dbs[4]))
for i, updated_shard_range in enumerate(updated_shard_ranges[4:], 4):
with annotate_failure(i):
with self.subTest(i=i):
self.assertEqual(dict(shard_ranges[i]),
dict(updated_shard_range))
@@ -1875,7 +1874,7 @@
self.assertEqual(5, len(updated_shard_ranges))
# NB stats of the ACTIVE shard range should not be reset by cleaving
for i in range(0, 4):
with annotate_failure(i):
with self.subTest(i=i):
self._check_shard_range(
shard_ranges[i], updated_shard_ranges[i])
self.assertEqual(dict(shard_ranges[4]), dict(updated_shard_ranges[4]))
@@ -1884,7 +1883,7 @@
self._check_objects(objects[8:], expected_shard_dbs[4])
# other shard ranges should be unchanged
for i, db in enumerate(expected_shard_dbs[:4]):
with annotate_failure(i):
with self.subTest(i=i):
self.assertFalse(os.path.exists(db))
self.assertEqual(initial_root_info['object_count'],
@@ -3723,7 +3722,7 @@
with mock_timestamp_now(now):
for broker in brokers:
sharder._identify_sharding_candidate(broker, node)
with annotate_failure(state):
with self.subTest(state=state):
self.assertEqual([stats_0], sharder.sharding_candidates)
# reduce the threshold and the second container is included
@@ -3762,7 +3761,7 @@
with mock_timestamp_now(now):
for broker in brokers:
sharder._identify_sharding_candidate(broker, node)
with annotate_failure(state):
with self.subTest(state=state):
self.assertEqual([stats_2], sharder.sharding_candidates)
own_sr.update_state(ShardRange.ACTIVE, state_timestamp=Timestamp.now())
@@ -5950,7 +5949,7 @@
broker.merge_shard_ranges([own_shard_range])
for state in ShardRange.STATES:
with annotate_failure(state):
with self.subTest(state=state):
check_only_own_shard_range_sent(state)
init_obj_count = len(obj_names)
@@ -5981,7 +5980,7 @@
self.check_shard_ranges_sent(broker, expected_sent)
for i, state in enumerate(ShardRange.STATES):
with annotate_failure(state):
with self.subTest(state=state):
check_tombstones_sent(state)
def test_update_root_container_already_reported(self):
@@ -6013,7 +6012,7 @@
broker.merge_shard_ranges([own_shard_range])
for state in ShardRange.STATES:
with annotate_failure(state):
with self.subTest(state=state):
check_already_reported_not_sent(state)
def test_update_root_container_all_ranges(self):
@@ -6057,7 +6056,7 @@
broker, [dict(sr) for sr in expected_sent])
for state in ShardRange.STATES.keys():
with annotate_failure(state):
with self.subTest(state=state):
check_all_shard_ranges_sent(state)
def test_audit_root_container_reset_epoch(self):
@@ -6625,8 +6624,7 @@
# other shard ranges
for own_state in ShardRange.STATES:
for root_state in ShardRange.STATES:
with annotate_failure('own_state=%s, root_state=%s' %
(own_state, root_state)):
with self.subTest(own_state=own_state, root_state=root_state):
own_ts = next(self.ts_iter)
root_ts = next(self.ts_iter)
broker, shard_ranges = check_audit(own_state, root_state)
@@ -6647,8 +6645,7 @@
# other shard ranges
for own_state in ShardRange.STATES:
for root_state in ShardRange.STATES:
with annotate_failure('own_state=%s, root_state=%s' %
(own_state, root_state)):
with self.subTest(own_state=own_state, root_state=root_state):
root_ts = next(self.ts_iter)
own_ts = next(self.ts_iter)
broker, shard_ranges = check_audit(own_state, root_state)
@@ -6722,8 +6719,9 @@
timestamp=next(self.ts_iter), state=acceptor_state)
root_from_root = root_own_sr.copy(
timestamp=next(self.ts_iter), state=root_state)
with annotate_failure('with states %s %s %s'
% (own_state, acceptor_state, root_state)):
with self.subTest(own_state=own_state,
acceptor_state=acceptor_state,
root_state=root_state):
own_sr_name = ShardName.create(
'.shards_a', 'c', 'c', next(self.ts_iter), 0)
own_sr = ShardRange(
@@ -6775,8 +6773,9 @@
str(ShardName.create('.shards_a', 'c', 'c', ts, 0)),
ts, lower='a', upper='b', state=own_state, state_timestamp=ts)
expected = [acceptor_from_root]
with annotate_failure('with states %s %s %s'
% (own_state, acceptor_state, root_state)):
with self.subTest(own_state=own_state,
acceptor_state=acceptor_state,
root_state=root_state):
sharder = self._assert_merge_into_shard(
own_sr, [],
# own sr is in ranges fetched from root
@@ -6794,9 +6793,9 @@
if root_state == ShardRange.ACTIVE:
# special case: ACTIVE root *is* merged
continue
with annotate_failure(
'with states %s %s %s'
% (own_state, acceptor_state, root_state)):
with self.subTest(own_state=own_state,
acceptor_state=acceptor_state,
root_state=root_state):
do_test(own_state, acceptor_state, root_state)
def test_audit_shard_root_ranges_missing_own_merged_while_shrinking(self):
@@ -6819,8 +6818,9 @@
str(ShardName.create('.shards_a', 'c', 'c', ts, 0)),
ts, lower='a', upper='b', state=own_state, state_timestamp=ts)
expected = [acceptor_from_root]
with annotate_failure('with states %s %s %s'
% (own_state, acceptor_state, root_state)):
with self.subTest(own_state=own_state,
acceptor_state=acceptor_state,
root_state=root_state):
sharder = self._assert_merge_into_shard(
own_sr, [],
# own sr is NOT in ranges fetched from root
@@ -6841,9 +6841,9 @@
if root_state == ShardRange.ACTIVE:
# special case: ACTIVE root *is* merged
continue
with annotate_failure(
'with states %s %s %s'
% (own_state, acceptor_state, root_state)):
with self.subTest(own_state=own_state,
acceptor_state=acceptor_state,
root_state=root_state):
do_test(own_state, acceptor_state, root_state)
def test_audit_shard_root_range_not_merged_while_shrinking(self):
@@ -6866,7 +6866,7 @@
for root_state in ShardRange.STATES:
if root_state == ShardRange.ACTIVE:
continue # special case tested below
with annotate_failure((own_state, root_state)):
with self.subTest(own_state=own_state, root_state=root_state):
do_test(own_state, root_state)
def test_audit_shard_root_range_overlap_not_merged_while_shrinking(self):
@@ -6892,7 +6892,7 @@
self.assertFalse(sharder.logger.get_lines_for_level('error'))
for own_state in ShardRange.SHRINKING_STATES:
with annotate_failure(own_state):
with self.subTest(own_state=own_state):
do_test(own_state)
def test_audit_shard_active_root_range_merged_while_shrinking(self):
@@ -6912,7 +6912,7 @@
self.assertFalse(sharder.logger.get_lines_for_level('error'))
for own_state in ShardRange.SHRINKING_STATES:
with annotate_failure(own_state):
with self.subTest(own_state=own_state):
do_test(own_state)
def test_audit_shard_root_ranges_fetch_fails_while_shrinking(self):
@@ -6965,9 +6965,10 @@
# special case covered in other tests
continue
for root_state in ShardRange.STATES:
with annotate_failure(
'with states %s %s %s'
% (own_state, acceptor_state, root_state)):
with self.subTest(
own_state=own_state,
acceptor_state=acceptor_state,
root_state=root_state):
do_test(own_state, acceptor_state, root_state)
def test_audit_shard_root_ranges_merge_while_sharding(self):
@@ -7003,9 +7004,10 @@
# special case covered in other tests
continue
for root_state in ShardRange.STATES:
with annotate_failure(
'with states %s %s %s'
% (own_state, acceptor_state, root_state)):
with self.subTest(
own_state=own_state,
acceptor_state=acceptor_state,
root_state=root_state):
do_test(own_state, acceptor_state, root_state)
def test_audit_shard_root_ranges_not_merged_once_sharded(self):
@@ -7043,9 +7045,10 @@
for own_state in (ShardRange.SHARDED, ShardRange.SHRUNK):
for other_sub_shard_state in ShardRange.STATES:
for root_state in ShardRange.STATES:
with annotate_failure(
'with states %s %s %s'
% (own_state, other_sub_shard_state, root_state)):
with self.subTest(
own_state=own_state,
other_sub_shard_state=other_sub_shard_state,
root_state=root_state):
do_test(own_state, other_sub_shard_state, root_state)
def test_audit_shard_root_ranges_replace_existing_while_cleaving(self):
@@ -7088,7 +7091,7 @@
self.assertFalse(sharder.logger.get_lines_for_level('error'))
for own_state in ShardRange.CLEAVING_STATES:
with annotate_failure(own_state):
with self.subTest(own_state=own_state):
do_test(own_state)
def test_audit_shard_root_ranges_supplement_deleted_while_cleaving(self):
@@ -7129,7 +7132,7 @@
self.assertFalse(sharder.logger.get_lines_for_level('error'))
for own_state in ShardRange.CLEAVING_STATES:
with annotate_failure(own_state):
with self.subTest(own_state=own_state):
do_test(own_state)
def test_audit_shard_root_ranges_supplement_existing_while_cleaving(self):
@@ -7169,7 +7172,7 @@
self.assertFalse(sharder.logger.get_lines_for_level('error'))
for own_state in ShardRange.CLEAVING_STATES:
with annotate_failure(own_state):
with self.subTest(own_state=own_state):
do_test(own_state)
def test_audit_shard_root_ranges_cleaving_not_merged_while_cleaving(self):
@@ -7210,9 +7213,10 @@
for own_state in ShardRange.CLEAVING_STATES:
for acceptor_state in ShardRange.CLEAVING_STATES:
for root_state in ShardRange.STATES:
with annotate_failure(
'with states %s %s %s'
% (own_state, acceptor_state, root_state)):
with self.subTest(
own_state=own_state,
acceptor_state=acceptor_state,
root_state=root_state):
do_test(own_state, acceptor_state, root_state)
def test_audit_shard_root_ranges_overlap_not_merged_while_cleaving_1(self):
@@ -7247,7 +7251,7 @@
self.assertFalse(sharder.logger.get_lines_for_level('error'))
for own_state in ShardRange.CLEAVING_STATES:
with annotate_failure(own_state):
with self.subTest(own_state=own_state):
do_test(own_state)
def test_audit_shard_root_ranges_overlap_not_merged_while_cleaving_2(self):
@@ -7286,7 +7290,7 @@
self.assertFalse(sharder.logger.get_lines_for_level('error'))
for own_state in ShardRange.CLEAVING_STATES:
with annotate_failure(own_state):
with self.subTest(own_state=own_state):
do_test(own_state)
def test_audit_shard_root_ranges_with_gap_not_merged_while_cleaving(self):
@@ -7320,7 +7324,7 @@
self.assertFalse(sharder.logger.get_lines_for_level('error'))
for own_state in ShardRange.CLEAVING_STATES:
with annotate_failure(own_state):
with self.subTest(own_state=own_state):
do_test(own_state)
def test_audit_shard_container_ancestors_not_merged_while_sharding(self):
@@ -7419,8 +7423,8 @@
for child_deleted in (False, True):
for child_state in ShardRange.STATES:
with annotate_failure('deleted: %s, state: %s'
% (child_deleted, child_state)):
with self.subTest(child_deleted=child_deleted,
child_state=child_state):
do_test(child_deleted, child_state)
def test_audit_shard_container_children_not_merged_once_sharded(self):
@@ -8262,7 +8266,7 @@ class TestCleavingContext(BaseTestSharder):
ref = CleavingContext._make_ref(broker)
for curs in ('curs', u'curs\u00e4\u00fb'):
with annotate_failure('%r' % curs):
with self.subTest(curs=curs):
expected = curs
ctx = CleavingContext(ref, curs, 12, 11, 10, False, True)
self.assertEqual(dict(ctx), {
@@ -9148,7 +9152,7 @@ class TestSharderFunctions(BaseTestSharder):
sr = ShardRange('.shards_a/c', next(self.ts_iter), '', '',
state=state, object_count=object_count,
tombstones=100) # tombstones not considered
with annotate_failure('%s %s' % (state, object_count)):
with self.subTest(state=state, object_count=object_count):
if state == ShardRange.ACTIVE and object_count >= 10:
self.assertTrue(is_sharding_candidate(sr, 10))
else:
@@ -9186,13 +9190,13 @@
for state in ShardRange.STATES:
for object_count in (10, 11):
with annotate_failure('%s %s' % (state, object_count)):
with self.subTest(state=state, object_count=object_count):
do_check_false(state, object_count, 0)
for tombstones in (10, 11):
with annotate_failure('%s %s' % (state, tombstones)):
with self.subTest(state=state, tombstones=tombstones):
do_check_false(state, 0, tombstones)
for tombstones in (5, 6):
with annotate_failure('%s %s' % (state, tombstones)):
with self.subTest(state=state, tombstones=tombstones):
do_check_false(state, 5, tombstones)
def test_find_and_rank_whole_path_split(self):

View File

@@ -41,7 +41,6 @@ from swift.common import ring
from swift.common.storage_policy import (StoragePolicy, ECStoragePolicy,
POLICIES, EC_POLICY)
from swift.obj.reconstructor import SYNC, REVERT
from test import annotate_failure
from test.debug_logger import debug_logger
from test.unit import (patch_policies, mocked_http_conn, FabricatedRing,
@@ -5488,7 +5487,7 @@ class TestReconstructFragmentArchive(BaseTestObjectReconstructor):
2 * ring.replicas,
3 * ring.replicas,
99 * ring.replicas):
with annotate_failure(request_node_count):
with self.subTest(request_node_count=request_node_count):
self.logger.clear()
self.reconstructor.request_node_count = \
lambda replicas: request_node_count
@@ -5957,7 +5956,7 @@ class TestReconstructFragmentArchive(BaseTestObjectReconstructor):
self.assertEqual(2, reconstructor.quarantine_threshold)
for bad in ('1.1', '-1', -1, 'auto', 'bad'):
with annotate_failure(bad):
with self.subTest(option=bad):
with self.assertRaises(ValueError):
object_reconstructor.ObjectReconstructor(
{'quarantine_threshold': bad})
@@ -5988,7 +5987,7 @@ class TestReconstructFragmentArchive(BaseTestObjectReconstructor):
self.assertEqual(2, reconstructor.quarantine_age)
for bad in ('1.1', 'auto', 'bad'):
with annotate_failure(bad):
with self.subTest(option=bad):
with self.assertRaises(ValueError):
object_reconstructor.ObjectReconstructor(
{'quarantine_age': bad})
@@ -6015,7 +6014,7 @@ class TestReconstructFragmentArchive(BaseTestObjectReconstructor):
for bad in ('1.1', 1.1, 'auto', 'bad',
'2.5 * replicas', 'two * replicas'):
with annotate_failure(bad):
with self.subTest(option=bad):
with self.assertRaises(ValueError):
object_reconstructor.ObjectReconstructor(
{'request_node_count': bad})
@@ -6261,8 +6260,7 @@ class TestReconstructFragmentArchive(BaseTestObjectReconstructor):
- num_frags - 1)
other_responses = [(404, None, None)] * (num_other_resps - 1)
other_responses.append((bad_status, None, None))
with annotate_failure(
'request_node_count=%d' % request_node_count):
with self.subTest(request_node_count=request_node_count):
exc = self._do_test_reconstruct_insufficient_frags(
{'quarantine_threshold': 1,
'reclaim_age': 0,

View File

@@ -29,7 +29,6 @@ from swift.common.utils import ShardRange, Timestamp, Namespace, \
from swift.proxy import server as proxy_server
from swift.proxy.controllers.base import headers_to_container_info, \
Controller, get_container_info, get_cache_key
from test import annotate_failure
from test.unit import fake_http_connect, FakeRing, FakeMemcache, \
make_timestamp_iter
from swift.common.storage_policy import StoragePolicy
@@ -590,7 +589,7 @@ class TestGetShardedContainer(BaseTestContainerController):
self.assertEqual(len(expected_requests), len(fake_conn.requests))
for i, ((exp_path, exp_headers, exp_params), req) in enumerate(
zip(expected_requests, fake_conn.requests)):
with annotate_failure('Request check at index %d.' % i):
with self.subTest(index=i):
# strip off /sdx/0/ from path
self.assertEqual(exp_path, req['path'][7:])
got_params = dict(urllib.parse.parse_qsl(