Merge pull request #165 from jml/rerun-test
Allow tests to be run more than once
NEWS
@@ -33,6 +33,14 @@ Changes

* Add a new test dependency of testscenarios. (Robert Collins)

* ``addCleanup`` can now only be called within a test run.
  (Jonathan Lange)

* ``TestCase`` objects can now be run twice. All internal state is reset
  between runs. (Jonathan Lange)

* Last release of testtools to support Python 3.2. (Jonathan Lange)

* ``TestCase.skip`` deprecated. Use ``skipTest`` instead.
  (Jonathan Lange, #988893)

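The headline change is that a ``TestCase`` instance can now be run more than once. A minimal sketch of what that enables (the sample test class below is illustrative and not part of this diff; ``ExtendedTestResult`` and its ``_events`` log are the test doubles used by the tests further down):

from testtools import TestCase
from testtools.testresult.doubles import ExtendedTestResult

class Sample(TestCase):              # illustrative test case
    def test_addition(self):
        self.assertEqual(2, 1 + 1)

test = Sample('test_addition')
first, second = ExtendedTestResult(), ExtendedTestResult()
test.run(first)                      # internal state is reset at the start of each run
test.run(second)
assert first._events == second._events   # both runs report the same events
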
@@ -206,16 +206,7 @@ class TestCase(unittest.TestCase):
        """
        runTest = kwargs.pop('runTest', None)
        super(TestCase, self).__init__(*args, **kwargs)
        self._cleanups = []
        self._unique_id_gen = itertools.count(1)
        # Generators to ensure unique traceback ids. Maps traceback label to
        # iterators.
        self._traceback_id_gens = {}
        self.__setup_called = False
        self.__teardown_called = False
        # __details is lazy-initialized so that a constructed-but-not-run
        # TestCase is safe to use with clone_test_with_new_id.
        self.__details = None
        self._reset()
        test_method = self._get_test_method()
        if runTest is None:
            runTest = getattr(
@@ -235,6 +226,19 @@ class TestCase(unittest.TestCase):
            (Exception, self._report_error),
            ]

    def _reset(self):
        """Reset the test case as if it had never been run."""
        self._cleanups = []
        self._unique_id_gen = itertools.count(1)
        # Generators to ensure unique traceback ids. Maps traceback label to
        # iterators.
        self._traceback_id_gens = {}
        self.__setup_called = False
        self.__teardown_called = False
        # __details is lazy-initialized so that a constructed-but-not-run
        # TestCase is safe to use with clone_test_with_new_id.
        self.__details = None

    def __eq__(self, other):
        eq = getattr(unittest.TestCase, '__eq__', None)
        if eq is not None and not unittest.TestCase.__eq__(self, other):
@@ -604,6 +608,7 @@ class TestCase(unittest.TestCase):
        result.addUnexpectedSuccess(self, details=self.getDetails())

    def run(self, result=None):
        self._reset()
        try:
            run_test = self.__RunTest(
                self, self.exception_handlers, last_resort=self._report_error)

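Taken together, the hunks above move all per-run state out of ``__init__`` into ``_reset()``, and ``run()`` now calls ``_reset()`` before doing anything else, which is what lets a second ``run()`` start from a clean slate. A schematic of the pattern, with simplified names rather than the real implementation:

class ReRunnable(object):
    def __init__(self):
        self._reset()            # the constructor just delegates to the reset hook

    def _reset(self):
        # everything that accumulates during a run lives here
        self._cleanups = []
        self._details = None

    def run(self, result=None):
        self._reset()            # fresh state at the start of every run
        # ... set up, execute the test body, tear down, run cleanups ...
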
@@ -1,4 +1,4 @@
# Copyright (c) 2008-2012 testtools developers. See LICENSE for details.
# Copyright (c) 2008-2016 testtools developers. See LICENSE for details.

"""Helpers for tests."""

@@ -12,6 +12,12 @@ from extras import safe_hasattr

from testtools import TestResult
from testtools.content import StackLinesContent
from testtools.matchers import (
    AfterPreprocessing,
    Equals,
    MatchesDict,
    MatchesListwise,
    )
from testtools import runtest


@@ -24,6 +30,7 @@ try:
except Exception:
    an_exc_info = sys.exc_info()

# Deprecated: This class's attributes are somewhat non-deterministic, which
# leads to hard-to-predict tests (because Python upstream keeps changing things).
class LoggingResult(TestResult):
@@ -106,3 +113,55 @@ class FullStackRunTest(runtest.RunTest):
        return run_with_stack_hidden(
            False,
            super(FullStackRunTest, self)._run_user, fn, *args, **kwargs)


class MatchesEvents(object):
    """Match a list of test result events.

    Specify events as a data structure. Ordinary Python objects within this
    structure will be compared exactly, but you can also use matchers at any
    point.
    """

    def __init__(self, *expected):
        self._expected = expected

    def _make_matcher(self, obj):
        # This isn't very safe for general use, but is good enough to make
        # some tests in this module more readable.
        if hasattr(obj, 'match'):
            return obj
        elif isinstance(obj, tuple) or isinstance(obj, list):
            return MatchesListwise(
                [self._make_matcher(item) for item in obj])
        elif isinstance(obj, dict):
            return MatchesDict(dict(
                (key, self._make_matcher(value))
                for key, value in obj.items()))
        else:
            return Equals(obj)

    def match(self, observed):
        matcher = self._make_matcher(self._expected)
        return matcher.match(observed)


class AsText(AfterPreprocessing):
    """Match the text of a Content instance."""

    def __init__(self, matcher, annotate=True):
        super(AsText, self).__init__(
            lambda log: log.as_text(), matcher, annotate=annotate)


def raise_(exception):
    """Raise ``exception``.

    Useful for raising exceptions when it is inconvenient to use a statement
    (e.g. in a lambda).

    :param Exception exception: An exception to raise.
    :raises: Whatever exception is

    """
    raise exception

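These helpers are exercised heavily by the test changes further down. A small self-contained sketch of how they compose (the content and event data here are made up for illustration):

from testtools.content import text_content
from testtools.matchers import Contains, Equals
from testtools.tests.helpers import AsText, MatchesEvents

# AsText adapts a matcher so it runs against content.as_text().
detail = text_content('ZeroDivisionError: division by zero')
print(AsText(Contains('ZeroDivisionError')).match(detail))   # None means it matched

# MatchesEvents builds matchers recursively from plain data.
observed = [('startTest', 'case-1'), ('stopTest', 'case-1')]
matcher = MatchesEvents(
    ('startTest', Equals('case-1')),
    ('stopTest', Contains('case')),
)
print(matcher.match(observed))   # None means the event log matched
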
testtools/tests/samplecases.py (new file, 235 lines)
@@ -0,0 +1,235 @@
# Copyright (c) 2015 testtools developers. See LICENSE for details.

"""A collection of sample TestCases.

These are primarily of use in testing the test framework.
"""

from testscenarios import multiply_scenarios

from testtools import TestCase
from testtools.matchers import (
    AfterPreprocessing,
    Contains,
    Equals,
    MatchesDict,
    MatchesListwise,
)


def make_test_case(test_method_name, set_up=None, test_body=None,
                   tear_down=None, cleanups=(), pre_set_up=None,
                   post_tear_down=None):
    """Make a test case with the given behaviors.

    All callables are unary callables that receive this test as their argument.

    :param str test_method_name: The name of the test method.
    :param callable set_up: Implementation of setUp.
    :param callable test_body: Implementation of the actual test. Will be
        assigned to the test method.
    :param callable tear_down: Implementation of tearDown.
    :param cleanups: Iterable of callables that will be added as cleanups.
    :param callable pre_set_up: Called before the upcall to setUp().
    :param callable post_tear_down: Called after the upcall to tearDown().

    :return: A ``testtools.TestCase``.
    """
    set_up = set_up if set_up else _do_nothing
    test_body = test_body if test_body else _do_nothing
    tear_down = tear_down if tear_down else _do_nothing
    pre_set_up = pre_set_up if pre_set_up else _do_nothing
    post_tear_down = post_tear_down if post_tear_down else _do_nothing
    return _ConstructedTest(
        test_method_name, set_up, test_body, tear_down, cleanups,
        pre_set_up, post_tear_down,
    )

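For orientation (this example is not part of the new file): the rewritten tests later in this diff drive the factory roughly like the sketch below, with an arbitrary method name standing in for ``getUniqueString()``:

log = []
test = make_test_case(
    'test_example',                                  # arbitrary method name
    set_up=lambda case: log.append('setUp'),
    test_body=lambda case: log.append('runTest'),
    tear_down=lambda case: log.append('tearDown'),
    cleanups=[lambda case: log.append('cleanup')],
)
test.run()
print(log)   # ['setUp', 'runTest', 'tearDown', 'cleanup']
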
class _ConstructedTest(TestCase):
    """A test case defined by arguments, rather than overrides."""

    def __init__(self, test_method_name, set_up, test_body, tear_down,
                 cleanups, pre_set_up, post_tear_down):
        """Construct a test case.

        See ``make_test_case`` for full documentation.
        """
        setattr(self, test_method_name, self.test_case)
        super(_ConstructedTest, self).__init__(test_method_name)
        self._set_up = set_up
        self._test_body = test_body
        self._tear_down = tear_down
        self._test_cleanups = cleanups
        self._pre_set_up = pre_set_up
        self._post_tear_down = post_tear_down

    def setUp(self):
        self._pre_set_up(self)
        super(_ConstructedTest, self).setUp()
        for cleanup in self._test_cleanups:
            self.addCleanup(cleanup, self)
        self._set_up(self)

    def test_case(self):
        self._test_body(self)

    def tearDown(self):
        self._tear_down(self)
        super(_ConstructedTest, self).tearDown()
        self._post_tear_down(self)


def _do_nothing(case):
    pass


_success = _do_nothing


def _error(case):
    1/0  # arbitrary non-failure exception


def _failure(case):
    case.fail('arbitrary failure')


def _skip(case):
    case.skip('arbitrary skip message')


def _expected_failure(case):
    case.expectFailure('arbitrary expected failure', _failure, case)


def _unexpected_success(case):
    case.expectFailure('arbitrary unexpected success', _success, case)


behaviors = [
    ('success', _success),
    ('fail', _failure),
    ('error', _error),
    ('skip', _skip),
    ('xfail', _expected_failure),
    ('uxsuccess', _unexpected_success),
]


def _make_behavior_scenarios(stage):
    """Given a test stage, iterate over behavior scenarios for that stage.

    e.g.
    >>> list(_make_behavior_scenarios('set_up'))
    [('set_up=success', {'set_up_behavior': <function _success>}),
     ('set_up=fail', {'set_up_behavior': <function _failure>}),
     ('set_up=error', {'set_up_behavior': <function _error>}),
     ('set_up=skip', {'set_up_behavior': <function _skip>}),
     ('set_up=xfail', {'set_up_behavior': <function _expected_failure>}),
     ('set_up=uxsuccess',
      {'set_up_behavior': <function _unexpected_success>})]

    Ordering is not consistent.
    """
    return (
        ('%s=%s' % (stage, behavior),
         {'%s_behavior' % (stage,): function})
        for (behavior, function) in behaviors
    )


def make_case_for_behavior_scenario(case):
    """Given a test with a behavior scenario installed, make a TestCase."""
    cleanup_behavior = getattr(case, 'cleanup_behavior', None)
    cleanups = [cleanup_behavior] if cleanup_behavior else []
    return make_test_case(
        case.getUniqueString(),
        set_up=getattr(case, 'set_up_behavior', _do_nothing),
        test_body=getattr(case, 'body_behavior', _do_nothing),
        tear_down=getattr(case, 'tear_down_behavior', _do_nothing),
        cleanups=cleanups,
        pre_set_up=getattr(case, 'pre_set_up_behavior', _do_nothing),
        post_tear_down=getattr(case, 'post_tear_down_behavior', _do_nothing),
    )

class _SetUpFailsOnGlobalState(TestCase):
    """Fail to upcall tearDown on first run. Fail to upcall setUp after.

    This simulates a test that fails to upcall in ``setUp`` if some global
    state is broken, and fails to call ``tearDown`` when the global state
    breaks but works after that.
    """

    first_run = True

    def setUp(self):
        if not self.first_run:
            return
        super(_SetUpFailsOnGlobalState, self).setUp()

    def test_success(self):
        pass

    def tearDown(self):
        if not self.first_run:
            super(_SetUpFailsOnGlobalState, self).tearDown()
        self.__class__.first_run = False

    @classmethod
    def make_scenario(cls):
        case = cls('test_success')
        return {
            'case': case,
            'expected_first_result': _test_error_traceback(
                case, Contains('TestCase.tearDown was not called')),
            'expected_second_result': _test_error_traceback(
                case, Contains('TestCase.setUp was not called')),
        }


def _test_error_traceback(case, traceback_matcher):
    """Match result log of single test that errored out.

    ``traceback_matcher`` is applied to the text of the traceback.
    """
    return MatchesListwise([
        Equals(('startTest', case)),
        MatchesListwise([
            Equals('addError'),
            Equals(case),
            MatchesDict({
                'traceback': AfterPreprocessing(
                    lambda x: x.as_text(),
                    traceback_matcher,
                )
            })
        ]),
        Equals(('stopTest', case)),
    ])


"""
A list that can be used with testscenarios to test every deterministic sample
case that we have.
"""
deterministic_sample_cases_scenarios = multiply_scenarios(
    _make_behavior_scenarios('set_up'),
    _make_behavior_scenarios('body'),
    _make_behavior_scenarios('tear_down'),
    _make_behavior_scenarios('cleanup'),
) + [
    ('tear_down_fails_after_upcall', {
        'post_tear_down_behavior': _error,
    }),
]


"""
A list that can be used with testscenarios to test every non-deterministic
sample case that we have.
"""
nondeterministic_sample_cases_scenarios = [
    ('setup-fails-global-state', _SetUpFailsOnGlobalState.make_scenario()),
]
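The two lists are consumed through testscenarios; the ``TestRunTwice*`` classes later in this diff simply assign them to a ``scenarios`` attribute. A rough sketch of how such a class expands into individual tests, assuming the standard ``generate_scenarios`` helper from the testscenarios package (the loading hook testtools' own suite uses may differ):

import unittest
from testscenarios import generate_scenarios

# Each scenario supplies attributes such as set_up_behavior / body_behavior /
# tear_down_behavior / cleanup_behavior, which make_case_for_behavior_scenario
# reads off the scenario'd test instance.
loader = unittest.TestLoader()
suite = loader.loadTestsFromName(
    'testtools.tests.test_testcase.TestRunTwiceDeterminstic')
expanded = list(generate_scenarios(suite))
print(len(expanded))   # 6**4 stage combinations + 1 extra scenario = 1297
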
@@ -13,20 +13,21 @@ from testtools import (
    TestResult,
    )
from testtools.matchers import (
    AfterPreprocessing,
    ContainsAll,
    EndsWith,
    Equals,
    Is,
    KeysEqual,
    MatchesDict,
    MatchesException,
    MatchesListwise,
    Not,
    Raises,
    )
from testtools.runtest import RunTest
from testtools.testresult.doubles import ExtendedTestResult
from testtools.tests.helpers import (
    AsText,
    MatchesEvents,
    )
from testtools.tests.test_spinner import NeedsTwistedTestCase

assert_fails_with = try_import('testtools.deferredruntest.assert_fails_with')
@@ -43,45 +44,6 @@ log = try_import('twisted.python.log')
DelayedCall = try_import('twisted.internet.base.DelayedCall')


class MatchesEvents(object):
    """Match a list of test result events.

    Specify events as a data structure. Ordinary Python objects within this
    structure will be compared exactly, but you can also use matchers at any
    point.
    """

    def __init__(self, *expected):
        self._expected = expected

    def _make_matcher(self, obj):
        # This isn't very safe for general use, but is good enough to make
        # some tests in this module more readable.
        if hasattr(obj, 'match'):
            return obj
        elif isinstance(obj, tuple) or isinstance(obj, list):
            return MatchesListwise(
                [self._make_matcher(item) for item in obj])
        elif isinstance(obj, dict):
            return MatchesDict(dict(
                (key, self._make_matcher(value))
                for key, value in obj.items()))
        else:
            return Equals(obj)

    def match(self, observed):
        matcher = self._make_matcher(self._expected)
        return matcher.match(observed)


class AsText(AfterPreprocessing):
    """Match the text of a Content instance."""

    def __init__(self, matcher, annotate=True):
        super(AsText, self).__init__(
            lambda log: log.as_text(), matcher, annotate=annotate)


class X(object):
    """Tests that we run as part of our tests, nested to avoid discovery."""

@@ -9,7 +9,7 @@ from testtools import (
    TestCase,
    TestResult,
    )
from testtools.matchers import MatchesException, Is, Raises
from testtools.matchers import HasLength, MatchesException, Is, Raises
from testtools.testresult.doubles import ExtendedTestResult
from testtools.tests.helpers import FullStackRunTest

@@ -68,9 +68,13 @@ class TestRunTest(TestCase):
        self.assertEqual(['foo'], log)

    def test__run_prepared_result_does_not_mask_keyboard(self):
        tearDownRuns = []
        class Case(TestCase):
            def test(self):
                raise KeyboardInterrupt("go")
            def _run_teardown(self, result):
                tearDownRuns.append(self)
                return super(Case, self)._run_teardown(result)
        case = Case('test')
        run = RunTest(case)
        run.result = ExtendedTestResult()
@@ -79,7 +83,7 @@ class TestRunTest(TestCase):
        self.assertEqual(
            [('startTest', case), ('stopTest', case)], run.result._events)
        # tearDown is still run though!
        self.assertEqual(True, getattr(case, '_TestCase__teardown_called'))
        self.assertThat(tearDownRuns, HasLength(1))

    def test__run_user_calls_onException(self):
        case = self.make_case()

@@ -1,4 +1,4 @@
# Copyright (c) 2008-2012 testtools developers. See LICENSE for details.
# Copyright (c) 2008-2015 testtools developers. See LICENSE for details.

"""Tests for extensions to the base test library."""

@@ -30,6 +30,7 @@ from testtools.content import (
    )
from testtools.matchers import (
    Annotate,
    ContainsAll,
    DocTestMatches,
    Equals,
    HasLength,
@@ -48,9 +49,18 @@ from testtools.testresult.doubles import (
    )
from testtools.tests.helpers import (
    an_exc_info,
    AsText,
    FullStackRunTest,
    LoggingResult,
    MatchesEvents,
    raise_,
    )
from testtools.tests.samplecases import (
    deterministic_sample_cases_scenarios,
    make_case_for_behavior_scenario,
    make_test_case,
    nondeterministic_sample_cases_scenarios,
    )


class TestPlaceHolder(TestCase):
@@ -758,76 +768,42 @@ class TestAssertions(TestCase):
class TestAddCleanup(TestCase):
    """Tests for TestCase.addCleanup."""

    run_test_with = FullStackRunTest
    run_tests_with = FullStackRunTest

    class LoggingTest(TestCase):
        """A test that logs calls to setUp, runTest and tearDown."""

        def setUp(self):
            TestCase.setUp(self)
            self._calls = ['setUp']

        def brokenSetUp(self):
            # A tearDown that deliberately fails.
            self._calls = ['brokenSetUp']
            raise RuntimeError('Deliberate Failure')

        def runTest(self):
            self._calls.append('runTest')

        def brokenTest(self):
            raise RuntimeError('Deliberate broken test')

        def tearDown(self):
            self._calls.append('tearDown')
            TestCase.tearDown(self)

    def setUp(self):
        TestCase.setUp(self)
        self._result_calls = []
        self.test = TestAddCleanup.LoggingTest('runTest')
        self.logging_result = LoggingResult(self._result_calls)

    def assertErrorLogEqual(self, messages):
        self.assertEqual(messages, [call[0] for call in self._result_calls])

    def assertTestLogEqual(self, messages):
        """Assert that the call log equals 'messages'."""
        case = self._result_calls[0][1]
        self.assertEqual(messages, case._calls)

    def logAppender(self, message):
        """A cleanup that appends 'message' to the tests log.

        Cleanups are callables that are added to a test by addCleanup. To
        verify that our cleanups run in the right order, we add strings to a
        list that acts as a log. This method returns a cleanup that will add
        the given message to that log when run.
        """
        self.test._calls.append(message)

    def test_fixture(self):
        # A normal run of self.test logs 'setUp', 'runTest' and 'tearDown'.
        # This test doesn't test addCleanup itself, it just sanity checks the
        # fixture.
        self.test.run(self.logging_result)
        self.assertTestLogEqual(['setUp', 'runTest', 'tearDown'])

    def test_cleanup_run_before_tearDown(self):
        # Cleanup functions added with 'addCleanup' are called before tearDown
    def test_cleanup_run_after_tearDown(self):
        # Cleanup functions added with 'addCleanup' are called after tearDown
        # runs.
        self.test.addCleanup(self.logAppender, 'cleanup')
        self.test.run(self.logging_result)
        self.assertTestLogEqual(['setUp', 'runTest', 'tearDown', 'cleanup'])
        log = []
        test = make_test_case(
            self.getUniqueString(),
            set_up=lambda _: log.append('setUp'),
            test_body=lambda _: log.append('runTest'),
            tear_down=lambda _: log.append('tearDown'),
            cleanups=[lambda _: log.append('cleanup')],
        )
        test.run()
        self.assertThat(
            log, Equals(['setUp', 'runTest', 'tearDown', 'cleanup']))

    def test_add_cleanup_called_if_setUp_fails(self):
        # Cleanup functions added with 'addCleanup' are called even if setUp
        # fails. Note that tearDown has a different behavior: it is only
        # called when setUp succeeds.
        self.test.setUp = self.test.brokenSetUp
        self.test.addCleanup(self.logAppender, 'cleanup')
        self.test.run(self.logging_result)
        self.assertTestLogEqual(['brokenSetUp', 'cleanup'])
        log = []

        def broken_set_up(ignored):
            log.append('brokenSetUp')
            raise RuntimeError('Deliberate broken setUp')

        test = make_test_case(
            self.getUniqueString(),
            set_up=broken_set_up,
            test_body=lambda _: log.append('runTest'),
            tear_down=lambda _: log.append('tearDown'),
            cleanups=[lambda _: log.append('cleanup')],
        )
        test.run()
        self.assertThat(log, Equals(['brokenSetUp', 'cleanup']))

    def test_addCleanup_called_in_reverse_order(self):
        # Cleanup functions added with 'addCleanup' are called in reverse
@@ -841,46 +817,82 @@ class TestAddCleanup(TestCase):
        #
        # When this happens, we generally want to clean up the second resource
        # before the first one, since the second depends on the first.
        self.test.addCleanup(self.logAppender, 'first')
        self.test.addCleanup(self.logAppender, 'second')
        self.test.run(self.logging_result)
        self.assertTestLogEqual(
            ['setUp', 'runTest', 'tearDown', 'second', 'first'])
        log = []
        test = make_test_case(
            self.getUniqueString(),
            set_up=lambda _: log.append('setUp'),
            test_body=lambda _: log.append('runTest'),
            tear_down=lambda _: log.append('tearDown'),
            cleanups=[
                lambda _: log.append('first'),
                lambda _: log.append('second'),
            ],
        )
        test.run()
        self.assertThat(
            log, Equals(['setUp', 'runTest', 'tearDown', 'second', 'first']))

    def test_tearDown_runs_after_cleanup_failure(self):
    def test_tearDown_runs_on_cleanup_failure(self):
        # tearDown runs even if a cleanup function fails.
        self.test.addCleanup(lambda: 1/0)
        self.test.run(self.logging_result)
        self.assertTestLogEqual(['setUp', 'runTest', 'tearDown'])
        log = []
        test = make_test_case(
            self.getUniqueString(),
            set_up=lambda _: log.append('setUp'),
            test_body=lambda _: log.append('runTest'),
            tear_down=lambda _: log.append('tearDown'),
            cleanups=[lambda _: 1/0],
        )
        test.run()
        self.assertThat(log, Equals(['setUp', 'runTest', 'tearDown']))

    def test_cleanups_continue_running_after_error(self):
        # All cleanups are always run, even if one or two of them fail.
        self.test.addCleanup(self.logAppender, 'first')
        self.test.addCleanup(lambda: 1/0)
        self.test.addCleanup(self.logAppender, 'second')
        self.test.run(self.logging_result)
        self.assertTestLogEqual(
            ['setUp', 'runTest', 'tearDown', 'second', 'first'])
        log = []
        test = make_test_case(
            self.getUniqueString(),
            set_up=lambda _: log.append('setUp'),
            test_body=lambda _: log.append('runTest'),
            tear_down=lambda _: log.append('tearDown'),
            cleanups=[
                lambda _: log.append('first'),
                lambda _: 1/0,
                lambda _: log.append('second'),
            ],
        )
        test.run()
        self.assertThat(
            log, Equals(['setUp', 'runTest', 'tearDown', 'second', 'first']))

    def test_error_in_cleanups_are_captured(self):
        # If a cleanup raises an error, we want to record it and fail the
        # test, even though we go on to run other cleanups.
        self.test.addCleanup(lambda: 1/0)
        self.test.run(self.logging_result)
        self.assertErrorLogEqual(['startTest', 'addError', 'stopTest'])
        test = make_test_case(self.getUniqueString(), cleanups=[lambda _: 1/0])
        log = []
        test.run(ExtendedTestResult(log))
        self.assertThat(
            log, MatchesEvents(
                ('startTest', test),
                ('addError', test, {
                    'traceback': AsText(ContainsAll([
                        'Traceback (most recent call last):',
                        'ZeroDivisionError',
                    ])),
                }),
                ('stopTest', test),
            )
        )

    def test_keyboard_interrupt_not_caught(self):
        # If a cleanup raises KeyboardInterrupt, it gets reraised.
        def raiseKeyboardInterrupt():
            raise KeyboardInterrupt()
        self.test.addCleanup(raiseKeyboardInterrupt)
        self.assertThat(lambda: self.test.run(self.logging_result),
                        Raises(MatchesException(KeyboardInterrupt)))
        test = make_test_case(
            self.getUniqueString(), cleanups=[
                lambda _: raise_(KeyboardInterrupt())])
        self.assertThat(test.run, Raises(MatchesException(KeyboardInterrupt)))

    def test_all_errors_from_MultipleExceptions_reported(self):
        # When a MultipleExceptions exception is caught, all the errors are
        # reported.
        def raiseMany():
        def raise_many(ignored):
            try:
                1/0
            except Exception:
@@ -890,37 +902,86 @@ class TestAddCleanup(TestCase):
            except Exception:
                exc_info2 = sys.exc_info()
            raise MultipleExceptions(exc_info1, exc_info2)
        self.test.addCleanup(raiseMany)
        self.logging_result = ExtendedTestResult()
        self.test.run(self.logging_result)
        self.assertEqual(['startTest', 'addError', 'stopTest'],
                         [event[0] for event in self.logging_result._events])
        self.assertEqual(set(['traceback', 'traceback-1']),
                         set(self.logging_result._events[1][2].keys()))

        test = make_test_case(self.getUniqueString(), cleanups=[raise_many])
        log = []
        test.run(ExtendedTestResult(log))
        self.assertThat(
            log, MatchesEvents(
                ('startTest', test),
                ('addError', test, {
                    'traceback': AsText(ContainsAll([
                        'Traceback (most recent call last):',
                        'ZeroDivisionError',
                    ])),
                    'traceback-1': AsText(ContainsAll([
                        'Traceback (most recent call last):',
                        'ZeroDivisionError',
                    ])),
                }),
                ('stopTest', test),
            )
        )

    def test_multipleCleanupErrorsReported(self):
        # Errors from all failing cleanups are reported as separate backtraces.
        self.test.addCleanup(lambda: 1/0)
        self.test.addCleanup(lambda: 1/0)
        self.logging_result = ExtendedTestResult()
        self.test.run(self.logging_result)
        self.assertEqual(['startTest', 'addError', 'stopTest'],
                         [event[0] for event in self.logging_result._events])
        self.assertEqual(set(['traceback', 'traceback-1']),
                         set(self.logging_result._events[1][2].keys()))
        test = make_test_case(self.getUniqueString(), cleanups=[
            lambda _: 1/0,
            lambda _: 1/0,
        ])
        log = []
        test.run(ExtendedTestResult(log))
        self.assertThat(
            log, MatchesEvents(
                ('startTest', test),
                ('addError', test, {
                    'traceback': AsText(ContainsAll([
                        'Traceback (most recent call last):',
                        'ZeroDivisionError',
                    ])),
                    'traceback-1': AsText(ContainsAll([
                        'Traceback (most recent call last):',
                        'ZeroDivisionError',
                    ])),
                }),
                ('stopTest', test),
            )
        )

    def test_multipleErrorsCoreAndCleanupReported(self):
        # Errors from all failing cleanups are reported, with stopTest,
        # startTest inserted.
        self.test = TestAddCleanup.LoggingTest('brokenTest')
        self.test.addCleanup(lambda: 1/0)
        self.test.addCleanup(lambda: 1/0)
        self.logging_result = ExtendedTestResult()
        self.test.run(self.logging_result)
        self.assertEqual(['startTest', 'addError', 'stopTest'],
                         [event[0] for event in self.logging_result._events])
        self.assertEqual(set(['traceback', 'traceback-1', 'traceback-2']),
                         set(self.logging_result._events[1][2].keys()))
        test = make_test_case(
            self.getUniqueString(),
            test_body=lambda _: raise_(
                RuntimeError('Deliberately broken test')),
            cleanups=[
                lambda _: 1/0,
                lambda _: 1/0,
            ]
        )
        log = []
        test.run(ExtendedTestResult(log))
        self.assertThat(
            log, MatchesEvents(
                ('startTest', test),
                ('addError', test, {
                    'traceback': AsText(ContainsAll([
                        'Traceback (most recent call last):',
                        'RuntimeError: Deliberately broken test',
                    ])),
                    'traceback-1': AsText(ContainsAll([
                        'Traceback (most recent call last):',
                        'ZeroDivisionError',
                    ])),
                    'traceback-2': AsText(ContainsAll([
                        'Traceback (most recent call last):',
                        'ZeroDivisionError',
                    ])),
                }),
                ('stopTest', test),
            )
        )


class TestRunTestUsage(TestCase):
@@ -1297,6 +1358,51 @@ class TestSetupTearDown(TestCase):
            ELLIPSIS))


class TestRunTwiceDeterminstic(TestCase):
    """Can we run the same test case twice?"""

    # XXX: Reviewer, please note that all of the other test cases in this
    # module are doing this wrong, saying 'run_test_with' instead of
    # 'run_tests_with'.
    run_tests_with = FullStackRunTest

    scenarios = deterministic_sample_cases_scenarios

    def test_runTwice(self):
        # Tests that are intrinsically deterministic can be run twice and
        # produce exactly the same results each time, without need for
        # explicit resetting or reconstruction.
        test = make_case_for_behavior_scenario(self)
        first_result = ExtendedTestResult()
        test.run(first_result)
        second_result = ExtendedTestResult()
        test.run(second_result)
        self.assertEqual(first_result._events, second_result._events)


class TestRunTwiceNondeterministic(TestCase):
    """Can we run the same test case twice?

    Separate suite for non-deterministic tests, which require more complicated
    assertions and scenarios.
    """

    run_tests_with = FullStackRunTest

    scenarios = nondeterministic_sample_cases_scenarios

    def test_runTwice(self):
        test = self.case
        first_result = ExtendedTestResult()
        test.run(first_result)
        second_result = ExtendedTestResult()
        test.run(second_result)
        self.expectThat(
            first_result._events, self.expected_first_result)
        self.assertThat(
            second_result._events, self.expected_second_result)


require_py27_minimum = skipIf(
    sys.version < '2.7',
    "Requires python 2.7 or greater"
@@ -1510,59 +1616,82 @@ class TestOnException(TestCase):

class TestPatchSupport(TestCase):

    run_test_with = FullStackRunTest
    run_tests_with = FullStackRunTest

    class Case(TestCase):
        def test(self):
            pass

    def run_test(self, test_body):
        """Run a test with ``test_body`` as the body.

        :return: Whatever ``test_body`` returns.
        """
        log = []
        def wrapper(case):
            log.append(test_body(case))
        case = make_test_case(self.getUniqueString(), test_body=wrapper)
        case.run()
        return log[0]

    def test_patch(self):
        # TestCase.patch masks obj.attribute with the new value.
        self.foo = 'original'
        test = self.Case('test')
        test.patch(self, 'foo', 'patched')
        self.assertEqual('patched', self.foo)
        def test_body(case):
            case.patch(self, 'foo', 'patched')
            return self.foo

        result = self.run_test(test_body)
        self.assertThat(result, Equals('patched'))

    def test_patch_restored_after_run(self):
        # TestCase.patch masks obj.attribute with the new value, but restores
        # the original value after the test is finished.
        self.foo = 'original'
        test = self.Case('test')
        test.patch(self, 'foo', 'patched')
        test.run()
        self.assertEqual('original', self.foo)
        self.run_test(lambda case: case.patch(self, 'foo', 'patched'))
        self.assertThat(self.foo, Equals('original'))

    def test_successive_patches_apply(self):
        # TestCase.patch can be called multiple times per test. Each time you
        # call it, it overrides the original value.
        self.foo = 'original'
        test = self.Case('test')
        test.patch(self, 'foo', 'patched')
        test.patch(self, 'foo', 'second')
        self.assertEqual('second', self.foo)
        def test_body(case):
            case.patch(self, 'foo', 'patched')
            case.patch(self, 'foo', 'second')
            return self.foo

        result = self.run_test(test_body)
        self.assertThat(result, Equals('second'))

    def test_successive_patches_restored_after_run(self):
        # TestCase.patch restores the original value, no matter how many times
        # it was called.
        self.foo = 'original'
        test = self.Case('test')
        test.patch(self, 'foo', 'patched')
        test.patch(self, 'foo', 'second')
        test.run()
        self.assertEqual('original', self.foo)
        def test_body(case):
            case.patch(self, 'foo', 'patched')
            case.patch(self, 'foo', 'second')
            return self.foo

        self.run_test(test_body)
        self.assertThat(self.foo, Equals('original'))

    def test_patch_nonexistent_attribute(self):
        # TestCase.patch can be used to patch a non-existent attribute.
        test = self.Case('test')
        test.patch(self, 'doesntexist', 'patched')
        self.assertEqual('patched', self.doesntexist)
        def test_body(case):
            case.patch(self, 'doesntexist', 'patched')
            return self.doesntexist

        result = self.run_test(test_body)
        self.assertThat(result, Equals('patched'))

    def test_restore_nonexistent_attribute(self):
        # TestCase.patch can be used to patch a non-existent attribute, after
        # the test run, the attribute is then removed from the object.
        test = self.Case('test')
        test.patch(self, 'doesntexist', 'patched')
        test.run()
        def test_body(case):
            case.patch(self, 'doesntexist', 'patched')
            return self.doesntexist

        self.run_test(test_body)
        marker = object()
        value = getattr(self, 'doesntexist', marker)
        self.assertIs(marker, value)