Use unittest.TestCase instead of testtools.TestCase

We do not use many of testtools.TestCase's advanced features, so there is
no need to install it.
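
For illustration, a minimal self-contained sketch of the conversion pattern
applied throughout this change (do_something and ExampleTestCase below are
hypothetical, not Rally code): testtools' assertRaises returns the raised
exception, while plain unittest uses assertRaises as a context manager and
exposes the exception via the context's "exception" attribute:

    import unittest


    def do_something():
        # hypothetical helper that always fails, used only to show the pattern
        raise ValueError("boom")


    class ExampleTestCase(unittest.TestCase):  # previously: testtools.TestCase

        def test_error_message(self):
            # testtools style (being removed):
            #     err = self.assertRaises(ValueError, do_something)
            #     self.assertIn("boom", str(err))
            # plain unittest style: assertRaises as a context manager
            with self.assertRaises(ValueError) as e_ctx:
                do_something()
            self.assertIn("boom", str(e_ctx.exception))


    if __name__ == "__main__":
        unittest.main()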

Change-Id: Ieca6b98b056c268020449cee5dd760f37077de1a
Andriy Kurilin 2024-04-28 23:17:17 -04:00
parent e5d9fc8e63
commit cd78ec4b1b
19 changed files with 90 additions and 132 deletions

View File

@@ -14,9 +14,6 @@ pytest-html # MIT
pytest-xdist # MIT
ddt # MIT
testtools # MIT
testresources # UNKNOWN
docutils # BSD License/GNU General Public License (GPL)/Python Software Foundation License
Pygments # BSD-2-Clause

View File

@@ -16,7 +16,7 @@ All internal methods should be fully covered by unit tests with a reasonable moc
About Rally unit tests:
- All `unit tests <http://en.wikipedia.org/wiki/Unit_testing>`_ are located inside /tests/unit/*
- Tests are written on top of: *testtools* and *mock* libs
- Tests are written on top of: *unittest* lib
- `Tox <https://tox.readthedocs.org/en/latest/>`_ is used to run unit tests
@@ -25,20 +25,20 @@ To run unit tests locally::
$ pip install tox
$ tox
To run py27, py34, py35 or pep8 only::
To run py311 or pep8 only::
$ tox -e <name>
# NOTE: <name> is one of py27, py34, py35 or pep8
# NOTE: <name> is one of py311 or pep8
To run py27/py34/py35 against mysql or psql
To run py311 against mysql or psql
$ export RALLY_UNITTEST_DB_URL="mysql://user:secret@localhost/rally"
$ tox -epy27
$ tox -epy311
To run specific test of py27/py34/py35::
To run specific test of py311::
$ tox -e py27 -- tests.unit.test_osclients
$ tox -e py311 -- tests.unit.test_osclients
To get test coverage::

View File

@@ -11,13 +11,12 @@
# under the License.
import os
import testtools
import unittest
from tests.functional import utils
class DeploymentTestCase(testtools.TestCase):
class DeploymentTestCase(unittest.TestCase):
def test_create_deployment_from_env(self):
os.environ.update(

View File

@@ -16,13 +16,12 @@
import json
import os
import tempfile
import testtools
import unittest
from tests.functional import utils
class EnvTestCase(testtools.TestCase):
class EnvTestCase(unittest.TestCase):
def test_create_no_spec(self):
rally = utils.Rally()

View File

@@ -14,13 +14,12 @@
# under the License.
import subprocess
import testtools
import unittest
from rally.utils import encodeutils
class CLITestCase(testtools.TestCase):
class CLITestCase(unittest.TestCase):
def test_rally_cli(self):
try:

View File

@@ -13,12 +13,12 @@
# License for the specific language governing permissions and limitations
# under the License.
import testtools
import unittest
from tests.functional import utils
class PluginTestCase(testtools.TestCase):
class PluginTestCase(unittest.TestCase):
def test_show_one(self):
rally = utils.Rally()
@@ -30,8 +30,9 @@ class PluginTestCase(testtools.TestCase):
def test_show_multiple(self):
rally = utils.Rally()
result = self.assertRaises(utils.RallyCliError,
rally, "plugin show Dummy")
with self.assertRaises(utils.RallyCliError) as e_ctx:
rally("plugin show Dummy")
result = e_ctx.exception
self.assertIn("Multiple plugins found:", result.output)
self.assertIn("Dummy.dummy", result.output)
self.assertIn("Dummy.dummy_exception", result.output)
@@ -40,22 +41,22 @@ class PluginTestCase(testtools.TestCase):
def test_show_not_found(self):
rally = utils.Rally()
name = "Dummy666666"
result = self.assertRaises(utils.RallyCliError,
rally, "plugin show %s" % name)
self.assertIn("Plugin %s not found" % name, result.output)
with self.assertRaises(utils.RallyCliError) as e_ctx:
rally(f"plugin show {name}")
self.assertIn("Plugin %s not found" % name, e_ctx.exception.output)
def test_show_not_found_in_specific_platform(self):
rally = utils.Rally()
name = "Dummy"
platform = "non_existing"
result = self.assertRaises(
utils.RallyCliError,
rally, "plugin show --name %(name)s --platform %(platform)s"
% {"name": name, "platform": platform})
with self.assertRaises(utils.RallyCliError) as e_ctx:
rally(f"plugin show --name {name} --platform {platform}")
self.assertIn(
"Plugin %(name)s@%(platform)s not found"
% {"name": name, "platform": platform},
result.output)
e_ctx.exception.output)
def test_list(self):
rally = utils.Rally()

View File

@@ -18,11 +18,11 @@ import os
import re
import threading
import time
import unittest
from unittest import mock
import jsonschema
import pytest
import testtools
from rally import api
from tests.functional import utils
@@ -31,7 +31,7 @@ from tests.functional import utils
FAKE_TASK_UUID = "87ab639d-4968-4638-b9a1-07774c32484a"
class TaskTestCase(testtools.TestCase):
class TaskTestCase(unittest.TestCase):
def _get_sample_task_config(self):
return {
@@ -210,11 +210,10 @@ class TaskTestCase(testtools.TestCase):
def test_start_with_empty_config(self):
rally = utils.Rally()
config = utils.TaskConfig(None)
err = self.assertRaises(
utils.RallyCliError,
rally, "task start --task %s" % config.filename)
with self.assertRaises(utils.RallyCliError) as e_ctx:
rally(f"task start --task {config.filename}")
self.assertIn("Task config is invalid: `It is empty`",
err.output)
e_ctx.exception.output)
def test_results(self):
rally = utils.Rally()
@@ -265,11 +264,12 @@ class TaskTestCase(testtools.TestCase):
def test_report_with_wrong_task_id(self):
rally = utils.Rally()
e = self.assertRaises(utils.RallyCliError,
rally, "task report --uuid %s" % FAKE_TASK_UUID)
with self.assertRaises(utils.RallyCliError) as e_ctx:
rally(f"task report --uuid {FAKE_TASK_UUID}")
self.assertIn(
"Record for uuid: %s not found in table task" % FAKE_TASK_UUID,
str(e))
f"Record for uuid: {FAKE_TASK_UUID} not found in table task",
str(e_ctx.exception))
def test_sla_check_with_wrong_task_id(self):
rally = utils.Rally()
@@ -912,16 +912,14 @@ class TaskTestCase(testtools.TestCase):
}
self._test_start_abort_on_sla_failure(cfg, times)
def _start_task_in_new_thread(self, rally, cfg, report_file):
def _start_task_in_new_thread(self, rally, cfg, suffix):
deployment_id = utils.get_global("RALLY_DEPLOYMENT", rally.env)
config = utils.TaskConfig(cfg)
cmd = (("task start --task %(task_file)s "
"--deployment %(deployment_id)s") %
{"task_file": config.filename,
"deployment_id": deployment_id})
report_path = os.path.join(
os.environ.get("REPORTS_ROOT", "rally-cli-output-files"),
"TaskTestCase", report_file)
report_path = rally.gen_report_path(suffix=suffix)
task = threading.Thread(target=rally, args=(cmd, ),
kwargs={"report_path": report_path})
task.start()
@@ -950,7 +948,7 @@ class TaskTestCase(testtools.TestCase):
}
rally = utils.Rally()
task, uuid = self._start_task_in_new_thread(
rally, cfg, "test_abort-thread_with_abort.txt")
rally, cfg, "-thread_with_abort")
rally("task abort %s" % uuid)
task.join()
results = rally("task results", getjson=True)
@@ -984,7 +982,7 @@ class TaskTestCase(testtools.TestCase):
}
rally = utils.Rally()
task, uuid = self._start_task_in_new_thread(
rally, cfg, "test_abort_soft-thread_with_soft_abort.txt")
rally, cfg, suffix="-thread_with_soft_abort")
rally("task abort --soft")
task.join()
results = rally("task results", getjson=True)
@@ -1101,7 +1099,7 @@ class TaskTestCase(testtools.TestCase):
"task restart --scenario fake.fake_scenario")
class SLATestCase(testtools.TestCase):
class SLATestCase(unittest.TestCase):
def _get_sample_task_config(self, max_seconds_per_iteration=4,
failure_rate_max=0):
@@ -1128,12 +1126,11 @@ class SLATestCase(testtools.TestCase):
rally = utils.Rally()
cfg = self._get_sample_task_config(max_seconds_per_iteration=0.001)
config = utils.TaskConfig(cfg)
err = self.assertRaises(
utils.RallyCliError,
rally, "task start --task %s" % config.filename)
output = err.output
with self.assertRaises(utils.RallyCliError) as e_ctx:
rally(f"task start --task {config.filename}")
self.assertIn("At least one workload did not pass SLA criteria.",
output)
e_ctx.exception.output)
self.assertRaises(utils.RallyCliError, rally, "task sla-check")
def test_sla_success(self):
@@ -1155,7 +1152,7 @@ class SLATestCase(testtools.TestCase):
self.assertEqual(expected, data)
class SLAExtraFlagsTestCase(testtools.TestCase):
class SLAExtraFlagsTestCase(unittest.TestCase):
def test_abort_on_sla_fail(self):
rally = utils.Rally()
@@ -1187,9 +1184,9 @@ class SLAExtraFlagsTestCase(testtools.TestCase):
"detail": mock.ANY,
"pos": 0, "status": "FAIL"}
]
e = self.assertRaises(utils.RallyCliError,
rally, "task sla-check --json", getjson=True)
self.assertEqual(expected, json.loads(e.output))
with self.assertRaises(utils.RallyCliError) as e_ctx:
rally("task sla-check --json", getjson=True)
self.assertEqual(expected, json.loads(e_ctx.exception.output))
def _test_broken_context(self, runner):
rally = utils.Rally()
@@ -1218,9 +1215,9 @@ class SLAExtraFlagsTestCase(testtools.TestCase):
"detail": mock.ANY,
"pos": 0, "status": "FAIL"}
]
e = self.assertRaises(utils.RallyCliError,
rally, "task sla-check --json", getjson=True)
self.assertEqual(expected, json.loads(e.output))
with self.assertRaises(utils.RallyCliError) as e_ctx:
rally("task sla-check --json", getjson=True)
self.assertEqual(expected, json.loads(e_ctx.exception.output))
def test_broken_context_with_constant_runner(self):
self._test_broken_context({"type": "constant",
@@ -1234,7 +1231,7 @@ class SLAExtraFlagsTestCase(testtools.TestCase):
"timeout": 6})
class SLAPerfDegrTestCase(testtools.TestCase):
class SLAPerfDegrTestCase(unittest.TestCase):
def _get_sample_task_config(self, max_degradation=500):
return {
@@ -1263,12 +1260,10 @@ class SLAPerfDegrTestCase(testtools.TestCase):
rally = utils.Rally()
cfg = self._get_sample_task_config(max_degradation=1)
config = utils.TaskConfig(cfg)
err = self.assertRaises(
utils.RallyCliError,
rally, "task start --task %s" % config.filename)
output = err.output
with self.assertRaises(utils.RallyCliError) as e_ctx:
rally(f"task start --task {config.filename}")
self.assertIn("At least one workload did not pass SLA criteria.",
output)
e_ctx.exception.output)
self.assertRaises(utils.RallyCliError, rally, "task sla-check")
def test_sla_success(self):
@@ -1286,7 +1281,7 @@ class SLAPerfDegrTestCase(testtools.TestCase):
self.assertEqual(expected, data)
class HookTestCase(testtools.TestCase):
class HookTestCase(unittest.TestCase):
def setUp(self):
super(HookTestCase, self).setUp()

View File

@@ -14,13 +14,12 @@
# under the License.
import re
import testtools
import unittest
from tests.functional import utils
class VerifyTestCase(testtools.TestCase):
class VerifyTestCase(unittest.TestCase):
def test_list_plugins(self):
rally = utils.Rally(plugin_path="tests/functional/extra")

View File

@@ -15,13 +15,12 @@
import os
import subprocess
import testtools
import unittest
from tests.functional import utils
class LibAPITestCase(testtools.TestCase):
class LibAPITestCase(unittest.TestCase):
def test_rally_lib(self):
rally = utils.Rally(force_new_db=True)

View File

@@ -702,7 +702,7 @@ class TaskCommandsTestCase(test.TestCase):
self.assertIsNone(self.task.results(self.fake_api, task_id))
self.assertEqual(1, mock_json_dumps.call_count)
self.assertEqual(1, len(mock_json_dumps.call_args[0]))
self.assertSequenceEqual(result, mock_json_dumps.call_args[0][0])
self.assertEqual(list(result), mock_json_dumps.call_args[0][0])
self.assertEqual({"sort_keys": False, "indent": 4},
mock_json_dumps.call_args[1])
self.fake_api.task.get.assert_called_once_with(

View File

@@ -37,7 +37,7 @@ class SubunitParserTestCase(test.TestCase):
skipped_test = "test_foo.SimpleTestCase.test_skip_something"
self.assertEqual(result.totals["skipped"], len(skipped_tests))
self.assertSequenceEqual([skipped_test], skipped_tests.keys())
self.assertSequenceEqual([skipped_test], list(skipped_tests))
self.assertEqual(
{"status": "skip", "reason": "This should be skipped.",
"duration": "0.000", "name": skipped_test, "tags": [],

View File

@@ -382,7 +382,7 @@ class TaskTestCase(test.TestCase):
mock_validate_output.return_value = validate_output_return_value
self.assertEqual(expected,
task.result_has_valid_schema(data),
message=repr(data))
msg=repr(data))
if validate_output_calls:
mock_validate_output.assert_has_calls(
[mock.call(*args) for args in validate_output_calls],

View File

@@ -28,11 +28,11 @@ class RegisterOptsTestCase(test.TestCase):
opts.register_options_from_path("unexisting.path.without.method.name")
self.assertFalse(mock_register_opts.called)
self.assertIsEmpty(opts._registered_paths)
self.assertEqual(0, len(opts._registered_paths))
opts.register_options_from_path("unexisting.path:method_name")
self.assertFalse(mock_register_opts.called)
self.assertIsEmpty(opts._registered_paths)
self.assertEqual(0, len(opts._registered_paths))
opts.register_options_from_path(
"tests.unit.common.test_opts:fake_list_opts")

View File

@@ -18,11 +18,11 @@ import string
import sys
import threading
import time
import unittest
from unittest import mock
import ddt
import pytest
import testtools
from rally.common import utils
from rally import exceptions
@@ -125,8 +125,8 @@ def module_level_method():
class MethodClassTestCase(test.TestCase):
@testtools.skipIf(sys.version_info > (2, 9), "Problems with access to "
"class from <locals>")
@unittest.skipIf(sys.version_info > (2, 9), "Problems with access to "
"class from <locals>")
def test_method_class_for_class_level_method(self):
class A(object):
def m(self):

View File

@@ -14,11 +14,10 @@ import fnmatch
import io
import os
import re
import testtools
import unittest
class TestFormat(testtools.TestCase):
class TestFormat(unittest.TestCase):
def _check_lines_wrapping(self, doc_file, raw):
code_block = False
text_inside_simple_tables = False

View File

@@ -34,7 +34,7 @@ class TestrContextTestCase(test.TestCase):
def assertEqualCmd(self, expected, actual, msg="", stestr=False):
cmd = ["stestr" if stestr else "testr", "run", "--subunit"]
cmd.extend(expected)
self.assertEqual(cmd, actual, message=msg)
self.assertEqual(cmd, actual, msg=msg)
def test_setup_with_concurrency(self):
# default behaviour

View File

@@ -13,10 +13,6 @@
# License for the specific language governing permissions and limitations
# under the License.
import sys
import testtools
from rally import exceptions
from rally.task import functional
from tests.unit import test
@@ -113,8 +109,6 @@ class FunctionalMixinTestCase(test.TestCase):
a.assertGreater,
len(["1", "2"]), len(["3", "4", "5"]))
@testtools.skipIf(sys.version_info < (2, 7),
"assertRaises as context not supported")
def test_assert_with_custom_message(self):
class A(functional.FunctionalMixin):
def __init__(self):

View File

@@ -16,11 +16,11 @@
import fixtures
from fixtures._fixtures.tempdir import TempDir
import os
import unittest
from unittest import mock
import uuid
from oslo_config import fixture as cfg_fixture # noqa N311
import testtools
from rally.common import db
from rally import plugins
@@ -48,7 +48,7 @@ class DatabaseFixture(cfg_fixture.Config):
db.schema.schema_create()
class TestCase(testtools.TestCase):
class TestCase(fixtures.TestWithFixtures, unittest.TestCase):
"""Test case base class for all unit tests."""
def __init__(self, *args, **kwargs):
@@ -67,8 +67,7 @@ class TestCase(testtools.TestCase):
self.useFixture(TempHomeDir())
def _test_atomic_action_timer(self, atomic_actions, name, count=1,
parent=[]):
parent=None):
if parent:
is_found = False
for action in atomic_actions:
@@ -94,18 +93,17 @@ class TestCase(testtools.TestCase):
% {"name": name, "count": count,
"actual_count": actual_count})
def assertSequenceEqual(self, iterable_1, iterable_2, msg=None):
self.assertEqual(tuple(iterable_1), tuple(iterable_2), msg)
_IS_EMPTY_MSG = "Iterable is not empty"
def assertIsEmpty(self, iterable, msg=None):
if len(iterable):
if msg:
msg = "%s : %s" % (self._IS_EMPTY_MSG, msg)
else:
msg = self._IS_EMPTY_MSG
raise self.failureException(msg)
# TODO(andreykurilin): port existing code to use 'standard' flow
def assertRaises(
self,
expected_exception,
callable,
*args,
**kwargs,
):
with super().assertRaises(expected_exception) as ctx:
callable(*args, **kwargs)
return ctx.exception
class DBTestCase(TestCase):

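Side note on the unit-test base class changed above: the overridden assertRaises keeps the old testtools calling convention (callable plus arguments, returning the raised exception) working on top of unittest's context manager, so existing call sites do not all have to be ported at once. A minimal self-contained sketch of that shim and an old-style call site (class and test names here are illustrative, not Rally code):

    import unittest


    class CompatTestCase(unittest.TestCase):
        # Same idea as the shim above: accept the old callable-style signature
        # and return the exception instance, delegating to the standard
        # context-manager implementation underneath.
        def assertRaises(self, expected_exception, callable, *args, **kwargs):
            with super().assertRaises(expected_exception) as ctx:
                callable(*args, **kwargs)
            return ctx.exception


    class ExampleUsageTestCase(CompatTestCase):
        def test_old_style_call_site(self):
            # testtools-style call site keeps working unchanged
            err = self.assertRaises(KeyError, {}.__getitem__, "missing")
            self.assertIn("missing", str(err))


    if __name__ == "__main__":
        unittest.main()
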
View File

@@ -11,7 +11,6 @@
# under the License.
import ast
import sys
from unittest import mock
from tests.unit import test
@@ -327,15 +326,10 @@ def test_func(self, mock_args, mock_args2, mock_some_longer_args):
)
self.assertIsNone(self.visitor.visit_FunctionDef(self.tree))
if sys.version_info < (3, 8):
# https://github.com/python/cpython/pull/9731
lineno = 2
else:
lineno = 7
self.assertEqual(
[
{
"lineno": lineno,
"lineno": 7,
"messages": [
"Argument 'mock_bar_foo_misnamed' misnamed; should be "
"either of %s that is derived from the mock decorator "
@@ -367,15 +361,10 @@ def test_func(self, mock_args, mock_args2, mock_some_longer_args):
)
self.assertIsNone(self.visitor.visit_FunctionDef(self.tree))
if sys.version_info < (3, 8):
# https://github.com/python/cpython/pull/9731
lineno = 2
else:
lineno = 7
self.assertEqual(
[
{
"lineno": lineno,
"lineno": 7,
"messages": [
"Argument 'mock_bar_foo_misnamed' misnamed; should be "
"either of %s that is derived from the mock decorator "
@@ -408,15 +397,10 @@ def test_func(self, mock_args, mock_args2, mock_some_longer_args):
)
self.assertIsNone(self.visitor.visit_FunctionDef(self.tree))
if sys.version_info < (3, 8):
# https://github.com/python/cpython/pull/9731
lineno = 2
else:
lineno = 7
self.assertEqual(
[
{
"lineno": lineno,
"lineno": 7,
"messages": [
"Missing or malformed argument for {'mock_foo', "
"'mock_foo_bar', 'mock_pkg_foo_bar', ...} decorator."
@@ -448,12 +432,7 @@ def test_func(self, mock_args, mock_args2, mock_some_longer_args):
self.visitor.errors[0]["decs"]
)
if sys.version_info < (3, 8):
# https://github.com/python/cpython/pull/9731
lineno = 2
else:
lineno = 7
self.assertEqual(lineno, self.visitor.errors[0]["lineno"])
self.assertEqual(7, self.visitor.errors[0]["lineno"])
def test_visit_ok(self):
self.visitor.classname_python = "my_class_object"