[functional] Fix trivial failures in TaskTestCase

* the task format v2 changed
* the logs changed

Change-Id: I8475d0e9b1ec80868ae0f73239b2929a181b5eb1
parent 0913743b43
commit 4be1ce3350
@@ -55,36 +55,31 @@ class TaskTestCase(unittest.TestCase):
                     "group": "Dummy group",
                     "description": "The first subtask in dummy task",
                     "tags": ["dummy", "functional_test"],
-                    "run_in_parallel": False,
-                    "workloads": [{
-                        "name": "Dummy.dummy",
-                        "args": {
-                            "sleep": 0
-                        },
-                        "runner": {
-                            "type": "constant",
-                            "times": 10,
-                            "concurrency": 2
-                        },
-                    }]
+                    "workloads": [
+                        {
+                            "scenario": {
+                                "Dummy.dummy": {"sleep": 0}},
+                            "runner": {
+                                "constant": {
+                                    "times": 10,
+                                    "concurrency": 2
+                                }
+                            }
+                        }
+                    ]
                 },
                 {
                     "title": "second-subtask",
-                    "group": "Dummy group",
                     "description": "The second subtask in dummy task",
                     "tags": ["dummy", "functional_test"],
-                    "run_in_parallel": False,
-                    "workloads": [{
-                        "name": "Dummy.dummy",
-                        "args": {
-                            "sleep": 1
-                        },
-                        "runner": {
-                            "type": "constant",
+                    "scenario": {
+                        "Dummy.dummy": {"sleep": 1}},
+                    "runner": {
+                        "constant": {
                             "times": 10,
                             "concurrency": 2
-                        },
-                    }]
+                        }
+                    }
                 }
             ]
         }
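For orientation (not part of the commit): the v2 format change this hunk adapts to boils down to two reshapings, sketched below as a minimal standalone Python snippet using the same values as the diff.

    # Sketch only: the same workload in the draft v2 shape the test used to
    # expect (left side of the hunk) and the final shape (right side).
    old_workload = {
        "name": "Dummy.dummy",
        "args": {"sleep": 0},
        "runner": {"type": "constant", "times": 10, "concurrency": 2},
    }
    new_workload = {
        "scenario": {"Dummy.dummy": {"sleep": 0}},
        "runner": {"constant": {"times": 10, "concurrency": 2}},
    }

    # The scenario name and its args fold into one "scenario" mapping, and
    # the runner type becomes the key of the "runner" mapping.
    assert list(new_workload["scenario"]) == [old_workload["name"]]
    assert list(new_workload["runner"]) == [old_workload["runner"]["type"]]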
@@ -535,14 +530,12 @@ class TaskTestCase(unittest.TestCase):
                       "--status finished")
         self.assertEqual(res, res2)

-    @unittest.skip("It started failing due to broken launching script. "
-                   "Requires investigation.")
     def test_validate_is_valid(self):
         rally = utils.Rally()
         cfg = self._get_sample_task_config()
         config = utils.TaskConfig(cfg)
         output = rally("task validate --task %s" % config.filename)
-        self.assertIn("Task config is valid", output)
+        self.assertIn("Input Task is valid :)", output)

     def test_validate_is_invalid(self):
         rally = utils.Rally()
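A standalone re-creation of what the unskipped test asserts, for readers without the test harness. This is a sketch, not the test's own code; it assumes a rally binary on PATH and an existing task.json.

    # Sketch: validate a task file and check for the new success message.
    import subprocess

    output = subprocess.check_output(
        ["rally", "task", "validate", "--task", "task.json"],
        universal_newlines=True)
    # Newer rally prints "Input Task is valid :)" where older releases
    # printed "Task config is valid"; that is the log change this commit
    # tracks.
    assert "Input Task is valid :)" in output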
@@ -569,8 +562,6 @@ class TaskTestCase(unittest.TestCase):
             r"(?P<task_id>[0-9a-f\-]{36}): started", output)
         self.assertIsNotNone(result)

-    @unittest.skip("It started failing due to broken launching script. "
-                   "Requires investigation.")
     def test_validate_with_plugin_paths(self):
         rally = utils.Rally()
         plugin_paths = ("tests/functional/extra/fake_dir1/,"
@@ -581,7 +572,7 @@ class TaskTestCase(unittest.TestCase):
                        {"task_file": task_file,
                         "plugin_paths": plugin_paths})

-        self.assertIn("Task config is valid", output)
+        self.assertIn("Input Task is valid :)", output)

         plugin_paths = ("tests/functional/extra/fake_dir1/"
                         "fake_plugin1.py,"
@@ -593,7 +584,7 @@ class TaskTestCase(unittest.TestCase):
                        {"task_file": task_file,
                         "plugin_paths": plugin_paths})

-        self.assertIn("Task config is valid", output)
+        self.assertIn("Input Task is valid :)", output)

         plugin_paths = ("tests/functional/extra/fake_dir1/,"
                         "tests/functional/extra/fake_dir2/"
@@ -604,7 +595,7 @@ class TaskTestCase(unittest.TestCase):
                        {"task_file": task_file,
                         "plugin_paths": plugin_paths})

-        self.assertIn("Task config is valid", output)
+        self.assertIn("Input Task is valid :)", output)

     def _test_start_abort_on_sla_failure_success(self, cfg, times):
         rally = utils.Rally()
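The three hunks above only swap the expected success message; the command under test loads extra plugins before validating. A sketch of the same idea follows. The directory names come from the diff, but the use of rally's global --plugin-paths option is an assumption, since the command template built from {"task_file": ..., "plugin_paths": ...} lies outside this diff.

    # Sketch: same validation, with extra plugin locations loaded first.
    import subprocess

    plugin_paths = ("tests/functional/extra/fake_dir1/,"
                    "tests/functional/extra/fake_dir2/")
    output = subprocess.check_output(
        ["rally", "--plugin-paths", plugin_paths,
         "task", "validate", "--task", "task.json"],
        universal_newlines=True)
    assert "Input Task is valid :)" in output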
@@ -973,8 +964,6 @@ class TaskTestCase(unittest.TestCase):
         current_task = utils.get_global("RALLY_TASK", rally.env)
         self.assertEqual(uuid, current_task)

-    @unittest.skip("It started failing due to broken launching script. "
-                   "Requires investigation.")
     def test_start_v2(self):
         rally = utils.Rally()
         deployment_id = utils.get_global("RALLY_DEPLOYMENT", rally.env)
@@ -1029,10 +1018,10 @@ class SLATestCase(unittest.TestCase):
     def _get_sample_task_config(self, max_seconds_per_iteration=4,
                                 failure_rate_max=0):
         return {
-            "KeystoneBasic.create_and_list_users": [
+            "Dummy.dummy": [
                 {
                     "args": {
-                        "enabled": True
+                        "sleep": 0.2
                     },
                     "runner": {
                         "type": "constant",
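The hunk truncates before the runner values and the sla block, so the following is only a sketch of how the two keyword arguments are presumably wired in. failure_rate and max_seconds_per_iteration are real rally SLA plugin names; the literal runner values below are placeholders, not taken from the diff.

    # Sketch, with assumed runner values and an assumed "sla" wiring for
    # the two keyword arguments (the diff cuts off before those lines).
    def _get_sample_task_config(max_seconds_per_iteration=4,
                                failure_rate_max=0):
        return {
            "Dummy.dummy": [
                {
                    "args": {"sleep": 0.2},
                    "runner": {"type": "constant",
                               "times": 5, "concurrency": 5},
                    "sla": {
                        "max_seconds_per_iteration":
                            max_seconds_per_iteration,
                        "failure_rate": {"max": failure_rate_max},
                    },
                }
            ]
        }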
@@ -1047,8 +1036,6 @@ class SLATestCase(unittest.TestCase):
             ]
         }

-    @unittest.skip("It started failing due to broken launching script. "
-                   "Requires investigation.")
     def test_sla_fail(self):
         rally = utils.Rally()
         cfg = self._get_sample_task_config(max_seconds_per_iteration=0.001)
@@ -1056,19 +1043,17 @@ class SLATestCase(unittest.TestCase):
         rally("task start --task %s" % config.filename)
         self.assertRaises(utils.RallyCliError, rally, "task sla-check")

-    @unittest.skip("It started failing due to broken launching script. "
-                   "Requires investigation.")
     def test_sla_success(self):
         rally = utils.Rally()
         config = utils.TaskConfig(self._get_sample_task_config())
         rally("task start --task %s" % config.filename)
         rally("task sla-check")
         expected = [
-            {"benchmark": "KeystoneBasic.create_and_list_users",
+            {"benchmark": "Dummy.dummy",
              "criterion": "failure_rate",
              "detail": mock.ANY,
              "pos": 0, "status": "PASS"},
-            {"benchmark": "KeystoneBasic.create_and_list_users",
+            {"benchmark": "Dummy.dummy",
              "criterion": "max_seconds_per_iteration",
              "detail": mock.ANY,
              "pos": 0, "status": "PASS"}
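Both expected entries use mock.ANY for "detail" because the human-readable detail text is not stable across runs; mock.ANY compares equal to any value, so dict equality ignores that field. A minimal demonstration (the detail string here is made up):

    from unittest import mock  # the test module imports the mock library

    expected = {"benchmark": "Dummy.dummy", "criterion": "failure_rate",
                "detail": mock.ANY, "pos": 0, "status": "PASS"}
    actual = dict(expected, detail="some human-readable SLA explanation")
    assert expected == actual  # mock.ANY == anything, so this passes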
@@ -1130,6 +1115,12 @@ class SLAExtraFlagsTestCase(unittest.TestCase):
         config = utils.TaskConfig(cfg)
         rally("task start --task %s" % config.filename)
         expected = [
+            {"status": "PASS",
+             "benchmark": "Dummy.dummy",
+             "criterion": "failure_rate",
+             "detail": "Failure rate criteria 0.00% <= 0.00% <= 0.00% - "
+                       "Passed",
+             "pos": 0},
             {"benchmark": "Dummy.dummy",
              "criterion": "something_went_wrong",
              "detail": mock.ANY,
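The added "detail" value is split across two source lines; Python joins adjacent string literals at compile time, so the expected string contains no newline:

    # Adjacent string literals are concatenated into one string.
    detail = ("Failure rate criteria 0.00% <= 0.00% <= 0.00% - "
              "Passed")
    assert detail == "Failure rate criteria 0.00% <= 0.00% <= 0.00% - Passed"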
@@ -1143,15 +1134,11 @@ class SLAExtraFlagsTestCase(unittest.TestCase):
         self.fail("`rally task sla-check` command should return non-zero "
                   "exit code")

-    @unittest.skip("It started failing due to broken launching script. "
-                   "Requires investigation.")
     def test_broken_context_with_constant_runner(self):
         self._test_broken_context({"type": "constant",
                                    "times": 5,
                                    "concurrency": 5})

-    @unittest.skip("It started failing due to broken launching script. "
-                   "Requires investigation.")
     def test_broken_context_with_rps_runner(self):
         self._test_broken_context({"type": "rps",
                                    "times": 5,
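The two unskipped tests differ only in the runner config handed to _test_broken_context: the constant runner launches a fixed number of parallel iterations, while the rps runner paces iteration launches by rate. A rough sketch of the two shapes; the rps value is a placeholder, since the hunk is truncated before it.

    # Runner configs in the v1 task format; "times" is the total iteration
    # count in both cases. The rps value is assumed, not taken from the diff.
    constant_runner = {"type": "constant", "times": 5, "concurrency": 5}
    rps_runner = {"type": "rps", "times": 5, "rps": 5}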