From d13c6d5d6c7d615e854d8bd1df6fa248cdba7096 Mon Sep 17 00:00:00 2001 From: chen-li Date: Wed, 19 Mar 2014 13:22:33 +0800 Subject: [PATCH] standardized output for scenario runners We have formatting requirements for the results of scenario runners. So, we create the class ScenarioRunnerResult to check the format. Basically, all runners should return the result with an instance of class ScenarioRunnerResult. Change-Id: Iaf62fd19e46654c7bcbd46719c7c4bea4ccaf9ad --- rally/benchmark/runners/base.py | 71 ++++++++++++++++++++-- rally/benchmark/runners/continuous.py | 6 +- rally/benchmark/runners/periodic.py | 2 +- rally/exceptions.py | 5 ++ tests/benchmark/runners/test_base.py | 47 +++++++++++++- tests/benchmark/runners/test_continuous.py | 8 +-- tests/benchmark/runners/test_periodic.py | 1 + 7 files changed, 127 insertions(+), 13 deletions(-) diff --git a/rally/benchmark/runners/base.py b/rally/benchmark/runners/base.py index 35fb16932a..06a8289cc6 100644 --- a/rally/benchmark/runners/base.py +++ b/rally/benchmark/runners/base.py @@ -45,9 +45,10 @@ def _run_scenario_once(args): clients=osclients.Clients(user["endpoint"])) try: - scenario_output = None + scenario_output = {} with rutils.Timer() as timer: - scenario_output = getattr(scenario, method_name)(**kwargs) + scenario_output = getattr(scenario, + method_name)(**kwargs) or {} error = None except Exception as e: error = utils.format_exc(e) @@ -64,6 +65,60 @@ def _run_scenario_once(args): "atomic_actions_time": scenario.atomic_actions_time()} +class ScenarioRunnerResult(list): + """Class for all scenario runners' result. 
+ + """ + + RESULT_SCHEMA = { + "type": "array", + "$schema": "http://json-schema.org/draft-03/schema", + "items": { + "type": "object", + "properties": { + "time": { + "type": "number" + }, + "idle_time": { + "type": "number" + }, + "scenario_output": { + "type": "object", + "properties": { + "data": { + "type": "object", + "patternProperties": { + ".*": {"type": "number"} + } + }, + "error": { + "type": "string" + }, + }, + "additionalProperties": False + }, + "atomic_actions_time": { + "type": "array", + "items": { + "type": "object", + "properties": { + "action": {"type": "string"}, + "duration": {"type": "number"} + }, + "additionalProperties": False + } + }, + "error": {}, + }, + "additionalProperties": False + } + } + + def __init__(self, result_list): + super(ScenarioRunnerResult, self).__init__(result_list) + jsonschema.validate(result_list, self.RESULT_SCHEMA) + + class ScenarioRunner(object): """Base class for all scenario runners. @@ -156,6 +211,14 @@ class ScenarioRunner(object): def run(self, name, kwargs): if self.admin_user: - return self._run_as_admin(name, kwargs) + results = self._run_as_admin(name, kwargs) else: - return self._run_as_non_admin(name, kwargs) + results = self._run_as_non_admin(name, kwargs) + + if not isinstance(results, ScenarioRunnerResult): + name = self.__execution_type__ + results_type = type(results) + raise exceptions.InvalidRunnerResult(name=name, + results_type=results_type) + + return results diff --git a/rally/benchmark/runners/continuous.py b/rally/benchmark/runners/continuous.py index 670d9fff11..dae88388ca 100644 --- a/rally/benchmark/runners/continuous.py +++ b/rally/benchmark/runners/continuous.py @@ -137,11 +137,13 @@ class ContinuousScenarioRunner(base.ScenarioRunner): # amount of times. 
if "times" in config: times = config["times"] - return self._run_scenario_continuously_for_times( + results = self._run_scenario_continuously_for_times( cls, method_name, context, args, times, concurrent, timeout) # Continiously run a scenario as many times as needed # to fill up the given period of time. elif "duration" in config: duration = config["duration"] - return self._run_scenario_continuously_for_duration( + results = self._run_scenario_continuously_for_duration( cls, method_name, context, args, duration, concurrent, timeout) + + return base.ScenarioRunnerResult(results) diff --git a/rally/benchmark/runners/periodic.py b/rally/benchmark/runners/periodic.py index 5e5f09c36e..1888718859 100644 --- a/rally/benchmark/runners/periodic.py +++ b/rally/benchmark/runners/periodic.py @@ -90,4 +90,4 @@ class PeriodicScenarioRunner(base.ScenarioRunner): "error": utils.format_exc(e)} results.append(result) - return results + return base.ScenarioRunnerResult(results) diff --git a/rally/exceptions.py b/rally/exceptions.py index 32653bf999..40fc9c8032 100644 --- a/rally/exceptions.py +++ b/rally/exceptions.py @@ -88,6 +88,11 @@ class InvalidConfigException(RallyException): msg_fmt = _("This config has invalid schema: `%(message)s`") +class InvalidRunnerResult(RallyException): + msg_fmt = _("Type of result of `%(name)s` runner should be" + " `base.ScenarioRunnerResult`. Got: `%(results_type)s`") + + class InvalidTaskException(InvalidConfigException): msg_fmt = _("This config is invalid: `%(message)s`") diff --git a/tests/benchmark/runners/test_base.py b/tests/benchmark/runners/test_base.py index 77527591fc..0e532cdbed 100644 --- a/tests/benchmark/runners/test_base.py +++ b/tests/benchmark/runners/test_base.py @@ -13,16 +13,52 @@ # License for the specific language governing permissions and limitations # under the License. 
+import jsonschema import mock import multiprocessing from rally.benchmark.runners import base from rally.benchmark.runners import continuous from rally import consts +from rally import exceptions from tests import fakes from tests import test +class ScenarioRunnerResultTestCase(test.TestCase): + + def test_validate(self): + config = [ + { + "time": 1.0, + "idle_time": 1.0, + "scenario_output": { + "data": {"test": 1.0}, + "error": "test error string 1" + }, + "atomic_actions_time": [{"action": "test1", "duration": 1.0}], + "error": "test1" + }, + { + "time": 2.0, + "idle_time": 2.0, + "scenario_output": { + "data": {"test": 2.0}, + "error": "test error string 2" + }, + "atomic_actions_time": [{"action": "test2", "duration": 2.0}], + "error": "test2" + } + ] + + self.assertEqual(config, base.ScenarioRunnerResult(config)) + + def test_validate_failed(self): + config = [{"a": 10}] + self.assertRaises(jsonschema.ValidationError, + base.ScenarioRunnerResult, config) + + class ScenarioRunnerTestCase(test.TestCase): def setUp(self): @@ -68,7 +104,7 @@ class ScenarioRunnerTestCase(test.TestCase): "active_users": active_users, "timeout": 2}) expected = [{"time": 10, "idle_time": 0, "error": None, - "scenario_output": None, "atomic_actions_time": []} + "scenario_output": {}, "atomic_actions_time": []} for i in range(times)] self.assertEqual(results, expected) @@ -78,7 +114,7 @@ class ScenarioRunnerTestCase(test.TestCase): "active_users": active_users, "timeout": 2}) expected = [{"time": 10, "idle_time": 0, "error": None, - "scenario_output": None, "atomic_actions_time": []} + "scenario_output": {}, "atomic_actions_time": []} for i in range(active_users)] self.assertEqual(results, expected) @@ -164,3 +200,10 @@ class ScenarioRunnerTestCase(test.TestCase): self.assertEqual(r['time'], 10) self.assertEqual(r['error'][:2], [str(Exception), "Something went wrong"]) + + @mock.patch("rally.benchmark.runners.base.ScenarioRunner._run_as_admin") + def 
test_run_scenario_runner_results_exception(self, mock_run_method): + runner = continuous.ContinuousScenarioRunner(mock.MagicMock(), + self.fake_endpoints) + self.assertRaises(exceptions.InvalidRunnerResult, + runner.run, mock.MagicMock(), mock.MagicMock()) diff --git a/tests/benchmark/runners/test_continuous.py b/tests/benchmark/runners/test_continuous.py index 3c59829f1b..ab921298ce 100644 --- a/tests/benchmark/runners/test_continuous.py +++ b/tests/benchmark/runners/test_continuous.py @@ -122,7 +122,7 @@ class ContinuousScenarioRunnerTestCase(test.TestCase): "continuous") runner._run_scenario_continuously_for_times = \ - mock.MagicMock(return_value="times") + mock.MagicMock(return_value=[{"time": 1}]) mock_base.Scenario.get_by_name = \ mock.MagicMock(return_value=FakeScenario) @@ -132,7 +132,7 @@ class ContinuousScenarioRunnerTestCase(test.TestCase): {"a": 1}, {"times": 2, "active_users": 3, "timeout": 1}) - self.assertEqual(result, "times") + self.assertEqual(result, [{"time": 1}]) runner._run_scenario_continuously_for_times.assert_called_once_with( FakeScenario, "do_it", fakecontext, {"a": 1}, 2, 3, 1) @@ -149,7 +149,7 @@ class ContinuousScenarioRunnerTestCase(test.TestCase): self.fake_endpoints, "continuous") runner._run_scenario_continuously_for_duration = \ - mock.MagicMock(return_value="duration") + mock.MagicMock(return_value=[{"time": 1}]) mock_base.Scenario.get_by_name = \ mock.MagicMock(return_value=FakeScenario) @@ -159,7 +159,7 @@ class ContinuousScenarioRunnerTestCase(test.TestCase): {"a": 1}, {"duration": 2, "active_users": 3, "timeout": 1}) - self.assertEqual(result, "duration") + self.assertEqual(result, [{"time": 1}]) runner._run_scenario_continuously_for_duration.\ assert_called_once_with(FakeScenario, "do_it", fakecontext, {"a": 1}, 2, 3, 1) diff --git a/tests/benchmark/runners/test_periodic.py b/tests/benchmark/runners/test_periodic.py index 509070b1a7..a5aa036727 100644 --- a/tests/benchmark/runners/test_periodic.py +++ 
b/tests/benchmark/runners/test_periodic.py @@ -52,6 +52,7 @@ class PeriodicScenarioRunnerTestCase(test.TestCase): def test_run_scenario(self, mock_osclients, mock_sleep, mock_run_scenario_once): mock_osclients.Clients.return_value = fakes.FakeClients() + mock_run_scenario_once.return_value = {} runner = periodic.PeriodicScenarioRunner(mock.MagicMock(), self.fake_endpoints) times = 3