diff --git a/rally/benchmark/engine.py b/rally/benchmark/engine.py
index d74dac5d35..a1f395b920 100644
--- a/rally/benchmark/engine.py
+++ b/rally/benchmark/engine.py
@@ -137,9 +137,21 @@ class TestEngine(object):
         for name in self.config:
             for n, kwargs in enumerate(self.config[name]):
                 key = {'name': name, 'pos': n, 'kw': kwargs}
-                result = scenario_runner.run(name, kwargs)
-                self.task.append_results(key, {"raw": result})
-                results[json.dumps(key)] = result
+                try:
+                    result = scenario_runner.run(name, kwargs)
+                    self.task.append_results(key, {"raw": result,
+                                                   "validation":
+                                                   {"is_valid": True}})
+                    results[json.dumps(key)] = result
+                except exceptions.InvalidScenarioArgument as e:
+                    self.task.append_results(key, {"raw": [],
+                                                   "validation":
+                                                   {"is_valid": False,
+                                                    "exc_msg": e.message}})
+                    LOG.error(_("Scenario (%(pos)s, %(name)s) input arguments "
+                                "validation error: %(msg)s") %
+                              {"pos": n, "name": name, "msg": e.message})
+
         return results

     def bind(self, endpoint):
diff --git a/rally/benchmark/runner.py b/rally/benchmark/runner.py
index fc1134d1df..d3c4a690e8 100644
--- a/rally/benchmark/runner.py
+++ b/rally/benchmark/runner.py
@@ -23,6 +23,7 @@ import uuid

 from rally.benchmark import base
 from rally.benchmark import utils
+from rally import exceptions
 from rally.openstack.common.gettextutils import _
 from rally.openstack.common import log as logging
 from rally import utils as rutils
@@ -294,6 +295,13 @@ class ScenarioRunner(object):
         cls._clients = __admin_clients__
         __scenario_context__ = cls.init(init_args)

+        method = getattr(cls, method_name)
+        validators = getattr(method, "validators", [])
+        for validator in validators:
+            result = validator(clients=__admin_clients__, **args)
+            if not result.is_valid:
+                raise exceptions.InvalidScenarioArgument(message=result.msg)
+
         # NOTE(msdubov): Launch scenarios with non-admin openstack clients
         keys = ["username", "password", "tenant_name", "auth_url"]
         __openstack_clients__ = utils.create_openstack_clients(temp_users,
diff --git a/rally/benchmark/validation.py b/rally/benchmark/validation.py
new file mode 100644
index 0000000000..e12915e664
--- /dev/null
+++ b/rally/benchmark/validation.py
@@ -0,0 +1,31 @@
+# Copyright 2014: Mirantis Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+class ValidationResult(object):
+
+    def __init__(self, is_valid=True, msg=None):
+        self.is_valid = is_valid
+        self.msg = msg
+
+
+def add_validator(validator):
+    def wrapper(func):
+        if not getattr(func, 'validators', None):
+            func.validators = []
+        func.validators.append(validator)
+        return func
+    return wrapper
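The new `rally/benchmark/validation.py` module defines the contract consumed by `ScenarioRunner.run` above: a validator is any callable that accepts the scenario's keyword arguments plus a `clients` dict of admin OpenStack clients, and returns a `ValidationResult`; `add_validator` stacks such callables onto a scenario method's `validators` list. A minimal sketch of how a scenario author might use this API (the `image_exists` validator, the `"glance"` client key, and the scenario class below are hypothetical illustrations, not part of this patch):

```python
from rally.benchmark import validation


def image_exists(clients=None, image_id=None, **kwargs):
    # Hypothetical check: fail fast if image_id is unknown to Glance.
    # Assumes the admin clients dict is keyed by service name, matching
    # the validator(clients=__admin_clients__, **args) call in the runner.
    for image in clients["glance"].images.list():
        if image.id == image_id:
            return validation.ValidationResult()  # is_valid defaults to True
    return validation.ValidationResult(
        is_valid=False, msg="Image %s not found" % image_id)


class FakeNovaServers(object):  # stand-in for a real scenario class

    @validation.add_validator(image_exists)
    def boot_and_delete_server(self, image_id, flavor_id):
        pass  # scenario body omitted
```

Several validators can be stacked on one method; the runner raises `InvalidScenarioArgument` at the first one whose result is not valid.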
diff --git a/rally/cmd/main.py b/rally/cmd/main.py
index ffdcd7f30c..843b7d3e06 100644
--- a/rally/cmd/main.py
+++ b/rally/cmd/main.py
@@ -223,6 +223,11 @@ class TaskCommands(object):
             print("args values:")
             pprint.pprint(key["kw"])

+            if not result["data"]["validation"]["is_valid"]:
+                print("-" * 80)
+                print(result["data"]["validation"]["exc_msg"])
+                continue
+
             raw = result["data"]["raw"]
             times = map(lambda x: x['time'],
                         filter(lambda r: not r['error'], raw))
diff --git a/rally/exceptions.py b/rally/exceptions.py
index 4b8394e402..057dbb8732 100644
--- a/rally/exceptions.py
+++ b/rally/exceptions.py
@@ -164,3 +164,7 @@ class InvalidEndpointsException(InvalidArgumentsException):

 class HostUnreachableException(InvalidArgumentsException):
     msg_fmt = _("unable to establish connection to the remote host: %(url)s")
+
+
+class InvalidScenarioArgument(RallyException):
+    msg_fmt = _("Invalid scenario argument: '%(message)s'")
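Taken together with the engine change above, each per-scenario entry appended to the task now carries a `validation` sub-dict, which the `TaskCommands` results printer checks before computing timing statistics. Schematically (shapes copied from the code above):

```python
# Scenario arguments passed validation; raw benchmark results follow.
{"raw": result, "validation": {"is_valid": True}}

# Runner raised InvalidScenarioArgument: no raw results, only the message,
# which the CLI prints instead of the timing table.
{"raw": [], "validation": {"is_valid": False, "exc_msg": e.message}}
```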
diff --git a/tests/benchmark/test_engine.py b/tests/benchmark/test_engine.py
index ebc38cb2c9..aee0f798ed 100644
--- a/tests/benchmark/test_engine.py
+++ b/tests/benchmark/test_engine.py
@@ -153,9 +153,9 @@ class TestEngineTestCase(test.TestCase):
     @mock.patch("rally.benchmark.runner.ScenarioRunner.run")
     @mock.patch("rally.benchmark.utils.osclients")
     @mock.patch("rally.benchmark.engine.osclients")
-    def test_run(self, mock_osclients_engine, mock_osclients_utils, mock_run):
-        mock_osclients_engine.Clients.return_value = fakes.FakeClients()
-        mock_osclients_utils.Clients.return_value = fakes.FakeClients()
+    def test_run(self, mock_engine_osclients, mock_utils_osclients, mock_run):
+        mock_engine_osclients.Clients.return_value = fakes.FakeClients()
+        mock_utils_osclients.Clients.return_value = fakes.FakeClients()
         tester = engine.TestEngine(self.valid_test_config_continuous_times,
                                    mock.MagicMock())
         with tester.bind(self.valid_endpoint):
@@ -164,13 +164,13 @@ class TestEngineTestCase(test.TestCase):
     @mock.patch("rally.benchmark.runner.ScenarioRunner.run")
     @mock.patch("rally.benchmark.utils.osclients")
     @mock.patch("rally.benchmark.engine.osclients")
-    def test_task_status_basic_chain(self, mock_osclients_engine,
-                                     mock_osclients_utils, mock_scenario_run):
+    def test_task_status_basic_chain(self, mock_engine_osclients,
+                                     mock_utils_osclients, mock_scenario_run):
         fake_task = mock.MagicMock()
         tester = engine.TestEngine(self.valid_test_config_continuous_times,
                                    fake_task)
-        mock_osclients_engine.Clients.return_value = fakes.FakeClients()
-        mock_osclients_utils.Clients.return_value = fakes.FakeClients()
+        mock_engine_osclients.Clients.return_value = fakes.FakeClients()
+        mock_utils_osclients.Clients.return_value = fakes.FakeClients()
         mock_scenario_run.return_value = {}
         with tester.bind(self.valid_endpoint):
             tester.run()
@@ -184,7 +184,8 @@ class TestEngineTestCase(test.TestCase):
         s = consts.TaskStatus
         expected = [
             mock.call.update_status(s.TEST_TOOL_BENCHMARKING),
-            mock.call.append_results(benchmark_results, {'raw': {}}),
+            mock.call.append_results(benchmark_results, {'raw': {},
+                                     'validation': {'is_valid': True}}),
             mock.call.update_status(s.FINISHED)
         ]
         # NOTE(msdubov): Ignore task['uuid'] calls which are used for logging
@@ -195,13 +196,51 @@ class TestEngineTestCase(test.TestCase):
     @mock.patch("rally.benchmark.runner.ScenarioRunner.run")
     @mock.patch("rally.benchmark.utils.osclients")
     @mock.patch("rally.benchmark.engine.osclients")
-    def test_task_status_failed(self, mock_osclients_engine,
-                                mock_osclients_utils, mock_scenario_run):
+    def test_task_status_basic_chain_validation_fails(self,
+                                                      mock_engine_osclients,
+                                                      mock_utils_osclients,
+                                                      mock_scenario_run):
         fake_task = mock.MagicMock()
         tester = engine.TestEngine(self.valid_test_config_continuous_times,
                                    fake_task)
-        mock_osclients_engine.Clients.return_value = fakes.FakeClients()
-        mock_osclients_utils.Clients.return_value = fakes.FakeClients()
+        mock_engine_osclients.Clients.return_value = fakes.FakeClients()
+        mock_utils_osclients.Clients.return_value = fakes.FakeClients()
+        validation_exc = exceptions.InvalidScenarioArgument()
+        mock_scenario_run.side_effect = validation_exc
+
+        with tester.bind(self.valid_endpoint):
+            tester.run()
+
+        benchmark_name = 'NovaServers.boot_and_delete_server'
+        benchmark_results = {
+            'name': benchmark_name, 'pos': 0,
+            'kw': self.valid_test_config_continuous_times[benchmark_name][0],
+        }
+
+        s = consts.TaskStatus
+        expected = [
+            mock.call.update_status(s.TEST_TOOL_BENCHMARKING),
+            mock.call.append_results(benchmark_results,
+                                     {'raw': [],
+                                      'validation': {'is_valid': False,
+                                       'exc_msg': validation_exc.message}}),
+            mock.call.update_status(s.FINISHED)
+        ]
+        # NOTE(msdubov): Ignore task['uuid'] calls which are used for logging
+        mock_calls = filter(lambda call: '__getitem__' not in call[0],
+                            fake_task.mock_calls)
+        self.assertEqual(mock_calls, expected)
+
+    @mock.patch("rally.benchmark.runner.ScenarioRunner.run")
+    @mock.patch("rally.benchmark.utils.osclients")
+    @mock.patch("rally.benchmark.engine.osclients")
+    def test_task_status_failed(self, mock_engine_osclients,
+                                mock_utils_osclients, mock_scenario_run):
+        fake_task = mock.MagicMock()
+        tester = engine.TestEngine(self.valid_test_config_continuous_times,
+                                   fake_task)
+        mock_engine_osclients.Clients.return_value = fakes.FakeClients()
+        mock_utils_osclients.Clients.return_value = fakes.FakeClients()
         mock_scenario_run.side_effect = exceptions.TestException()
         try:
             with tester.bind(self.valid_endpoint):
diff --git a/tests/benchmark/test_runner.py b/tests/benchmark/test_runner.py
index 6f9012ab80..000513c294 100644
--- a/tests/benchmark/test_runner.py
+++ b/tests/benchmark/test_runner.py
@@ -18,6 +18,8 @@ import mock
 import multiprocessing

 from rally.benchmark import runner
+from rally.benchmark import validation
+from rally import exceptions
 from tests import fakes
 from tests import test

@@ -304,12 +306,12 @@ class ScenarioTestCase(test.TestCase):
         srunner._run_scenario_periodically.assert_called_once_with(
             FakeScenario, "do_it", {"a": 1}, 2, 3, 1)

-    @mock.patch("rally.benchmark.utils.create_openstack_clients")
-    @mock.patch("rally.benchmark.runner.base")
-    @mock.patch("rally.benchmark.utils.osclients")
-    def test_run(self, mock_osclients, mock_base, mock_clients):
+    def _set_mocks_for_run(self, mock_osclients, mock_base, mock_clients,
+                           validators=None):
         FakeScenario = mock.MagicMock()
         FakeScenario.init = mock.MagicMock(return_value={})
+        if validators:
+            FakeScenario.do_it.validators = validators

         mock_osclients.Clients.return_value = fakes.FakeClients()
         srunner = runner.ScenarioRunner(mock.MagicMock(), self.fake_kw)
@@ -320,6 +322,16 @@ class ScenarioTestCase(test.TestCase):
         mock_base.Scenario.get_by_name = \
             mock.MagicMock(return_value=FakeScenario)

+        return FakeScenario, srunner
+
@mock.patch("rally.benchmark.utils.create_openstack_clients") + @mock.patch("rally.benchmark.runner.base") + @mock.patch("rally.benchmark.utils.osclients") + def test_run(self, mock_osclients, mock_base, mock_clients): + FakeScenario, srunner = self._set_mocks_for_run(mock_osclients, + mock_base, + mock_clients) + result = srunner.run("FakeScenario.do_it", {}) self.assertEqual(result, "result") srunner.run("FakeScenario.do_it", @@ -357,7 +369,25 @@ class ScenarioTestCase(test.TestCase): mock.call.init({"arg": 1}), mock.call.init({"fake": "arg"}), ] - self.assertEqual(FakeScenario.mock_calls, expected) + # NOTE(olkonami): Ignore __iter__ calls in loop + mock_calls = filter(lambda call: '__iter__' not in call[0], + FakeScenario.mock_calls) + self.assertEqual(mock_calls, expected) + + @mock.patch("rally.benchmark.utils.create_openstack_clients") + @mock.patch("rally.benchmark.runner.base") + @mock.patch("rally.benchmark.utils.osclients") + def test_run_validation_failure(self, mock_osclients, mock_base, + mock_clients): + def evil_validator(**kwargs): + return validation.ValidationResult(is_valid=False) + + FakeScenario, srunner = self._set_mocks_for_run(mock_osclients, + mock_base, + mock_clients, + [evil_validator]) + self.assertRaises(exceptions.InvalidScenarioArgument, + srunner.run, "FakeScenario.do_it", {}) @mock.patch("rally.benchmark.utils.create_openstack_clients") @mock.patch("rally.benchmark.runner.base") diff --git a/tests/benchmark/test_validation.py b/tests/benchmark/test_validation.py new file mode 100644 index 0000000000..2c2c95e343 --- /dev/null +++ b/tests/benchmark/test_validation.py @@ -0,0 +1,32 @@ +# Copyright 2014: Mirantis Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from rally.benchmark import validation +from tests import test + + +class ValidationUtilsTestCase(test.TestCase): + + def test_add_validator(self): + def test_validator(): + pass + + @validation.add_validator(test_validator) + def test_function(): + pass + + validators = getattr(test_function, "validators") + self.assertEqual(len(validators), 1) + self.assertEqual(validators[0], test_validator) diff --git a/tests/orchestrator/test_api.py b/tests/orchestrator/test_api.py index b7db4f3bf6..2d69a3fb33 100644 --- a/tests/orchestrator/test_api.py +++ b/tests/orchestrator/test_api.py @@ -134,6 +134,7 @@ class APITestCase(test.TestCase): }, { 'raw': ['fake_result'], + 'validation': {'is_valid': True}, }, )