Validator adder for benchmark preconditions

Implements a decorator for adding validators to a method.
Validators are called in the benchmark runner.
A validation method should return an instance of the
rally.benchmark.validation.ValidationResult class.

Blueprint validate-benchmark-preconditions

Change-Id: I68c8f5cc5d8758026bbe7ef211aa914b313ebb81
This commit is contained in:
Olga Kopylova 2014-01-16 15:33:54 +02:00
parent ec42300a25
commit e4f58bf63b
9 changed files with 182 additions and 20 deletions

View File

@ -137,9 +137,21 @@ class TestEngine(object):
for name in self.config:
for n, kwargs in enumerate(self.config[name]):
key = {'name': name, 'pos': n, 'kw': kwargs}
result = scenario_runner.run(name, kwargs)
self.task.append_results(key, {"raw": result})
results[json.dumps(key)] = result
try:
result = scenario_runner.run(name, kwargs)
self.task.append_results(key, {"raw": result,
"validation":
{"is_valid": True}})
results[json.dumps(key)] = result
except exceptions.InvalidScenarioArgument as e:
self.task.append_results(key, {"raw": [],
"validation":
{"is_valid": False,
"exc_msg": e.message}})
LOG.error(_("Scenario (%(pos)s, %(name)s) input arguments "
"validation error: %(msg)s") %
{"pos": n, "name": name, "msg": e.message})
return results
def bind(self, endpoint):

View File

@ -23,6 +23,7 @@ import uuid
from rally.benchmark import base
from rally.benchmark import utils
from rally import exceptions
from rally.openstack.common.gettextutils import _
from rally.openstack.common import log as logging
from rally import utils as rutils
@ -294,6 +295,13 @@ class ScenarioRunner(object):
cls._clients = __admin_clients__
__scenario_context__ = cls.init(init_args)
method = getattr(cls, method_name)
validators = getattr(method, "validators", [])
for validator in validators:
result = validator(clients=__admin_clients__, **args)
if not result.is_valid:
raise exceptions.InvalidScenarioArgument(message=result.msg)
# NOTE(msdubov): Launch scenarios with non-admin openstack clients
keys = ["username", "password", "tenant_name", "auth_url"]
__openstack_clients__ = utils.create_openstack_clients(temp_users,

View File

@ -0,0 +1,31 @@
# Copyright 2014: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
class ValidationResult(object):
    """Outcome reported by a benchmark precondition validator.

    Attributes:
        is_valid: whether the scenario arguments passed validation.
        msg: human-readable failure reason (None when validation passed).
    """

    def __init__(self, is_valid=True, msg=None):
        # Store both fields verbatim; callers inspect them directly.
        self.is_valid, self.msg = is_valid, msg
def add_validator(validator):
    """Attach *validator* to a scenario method.

    Returns a decorator that appends ``validator`` to the decorated
    function's ``validators`` attribute, creating that list the first
    time a validator is attached. The decorated function itself is
    returned unchanged (no wrapping).
    """
    def wrapper(func):
        # Lazily create the per-function validator list; reuse it when
        # a non-empty list is already present.
        func.validators = getattr(func, "validators", None) or []
        func.validators.append(validator)
        return func
    return wrapper

View File

@ -223,6 +223,11 @@ class TaskCommands(object):
print("args values:")
pprint.pprint(key["kw"])
if not result["data"]["validation"]["is_valid"]:
print("-" * 80)
print(result["data"]["validation"]["exc_msg"])
continue
raw = result["data"]["raw"]
times = map(lambda x: x['time'],
filter(lambda r: not r['error'], raw))

View File

@ -164,3 +164,7 @@ class InvalidEndpointsException(InvalidArgumentsException):
class HostUnreachableException(InvalidArgumentsException):
    """Raised when a connection to a remote host cannot be established."""
    msg_fmt = _("unable to establish connection to the remote host: %(url)s")
class InvalidScenarioArgument(RallyException):
    """Raised when a scenario precondition validator rejects the arguments."""
    msg_fmt = _("Invalid scenario argument: '%(message)s'")

View File

@ -153,9 +153,9 @@ class TestEngineTestCase(test.TestCase):
@mock.patch("rally.benchmark.runner.ScenarioRunner.run")
@mock.patch("rally.benchmark.utils.osclients")
@mock.patch("rally.benchmark.engine.osclients")
def test_run(self, mock_osclients_engine, mock_osclients_utils, mock_run):
mock_osclients_engine.Clients.return_value = fakes.FakeClients()
mock_osclients_utils.Clients.return_value = fakes.FakeClients()
def test_run(self, mock_engine_osclients, mock_utils_osclients, mock_run):
mock_engine_osclients.Clients.return_value = fakes.FakeClients()
mock_utils_osclients.Clients.return_value = fakes.FakeClients()
tester = engine.TestEngine(self.valid_test_config_continuous_times,
mock.MagicMock())
with tester.bind(self.valid_endpoint):
@ -164,13 +164,13 @@ class TestEngineTestCase(test.TestCase):
@mock.patch("rally.benchmark.runner.ScenarioRunner.run")
@mock.patch("rally.benchmark.utils.osclients")
@mock.patch("rally.benchmark.engine.osclients")
def test_task_status_basic_chain(self, mock_osclients_engine,
mock_osclients_utils, mock_scenario_run):
def test_task_status_basic_chain(self, mock_engine_osclients,
mock_utils_osclients, mock_scenario_run):
fake_task = mock.MagicMock()
tester = engine.TestEngine(self.valid_test_config_continuous_times,
fake_task)
mock_osclients_engine.Clients.return_value = fakes.FakeClients()
mock_osclients_utils.Clients.return_value = fakes.FakeClients()
mock_engine_osclients.Clients.return_value = fakes.FakeClients()
mock_utils_osclients.Clients.return_value = fakes.FakeClients()
mock_scenario_run.return_value = {}
with tester.bind(self.valid_endpoint):
tester.run()
@ -184,7 +184,8 @@ class TestEngineTestCase(test.TestCase):
s = consts.TaskStatus
expected = [
mock.call.update_status(s.TEST_TOOL_BENCHMARKING),
mock.call.append_results(benchmark_results, {'raw': {}}),
mock.call.append_results(benchmark_results, {'raw': {},
'validation': {'is_valid': True}}),
mock.call.update_status(s.FINISHED)
]
# NOTE(msdubov): Ignore task['uuid'] calls which are used for logging
@ -195,13 +196,51 @@ class TestEngineTestCase(test.TestCase):
@mock.patch("rally.benchmark.runner.ScenarioRunner.run")
@mock.patch("rally.benchmark.utils.osclients")
@mock.patch("rally.benchmark.engine.osclients")
def test_task_status_failed(self, mock_osclients_engine,
mock_osclients_utils, mock_scenario_run):
def test_task_status_basic_chain_validation_fails(self,
                                                  mock_engine_osclients,
                                                  mock_utils_osclients,
                                                  mock_scenario_run):
    """When the runner raises InvalidScenarioArgument, the task records a
    failed-validation result (empty raw data) and still reaches FINISHED.
    """
    fake_task = mock.MagicMock()
    tester = engine.TestEngine(self.valid_test_config_continuous_times,
                               fake_task)
    mock_engine_osclients.Clients.return_value = fakes.FakeClients()
    mock_utils_osclients.Clients.return_value = fakes.FakeClients()
    # Make every scenario run fail argument validation.
    validation_exc = exceptions.InvalidScenarioArgument()
    mock_scenario_run.side_effect = validation_exc
    with tester.bind(self.valid_endpoint):
        tester.run()
    benchmark_name = 'NovaServers.boot_and_delete_server'
    benchmark_results = {
        'name': benchmark_name, 'pos': 0,
        'kw': self.valid_test_config_continuous_times[benchmark_name][0],
    }
    s = consts.TaskStatus
    # Expected task lifecycle: start benchmarking, append the failed
    # validation record, then finish normally (no crash).
    expected = [
        mock.call.update_status(s.TEST_TOOL_BENCHMARKING),
        mock.call.append_results(benchmark_results,
                                 {'raw': [],
                                  'validation': {'is_valid': False,
                                                 'exc_msg':
                                                 validation_exc.message}}),
        mock.call.update_status(s.FINISHED)
    ]
    # NOTE(msdubov): Ignore task['uuid'] calls which are used for logging
    mock_calls = filter(lambda call: '__getitem__' not in call[0],
                        fake_task.mock_calls)
    self.assertEqual(mock_calls, expected)
@mock.patch("rally.benchmark.runner.ScenarioRunner.run")
@mock.patch("rally.benchmark.utils.osclients")
@mock.patch("rally.benchmark.engine.osclients")
def test_task_status_failed(self, mock_engine_osclients,
mock_utils_osclients, mock_scenario_run):
fake_task = mock.MagicMock()
tester = engine.TestEngine(self.valid_test_config_continuous_times,
fake_task)
mock_engine_osclients.Clients.return_value = fakes.FakeClients()
mock_utils_osclients.Clients.return_value = fakes.FakeClients()
mock_scenario_run.side_effect = exceptions.TestException()
try:
with tester.bind(self.valid_endpoint):

View File

@ -18,6 +18,8 @@ import mock
import multiprocessing
from rally.benchmark import runner
from rally.benchmark import validation
from rally import exceptions
from tests import fakes
from tests import test
@ -304,12 +306,12 @@ class ScenarioTestCase(test.TestCase):
srunner._run_scenario_periodically.assert_called_once_with(
FakeScenario, "do_it", {"a": 1}, 2, 3, 1)
@mock.patch("rally.benchmark.utils.create_openstack_clients")
@mock.patch("rally.benchmark.runner.base")
@mock.patch("rally.benchmark.utils.osclients")
def test_run(self, mock_osclients, mock_base, mock_clients):
def _set_mocks_for_run(self, mock_osclients, mock_base, mock_clients,
validators=None):
FakeScenario = mock.MagicMock()
FakeScenario.init = mock.MagicMock(return_value={})
if validators:
FakeScenario.do_it.validators = validators
mock_osclients.Clients.return_value = fakes.FakeClients()
srunner = runner.ScenarioRunner(mock.MagicMock(), self.fake_kw)
@ -320,6 +322,16 @@ class ScenarioTestCase(test.TestCase):
mock_base.Scenario.get_by_name = \
mock.MagicMock(return_value=FakeScenario)
return FakeScenario, srunner
@mock.patch("rally.benchmark.utils.create_openstack_clients")
@mock.patch("rally.benchmark.runner.base")
@mock.patch("rally.benchmark.utils.osclients")
def test_run(self, mock_osclients, mock_base, mock_clients):
FakeScenario, srunner = self._set_mocks_for_run(mock_osclients,
mock_base,
mock_clients)
result = srunner.run("FakeScenario.do_it", {})
self.assertEqual(result, "result")
srunner.run("FakeScenario.do_it",
@ -357,7 +369,25 @@ class ScenarioTestCase(test.TestCase):
mock.call.init({"arg": 1}),
mock.call.init({"fake": "arg"}),
]
self.assertEqual(FakeScenario.mock_calls, expected)
# NOTE(olkonami): Ignore __iter__ calls in loop
mock_calls = filter(lambda call: '__iter__' not in call[0],
FakeScenario.mock_calls)
self.assertEqual(mock_calls, expected)
@mock.patch("rally.benchmark.utils.create_openstack_clients")
@mock.patch("rally.benchmark.runner.base")
@mock.patch("rally.benchmark.utils.osclients")
def test_run_validation_failure(self, mock_osclients, mock_base,
                                mock_clients):
    """A validator returning is_valid=False makes ScenarioRunner.run
    raise InvalidScenarioArgument before the scenario is executed.
    """
    def evil_validator(**kwargs):
        # Always-failing validator attached to the fake scenario method.
        return validation.ValidationResult(is_valid=False)
    FakeScenario, srunner = self._set_mocks_for_run(mock_osclients,
                                                    mock_base,
                                                    mock_clients,
                                                    [evil_validator])
    self.assertRaises(exceptions.InvalidScenarioArgument,
                      srunner.run, "FakeScenario.do_it", {})
@mock.patch("rally.benchmark.utils.create_openstack_clients")
@mock.patch("rally.benchmark.runner.base")

View File

@ -0,0 +1,32 @@
# Copyright 2014: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from rally.benchmark import validation
from tests import test
class ValidationUtilsTestCase(test.TestCase):
    """Tests for the validator-registration decorator."""

    def test_add_validator(self):
        def checker():
            pass

        @validation.add_validator(checker)
        def decorated():
            pass

        # The decorator must expose exactly the one attached validator.
        attached = getattr(decorated, "validators")
        self.assertEqual(len(attached), 1)
        self.assertEqual(attached[0], checker)

View File

@ -134,6 +134,7 @@ class APITestCase(test.TestCase):
},
{
'raw': ['fake_result'],
'validation': {'is_valid': True},
},
)