Merge "Add task format converter"

Jenkins 2015-10-15 17:32:06 +00:00 committed by Gerrit Code Review
commit b2995a9dd0
3 changed files with 478 additions and 180 deletions
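In short: the task engine gains a TaskConfig wrapper that accepts both the flat v1 task format (a mapping of scenario name to a list of run specs) and the new v2 format (an explicit list of subtasks), converting v1 input into v2-style subtasks internally. A minimal standalone sketch of that conversion, for orientation only (the committed implementation is TaskConfig._make_subtasks in the first file below):

import copy

def convert_v1_to_subtasks(config):
    # Fold each (scenario name, run spec) pair of a v1 task dict into
    # one v2-style subtask holding a single, named scenario.
    subtasks = []
    for name, v1_scenarios in config.items():
        for v1_scenario in v1_scenarios:
            v2_scenario = copy.deepcopy(v1_scenario)
            v2_scenario["name"] = name
            subtasks.append({"title": name, "scenarios": [v2_scenario]})
    return subtasks

convert_v1_to_subtasks({"Dummy.dummy": [{"args": {"sleep": 0}}]})
# -> [{"title": "Dummy.dummy",
#      "scenarios": [{"name": "Dummy.dummy", "args": {"sleep": 0}}]}]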

View File

@@ -40,39 +40,6 @@ from rally.task import sla
LOG = logging.getLogger(__name__)
CONFIG_SCHEMA = {
"type": "object",
"$schema": consts.JSON_SCHEMA,
"patternProperties": {
".*": {
"type": "array",
"items": {
"type": "object",
"properties": {
"args": {
"type": "object"
},
"runner": {
"type": "object",
"properties": {
"type": {"type": "string"}
},
"required": ["type"]
},
"context": {
"type": "object"
},
"sla": {
"type": "object"
},
},
"additionalProperties": False
}
}
}
}
class ResultConsumer(object):
"""ResultConsumer class stores results from ScenarioRunner, checks SLA."""
@@ -80,7 +47,7 @@ class ResultConsumer(object):
"""ResultConsumer constructor.
:param key: Scenario identifier
:param task: Task to run
:param task: Instance of Task, task to run
:param runner: ScenarioRunner instance that produces results to be
consumed
:param abort_on_sla_failure: True if the execution should be stopped
@@ -198,14 +165,21 @@ class BenchmarkEngine(object):
abort_on_sla_failure=False):
"""BenchmarkEngine constructor.
:param config: The configuration with specified benchmark scenarios
:param task: The current task which is being performed
:param config: Dict with configuration of specified benchmark scenarios
:param task: Instance of Task,
the current task which is being performed
:param admin: Dict with admin credentials
:param users: List of dicts with user credentials
:param abort_on_sla_failure: True if the execution should be stopped
when some SLA check fails
"""
self.config = config
try:
self.config = TaskConfig(config)
except Exception as e:
log = [str(type(e)), str(e), json.dumps(traceback.format_exc())]
task.set_failed(log=log)
raise exceptions.InvalidTaskException(str(e))
self.task = task
self.admin = admin and objects.Endpoint(**admin) or None
self.existing_users = users or []
@@ -220,7 +194,11 @@
_("Task validation of scenarios names."))
def _validate_config_scenarios_name(self, config):
available = set(s.get_name() for s in scenario.Scenario.get_all())
specified = set(six.iterkeys(config))
specified = set()
for subtask in config.subtasks:
for s in subtask.scenarios:
specified.add(s["name"])
if not specified.issubset(available):
names = ", ".join(specified - available)
@@ -228,18 +206,19 @@
@rutils.log_task_wrapper(LOG.info, _("Task validation of syntax."))
def _validate_config_syntax(self, config):
for scenario_name, values in six.iteritems(config):
for pos, kw in enumerate(values):
for subtask in config.subtasks:
for pos, scenario_obj in enumerate(subtask.scenarios):
try:
runner.ScenarioRunner.validate(kw.get("runner", {}))
context.ContextManager.validate(kw.get("context", {}),
non_hidden=True)
sla.SLA.validate(kw.get("sla", {}))
runner.ScenarioRunner.validate(
scenario_obj.get("runner", {}))
context.ContextManager.validate(
scenario_obj.get("context", {}), non_hidden=True)
sla.SLA.validate(scenario_obj.get("sla", {}))
except (exceptions.RallyException,
jsonschema.ValidationError) as e:
raise exceptions.InvalidBenchmarkConfig(
name=scenario_name,
pos=pos, config=kw,
name=scenario_obj["name"],
pos=pos, config=scenario_obj,
reason=six.text_type(e)
)
@@ -281,17 +260,17 @@ class BenchmarkEngine(object):
for u in ctx_conf["users"]:
user = osclients.Clients(u["endpoint"])
for name, values in six.iteritems(config):
for pos, kwargs in enumerate(values):
for subtask in config.subtasks:
for pos, scenario_obj in enumerate(subtask.scenarios):
self._validate_config_semantic_helper(
admin, user, name, pos, deployment, kwargs)
admin, user, scenario_obj["name"],
pos, deployment, scenario_obj)
@rutils.log_task_wrapper(LOG.info, _("Task validation."))
def validate(self):
"""Perform full task configuration validation."""
self.task.update_status(consts.TaskStatus.VERIFYING)
try:
jsonschema.validate(self.config, CONFIG_SCHEMA)
self._validate_config_scenarios_name(self.config)
self._validate_config_syntax(self.config)
self._validate_config_semantic(self.config)
@@ -332,28 +311,191 @@ class BenchmarkEngine(object):
corresponding benchmark test launches
"""
self.task.update_status(consts.TaskStatus.RUNNING)
for name in self.config:
for n, kw in enumerate(self.config[name]):
for subtask in self.config.subtasks:
for pos, scenario_obj in enumerate(subtask.scenarios):
if ResultConsumer.is_task_in_aborting_status(
self.task["uuid"]):
LOG.info("Received aborting signal.")
self.task.update_status(consts.TaskStatus.ABORTED)
return
key = {"name": name, "pos": n, "kw": kw}
name = scenario_obj["name"]
key = {"name": name, "pos": pos, "kw": scenario_obj}
LOG.info("Running benchmark with key: \n%s"
% json.dumps(key, indent=2))
runner_obj = self._get_runner(kw)
context_obj = self._prepare_context(kw.get("context", {}),
name, self.admin)
runner_obj = self._get_runner(scenario_obj)
context_obj = self._prepare_context(
scenario_obj.get("context", {}), name, self.admin)
try:
with ResultConsumer(key, self.task, runner_obj,
self.abort_on_sla_failure):
with context.ContextManager(context_obj):
runner_obj.run(
name, context_obj, kw.get("args", {}))
runner_obj.run(name, context_obj,
scenario_obj.get("args", {}))
except Exception as e:
LOG.exception(e)
if objects.Task.get_status(
self.task["uuid"]) != consts.TaskStatus.ABORTED:
self.task.update_status(consts.TaskStatus.FINISHED)
class TaskConfig(object):
"""Version-aware wrapper around task.
"""
CONFIG_SCHEMA_V1 = {
"type": "object",
"$schema": consts.JSON_SCHEMA,
"patternProperties": {
".*": {
"type": "array",
"items": {
"type": "object",
"properties": {
"args": {"type": "object"},
"runner": {
"type": "object",
"properties": {"type": {"type": "string"}},
"required": ["type"]
},
"context": {"type": "object"},
"sla": {"type": "object"},
},
"additionalProperties": False
}
}
}
}
CONFIG_SCHEMA_V2 = {
"type": "object",
"$schema": consts.JSON_SCHEMA,
"properties": {
"version": {"type": "number"},
"title": {"type": "string"},
"description": {"type": "string"},
"tags": {
"type": "array",
"items": {"type": "string"}
},
"subtasks": {
"type": "array",
"minItems": 1,
"items": {
"type": "object",
"properties": {
"title": {"type": "string"},
"group": {"type": "string"},
"description": {"type": "string"},
"tags": {
"type": "array",
"items": {"type": "string"}
},
"run_in_parallel": {"type": "boolean"},
"scenarios": {
"type": "array",
"minItems": 1,
"maxItems": 1,
"items": {
"type": "object",
"properties": {
"name": {"type": "string"},
"args": {"type": "object"},
"runner": {
"type": "object",
"properties": {
"type": {"type": "string"}
},
"required": ["type"]
},
"sla": {"type": "object"},
"context": {"type": "object"}
},
"additionalProperties": False,
"required": ["name", "runner"]
}
}
},
"additionalProperties": False,
"required": ["title", "scenarios"]
}
}
},
"additionalProperties": False,
"required": ["title", "subtasks"]
}
CONFIG_SCHEMAS = {1: CONFIG_SCHEMA_V1, 2: CONFIG_SCHEMA_V2}
def __init__(self, config):
"""TaskConfig constructor.
:param config: Dict with configuration of specified task
"""
self.version = self._get_version(config)
self._validate_version()
self._validate_json(config)
self.title = config.get("title", "Task")
self.tags = config.get("tags", [])
self.description = config.get("description")
self.subtasks = self._make_subtasks(config)
# if self.version == 1:
# TODO(ikhudoshyn): Warn user about deprecated format
@staticmethod
def _get_version(config):
return config.get("version", 1)
def _validate_version(self):
if self.version not in self.CONFIG_SCHEMAS:
allowed = ", ".join([str(k) for k in self.CONFIG_SCHEMAS])
msg = (_("Task configuration version {0} is not supported. "
"Supported versions: {1}")).format(self.version, allowed)
raise exceptions.InvalidTaskException(msg)
def _validate_json(self, config):
try:
jsonschema.validate(config, self.CONFIG_SCHEMAS[self.version])
except Exception as e:
raise exceptions.InvalidTaskException(str(e))
def _make_subtasks(self, config):
if self.version == 2:
return [SubTask(s) for s in config["subtasks"]]
elif self.version == 1:
subtasks = []
for name, v1_scenarios in six.iteritems(config):
for v1_scenario in v1_scenarios:
v2_scenario = copy.deepcopy(v1_scenario)
v2_scenario["name"] = name
subtasks.append(
SubTask({"title": name, "scenarios": [v2_scenario]}))
return subtasks
class SubTask(object):
"""Subtask -- unit of execution in Task
"""
def __init__(self, config):
"""Subtask constructor.
:param config: Dict with configuration of specified subtask
"""
self.title = config["title"]
self.tags = config.get("tags", [])
self.group = config.get("group")
self.description = config.get("description")
self.scenarios = config["scenarios"]
self.context = config.get("context", {})
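A short usage sketch of the wrapper defined above, assuming it is importable as rally.task.engine.TaskConfig (the path the unit tests below patch):

from rally import exceptions
from rally.task import engine

# A config without a "version" key is treated as v1 and converted.
tc = engine.TaskConfig({"Dummy.dummy": [{"runner": {"type": "serial"}}]})
assert tc.version == 1
assert tc.subtasks[0].scenarios[0]["name"] == "Dummy.dummy"

# Unsupported versions are rejected before any schema validation runs.
try:
    engine.TaskConfig({"version": 3})
except exceptions.InvalidTaskException:
    pass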

View File

@@ -43,6 +43,63 @@ class TaskTestCase(unittest.TestCase):
]
}
def _get_sample_task_config_v2(self):
return {
"version": 2,
"title": "Dummy task",
"tags": ["dummy", "functional_test"],
"subtasks": [
{
"title": "first-subtask",
"group": "Dummy group",
"description": "The first subtask in dummy task",
"tags": ["dummy", "functional_test"],
"run_in_parallel": False,
"scenarios": [{
"name": "Dummy.dummy",
"args": {
"sleep": 0
},
"runner": {
"type": "constant",
"times": 10,
"concurrency": 2
},
"context": {
"users": {
"tenants": 3,
"users_per_tenant": 2
}
}
}]
},
{
"title": "second-subtask",
"group": "Dummy group",
"description": "The second subtask in dummy task",
"tags": ["dummy", "functional_test"],
"run_in_parallel": False,
"scenarios": [{
"name": "Dummy.dummy",
"args": {
"sleep": 1
},
"runner": {
"type": "constant",
"times": 10,
"concurrency": 2
},
"context": {
"users": {
"tenants": 3,
"users_per_tenant": 2
}
}
}]
}
]
}
def _get_deployment_uuid(self, output):
return re.search(
r"Using deployment: (?P<uuid>[0-9a-f\-]{36})",
@@ -643,6 +700,19 @@ class TaskTestCase(unittest.TestCase):
current_task = utils.get_global("RALLY_TASK", rally.env)
self.assertEqual(uuid, current_task)
def test_start_v2(self):
rally = utils.Rally()
deployment_id = utils.get_global("RALLY_DEPLOYMENT", rally.env)
cfg = self._get_sample_task_config_v2()
config = utils.TaskConfig(cfg)
output = rally(("task start --task %(task_file)s "
"--deployment %(deployment_id)s") %
{"task_file": config.filename,
"deployment_id": deployment_id})
result = re.search(
r"(?P<task_id>[0-9a-f\-]{36}): started", output)
self.assertIsNotNone(result)
class SLATestCase(unittest.TestCase):

View File

@@ -34,17 +34,21 @@ class TestException(exceptions.RallyException):
class BenchmarkEngineTestCase(test.TestCase):
def test_init(self):
@mock.patch("rally.task.engine.TaskConfig")
def test_init(self, mock_task_config):
config = mock.MagicMock()
task = mock.MagicMock()
mock_task_config.return_value = fake_task_instance = mock.MagicMock()
eng = engine.BenchmarkEngine(config, task)
self.assertEqual(eng.config, config)
mock_task_config.assert_has_calls([mock.call(config)])
self.assertEqual(eng.config, fake_task_instance)
self.assertEqual(eng.task, task)
@mock.patch("rally.task.engine.jsonschema.validate")
def test_validate(self, mock_validate):
config = mock.MagicMock()
eng = engine.BenchmarkEngine(config, mock.MagicMock())
@mock.patch("rally.task.engine.TaskConfig")
@mock.patch("jsonschema.validate")
def test_validate(self, mock_validate, mock_task_config):
mock_task_config.return_value = config = mock.MagicMock()
eng = engine.BenchmarkEngine(mock.MagicMock(), mock.MagicMock())
mock_validate = mock.MagicMock()
eng._validate_config_scenarios_name = mock_validate.names
@@ -65,13 +69,12 @@ class BenchmarkEngineTestCase(test.TestCase):
"wrong": True
}
task = mock.MagicMock()
eng = engine.BenchmarkEngine(config, task)
self.assertRaises(exceptions.InvalidTaskException,
eng.validate)
engine.BenchmarkEngine, config, task)
self.assertTrue(task.set_failed.called)
@mock.patch("rally.task.engine.jsonschema.validate")
def test_validate__wrong_scenarios_name(self, mock_validate):
@mock.patch("rally.task.engine.TaskConfig")
def test_validate__wrong_scenarios_name(self, mock_task_config):
task = mock.MagicMock()
eng = engine.BenchmarkEngine(mock.MagicMock(), task)
eng._validate_config_scenarios_name = mock.MagicMock(
@@ -80,8 +83,8 @@
self.assertRaises(exceptions.InvalidTaskException, eng.validate)
self.assertTrue(task.set_failed.called)
@mock.patch("rally.task.engine.jsonschema.validate")
def test_validate__wrong_syntax(self, mock_validate):
@mock.patch("rally.task.engine.TaskConfig")
def test_validate__wrong_syntax(self, mock_task_config):
task = mock.MagicMock()
eng = engine.BenchmarkEngine(mock.MagicMock(), task)
eng._validate_config_scenarios_name = mock.MagicMock()
@@ -91,8 +94,8 @@
self.assertRaises(exceptions.InvalidTaskException, eng.validate)
self.assertTrue(task.set_failed.called)
@mock.patch("rally.task.engine.jsonschema.validate")
def test_validate__wrong_semantic(self, mock_validate):
@mock.patch("rally.task.engine.TaskConfig")
def test_validate__wrong_semantic(self, mock_task_config):
task = mock.MagicMock()
eng = engine.BenchmarkEngine(mock.MagicMock(), task)
eng._validate_config_scenarios_name = mock.MagicMock()
@@ -103,75 +106,114 @@
self.assertRaises(exceptions.InvalidTaskException, eng.validate)
self.assertTrue(task.set_failed.called)
@mock.patch("rally.task.engine.TaskConfig")
@mock.patch("rally.task.engine.scenario.Scenario.get_all")
def test__validate_config_scenarios_name(self, mock_scenario_get_all):
config = {
"a": [],
"b": []
}
def test__validate_config_scenarios_name(
self, mock_scenario_get_all, mock_task_config):
mock_task_instance = mock.MagicMock()
mock_subtask = mock.MagicMock()
mock_subtask.scenarios = [
{"name": "a"},
{"name": "b"}
]
mock_task_instance.subtasks = [mock_subtask]
mock_scenario_get_all.return_value = [
mock.MagicMock(get_name=lambda: "e"),
mock.MagicMock(get_name=lambda: "b"),
mock.MagicMock(get_name=lambda: "a")
]
eng = engine.BenchmarkEngine(config, mock.MagicMock())
eng._validate_config_scenarios_name(config)
eng = engine.BenchmarkEngine(mock.MagicMock(), mock.MagicMock())
eng._validate_config_scenarios_name(mock_task_instance)
@mock.patch("rally.task.engine.TaskConfig")
@mock.patch("rally.task.engine.scenario.Scenario")
def test__validate_config_scenarios_name_non_existing(self,
mock_scenario):
config = {
"exist": [],
"nonexist1": [],
"nonexist2": []
}
def test__validate_config_scenarios_name_non_existing(
self, mock_scenario, mock_task_config):
mock_task_instance = mock.MagicMock()
mock_subtask = mock.MagicMock()
mock_subtask.scenarios = [
{"name": "exist"},
{"name": "nonexist1"},
{"name": "nonexist2"}
]
mock_task_instance.subtasks = [mock_subtask]
mock_scenario.list_benchmark_scenarios.return_value = ["exist", "aaa"]
eng = engine.BenchmarkEngine(config, mock.MagicMock())
eng = engine.BenchmarkEngine(mock.MagicMock(), mock.MagicMock())
self.assertRaises(exceptions.NotFoundScenarios,
eng._validate_config_scenarios_name, config)
eng._validate_config_scenarios_name,
mock_task_instance)
@mock.patch("rally.task.engine.TaskConfig")
@mock.patch("rally.task.engine.runner.ScenarioRunner.validate")
@mock.patch("rally.task.engine.context.ContextManager.validate")
def test__validate_config_syntax(
self, mock_context_manager_validate,
mock_scenario_runner_validate):
config = {"sca": [{"context": "a"}], "scb": [{"runner": "b"}]}
mock_scenario_runner_validate,
mock_task_config
):
mock_task_instance = mock.MagicMock()
mock_subtask = mock.MagicMock()
mock_subtask.scenarios = [
{"name": "sca", "context": "a"},
{"name": "scb", "runner": "b"}
]
mock_task_instance.subtasks = [mock_subtask]
eng = engine.BenchmarkEngine(mock.MagicMock(), mock.MagicMock())
eng._validate_config_syntax(config)
eng._validate_config_syntax(mock_task_instance)
mock_scenario_runner_validate.assert_has_calls(
[mock.call({}), mock.call("b")], any_order=True)
mock_context_manager_validate.assert_has_calls(
[mock.call("a", non_hidden=True), mock.call({}, non_hidden=True)],
any_order=True)
@mock.patch("rally.task.engine.TaskConfig")
@mock.patch("rally.task.engine.runner.ScenarioRunner")
@mock.patch("rally.task.engine.context.ContextManager.validate")
def test__validate_config_syntax__wrong_runner(
self, mock_context_manager_validate, mock_scenario_runner):
config = {"sca": [{"context": "a"}], "scb": [{"runner": "b"}]}
self, mock_context_manager_validate,
mock_scenario_runner, mock_task_config):
mock_task_instance = mock.MagicMock()
mock_subtask = mock.MagicMock()
mock_subtask.scenarios = [
{"name": "sca", "context": "a"},
{"name": "scb", "runner": "b"}
]
mock_task_instance.subtasks = [mock_subtask]
eng = engine.BenchmarkEngine(mock.MagicMock(), mock.MagicMock())
mock_scenario_runner.validate = mock.MagicMock(
side_effect=jsonschema.ValidationError("a"))
self.assertRaises(exceptions.InvalidBenchmarkConfig,
eng._validate_config_syntax, config)
eng._validate_config_syntax, mock_task_instance)
@mock.patch("rally.task.engine.TaskConfig")
@mock.patch("rally.task.engine.runner.ScenarioRunner.validate")
@mock.patch("rally.task.engine.context.ContextManager")
def test__validate_config_syntax__wrong_context(
self, mock_context_manager, mock_scenario_runner_validate):
config = {"sca": [{"context": "a"}], "scb": [{"runner": "b"}]}
self, mock_context_manager, mock_scenario_runner_validate,
mock_task_config):
mock_task_instance = mock.MagicMock()
mock_subtask = mock.MagicMock()
mock_subtask.scenarios = [
{"name": "sca", "context": "a"},
{"name": "scb", "runner": "b"}
]
mock_task_instance.subtasks = [mock_subtask]
eng = engine.BenchmarkEngine(mock.MagicMock(), mock.MagicMock())
mock_context_manager.validate = mock.MagicMock(
side_effect=jsonschema.ValidationError("a"))
self.assertRaises(exceptions.InvalidBenchmarkConfig,
eng._validate_config_syntax, config)
eng._validate_config_syntax, mock_task_instance)
@mock.patch("rally.task.engine.TaskConfig")
@mock.patch("rally.task.engine.scenario.Scenario.validate")
def test__validate_config_semantic_helper(self, mock_scenario_validate):
def test__validate_config_semantic_helper(self, mock_scenario_validate,
mock_task_config):
deployment = mock.MagicMock()
eng = engine.BenchmarkEngine(mock.MagicMock(), mock.MagicMock())
eng._validate_config_semantic_helper("admin", "user", "name", "pos",
@@ -180,19 +222,21 @@
"name", {"args": "args"}, admin="admin", users=["user"],
deployment=deployment)
@mock.patch("rally.task.engine.TaskConfig")
@mock.patch("rally.task.engine.scenario.Scenario.validate",
side_effect=exceptions.InvalidScenarioArgument)
def test__validate_config_semantic_helper_invalid_arg(
self, mock_scenario_validate):
self, mock_scenario_validate, mock_task_config):
eng = engine.BenchmarkEngine(mock.MagicMock(), mock.MagicMock())
self.assertRaises(exceptions.InvalidBenchmarkConfig,
eng._validate_config_semantic_helper, "a", "u", "n",
"p", mock.MagicMock(), {})
@mock.patch("rally.task.engine.TaskConfig")
@mock.patch("rally.task.engine.existing_users.ExistingUsers")
def test_get_user_ctx_for_validation_existing_users(
self, mock_existing_users):
self, mock_existing_users, mock_task_config):
context = {"a": 10}
users = [mock.MagicMock(), mock.MagicMock()]
@@ -207,6 +251,7 @@
self.assertEqual(mock_existing_users.return_value, result)
@mock.patch("rally.task.engine.TaskConfig")
@mock.patch("rally.task.engine.osclients.Clients")
@mock.patch("rally.task.engine.users_ctx")
@mock.patch("rally.task.engine.BenchmarkEngine"
@@ -216,20 +261,29 @@
def test__validate_config_semantic(
self, mock_deployment_get,
mock__validate_config_semantic_helper,
mock_users_ctx, mock_clients):
mock_users_ctx, mock_clients, mock_task_config):
mock_users_ctx.UserGenerator = fakes.FakeUserContext
mock_clients.return_value = mock.MagicMock()
config = {
"a": [mock.MagicMock(), mock.MagicMock()],
"b": [mock.MagicMock()]
}
mock_task_instance = mock.MagicMock()
mock_subtask1 = mock.MagicMock()
mock_subtask1.scenarios = [
{"name": "a", "kw": 0},
{"name": "a", "kw": 1}
]
mock_subtask2 = mock.MagicMock()
mock_subtask2.scenarios = [
{"name": "b", "kw": 0},
]
mock_task_instance.subtasks = [mock_subtask1, mock_subtask2]
fake_task = mock.MagicMock()
eng = engine.BenchmarkEngine(config, fake_task)
eng = engine.BenchmarkEngine(mock_task_instance, fake_task)
eng.admin = "admin"
eng._validate_config_semantic(config)
eng._validate_config_semantic(mock_task_instance)
expected_calls = [
mock.call("admin"),
@@ -242,14 +296,18 @@
admin = user = mock_clients.return_value
fake_deployment = mock_deployment_get.return_value
expected_calls = [
mock.call(admin, user, "a", 0, fake_deployment, config["a"][0]),
mock.call(admin, user, "a", 1, fake_deployment, config["a"][1]),
mock.call(admin, user, "b", 0, fake_deployment, config["b"][0])
mock.call(admin, user, "a", 0, fake_deployment,
{"name": "a", "kw": 0}),
mock.call(admin, user, "a", 1, fake_deployment,
{"name": "a", "kw": 1}),
mock.call(admin, user, "b", 0, fake_deployment,
{"name": "b", "kw": 0})
]
mock__validate_config_semantic_helper.assert_has_calls(
expected_calls, any_order=True)
@mock.patch("rally.common.objects.Task.get_status")
@mock.patch("rally.task.engine.TaskConfig")
@mock.patch("rally.task.engine.ResultConsumer")
@mock.patch("rally.task.engine.context.ContextManager.cleanup")
@mock.patch("rally.task.engine.context.ContextManager.setup")
@@ -258,74 +316,19 @@
def test_run__update_status(
self, mock_scenario_runner, mock_scenario,
mock_context_manager_setup, mock_context_manager_cleanup,
mock_result_consumer, mock_task_get_status):
mock_result_consumer, mock_task_config, mock_task_get_status):
task = mock.MagicMock()
mock_task_get_status.return_value = consts.TaskStatus.ABORTING
eng = engine.BenchmarkEngine([], task)
eng = engine.BenchmarkEngine(mock.MagicMock(), task)
eng.run()
task.update_status.assert_has_calls([
mock.call(consts.TaskStatus.RUNNING),
mock.call(consts.TaskStatus.FINISHED)
])
@mock.patch("rally.common.objects.Task.get_status")
@mock.patch("rally.task.engine.ResultConsumer")
@mock.patch("rally.task.engine.scenario.Scenario")
@mock.patch("rally.task.engine.runner.ScenarioRunner")
@mock.patch("rally.task.engine.context.ContextManager.cleanup")
@mock.patch("rally.task.engine.context.ContextManager.setup")
def test_run__config_has_args(
self, mock_context_manager_setup, mock_context_manager_cleanup,
mock_scenario_runner, mock_scenario,
mock_result_consumer, mock_task_get_status):
config = {
"a.benchmark": [{"args": {"a": "a", "b": 1}}],
"b.benchmark": [{"args": {"a": 1}}]
}
task = mock.MagicMock()
mock_task_get_status.return_value = consts.TaskStatus.RUNNING
eng = engine.BenchmarkEngine(config, task)
eng.run()
@mock.patch("rally.common.objects.Task.get_status")
@mock.patch("rally.task.engine.ResultConsumer")
@mock.patch("rally.task.engine.scenario.Scenario")
@mock.patch("rally.task.engine.runner.ScenarioRunner")
@mock.patch("rally.task.engine.context.ContextManager.cleanup")
@mock.patch("rally.task.engine.context.ContextManager.setup")
def test_run__config_has_runner(
self, mock_context_manager_setup, mock_context_manager_cleanup,
mock_scenario_runner, mock_scenario, mock_result_consumer,
mock_task_get_status):
config = {
"a.benchmark": [{"runner": {"type": "a", "b": 1}}],
"b.benchmark": [{"runner": {"type": "c", "a": 1}}]
}
task = mock.MagicMock()
mock_task_get_status.return_value = consts.TaskStatus.ABORTED
eng = engine.BenchmarkEngine(config, task)
eng.run()
@mock.patch("rally.common.objects.Task.get_status")
@mock.patch("rally.task.engine.ResultConsumer")
@mock.patch("rally.task.engine.scenario.Scenario")
@mock.patch("rally.task.engine.runner.ScenarioRunner")
@mock.patch("rally.task.engine.context.ContextManager.cleanup")
@mock.patch("rally.task.engine.context.ContextManager.setup")
def test_run__config_has_context(
self, mock_context_manager_setup, mock_context_manager_cleanup,
mock_scenario_runner, mock_scenario, mock_result_consumer,
mock_task_get_status):
config = {
"a.benchmark": [{"context": {"context_a": {"a": 1}}}],
"b.benchmark": [{"context": {"context_b": {"b": 2}}}]
}
task = mock.MagicMock()
mock_task_get_status.return_value = consts.TaskStatus.RUNNING
eng = engine.BenchmarkEngine(config, task)
eng.run()
@mock.patch("rally.task.engine.objects.task.Task.get_status")
@mock.patch("rally.task.engine.TaskConfig")
@mock.patch("rally.task.engine.LOG")
@mock.patch("rally.task.engine.ResultConsumer")
@mock.patch("rally.task.engine.scenario.Scenario")
@@ -334,19 +337,22 @@
@mock.patch("rally.task.engine.context.ContextManager.setup")
def test_run_exception_is_logged(
self, mock_context_manager_setup, mock_context_manager_cleanup,
mock_scenario_runner, mock_scenario,
mock_result_consumer, mock_log, mock_task_get_status):
mock_scenario_runner, mock_scenario, mock_result_consumer,
mock_log, mock_task_config, mock_task_get_status):
mock_context_manager_setup.side_effect = Exception
mock_result_consumer.is_task_in_aborting_status.return_value = False
config = {
"a.benchmark": [{"context": {"context_a": {"a": 1}}}],
"b.benchmark": [{"context": {"context_b": {"b": 2}}}]
}
task = mock.MagicMock()
eng = engine.BenchmarkEngine(config, task)
mock_task_instance = mock.MagicMock()
mock_subtask = mock.MagicMock()
mock_subtask.scenarios = [
{"name": "a.benchmark", "context": {"context_a": {"a": 1}}},
{"name": "b.benchmark", "context": {"context_b": {"b": 2}}}
]
mock_task_instance.subtasks = [mock_subtask]
mock_task_config.return_value = mock_task_instance
eng = engine.BenchmarkEngine(mock.MagicMock(), mock.MagicMock())
eng.run()
self.assertEqual(2, mock_log.exception.call_count)
@@ -407,8 +413,9 @@
self.assertEqual(mock.call(consts.TaskStatus.ABORTED),
task.update_status.mock_calls[-1])
@mock.patch("rally.task.engine.TaskConfig")
@mock.patch("rally.task.engine.scenario.Scenario.get")
def test__prepare_context(self, mock_scenario_get):
def test__prepare_context(self, mock_scenario_get, mock_task_config):
default_context = {"a": 1, "b": 2}
mock_scenario_get.return_value._meta_get.return_value = default_context
task = mock.MagicMock()
@@ -435,8 +442,10 @@
"default_context"
)
@mock.patch("rally.task.engine.TaskConfig")
@mock.patch("rally.task.engine.scenario.Scenario.get")
def test__prepare_context_with_existing_users(self, mock_scenario_get):
def test__prepare_context_with_existing_users(self, mock_scenario_get,
mock_task_config):
mock_scenario_get.return_value._meta_get.return_value = {}
task = mock.MagicMock()
name = "a.benchmark"
@@ -638,3 +647,80 @@
self.assertFalse(runner.abort.called)
# test task.get_status is checked until is_done is not set
self.assertEqual(4, mock_task_get_status.call_count)
class TaskTestCase(test.TestCase):
@mock.patch("jsonschema.validate")
def test_validate_json(self, mock_validate):
config = {}
engine.TaskConfig(config)
mock_validate.assert_has_calls([
mock.call(config, engine.TaskConfig.CONFIG_SCHEMA_V1)])
@mock.patch("jsonschema.validate")
@mock.patch("rally.task.engine.TaskConfig._make_subtasks")
def test_validate_json_v2(self, mock_task_config__make_subtasks,
mock_validate):
config = {"version": 2}
engine.TaskConfig(config)
mock_validate.assert_has_calls([
mock.call(config, engine.TaskConfig.CONFIG_SCHEMA_V2)])
@mock.patch("rally.task.engine.TaskConfig._get_version")
@mock.patch("rally.task.engine.TaskConfig._validate_json")
@mock.patch("rally.task.engine.TaskConfig._make_subtasks")
def test_validate_version(self, mock_task_config__make_subtasks,
mock_task_config__validate_json,
mock_task_config__get_version):
mock_task_config__get_version.return_value = 1
engine.TaskConfig(mock.MagicMock())
@mock.patch("rally.task.engine.TaskConfig._get_version")
@mock.patch("rally.task.engine.TaskConfig._validate_json")
@mock.patch("rally.task.engine.TaskConfig._make_subtasks")
def test_validate_version_wrong_version(
self, mock_task_config__make_subtasks,
mock_task_config__validate_json,
mock_task_config__get_version):
mock_task_config__get_version.return_value = "wrong"
self.assertRaises(exceptions.InvalidTaskException, engine.TaskConfig,
mock.MagicMock())
@mock.patch("rally.task.engine.SubTask")
@mock.patch("rally.task.engine.TaskConfig._get_version")
@mock.patch("rally.task.engine.TaskConfig._validate_json")
def test_make_subtasks_v1(self, mock_task_config__validate_json,
mock_task_config__get_version, mock_sub_task):
mock_task_config__get_version.return_value = 1
config = {"a.benchmark": [{"s": 1}, {"s": 2}],
"b.benchmark": [{"s": 3}]}
self.assertEqual(3, len(engine.TaskConfig(config).subtasks))
mock_sub_task.assert_has_calls([
mock.call({
"title": "a.benchmark",
"scenarios": [{"s": 1, "name": "a.benchmark"}]
}),
mock.call({
"title": "a.benchmark",
"scenarios": [{"s": 2, "name": "a.benchmark"}]
}),
mock.call({
"title": "b.benchmark",
"scenarios": [{"s": 3, "name": "b.benchmark"}]
})
])
@mock.patch("rally.task.engine.SubTask")
@mock.patch("rally.task.engine.TaskConfig._get_version")
@mock.patch("rally.task.engine.TaskConfig._validate_json")
def test_make_subtasks_v2(self, mock_task_config__validate_json,
mock_task_config__get_version, mock_sub_task):
mock_task_config__get_version.return_value = 2
subtask_conf1 = mock.MagicMock()
subtask_conf2 = mock.MagicMock()
config = {"subtasks": [subtask_conf1, subtask_conf2]}
self.assertEqual(2, len(engine.TaskConfig(config).subtasks))
mock_sub_task.assert_has_calls([
mock.call(subtask_conf1),
mock.call(subtask_conf2)])
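
Finally, a quick way to sanity-check a v2 task dict against the new schema outside the engine, using the same jsonschema call the implementation relies on (a sketch; the config mirrors the functional-test sample above):

import jsonschema

from rally.task import engine

v2_config = {
    "version": 2,
    "title": "Dummy task",
    "subtasks": [{
        "title": "first-subtask",
        "scenarios": [{
            "name": "Dummy.dummy",
            "runner": {"type": "constant", "times": 10},
        }],
    }],
}

# Raises jsonschema.ValidationError if the config is malformed.
jsonschema.validate(v2_config, engine.TaskConfig.CONFIG_SCHEMA_V2)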