Files
rally/tests/benchmark/runners/test_constant.py
uppi 047b255fdf Asynchronous result yielding for scenario runners
Benchmark engine spawns a consumer thread, which asynchronously handles results
for each scenario run. The current consumer implementation simply collects all
results and stores them as 'raw' to preserve their integrity.

Scenario runners have to call _send_result method for every single result
instead of collecting a list of results.

Change-Id: Icbff8b2470af3b3ced6bb55573b5842726177b70
2014-07-09 17:57:10 +04:00

141 lines
6.0 KiB
Python

# Copyright 2014: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import jsonschema
from rally.benchmark.runners import base
from rally.benchmark.runners import constant
from rally import consts
from tests import fakes
from tests import test
class ConstantScenarioRunnerTestCase(test.TestCase):
    """Tests for constant.ConstantScenarioRunner.

    The runner is expected to execute the scenario exactly
    config["times"] times and push one schema-valid result per run
    onto its result_queue, including runs that fail or time out.
    """

    def setUp(self):
        super(ConstantScenarioRunnerTestCase, self).setUp()
        # Execute the scenario 4 times total, 2 concurrently, with a
        # 2-second per-run timeout. (Inlined literals instead of locals
        # so the builtin name `type` is not shadowed.)
        self.config = {"times": 4, "concurrency": 2, "timeout": 2,
                       "type": consts.RunnerType.CONSTANT}
        self.context = fakes.FakeUserContext(
            {"task": {"uuid": "uuid"}}).context
        self.args = {"a": 1}

    def _make_runner(self):
        # Build a fresh runner per test so result queues are not shared.
        return constant.ConstantScenarioRunner(
            None, [self.context["admin"]["endpoint"]], self.config)

    def _assert_results(self, runner, with_error=False):
        # Every run must yield exactly one result that passes
        # ScenarioRunnerResult schema validation; failed/timed-out runs
        # additionally carry an 'error' entry.
        self.assertEqual(len(runner.result_queue), self.config["times"])
        for result in runner.result_queue:
            self.assertIsNotNone(base.ScenarioRunnerResult(result))
        if with_error:
            self.assertIn('error', runner.result_queue[0])

    def test_validate(self):
        # A well-formed CONSTANT config validates without raising.
        constant.ConstantScenarioRunner.validate(self.config)

    def test_validate_failed(self):
        # A config declaring a different runner type must be rejected.
        self.config["type"] = consts.RunnerType.CONSTANT_FOR_DURATION
        self.assertRaises(jsonschema.ValidationError,
                          constant.ConstantScenarioRunner.validate,
                          self.config)

    def test_run_scenario_constantly_for_times(self):
        runner = self._make_runner()
        runner._run_scenario(fakes.FakeScenario, "do_it",
                             self.context, self.args)
        self._assert_results(runner)

    def test_run_scenario_constantly_for_times_exception(self):
        runner = self._make_runner()
        runner._run_scenario(fakes.FakeScenario, "something_went_wrong",
                             self.context, self.args)
        self._assert_results(runner, with_error=True)

    def test_run_scenario_constantly_for_times_timeout(self):
        runner = self._make_runner()
        runner._run_scenario(fakes.FakeScenario, "raise_timeout",
                             self.context, self.args)
        self._assert_results(runner, with_error=True)
class ConstantForDurationScenarioRunnerTeestCase(test.TestCase):
    """Tests for constant.ConstantForDurationScenarioRunner.

    With duration=0 the runner executes the scenario exactly once, which
    keeps these tests fast while still exercising the result pipeline.
    """
    # NOTE(review): the class name contains a typo ("Teest"); kept as-is
    # in this change to avoid altering the discovered test class name.

    def setUp(self):
        super(ConstantForDurationScenarioRunnerTeestCase, self).setUp()
        # duration=0 => exactly one scenario execution (see class docstring).
        # Inlined literals instead of locals so the builtin name `type`
        # is not shadowed.
        self.config = {"duration": 0, "concurrency": 2, "timeout": 2,
                       "type": consts.RunnerType.CONSTANT_FOR_DURATION}
        self.context = fakes.FakeUserContext(
            {"task": {"uuid": "uuid"}}).context
        self.args = {"a": 1}

    def _make_runner(self):
        # Build a fresh runner per test so result queues are not shared.
        return constant.ConstantForDurationScenarioRunner(
            None, [self.context["admin"]["endpoint"]], self.config)

    def _assert_results(self, runner, with_error=False):
        # NOTE(mmorais): when duration is 0, scenario executes exactly 1
        # time, so exactly one schema-valid result is expected.
        self.assertEqual(len(runner.result_queue), 1)
        for result in runner.result_queue:
            self.assertIsNotNone(base.ScenarioRunnerResult(result))
        if with_error:
            self.assertIn('error', runner.result_queue[0])

    def test_validate(self):
        # A well-formed CONSTANT_FOR_DURATION config validates without
        # raising.
        constant.ConstantForDurationScenarioRunner.validate(self.config)

    def test_validate_failed(self):
        # A config declaring a different runner type must be rejected.
        self.assertRaises(jsonschema.ValidationError, constant.
                          ConstantForDurationScenarioRunner.validate,
                          dict(self.config, type=consts.RunnerType.CONSTANT))

    def test_run_scenario_constantly_for_duration(self):
        runner = self._make_runner()
        runner._run_scenario(fakes.FakeScenario, "do_it",
                             self.context, self.args)
        self._assert_results(runner)

    def test_run_scenario_constantly_for_duration_exception(self):
        runner = self._make_runner()
        runner._run_scenario(fakes.FakeScenario, "something_went_wrong",
                             self.context, self.args)
        self._assert_results(runner, with_error=True)

    def test_run_scenario_constantly_for_duration_timeout(self):
        runner = self._make_runner()
        runner._run_scenario(fakes.FakeScenario, "raise_timeout",
                             self.context, self.args)
        self._assert_results(runner, with_error=True)