Merge "Abort scenario execution on SLA failure"

Jenkins, 2015-02-13 17:57:44 +00:00 (committed by Gerrit Code Review)
commit b6807f8e40
10 changed files with 556 additions and 173 deletions

View File

@@ -168,7 +168,7 @@ class Task(object):
        benchmark_engine.validate()

    @classmethod
-    def start(cls, deployment, config, task=None):
+    def start(cls, deployment, config, task=None, abort_on_sla_failure=False):
        """Start a task.

        Task is a list of benchmarks that will be called one by one, results of
@@ -177,13 +177,17 @@ class Task(object):
        :param deployment: UUID or name of the deployment
        :param config: a dict with a task configuration
        :param task: Task object. If None, it will be created
+        :param abort_on_sla_failure: if True, the execution of a benchmark
+                                     scenario will stop when any SLA check
+                                     for it fails
        """
        deployment = objects.Deployment.get(deployment)
        task = task or objects.Task(deployment_uuid=deployment["uuid"])
        LOG.info("Benchmark Task %s on Deployment %s" % (task["uuid"],
                                                         deployment["uuid"]))
        benchmark_engine = engine.BenchmarkEngine(
-            config, task, admin=deployment["admin"], users=deployment["users"])
+            config, task, admin=deployment["admin"], users=deployment["users"],
+            abort_on_sla_failure=abort_on_sla_failure)

        try:
            benchmark_engine.validate()
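
A minimal usage sketch of the updated Task.start() call above; the keyword argument and its default come from this change, while the import path, deployment name and task config below are illustrative assumptions:

    # Hypothetical caller: stop a scenario as soon as any of its SLA checks fails.
    from rally import api  # assumed import path for the Task class shown above

    task_config = {"Dummy.dummy": [{"runner": {"type": "serial", "times": 10},
                                    "sla": {"failure_rate": {"max": 0.0}}}]}
    api.Task.start("my-deployment", task_config, abort_on_sla_failure=True)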

View File

@@ -93,18 +93,22 @@ class BenchmarkEngine(object):
        engine.run()  # to run config
    """

-    def __init__(self, config, task, admin=None, users=None):
+    def __init__(self, config, task, admin=None, users=None,
+                 abort_on_sla_failure=False):
        """BenchmarkEngine constructor.

        :param config: The configuration with specified benchmark scenarios
        :param task: The current task which is being performed
        :param admin: Dict with admin credentials
        :param users: List of dicts with user credentials
+        :param abort_on_sla_failure: True if the execution should be stopped
+                                     when some SLA check fails
        """
        self.config = config
        self.task = task
        self.admin = admin and objects.Endpoint(**admin) or None
        self.users = map(lambda u: objects.Endpoint(**u), users or [])
+        self.abort_on_sla_failure = abort_on_sla_failure

    @rutils.log_task_wrapper(LOG.info, _("Task validation check cloud."))
    def _check_cloud(self):
@@ -221,7 +225,7 @@ class BenchmarkEngine(object):
                is_done = threading.Event()
                consumer = threading.Thread(
                    target=self.consume_results,
-                    args=(key, self.task, runner.result_queue, is_done))
+                    args=(key, self.task, is_done, runner))
                consumer.start()
                context_obj = self._prepare_context(kw.get("context", {}),
                                                    name, self.admin)
@@ -240,7 +244,7 @@ class BenchmarkEngine(object):
            consumer.join()
        self.task.update_status(consts.TaskStatus.FINISHED)

-    def consume_results(self, key, task, result_queue, is_done):
+    def consume_results(self, key, task, is_done, runner):
        """Consume scenario runner results from queue and send them to db.

        Has to be run from different thread simultaneously with the runner.run
@@ -248,22 +252,25 @@ class BenchmarkEngine(object):
        :param key: Scenario identifier
        :param task: Running task
-        :param result_queue: Deque with runner results
        :param is_done: Event which is set from the runner thread after the
                        runner finishes it's work.
+        :param runner: ScenarioRunner object that was used to run a task
        """
        results = []
+        sla_checker = base_sla.SLAChecker(key["kw"])

        while True:
-            if result_queue:
-                result = result_queue.popleft()
+            if runner.result_queue:
+                result = runner.result_queue.popleft()
                results.append(result)
+                success = sla_checker.add_iteration(result)
+                if self.abort_on_sla_failure and not success:
+                    runner.abort()
            elif is_done.isSet():
                break
            else:
                time.sleep(0.1)

-        sla = base_sla.SLA.check_all(key["kw"], results)
        task.append_results(key, {"raw": results,
                                  "load_duration": self.duration,
                                  "full_duration": self.full_duration,
-                                  "sla": sla})
+                                  "sla": sla_checker.results()})

View File

@@ -24,24 +24,43 @@ import abc
import jsonschema
import six

-from rally.benchmark.processing import utils as putils
from rally.common.i18n import _
from rally.common import utils
from rally import consts
from rally import exceptions


-class SLAResult(object):
+class SLAChecker(object):
+    """Base SLA checker class."""

-    def __init__(self, success=True, msg=None):
-        self.success = success
-        self.msg = msg
+    def __init__(self, config):
+        self.config = config
+        self.sla_criteria = [SLA.get_by_name(name)(criterion_value)
+                             for name, criterion_value
+                             in config.get("sla", {}).items()]
+
+    def add_iteration(self, iteration):
+        """Process the result of a single iteration.
+
+        The call to add_iteration() will return True if all the SLA checks
+        passed, and False otherwise.
+
+        :param iteration: iteration result object
+        """
+        return all([sla.add_iteration(iteration) for sla in self.sla_criteria])
+
+    def results(self):
+        return [sla.result() for sla in self.sla_criteria]


@six.add_metaclass(abc.ABCMeta)
class SLA(object):
    """Factory for criteria classes."""

+    def __init__(self, criterion_value):
+        self.criterion_value = criterion_value
+        self.success = True
+
    @staticmethod
    def validate(config):
        properties = dict([(c.OPTION_NAME, c.CONFIG_SCHEMA)
@@ -53,36 +72,6 @@ class SLA(object):
        }
        jsonschema.validate(config, schema)

-    @staticmethod
-    @abc.abstractmethod
-    def check(criterion_value, result):
-        """Check if task succeeded according to criterion.
-
-        :param criterion_value: Criterion value specified in configuration
-        :param result: result object
-        :returns: True if success
-        """
-
-    @staticmethod
-    def check_all(config, result):
-        """Check all SLA criteria.
-
-        :param config: sla related config for a task
-        :param result: Result of a task
-        :returns: A list of sla results
-        """
-        results = []
-        opt_name_map = dict([(c.OPTION_NAME, c)
-                             for c in utils.itersubclasses(SLA)])
-        for name, criterion in six.iteritems(config.get("sla", {})):
-            check_result = opt_name_map[name].check(criterion, result)
-            results.append({"criterion": name,
-                            "success": check_result.success,
-                            "detail": check_result.msg})
-        return results
-
    @staticmethod
    def get_by_name(name):
        """Returns SLA by name or config option name."""
@@ -91,23 +80,56 @@ class SLA(object):
                return sla
        raise exceptions.NoSuchSLA(name=name)

+    @abc.abstractmethod
+    def add_iteration(self, iteration):
+        """Process the result of a single iteration and perform a SLA check.
+
+        The call to add_iteration() will return True if the SLA check passed,
+        and False otherwise.
+
+        :param iteration: iteration result object
+        :returns: True if the SLA check passed, False otherwise
+        """
+
+    def result(self):
+        """Returns the SLA result dict corresponding to the current state."""
+        return {
+            "criterion": self.OPTION_NAME,
+            "success": self.success,
+            "detail": self.details()
+        }
+
+    @abc.abstractmethod
+    def details(self):
+        """Returns the string describing the current results of the SLA."""
+
+    def status(self):
+        """Return "Passed" or "Failed" depending on the current SLA status."""
+        return "Passed" if self.success else "Failed"
+

class FailureRateDeprecated(SLA):
    """[Deprecated] Failure rate in percents."""
    OPTION_NAME = "max_failure_percent"
    CONFIG_SCHEMA = {"type": "number", "minimum": 0.0, "maximum": 100.0}

-    @staticmethod
-    def check(criterion_value, result):
-        errors = len([x for x in result if x["error"]])
-        error_rate = errors * 100.0 / len(result) if len(result) > 0 else 100.0
-        if criterion_value < error_rate:
-            success = False
-        else:
-            success = True
-        msg = (_("Maximum failure percent %s%% failures, actually %s%%") %
-               (criterion_value * 100.0, error_rate))
-        return SLAResult(success, msg)
+    def __init__(self, criterion_value):
+        super(FailureRateDeprecated, self).__init__(criterion_value)
+        self.errors = 0
+        self.total = 0
+        self.error_rate = 0.0
+
+    def add_iteration(self, iteration):
+        self.total += 1
+        if iteration["error"]:
+            self.errors += 1
+        self.error_rate = self.errors * 100.0 / self.total
+        self.success = self.error_rate <= self.criterion_value
+        return self.success
+
+    def details(self):
+        return (_("Maximum failure rate %s%% <= %s%% - %s") %
+                (self.criterion_value, self.error_rate, self.status()))


class FailureRate(SLA):
@@ -122,20 +144,26 @@ class FailureRate(SLA):
        }
    }

-    @staticmethod
-    def check(criterion_value, result):
-        min_percent = criterion_value.get("min", 0)
-        max_percent = criterion_value.get("max", 100)
-        errors = len([x for x in result if x["error"]])
-        error_rate = errors * 100.0 / len(result) if len(result) > 0 else 100.0
-
-        success = min_percent <= error_rate <= max_percent
-
-        msg = (_("Maximum failure rate percent %s%% failures, minimum failure "
-                 "rate percent %s%% failures, actually %s%%") %
-               (max_percent, min_percent, error_rate))
-
-        return SLAResult(success, msg)
+    def __init__(self, criterion_value):
+        super(FailureRate, self).__init__(criterion_value)
+        self.min_percent = self.criterion_value.get("min", 0)
+        self.max_percent = self.criterion_value.get("max", 100)
+        self.errors = 0
+        self.total = 0
+        self.error_rate = 0.0
+
+    def add_iteration(self, iteration):
+        self.total += 1
+        if iteration["error"]:
+            self.errors += 1
+        self.error_rate = self.errors * 100.0 / self.total
+        self.success = self.min_percent <= self.error_rate <= self.max_percent
+        return self.success
+
+    def details(self):
+        return (_("Failure rate criteria %.2f%% <= %.2f%% <= %.2f%% - %s") %
+                (self.min_percent, self.error_rate, self.max_percent,
+                 self.status()))


class IterationTime(SLA):
@@ -144,31 +172,41 @@ class IterationTime(SLA):
    CONFIG_SCHEMA = {"type": "number", "minimum": 0.0,
                     "exclusiveMinimum": True}

-    @staticmethod
-    def check(criterion_value, result):
-        duration = 0
-        success = True
-        for i in result:
-            if i["duration"] >= duration:
-                duration = i["duration"]
-            if i["duration"] > criterion_value:
-                success = False
-        msg = (_("Maximum seconds per iteration %ss, found with %ss") %
-               (criterion_value, duration))
-        return SLAResult(success, msg)
+    def __init__(self, criterion_value):
+        super(IterationTime, self).__init__(criterion_value)
+        self.max_iteration_time = 0.0
+
+    def add_iteration(self, iteration):
+        if iteration["duration"] > self.max_iteration_time:
+            self.max_iteration_time = iteration["duration"]
+        self.success = self.max_iteration_time <= self.criterion_value
+        return self.success
+
+    def details(self):
+        return (_("Maximum seconds per iteration %.2fs<= %.2fs - %s") %
+                (self.max_iteration_time, self.criterion_value, self.status()))


class MaxAverageDuration(SLA):
-    """Maximum average duration for one iteration in seconds."""
+    """Maximum average duration of one iteration in seconds."""
    OPTION_NAME = "max_avg_duration"
    CONFIG_SCHEMA = {"type": "number", "minimum": 0.0,
                     "exclusiveMinimum": True}

-    @staticmethod
-    def check(criterion_value, result):
-        durations = [r["duration"] for r in result if not r.get("error")]
-        avg = putils.mean(durations)
-        success = avg < criterion_value
-        msg = (_("Maximum average duration per iteration %ss, found with %ss")
-               % (criterion_value, avg))
-        return SLAResult(success, msg)
+    def __init__(self, criterion_value):
+        super(MaxAverageDuration, self).__init__(criterion_value)
+        self.total_duration = 0.0
+        self.iterations = 0
+        self.avg = 0.0
+
+    def add_iteration(self, iteration):
+        if not iteration.get("error"):
+            self.total_duration += iteration["duration"]
+            self.iterations += 1
+            self.avg = self.total_duration / self.iterations
+        self.success = self.avg <= self.criterion_value
+        return self.success
+
+    def details(self):
+        return (_("Maximum average duration of one iteration %.2fs <= %.2fs - "
+                  "%s") % (self.avg, self.criterion_value, self.status()))

View File

@@ -180,9 +180,13 @@ class TaskCommands(object):
    @cliutils.args("--tag", help="Tag for this task")
    @cliutils.args("--no-use", action="store_false", dest="do_use",
                   help="Don't set new task as default for future operations")
+    @cliutils.args("--abort-on-sla-failure", action="store_true",
+                   dest="abort_on_sla_failure",
+                   help="Abort the execution of a benchmark scenario when"
+                        "any SLA check for it fails")
    @envutils.with_default_deployment(cli_arg_name="deployment")
    def start(self, task, deployment=None, task_args=None, task_args_file=None,
-              tag=None, do_use=False):
+              tag=None, do_use=False, abort_on_sla_failure=False):
        """Start benchmark task.

        :param task: a file with yaml/json task
@@ -193,6 +197,11 @@ class TaskCommands(object):
                          is jinja2 template.
        :param deployment: UUID or name of a deployment
        :param tag: optional tag for this task
+        :param do_use: if True, the new task will be stored as the default one
+                       for future operations
+        :param abort_on_sla_failure: if True, the execution of a benchmark
+                                     scenario will stop when any SLA check
+                                     for it fails
        """
        try:
            input_task = self._load_task(task, task_args, task_args_file)
@@ -207,7 +216,8 @@ class TaskCommands(object):
            print("Benchmarking... This can take a while...\n")
            print("To track task status use:\n")
            print("\trally task status\n\tor\n\trally task detailed\n")
-            api.Task.start(deployment, input_task, task=task)
+            api.Task.start(deployment, input_task, task=task,
+                           abort_on_sla_failure=abort_on_sla_failure)
            self.detailed(task_id=task["uuid"])
            if do_use:
                use.UseCommands().task(task["uuid"])

View File

@@ -13,6 +13,7 @@
# License for the specific language governing permissions and limitations
# under the License.

+import json
import os
import re
import unittest
@@ -243,6 +244,210 @@ class TaskTestCase(unittest.TestCase):
            r"(?P<task_id>[0-9a-f\-]{36}): started", output)
        self.assertIsNotNone(result)
def _test_start_abort_on_sla_failure_success(self, cfg, times):
rally = utils.Rally()
with mock.patch.dict("os.environ", utils.TEST_ENV):
deployment_id = envutils.get_global("RALLY_DEPLOYMENT")
config = utils.TaskConfig(cfg)
rally(("task start --task %(task_file)s "
"--deployment %(deployment_id)s --abort-on-sla-failure") %
{"task_file": config.filename,
"deployment_id": deployment_id})
results = json.loads(rally("task results"))
iterations_completed = len(results[0]["result"])
self.assertEqual(times, iterations_completed)
def test_start_abort_on_sla_failure_success_constant(self):
times = 100
cfg = {
"Dummy.dummy": [
{
"args": {
"sleep": 0.1
},
"runner": {
"type": "constant",
"times": times,
"concurrency": 5
},
"sla": {
"failure_rate": {"max": 0.0}
}
}
]
}
self._test_start_abort_on_sla_failure_success(cfg, times)
def test_start_abort_on_sla_failure_success_serial(self):
times = 100
cfg = {
"Dummy.dummy": [
{
"args": {
"sleep": 0.1
},
"runner": {
"type": "serial",
"times": times
},
"sla": {
"failure_rate": {"max": 0.0}
}
}
]
}
self._test_start_abort_on_sla_failure_success(cfg, times)
def test_start_abort_on_sla_failure_success_rps(self):
times = 100
cfg = {
"Dummy.dummy": [
{
"args": {
"sleep": 0.1
},
"runner": {
"type": "rps",
"times": times,
"rps": 20
},
"sla": {
"failure_rate": {"max": 0.0}
}
}
]
}
self._test_start_abort_on_sla_failure_success(cfg, times)
def _test_start_abort_on_sla_failure(self, cfg, times):
rally = utils.Rally()
with mock.patch.dict("os.environ", utils.TEST_ENV):
deployment_id = envutils.get_global("RALLY_DEPLOYMENT")
config = utils.TaskConfig(cfg)
rally(("task start --task %(task_file)s "
"--deployment %(deployment_id)s --abort-on-sla-failure") %
{"task_file": config.filename,
"deployment_id": deployment_id})
results = json.loads(rally("task results"))
iterations_completed = len(results[0]["result"])
# NOTE(msdubov): Change '<=' to '<' as soon as we fix the runners.
self.assertTrue(iterations_completed <= times)
def test_start_abort_on_sla_failure_max_seconds_constant(self):
times = 100
cfg = {
"Dummy.dummy": [
{
"args": {
"sleep": 0.1
},
"runner": {
"type": "constant",
"times": times,
"concurrency": 5
},
"sla": {
"max_seconds_per_iteration": 0.01
}
}
]
}
self._test_start_abort_on_sla_failure(cfg, times)
def test_start_abort_on_sla_failure_max_seconds_serial(self):
times = 100
cfg = {
"Dummy.dummy": [
{
"args": {
"sleep": 0.1
},
"runner": {
"type": "serial",
"times": times
},
"sla": {
"max_seconds_per_iteration": 0.01
}
}
]
}
self._test_start_abort_on_sla_failure(cfg, times)
def test_start_abort_on_sla_failure_max_seconds_rps(self):
times = 100
cfg = {
"Dummy.dummy": [
{
"args": {
"sleep": 0.1
},
"runner": {
"type": "rps",
"times": times,
"rps": 20
},
"sla": {
"max_seconds_per_iteration": 0.01
}
}
]
}
self._test_start_abort_on_sla_failure(cfg, times)
def test_start_abort_on_sla_failure_max_failure_rate_constant(self):
times = 100
cfg = {
"Dummy.dummy_exception": [
{
"runner": {
"type": "constant",
"times": times,
"concurrency": 5
},
"sla": {
"failure_rate": {"max": 0.0}
}
}
]
}
self._test_start_abort_on_sla_failure(cfg, times)
def test_start_abort_on_sla_failure_max_failure_rate_serial(self):
times = 100
cfg = {
"Dummy.dummy_exception": [
{
"runner": {
"type": "serial",
"times": times
},
"sla": {
"failure_rate": {"max": 0.0}
}
}
]
}
self._test_start_abort_on_sla_failure(cfg, times)
def test_start_abort_on_sla_failure_max_failure_rate_rps(self):
times = 100
cfg = {
"Dummy.dummy_exception": [
{
"runner": {
"type": "rps",
"times": times,
"rps": 20
},
"sla": {
"failure_rate": {"max": 0.0}
}
}
]
}
self._test_start_abort_on_sla_failure(cfg, times)
    # NOTE(oanufriev): Not implemented
    def test_abort(self):
        pass
@@ -251,7 +456,7 @@ class TaskTestCase(unittest.TestCase):
class SLATestCase(unittest.TestCase):

    def _get_sample_task_config(self, max_seconds_per_iteration=4,
-                                max_failure_percent=0):
+                                failure_rate_max=0):
        return {
            "KeystoneBasic.create_and_list_users": [
                {
@@ -265,7 +470,7 @@ class SLATestCase(unittest.TestCase):
                    },
                    "sla": {
                        "max_seconds_per_iteration": max_seconds_per_iteration,
-                        "max_failure_percent": max_failure_percent,
+                        "failure_rate": {"max": failure_rate_max}
                    }
                }
            ]
@@ -289,9 +494,9 @@ class SLATestCase(unittest.TestCase):
             "detail": mock.ANY,
             "pos": 0, "status": "PASS"},
            {"benchmark": "KeystoneBasic.create_and_list_users",
-             "criterion": "max_failure_percent",
+             "criterion": "failure_rate",
             "detail": mock.ANY,
-             "pos": 0, "status": "PASS"},
+             "pos": 0, "status": "PASS"}
        ]
        data = rally("task sla_check --json", getjson=True)
        self.assertEqual(expected, data)

View File

@@ -25,10 +25,32 @@ class TestCriterion(base.SLA):
    OPTION_NAME = "test_criterion"
    CONFIG_SCHEMA = {"type": "integer"}

-    @staticmethod
-    def check(criterion_value, result):
-        return base.SLAResult(criterion_value == result,
-                              msg="detail")
+    def add_iteration(self, iteration):
+        self.success = self.criterion_value == iteration
+        return self.success
+
+    def details(self):
+        return "detail"
+
+
+class SLACheckerTestCase(test.TestCase):
+
+    def test_add_iteration_and_results(self):
+        sla_checker = base.SLAChecker({"sla": {"test_criterion": 42}})
+        iteration = {"key": {"name": "fake", "pos": 0}, "data": 42}
+        self.assertTrue(sla_checker.add_iteration(iteration["data"]))
+        expected_result = [{"criterion": "test_criterion",
+                            "detail": "detail",
+                            "success": True}]
+        self.assertEqual(expected_result, sla_checker.results())
+
+        iteration["data"] = 43
+        self.assertFalse(sla_checker.add_iteration(iteration["data"]))
+        expected_result = [{"criterion": "test_criterion",
+                            "detail": "detail",
+                            "success": False}]
+        self.assertEqual(expected_result, sla_checker.results())
class BaseSLATestCase(test.TestCase):
@@ -52,39 +74,24 @@ class BaseSLATestCase(test.TestCase):
        self.assertRaises(jsonschema.ValidationError,
                          base.SLA.validate, {"test_criterion": 42.0})

-    def test_check_all(self):
-        config = {
-            "sla": {"test_criterion": 42},
-        }
-        result = {"key": {"kw": config, "name": "fake", "pos": 0},
-                  "data": 42}
-        results = list(base.SLA.check_all(config, result["data"]))
-        expected = [{"criterion": "test_criterion",
-                     "detail": "detail",
-                     "success": True}]
-        self.assertEqual(expected, results)
-        result["data"] = 43
-        results = list(base.SLA.check_all(config, result["data"]))
-        expected = [{"criterion": "test_criterion",
-                     "detail": "detail",
-                     "success": False}]
-        self.assertEqual(expected, results)
-

class FailureRateDeprecatedTestCase(test.TestCase):
-    def test_check(self):
-        result = [
-            {"error": ["error"]},
-            {"error": []},
-        ]  # one error and one success. 50% success rate
-        # 50% < 75.0%
-        self.assertTrue(base.FailureRateDeprecated.check(75.0, result).success)
-        # 50% > 25%
-        self.assertFalse(base.FailureRateDeprecated.check(25, result).success)
-
-    def test_check_with_no_results(self):
-        result = []
-        self.assertFalse(base.FailureRateDeprecated.check(10, result).success)
+    def test_result(self):
+        sla1 = base.FailureRateDeprecated(75.0)
+        sla2 = base.FailureRateDeprecated(25.0)
+        # 50% failure rate
+        for sla in [sla1, sla2]:
+            sla.add_iteration({"error": ["error"]})
+            sla.add_iteration({"error": []})
+        self.assertTrue(sla1.result()["success"])   # 50% < 75.0%
+        self.assertFalse(sla2.result()["success"])  # 50% > 25.0%
+        self.assertEqual("Passed", sla1.status())
+        self.assertEqual("Failed", sla2.status())
+
+    def test_result_no_iterations(self):
+        sla = base.FailureRateDeprecated(10.0)
+        self.assertTrue(sla.result()["success"])


class FailureRateTestCase(test.TestCase):
@@ -103,30 +110,60 @@ class FailureRateTestCase(test.TestCase):
                          base.IterationTime.validate,
                          {"failure_rate": {"max": 101}})

-    def test_check_min(self):
-        result = [{"error": ["error"]}, {"error": []}, {"error": ["error"]},
-                  {"error": ["error"]}, ]  # 75% failure rate
-        self.assertFalse(base.FailureRate.check({"min": 80}, result).success)
-        self.assertTrue(base.FailureRate.check({"min": 60.5}, result).success)
+    def test_result_min(self):
+        sla1 = base.FailureRate({"min": 80.0})
+        sla2 = base.FailureRate({"min": 60.5})
+        # 75% failure rate
+        for sla in [sla1, sla2]:
+            sla.add_iteration({"error": ["error"]})
+            sla.add_iteration({"error": []})
+            sla.add_iteration({"error": ["error"]})
+            sla.add_iteration({"error": ["error"]})
+        self.assertFalse(sla1.result()["success"])  # 80.0% > 75.0%
+        self.assertTrue(sla2.result()["success"])   # 60.5% < 75.0%
+        self.assertEqual("Failed", sla1.status())
+        self.assertEqual("Passed", sla2.status())

-    def test_check_max(self):
-        result = [{"error": ["error"]}, {"error": []}]  # 50% failure rate
-        self.assertFalse(base.FailureRate.check({"max": 25}, result).success)
-        self.assertTrue(base.FailureRate.check({"max": 75.0}, result).success)
+    def test_result_max(self):
+        sla1 = base.FailureRate({"max": 25.0})
+        sla2 = base.FailureRate({"max": 75.0})
+        # 50% failure rate
+        for sla in [sla1, sla2]:
+            sla.add_iteration({"error": ["error"]})
+            sla.add_iteration({"error": []})
+        self.assertFalse(sla1.result()["success"])  # 25.0% < 50.0%
+        self.assertTrue(sla2.result()["success"])   # 75.0% > 50.0%
+        self.assertEqual("Failed", sla1.status())
+        self.assertEqual("Passed", sla2.status())

-    def test_check_min_max(self):
-        result = [{"error": ["error"]}, {"error": []}, {"error": []},
-                  {"error": []}]  # 25% failure rate
-        self.assertFalse(base.FailureRate.check({"min": 50, "max": 90}, result)
-                         .success)
-        self.assertFalse(base.FailureRate.check({"min": 5, "max": 20}, result)
-                         .success)
-        self.assertTrue(base.FailureRate.check({"min": 24.9, "max": 25.1},
-                                               result).success)
+    def test_result_min_max(self):
+        sla1 = base.FailureRate({"min": 50, "max": 90})
+        sla2 = base.FailureRate({"min": 5, "max": 20})
+        sla3 = base.FailureRate({"min": 24.9, "max": 25.1})
+        # 25% failure rate
+        for sla in [sla1, sla2, sla3]:
+            sla.add_iteration({"error": ["error"]})
+            sla.add_iteration({"error": []})
+            sla.add_iteration({"error": []})
+            sla.add_iteration({"error": []})
+        self.assertFalse(sla1.result()["success"])  # 25.0% < 50.0%
+        self.assertFalse(sla2.result()["success"])  # 25.0% > 20.0%
+        self.assertTrue(sla3.result()["success"])   # 24.9% < 25.0% < 25.1%
+        self.assertEqual("Failed", sla1.status())
+        self.assertEqual("Failed", sla2.status())
+        self.assertEqual("Passed", sla3.status())

-    def test_check_empty_result(self):
-        result = []
-        self.assertFalse(base.FailureRate.check({"max": 10.0}, result).success)
+    def test_result_no_iterations(self):
+        sla = base.FailureRate({"max": 10.0})
+        self.assertTrue(sla.result()["success"])
+
+    def test_add_iteration(self):
+        sla = base.FailureRate({"max": 35.0})
+        self.assertTrue(sla.add_iteration({"error": []}))
+        self.assertTrue(sla.add_iteration({"error": []}))
+        self.assertTrue(sla.add_iteration({"error": []}))
+        self.assertTrue(sla.add_iteration({"error": ["error"]}))   # 33%
+        self.assertFalse(sla.add_iteration({"error": ["error"]}))  # 40%


class IterationTimeTestCase(test.TestCase):
@@ -137,13 +174,28 @@ class IterationTimeTestCase(test.TestCase):
        self.assertRaises(jsonschema.ValidationError,
                          base.IterationTime.validate, properties)

-    def test_check(self):
-        result = [
-            {"duration": 3.14},
-            {"duration": 6.28},
-        ]
-        self.assertTrue(base.IterationTime.check(42, result).success)
-        self.assertFalse(base.IterationTime.check(3.62, result).success)
+    def test_result(self):
+        sla1 = base.IterationTime(42)
+        sla2 = base.IterationTime(3.62)
+        for sla in [sla1, sla2]:
+            sla.add_iteration({"duration": 3.14})
+            sla.add_iteration({"duration": 6.28})
+        self.assertTrue(sla1.result()["success"])   # 42 > 6.28
+        self.assertFalse(sla2.result()["success"])  # 3.62 < 6.28
+        self.assertEqual("Passed", sla1.status())
+        self.assertEqual("Failed", sla2.status())
+
+    def test_result_no_iterations(self):
+        sla = base.IterationTime(42)
+        self.assertTrue(sla.result()["success"])
+
+    def test_add_iteration(self):
+        sla = base.IterationTime(4.0)
+        self.assertTrue(sla.add_iteration({"duration": 3.14}))
+        self.assertTrue(sla.add_iteration({"duration": 2.0}))
+        self.assertTrue(sla.add_iteration({"duration": 3.99}))
+        self.assertFalse(sla.add_iteration({"duration": 4.5}))
+        self.assertFalse(sla.add_iteration({"duration": 3.8}))


class MaxAverageDurationTestCase(test.TestCase):
@@ -154,10 +206,25 @@ class MaxAverageDurationTestCase(test.TestCase):
        self.assertRaises(jsonschema.ValidationError,
                          base.MaxAverageDuration.validate, properties)

-    def test_check(self):
-        result = [
-            {"duration": 3.14},
-            {"duration": 6.28},
-        ]
-        self.assertTrue(base.MaxAverageDuration.check(42, result).success)
-        self.assertFalse(base.MaxAverageDuration.check(3.62, result).success)
+    def test_result(self):
+        sla1 = base.MaxAverageDuration(42)
+        sla2 = base.MaxAverageDuration(3.62)
+        for sla in [sla1, sla2]:
+            sla.add_iteration({"duration": 3.14})
+            sla.add_iteration({"duration": 6.28})
+        self.assertTrue(sla1.result()["success"])   # 42 > avg([3.14, 6.28])
+        self.assertFalse(sla2.result()["success"])  # 3.62 < avg([3.14, 6.28])
+        self.assertEqual("Passed", sla1.status())
+        self.assertEqual("Failed", sla2.status())
+
+    def test_result_no_iterations(self):
+        sla = base.MaxAverageDuration(42)
+        self.assertTrue(sla.result()["success"])
+
+    def test_add_iteration(self):
+        sla = base.MaxAverageDuration(4.0)
+        self.assertTrue(sla.add_iteration({"duration": 3.5}))
+        self.assertTrue(sla.add_iteration({"duration": 2.5}))
+        self.assertTrue(sla.add_iteration({"duration": 5.0}))   # avg = 3.667
+        self.assertFalse(sla.add_iteration({"duration": 7.0}))  # avg = 4.5
+        self.assertTrue(sla.add_iteration({"duration": 1.0}))   # avg = 3.8

View File

@@ -303,17 +303,68 @@ class BenchmarkEngineTestCase(test.TestCase):
        self.assertEqual(result, expected_result)
        mock_meta.assert_called_once_with(name, "context")

-    @mock.patch("rally.benchmark.sla.base.SLA.check_all")
-    def test_consume_results(self, mock_check_all):
+    @mock.patch("rally.benchmark.sla.base.SLAChecker")
+    def test_consume_results(self, mock_sla):
+        mock_sla_instance = mock.MagicMock()
+        mock_sla.return_value = mock_sla_instance
        key = {"kw": {"fake": 2}, "name": "fake", "pos": 0}
        task = mock.MagicMock()
        config = {
            "a.benchmark": [{"context": {"context_a": {"a": 1}}}],
        }
+        runner = mock.MagicMock()
+        runner.result_queue = collections.deque([1, 2])
        is_done = mock.MagicMock()
        is_done.isSet.side_effect = [False, False, True]
        eng = engine.BenchmarkEngine(config, task)
        eng.duration = 123
        eng.full_duration = 456
-        eng.consume_results(key, task, collections.deque([1, 2]), is_done)
-        mock_check_all.assert_called_once_with({"fake": 2}, [1, 2])
+        eng.consume_results(key, task, is_done, runner)
+        mock_sla.assert_called_once_with({"fake": 2})
+        expected_iteration_calls = [mock.call(1), mock.call(2)]
+        self.assertEqual(expected_iteration_calls,
+                         mock_sla_instance.add_iteration.mock_calls)
@mock.patch("rally.benchmark.sla.base.SLAChecker")
def test_consume_results_sla_failure_abort(self, mock_sla):
mock_sla_instance = mock.MagicMock()
mock_sla.return_value = mock_sla_instance
mock_sla_instance.add_iteration.side_effect = [True, True, False,
False]
key = {"kw": {"fake": 2}, "name": "fake", "pos": 0}
task = mock.MagicMock()
config = {
"a.benchmark": [{"context": {"context_a": {"a": 1}}}],
}
runner = mock.MagicMock()
runner.result_queue = collections.deque([1, 2, 3, 4])
is_done = mock.MagicMock()
is_done.isSet.side_effect = [False, False, False, False, True]
eng = engine.BenchmarkEngine(config, task, abort_on_sla_failure=True)
eng.duration = 123
eng.full_duration = 456
eng.consume_results(key, task, is_done, runner)
mock_sla.assert_called_once_with({"fake": 2})
self.assertTrue(runner.abort.called)
@mock.patch("rally.benchmark.sla.base.SLAChecker")
def test_consume_results_sla_failure_continue(self, mock_sla):
mock_sla_instance = mock.MagicMock()
mock_sla.return_value = mock_sla_instance
mock_sla_instance.add_iteration.side_effect = [True, True, False,
False]
key = {"kw": {"fake": 2}, "name": "fake", "pos": 0}
task = mock.MagicMock()
config = {
"a.benchmark": [{"context": {"context_a": {"a": 1}}}],
}
runner = mock.MagicMock()
runner.result_queue = collections.deque([1, 2, 3, 4])
is_done = mock.MagicMock()
is_done.isSet.side_effect = [False, False, False, False, True]
eng = engine.BenchmarkEngine(config, task, abort_on_sla_failure=False)
eng.duration = 123
eng.full_duration = 456
eng.consume_results(key, task, is_done, runner)
mock_sla.assert_called_once_with({"fake": 2})
self.assertEqual(0, runner.abort.call_count)

View File

@@ -112,7 +112,8 @@ class TaskCommandsTestCase(test.TestCase):
        task_path = "path_to_config.json"
        self.task.start(task_path, deployment_id)
        mock_api.assert_called_once_with(deployment_id, {"some": "json"},
-                                         task=mock_create_task.return_value)
+                                         task=mock_create_task.return_value,
+                                         abort_on_sla_failure=False)
        mock_load.assert_called_once_with(task_path, None, None)

    @mock.patch("rally.cmd.commands.task.TaskCommands._load_task",
@@ -142,7 +143,7 @@ class TaskCommandsTestCase(test.TestCase):
        mock_api.Task.create.assert_called_once_with("deployment", "tag")
        mock_api.Task.start.assert_called_once_with(
            "deployment", mock_load.return_value,
-            task=mock_api.Task.create.return_value)
+            task=mock_api.Task.create.return_value, abort_on_sla_failure=False)

    @mock.patch("rally.cmd.commands.task.api")
    def test_abort(self, mock_api):

View File

@@ -116,9 +116,9 @@ class TaskAPITestCase(test.TestCase):
        mock_engine.assert_has_calls([
            mock.call("config", mock_task.return_value,
                      admin=mock_deployment_get.return_value["admin"],
-                      users=[]),
+                      users=[], abort_on_sla_failure=False),
            mock.call().validate(),
-            mock.call().run(),
+            mock.call().run()
        ])

        mock_task.assert_called_once_with(

View File

@@ -32,7 +32,7 @@ _rally()
    OPTS["task_report"]="--tasks --out --open"
    OPTS["task_results"]="--uuid"
    OPTS["task_sla_check"]="--uuid --json"
-    OPTS["task_start"]="--deployment --task --task-args --task-args-file --tag --no-use"
+    OPTS["task_start"]="--deployment --task --task-args --task-args-file --tag --no-use --abort-on-sla-failure"
    OPTS["task_status"]="--uuid"
    OPTS["task_validate"]="--deployment --task --task-args --task-args-file"
    OPTS["use_deployment"]="--deployment"