diff --git a/rally/benchmark/sla/base.py b/rally/benchmark/sla/base.py
index 98e8df762f..a46ae30aa0 100644
--- a/rally/benchmark/sla/base.py
+++ b/rally/benchmark/sla/base.py
@@ -24,9 +24,17 @@ import abc
 import jsonschema
 import six
 
+from rally.openstack.common.gettextutils import _
 from rally import utils
 
 
+class SLAResult(object):
+
+    def __init__(self, success=True, msg=None):
+        self.success = success
+        self.msg = msg
+
+
 @six.add_metaclass(abc.ABCMeta)
 class SLA(object):
     """Factory for criteria classes."""
@@ -67,11 +75,12 @@ class SLA(object):
             config = result['key']['kw'].get('sla', None)
             if config:
                 for name, criterion in config.iteritems():
-                    success = opt_name_map[name].check(criterion, result)
+                    check_result = opt_name_map[name].check(criterion, result)
                     yield {'benchmark': result['key']['name'],
                            'pos': result['key']['pos'],
                            'criterion': name,
-                           'success': success}
+                           'success': check_result.success,
+                           'detail': check_result.msg}
 
 
 class FailureRate(SLA):
@@ -84,8 +93,12 @@ class FailureRate(SLA):
         raw = result['data']['raw']
         errors = len(filter(lambda x: x['error'], raw))
         if criterion_value < errors * 100.0 / len(raw):
-            return False
-        return True
+            success = False
+        else:
+            success = True
+        msg = (_("Maximum failure percent %s%% failures, actually %s%%") %
+               (criterion_value, errors * 100.0 / len(raw)))
+        return SLAResult(success, msg)
 
 
 class IterationTime(SLA):
@@ -96,7 +109,13 @@ class IterationTime(SLA):
 
     @staticmethod
     def check(criterion_value, result):
+        duration = 0
+        success = True
         for i in result['data']['raw']:
+            duration = i['duration']
             if i['duration'] > criterion_value:
-                return False
-        return True
+                success = False
+                break
+        msg = (_("Maximum seconds per iteration %is, actually %is") %
+               (criterion_value, duration))
+        return SLAResult(success, msg)
diff --git a/rally/cmd/commands/task.py b/rally/cmd/commands/task.py
index b4d7965b88..13692900d3 100644
--- a/rally/cmd/commands/task.py
+++ b/rally/cmd/commands/task.py
@@ -388,5 +388,6 @@ class TaskCommands(object):
             print(json.dumps(rows))
         else:
             common_cliutils.print_list(rows, ('benchmark', 'pos',
-                                              'criterion', 'success'))
+                                              'criterion', 'success',
+                                              'detail'))
         return failed_criteria
diff --git a/tests/benchmark/sla/test_base.py b/tests/benchmark/sla/test_base.py
index c3317d22bc..634bc57d30 100644
--- a/tests/benchmark/sla/test_base.py
+++ b/tests/benchmark/sla/test_base.py
@@ -27,7 +27,8 @@ class TestCriterion(base.SLA):
 
     @staticmethod
     def check(criterion_value, result):
-        return criterion_value == result["data"]
+        return base.SLAResult(criterion_value == result["data"],
+                              msg='detail')
 
 
 class BaseSLATestCase(test.TestCase):
@@ -54,6 +55,7 @@ class BaseSLATestCase(test.TestCase):
         results = list(base.SLA.check_all(task))
         expected = [{'benchmark': 'fake',
                      'criterion': 'test_criterion',
+                     'detail': 'detail',
                      'pos': 0,
                      'success': True}]
         self.assertEqual(expected, results)
@@ -61,6 +63,7 @@ class BaseSLATestCase(test.TestCase):
         results = list(base.SLA.check_all(task))
         expected = [{'benchmark': 'fake',
                      'criterion': 'test_criterion',
+                     'detail': 'detail',
                      'pos': 0,
                      'success': False}]
         self.assertEqual(expected, results)
@@ -73,8 +76,10 @@ class FailureRateTestCase(test.TestCase):
             {"error": []},
         ]  # one error and one success. 50% success rate
         result = {"data": {"raw": raw}}
-        self.assertTrue(base.FailureRate.check(75.0, result))  # 50% < 75.0%
-        self.assertFalse(base.FailureRate.check(25, result))  # 50% > 25%
+        # 50% < 75.0%
+        self.assertTrue(base.FailureRate.check(75.0, result).success)
+        # 50% > 25%
+        self.assertFalse(base.FailureRate.check(25, result).success)
 
 
 class IterationTimeTestCase(test.TestCase):
@@ -84,5 +89,5 @@ class IterationTimeTestCase(test.TestCase):
             {"duration": 6.28},
         ]
         result = {"data": {"raw": raw}}
-        self.assertTrue(base.IterationTime.check(42, result))
-        self.assertFalse(base.IterationTime.check(3.62, result))
+        self.assertTrue(base.IterationTime.check(42, result).success)
+        self.assertFalse(base.IterationTime.check(3.62, result).success)