Extend sla output result
Add details to sla result output. Change-Id: I550135b9742bc3f5f44c63bb123db387f599e0e0
This commit is contained in:
parent
54dbf08b86
commit
334ccadef6
@ -24,9 +24,17 @@ import abc
|
|||||||
import jsonschema
|
import jsonschema
|
||||||
import six
|
import six
|
||||||
|
|
||||||
|
from rally.openstack.common.gettextutils import _
|
||||||
from rally import utils
|
from rally import utils
|
||||||
|
|
||||||
|
|
||||||
|
class SLAResult(object):
    """Outcome of a single SLA criterion check.

    Carries a boolean success flag plus an optional human-readable
    detail message explaining how close the measured value came to
    the configured criterion.
    """

    def __init__(self, success=True, msg=None):
        # success: True when the criterion was met (defaults to passing).
        # msg: optional detail string; None when no explanation is given.
        self.success = success
        self.msg = msg
||||||
@six.add_metaclass(abc.ABCMeta)
|
@six.add_metaclass(abc.ABCMeta)
|
||||||
class SLA(object):
|
class SLA(object):
|
||||||
"""Factory for criteria classes."""
|
"""Factory for criteria classes."""
|
||||||
@ -67,11 +75,12 @@ class SLA(object):
|
|||||||
config = result['key']['kw'].get('sla', None)
|
config = result['key']['kw'].get('sla', None)
|
||||||
if config:
|
if config:
|
||||||
for name, criterion in config.iteritems():
|
for name, criterion in config.iteritems():
|
||||||
success = opt_name_map[name].check(criterion, result)
|
check_result = opt_name_map[name].check(criterion, result)
|
||||||
yield {'benchmark': result['key']['name'],
|
yield {'benchmark': result['key']['name'],
|
||||||
'pos': result['key']['pos'],
|
'pos': result['key']['pos'],
|
||||||
'criterion': name,
|
'criterion': name,
|
||||||
'success': success}
|
'success': check_result.success,
|
||||||
|
'detail': check_result.msg}
|
||||||
|
|
||||||
|
|
||||||
class FailureRate(SLA):
    """SLA criterion: maximum allowed percent of failed iterations."""

    @staticmethod
    def check(criterion_value, result):
        """Check that the failure rate does not exceed the allowed percent.

        :param criterion_value: maximum allowed failure rate, in percent
        :param result: benchmark result dict with raw iteration data under
                       result['data']['raw']
        :returns: SLAResult with the success flag and a detail message
        """
        raw = result['data']['raw']
        errors = len(filter(lambda x: x['error'], raw))
        # Compute the rate once instead of twice (once for the check,
        # once for the message, as the original code did).
        # NOTE(review): this raises ZeroDivisionError for an empty raw
        # list — confirm callers never pass zero iterations.
        error_rate = errors * 100.0 / len(raw)
        success = error_rate <= criterion_value
        msg = (_("Maximum failure percent %s%% failures, actually %s%%") %
               (criterion_value, error_rate))
        return SLAResult(success, msg)
class IterationTime(SLA):
    """SLA criterion: maximum time allowed for a single iteration."""

    @staticmethod
    def check(criterion_value, result):
        """Check that no iteration took longer than the allowed maximum.

        :param criterion_value: allowed maximum duration of one iteration,
                                in seconds
        :param result: benchmark result dict with raw iteration data under
                       result['data']['raw']
        :returns: SLAResult with the success flag and a detail message
        """
        durations = [i['duration'] for i in result['data']['raw']]
        # Report the worst (longest) iteration in the message. The original
        # code reported whichever duration was seen last before breaking,
        # which on success was just the final iteration, not the maximum.
        max_duration = max(durations) if durations else 0
        success = max_duration <= criterion_value
        msg = (_("Maximum seconds per iteration %is, actually %is") %
               (criterion_value, max_duration))
        return SLAResult(success, msg)
|
@ -388,5 +388,6 @@ class TaskCommands(object):
|
|||||||
print(json.dumps(rows))
|
print(json.dumps(rows))
|
||||||
else:
|
else:
|
||||||
common_cliutils.print_list(rows, ('benchmark', 'pos',
|
common_cliutils.print_list(rows, ('benchmark', 'pos',
|
||||||
'criterion', 'success'))
|
'criterion', 'success',
|
||||||
|
'detail'))
|
||||||
return failed_criteria
|
return failed_criteria
|
||||||
|
@ -27,7 +27,8 @@ class TestCriterion(base.SLA):
|
|||||||
|
|
||||||
@staticmethod
|
@staticmethod
|
||||||
def check(criterion_value, result):
|
def check(criterion_value, result):
|
||||||
return criterion_value == result["data"]
|
return base.SLAResult(criterion_value == result["data"],
|
||||||
|
msg='detail')
|
||||||
|
|
||||||
|
|
||||||
class BaseSLATestCase(test.TestCase):
|
class BaseSLATestCase(test.TestCase):
|
||||||
@ -54,6 +55,7 @@ class BaseSLATestCase(test.TestCase):
|
|||||||
results = list(base.SLA.check_all(task))
|
results = list(base.SLA.check_all(task))
|
||||||
expected = [{'benchmark': 'fake',
|
expected = [{'benchmark': 'fake',
|
||||||
'criterion': 'test_criterion',
|
'criterion': 'test_criterion',
|
||||||
|
'detail': 'detail',
|
||||||
'pos': 0,
|
'pos': 0,
|
||||||
'success': True}]
|
'success': True}]
|
||||||
self.assertEqual(expected, results)
|
self.assertEqual(expected, results)
|
||||||
@ -61,6 +63,7 @@ class BaseSLATestCase(test.TestCase):
|
|||||||
results = list(base.SLA.check_all(task))
|
results = list(base.SLA.check_all(task))
|
||||||
expected = [{'benchmark': 'fake',
|
expected = [{'benchmark': 'fake',
|
||||||
'criterion': 'test_criterion',
|
'criterion': 'test_criterion',
|
||||||
|
'detail': 'detail',
|
||||||
'pos': 0,
|
'pos': 0,
|
||||||
'success': False}]
|
'success': False}]
|
||||||
self.assertEqual(expected, results)
|
self.assertEqual(expected, results)
|
||||||
@ -73,8 +76,10 @@ class FailureRateTestCase(test.TestCase):
|
|||||||
{"error": []},
|
{"error": []},
|
||||||
] # one error and one success. 50% success rate
|
] # one error and one success. 50% success rate
|
||||||
result = {"data": {"raw": raw}}
|
result = {"data": {"raw": raw}}
|
||||||
self.assertTrue(base.FailureRate.check(75.0, result)) # 50% < 75.0%
|
# 50% < 75.0%
|
||||||
self.assertFalse(base.FailureRate.check(25, result)) # 50% > 25%
|
self.assertTrue(base.FailureRate.check(75.0, result).success)
|
||||||
|
# 50% > 25%
|
||||||
|
self.assertFalse(base.FailureRate.check(25, result).success)
|
||||||
|
|
||||||
|
|
||||||
class IterationTimeTestCase(test.TestCase):
|
class IterationTimeTestCase(test.TestCase):
|
||||||
@ -84,5 +89,5 @@ class IterationTimeTestCase(test.TestCase):
|
|||||||
{"duration": 6.28},
|
{"duration": 6.28},
|
||||||
]
|
]
|
||||||
result = {"data": {"raw": raw}}
|
result = {"data": {"raw": raw}}
|
||||||
self.assertTrue(base.IterationTime.check(42, result))
|
self.assertTrue(base.IterationTime.check(42, result).success)
|
||||||
self.assertFalse(base.IterationTime.check(3.62, result))
|
self.assertFalse(base.IterationTime.check(3.62, result).success)
|
||||||
|
Loading…
Reference in New Issue
Block a user