Store SLA results in the db

SLA checks are now run in BenchmarkEngine.consume_results and stored
together with the raw scenario results, so the task results and
sla_check commands read them back from the database instead of
recomputing them from the task config.

Change-Id: I96e46493ba70c2bfea5d0ce7e97195f3170d66dd
parent 30c0624821
commit dcf2f8dd65

@@ -262,5 +262,7 @@ class BenchmarkEngine(object):
             else:
                 time.sleep(0.1)

+        sla = base_sla.SLA.check_all(key['kw'], results)
         task.append_results(key, {"raw": results,
-                                  "scenario_duration": self.duration})
+                                  "scenario_duration": self.duration,
+                                  "sla": sla})
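
For orientation, a minimal sketch (not part of the patch) of the record that BenchmarkEngine.consume_results now appends per scenario; only the keys come from the hunk above, the values are invented, and "max_failure_percent" is assumed to be FailureRate's option name:

    # Illustrative only: keys follow the diff, values are made up.
    results = [{"duration": 3.1, "error": []},
               {"duration": 4.9, "error": ["TimeoutException"]}]
    stored = {
        "raw": results,                  # one dict per iteration
        "scenario_duration": 8.0,        # overall scenario duration
        "sla": [{"criterion": "max_failure_percent",
                 "success": False,
                 "detail": "Maximum failure percent 25% failures, actually 50.0%"}],
    }
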
@@ -61,26 +61,24 @@ class SLA(object):
         """

     @staticmethod
-    def check_all(task):
+    def check_all(config, result):
         """Check all SLA criteria.

-        :param task: Task object
-        :returns: Generator
+        :param config: sla related config for a task
+        :param result: Result of a task
+        :returns: A list of sla results
         """
+        results = []
         opt_name_map = dict([(c.OPTION_NAME, c)
                              for c in utils.itersubclasses(SLA)])

-        for result in task.results:
-            config = result['key']['kw'].get('sla', None)
-            if config:
-                for name, criterion in config.iteritems():
-                    check_result = opt_name_map[name].check(criterion, result)
-                    yield {'benchmark': result['key']['name'],
-                           'pos': result['key']['pos'],
-                           'criterion': name,
-                           'success': check_result.success,
-                           'detail': check_result.msg}
+        for name, criterion in config.get("sla", {}).iteritems():
+            check_result = opt_name_map[name].check(criterion, result)
+            results.append({'criterion': name,
+                            'success': check_result.success,
+                            'detail': check_result.msg})
+        return results


 class FailureRate(SLA):
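
A quick sketch of the new calling convention (illustrative, not from the patch): check_all now takes the scenario's kw config and the raw result list directly, instead of walking a Task object, and returns a plain list rather than a generator. The option name "max_failure_percent" is assumed here to be FailureRate's OPTION_NAME:

    from rally.benchmark.sla import base as base_sla

    kw = {"sla": {"max_failure_percent": 25}}             # scenario kwargs
    raw = [{"duration": 1.0, "error": []},
           {"duration": 1.2, "error": ["SomeError"]}]      # 50% failures

    sla_results = base_sla.SLA.check_all(kw, raw)
    # roughly: [{"criterion": "max_failure_percent",
    #            "success": False,
    #            "detail": "Maximum failure percent 25% failures, actually 50.0%"}]

Note that the per-criterion 'benchmark' and 'pos' fields are no longer filled in here; the sla_check command adds them when it reads the stored results back from the database (see the task.py hunk further down).
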
@@ -90,14 +88,13 @@ class FailureRate(SLA):

     @staticmethod
     def check(criterion_value, result):
-        raw = result['data']['raw']
-        errors = len(filter(lambda x: x['error'], raw))
-        if criterion_value < errors * 100.0 / len(raw):
+        errors = len(filter(lambda x: x['error'], result))
+        if criterion_value < errors * 100.0 / len(result):
             success = False
         else:
             success = True
         msg = (_("Maximum failure percent %s%% failures, actually %s%%") %
-               (criterion_value, errors * 100.0 / len(raw)))
+               (criterion_value, errors * 100.0 / len(result)))
         return SLAResult(success, msg)


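
The failure-rate arithmetic above in one standalone snippet (the helper name is hypothetical; only the formula mirrors FailureRate.check): one error out of four iterations is a 25% failure rate, so a criterion of 20 fails and 30 passes. The comparison is strict, so a rate exactly equal to the criterion still counts as success.

    # Hypothetical helper mirroring FailureRate.check's formula.
    def failure_rate_ok(criterion_value, result):
        errors = len([x for x in result if x["error"]])
        return not (criterion_value < errors * 100.0 / len(result))

    result = [{"error": []}, {"error": ["boom"]}, {"error": []}, {"error": []}]
    print(failure_rate_ok(20, result))   # False: 25% > 20%
    print(failure_rate_ok(30, result))   # True:  25% <= 30%
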
@@ -111,7 +108,7 @@ class IterationTime(SLA):
     def check(criterion_value, result):
         duration = 0
         success = True
-        for i in result['data']['raw']:
+        for i in result:
             duration = i['duration']
             if i['duration'] > criterion_value:
                 success = False
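
Likewise, a standalone restatement of the iteration-time check (hypothetical helper; the numbers match the unit test further down): the criterion passes only if every iteration's duration stays at or below the limit.

    # Hypothetical helper mirroring the loop in IterationTime.check.
    def max_seconds_per_iteration_ok(criterion_value, result):
        return all(i["duration"] <= criterion_value for i in result)

    result = [{"duration": 3.14}, {"duration": 6.28}]
    print(max_seconds_per_iteration_ok(42, result))    # True
    print(max_seconds_per_iteration_ok(3.62, result))  # False
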
@@ -26,7 +26,6 @@ import yaml

 from rally.benchmark.processing import plot
 from rally.benchmark.processing import utils
-from rally.benchmark.sla import base as base_sla
 from rally.cmd import cliutils
 from rally.cmd.commands import use
 from rally.cmd import envutils
@@ -301,7 +300,8 @@ class TaskCommands(object):
         :param output_pprint: Output in pretty print format
         :param output_json: Output in json format (Default)
         """
-        results = map(lambda x: {"key": x["key"], 'result': x['data']['raw']},
+        results = map(lambda x: {"key": x["key"], 'result': x['data']['raw'],
+                                 "sla": x["data"]["sla"]},
                       db.task_result_get_all_by_uuid(task_id))

         if results:
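
For reference, a sketch of one entry now produced by the results command (values invented, field names as mapped above); the new "sla" field simply echoes what was stored alongside the scenario:

    # Illustrative only; field names follow the mapping in the hunk above.
    entry = {
        "key": {"name": "KeystoneBasic.create_user", "pos": 0,
                "kw": {"sla": {"max_seconds_per_iteration": 4}}},
        "result": [{"duration": 5.0, "error": []}],
        "sla": [{"criterion": "max_seconds_per_iteration",
                 "success": False,
                 "detail": "Maximum seconds per iteration 4s, actually 5s"}],
    }
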
@@ -378,16 +378,20 @@ class TaskCommands(object):
         :param task_id: Task uuid.
         :returns: Number of failed criteria.
         """
-        task = db.task_get_detailed(task_id)
+        task = db.task_result_get_all_by_uuid(task_id)
         failed_criteria = 0
-        rows = []
-        for row in base_sla.SLA.check_all(task):
-            failed_criteria += 0 if row['success'] else 1
-            rows.append(row if tojson else rutils.Struct(**row))
+        results = []
+        for result in task:
+            key = result["key"]
+            for sla in result["data"]["sla"]:
+                sla["benchmark"] = key["name"]
+                sla["pos"] = key["pos"]
+                failed_criteria += 0 if sla['success'] else 1
+                results.append(sla if tojson else rutils.Struct(**sla))
         if tojson:
-            print(json.dumps(rows))
+            print(json.dumps(results))
         else:
-            common_cliutils.print_list(rows, ('benchmark', 'pos',
-                                              'criterion', 'success',
-                                              'detail'))
+            common_cliutils.print_list(results, ('benchmark', 'pos',
+                                                 'criterion', 'success',
+                                                 'detail'))
         return failed_criteria
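
With tojson=True the command now emits something like the list below (illustrative values, field names from the hunk above). The 'benchmark' and 'pos' fields are filled in from each result's key, and the return value is the number of entries with success == False, which makes the command easy to gate on in scripts.

    # Illustrative payload; one entry per criterion per scenario.
    [
        {"benchmark": "KeystoneBasic.create_user",
         "pos": 0,
         "criterion": "max_seconds_per_iteration",
         "success": False,
         "detail": "Maximum seconds per iteration 4s, actually 5s"}
    ]
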
@@ -15,7 +15,6 @@


 import jsonschema
-import mock

 from rally.benchmark.sla import base
 from tests import test
@@ -27,7 +26,7 @@ class TestCriterion(base.SLA):

     @staticmethod
     def check(criterion_value, result):
-        return base.SLAResult(criterion_value == result["data"],
+        return base.SLAResult(criterion_value == result,
                               msg='detail')


@@ -46,36 +45,30 @@ class BaseSLATestCase(test.TestCase):
                           base.SLA.validate, {"test_criterion": 42.0})

     def test_check_all(self):
-        task = mock.Mock()
         config = {
             "sla": {"test_criterion": 42},
         }
-        task.results = [{"key": {"kw": config, "name": "fake", "pos": 0},
-                         "data": 42}]
-        results = list(base.SLA.check_all(task))
-        expected = [{'benchmark': 'fake',
-                     'criterion': 'test_criterion',
+        result = {"key": {"kw": config, "name": "fake", "pos": 0},
+                  "data": 42}
+        results = list(base.SLA.check_all(config, result["data"]))
+        expected = [{'criterion': 'test_criterion',
                      'detail': 'detail',
-                     'pos': 0,
                      'success': True}]
         self.assertEqual(expected, results)
-        task.results[0]["data"] = 43
-        results = list(base.SLA.check_all(task))
-        expected = [{'benchmark': 'fake',
-                     'criterion': 'test_criterion',
+        result["data"] = 43
+        results = list(base.SLA.check_all(config, result["data"]))
+        expected = [{'criterion': 'test_criterion',
                      'detail': 'detail',
-                     'pos': 0,
                      'success': False}]
         self.assertEqual(expected, results)


 class FailureRateTestCase(test.TestCase):
     def test_check(self):
-        raw = [
+        result = [
             {"error": ["error"]},
             {"error": []},
         ]  # one error and one success. 50% success rate
-        result = {"data": {"raw": raw}}
         # 50% < 75.0%
         self.assertTrue(base.FailureRate.check(75.0, result).success)
         # 50% > 25%
@@ -84,10 +77,9 @@ class FailureRateTestCase(test.TestCase):

 class IterationTimeTestCase(test.TestCase):
     def test_check(self):
-        raw = [
+        result = [
             {"duration": 3.14},
             {"duration": 6.28},
         ]
-        result = {"data": {"raw": raw}}
         self.assertTrue(base.IterationTime.check(42, result).success)
         self.assertFalse(base.IterationTime.check(3.62, result).success)
@@ -15,6 +15,7 @@

 """Tests for the Test engine."""

+import collections
 import copy

 import jsonschema
@@ -341,4 +342,18 @@ class BenchmarkEngineTestCase(test.TestCase):
             "config": expected_context
         }
         self.assertEqual(result, expected_result)
-        mock_meta.assert_called_once_with(name, "context")
+        mock_meta.assert_called_once_with(name, "context")
+
+    @mock.patch("rally.benchmark.sla.base.SLA.check_all")
+    def test_consume_results(self, mock_check_all):
+        key = {"kw": {"fake": 2}, "name": "fake", "pos": 0}
+        task = mock.MagicMock()
+        config = {
+            "a.benchmark": [{"context": {"context_a": {"a": 1}}}],
+        }
+        is_done = mock.MagicMock()
+        is_done.isSet.side_effect = [False, False, True]
+        eng = engine.BenchmarkEngine(config, task)
+        eng.duration = 1
+        eng.consume_results(key, task, collections.deque([1, 2]), is_done)
+        mock_check_all.assert_called_once_with({"fake": 2}, [1, 2])
@@ -139,10 +139,11 @@ class TaskCommandsTestCase(test.TestCase):
     def test_results_default(self, mock_json, mock_db):
         test_uuid = 'aa808c14-69cc-4faf-a906-97e05f5aebbd'
         value = [
-            {'key': 'key', 'data': {'raw': 'raw'}}
+            {'key': 'key', 'data': {'raw': 'raw', 'sla': []}}
         ]
         result = map(lambda x: {"key": x["key"],
-                                'result': x['data']['raw']}, value)
+                                "result": x["data"]["raw"],
+                                "sla": x["data"]["sla"]}, value)
         mock_db.task_result_get_all_by_uuid.return_value = value
         self.task.results(test_uuid)
         mock_json.assert_called_once_with(result)
@@ -153,10 +154,11 @@ class TaskCommandsTestCase(test.TestCase):
     def test_results_json(self, mock_json, mock_db):
         test_uuid = 'e87dd629-cd3d-4a1e-b377-7b93c19226fb'
         value = [
-            {'key': 'key', 'data': {'raw': 'raw'}}
+            {'key': 'key', 'data': {'raw': 'raw', 'sla': []}}
         ]
         result = map(lambda x: {"key": x["key"],
-                                'result': x['data']['raw']}, value)
+                                "result": x["data"]["raw"],
+                                "sla": x["data"]["sla"]}, value)

         mock_db.task_result_get_all_by_uuid.return_value = value
         self.task.results(test_uuid, output_json=True)
|
||||
def test_results_pprint(self, mock_pprint, mock_db):
|
||||
test_uuid = 'c1e4bc59-a8fd-458c-9abb-c922d8df4285'
|
||||
value = [
|
||||
{'key': 'key', 'data': {'raw': 'raw'}}
|
||||
{'key': 'key', 'data': {'raw': 'raw', 'sla': []}}
|
||||
]
|
||||
result = map(lambda x: {"key": x["key"],
|
||||
'result': x['data']['raw']}, value)
|
||||
"result": x["data"]["raw"],
|
||||
"sla": x["data"]["sla"]}, value)
|
||||
mock_db.task_result_get_all_by_uuid.return_value = value
|
||||
self.task.results(test_uuid, output_pprint=True)
|
||||
mock_pprint.assert_called_once_with(result)
|
||||
@ -233,18 +236,27 @@ class TaskCommandsTestCase(test.TestCase):
|
||||
self.assertTrue(mock_api.delete_task.mock_calls == expected_calls)
|
||||
|
||||
@mock.patch('rally.cmd.commands.task.common_cliutils.print_list')
|
||||
@mock.patch("rally.cmd.commands.task.base_sla")
|
||||
@mock.patch("rally.cmd.commands.task.db")
|
||||
def test_sla_check(self, mock_db, mock_sla, mock_print_list):
|
||||
fake_rows = [
|
||||
{'success': True},
|
||||
{'success': False},
|
||||
]
|
||||
mock_db.task_get_detailed.return_value = 'fake_task'
|
||||
mock_sla.SLA.check_all.return_value = fake_rows
|
||||
def test_sla_check(self, mock_db, mock_print_list):
|
||||
value = [{
|
||||
"key": {
|
||||
"name": "fake_name",
|
||||
"pos": "fake_pos",
|
||||
"kw": "fake_kw"
|
||||
},
|
||||
"data": {
|
||||
"scenario_duration": 1.0,
|
||||
"raw": [],
|
||||
"sla":
|
||||
[{"benchmark": "KeystoneBasic.create_user",
|
||||
"criterion": "max_seconds_per_iteration",
|
||||
"pos": 0, "success": False, "detail":
|
||||
"Maximum seconds per iteration 4s, actually 5s"}]
|
||||
}
|
||||
}]
|
||||
mock_db.task_result_get_all_by_uuid.return_value = value
|
||||
retval = self.task.sla_check(task_id='fake_task_id')
|
||||
self.assertEqual(1, retval)
|
||||
mock_sla.SLA.check_all.assert_called_once_with('fake_task')
|
||||
|
||||
@mock.patch('rally.cmd.commands.task.open',
|
||||
mock.mock_open(read_data='{"some": "json"}'),
|
||||
|