Add SLA checking

SLA (Service-level agreement) is a set of details for determining
compliance with contracted values such as maximum error rate or
minimum response time.

Add two criteria:

 * maximum time per iteration
 * maximum error rate

Change-Id: I1212bd684831461a7d7e33636d45a6d346e3b574
Blueprint: task-success-criteria
Sergey Skripnick 2014-06-26 20:55:16 +03:00
parent 39ea59fe37
commit 6dbd51ac8d
11 changed files with 294 additions and 1 deletion


@@ -0,0 +1,21 @@
SLA Configuration Samples
=========================
This directory contains SLA configuration samples.
SLA (Service-level agreement) is a set of details for determining compliance
with contracted values such as maximum error rate or minimum response time.
Currently supported criteria:
max_failure_percent
===================
Maximum allowed failure rate in percent.
max_seconds_per_iteration
=========================
Maximum time in seconds per one iteration.


@@ -0,0 +1,18 @@
{
"KeystoneBasic.create_delete_user": [
{
"args": {
"name_length": 10
},
"runner": {
"type": "constant",
"times": 100,
"concurrency": 10
},
"sla": {
"max_seconds_per_iteration": 4,
"max_failure_percent": 1
}
}
]
}


@@ -0,0 +1,12 @@
---
KeystoneBasic.create_delete_user:
-
args:
name_length: 10
runner:
type: "constant"
times: 100
concurrency: 10
sla:
max_seconds_per_iteration: 4
max_failure_percent: 1


@@ -18,6 +18,9 @@ General structure of configuration file:
"context": {
...
},
"sla": {
...
}
}
}
@@ -36,5 +39,11 @@ be launched. Look at `doc/samples/tasks/context
<https://github.com/stackforge/rally/tree/master/doc/samples/tasks/context>`_
for samples.
Section "sla" defines details for determining compliance with contracted values
such as maximum error rate or minimum response time.
Look at `doc/samples/tasks/sla
<https://github.com/stackforge/rally/tree/master/doc/samples/tasks/sla>`_ for
samples.
See a `detailed description of benchmark scenarios, contexts & runners
<https://github.com/stackforge/rally/tree/master/doc/source/benchmark.rst>`_.


@@ -23,6 +23,7 @@ from rally.benchmark.context import base as base_ctx
from rally.benchmark.context import users as users_ctx
from rally.benchmark.runners import base as base_runner
from rally.benchmark.scenarios import base as base_scenario
from rally.benchmark.sla import base as base_sla
from rally import consts
from rally import exceptions
from rally.objects import endpoint
@@ -56,7 +57,10 @@ CONFIG_SCHEMA = {
},
"context": {
"type": "object"
},
"sla": {
"type": "object",
},
},
"additionalProperties": False
}
@@ -108,6 +112,7 @@ class BenchmarkEngine(object):
base_runner.ScenarioRunner.validate(kw.get("runner", {}))
base_ctx.ContextManager.validate(kw.get("context", {}),
non_hidden=True)
base_sla.SLA.validate(kw.get("sla", {}))
except (exceptions.RallyException,
jsonschema.ValidationError) as e:
raise exceptions.InvalidBenchmarkConfig(
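
Since SLA.validate() builds its JSON schema from every SLA subclass, an unknown
criterion name or an out-of-range value is rejected here, at task-validation
time, before any benchmark runs. A minimal illustrative sketch (not part of the
commit; the option name and 0..100 bounds come from the FailureRate criterion
added below):

import jsonschema

from rally.benchmark.sla import base as base_sla

base_sla.SLA.validate({"max_failure_percent": 10})  # within 0..100, passes

try:
    base_sla.SLA.validate({"max_failure_percent": 200})  # outside 0..100
except jsonschema.ValidationError as e:
    print(e.message)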

rally/benchmark/sla/base.py (new file, 101 lines)

@@ -0,0 +1,101 @@
# Copyright 2014: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
SLA (Service-level agreement) is set of details for determining compliance
with contracted values such as maximum error rate or minimum response time.
"""
import abc
import jsonschema
import six
from rally import utils
@six.add_metaclass(abc.ABCMeta)
class SLA(object):
"""Factory for criteria classes."""
@staticmethod
def validate(config):
properties = dict([(c.OPTION_NAME, c.CONFIG_SCHEMA)
for c in utils.itersubclasses(SLA)])
schema = {
"type": "object",
"properties": properties,
"additionalProperties": False,
}
jsonschema.validate(config, schema)
@staticmethod
@abc.abstractmethod
def check(criterion_value, result):
"""Check if task succeeded according to criterion.
:param criterion_value: Criterion value specified in configuration
:param result: result object
:returns: True if success
"""
@staticmethod
def check_all(task):
"""Check all SLA criteria.
:param task: Task object
:returns: Generator
"""
opt_name_map = dict([(c.OPTION_NAME, c)
for c in utils.itersubclasses(SLA)])
for result in task.results:
config = result['key']['kw'].get('sla', None)
if config:
for name, criterion in config.iteritems():
success = opt_name_map[name].check(criterion, result)
yield {'benchmark': result['key']['name'],
'pos': result['key']['pos'],
'criterion': name,
'success': success}
class FailureRate(SLA):
"""Failure rate in percents."""
OPTION_NAME = "max_failure_percent"
CONFIG_SCHEMA = {"type": "number", "minimum": 0.0, "maximum": 100.0}
@staticmethod
def check(criterion_value, result):
raw = result['data']['raw']
errors = len(filter(lambda x: x['error'], raw))
if criterion_value < errors * 100.0 / len(raw):
return False
return True
class IterationTime(SLA):
"""Maximum time for one iteration in seconds."""
OPTION_NAME = "max_seconds_per_iteration"
CONFIG_SCHEMA = {"type": "number", "minimum": 0.0,
"exclusuveMinimum": True}
@staticmethod
def check(criterion_value, result):
for i in result['data']['raw']:
if i['duration'] > criterion_value:
return False
return True
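
To see how the criteria consume stored results, here is a small illustrative
snippet (not part of the commit) that feeds both checks a fake result in the
{'data': {'raw': [...]}} shape used above:

from rally.benchmark.sla import base

# One failed and three successful iterations; the slowest takes 3.9s.
result = {'data': {'raw': [
    {'error': ['Timeout'], 'duration': 3.9},
    {'error': [], 'duration': 1.2},
    {'error': [], 'duration': 0.8},
    {'error': [], 'duration': 1.1},
]}}

print(base.FailureRate.check(30, result))     # True:  25% <= 30%
print(base.FailureRate.check(10, result))     # False: 25% > 10%
print(base.IterationTime.check(4.0, result))  # True:  max duration is 3.9s
print(base.IterationTime.check(2.0, result))  # False: 3.9s > 2.0s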


@@ -26,6 +26,7 @@ import yaml
from rally.benchmark.processing import plot
from rally.benchmark.processing import utils
from rally.benchmark.sla import base as base_sla
from rally.cmd import cliutils
from rally.cmd.commands import use
from rally.cmd import envutils
@@ -351,3 +352,27 @@ class TaskCommands(object):
api.delete_task(tid, force=force)
else:
api.delete_task(task_id, force=force)
@cliutils.args("--uuid", type=str, dest="task_id", help="uuid of task")
@cliutils.args("--json", dest="tojson",
action="store_true",
help="output in json format")
@envutils.with_default_task_id
def sla_check(self, task_id=None, tojson=False):
"""Check if task was succeded according to SLA.
:param task_id: Task uuid.
:returns: Number of failed criteria.
"""
task = db.task_get_detailed(task_id)
failed_criteria = 0
rows = []
for row in base_sla.SLA.check_all(task):
failed_criteria += 0 if row['success'] else 1
rows.append(row if tojson else rutils.Struct(**row))
if tojson:
print(json.dumps(rows))
else:
common_cliutils.print_list(rows, ('benchmark', 'pos',
'criterion', 'success'))
return failed_criteria
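
Assuming Rally's usual CLI wiring for TaskCommands methods (not shown in this
diff), the new method should surface as something like
`rally task sla_check --uuid <task-uuid>`, printing a
benchmark/pos/criterion/success table (or raw JSON with --json) and returning
the number of failed criteria, which makes it convenient as a pass/fail gate
in CI.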


@@ -0,0 +1,88 @@
# Copyright 2014: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import jsonschema
import mock

from rally.benchmark.sla import base
from tests import test


class TestCriterion(base.SLA):
    OPTION_NAME = "test_criterion"
    CONFIG_SCHEMA = {"type": "integer"}

    @staticmethod
    def check(criterion_value, result):
        return criterion_value == result["data"]


class BaseSLATestCase(test.TestCase):

    def test_validate(self):
        cnf = {"test_criterion": 42}
        base.SLA.validate(cnf)

    def test_validate_invalid_name(self):
        self.assertRaises(jsonschema.ValidationError,
                          base.SLA.validate, {"nonexistent": 42})

    def test_validate_invalid_type(self):
        self.assertRaises(jsonschema.ValidationError,
                          base.SLA.validate, {"test_criterion": 42.0})

    def test_check_all(self):
        task = mock.Mock()
        config = {
            "sla": {"test_criterion": 42},
        }
        task.results = [{"key": {"kw": config, "name": "fake", "pos": 0},
                         "data": 42}]
        results = list(base.SLA.check_all(task))
        expected = [{'benchmark': 'fake',
                     'criterion': 'test_criterion',
                     'pos': 0,
                     'success': True}]
        self.assertEqual(expected, results)
        task.results[0]["data"] = 43
        results = list(base.SLA.check_all(task))
        expected = [{'benchmark': 'fake',
                     'criterion': 'test_criterion',
                     'pos': 0,
                     'success': False}]
        self.assertEqual(expected, results)


class FailureRateTestCase(test.TestCase):

    def test_check(self):
        raw = [
            {"error": ["error"]},
            {"error": []},
        ]  # one error out of two iterations = a 50% failure rate
        result = {"data": {"raw": raw}}
        self.assertTrue(base.FailureRate.check(75.0, result))  # 50% <= 75%
        self.assertFalse(base.FailureRate.check(25, result))   # 50% > 25%


class IterationTimeTestCase(test.TestCase):

    def test_check(self):
        raw = [
            {"duration": 3.14},
            {"duration": 6.28},
        ]
        result = {"data": {"raw": raw}}
        self.assertTrue(base.IterationTime.check(42, result))
        self.assertFalse(base.IterationTime.check(3.62, result))


@@ -164,3 +164,17 @@ class TaskCommandsTestCase(test.TestCase):
expected_calls = [mock.call(task_uuid, force=force) for task_uuid
in task_uuids]
self.assertTrue(mock_api.delete_task.mock_calls == expected_calls)
@mock.patch('rally.cmd.commands.task.common_cliutils.print_list')
@mock.patch("rally.cmd.commands.task.base_sla")
@mock.patch("rally.cmd.commands.task.db")
def test_sla_check(self, mock_db, mock_sla, mock_print_list):
fake_rows = [
{'success': True},
{'success': False},
]
mock_db.task_get_detailed.return_value = 'fake_task'
mock_sla.SLA.check_all.return_value = fake_rows
retval = self.task.sla_check(task_id='fake_task_id')
self.assertEqual(1, retval)
mock_sla.SLA.check_all.assert_called_once_with('fake_task')