From 6dbd51ac8d2b357928077eef37a1784c1e864514 Mon Sep 17 00:00:00 2001 From: Sergey Skripnick Date: Thu, 26 Jun 2014 20:55:16 +0300 Subject: [PATCH] Add sla checking SLA (Service-level agreement) is set of details for determining compliance with contracted values such as maximum error rate or minimum response time. Add two criteria: maximum time per iteration maximum error rate Change-Id: I1212bd684831461a7d7e33636d45a6d346e3b574 Blueprint: task-success-criteria --- doc/samples/tasks/sla/README.rst | 21 ++++ .../tasks/sla/create-and-delete-user.json | 18 ++++ .../tasks/sla/create-and-delete-user.yaml | 12 +++ doc/samples/tasks/tasks.rst | 9 ++ rally/benchmark/engine.py | 7 +- rally/benchmark/sla/__init__.py | 0 rally/benchmark/sla/base.py | 101 ++++++++++++++++++ rally/cmd/commands/task.py | 25 +++++ tests/benchmark/sla/__init__.py | 0 tests/benchmark/sla/test_base.py | 88 +++++++++++++++ tests/cmd/commands/test_task.py | 14 +++ 11 files changed, 294 insertions(+), 1 deletion(-) create mode 100644 doc/samples/tasks/sla/README.rst create mode 100644 doc/samples/tasks/sla/create-and-delete-user.json create mode 100644 doc/samples/tasks/sla/create-and-delete-user.yaml create mode 100644 rally/benchmark/sla/__init__.py create mode 100644 rally/benchmark/sla/base.py create mode 100644 tests/benchmark/sla/__init__.py create mode 100644 tests/benchmark/sla/test_base.py diff --git a/doc/samples/tasks/sla/README.rst b/doc/samples/tasks/sla/README.rst new file mode 100644 index 0000000000..4cf6ccae28 --- /dev/null +++ b/doc/samples/tasks/sla/README.rst @@ -0,0 +1,21 @@ +SLA Configuration Samples +========================= + +This directory contains SLA configuration samples. + +SLA (Service-level agreement) is set of details for determining compliance +with contracted values such as maximum error rate or minimum response time. + +Currently supported criteria: + + +max_failure_percent +=================== + +Maximum allowed failure rate in percent. 
+ + +max_seconds_per_iteration +========================= + +Maximum time in seconds per one iteration. diff --git a/doc/samples/tasks/sla/create-and-delete-user.json b/doc/samples/tasks/sla/create-and-delete-user.json new file mode 100644 index 0000000000..3a5367302f --- /dev/null +++ b/doc/samples/tasks/sla/create-and-delete-user.json @@ -0,0 +1,18 @@ +{ + "KeystoneBasic.create_delete_user": [ + { + "args": { + "name_length": 10 + }, + "runner": { + "type": "constant", + "times": 100, + "concurrency": 10 + }, + "sla": { + "max_seconds_per_iteration": 4, + "max_failure_percent": 1 + } + } + ] +} diff --git a/doc/samples/tasks/sla/create-and-delete-user.yaml b/doc/samples/tasks/sla/create-and-delete-user.yaml new file mode 100644 index 0000000000..fd5112000f --- /dev/null +++ b/doc/samples/tasks/sla/create-and-delete-user.yaml @@ -0,0 +1,12 @@ +--- + KeystoneBasic.create_delete_user: + - + args: + name_length: 10 + runner: + type: "constant" + times: 100 + concurrency: 10 + sla: + max_seconds_per_iteration: 4 + max_failure_percent: 1 diff --git a/doc/samples/tasks/tasks.rst b/doc/samples/tasks/tasks.rst index 392159d75f..189bbc376e 100644 --- a/doc/samples/tasks/tasks.rst +++ b/doc/samples/tasks/tasks.rst @@ -18,6 +18,9 @@ General structure of configuration file: "context": { ... } + "sla": { + ... + } } } @@ -36,5 +39,11 @@ be launched. Look at `doc/samples/tasks/context `_ for samples. +Section "sla" defines details for determining compliance with contracted values +such as maximum error rate or minimum response time. +Look at `doc/samples/tasks/sla +`_ for +samples. + See a `detailed description of benchmark scenarios, contexts & runners `_. 
diff --git a/rally/benchmark/engine.py b/rally/benchmark/engine.py index 3d25382fad..89898bea7e 100644 --- a/rally/benchmark/engine.py +++ b/rally/benchmark/engine.py @@ -23,6 +23,7 @@ from rally.benchmark.context import base as base_ctx from rally.benchmark.context import users as users_ctx from rally.benchmark.runners import base as base_runner from rally.benchmark.scenarios import base as base_scenario +from rally.benchmark.sla import base as base_sla from rally import consts from rally import exceptions from rally.objects import endpoint @@ -56,7 +57,10 @@ CONFIG_SCHEMA = { }, "context": { "type": "object" - } + }, + "sla": { + "type": "object", + }, }, "additionalProperties": False } @@ -108,6 +112,7 @@ class BenchmarkEngine(object): base_runner.ScenarioRunner.validate(kw.get("runner", {})) base_ctx.ContextManager.validate(kw.get("context", {}), non_hidden=True) + base_sla.SLA.validate(kw.get("sla", {})) except (exceptions.RallyException, jsonschema.ValidationError) as e: raise exceptions.InvalidBenchmarkConfig( diff --git a/rally/benchmark/sla/__init__.py b/rally/benchmark/sla/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/rally/benchmark/sla/base.py b/rally/benchmark/sla/base.py new file mode 100644 index 0000000000..bf4b8c047e --- /dev/null +++ b/rally/benchmark/sla/base.py @@ -0,0 +1,101 @@ +# Copyright 2014: Mirantis Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+
+
+"""
+SLA (Service-level agreement) is a set of details for determining compliance
+with contracted values such as maximum error rate or minimum response time.
+"""
+
+import abc
+import jsonschema
+import six
+
+from rally import utils
+
+
+@six.add_metaclass(abc.ABCMeta)
+class SLA(object):
+    """Factory for criteria classes."""
+
+    @staticmethod
+    def validate(config):
+        properties = dict([(c.OPTION_NAME, c.CONFIG_SCHEMA)
+                           for c in utils.itersubclasses(SLA)])
+        schema = {
+            "type": "object",
+            "properties": properties,
+            "additionalProperties": False,
+        }
+        jsonschema.validate(config, schema)
+
+    @staticmethod
+    @abc.abstractmethod
+    def check(criterion_value, result):
+        """Check if task succeeded according to criterion.
+
+        :param criterion_value: Criterion value specified in configuration
+        :param result: result object
+        :returns: True if success
+        """
+
+    @staticmethod
+    def check_all(task):
+        """Check all SLA criteria.
+
+        :param task: Task object
+        :returns: Generator
+        """
+
+        opt_name_map = dict([(c.OPTION_NAME, c)
+                             for c in utils.itersubclasses(SLA)])
+
+        for result in task.results:
+            config = result['key']['kw'].get('sla', None)
+            if config:
+                for name, criterion in config.iteritems():
+                    success = opt_name_map[name].check(criterion, result)
+                    yield {'benchmark': result['key']['name'],
+                           'pos': result['key']['pos'],
+                           'criterion': name,
+                           'success': success}
+
+
+class FailureRate(SLA):
+    """Failure rate in percents."""
+    OPTION_NAME = "max_failure_percent"
+    CONFIG_SCHEMA = {"type": "number", "minimum": 0.0, "maximum": 100.0}
+
+    @staticmethod
+    def check(criterion_value, result):
+        raw = result['data']['raw']
+        errors = len(filter(lambda x: x['error'], raw))
+        if criterion_value < errors * 100.0 / len(raw):
+            return False
+        return True
+
+
+class IterationTime(SLA):
+    """Maximum time for one iteration in seconds."""
+    OPTION_NAME = "max_seconds_per_iteration"
+    CONFIG_SCHEMA = {"type": "number", "minimum": 0.0,
+                     "exclusiveMinimum": True}
+
+    @staticmethod
+    def check(criterion_value, result):
+        for i in result['data']['raw']:
+            if i['duration'] > criterion_value:
+                return False
+        return True
diff --git a/rally/cmd/commands/task.py b/rally/cmd/commands/task.py
index c00fba2ba4..f4cf13512c 100644
--- a/rally/cmd/commands/task.py
+++ b/rally/cmd/commands/task.py
@@ -26,6 +26,7 @@ import yaml
 
 from rally.benchmark.processing import plot
 from rally.benchmark.processing import utils
+from rally.benchmark.sla import base as base_sla
 from rally.cmd import cliutils
 from rally.cmd.commands import use
 from rally.cmd import envutils
@@ -351,3 +352,27 @@ class TaskCommands(object):
                 api.delete_task(tid, force=force)
         else:
             api.delete_task(task_id, force=force)
+
+    @cliutils.args("--uuid", type=str, dest="task_id", help="uuid of task")
+    @cliutils.args("--json", dest="tojson",
+                   action="store_true",
+                   help="output in json format")
+    @envutils.with_default_task_id
+    def sla_check(self, task_id=None, tojson=False):
+        """Check if the task succeeded according to SLA.
+
+        :param task_id: Task uuid.
+        :returns: Number of failed criteria.
+        """
+        task = db.task_get_detailed(task_id)
+        failed_criteria = 0
+        rows = []
+        for row in base_sla.SLA.check_all(task):
+            failed_criteria += 0 if row['success'] else 1
+            rows.append(row if tojson else rutils.Struct(**row))
+        if tojson:
+            print(json.dumps(rows))
+        else:
+            common_cliutils.print_list(rows, ('benchmark', 'pos',
+                                              'criterion', 'success'))
+        return failed_criteria
diff --git a/tests/benchmark/sla/__init__.py b/tests/benchmark/sla/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/tests/benchmark/sla/test_base.py b/tests/benchmark/sla/test_base.py
new file mode 100644
index 0000000000..c3317d22bc
--- /dev/null
+++ b/tests/benchmark/sla/test_base.py
@@ -0,0 +1,88 @@
+# Copyright 2014: Mirantis Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +import jsonschema +import mock + +from rally.benchmark.sla import base +from tests import test + + +class TestCriterion(base.SLA): + OPTION_NAME = "test_criterion" + CONFIG_SCHEMA = {"type": "integer"} + + @staticmethod + def check(criterion_value, result): + return criterion_value == result["data"] + + +class BaseSLATestCase(test.TestCase): + + def test_validate(self): + cnf = {"test_criterion": 42} + base.SLA.validate(cnf) + + def test_validate_invalid_name(self): + self.assertRaises(jsonschema.ValidationError, + base.SLA.validate, {"nonexistent": 42}) + + def test_validate_invalid_type(self): + self.assertRaises(jsonschema.ValidationError, + base.SLA.validate, {"test_criterion": 42.0}) + + def test_check_all(self): + task = mock.Mock() + config = { + "sla": {"test_criterion": 42}, + } + task.results = [{"key": {"kw": config, "name": "fake", "pos": 0}, + "data": 42}] + results = list(base.SLA.check_all(task)) + expected = [{'benchmark': 'fake', + 'criterion': 'test_criterion', + 'pos': 0, + 'success': True}] + self.assertEqual(expected, results) + task.results[0]["data"] = 43 + results = list(base.SLA.check_all(task)) + expected = [{'benchmark': 'fake', + 'criterion': 'test_criterion', + 'pos': 0, + 'success': False}] + self.assertEqual(expected, results) + + +class FailureRateTestCase(test.TestCase): + def test_check(self): + raw = [ + {"error": ["error"]}, + {"error": []}, + ] # one error and one success. 
50% failure rate
+        result = {"data": {"raw": raw}}
+        self.assertTrue(base.FailureRate.check(75.0, result))  # 50% < 75.0%
+        self.assertFalse(base.FailureRate.check(25, result))  # 50% > 25%
+
+
+class IterationTimeTestCase(test.TestCase):
+    def test_check(self):
+        raw = [
+            {"duration": 3.14},
+            {"duration": 6.28},
+        ]
+        result = {"data": {"raw": raw}}
+        self.assertTrue(base.IterationTime.check(42, result))
+        self.assertFalse(base.IterationTime.check(3.62, result))
diff --git a/tests/cmd/commands/test_task.py b/tests/cmd/commands/test_task.py
index 83875d59f7..f56973e292 100644
--- a/tests/cmd/commands/test_task.py
+++ b/tests/cmd/commands/test_task.py
@@ -164,3 +164,17 @@ class TaskCommandsTestCase(test.TestCase):
         expected_calls = [mock.call(task_uuid, force=force)
                           for task_uuid in task_uuids]
         self.assertTrue(mock_api.delete_task.mock_calls == expected_calls)
+
+    @mock.patch('rally.cmd.commands.task.common_cliutils.print_list')
+    @mock.patch("rally.cmd.commands.task.base_sla")
+    @mock.patch("rally.cmd.commands.task.db")
+    def test_sla_check(self, mock_db, mock_sla, mock_print_list):
+        fake_rows = [
+            {'success': True},
+            {'success': False},
+        ]
+        mock_db.task_get_detailed.return_value = 'fake_task'
+        mock_sla.SLA.check_all.return_value = fake_rows
+        retval = self.task.sla_check(task_id='fake_task_id')
+        self.assertEqual(1, retval)
+        mock_sla.SLA.check_all.assert_called_once_with('fake_task')