Split SLA to base and plugins
Move all subclasses of SLA under plugins/sla.
Split sla-plugins into modules.

Implements: blueprint split-plugins
Change-Id: Icdc895fb829809944791fb48edb8e1f83a1f6bf0
This commit is contained in: parent c0e281b6f2, commit a06b212b28
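For orientation only, and not part of the diff itself: the criteria moved below keep their OPTION_NAME values, so a task's "sla" section still references them by the same keys. A minimal sketch, with illustrative numbers permitted by the CONFIG_SCHEMAs shown in this commit:

# Illustrative only: an "sla" section using the option names defined by the
# plugins added below (values are examples allowed by their CONFIG_SCHEMAs).
sla_section = {
    "failure_rate": {"min": 0, "max": 25.0},
    "max_seconds_per_iteration": 4.0,
    "max_avg_duration": 3.5,
    "outliers": {"max": 1, "min_iterations": 10, "sigmas": 3.0},
}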
@ -25,9 +25,7 @@ import jsonschema
import six

from rally.common.i18n import _
from rally.common import streaming_algorithms
from rally.common import utils
from rally import consts
from rally import exceptions
@ -127,163 +125,3 @@ class SLA(object):
    def status(self):
        """Return "Passed" or "Failed" depending on the current SLA status."""
        return "Passed" if self.success else "Failed"


class FailureRateDeprecated(SLA):
    """[Deprecated] Failure rate in percents."""
    OPTION_NAME = "max_failure_percent"
    CONFIG_SCHEMA = {"type": "number", "minimum": 0.0, "maximum": 100.0}

    def __init__(self, criterion_value):
        super(FailureRateDeprecated, self).__init__(criterion_value)
        self.errors = 0
        self.total = 0
        self.error_rate = 0.0

    def add_iteration(self, iteration):
        self.total += 1
        if iteration["error"]:
            self.errors += 1
        self.error_rate = self.errors * 100.0 / self.total
        self.success = self.error_rate <= self.criterion_value
        return self.success

    def details(self):
        return (_("Maximum failure rate %s%% <= %s%% - %s") %
                (self.criterion_value, self.error_rate, self.status()))


class FailureRate(SLA):
    """Failure rate minimum and maximum in percents."""
    OPTION_NAME = "failure_rate"
    CONFIG_SCHEMA = {
        "type": "object",
        "$schema": consts.JSON_SCHEMA,
        "properties": {
            "min": {"type": "number", "minimum": 0.0, "maximum": 100.0},
            "max": {"type": "number", "minimum": 0.0, "maximum": 100.0}
        }
    }

    def __init__(self, criterion_value):
        super(FailureRate, self).__init__(criterion_value)
        self.min_percent = self.criterion_value.get("min", 0)
        self.max_percent = self.criterion_value.get("max", 100)
        self.errors = 0
        self.total = 0
        self.error_rate = 0.0

    def add_iteration(self, iteration):
        self.total += 1
        if iteration["error"]:
            self.errors += 1
        self.error_rate = self.errors * 100.0 / self.total
        self.success = self.min_percent <= self.error_rate <= self.max_percent
        return self.success

    def details(self):
        return (_("Failure rate criteria %.2f%% <= %.2f%% <= %.2f%% - %s") %
                (self.min_percent, self.error_rate, self.max_percent,
                 self.status()))


class IterationTime(SLA):
    """Maximum time for one iteration in seconds."""
    OPTION_NAME = "max_seconds_per_iteration"
    CONFIG_SCHEMA = {"type": "number", "minimum": 0.0,
                     "exclusiveMinimum": True}

    def __init__(self, criterion_value):
        super(IterationTime, self).__init__(criterion_value)
        self.max_iteration_time = 0.0

    def add_iteration(self, iteration):
        if iteration["duration"] > self.max_iteration_time:
            self.max_iteration_time = iteration["duration"]
        self.success = self.max_iteration_time <= self.criterion_value
        return self.success

    def details(self):
        return (_("Maximum seconds per iteration %.2fs <= %.2fs - %s") %
                (self.max_iteration_time, self.criterion_value, self.status()))


class MaxAverageDuration(SLA):
    """Maximum average duration of one iteration in seconds."""
    OPTION_NAME = "max_avg_duration"
    CONFIG_SCHEMA = {"type": "number", "minimum": 0.0,
                     "exclusiveMinimum": True}

    def __init__(self, criterion_value):
        super(MaxAverageDuration, self).__init__(criterion_value)
        self.total_duration = 0.0
        self.iterations = 0
        self.avg = 0.0

    def add_iteration(self, iteration):
        if not iteration.get("error"):
            self.total_duration += iteration["duration"]
            self.iterations += 1
            self.avg = self.total_duration / self.iterations
        self.success = self.avg <= self.criterion_value
        return self.success

    def details(self):
        return (_("Average duration of one iteration %.2fs <= %.2fs - %s") %
                (self.avg, self.criterion_value, self.status()))


class Outliers(SLA):
    """Limit the number of outliers (iterations that take too much time).

    The outliers are detected automatically using the computation of the mean
    and standard deviation (std) of the data.
    """
    OPTION_NAME = "outliers"
    CONFIG_SCHEMA = {
        "type": "object",
        "$schema": consts.JSON_SCHEMA,
        "properties": {
            "max": {"type": "integer", "minimum": 0},
            "min_iterations": {"type": "integer", "minimum": 3},
            "sigmas": {"type": "number", "minimum": 0.0,
                       "exclusiveMinimum": True}
        }
    }

    def __init__(self, criterion_value):
        super(Outliers, self).__init__(criterion_value)
        self.max_outliers = self.criterion_value.get("max", 0)
        # NOTE(msdubov): Having 3 as default is reasonable (need enough data).
        self.min_iterations = self.criterion_value.get("min_iterations", 3)
        self.sigmas = self.criterion_value.get("sigmas", 3.0)
        self.iterations = 0
        self.outliers = 0
        self.threshold = None
        self.mean_comp = streaming_algorithms.MeanStreamingComputation()
        self.std_comp = streaming_algorithms.StdDevStreamingComputation()

    def add_iteration(self, iteration):
        if not iteration.get("error"):
            duration = iteration["duration"]
            self.iterations += 1

            # NOTE(msdubov): First check if the current iteration is an outlier
            if ((self.iterations >= self.min_iterations and self.threshold and
                 duration > self.threshold)):
                self.outliers += 1

            # NOTE(msdubov): Then update the threshold value
            self.mean_comp.add(duration)
            self.std_comp.add(duration)
            if self.iterations >= 2:
                mean = self.mean_comp.result()
                std = self.std_comp.result()
                self.threshold = mean + self.sigmas * std

        self.success = self.outliers <= self.max_outliers
        return self.success

    def details(self):
        return (_("Maximum number of outliers %i <= %i - %s") %
                (self.outliers, self.max_outliers, self.status()))
@ -510,6 +510,7 @@ def run(argv, categories):
    try:
        utils.load_plugins("/opt/rally/plugins/")
        utils.load_plugins(os.path.expanduser("~/.rally/plugins/"))
        utils.import_modules_from_package("rally.plugins")

        validate_deprecated_args(argv, fn)
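As a side note on the plugin loading shown above: a hedged sketch of what an out-of-tree SLA criterion could look like after this split. The class name, option name, and semantics are hypothetical and not part of the commit; only the base class, the load paths, and the OPTION_NAME/CONFIG_SCHEMA pattern come from the code in this diff. Dropping such a file into ~/.rally/plugins/ (or /opt/rally/plugins/) should make it discoverable through SLA.get_by_name().

# Hypothetical example, not part of this commit: a custom SLA criterion
# following the same pattern as the in-tree plugins added below.
from rally.benchmark.sla import base
from rally.common.i18n import _


class MaxIterations(base.SLA):
    """Hypothetical criterion: fail once more than N iterations have run."""
    OPTION_NAME = "hypothetical_max_iterations"
    CONFIG_SCHEMA = {"type": "integer", "minimum": 1}

    def __init__(self, criterion_value):
        super(MaxIterations, self).__init__(criterion_value)
        self.count = 0

    def add_iteration(self, iteration):
        self.count += 1
        self.success = self.count <= self.criterion_value
        return self.success

    def details(self):
        return (_("Number of iterations %i <= %i - %s") %
                (self.count, self.criterion_value, self.status()))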
0   rally/plugins/common/sla/__init__.py   Normal file
82  rally/plugins/common/sla/failure_rate.py   Normal file
@ -0,0 +1,82 @@
# Copyright 2014: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.


"""
SLA (Service-level agreement) is a set of details for determining compliance
with contracted values such as maximum error rate or minimum response time.
"""

from rally.benchmark.sla import base
from rally.common.i18n import _
from rally import consts


class FailureRateDeprecated(base.SLA):
    """[Deprecated] Failure rate in percents."""
    OPTION_NAME = "max_failure_percent"
    CONFIG_SCHEMA = {"type": "number", "minimum": 0.0, "maximum": 100.0}

    def __init__(self, criterion_value):
        super(FailureRateDeprecated, self).__init__(criterion_value)
        self.errors = 0
        self.total = 0
        self.error_rate = 0.0

    def add_iteration(self, iteration):
        self.total += 1
        if iteration["error"]:
            self.errors += 1
        self.error_rate = self.errors * 100.0 / self.total
        self.success = self.error_rate <= self.criterion_value
        return self.success

    def details(self):
        return (_("Maximum failure rate %s%% <= %s%% - %s") %
                (self.criterion_value, self.error_rate, self.status()))


class FailureRate(base.SLA):
    """Failure rate minimum and maximum in percents."""
    OPTION_NAME = "failure_rate"
    CONFIG_SCHEMA = {
        "type": "object",
        "$schema": consts.JSON_SCHEMA,
        "properties": {
            "min": {"type": "number", "minimum": 0.0, "maximum": 100.0},
            "max": {"type": "number", "minimum": 0.0, "maximum": 100.0}
        }
    }

    def __init__(self, criterion_value):
        super(FailureRate, self).__init__(criterion_value)
        self.min_percent = self.criterion_value.get("min", 0)
        self.max_percent = self.criterion_value.get("max", 100)
        self.errors = 0
        self.total = 0
        self.error_rate = 0.0

    def add_iteration(self, iteration):
        self.total += 1
        if iteration["error"]:
            self.errors += 1
        self.error_rate = self.errors * 100.0 / self.total
        self.success = self.min_percent <= self.error_rate <= self.max_percent
        return self.success

    def details(self):
        return (_("Failure rate criteria %.2f%% <= %.2f%% <= %.2f%% - %s") %
                (self.min_percent, self.error_rate, self.max_percent,
                 self.status()))
44  rally/plugins/common/sla/iteraion_time.py   Normal file
@ -0,0 +1,44 @@
# Copyright 2014: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.


"""
SLA (Service-level agreement) is a set of details for determining compliance
with contracted values such as maximum error rate or minimum response time.
"""

from rally.benchmark.sla import base
from rally.common.i18n import _


class IterationTime(base.SLA):
    """Maximum time for one iteration in seconds."""
    OPTION_NAME = "max_seconds_per_iteration"
    CONFIG_SCHEMA = {"type": "number", "minimum": 0.0,
                     "exclusiveMinimum": True}

    def __init__(self, criterion_value):
        super(IterationTime, self).__init__(criterion_value)
        self.max_iteration_time = 0.0

    def add_iteration(self, iteration):
        if iteration["duration"] > self.max_iteration_time:
            self.max_iteration_time = iteration["duration"]
        self.success = self.max_iteration_time <= self.criterion_value
        return self.success

    def details(self):
        return (_("Maximum seconds per iteration %.2fs <= %.2fs - %s") %
                (self.max_iteration_time, self.criterion_value, self.status()))
48  rally/plugins/common/sla/max_average_duration.py   Normal file
@ -0,0 +1,48 @@
# Copyright 2014: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.


"""
SLA (Service-level agreement) is a set of details for determining compliance
with contracted values such as maximum error rate or minimum response time.
"""

from rally.benchmark.sla import base
from rally.common.i18n import _


class MaxAverageDuration(base.SLA):
    """Maximum average duration of one iteration in seconds."""
    OPTION_NAME = "max_avg_duration"
    CONFIG_SCHEMA = {"type": "number", "minimum": 0.0,
                     "exclusiveMinimum": True}

    def __init__(self, criterion_value):
        super(MaxAverageDuration, self).__init__(criterion_value)
        self.total_duration = 0.0
        self.iterations = 0
        self.avg = 0.0

    def add_iteration(self, iteration):
        if not iteration.get("error"):
            self.total_duration += iteration["duration"]
            self.iterations += 1
            self.avg = self.total_duration / self.iterations
        self.success = self.avg <= self.criterion_value
        return self.success

    def details(self):
        return (_("Average duration of one iteration %.2fs <= %.2fs - %s") %
                (self.avg, self.criterion_value, self.status()))
81  rally/plugins/common/sla/outliers.py   Normal file
@ -0,0 +1,81 @@
# Copyright 2014: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.


"""
SLA (Service-level agreement) is a set of details for determining compliance
with contracted values such as maximum error rate or minimum response time.
"""

from rally.benchmark.sla import base
from rally.common.i18n import _
from rally.common import streaming_algorithms
from rally import consts


class Outliers(base.SLA):
    """Limit the number of outliers (iterations that take too much time).

    The outliers are detected automatically using the computation of the mean
    and standard deviation (std) of the data.
    """
    OPTION_NAME = "outliers"
    CONFIG_SCHEMA = {
        "type": "object",
        "$schema": consts.JSON_SCHEMA,
        "properties": {
            "max": {"type": "integer", "minimum": 0},
            "min_iterations": {"type": "integer", "minimum": 3},
            "sigmas": {"type": "number", "minimum": 0.0,
                       "exclusiveMinimum": True}
        }
    }

    def __init__(self, criterion_value):
        super(Outliers, self).__init__(criterion_value)
        self.max_outliers = self.criterion_value.get("max", 0)
        # NOTE(msdubov): Having 3 as default is reasonable (need enough data).
        self.min_iterations = self.criterion_value.get("min_iterations", 3)
        self.sigmas = self.criterion_value.get("sigmas", 3.0)
        self.iterations = 0
        self.outliers = 0
        self.threshold = None
        self.mean_comp = streaming_algorithms.MeanStreamingComputation()
        self.std_comp = streaming_algorithms.StdDevStreamingComputation()

    def add_iteration(self, iteration):
        if not iteration.get("error"):
            duration = iteration["duration"]
            self.iterations += 1

            # NOTE(msdubov): First check if the current iteration is an outlier
            if ((self.iterations >= self.min_iterations and self.threshold and
                 duration > self.threshold)):
                self.outliers += 1

            # NOTE(msdubov): Then update the threshold value
            self.mean_comp.add(duration)
            self.std_comp.add(duration)
            if self.iterations >= 2:
                mean = self.mean_comp.result()
                std = self.std_comp.result()
                self.threshold = mean + self.sigmas * std

        self.success = self.outliers <= self.max_outliers
        return self.success

    def details(self):
        return (_("Maximum number of outliers %i <= %i - %s") %
                (self.outliers, self.max_outliers, self.status()))
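A standalone sketch, not part of the commit, of the thresholding rule used by Outliers above: an iteration counts as an outlier once its duration exceeds mean + sigmas * std of the previously observed durations, provided at least min_iterations have been seen. Plain statistics is used here instead of Rally's streaming helpers, purely to make the arithmetic visible (sample standard deviation is assumed).

# Worked example of the outlier threshold, for illustration only.
import statistics

durations = [3.1, 4.2, 3.6, 4.5, 2.8, 3.3, 4.1, 3.8, 4.3, 2.9]
sigmas = 3.0  # default used by the Outliers plugin above

mean = statistics.mean(durations)   # 3.66
std = statistics.stdev(durations)   # ~0.61
threshold = mean + sigmas * std     # ~5.50

# Durations such as 10.2 and 11.2 exceed this threshold, so they would be
# counted as outliers once enough iterations have been observed.
print(10.2 > threshold, 11.2 > threshold)  # True True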
@ -14,8 +14,6 @@
# under the License.


import jsonschema

from rally.benchmark.sla import base
from tests.unit import test

@ -71,28 +69,6 @@ class SLACheckerTestCase(test.TestCase):
                           "detail": "Task was aborted due to SLA failure(s)."}],
                         sla_checker.results())


class BaseSLATestCase(test.TestCase):

    def test_get_by_name(self):
        self.assertEqual(base.FailureRate, base.SLA.get_by_name("FailureRate"))

    def test_get_by_name_by_config_option(self):
        self.assertEqual(base.FailureRate,
                         base.SLA.get_by_name("failure_rate"))

    def test_validate(self):
        cnf = {"test_criterion": 42}
        base.SLA.validate(cnf)

    def test_validate_invalid_name(self):
        self.assertRaises(jsonschema.ValidationError,
                          base.SLA.validate, {"nonexistent": 42})

    def test_validate_invalid_type(self):
        self.assertRaises(jsonschema.ValidationError,
                          base.SLA.validate, {"test_criterion": 42.0})

    def test__format_result(self):
        name = "some_name"
        success = True
@ -101,228 +77,3 @@ class BaseSLATestCase(test.TestCase):
                          "success": success,
                          "detail": detail},
                         base._format_result(name, success, detail))


class FailureRateDeprecatedTestCase(test.TestCase):

    def test_result(self):
        sla1 = base.FailureRateDeprecated(75.0)
        sla2 = base.FailureRateDeprecated(25.0)
        # 50% failure rate
        for sla in [sla1, sla2]:
            sla.add_iteration({"error": ["error"]})
            sla.add_iteration({"error": []})
        self.assertTrue(sla1.result()["success"])  # 50% < 75.0%
        self.assertFalse(sla2.result()["success"])  # 50% > 25.0%
        self.assertEqual("Passed", sla1.status())
        self.assertEqual("Failed", sla2.status())

    def test_result_no_iterations(self):
        sla = base.FailureRateDeprecated(10.0)
        self.assertTrue(sla.result()["success"])


class FailureRateTestCase(test.TestCase):

    def test_config_schema(self):
        self.assertRaises(jsonschema.ValidationError,
                          base.IterationTime.validate,
                          {"failure_rate": {"min": -1}})
        self.assertRaises(jsonschema.ValidationError,
                          base.IterationTime.validate,
                          {"failure_rate": {"min": 100.1}})
        self.assertRaises(jsonschema.ValidationError,
                          base.IterationTime.validate,
                          {"failure_rate": {"max": -0.1}})
        self.assertRaises(jsonschema.ValidationError,
                          base.IterationTime.validate,
                          {"failure_rate": {"max": 101}})

    def test_result_min(self):
        sla1 = base.FailureRate({"min": 80.0})
        sla2 = base.FailureRate({"min": 60.5})
        # 75% failure rate
        for sla in [sla1, sla2]:
            sla.add_iteration({"error": ["error"]})
            sla.add_iteration({"error": []})
            sla.add_iteration({"error": ["error"]})
            sla.add_iteration({"error": ["error"]})
        self.assertFalse(sla1.result()["success"])  # 80.0% > 75.0%
        self.assertTrue(sla2.result()["success"])  # 60.5% < 75.0%
        self.assertEqual("Failed", sla1.status())
        self.assertEqual("Passed", sla2.status())

    def test_result_max(self):
        sla1 = base.FailureRate({"max": 25.0})
        sla2 = base.FailureRate({"max": 75.0})
        # 50% failure rate
        for sla in [sla1, sla2]:
            sla.add_iteration({"error": ["error"]})
            sla.add_iteration({"error": []})
        self.assertFalse(sla1.result()["success"])  # 25.0% < 50.0%
        self.assertTrue(sla2.result()["success"])  # 75.0% > 50.0%
        self.assertEqual("Failed", sla1.status())
        self.assertEqual("Passed", sla2.status())

    def test_result_min_max(self):
        sla1 = base.FailureRate({"min": 50, "max": 90})
        sla2 = base.FailureRate({"min": 5, "max": 20})
        sla3 = base.FailureRate({"min": 24.9, "max": 25.1})
        # 25% failure rate
        for sla in [sla1, sla2, sla3]:
            sla.add_iteration({"error": ["error"]})
            sla.add_iteration({"error": []})
            sla.add_iteration({"error": []})
            sla.add_iteration({"error": []})
        self.assertFalse(sla1.result()["success"])  # 25.0% < 50.0%
        self.assertFalse(sla2.result()["success"])  # 25.0% > 20.0%
        self.assertTrue(sla3.result()["success"])  # 24.9% < 25.0% < 25.1%
        self.assertEqual("Failed", sla1.status())
        self.assertEqual("Failed", sla2.status())
        self.assertEqual("Passed", sla3.status())

    def test_result_no_iterations(self):
        sla = base.FailureRate({"max": 10.0})
        self.assertTrue(sla.result()["success"])

    def test_add_iteration(self):
        sla = base.FailureRate({"max": 35.0})
        self.assertTrue(sla.add_iteration({"error": []}))
        self.assertTrue(sla.add_iteration({"error": []}))
        self.assertTrue(sla.add_iteration({"error": []}))
        self.assertTrue(sla.add_iteration({"error": ["error"]}))  # 33%
        self.assertFalse(sla.add_iteration({"error": ["error"]}))  # 40%


class IterationTimeTestCase(test.TestCase):
    def test_config_schema(self):
        properties = {
            "max_seconds_per_iteration": 0
        }
        self.assertRaises(jsonschema.ValidationError,
                          base.IterationTime.validate, properties)

    def test_result(self):
        sla1 = base.IterationTime(42)
        sla2 = base.IterationTime(3.62)
        for sla in [sla1, sla2]:
            sla.add_iteration({"duration": 3.14})
            sla.add_iteration({"duration": 6.28})
        self.assertTrue(sla1.result()["success"])  # 42 > 6.28
        self.assertFalse(sla2.result()["success"])  # 3.62 < 6.28
        self.assertEqual("Passed", sla1.status())
        self.assertEqual("Failed", sla2.status())

    def test_result_no_iterations(self):
        sla = base.IterationTime(42)
        self.assertTrue(sla.result()["success"])

    def test_add_iteration(self):
        sla = base.IterationTime(4.0)
        self.assertTrue(sla.add_iteration({"duration": 3.14}))
        self.assertTrue(sla.add_iteration({"duration": 2.0}))
        self.assertTrue(sla.add_iteration({"duration": 3.99}))
        self.assertFalse(sla.add_iteration({"duration": 4.5}))
        self.assertFalse(sla.add_iteration({"duration": 3.8}))


class MaxAverageDurationTestCase(test.TestCase):
    def test_config_schema(self):
        properties = {
            "max_avg_duration": 0
        }
        self.assertRaises(jsonschema.ValidationError,
                          base.MaxAverageDuration.validate, properties)

    def test_result(self):
        sla1 = base.MaxAverageDuration(42)
        sla2 = base.MaxAverageDuration(3.62)
        for sla in [sla1, sla2]:
            sla.add_iteration({"duration": 3.14})
            sla.add_iteration({"duration": 6.28})
        self.assertTrue(sla1.result()["success"])  # 42 > avg([3.14, 6.28])
        self.assertFalse(sla2.result()["success"])  # 3.62 < avg([3.14, 6.28])
        self.assertEqual("Passed", sla1.status())
        self.assertEqual("Failed", sla2.status())

    def test_result_no_iterations(self):
        sla = base.MaxAverageDuration(42)
        self.assertTrue(sla.result()["success"])

    def test_add_iteration(self):
        sla = base.MaxAverageDuration(4.0)
        self.assertTrue(sla.add_iteration({"duration": 3.5}))
        self.assertTrue(sla.add_iteration({"duration": 2.5}))
        self.assertTrue(sla.add_iteration({"duration": 5.0}))  # avg = 3.667
        self.assertFalse(sla.add_iteration({"duration": 7.0}))  # avg = 4.5
        self.assertTrue(sla.add_iteration({"duration": 1.0}))  # avg = 3.8


class OutliersTestCase(test.TestCase):

    def test_config_schema(self):
        base.Outliers.validate({"outliers": {"max": 0, "min_iterations": 5,
                                             "sigmas": 2.5}})
        self.assertRaises(jsonschema.ValidationError,
                          base.Outliers.validate,
                          {"outliers": {"max": -1}})
        self.assertRaises(jsonschema.ValidationError,
                          base.Outliers.validate,
                          {"outliers": {"max": 0, "min_iterations": 2}})
        self.assertRaises(jsonschema.ValidationError,
                          base.Outliers.validate,
                          {"outliers": {"max": 0, "sigmas": 0}})

    def test_result(self):
        sla1 = base.Outliers({"max": 1})
        sla2 = base.Outliers({"max": 2})
        iteration_durations = [3.1, 4.2, 3.6, 4.5, 2.8, 3.3, 4.1, 3.8, 4.3,
                               2.9, 10.2, 11.2, 3.4]  # outliers: 10.2, 11.2
        for sla in [sla1, sla2]:
            for d in iteration_durations:
                sla.add_iteration({"duration": d})
        self.assertFalse(sla1.result()["success"])  # 2 outliers > 1
        self.assertTrue(sla2.result()["success"])  # 2 outliers <= 2
        self.assertEqual("Failed", sla1.status())
        self.assertEqual("Passed", sla2.status())

    def test_result_large_sigmas(self):
        sla = base.Outliers({"max": 1, "sigmas": 5})
        iteration_durations = [3.1, 4.2, 3.6, 4.5, 2.8, 3.3, 4.1, 3.8, 4.3,
                               2.9, 10.2, 11.2, 3.4]
        for d in iteration_durations:
            sla.add_iteration({"duration": d})
        # NOTE(msdubov): No outliers registered since sigmas = 5 (not 2)
        self.assertTrue(sla.result()["success"])
        self.assertEqual("Passed", sla.status())

    def test_result_no_iterations(self):
        sla = base.Outliers({"max": 0})
        self.assertTrue(sla.result()["success"])

    def test_result_few_iterations_large_min_iterations(self):
        sla = base.Outliers({"max": 0, "min_iterations": 10})
        iteration_durations = [3.1, 4.2, 4.7, 3.6, 15.14, 2.8]
        for d in iteration_durations:
            sla.add_iteration({"duration": d})
        # NOTE(msdubov): The SLA does not fail because fewer than
        #                min_iterations=10 iterations have run
        self.assertTrue(sla.result()["success"])

    def test_result_few_iterations_small_min_iterations(self):
        sla = base.Outliers({"max": 0, "min_iterations": 5})
        iteration_durations = [3.1, 4.2, 4.7, 3.6, 15.14, 2.8]
        for d in iteration_durations:
            sla.add_iteration({"duration": d})
        # NOTE(msdubov): Now this SLA can fail with >= 5 iterations
        self.assertFalse(sla.result()["success"])

    def test_add_iteration(self):
        sla = base.Outliers({"max": 1})
        # NOTE(msdubov): One outlier in the first 11 iterations
        first_iterations = [3.1, 4.2, 3.6, 4.5, 2.8, 3.3, 4.1, 3.8, 4.3,
                            2.9, 10.2]
        for d in first_iterations:
            self.assertTrue(sla.add_iteration({"duration": d}))
        # NOTE(msdubov): The 12th iteration makes the SLA fail permanently
        self.assertFalse(sla.add_iteration({"duration": 11.2}))
        self.assertFalse(sla.add_iteration({"duration": 3.4}))
@ -24,6 +24,7 @@ from rally.deploy.engines import existing as existing_cloud
from rally.deploy import serverprovider
from rally.deploy.serverprovider.providers import existing as existing_servers
from rally import exceptions
from rally.plugins.common.sla import failure_rate
from tests.unit import test


@ -64,14 +65,14 @@ class InfoCommandsTestCase(test.TestCase):
        mock_get_scenario_by_name.assert_called_once_with(query)
        self.assertEqual(1, status)

    @mock.patch(SLA + ".get_by_name", return_value=sla_base.FailureRate)
    @mock.patch(SLA + ".get_by_name", return_value=failure_rate.FailureRate)
    def test_find_failure_rate_sla(self, mock_get_by_name):
        query = "failure_rate"
        status = self.info.find(query)
        mock_get_by_name.assert_called_once_with(query)
        self.assertIsNone(status)

    @mock.patch(SLA + ".get_by_name", return_value=sla_base.FailureRate)
    @mock.patch(SLA + ".get_by_name", return_value=failure_rate.FailureRate)
    def test_find_failure_rate_sla_by_class_name(self, mock_get_by_name):
        query = "FailureRate"
        status = self.info.find(query)
@ -113,7 +114,8 @@ class InfoCommandsTestCase(test.TestCase):
        mock_itersubclasses.assert_called_with(scenario_base.Scenario)
        self.assertIsNone(status)

    @mock.patch(UTILS + ".itersubclasses", return_value=[sla_base.FailureRate])
    @mock.patch(UTILS + ".itersubclasses",
                return_value=[failure_rate.FailureRate])
    def test_SLA(self, mock_itersubclasses):
        status = self.info.SLA()
        mock_itersubclasses.assert_called_with(sla_base.SLA)
0    tests/unit/plugins/common/sla/__init__.py   Normal file
136  tests/unit/plugins/common/sla/test_failure_rate.py   Normal file
@ -0,0 +1,136 @@
# Copyright 2014: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.


import jsonschema

from rally.plugins.common.sla import failure_rate
from tests.unit import test


class SLAPluginTestCase(test.TestCase):

    def test_get_by_name(self):
        self.assertEqual(failure_rate.FailureRate,
                         failure_rate.FailureRate.get_by_name("FailureRate"))

    def test_get_by_name_by_config_option(self):
        self.assertEqual(failure_rate.FailureRate,
                         failure_rate.FailureRate.get_by_name("failure_rate"))

    def test_validate(self):
        cnf = {"test_criterion": 42}
        failure_rate.base.SLA.validate(cnf)

    def test_validate_invalid_name(self):
        self.assertRaises(jsonschema.ValidationError,
                          failure_rate.FailureRate.validate,
                          {"nonexistent": 42})

    def test_validate_invalid_type(self):
        self.assertRaises(jsonschema.ValidationError,
                          failure_rate.FailureRate.validate,
                          {"test_criterion": 42.0})


class FailureRateDeprecatedTestCase(test.TestCase):

    def test_result(self):
        sla1 = failure_rate.FailureRateDeprecated(75.0)
        sla2 = failure_rate.FailureRateDeprecated(25.0)
        # 50% failure rate
        for sla in [sla1, sla2]:
            sla.add_iteration({"error": ["error"]})
            sla.add_iteration({"error": []})
        self.assertTrue(sla1.result()["success"])  # 50% < 75.0%
        self.assertFalse(sla2.result()["success"])  # 50% > 25.0%
        self.assertEqual("Passed", sla1.status())
        self.assertEqual("Failed", sla2.status())

    def test_result_no_iterations(self):
        sla = failure_rate.FailureRateDeprecated(10.0)
        self.assertTrue(sla.result()["success"])


class FailureRateTestCase(test.TestCase):

    def test_config_schema(self):
        self.assertRaises(jsonschema.ValidationError,
                          failure_rate.FailureRate.validate,
                          {"failure_rate": {"min": -1}})
        self.assertRaises(jsonschema.ValidationError,
                          failure_rate.FailureRate.validate,
                          {"failure_rate": {"min": 100.1}})
        self.assertRaises(jsonschema.ValidationError,
                          failure_rate.FailureRate.validate,
                          {"failure_rate": {"max": -0.1}})
        self.assertRaises(jsonschema.ValidationError,
                          failure_rate.FailureRate.validate,
                          {"failure_rate": {"max": 101}})

    def test_result_min(self):
        sla1 = failure_rate.FailureRate({"min": 80.0})
        sla2 = failure_rate.FailureRate({"min": 60.5})
        # 75% failure rate
        for sla in [sla1, sla2]:
            sla.add_iteration({"error": ["error"]})
            sla.add_iteration({"error": []})
            sla.add_iteration({"error": ["error"]})
            sla.add_iteration({"error": ["error"]})
        self.assertFalse(sla1.result()["success"])  # 80.0% > 75.0%
        self.assertTrue(sla2.result()["success"])  # 60.5% < 75.0%
        self.assertEqual("Failed", sla1.status())
        self.assertEqual("Passed", sla2.status())

    def test_result_max(self):
        sla1 = failure_rate.FailureRate({"max": 25.0})
        sla2 = failure_rate.FailureRate({"max": 75.0})
        # 50% failure rate
        for sla in [sla1, sla2]:
            sla.add_iteration({"error": ["error"]})
            sla.add_iteration({"error": []})
        self.assertFalse(sla1.result()["success"])  # 25.0% < 50.0%
        self.assertTrue(sla2.result()["success"])  # 75.0% > 50.0%
        self.assertEqual("Failed", sla1.status())
        self.assertEqual("Passed", sla2.status())

    def test_result_min_max(self):
        sla1 = failure_rate.FailureRate({"min": 50, "max": 90})
        sla2 = failure_rate.FailureRate({"min": 5, "max": 20})
        sla3 = failure_rate.FailureRate({"min": 24.9, "max": 25.1})
        # 25% failure rate
        for sla in [sla1, sla2, sla3]:
            sla.add_iteration({"error": ["error"]})
            sla.add_iteration({"error": []})
            sla.add_iteration({"error": []})
            sla.add_iteration({"error": []})
        self.assertFalse(sla1.result()["success"])  # 25.0% < 50.0%
        self.assertFalse(sla2.result()["success"])  # 25.0% > 20.0%
        self.assertTrue(sla3.result()["success"])  # 24.9% < 25.0% < 25.1%
        self.assertEqual("Failed", sla1.status())
        self.assertEqual("Failed", sla2.status())
        self.assertEqual("Passed", sla3.status())

    def test_result_no_iterations(self):
        sla = failure_rate.FailureRate({"max": 10.0})
        self.assertTrue(sla.result()["success"])

    def test_add_iteration(self):
        sla = failure_rate.FailureRate({"max": 35.0})
        self.assertTrue(sla.add_iteration({"error": []}))
        self.assertTrue(sla.add_iteration({"error": []}))
        self.assertTrue(sla.add_iteration({"error": []}))
        self.assertTrue(sla.add_iteration({"error": ["error"]}))  # 33%
        self.assertFalse(sla.add_iteration({"error": ["error"]}))  # 40%
52  tests/unit/plugins/common/sla/test_iteration_time.py   Normal file
@ -0,0 +1,52 @@
# Copyright 2014: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.


import jsonschema

from rally.plugins.common.sla import iteraion_time
from tests.unit import test


class IterationTimeTestCase(test.TestCase):
    def test_config_schema(self):
        properties = {
            "max_seconds_per_iteration": 0
        }
        self.assertRaises(jsonschema.ValidationError,
                          iteraion_time.IterationTime.validate, properties)

    def test_result(self):
        sla1 = iteraion_time.IterationTime(42)
        sla2 = iteraion_time.IterationTime(3.62)
        for sla in [sla1, sla2]:
            sla.add_iteration({"duration": 3.14})
            sla.add_iteration({"duration": 6.28})
        self.assertTrue(sla1.result()["success"])  # 42 > 6.28
        self.assertFalse(sla2.result()["success"])  # 3.62 < 6.28
        self.assertEqual("Passed", sla1.status())
        self.assertEqual("Failed", sla2.status())

    def test_result_no_iterations(self):
        sla = iteraion_time.IterationTime(42)
        self.assertTrue(sla.result()["success"])

    def test_add_iteration(self):
        sla = iteraion_time.IterationTime(4.0)
        self.assertTrue(sla.add_iteration({"duration": 3.14}))
        self.assertTrue(sla.add_iteration({"duration": 2.0}))
        self.assertTrue(sla.add_iteration({"duration": 3.99}))
        self.assertFalse(sla.add_iteration({"duration": 4.5}))
        self.assertFalse(sla.add_iteration({"duration": 3.8}))
53  tests/unit/plugins/common/sla/test_max_average_duration.py   Normal file
@ -0,0 +1,53 @@
# Copyright 2014: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.


import jsonschema

from rally.plugins.common.sla import max_average_duration
from tests.unit import test


class MaxAverageDurationTestCase(test.TestCase):
    def test_config_schema(self):
        properties = {
            "max_avg_duration": 0
        }
        self.assertRaises(jsonschema.ValidationError,
                          max_average_duration.MaxAverageDuration.validate,
                          properties)

    def test_result(self):
        sla1 = max_average_duration.MaxAverageDuration(42)
        sla2 = max_average_duration.MaxAverageDuration(3.62)
        for sla in [sla1, sla2]:
            sla.add_iteration({"duration": 3.14})
            sla.add_iteration({"duration": 6.28})
        self.assertTrue(sla1.result()["success"])  # 42 > avg([3.14, 6.28])
        self.assertFalse(sla2.result()["success"])  # 3.62 < avg([3.14, 6.28])
        self.assertEqual("Passed", sla1.status())
        self.assertEqual("Failed", sla2.status())

    def test_result_no_iterations(self):
        sla = max_average_duration.MaxAverageDuration(42)
        self.assertTrue(sla.result()["success"])

    def test_add_iteration(self):
        sla = max_average_duration.MaxAverageDuration(4.0)
        self.assertTrue(sla.add_iteration({"duration": 3.5}))
        self.assertTrue(sla.add_iteration({"duration": 2.5}))
        self.assertTrue(sla.add_iteration({"duration": 5.0}))  # avg = 3.667
        self.assertFalse(sla.add_iteration({"duration": 7.0}))  # avg = 4.5
        self.assertTrue(sla.add_iteration({"duration": 1.0}))  # avg = 3.8
91  tests/unit/plugins/common/sla/test_ouliers.py   Normal file
@ -0,0 +1,91 @@
# Copyright 2014: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.


import jsonschema

from rally.plugins.common.sla import outliers
from tests.unit import test


class OutliersTestCase(test.TestCase):

    def test_config_schema(self):
        outliers.Outliers.validate({"outliers": {"max": 0,
                                                 "min_iterations": 5,
                                                 "sigmas": 2.5}})
        self.assertRaises(jsonschema.ValidationError,
                          outliers.Outliers.validate,
                          {"outliers": {"max": -1}})
        self.assertRaises(jsonschema.ValidationError,
                          outliers.Outliers.validate,
                          {"outliers": {"max": 0, "min_iterations": 2}})
        self.assertRaises(jsonschema.ValidationError,
                          outliers.Outliers.validate,
                          {"outliers": {"max": 0, "sigmas": 0}})

    def test_result(self):
        sla1 = outliers.Outliers({"max": 1})
        sla2 = outliers.Outliers({"max": 2})
        iteration_durations = [3.1, 4.2, 3.6, 4.5, 2.8, 3.3, 4.1, 3.8, 4.3,
                               2.9, 10.2, 11.2, 3.4]  # outliers: 10.2, 11.2
        for sla in [sla1, sla2]:
            for d in iteration_durations:
                sla.add_iteration({"duration": d})
        self.assertFalse(sla1.result()["success"])  # 2 outliers > 1
        self.assertTrue(sla2.result()["success"])  # 2 outliers <= 2
        self.assertEqual("Failed", sla1.status())
        self.assertEqual("Passed", sla2.status())

    def test_result_large_sigmas(self):
        sla = outliers.Outliers({"max": 1, "sigmas": 5})
        iteration_durations = [3.1, 4.2, 3.6, 4.5, 2.8, 3.3, 4.1, 3.8, 4.3,
                               2.9, 10.2, 11.2, 3.4]
        for d in iteration_durations:
            sla.add_iteration({"duration": d})
        # NOTE(msdubov): No outliers registered since sigmas = 5 (not 2)
        self.assertTrue(sla.result()["success"])
        self.assertEqual("Passed", sla.status())

    def test_result_no_iterations(self):
        sla = outliers.Outliers({"max": 0})
        self.assertTrue(sla.result()["success"])

    def test_result_few_iterations_large_min_iterations(self):
        sla = outliers.Outliers({"max": 0, "min_iterations": 10})
        iteration_durations = [3.1, 4.2, 4.7, 3.6, 15.14, 2.8]
        for d in iteration_durations:
            sla.add_iteration({"duration": d})
        # NOTE(msdubov): The SLA does not fail because fewer than
        #                min_iterations=10 iterations have run
        self.assertTrue(sla.result()["success"])

    def test_result_few_iterations_small_min_iterations(self):
        sla = outliers.Outliers({"max": 0, "min_iterations": 5})
        iteration_durations = [3.1, 4.2, 4.7, 3.6, 15.14, 2.8]
        for d in iteration_durations:
            sla.add_iteration({"duration": d})
        # NOTE(msdubov): Now this SLA can fail with >= 5 iterations
        self.assertFalse(sla.result()["success"])

    def test_add_iteration(self):
        sla = outliers.Outliers({"max": 1})
        # NOTE(msdubov): One outlier in the first 11 iterations
        first_iterations = [3.1, 4.2, 3.6, 4.5, 2.8, 3.3, 4.1, 3.8, 4.3,
                            2.9, 10.2]
        for d in first_iterations:
            self.assertTrue(sla.add_iteration({"duration": d}))
        # NOTE(msdubov): The 12th iteration makes the SLA fail permanently
        self.assertFalse(sla.add_iteration({"duration": 11.2}))
        self.assertFalse(sla.add_iteration({"duration": 3.4}))