Add aborted and broken flags to sla checker

"aborted" flag will be used for saving information about situation when
task was stopped due to "abort-on-sla" option.

"broken" flag will be used for saving information about situtaion when
something gone wrong in context or runner.

Also, this patch removes two unstable scenarios.

Change-Id: I9e0c16c48af3643b15c8f5a241535bfb4540332c
Andrey Kurilin 2015-04-11 01:54:11 +03:00
parent b3b126dd77
commit c0082aa15e
2 changed files with 78 additions and 48 deletions
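For orientation before the diffs: both flags surface through
`rally task sla_check --json` as extra result entries with their own
criterion names. The sketch below shows the entry shape that the
functional tests in this commit assert; the helper name sla_entry is
hypothetical and used for illustration only, it is not part of Rally.

    # Illustration only: the entry shape asserted by the tests below.
    # sla_entry is a hypothetical helper, not Rally's API.
    def sla_entry(benchmark, criterion, detail, pos=0, status="FAIL"):
        return {"benchmark": benchmark, "criterion": criterion,
                "detail": detail, "pos": pos, "status": status}

    # "aborted": the task was stopped by --abort-on-sla-failure.
    sla_entry("Dummy.dummy_exception", "aborted_on_sla",
              "Task was aborted due to SLA failure(s).")

    # "broken": a context or the runner failed; the detail text varies,
    # which is why the tests match it with mock.ANY.
    sla_entry("Dummy.dummy", "something_went_wrong", "<failure details>")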

@@ -991,54 +991,6 @@
         failure_rate:
           max: 0

-    -
-      args:
-        flavor:
-          name: "m1.tiny"
-        image:
-          name: "from_context_uploaded"
-      runner:
-        type: "constant"
-        times: 1
-        concurrency: 1
-      context:
-        users:
-          tenants: 1
-          users_per_tenant: 1
-        images:
-          image_name: "from_context_uploaded"
-          image_url: "http://download.cirros-cloud.net/0.3.1/cirros-0.3.1-x86_64-disk.img"
-          image_type: "qcow2"
-          image_container: "bare"
-          images_per_tenant: 1
-      sla:
-        failure_rate:
-          max: 0
-
-    -
-      args:
-        flavor:
-          name: "m1.tiny"
-        image:
-          regex: "^from_context_uploaded$"
-      runner:
-        type: "constant"
-        times: 1
-        concurrency: 1
-      context:
-        users:
-          tenants: 1
-          users_per_tenant: 1
-        images:
-          image_name: "from_context_uploaded"
-          image_url: "http://download.cirros-cloud.net/0.3.1/cirros-0.3.1-x86_64-disk.img"
-          image_type: "qcow2"
-          image_container: "bare"
-          images_per_tenant: 1
-      sla:
-        failure_rate:
-          max: 0
-
     -
       args:
         flavor:

@@ -575,3 +575,81 @@ class SLATestCase(unittest.TestCase):
         ]
         data = rally("task sla_check --json", getjson=True)
         self.assertEqual(expected, data)
+
+
+class SLAExtraFlagsTestCase(unittest.TestCase):
+
+    def test_abort_on_sla_fail(self):
+        rally = utils.Rally()
+        cfg = {
+            "Dummy.dummy_exception": [
+                {
+                    "args": {},
+                    "runner": {
+                        "type": "constant",
+                        "times": 5,
+                        "concurrency": 5
+                    },
+                    "sla": {
+                        "failure_rate": {"max": 0}
+                    }
+                }
+            ]}
+        config = utils.TaskConfig(cfg)
+        rally("task start --task %s --abort-on-sla-failure" % config.filename)
+        expected = [
+            {"benchmark": "Dummy.dummy_exception",
+             "criterion": "aborted_on_sla",
+             "detail": "Task was aborted due to SLA failure(s).",
+             "pos": 0, "status": "FAIL"},
+            {"benchmark": "Dummy.dummy_exception",
+             "criterion": "failure_rate",
+             "detail": mock.ANY,
+             "pos": 0, "status": "FAIL"}
+        ]
+        try:
+            rally("task sla_check --json", getjson=True)
+        except utils.RallyCmdError as expected_error:
+            self.assertEqual(json.loads(expected_error.output), expected)
+        else:
+            self.fail("`rally task sla_check` command should return non-zero "
+                      "exit code")
+
+    def _test_broken_context(self, runner):
+        rally = utils.Rally()
+        cfg = {
+            "Dummy.dummy": [
+                {
+                    "args": {},
+                    "runner": runner,
+                    "context": {
+                        "dummy_context": {"fail_setup": True}
+                    }
+                }
+            ]}
+        config = utils.TaskConfig(cfg)
+        rally("task start --task %s" % config.filename)
+        expected = [
+            {"benchmark": "Dummy.dummy",
+             "criterion": "something_went_wrong",
+             "detail": mock.ANY,
+             "pos": 0, "status": "FAIL"}
+        ]
+        try:
+            rally("task sla_check --json", getjson=True)
+        except utils.RallyCmdError as expected_error:
+            self.assertEqual(json.loads(expected_error.output), expected)
+        else:
+            self.fail("`rally task sla_check` command should return non-zero "
+                      "exit code")
+
+    def test_broken_context_with_constant_runner(self):
+        self._test_broken_context({"type": "constant",
+                                   "times": 5,
+                                   "concurrency": 5})
+
+    def test_broken_context_with_rps_runner(self):
+        self._test_broken_context({"type": "rps",
+                                   "times": 5,
+                                   "rps": 3,
+                                   "timeout": 6})