
This patch adds a UI templates directory and utils that serve and unify HTML generation, along with fixes and improvements for the HTML task report. In this patch:

* Base mako templates (package rally.ui)
* Rework tests/ci/rally-gate templates to use the base template with a generic header and styles
* Show scenario errors (if any) in a task report tab
* Show scenario output (if any) in a task report tab
* Show SLA data in the Overview tab
* Show the total scenario duration value (after the scenario name, above the tabs)
* On an iteration error, save the exception class name in the database instead of its repr()
* Prevent the layout from breaking and show a proper message if the JS libs cannot be loaded for some reason
* Fix bug 1387661 - the bug is caused by wrong input JSON data generated by plot.py. This happens when some atomic actions data is missing (as a result of scenario errors), so the iterations end up with different atomic action sets. The fix preserves atomic actions integrity by adding the missing atomic actions with a 0 value (see the sketch below).
* Fix: if a nonexistent task uuid is passed to the `task report' command, a proper exception is raised

Closes-Bug: 1387661
Change-Id: I4bcbf86e6fad844e6752306eb6c1ccfefa6c6909
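A minimal sketch of the bug 1387661 padding idea, assuming each iteration result carries an "atomic_actions" mapping of action name to duration (the helper name pad_atomic_actions is hypothetical; the actual change lives in plot.py):

def pad_atomic_actions(iterations):
    # Collect the union of atomic action names across all iterations.
    all_names = set()
    for itr in iterations:
        all_names.update(itr["atomic_actions"])
    # Pad each iteration with a 0 duration for any action it is missing,
    # so every iteration exposes the same atomic action set.
    for itr in iterations:
        for name in all_names:
            itr["atomic_actions"].setdefault(name, 0)
    return iterations

# Example: the first iteration failed before "delete" ran.
data = [{"atomic_actions": {"create": 1.2}},
        {"atomic_actions": {"create": 1.1, "delete": 0.7}}]
pad_atomic_actions(data)
# Both iterations now contain "create" and "delete" keys, keeping the
# report's input JSON consistent across iterations.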
136 lines
4.7 KiB
Python
# Copyright 2013: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
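"""Functional tests for the ``rally task`` CLI commands."""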

import os
import unittest

import mock

from tests.functional import utils


class TaskTestCase(unittest.TestCase):

    def _get_sample_task_config(self):
        return {
            "Dummy.dummy_random_fail_in_atomic": [
                {
                    "runner": {
                        "type": "constant",
                        "times": 100,
                        "concurrency": 5
                    }
                }
            ]
        }

    def test_status(self):
        rally = utils.Rally()
        cfg = self._get_sample_task_config()
        config = utils.TaskConfig(cfg)
        rally("task start --task %s" % config.filename)
        self.assertIn("finished", rally("task status"))

    def test_detailed(self):
        rally = utils.Rally()
        cfg = self._get_sample_task_config()
        config = utils.TaskConfig(cfg)
        rally("task start --task %s" % config.filename)
        detailed = rally("task detailed")
        self.assertIn("Dummy.dummy_random_fail_in_atomic", detailed)
        self.assertIn("dummy_fail_test (2)", detailed)
        detailed_iterations_data = rally("task detailed --iterations-data")
        self.assertIn("2. dummy_fail_test (2)", detailed_iterations_data)

    def test_results(self):
        rally = utils.Rally()
        cfg = self._get_sample_task_config()
        config = utils.TaskConfig(cfg)
        rally("task start --task %s" % config.filename)
        self.assertIn("result", rally("task results"))

    def test_report(self):
        rally = utils.Rally()
        cfg = self._get_sample_task_config()
        config = utils.TaskConfig(cfg)
        html_file = "/tmp/test_plot.html"
        rally("task start --task %s" % config.filename)
        # Remove any stale report so the assertion below cannot pass
        # because of a leftover file from a previous run.
        if os.path.exists(html_file):
            os.remove(html_file)
        rally("task report --out %s" % html_file)
        self.assertTrue(os.path.exists(html_file))

    def test_delete(self):
        rally = utils.Rally()
        cfg = self._get_sample_task_config()
        config = utils.TaskConfig(cfg)
        rally("task start --task %s" % config.filename)
        self.assertIn("finished", rally("task status"))
        rally("task delete")
        self.assertNotIn("finished", rally("task list"))

    # NOTE(oanufriev): Not implemented
    def test_abort(self):
        pass


class SLATestCase(unittest.TestCase):

    def _get_sample_task_config(self, max_seconds_per_iteration=4,
                                max_failure_percent=0):
        return {
            "KeystoneBasic.create_and_list_users": [
                {
                    "args": {
                        "name_length": 10
                    },
                    "runner": {
                        "type": "constant",
                        "times": 5,
                        "concurrency": 5
                    },
                    "sla": {
                        "max_seconds_per_iteration": max_seconds_per_iteration,
                        "max_failure_percent": max_failure_percent,
                    }
                }
            ]
        }

    def test_sla_fail(self):
        rally = utils.Rally()
        cfg = self._get_sample_task_config(max_seconds_per_iteration=0.001)
        config = utils.TaskConfig(cfg)
        rally("task start --task %s" % config.filename)
        self.assertRaises(utils.RallyCmdError, rally, "task sla_check")

    def test_sla_success(self):
        rally = utils.Rally()
        config = utils.TaskConfig(self._get_sample_task_config())
        rally("task start --task %s" % config.filename)
        rally("task sla_check")
        expected = [
            {"benchmark": "KeystoneBasic.create_and_list_users",
             "criterion": "max_seconds_per_iteration",
             "detail": mock.ANY,
             "pos": 0, "success": True},
            {"benchmark": "KeystoneBasic.create_and_list_users",
             "criterion": "max_failure_percent",
             "detail": mock.ANY,
             "pos": 0, "success": True},
        ]
        data = rally("task sla_check --json", getjson=True)
        self.assertEqual(expected, data)
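

# NOTE: hypothetical entry point so the module can be run directly during
# development; rally's CI normally drives these tests via its own runner.
if __name__ == "__main__":
    unittest.main()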