Merge "[CLI] Display task errors in human-friendly form"

This commit is contained in:
Jenkins
2016-04-07 21:50:33 +00:00
committed by Gerrit Code Review
2 changed files with 114 additions and 2 deletions

View File

@@ -327,7 +327,6 @@ class TaskCommands(object):
print(_("\nFor more details run:\nrally -vd task detailed %s")
% task["uuid"])
return 0
for result in task["results"]:
key = result["key"]
print("-" * 80)
@@ -343,7 +342,7 @@ class TaskCommands(object):
iterations_headers = ["iteration", "full duration"]
iterations_actions = []
output = []
task_errors = []
if iterations_data:
for i, atomic_name in enumerate(result["info"]["atomic"], 1):
action = "%i. %s" % (i, atomic_name)
@@ -382,6 +381,11 @@ class TaskCommands(object):
output.append(output_table)
output[idx].add_iteration(additive["data"])
if itr.get("error"):
task_errors.append(TaskCommands._format_task_error(itr))
self._print_task_errors(task_id, task_errors)
cols = plot.charts.MainStatsTable.columns
float_cols = cols[1:7]
formatters = dict(zip(float_cols,
@@ -749,3 +753,26 @@ class TaskCommands(object):
"connection": connection_string,
"name": parsed_obj.scheme
})
@staticmethod
def _print_task_errors(task_id, task_errors):
    """Print a header with the error count, then each error in turn.

    :param task_id: UUID of the task the errors belong to
    :param task_errors: list of (message, traceback) pairs as produced
                        by _format_task_error
    """
    header_text = "Task %s has %d error(s)" % (task_id, len(task_errors))
    print(cliutils.make_header(header_text))
    separator = "-" * 80
    for error_parts in task_errors:
        # Each entry is a tuple of printable parts; emit them on
        # separate lines, followed by a horizontal rule.
        print(*error_parts, sep="\n")
        print(separator)
@staticmethod
def _format_task_error(data):
    """Turn an iteration's error record into a printable pair.

    ``data["error"]`` is expected to be a sequence of up to three
    items: type, message and traceback. Any missing trailing items
    fall back to human-friendly placeholders.

    :param data: iteration result dict containing an "error" sequence
    :returns: tuple of ("<type>: <message>\n", traceback_text)
    """
    # Defaults used when the error record is shorter than 3 items.
    details = [_("Unknown type"),
               _("Rally hasn't caught anything yet"),
               _("No traceback available.")]
    # Overwrite defaults positionally with whatever data is present.
    for position, value in enumerate(data["error"][:3]):
        details[position] = value
    error_type, error_message, error_traceback = details
    summary = ("%(error_type)s: %(error_message)s\n"
               % {"error_type": error_type,
                  "error_message": error_message})
    return (summary, error_traceback)

View File

@@ -17,8 +17,10 @@ import copy
import datetime as dt
import os.path
import ddt
import mock
from rally.cli import cliutils
from rally.cli.commands import task
from rally import consts
from rally import exceptions
@@ -26,6 +28,7 @@ from tests.unit import fakes
from tests.unit import test
@ddt.ddt
class TaskCommandsTestCase(test.TestCase):
def setUp(self):
@@ -775,3 +778,85 @@ class TaskCommandsTestCase(test.TestCase):
"Please check your connection string."),
mock.call("\n")])
mock_task_exporter_get.assert_called_once_with("file-exporter")
# Verify that ``task detailed`` prints a human-friendly error section.
# Runs twice via ddt: once with a traceback present, once without
# (the 2-element error record must fall back to the default text).
@mock.patch("rally.cli.commands.task.sys.stdout")
@mock.patch("rally.cli.commands.task.api.Task")
@ddt.data({"error_type": "test_no_trace_type",
"error_message": "no_trace_error_message",
"error_traceback": None},
{"error_type": "test_error_type",
"error_message": "test_error_message",
"error_traceback": "test\nerror\ntraceback"})
@ddt.unpack
def test_show_task_errors_no_trace(self, mock_task, mock_stdout,
error_type, error_message,
error_traceback=None):
test_uuid = "test_task_id"
# Build the raw error record; the traceback element is appended
# only when provided, exercising the "short record" code path.
error_data = [error_type, error_message]
if error_traceback:
error_data.append(error_traceback)
# Minimal detailed-task payload with a single failed iteration
# carrying the error record above.
mock_task.get_detailed.return_value = {
"id": "task",
"uuid": test_uuid,
"status": "status",
"results": [{
"key": {
"name": "fake_name",
"pos": "fake_pos",
"kw": "fake_kw"
},
"info": {
"load_duration": 3.2,
"full_duration": 3.5,
"iterations_count": 1,
"iterations_failed": 1,
"atomic": {"foo": {}, "bar": {}}},
"iterations": [
{"duration": 0.9,
"idle_duration": 0.1,
"output": {"additive": [], "complete": []},
"atomic_actions": {"foo": 0.6, "bar": 0.7},
"error": error_data
},
]}
]}
self.task.detailed(test_uuid)
mock_task.get_detailed.assert_called_once_with(test_uuid,
extended_results=True)
# Expected error summary line and section header, mirroring
# TaskCommands._format_task_error / _print_task_errors output.
err_report = "%(error_type)s: %(error_message)s\n" % (
{"error_type": error_type, "error_message": error_message})
header = cliutils.make_header("Task %s has %d error(s)" %
(test_uuid, 1))
# Assert the full, ordered stdout stream: task banner, scenario
# info, error section (header + report + traceback-or-default),
# durations, and the trailing hints block.
mock_stdout.write.assert_has_calls([
mock.call("\n"), mock.call("-" * 80), mock.call("\n"),
mock.call("Task test_task_id: status"),
mock.call("\n"), mock.call("-" * 80), mock.call("\n"),
mock.call("\n"), mock.call("test scenario fake_name"),
mock.call("\n"), mock.call("args position fake_pos"),
mock.call("\n"), mock.call("args values:"),
mock.call("\n"), mock.call("\"fake_kw\""),
mock.call("\n"), mock.call("\n"),
mock.call(header), mock.call("\n"),
mock.call(err_report), mock.call("\n"),
# When no traceback was supplied, the command prints the
# placeholder text instead.
mock.call(error_traceback or "No traceback available."),
mock.call("\n"), mock.call("-" * 80), mock.call("\n"),
mock.call("\n"), mock.call("Load duration: 3.2"),
mock.call("\n"), mock.call("Full duration: 3.5"),
mock.call("\n"), mock.call("\nHINTS:"),
mock.call("\n"),
mock.call("* To plot HTML graphics with this data, run:"),
mock.call("\n"),
mock.call("\trally task report test_task_id --out output.html\n"),
mock.call("\n"), mock.call("* To generate a JUnit report, run:"),
mock.call("\n"),
mock.call("\trally task report test_task_id "
"--junit --out output.xml\n"),
mock.call("\n"),
mock.call("* To get raw JSON output of task results, run:"),
mock.call("\n"),
mock.call("\trally task results test_task_id\n"),
mock.call("\n")
])