diff --git a/rally/benchmark/context/foo.html b/rally/benchmark/context/foo.html
deleted file mode 100644
index 1e11017015..0000000000
--- a/rally/benchmark/context/foo.html
+++ /dev/null
@@ -1,396 +0,0 @@
[396 deleted lines: a stray copy of the "Rally | Benchmark Task Report" HTML page that had been checked in under rally/benchmark/context/; its markup did not survive extraction, so the hunk body is omitted]
diff --git a/rally/benchmark/processing/plot.py b/rally/benchmark/processing/plot.py
index 3a009355be..bdddf895bd 100644
--- a/rally/benchmark/processing/plot.py
+++ b/rally/benchmark/processing/plot.py
@@ -15,45 +15,82 @@
 
 import copy
 import json
-import os
-
-import mako.template
 
 from rally.benchmark.processing.charts import histogram as histo
 from rally.benchmark.processing import utils
+from rally.ui import utils as ui_utils
 
 
-def _prepare_data(data, reduce_rows=1000):
+def _prepare_data(data):
     durations = []
     idle_durations = []
     atomic_durations = {}
-    num_errors = 0
+    output = {}
+    output_errors = []
+    output_stacked = []
+    errors = []
 
-    for i in data["result"]:
-        # TODO(maretskiy): store error value and scenario output
+    # NOTE(maretskiy): We need this extra iteration to collect the full
+    # sets of atomic action and scenario output names before processing
+    # the data.
+    atomic_names = set()
+    output_names = set()
+    for r in data["result"]:
+        atomic_names.update(r["atomic_actions"].keys())
+        output_names.update(r["scenario_output"]["data"].keys())
 
-        if i["error"]:
-            num_errors += 1
+    for idx, r in enumerate(data["result"]):
+        # NOTE(maretskiy): An iteration can miss atomic actions or output
+        # values, so keep the rows consistent by filling gaps with zeroes.
+        if len(r["atomic_actions"]) < len(atomic_names):
+            for atomic_name in atomic_names:
+                r["atomic_actions"].setdefault(atomic_name, 0)
 
-        durations.append(i["duration"])
-        idle_durations.append(i["idle_duration"])
+        if len(r["scenario_output"]["data"]) < len(output_names):
+            for output_name in output_names:
+                r["scenario_output"]["data"].setdefault(output_name, 0)
 
-        for met, duration in i["atomic_actions"].items():
+        if r["scenario_output"]["errors"]:
+            output_errors.append((idx, r["scenario_output"]["errors"]))
+
+        for param, value in r["scenario_output"]["data"].items():
+            try:
+                output[param].append(value)
+            except KeyError:
+                output[param] = [value]
+
+        if r["error"]:
+            type_, message, traceback = r["error"]
+            errors.append({"iteration": idx,
+                           "type": type_,
+                           "message": message,
+                           "traceback": traceback})
+
+        durations.append(r["duration"])
+        idle_durations.append(r["idle_duration"])
+
+        for met, duration in r["atomic_actions"].items():
             try:
                 atomic_durations[met].append(duration)
             except KeyError:
                 atomic_durations[met] = [duration]
 
-    for k, v in atomic_durations.items():
-        atomic_durations[k] = utils.compress(v, limit=reduce_rows)
+    for k, v in output.iteritems():
+        output_stacked.append({"key": k, "values": utils.compress(v)})
+
+    for k, v in atomic_durations.iteritems():
+        atomic_durations[k] = utils.compress(v)
 
     return {
         "total_durations": {
-            "duration": utils.compress(durations, limit=reduce_rows),
-            "idle_duration": utils.compress(idle_durations,
-                                            limit=reduce_rows)},
+            "duration": utils.compress(durations),
+            "idle_duration": utils.compress(idle_durations)},
         "atomic_durations": atomic_durations,
-        "num_errors": num_errors,
+        "output": output_stacked,
+        "output_errors": output_errors,
+        "errors": errors,
+        "sla": data["sla"],
+        "duration": data["duration"],
     }
@@ -79,7 +116,7 @@
     return {
         "pie": [
             {"key": "success", "value": len(histogram_data)},
-            {"key": "errors", "value": data["num_errors"]},
+            {"key": "errors", "value": len(data["errors"])},
         ],
         "iter": stacked_area,
         "histogram": [
@@ -214,14 +251,14 @@ def _get_atomic_action_durations(result):
 def _process_results(results):
     output = []
     for result in results:
-        table_cols = ["action",
-                      "min (sec)",
-                      "avg (sec)",
-                      "max (sec)",
+        table_cols = ["Action",
+                      "Min (sec)",
+                      "Avg (sec)",
+                      "Max (sec)",
                       "90 percentile",
                       "95 percentile",
-                      "success",
-                      "count"]
+                      "Success",
+                      "Count"]
         table_rows = _get_atomic_action_durations(result)
         name, kw, pos = (result["key"]["name"],
                          result["key"]["kw"],
                          result["key"]["pos"])
@@ -239,15 +276,16 @@
             "atomic": _process_atomic(result, data),
             "table_cols": table_cols,
             "table_rows": table_rows,
+            "output": data["output"],
+            "output_errors": data["output_errors"],
+            "errors": data["errors"],
+            "total_duration": data["duration"],
+            "sla": data["sla"],
         })
 
     return sorted(output, key=lambda r: "%s%s" % (r["cls"], r["name"]))
 
 
 def plot(results):
     data = _process_results(results)
-
-    template_file = os.path.join(os.path.dirname(__file__),
-                                 "src", "index.mako")
-    with open(template_file) as index:
-        template = mako.template.Template(index.read())
-    return template.render(data=json.dumps(data))
+    template = ui_utils.get_template("task/report.mako")
+    return template.render(data=json.dumps(data))
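
plot() now resolves its template through rally.ui.utils, a module this diff does not include. A minimal sketch of what its get_template() helper plausibly looks like; the function name and argument come from the call above, while the lookup details are assumptions based on standard Mako usage:

    # rally/ui/utils.py -- hypothetical sketch, not the actual module
    import os

    import mako.lookup

    TEMPLATE_DIR = os.path.join(os.path.dirname(__file__), "templates")

    # A shared TemplateLookup is what lets a child template resolve
    # <%inherit file="/base.mako"/> against rally/ui/templates/.
    _lookup = mako.lookup.TemplateLookup(directories=[TEMPLATE_DIR])


    def get_template(path):
        # e.g. get_template("task/report.mako")
        return _lookup.get_template(path)
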
diff --git a/rally/benchmark/utils.py b/rally/benchmark/utils.py
index d84fdd6688..527acd6c35 100644
--- a/rally/benchmark/utils.py
+++ b/rally/benchmark/utils.py
@@ -134,7 +134,7 @@ def wait_for_delete(resource, update_resource=None, timeout=60,
 
 
 def format_exc(exc):
-    return [str(type(exc)), str(exc), traceback.format_exc()]
+    return [exc.__class__.__name__, str(exc), traceback.format_exc()]
 
 
 def infinite_run_args_generator(args_func):
diff --git a/rally/cmd/commands/task.py b/rally/cmd/commands/task.py
index 6906aa8196..15dfd39f9d 100644
--- a/rally/cmd/commands/task.py
+++ b/rally/cmd/commands/task.py
@@ -32,6 +32,7 @@ from rally.cmd import envutils
 from rally import db
 from rally import exceptions
 from rally.i18n import _
+from rally.objects import task
 from rally.openstack.common import cliutils as common_cliutils
 from rally.orchestrator import api
 from rally import utils as rutils
@@ -313,9 +314,9 @@
 
         :param task_id: Task uuid
         """
-        results = map(lambda x: {"key": x["key"], 'result': x['data']['raw'],
+        results = map(lambda x: {"key": x["key"], "result": x["data"]["raw"],
                                  "sla": x["data"]["sla"]},
-                      db.task_result_get_all_by_uuid(task_id))
+                      task.Task.get(task_id).get_results())
 
         if results:
             print(json.dumps(results, sort_keys=True, indent=4))
@@ -350,8 +351,10 @@
         :param open_it: bool, whether to open output file in web browser
         """
         results = map(lambda x: {"key": x["key"],
-                                 "result": x["data"]["raw"]},
-                      db.task_result_get_all_by_uuid(task_id))
+                                 "sla": x["data"]["sla"],
+                                 "result": x["data"]["raw"],
+                                 "duration": x["data"]["scenario_duration"]},
+                      task.Task.get(task_id).get_results())
 
         if out:
             out = os.path.expanduser(out)
         output_file = out or ("%s.html" % task_id)
@@ -403,20 +406,19 @@
 
         :param task_id: Task uuid.
         :returns: Number of failed criteria.
         """
-        task = db.task_result_get_all_by_uuid(task_id)
+        results = task.Task.get(task_id).get_results()
         failed_criteria = 0
-        results = []
-        for result in task:
+        data = []
+        for result in results:
             key = result["key"]
             for sla in result["data"]["sla"]:
                 sla["benchmark"] = key["name"]
                 sla["pos"] = key["pos"]
                 failed_criteria += 0 if sla['success'] else 1
-                results.append(sla if tojson else rutils.Struct(**sla))
+                data.append(sla if tojson else rutils.Struct(**sla))
         if tojson:
-            print(json.dumps(results))
+            print(json.dumps(data))
         else:
-            common_cliutils.print_list(results, ('benchmark', 'pos',
-                                                 'criterion', 'success',
-                                                 'detail'))
+            common_cliutils.print_list(data, ("benchmark", "pos", "criterion",
+                                              "success", "detail"))
         return failed_criteria
diff --git a/rally/objects/task.py b/rally/objects/task.py
index 20f9b78b39..557fefa707 100644
--- a/rally/objects/task.py
+++ b/rally/objects/task.py
@@ -53,6 +53,9 @@ class Task(object):
                      'status': consts.TaskStatus.FAILED,
                      'verification_log': json.dumps(log)})
 
+    def get_results(self):
+        return db.task_result_get_all_by_uuid(self.task["uuid"])
+
     def append_results(self, key, value):
         db.task_result_create(self.task['uuid'], key, value)
diff --git a/rally/ui/__init__.py b/rally/ui/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
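
Two behavioral notes on the hunks above. First, the format_exc() change in rally/benchmark/utils.py only affects the first element of the returned triple; on Python 2, which this code targets, the difference is roughly:

    try:
        raise ValueError("bad input")
    except ValueError as exc:
        str(type(exc))          # "<type 'exceptions.ValueError'>"  (old first element)
        exc.__class__.__name__  # "ValueError"                      (new first element)

Second, the new Task.get_results() lets the CLI commands above drop their direct db calls; a minimal usage sketch (task_uuid is a placeholder for a real task uuid):

    from rally.objects import task

    # Each result still carries the same "key"/"data" dicts that
    # db.task_result_get_all_by_uuid() returned; the object layer just
    # hides the db module from callers.
    for res in task.Task.get(task_uuid).get_results():
        print("%s: %s" % (res["key"]["name"], res["data"]["sla"]))
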
diff --git a/rally/ui/templates/base.mako b/rally/ui/templates/base.mako
new file mode 100644
index 0000000000..3cd9f33754
--- /dev/null
+++ b/rally/ui/templates/base.mako
@@ -0,0 +1,49 @@
[49 added lines whose HTML markup did not survive extraction; the surviving fragments show a shared page skeleton that sets the page title to "Rally | <%block name="title_text"/>", pulls in <%block name="libs"/>, renders a header bar reading "Rally <%block name="header_text"/>" and places <%block name="content"/> inside the main container. Since report.mako below also overrides html_attr and body_attr blocks, the skeleton evidently exposes those hooks on its <html> and <body> tags as well]
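
base.mako carries no logic of its own; child templates inherit it and override its named blocks. A self-contained sketch of that Mako mechanism, using put_string() with inline stand-ins for base.mako and report.mako (the block names match the ones above; the surrounding text is invented):

    import mako.lookup

    lookup = mako.lookup.TemplateLookup()
    lookup.put_string("/base.mako", """\
    Rally | <%block name="title_text"/>
    == Rally <%block name="header_text"/> ==
    <%block name="content"/>""")
    lookup.put_string("/child.mako", """\
    <%inherit file="/base.mako"/>
    <%block name="title_text">Benchmark Task Report</%block>
    <%block name="header_text">benchmark results</%block>
    <%block name="content">...scenario tables...</%block>""")

    # Rendering the child produces the parent's layout with the child's
    # block content substituted in.
    print(lookup.get_template("/child.mako").render())
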
diff --git a/rally/benchmark/processing/src/index.mako b/rally/ui/templates/task/report.mako
similarity index 71%
rename from rally/benchmark/processing/src/index.mako
rename to rally/ui/templates/task/report.mako
index 60ae5c81a6..4ad654aca8 100644
--- a/rally/benchmark/processing/src/index.mako
+++ b/rally/ui/templates/task/report.mako
@@ -1,15 +1,21 @@
[most of this template's markup did not survive extraction; the recoverable changes are:
 * the standalone <head> boilerplate is replaced by inheritance from the new base template:
     ## -*- coding: utf-8 -*-
     <%inherit file="/base.mako"/>
   with block overrides html_attr -> ng-app="BenchmarkApp", title_text -> "Benchmark Task
   Report", libs -> the chart/AngularJS <script> includes, body_attr ->
   ng-controller="BenchmarkController", header_text -> "benchmark results" and content ->
   the report body; the old hard-coded page header and the "Failed to render scenario data"
   fallback are dropped
 * each scenario heading {{scenario.cls}}.{{scenario.name}} gains a duration suffix
   ({{scenario.total_duration | number:2}}s) and an SLA summary line "• {{renderTotal()}}"
 * the results table gets a "Table for task results" caption, and a new "Service-level
   agreement" table is added with Criterion / Detail / Success columns bound to
   {{row.criterion}}, {{row.detail}} and {{row.success}}
 * the "Charts for the Total Duration" heading becomes "Charts for the Total durations"
 * a final hunk at @@ -364,7 +390,7 @@ adjusts the end of the page (its content is unrecoverable)]
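
For reference, the raw record layout that the reworked _prepare_data() consumes can be read directly off the code above. An illustrative record (every value here is invented, and scenario_output["errors"] may well be a list rather than a string):

    data = {
        "sla": [{"criterion": "max_seconds_per_iteration",  # hypothetical criterion
                 "success": True, "detail": ""}],
        "duration": 42.1,  # whole-run wall time; "scenario_duration" in the db row
        "result": [{
            "duration": 1.42,
            "idle_duration": 0.07,
            # the [name, message, traceback] triple produced by format_exc()
            # above; empty for successful iterations
            "error": ["ValueError", "bad flavor", "Traceback (most recent call last): ..."],
            "atomic_actions": {"nova.boot_server": 1.2, "nova.delete_server": 0.9},
            "scenario_output": {"data": {"max_cpu_util": 87.5}, "errors": ""},
        }],
    }
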