# Copyright 2014: Mirantis Inc.
# All Rights Reserved.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#        http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

import json

import mock

from rally.benchmark.processing import plot
from tests.unit import test

PLOT = "rally.benchmark.processing.plot."
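# NOTE: patch targets are built from this module-path prefix so that names
# are replaced where plot.py looks them up (in the plot module's namespace),
# not where they were originally defined.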


class PlotTestCase(test.TestCase):
    @mock.patch(PLOT + "ui_utils")
    @mock.patch(PLOT + "_process_results")
    def test_plot(self, mock_proc_results, mock_utils):
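        # plot.plot() should render the report template with the processed
        # task data; both template lookup and result processing are mocked
        # out here, so only the glue logic is exercised.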
        mock_render = mock.Mock(return_value="plot_html")
        mock_utils.get_template = mock.Mock(
            return_value=mock.Mock(render=mock_render))
        task_data = [{"name": "a"}, {"name": "b"}]
        task_source = "JSON"
        mock_proc_results.return_value = (task_source, task_data)

        result = plot.plot(["abc"])

        self.assertEqual(result, "plot_html")
        mock_render.assert_called_once_with(
            data=json.dumps(task_data),
            source=json.dumps(task_source)
        )
        mock_utils.get_template.assert_called_once_with("task/report.mako")

    def test__task_json(self):
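        # Judging by the expected strings below, _task_json appears to
        # pretty-print the task source with a 2-space indent and sorted
        # keys ("bar" precedes "foo").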
        self.assertRaises(TypeError, plot._task_json)
        self.assertRaises(AttributeError, plot._task_json, [])
        self.assertEqual(plot._task_json({"foo": ["a", "b"]}),
                         '{\n  "foo": [\n    "a", \n    "b"\n  ]\n}')
        self.assertEqual(plot._task_json({"foo": ["a", "b"], "bar": ["c"]}),
                         ('{\n  "bar": [\n    "c"\n  ],'
                          '\n  "foo": [\n    "a", \n    "b"\n  ]\n}'))

    @mock.patch(PLOT + "_task_json")
    @mock.patch(PLOT + "_prepare_data")
    @mock.patch(PLOT + "_process_atomic")
    @mock.patch(PLOT + "_get_atomic_action_durations")
    @mock.patch(PLOT + "_process_main_duration")
    def test__process_results(self, mock_main_duration, mock_get_atomic,
                              mock_atomic, mock_prepare, mock_task_json):
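        # mock.patch decorators apply bottom-up, so the mock for the last
        # decorator (_process_main_duration) arrives as the first argument.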
        sla = [{"success": True}]
        result = ["iter_1", "iter_2"]
        iterations = len(result)
        kw = {"runner": {"type": "foo_runner"}}
        result_ = lambda i: {
            "key": {"pos": i,
                    "name": "Class.method",
                    "kw": kw},
            "result": result,
            "sla": sla}
        results = [result_(i) for i in (0, 1, 2)]
        table_cols = ["Action",
                      "Min (sec)",
                      "Avg (sec)",
                      "Max (sec)",
                      "90 percentile",
                      "95 percentile",
                      "Success",
                      "Count"]
        atomic_durations = [["atomic_1"], ["atomic_2"]]
        mock_prepare.side_effect = lambda i: {"errors": "errors_list",
                                              "output": [],
                                              "output_errors": [],
                                              "sla": i["sla"],
                                              "load_duration": 1234.5,
                                              "full_duration": 6789.1}
        mock_main_duration.return_value = "main_duration"
        mock_get_atomic.return_value = atomic_durations
        mock_atomic.return_value = "main_atomic"
        mock_task_json.return_value = "JSON"

        source, scenarios = plot._process_results(results)

        source_dict = {"Class.method": [kw] * len(results)}
        mock_task_json.assert_called_with(source_dict)
        self.assertEqual(source, "JSON")

        results = sorted(results, key=lambda r: "%s%s" % (r["key"]["name"],
                                                          r["key"]["pos"]))
        for i, r in enumerate(results):
            config = json.dumps({r["key"]["name"]: [r["key"]["kw"]]}, indent=2)
            pos = int(r["key"]["pos"])
            cls = r["key"]["name"].split(".")[0]
            met = r["key"]["name"].split(".")[1]
            name = "%s%s" % (met, (pos and " [%d]" % (pos + 1) or ""))
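            # The `pos and X or ""` above is the old-style conditional:
            # scenarios at pos > 0 get an " [N]" suffix (e.g. "method [2]"
            # for pos == 1), while pos 0 keeps the bare method name.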
            self.assertEqual(scenarios[i], {
                "cls": cls,
                "pos": r["key"]["pos"],
                "met": met,
                "name": name,
                "config": config,
                "iterations": mock_main_duration.return_value,
                "atomic": mock_atomic.return_value,
                "table_cols": table_cols,
                "table_rows": atomic_durations,
                "errors": "errors_list",
                "output": [],
                "output_errors": [],
                "runner": "foo_runner",
                "sla": sla,
                "sla_success": True,
                "iterations_num": iterations,
                "load_duration": 1234.5,
                "full_duration": 6789.1
            })

    def test__process_main_time(self):
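        # _process_main_duration() is expected to summarize per-iteration
        # durations as "pie" (success/error counts), "iter" (per-iteration
        # series) and "histogram" data; see the expected dict below.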
        result = {
            "result": [
                {
                    "error": [],
                    "duration": 1,
                    "idle_duration": 2,
                    "atomic_actions": {},
                    "scenario_output": {"errors": [], "data": {}}
                },
                {
                    "error": ["some", "error", "occurred"],
                    "duration": 1,
                    "idle_duration": 1,
                    "atomic_actions": {},
                    "scenario_output": {"errors": [], "data": {}}
                },
                {
                    "error": [],
                    "duration": 2,
                    "idle_duration": 3,
                    "atomic_actions": {},
                    "scenario_output": {"errors": [], "data": {}}
                }
            ],
            "sla": "foo_sla",
            "load_duration": 1234.5,
            "full_duration": 6789.1
        }

        output = plot._process_main_duration(result,
                                             plot._prepare_data(result))

        self.assertEqual(output, {
            "pie": [
                {"key": "success", "value": 2},
                {"key": "errors", "value": 1}
            ],
            "iter": [
                {
                    "key": "duration",
                    "values": [(1, 1.0), (2, 0), (3, 2.0)]
                },
                {
                    "key": "idle_duration",
                    "values": [(1, 2.0), (2, 0), (3, 3.0)]
                }
            ],
            "histogram": [
                {
                    "key": "task",
                    "method": "Square Root Choice",
                    "values": [{"x": 1.0, "y": 1.0}, {"x": 1.0, "y": 0.0}]
                },
                {
                    "key": "task",
                    "method": "Sturges Formula",
                    "values": [{"x": 1.0, "y": 1.0}, {"x": 1.0, "y": 0.0}]
                },
                {
                    "key": "task",
                    "method": "Rice Rule",
                    "values": [{"x": 1.0, "y": 1.0}, {"x": 1.0, "y": 0.0},
                               {"x": 1.0, "y": 0.0}]
                },
                {
                    "key": "task",
                    "method": "One Half",
                    "values": [{"x": 2.0, "y": 2.0}]
                }
            ]
        })

    def test__process_atomic_time(self):
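        # Unlike the main-duration test above, histograms here are grouped
        # per atomic action, with the second action carrying "disabled": 1
        # in the expected output below.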
        result = {
            "result": [
                {
                    "error": [],
                    "atomic_actions": {
                        "action1": 1,
                        "action2": 2
                    },
                    "scenario_output": {"errors": [], "data": {}}
                },
                {
                    "error": ["some", "error", "occurred"],
                    "atomic_actions": {
                        "action1": 1,
                        "action2": 2
                    },
                    "scenario_output": {"errors": [], "data": {}}
                },
                {
                    "error": [],
                    "atomic_actions": {
                        "action1": 3,
                        "action2": 4
                    },
                    "scenario_output": {"errors": [], "data": {}}
                }
            ]
        }

        data = {
            "atomic_durations": {
                "action1": [(1, 1.0), (2, 0.0), (3, 3.0)],
                "action2": [(1, 2.0), (2, 0.0), (3, 4.0)]}}

        output = plot._process_atomic(result, data)

        self.assertEqual(output, {
            "histogram": [
                [
                    {
                        "key": "action1",
                        "disabled": 0,
                        "method": "Square Root Choice",
                        "values": [{"x": 2, "y": 1}, {"x": 3, "y": 1}]
                    },
                    {
                        "key": "action1",
                        "disabled": 0,
                        "method": "Sturges Formula",
                        "values": [{"x": 2, "y": 1}, {"x": 3, "y": 1}]
                    },
                    {
                        "key": "action1",
                        "disabled": 0,
                        "method": "Rice Rule",
                        "values": [{"x": 1, "y": 1}, {"x": 1, "y": 0},
                                   {"x": 1, "y": 0}]
                    },
                    {
                        "key": "action1",
                        "disabled": 0,
                        "method": "One Half",
                        "values": [{"x": 3, "y": 2}]
                    },
                ],
                [
                    {
                        "key": "action2",
                        "disabled": 1,
                        "method": "Square Root Choice",
                        "values": [{"x": 3, "y": 1}, {"x": 4, "y": 1}]
                    },
                    {
                        "key": "action2",
                        "disabled": 1,
                        "method": "Sturges Formula",
                        "values": [{"x": 3, "y": 1}, {"x": 4, "y": 1}]
                    },
                    {
                        "key": "action2",
                        "disabled": 1,
                        "method": "Rice Rule",
                        "values": [{"x": 2, "y": 1}, {"x": 2, "y": 0},
                                   {"x": 2, "y": 0}]
                    },
                    {
                        "key": "action2",
                        "disabled": 1,
                        "method": "One Half",
                        "values": [{"x": 4, "y": 2}]
                    }
                ]
            ],
            "pie": [
                {"key": "action1", "value": 2.0},
                {"key": "action2", "value": 3.0}
            ],
            "iter": [
                {
                    "key": "action1",
                    "values": [(1, 1), (2, 0), (3, 3)]
                },
                {
                    "key": "action2",
                    "values": [(1, 2), (2, 0), (3, 4)]
                }
            ]
        })

    @mock.patch("rally.benchmark.processing.utils.compress")
    def test__prepare_data(self, mock_compress):
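        # compress() is mocked below to the identity function, so the raw
        # value lists can be asserted against directly.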
        mock_compress.side_effect = lambda i, **kv: i
        rows_num = 100
        load_duration = 1234.5
        full_duration = 6789.1
        sla = [{"foo": "bar"}]
        data = []
        for i in range(rows_num):
            atomic_actions = {
                "a1": i + 0.1,
                "a2": i + 0.8,
            }
            row = {
                "duration": i * 3.1,
                "idle_duration": i * 0.2,
                "error": [],
                "atomic_actions": atomic_actions,
                "scenario_output": {"errors": ["err"],
                                    "data": {"out_key": "out_value"}}
            }
            data.append(row)

        data[42]["error"] = ["foo", "bar", "spam"]
        data[52]["error"] = ["spam", "bar", "foo"]
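        # Iterations 42 and 52 are marked as failed above; _prepare_data is
        # expected to zero out their duration values, which the expected
        # lists below reflect.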

        values_atomic_a1 = [i + 0.1 for i in range(rows_num)]
        values_atomic_a2 = [i + 0.8 for i in range(rows_num)]
        values_duration = [i * 3.1 for i in range(rows_num)]
        values_duration[42] = 0
        values_duration[52] = 0
        values_idle = [i * 0.2 for i in range(rows_num)]
        values_idle[42] = 0
        values_idle[52] = 0

        prepared_data = plot._prepare_data({"result": data,
                                            "load_duration": load_duration,
                                            "full_duration": full_duration,
                                            "sla": sla,
                                            "key": "foo_key"})
        self.assertEqual(2, len(prepared_data["errors"]))

        calls = [mock.call(values_atomic_a1),
                 mock.call(values_atomic_a2),
                 mock.call(values_duration),
                 mock.call(values_idle)]
        mock_compress.assert_has_calls(calls)

        expected_output = [{"key": "out_key",
                            "values": ["out_value"] * rows_num}]
        expected_output_errors = [(i, [e])
                                  for i, e in enumerate(["err"] * rows_num)]
        self.assertEqual({
            "total_durations": {"duration": values_duration,
                                "idle_duration": values_idle},
            "atomic_durations": {"a1": values_atomic_a1,
                                 "a2": values_atomic_a2},
            "errors": [{"iteration": 42,
                        "message": "bar",
                        "traceback": "spam",
                        "type": "foo"},
                       {"iteration": 52,
                        "message": "bar",
                        "traceback": "foo",
                        "type": "spam"}],
            "output": expected_output,
            "output_errors": expected_output_errors,
            "load_duration": load_duration,
            "full_duration": full_duration,
            "sla": sla,
        }, prepared_data)