Fix Rally reports table stats calculation
There were 3 bugs:

* Success rate was always 100%
* Percentiles were improperly calculated
* Order of atomic_actions was calculated wrongly

This patch includes the following changes:

* Simplify percentile calculation, basing it on GraphZipper and the
  percentile method from processing utils
* Remove the now-unneeded streaming algorithms
* Remove complexity in Charts
* Fix the calculation of MainStatsTable
* Add proper unit tests

Closes-bug: #1500163
Co-Authored-By: Boris Pavlovic <boris@pavlovic.me>
Co-Authored-By: Roman Vasilets <rvasilets@mirantis.com>
Change-Id: I96b550d97abde39807ea6466eeb30b27e317fbfc
parent 9a17d8490e
commit 294f00b863
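The reworked PercentileComputation (in the hunks below) feeds every measurement into a GraphZipper and then computes the requested percentile over the collected points. For orientation, here is a minimal sketch of a linear-interpolation percentile, assuming that is roughly what the percentile helper in processing utils does; the name and exact behaviour are assumptions, not code from this patch:

    def percentile(points, percent):
        """Return the `percent` quantile of a sample, 0 < percent < 1."""
        points = sorted(points)
        k = (len(points) - 1) * percent   # fractional rank
        f = int(k)                        # floor index
        if f == k:                        # exact index, no interpolation
            return points[f]
        return points[f] + (points[f + 1] - points[f]) * (k - f)

    print(percentile(range(5000), 0.25))  # -> 1249.75
    print(percentile(range(5000), 0.90))  # -> ~4499.1

The printed values match the range5000 expectations used in the updated unit tests further down.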
@@ -16,6 +16,7 @@
 import json
 import uuid
 
+from rally.common import costilius
 from rally.common import db
 from rally.common.i18n import _LE
 from rally import consts
@@ -326,7 +327,7 @@ class Task(object):
         min_duration = 0
         max_duration = 0
         iterations_failed = 0
-        atomic = {}
+        atomic = costilius.OrderedDict()
         output_names = set()
 
         for itr in scenario["data"]["raw"]:
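The switch from a plain dict to costilius.OrderedDict is what fixes the atomic_actions ordering bug: a minimal illustration, using the stdlib OrderedDict as a stand-in for Rally's costilius compatibility shim, assuming only that report rows must keep insertion order:

    import collections

    # On the Python versions Rally supported at the time, a plain dict does
    # not guarantee insertion order, so atomic actions could be reported in
    # arbitrary order.
    plain = {}
    ordered = collections.OrderedDict()
    for name in ("nova.boot_server", "nova.delete_server"):
        plain.setdefault(name, []).append(1.0)
        ordered.setdefault(name, []).append(1.0)

    print(list(ordered))  # always: ['nova.boot_server', 'nova.delete_server']
    print(list(plain))    # order not guaranteed on older interpreters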
@@ -14,13 +14,13 @@
 # under the License.
 
 import abc
-import heapq
 import math
 
 import six
 
 from rally.common.i18n import _
 from rally import exceptions
+from rally.task.processing import utils
 
 
 @six.add_metaclass(abc.ABCMeta)
@@ -128,66 +128,27 @@ class MaxComputation(StreamingAlgorithm):
 class PercentileComputation(StreamingAlgorithm):
     """Compute percentile value from a stream of numbers."""
 
-    def __init__(self, percent):
+    def __init__(self, percent, length):
         """Init streaming computation.
 
-        :param percent: numeric percent (from 0.1 to 99.9)
+        :param percent: numeric percent (from 0.00..1 to 0.999..)
+        :param length: count of the measurements
         """
-        if not 0 < percent < 100:
+        if not 0 < percent < 1:
             raise ValueError("Unexpected percent: %s" % percent)
         self._percent = percent
-        self._count = 0
-        self._left = []
-        self._right = []
-        self._current_percentile = None
+
+        self._graph_zipper = utils.GraphZipper(length, 10000)
 
     def add(self, value):
-        value = self._cast_to_float(value)
-
-        if self._current_percentile and value > self._current_percentile:
-            heapq.heappush(self._right, value)
-        else:
-            heapq.heappush(self._left, -value)
-
-        self._count += 1
-        expected_left = int(self._percent * (self._count + 1) / 100)
-
-        if len(self._left) > expected_left:
-            heapq.heappush(self._right, -heapq.heappop(self._left))
-        elif len(self._left) < expected_left:
-            heapq.heappush(self._left, -heapq.heappop(self._right))
-
-        left = -self._left[0] if len(self._left) else 0
-        right = self._right[0] if len(self._right) else 0
-
-        self._current_percentile = left + (right - left) / 2.
+        self._graph_zipper.add_point(value)
 
     def result(self):
-        if self._current_percentile is None:
+        results = list(
+            map(lambda x: x[1], self._graph_zipper.get_zipped_graph()))
+        if not results:
             raise ValueError("No values have been processed")
-        return self._current_percentile
-
-
-class ProgressComputation(StreamingAlgorithm):
-    """Compute progress in percent."""
-
-    def __init__(self, base_count):
-        """Init streaming computation.
-
-        :param base_count: int number for end progress (100% reached)
-        """
-        self._base_count = int(base_count) or 1
-        self._count = 0
-
-    def add(self, *args):
-        if self._count >= self._base_count:
-            raise RuntimeError(
-                "100%% progress is already reached (count of %d)"
-                % self._base_count)
-        self._count += 1
-
-    def result(self):
-        return self._count / float(self._base_count) * 100
+        return utils.percentile(results, self._percent)
 
 
 class IncrementComputation(StreamingAlgorithm):
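Driving the new class mirrors the updated unit tests: percent is now a fraction in (0, 1) and the total number of measurements is passed up front so the GraphZipper can size itself. A short usage sketch; the import path is an assumption, not taken from this patch:

    # Hypothetical driver code; only the class name and signature come from
    # the diff above, the module path is assumed.
    from rally.task.processing import algorithms as algo

    values = list(range(5000))
    comp = algo.PercentileComputation(percent=0.90, length=len(values))
    for v in values:
        comp.add(v)
    print(comp.result())  # ~4499.1, matching the updated test expectations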
@@ -14,6 +14,7 @@
 
 import abc
 import bisect
+import copy
 import math
 
 import six
@@ -270,78 +271,65 @@ class AtomicHistogramChart(HistogramChart):
         return list(iteration["atomic_actions"].items())
 
 
-@six.add_metaclass(abc.ABCMeta)
-class Table(Chart):
-    """Base class for table with processed data."""
+class MainStatsTable(Chart):
 
-    @abc.abstractmethod
-    def _init_columns(self):
-        """Initialize columns processing.
+    def _init_row(self, name, iterations_count):
 
-        :returns: OrderedDict(
-            (("str column name", <StreamingAlgorithm instance>),
-             ...))
-        """
+        def round_3(stream, no_result):
+            if no_result:
+                return "n/a"
+            return round(stream.result(), 3)
+
+        return [
+            ("Action", name),
+            ("Min (sec)", streaming.MinComputation(), round_3),
+            ("Median (sec)",
+             streaming.PercentileComputation(0.5, iterations_count), round_3),
+            ("90%ile (sec)",
+             streaming.PercentileComputation(0.9, iterations_count), round_3),
+            ("95%ile (sec)",
+             streaming.PercentileComputation(0.95, iterations_count), round_3),
+            ("Max (sec)", streaming.MaxComputation(), round_3),
+            ("Avg (sec)", streaming.MeanComputation(), round_3),
+            ("Success", streaming.MeanComputation(),
+             lambda stream, no_result: "%.1f%%" % (stream.result() * 100)),
+            ("Count", streaming.IncrementComputation(),
+             lambda x, no_result: x.result())
+        ]
+
+    def __init__(self, benchmark_info, zipped_size=1000):
+        self.rows = list(benchmark_info["atomic"].keys())
+        self.rows.append("total")
+        self.rows_index = dict((name, i) for i, name in enumerate(self.rows))
+        self.table = [self._init_row(name, benchmark_info["iterations_count"])
+                      for name in self.rows]
 
     def add_iteration(self, iteration):
-        for name, value in self._map_iteration_values(iteration):
-            if name not in self._data:
-                self._data[name] = self._init_columns()
-            for column in self._data[name]:
-                self._data[name][column].add(value or 0)
+        data = copy.copy(iteration["atomic_actions"])
+        data["total"] = iteration["duration"]
 
-    @abc.abstractmethod
-    def render(self):
-        """Generate table data ready for displaying.
-
-        :returns: {"cols": [str, ...], "rows": [[numeric, ...], ...]}
-        """
-
-
-class MainStatsTable(Table):
-
-    columns = ["Action", "Min (sec)", "Median (sec)", "90%ile (sec)",
-               "95%ile (sec)", "Max (sec)", "Avg (sec)", "Success", "Count"]
-    float_columns = ["Min (sec)", "Median (sec)", "90%ile (sec)",
-                     "95%ile (sec)", "Max (sec)", "Avg (sec)"]
-
-    def _init_columns(self):
-        return costilius.OrderedDict(
-            (("Min (sec)", streaming.MinComputation()),
-             ("Median (sec)", streaming.PercentileComputation(50)),
-             ("90%ile (sec)", streaming.PercentileComputation(90)),
-             ("95%ile (sec)", streaming.PercentileComputation(95)),
-             ("Max (sec)", streaming.MaxComputation()),
-             ("Avg (sec)", streaming.MeanComputation()),
-             ("Success", streaming.ProgressComputation(self.base_size)),
-             ("Count", streaming.IncrementComputation())))
-
-    def _map_iteration_values(self, iteration):
-        iteration = self._fix_atomic_actions(iteration)
-        values = list(iteration["atomic_actions"].items())
-        values.append(("total",
-                       0 if iteration["error"] else iteration["duration"]))
-        return values
+        for name, value in data.items():
+            index = self.rows_index[name]
+            self.table[index][-1][1].add(None)
+            if iteration["error"]:
+                self.table[index][-2][1].add(0)
+            else:
+                self.table[index][-2][1].add(1)
+                for elem in self.table[index][1:-2]:
+                    elem[1].add(value)
 
     def render(self):
         rows = []
-        total = None
 
-        for name, values in self._data.items():
-            row = [name]
-            for column_name, column in self._data[name].items():
-                if column_name == "Success":
-                    row.append("%.1f%%" % column.result())
-                else:
-                    row.append(round(column.result(), 3))
-
-            # Save `total' - it must be appended last
-            if name.lower() == "total":
-                total = row
-                continue
+        for i in range(len(self.table)):
+            row = [self.table[i][0][1]]
+            # no results if all iterations failed
+            no_result = self.table[i][-2][1].result() == 0.0
+            row.extend(x[2](x[1], no_result) for x in self.table[i][1:])
             rows.append(row)
 
-        if total:
-            rows.append(total)
+        return {"cols": list(map(lambda x: x[0], self.table[0])),
+                "rows": rows}
 
-        return {"cols": self.columns, "rows": rows}
+    def _map_iteration_values(self, iteration):
+        pass
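The reworked MainStatsTable keeps one row of streaming computations per atomic action plus a "total" row, and feeds the Success column a 0 or 1 per iteration, which is why failed runs no longer report 100%. A usage sketch that follows the new unit tests at the end of this change; the charts import path is assumed:

    from rally.common import costilius
    from rally.task.processing import charts  # assumed import path

    table = charts.MainStatsTable({"iterations_count": 2,
                                   "atomic": {"foo": {}}})
    table.add_iteration({"atomic_actions": costilius.OrderedDict([("foo", 1.0)]),
                         "duration": 10.0, "error": False})
    table.add_iteration({"atomic_actions": costilius.OrderedDict([("foo", 2.0)]),
                         "duration": 20.0, "error": True})
    print(table.render())
    # -> {"cols": [...9 column names...],
    #     "rows": [["foo", 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, "50.0%", 2],
    #              ["total", 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, "50.0%", 2]]}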
@@ -111,6 +111,9 @@ class MaxComputationTestCase(test.TestCase):
 @ddt.ddt
 class PercentileComputationTestCase(test.TestCase):
 
+    mixed1 = [0]
+    mixed6 = [100, 100, 0, 100, 100, 100]
+    mixed5 = [0, 0, 100, 0, 0]
     mixed16 = [55.71, 83.05, 24.12, 27, 48.36, 16.36, 96.23, 6, 16.0, 88.11,
                29.52, 99.2, 79.96, 77.84, 85.45, 85.32, 7, 17.1, 3.02, 15.23]
     mixed50 = [51.63, 82.2, 52.52, .05, 66, 94.03, 78.6, 80.9, 51.89, 79, 1.4,
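The new small fixtures make the expected values easy to check by hand with the sketch percentile() from the note near the top of this page (again an assumption about the helper, not Rally's code):

    mixed6 = [100, 100, 0, 100, 100, 100]
    mixed5 = [0, 0, 100, 0, 0]
    print(percentile(mixed6, 0.5))    # 100, as the new test case expects
    print(percentile(mixed5, 0.5))    # 0
    print(percentile(mixed5, 0.999))  # 99.6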
@@ -122,58 +125,44 @@ class PercentileComputationTestCase(test.TestCase):
     range5000 = range(5000)
 
     @ddt.data(
-        {"stream": "mixed16", "percent": 25, "expected": 16.18},
-        {"stream": "mixed16", "percent": 50, "expected": 38.94},
-        {"stream": "mixed16", "percent": 90, "expected": 92.17},
-        {"stream": "mixed50", "percent": 25, "expected": 23.1},
-        {"stream": "mixed50", "percent": 50, "expected": 51.89},
-        {"stream": "mixed50", "percent": 90, "expected": 85.265},
-        {"stream": "mixed5000", "percent": 25, "expected": 25.03},
-        {"stream": "mixed5000", "percent": 50, "expected": 51.89},
-        {"stream": "mixed5000", "percent": 90, "expected": 85.265},
-        {"stream": "range5000", "percent": 25, "expected": 1249.5},
-        {"stream": "range5000", "percent": 50, "expected": 2499.5},
-        {"stream": "range5000", "percent": 90, "expected": 4499.5})
+        {"stream": "mixed1", "percent": 0.95, "expected": 0},
+        {"stream": "mixed6", "percent": 0.5, "expected": 100},
+        {"stream": "mixed5", "percent": 0.5, "expected": 0},
+        {"stream": "mixed5", "percent": 0.999, "expected": 99.6},
+        {"stream": "mixed5", "percent": 0.001, "expected": 0},
+        {"stream": "mixed16", "percent": 0.25, "expected": 16.27},
+        {"stream": "mixed16", "percent": 0.50, "expected": 38.94},
+        {"stream": "mixed16", "percent": 0.90, "expected": 88.92200000000001},
+        {"stream": "mixed50", "percent": 0.25, "expected": 25.105},
+        {"stream": "mixed50", "percent": 0.50, "expected": 51.89},
+        {"stream": "mixed50", "percent": 0.90, "expected": 82.81300000000002},
+        {"stream": "mixed5000", "percent": 0.25, "expected":
+         35.54600000000001},
+        {"stream": "mixed5000", "percent": 0.50, "expected": 48.351},
+        {"stream": "mixed5000", "percent": 0.90, "expected":
+         66.05880000000437},
+        {"stream": "range5000", "percent": 0.25, "expected": 1249.75},
+        {"stream": "range5000", "percent": 0.50, "expected": 2499.5},
+        {"stream": "range5000", "percent": 0.90, "expected": 4499.1})
     @ddt.unpack
     def test_add_and_result(self, percent, stream, expected):
-        comp = algo.PercentileComputation(percent=percent)
+        comp = algo.PercentileComputation(percent=percent, length=len(
+            getattr(self, stream)))
         [comp.add(i) for i in getattr(self, stream)]
         self.assertEqual(expected, comp.result())
 
     def test_add_raises(self):
-        comp = algo.PercentileComputation(50)
+        comp = algo.PercentileComputation(0.50, 100)
         self.assertRaises(TypeError, comp.add)
         self.assertRaises(TypeError, comp.add, None)
         self.assertRaises(TypeError, comp.add, "str")
 
     def test_result_raises(self):
         self.assertRaises(TypeError, algo.PercentileComputation)
-        comp = algo.PercentileComputation(50)
+        comp = algo.PercentileComputation(0.50, 100)
         self.assertRaises(ValueError, comp.result)
 
 
-class ProgressComputationTestCase(test.TestCase):
-
-    def test___init__raises(self):
-        self.assertRaises(TypeError, algo.ProgressComputation)
-        self.assertRaises(TypeError, algo.ProgressComputation, None)
-        self.assertRaises(ValueError, algo.ProgressComputation, "str")
-
-    def test_add_and_result(self):
-        comp = algo.ProgressComputation(42)
-        self.assertEqual(0, comp.result())
-        for expected_progress in (2.38, 4.76, 7.14, 9.52, 11.9, 14.29,
-                                  16.67, 19.05, 21.43):
-            comp.add(42)
-            self.assertEqual(expected_progress, round(comp.result(), 2))
-
-    def test_add_raises(self):
-        comp = algo.ProgressComputation(42)
-        [comp.add(123) for i in range(42)]
-        self.assertRaises(RuntimeError, comp.add, None)
-        self.assertRaises(RuntimeError, comp.add, 123)
-
-
 class IncrementComputationTestCase(test.TestCase):
 
     def test_add_and_result(self):
@@ -340,62 +340,96 @@ class AtomicHistogramChartTestCase(test.TestCase):
         self.assertEqual(expected, chart.render())
 
 
-class TableTestCase(test.TestCase):
-
-    class Table(charts.Table):
-        columns = ["name", "foo", "bar"]
-        foo = mock.Mock()
-        bar = mock.Mock()
-
-        def _init_columns(self):
-            return costilius.OrderedDict(
-                [("foo", self.foo), ("bar", self.bar)])
-
-        def _map_iteration_values(self, iteration):
-            return [("value_" + k, iteration[k]) for k in ["a", "b"]]
-
-        def render(self):
-            return self._data
-
-    def setUp(self, *args, **kwargs):
-        super(TableTestCase, self).setUp(*args, **kwargs)
-        self.bench_info = {"iterations_count": 42,
-                           "atomic": {"a": {}, "b": {}, "c": {}}}
-
-    def test_add_iteration_and_render(self):
-        self.assertRaises(TypeError, charts.Table, self.bench_info)
-        table = self.Table(self.bench_info)
-        self.assertEqual(costilius.OrderedDict(), table.render())
-        [table.add_iteration({"a": i, "b": 43 - i}) for i in range(1, 43)]
-        self.assertEqual(
-            costilius.OrderedDict(
-                [("value_a", costilius.OrderedDict([("foo", table.foo),
-                                                    ("bar", table.bar)])),
-                 ("value_b", costilius.OrderedDict([("foo", table.foo),
-                                                    ("bar", table.bar)]))]),
-            table.render())
+MAIN_STATS_TABLE_COLUMNS = ["Action", "Min (sec)", "Median (sec)",
+                            "90%ile (sec)", "95%ile (sec)", "Max (sec)",
+                            "Avg (sec)", "Success", "Count"]
+
+
+def generate_iteration(duration, error, *args):
+    return {
+        "atomic_actions": costilius.OrderedDict(args),
+        "duration": duration,
+        "error": error
+    }
 
 
+@ddt.ddt
 class MainStatsTableTestCase(test.TestCase):
 
-    def setUp(self, *args, **kwargs):
-        super(MainStatsTableTestCase, self).setUp(*args, **kwargs)
-        self.bench_info = {"iterations_count": 42,
-                           "atomic": {"a": {}, "b": {}, "c": {}}}
-        self.columns = [
-            "Action", "Min (sec)", "Median (sec)", "90%ile (sec)",
-            "95%ile (sec)", "Max (sec)", "Avg (sec)", "Success", "Count"]
+    @ddt.data(
+        {
+            "info": {
+                "iterations_count": 1,
+                "atomic": costilius.OrderedDict([("foo", {}), ("bar", {})])
+            },
+            "data": [
+                generate_iteration(10.0, False, ("foo", 1.0), ("bar", 2.0))
+            ],
+            "expected": {
+                "cols": MAIN_STATS_TABLE_COLUMNS,
+                "rows": [
+                    ["foo", 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, "100.0%", 1],
+                    ["bar", 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, "100.0%", 1],
+                    ["total", 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, "100.0%", 1],
+                ]
+            }
+        },
+        {
+            "info": {"iterations_count": 2, "atomic": {"foo": {}}},
+            "data": [
+                generate_iteration(10.0, True, ("foo", 1.0)),
+                generate_iteration(10.0, True, ("foo", 2.0))
+            ],
+            "expected": {
+                "cols": MAIN_STATS_TABLE_COLUMNS,
+                "rows": [
+                    ["foo", "n/a", "n/a", "n/a", "n/a", "n/a", "n/a", "0.0%",
+                     2],
+                    ["total", "n/a", "n/a", "n/a", "n/a", "n/a", "n/a", "0.0%",
+                     2],
+                ]
+            }
+        },
+        {
+            "info": {"iterations_count": 2, "atomic": {"foo": {}}},
+            "data": [
+                generate_iteration(10.0, False, ("foo", 1.0)),
+                generate_iteration(20.0, True, ("foo", 2.0))
+            ],
+            "expected": {
+                "cols": MAIN_STATS_TABLE_COLUMNS,
+                "rows": [
+                    ["foo", 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, "50.0%", 2],
+                    ["total", 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, "50.0%", 2]
+                ]
+            }
+        },
+        {
+            "info": {
+                "iterations_count": 4,
+                "atomic": costilius.OrderedDict([("foo", {}), ("bar", {})])
+            },
+            "data": [
+                generate_iteration(10.0, False, ("foo", 1.0), ("bar", 4.0)),
+                generate_iteration(20.0, False, ("foo", 2.0), ("bar", 4.0)),
+                generate_iteration(30.0, False, ("foo", 3.0), ("bar", 4.0)),
+                generate_iteration(40.0, True, ("foo", 4.0), ("bar", 4.0))
+            ],
+            "expected": {
+                "cols": MAIN_STATS_TABLE_COLUMNS,
+                "rows": [
+                    ["foo", 1.0, 2.0, 2.8, 2.9, 3.0, 2.0, "75.0%", 4],
+                    ["bar", 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, "75.0%", 4],
+                    ["total", 10.0, 20.0, 28.0, 29.0, 30.0, 20.0, "75.0%", 4]
+                ]
+            }
+        }
+    )
+    @ddt.unpack
+    def test_add_iteration_and_render(self, info, data, expected):
 
-    def test_add_iteration_and_render(self):
-        table = charts.MainStatsTable({"iterations_count": 42,
-                                       "atomic": {"foo": {}, "bar": {}}})
-        [table.add_iteration(
-            {"atomic_actions": costilius.OrderedDict([("foo", i),
-                                                      ("bar", 43 - 1)]),
-             "duration": i, "error": i % 40}) for i in range(1, 43)]
-        expected_rows = [
-            ["foo", 1.0, 21.5, 38.5, 40.5, 42.0, 21.5, "100.0%", 42.0],
-            ["bar", 42.0, 42.0, 42.0, 42.0, 42.0, 42.0, "100.0%", 42.0],
-            ["total", 0.0, 0.0, 0.0, 0.0, 40.0, 0.952, "100.0%", 42.0]]
-        self.assertEqual({"cols": self.columns, "rows": expected_rows},
-                         table.render())
+        table = charts.MainStatsTable(info)
+        for el in data:
+            table.add_iteration(el)
+
+        self.assertEqual(expected, table.render())