Rally provides a framework for performance analysis and benchmarking of individual OpenStack components as well as full production OpenStack cloud deployments.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import abc
import bisect
import collections
import math

import six

from rally.common.plugin import plugin
from rally.common import streaming_algorithms as streaming
from rally.task import atomic
from rally.task.processing import utils


@plugin.base()
@six.add_metaclass(abc.ABCMeta)
class Chart(plugin.Plugin):
    """Base class for charts.

    This is a base for all plugins that prepare data for specific charts
    in the HTML report. Each chart must at least declare its chart widget
    and prepare data that is suitable for rendering by JavaScript.
    """

    @abc.abstractproperty
    def widget(self):
        """Widget name used by JavaScript to display this chart."""

    def __init__(self, workload, zipped_size=1000):
        """Setup initial values.

        :param workload: dict, detailed info about the Workload
        :param zipped_size: int, maximum number of points on the scale
        """
        self._data = collections.OrderedDict()  # Container for results
        self._workload = workload
        self.base_size = self._workload["total_iteration_count"]
        self.zipped_size = zipped_size

    def add_iteration(self, iteration):
        """Add iteration data.

        This method must be called for each iteration.
        If overridden, this method must use streaming data processing,
        so that the chart instance can process an unlimited number of
        iterations with low memory usage.
        """
        for name, value in self._map_iteration_values(iteration):
            if name not in self._data:
                self._data[name] = utils.GraphZipper(self.base_size,
                                                     self.zipped_size)
            self._data[name].add_point(value)

    def render(self):
        """Generate chart data ready for drawing."""
        return [(name, points.get_zipped_graph())
                for name, points in self._data.items()]

    @classmethod
    def render_complete_data(cls, data):
        """Render processed complete data for drawing."""
        return data

    def _fix_atomic_actions(self, atomic_actions):
        """Set `0' for missed atomic actions.

        Since some atomic actions can be absent in some iterations
        due to failures, this method must be used in all cases
        related to atomic actions processing.
        """
        return list(
            (name, atomic_actions.get(name, {}).get("duration", 0))
            for name in self._get_atomic_names()
        )

    def _get_atomic_names(self):
        duration_stats = self._workload["statistics"]["durations"]
        return [a["display_name"] for a in duration_stats["atomics"]]

    def _map_iteration_values(self, iteration):
        """Get values for processing, from the given iteration."""
        return iteration
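
# NOTE: illustrative usage sketch, not part of the original module. Chart
# subclasses are driven in a streaming fashion: construct one with a workload
# dict, feed each iteration exactly once, then render. The workload and
# iteration fields below are assumptions based on the keys this module reads.
#
#     chart = MainStackedAreaChart({"total_iteration_count": 2,
#                                   "failed_iteration_count": 0})
#     for itr in ({"duration": 1.2, "idle_duration": 0.1, "error": None},
#                 {"duration": 1.4, "idle_duration": 0.2, "error": None}):
#         chart.add_iteration(itr)
#     chart.render()  # -> [("duration", <zipped points>),
#                     #     ("idle_duration", <zipped points>)]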


class MainStackedAreaChart(Chart):
    widget = "StackedArea"

    def _map_iteration_values(self, iteration):
        if iteration["error"]:
            result = [("duration", 0), ("idle_duration", 0)]
            if self._workload["failed_iteration_count"]:
                result.append(
                    ("failed_duration",
                     iteration["duration"] + iteration["idle_duration"]))
        else:
            result = [("duration", iteration["duration"]),
                      ("idle_duration", iteration["idle_duration"])]
            if self._workload["failed_iteration_count"]:
                result.append(("failed_duration", 0))
        return result


class AtomicStackedAreaChart(Chart):
    widget = "StackedArea"

    def _map_iteration_values(self, iteration):
        atomic_actions = atomic.merge_atomic_actions(
            iteration["atomic_actions"])
        atomics = self._fix_atomic_actions(atomic_actions)
        if self._workload["failed_iteration_count"]:
            if iteration["error"]:
                failed_duration = (
                    iteration["duration"] + iteration["idle_duration"]
                    - sum([(a[1] or 0) for a in atomics]))
            else:
                failed_duration = 0
            atomics.append(("failed_duration", failed_duration))
        return atomics


class AvgChart(Chart):
    """Base class for charts with average results."""

    widget = "Pie"

    def add_iteration(self, iteration):
        for name, value in self._map_iteration_values(iteration):
            if name not in self._data:
                self._data[name] = streaming.MeanComputation()
            self._data[name].add(value or 0)

    def render(self):
        return [(k, v.result()) for k, v in self._data.items()]


class AtomicAvgChart(AvgChart):
    def _map_iteration_values(self, iteration):
        atomic_actions = atomic.merge_atomic_actions(
            iteration["atomic_actions"])
        return self._fix_atomic_actions(atomic_actions)


class LoadProfileChart(Chart):
    """Chart for parallel durations."""

    widget = "StackedArea"

    def __init__(self, workload, name="parallel iterations",
                 scale=100):
        """Setup chart with graph name and scale.

        :param workload: dict, detailed information about the Workload
        :param name: str, name for the X axis
        :param scale: int, number of X points
        """
        super(LoadProfileChart, self).__init__(workload)
        self._name = name
        # NOTE(boris-42): Add 2 points at the end of the graph so that the
        #                 last point has 0 running iterations.
        self._duration = self._workload["load_duration"] * (1 + 2.0 / scale)
        self.step = self._duration / float(scale)
        self._time_axis = [self.step * x
                           for x in six.moves.range(int(scale))
                           if (self.step * x) < self._duration]
        self._time_axis.append(self._duration)
        self._running = [0] * len(self._time_axis)
        # NOTE(andreykurilin): There is a "start_time" field in the workload
        #                      object, but due to transformations in the
        #                      database layer, its microseconds may not be
        #                      accurate enough.
        if self._workload["data"]:
            self._tstamp_start = self._workload["data"][0]["timestamp"]
        else:
            self._tstamp_start = self._workload["start_time"]

    def _map_iteration_values(self, iteration):
        return iteration["timestamp"], iteration["duration"]

    def add_iteration(self, iteration):
        timestamp, duration = self._map_iteration_values(iteration)
        ts_start = timestamp - self._tstamp_start
        started_idx = bisect.bisect(self._time_axis, ts_start)
        ended_idx = bisect.bisect(self._time_axis, ts_start + duration)
        if self._time_axis[ended_idx - 1] == ts_start + duration:
            ended_idx -= 1
        for idx in range(started_idx + 1, ended_idx):
            self._running[idx] += 1
        if started_idx == ended_idx:
            self._running[ended_idx] += duration / self.step
        else:
            self._running[started_idx] += (
                self._time_axis[started_idx] - ts_start) / self.step
            self._running[ended_idx] += (
                ts_start + duration
                - self._time_axis[ended_idx - 1]) / self.step

    def render(self):
        return [(self._name, list(zip(self._time_axis, self._running)))]
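
# NOTE: illustrative walk-through, not part of the original module, of how
# LoadProfileChart.add_iteration() spreads a single iteration across the time
# axis. Assume step == 1.0 and time_axis == [0, 1, 2, 3, ...]. An iteration
# starting at ts_start == 0.5 with duration == 2.0 ends at 2.5, so:
#
#     started_idx = bisect.bisect(time_axis, 0.5)  # -> 1
#     ended_idx = bisect.bisect(time_axis, 2.5)    # -> 3
#     # fully covered interval:  running[2] += 1
#     # partial head interval:   running[1] += (1.0 - 0.5) / 1.0  -> +0.5
#     # partial tail interval:   running[3] += (2.5 - 2.0) / 1.0  -> +0.5
#
# i.e. each axis point accumulates the fraction of its sampling interval
# during which the iteration was actually running.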


class HistogramChart(Chart):
    """Base class for charts with histograms.

    This chart is relatively complex because it is actually a set of
    histograms which can usually be switched via a dropdown select,
    and each histogram has several data views.
    """

    widget = "Histogram"

    def _init_views(self, min_value, max_value):
        """Generate initial data for each histogram view."""
        if not self.base_size:
            return []
        min_value, max_value = min_value or 0, max_value or 0
        views = []
        for view, bins in [
                ("Square Root Choice",
                 int(math.ceil(math.sqrt(self.base_size)))),
                ("Sturges Formula",
                 int(math.ceil(math.log(self.base_size, 2) + 1))),
                ("Rice Rule",
                 int(math.ceil(2 * self.base_size ** (1.0 / 3))))]:
            bin_width = float(max_value - min_value) / bins
            x_axis = [min_value + (bin_width * x) for x in range(1, bins + 1)]
            views.append({"view": view, "bins": bins,
                          "x": x_axis, "y": [0] * len(x_axis)})
        return views

    def add_iteration(self, iteration):
        for name, value in self._map_iteration_values(iteration):
            if name not in self._data:
                raise KeyError("Unexpected histogram name: %s" % name)
            for i, view in enumerate(self._data[name]["views"]):
                for bin_i, bin_v in enumerate(view["x"]):
                    if (value or 0) <= bin_v:
                        self._data[name]["views"][i]["y"][bin_i] += 1
                        break

    def render(self):
        data = []
        for name, hist in self._data.items():
            for idx, v in enumerate(hist["views"]):
                graph = {"key": name,
                         "view": v["view"],
                         "disabled": hist["disabled"],
                         "values": [{"x": x, "y": y}
                                    for x, y in zip(v["x"], v["y"])]}
                try:
                    data[idx].append(graph)
                except IndexError:
                    data.append([graph])
        return {"data": data, "views": [{"id": i, "name": d[0]["view"]}
                                        for i, d in enumerate(data)]}
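
# NOTE: illustrative numbers, not part of the original module, for the three
# bin-count rules used by _init_views() above, with base_size == 100
# iterations:
#
#     Square Root Choice: ceil(sqrt(100))            -> 10 bins
#     Sturges Formula:    ceil(log2(100) + 1)        -> 8 bins
#     Rice Rule:          ceil(2 * 100 ** (1 / 3.0)) -> 10 bins
#
# Each view then gets `bins` equal-width buckets spanning
# [min_value, max_value].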


class MainHistogramChart(HistogramChart):
    def __init__(self, workload_info):
        super(MainHistogramChart, self).__init__(workload_info)
        views = self._init_views(self._workload["min_duration"],
                                 self._workload["max_duration"])
        self._data["task"] = {"views": views, "disabled": None}

    def _map_iteration_values(self, iteration):
        return [("task", 0 if iteration["error"] else iteration["duration"])]


class AtomicHistogramChart(HistogramChart):
    def __init__(self, workload_info):
        super(AtomicHistogramChart, self).__init__(workload_info)
        for i, aa in enumerate(
                self._workload["statistics"]["durations"]["atomics"]):
            self._data[aa["display_name"]] = {
                "views": self._init_views(aa["data"]["min"],
                                          aa["data"]["max"]),
                "disabled": i}

    def _map_iteration_values(self, iteration):
        atomic_actions = atomic.merge_atomic_actions(
            iteration["atomic_actions"])
        return self._fix_atomic_actions(atomic_actions)


@six.add_metaclass(abc.ABCMeta)
class Table(Chart):
    """Base class for tables.

    Each Table subclass represents an HTML table which can be easily rendered
    in the report. Subclasses are responsible for setting up both columns and
    rows: columns are set simply by the `columns' property (list of str column
    names) and rows must be initialized in the _data property, with one
    [streaming algorithm, postprocess function] pair per value column:

        self._data = {name: [[streaming_ins, postprocess_func or None],
                             ...],
                      ...}
          where:
            name - str name of the table row parameter
            streaming_ins - instance of a streaming algorithm
            postprocess_func - optional function that processes the final
                               result, None means usage of the default
                               self._round()

        This can be done in __init__() or even in add_iteration().
    """

    widget = "Table"

    _styles = {}

    @abc.abstractproperty
    def columns(self):
        """List of column names."""

    def _round(self, ins, has_result):
        """Default post-process function for a table cell value.

        :param ins: streaming_algorithms.StreamingAlgorithm subclass instance
        :param has_result: bool, whether the current row is effective
        :returns: rounded float
        :returns: str "n/a"
        """
        r = ins.result()
        if not has_result or r is None:
            return "n/a"
        else:
            return round(r, 3)

    def _row_has_results(self, values):
        """Determine whether the row can be assumed to have values.

        :param values: row values list
                       [(StreamingAlgorithm, function or None), ...]
        :returns: bool
        """
        for ins, fn in values:
            if isinstance(ins, (streaming.MinComputation,
                                streaming.MaxComputation,
                                streaming.MeanComputation)):
                # NOTE(amaretskiy): None means this computation
                #                   has never been called
                return ins.result() is not None
        return True

    def _process_row(self, name, values):
        row = [name]
        has_result = self._row_has_results(values)
        for ins, fn in values:
            fn = fn or self._round
            row.append(fn(ins, has_result))
        return row

    def get_rows(self):
        """Collect row values once all data is processed.

        :returns: [str_name, (float or str), (float or str), ...]
        """
        rows = []
        for name, values in self._data.items():
            rows.append(self._process_row(name, values))
        return rows

    def render(self):
        rows = self.get_rows()
        if self._styles is None:
            # do not apply anything
            styles = {}
        elif not self._styles and rows:
            # make the last row bold
            styles = {len(rows) - 1: "rich"}
        else:
            styles = self._styles
        return {"cols": self.columns,
                "rows": rows,
                "styles": styles}


class MainStatsTable(Table):
    columns = ["Action", "Min (sec)", "Median (sec)", "90%ile (sec)",
               "95%ile (sec)", "Max (sec)", "Avg (sec)", "Success", "Count"]
    _DEPTH_OF_PROCESSING = 2

    def __init__(self, *args, **kwargs):
        super(MainStatsTable, self).__init__(*args, **kwargs)
        self.iters_num = self._workload["total_iteration_count"]

    def _initialize_atomic(self, name, root, real_name=None, count=1):
        real_name = real_name or name
        root[name] = {
            # streaming algorithms
            "sa": [
                [streaming.MinComputation(), None],
                [streaming.PercentileComputation(0.5, self.iters_num), None],
                [streaming.PercentileComputation(0.9, self.iters_num), None],
                [streaming.PercentileComputation(0.95, self.iters_num), None],
                [streaming.MaxComputation(), None],
                [streaming.MeanComputation(), None],
                [streaming.MeanComputation(),
                 lambda st, has_result: ("%.1f%%" % (st.result() * 100)
                                         if has_result else "n/a")],
                [streaming.IncrementComputation(),
                 lambda st, has_result: st.result()]],
            "children": collections.OrderedDict(),
            "real_name": real_name,
            "count_per_iteration": count
        }

    def _add_data(self, raw_data, root=None):
        """Add iteration data."""
        p_data = self._data if root is None else root
        for name, data in raw_data.items():
            original_name = name
            if data["count"] > 1:
                name += (" (x%s)" % data["count"])
            if name not in p_data:
                self._initialize_atomic(name,
                                        root=p_data,
                                        real_name=original_name,
                                        count=data["count"])
            stats = p_data[name]["sa"]
            # count
            stats[-1][0].add()
            # success
            stats[-2][0].add(0 if data.get("failed", False) else 1)
            for idx in range(6):
                stats[idx][0].add(data["duration"])
            if data["children"]:
                self._add_data(data["children"],
                               root=p_data[name]["children"])

    def add_iteration(self, iteration):
        """Add data of a single iteration."""
        data = atomic.merge_atomic_actions(iteration["atomic_actions"])
        # NOTE(andreykurilin): The easiest way to identify the last atomic
        #                      is to find the last key added to the
        #                      OrderedDict. The cleanest way to do that is
        #                      reversed(): OrderedDict keeps its items in a
        #                      doubly linked list and implements
        #                      __reversed__(), which is why this gives O(1)
        #                      access to the desired element.
        if data:
            the_last = data[next(reversed(data))]
            if iteration["error"] and not the_last.get("failed", False):
                # an un-wrapped action failed
                data["<no-name-action>"] = {"duration": 0, "count": 1,
                                            "failed": True, "children": {}}
        total_duration = iteration["duration"] + iteration["idle_duration"]
        data["total"] = {"duration": total_duration,
                         "count": 1,
                         "failed": bool(iteration["error"]),
                         "children": collections.OrderedDict(
                             [("duration", {
                                 "duration": iteration["duration"],
                                 "count": 1,
                                 "failed": bool(iteration["error"]),
                                 "children": []}),
                              ("idle_duration", {
                                  "duration": iteration["idle_duration"],
                                  "count": 1,
                                  "failed": bool(iteration["error"]),
                                  "children": []})
                              ])}
        self._add_data(data)

    def _process_result(self, name, values, depth=0):
        row = self._process_row(name, values["sa"])
        children = []
        for c_name, c_values in values["children"].items():
            children.append(self._process_result(c_name, c_values))
        return {"data": {"iteration_count": row[8],
                         "min": row[1],
                         "median": row[2],
                         "90%ile": row[3],
                         "95%ile": row[4],
                         "max": row[5],
                         "avg": row[6],
                         "success": row[7]},
                "count_per_iteration": values["count_per_iteration"],
                "name": values["real_name"],
                "display_name": name,
                "children": children}

    def _get_results(self):
        if self._data:
            # NOTE(andreykurilin): If a specific atomic action was not
            #                      executed in the first iteration, it gets
            #                      added to self._data after the "total" row,
            #                      which is the wrong order. Ensure that
            #                      "total" is always at the end.
            self._data["total"] = self._data.pop("total")
        else:
            # NOTE(andreykurilin): The workload doesn't have any iterations,
            #                      so add_iteration() has never been called
            #                      and the 'total' item has not been
            #                      initialized. Do it here, since the 'total'
            #                      item should always be present in the rows.
            self._initialize_atomic("total", root=self._data)
        results = []
        for name, values in self._data.items():
            results.append(self._process_result(name, values))
        return results

    def get_rows(self):
        rows = []

        def _process_elem(elem, depth=0):
            name = elem["display_name"]
            if depth > 0:
                name = (" %s> %s" % ("-" * depth, name))
            rows.append([name,
                         elem["data"]["min"],
                         elem["data"]["median"],
                         elem["data"]["90%ile"],
                         elem["data"]["95%ile"],
                         elem["data"]["max"],
                         elem["data"]["avg"],
                         elem["data"]["success"],
                         elem["data"]["iteration_count"]])
            for child in elem["children"]:
                _process_elem(child, depth=(depth + 1))

        for elem in self._get_results():
            _process_elem(elem)
        return rows

    def to_dict(self):
        res = self._get_results()
        return {"total": res[-1], "atomics": res[:-1]}

    def render(self):
        rendered_data = super(MainStatsTable, self).render()
        rows_len = len(rendered_data["rows"])
        if rows_len > 1:
            styles = {rows_len - 3: "rich",
                      rows_len - 2: "oblique",
                      rows_len - 1: "oblique"}
            for i, row in enumerate(rendered_data["rows"]):
                if i == rows_len - 3:
                    break
                if row[0].startswith(" -"):
                    styles[i] = "oblique"
            rendered_data["styles"] = styles
        return rendered_data
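
# NOTE: illustrative shape, not part of the original module, of what
# MainStatsTable.get_rows() produces; the numbers are made up. Atomic actions
# come first, children are indented with " -> ", and the synthetic "total"
# row with its "duration" and "idle_duration" children is always last:
#
#     ["nova.boot_server",   1.2, 1.5, 1.9, 2.0, 2.1, 1.6, "100.0%", 10]
#     ["total",              1.3, 1.7, 2.1, 2.2, 2.3, 1.8, "100.0%", 10]
#     [" -> duration",       1.2, 1.5, 1.9, 2.0, 2.1, 1.6, "100.0%", 10]
#     [" -> idle_duration",  0.1, 0.2, 0.2, 0.2, 0.2, 0.2, "100.0%", 10]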


class OutputChart(Chart):
    """Base class for charts related to scenario output."""

    def __init__(self, workload_info, zipped_size=1000,
                 title="", description="", label="", axis_label=""):
        super(OutputChart, self).__init__(workload_info, zipped_size)
        self.title = title
        self.description = description
        self.label = label
        self.axis_label = axis_label

    def render(self):
        return {"title": self.title,
                "description": self.description,
                "widget": self.widget,
                "data": super(OutputChart, self).render(),
                "label": self.label,
                "axis_label": self.axis_label}


@plugin.configure(name="StackedArea")
class OutputStackedAreaChart(OutputChart):
    """Display results as a stacked area.

    This plugin processes additive data and displays it in the HTML report
    as a stacked area with the X axis bound to the iteration number.
    Complete output data is displayed as a stacked area as well, without
    any processing.

    Keys "description", "label" and "axis_label" are optional.

    Examples of using this plugin in a Scenario, for saving output data:

    .. code-block:: python

        self.add_output(
            additive={"title": "Additive data as stacked area",
                      "description": "Iterations trend for foo and bar",
                      "chart_plugin": "StackedArea",
                      "data": [["foo", 12], ["bar", 34]]},
            complete={"title": "Complete data as stacked area",
                      "description": "Data is shown as stacked area, as-is",
                      "chart_plugin": "StackedArea",
                      "data": [["foo", [[0, 5], [1, 42], [2, 15], [3, 7]]],
                               ["bar", [[0, 2], [1, 1.3], [2, 5], [3, 9]]]],
                      "label": "Y-axis label text",
                      "axis_label": "X-axis label text"})
    """

    widget = "StackedArea"

    def render(self):
        result = super(OutputStackedAreaChart, self).render()
        # NOTE(amaretskiy): transform to a Table if there is a single
        #                   iteration
        if result["data"] and len(result["data"][0][1]) == 1:
            rows = [[v[0], v[1][0][1]] for v in result["data"]]
            result.update({"widget": "Table",
                           "data": {"cols": ["Name", self.label or "Value"],
                                    "rows": rows}})
        return result
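
# NOTE: illustrative sketch, not part of the original module, of the
# single-iteration fallback in OutputStackedAreaChart.render() above: when
# every series holds just one point, e.g.
# result["data"] == [["foo", [[1, 42]]], ["bar", [[1, 7]]]], the widget is
# switched to "Table" and the data becomes
# {"cols": ["Name", "Value"], "rows": [["foo", 42], ["bar", 7]]}.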


@plugin.configure(name="Lines")
class OutputLinesChart(OutputStackedAreaChart):
    """Display results as a generic chart with lines.

    This plugin processes additive data and displays it in the HTML report
    as a linear chart with the X axis bound to the iteration number.
    Complete output data is displayed as a linear chart as well, without
    any processing.

    Examples of using this plugin in a Scenario, for saving output data:

    .. code-block:: python

        self.add_output(
            additive={"title": "Additive data as stacked area",
                      "description": "Iterations trend for foo and bar",
                      "chart_plugin": "Lines",
                      "data": [["foo", 12], ["bar", 34]]},
            complete={"title": "Complete data as stacked area",
                      "description": "Data is shown as stacked area, as-is",
                      "chart_plugin": "Lines",
                      "data": [["foo", [[0, 5], [1, 42], [2, 15], [3, 7]]],
                               ["bar", [[0, 2], [1, 1.3], [2, 5], [3, 9]]]],
                      "label": "Y-axis label text",
                      "axis_label": "X-axis label text"})
    """

    widget = "Lines"


@plugin.configure(name="Pie")
class OutputAvgChart(OutputChart, AvgChart):
    """Display results as a pie, calculating averages for additive data.

    This plugin processes additive data and calculates average values.
    Both additive and complete data are displayed in the HTML report as pie
    charts.

    Examples of using this plugin in a Scenario, for saving output data:

    .. code-block:: python

        self.add_output(
            additive={"title": "Additive output",
                      "description": ("Pie with average data "
                                      "from all iterations values"),
                      "chart_plugin": "Pie",
                      "data": [["foo", 12], ["bar", 34], ["spam", 56]]},
            complete={"title": "Complete output",
                      "description": "Displayed as a pie, as-is",
                      "chart_plugin": "Pie",
                      "data": [["foo", 12], ["bar", 34], ["spam", 56]]})
    """

    widget = "Pie"


@plugin.configure(name="Table")
class OutputTable(OutputChart, Table):
    """Display complete output as a table; cannot be used for additive data.

    Use this plugin to display complete output data in the HTML report
    as a table. This plugin cannot be used for additive data because it
    does not contain any processing logic.

    Examples of using this plugin in a Scenario, for saving output data:

    .. code-block:: python

        self.add_output(
            complete={"title": "Arbitrary Table",
                      "description": "Just show columns and rows as-is",
                      "chart_plugin": "Table",
                      "data": {"cols": ["foo", "bar", "spam"],
                               "rows": [["a row", 1, 2], ["b row", 3, 4],
                                        ["c row", 5, 6]]}})
    """

    widget = "Table"


@plugin.configure(name="StatsTable")
class OutputStatsTable(OutputTable):
    """Calculate statistics for additive data and display them as a table.

    This plugin processes additive data and composes statistics that are
    displayed as a table in the HTML report.

    Examples of using this plugin in a Scenario, for saving output data:

    .. code-block:: python

        self.add_output(
            additive={"title": "Statistics",
                      "description": ("Table with statistics generated "
                                      "from all iterations values"),
                      "chart_plugin": "StatsTable",
                      "data": [["foo stat", 12], ["bar", 34], ["spam", 56]]})
    """

    columns = ["Action", "Min (sec)", "Median (sec)", "90%ile (sec)",
               "95%ile (sec)", "Max (sec)", "Avg (sec)", "Count"]

    def add_iteration(self, iteration):
        for name, value in self._map_iteration_values(iteration):
            if name not in self._data:
                iters_num = self._workload["total_iteration_count"]
                self._data[name] = [
                    [streaming.MinComputation(), None],
                    [streaming.PercentileComputation(0.5, iters_num), None],
                    [streaming.PercentileComputation(0.9, iters_num), None],
                    [streaming.PercentileComputation(0.95, iters_num), None],
                    [streaming.MaxComputation(), None],
                    [streaming.MeanComputation(), None],
                    [streaming.IncrementComputation(),
                     lambda v, na: v.result()]]

            # the last entry is the "Count" column
            self._data[name][-1][0].add(None)

            for idx, dummy in enumerate(self._data[name][:-1]):
                self._data[name][idx][0].add(value)


@plugin.configure(name="TextArea")
class OutputTextArea(OutputChart):
    """Arbitrary text.

    This plugin processes complete data and displays it as text output in
    the HTML report.

    Examples of using this plugin in a Scenario, for saving output data:

    .. code-block:: python

        self.add_output(
            complete={"title": "Script Inline",
                      "chart_plugin": "TextArea",
                      "data": ["first output", "second output",
                               "third output"]})
    """

    widget = "TextArea"


@plugin.configure(name="EmbeddedChart")
class OutputEmbeddedChart(OutputChart):
    """Chart for embedding custom html as a complete chart.

    Example of usage:

    .. code-block:: python

        self.add_output(
            complete={
                "title": "Embedding link to example.com",
                "chart_plugin": "EmbeddedChart",
                "data": "<a href='example.com'>"
                        "To see external logs follow this link"
                        "</a>"
            }
        )
    """

    widget = "EmbedChart"

    @classmethod
    def render_complete_data(cls, pdata):
        return {
            "title": pdata["title"],
            "widget": cls.widget,
            "data": {
                "source": None,
                # NOTE(chenxu): ensure that '</script>' in the embedded data
                #               will not be handled incorrectly by JavaScript.
                "embedded": pdata["data"].replace("/script>", "\\/script>"),
            }
        }


@plugin.configure(name="EmbeddedExternalChart")
class OutputEmbeddedExternalChart(OutputChart):
    """Chart for embedding an external html page as a complete chart.

    Example of usage:

    .. code-block:: python

        self.add_output(
            complete={
                "title": "Embedding external html page",
                "chart_plugin": "EmbeddedExternalChart",
                "data": "https://example.com"
            }
        )
    """

    widget = "EmbedChart"

    @classmethod
    def render_complete_data(cls, pdata):
        return {
            "title": pdata["title"],
            "widget": cls.widget,
            "data": {"embedded": None, "source": pdata["data"]}
        }


_OUTPUT_SCHEMA = {
    "key_types": {
        "title": six.string_types,
        "description": six.string_types,
        "chart_plugin": six.string_types,
        "data": (list, dict),
        "label": six.string_types,
        "axis_label": six.string_types},
    "required": ["title", "chart_plugin", "data"]}


def validate_output(output_type, output):
    # TODO(amaretskiy): this validation is simple and must be improved.
    #                   Maybe it is worth adding a classmethod
    #                   OutputChart.validate(), so we could have flexible
    #                   validation for custom chart plugins.
    if output_type not in ("additive", "complete"):
        return ("unexpected output type: '%s', "
                "should be in ('additive', 'complete')" % output_type)

    if not isinstance(output, dict):
        return ("%(name)s output item has wrong type '%(type)s', "
                "must be 'dict'" % {"name": output_type,
                                    "type": type(output).__name__})

    for key in _OUTPUT_SCHEMA["required"]:
        if key not in output:
            return ("%(name)s output missing key '%(key)s'"
                    % {"name": output_type, "key": key})

    for key in output:
        if key not in _OUTPUT_SCHEMA["key_types"]:
            return ("%(name)s output has unexpected key '%(key)s'"
                    % {"name": output_type, "key": key})

        proper_type = _OUTPUT_SCHEMA["key_types"][key]
        if not isinstance(output[key], proper_type):
            if type(proper_type) == tuple:
                return ("Value of %(name)s output %(key)s has wrong type "
                        "'%(actual_type)s', should be in %(types)r"
                        % {"name": output_type,
                           "key": key,
                           "actual_type": type(output[key]).__name__,
                           "types": tuple(t.__name__
                                          for t in proper_type)})
            return ("Value of %(name)s output %(key)s has wrong type "
                    "'%(actual_type)s', should be %(proper_type)s"
                    % {"name": output_type,
                       "key": key,
                       "actual_type": type(output[key]).__name__,
                       "proper_type": proper_type.__name__})