diff --git a/doc/samples/tasks/scenarios/dummy/dummy_random_fail_in_atomic.json b/doc/samples/tasks/scenarios/dummy/dummy_random_fail_in_atomic.json new file mode 100644 index 0000000000..5452b3c286 --- /dev/null +++ b/doc/samples/tasks/scenarios/dummy/dummy_random_fail_in_atomic.json @@ -0,0 +1,14 @@ +{ + "Dummy.dummy_random_fail_in_atomic": [ + { + "args": { + "exception_probability": 0.5 + }, + "runner": { + "type": "constant", + "times": 100, + "concurrency": 10 + } + } + ] +} diff --git a/doc/samples/tasks/scenarios/dummy/dummy_random_fail_in_atomic.yaml b/doc/samples/tasks/scenarios/dummy/dummy_random_fail_in_atomic.yaml new file mode 100644 index 0000000000..7b17bdb3f0 --- /dev/null +++ b/doc/samples/tasks/scenarios/dummy/dummy_random_fail_in_atomic.yaml @@ -0,0 +1,8 @@ + Dummy.dummy_random_fail_in_atomic: + - + args: + exception_probability: 0.5 + runner: + type: "constant" + times: 100 + concurrency: 10 diff --git a/rally-scenarios/rally.yaml b/rally-scenarios/rally.yaml index d7be32faf2..df70d48542 100644 --- a/rally-scenarios/rally.yaml +++ b/rally-scenarios/rally.yaml @@ -289,6 +289,15 @@ sla: max_failure_percent: 0 + Dummy.dummy_random_fail_in_atomic: + - + args: + exception_probability: 0.5 + runner: + type: "constant" + times: 50 + concurrency: 10 + FakePlugin.testplugin: - runner: diff --git a/rally/benchmark/processing/plot.py b/rally/benchmark/processing/plot.py index 5e37cd926c..eb2517f1ef 100644 --- a/rally/benchmark/processing/plot.py +++ b/rally/benchmark/processing/plot.py @@ -39,9 +39,13 @@ def _prepare_data(data, reduce_rows=1000): for k, v in d1.iteritems(): v[-1] = (v[-1] + d2[k]) / 2.0 - atomic_action_names = (data["result"][0]["atomic_actions"].keys() - if data["result"] else []) - zero_atomic_actions = dict([(a, 0) for a in atomic_action_names]) + atomic_actions = [] + for row in data["result"]: + # find first non-error result to get atomic actions names + if not row["error"] and "atomic_actions" in row: + atomic_actions = 
row["atomic_actions"].keys() + break + zero_atomic_actions = dict([(a, 0) for a in atomic_actions]) total_durations = {"duration": [], "idle_duration": []} atomic_durations = dict([(a, []) for a in zero_atomic_actions]) @@ -147,9 +151,12 @@ def _process_atomic(result, data): # all iteration. So we should take first non "error" # iteration. And get in atomitc_iter list: # [{"key": "action", "values":[]}] - stacked_area = ([{"key": a, "values": []} - for a in result["result"][0]["atomic_actions"]] - if result["result"] else []) + stacked_area = [] + for row in result["result"]: + if not row["error"] and "atomic_actions" in row: + stacked_area = [{"key": a, "values": []} + for a in row["atomic_actions"]] + break # NOTE(boris-42): pie is similiar to stacked_area, only difference is in # structure of values. In case of $error we shouldn't put diff --git a/rally/benchmark/processing/utils.py b/rally/benchmark/processing/utils.py index b504016867..f7dabc38ea 100644 --- a/rally/benchmark/processing/utils.py +++ b/rally/benchmark/processing/utils.py @@ -60,12 +60,17 @@ def get_atomic_actions_data(raw_data): :returns: dictionary containing atomic action + total duration lists for all atomic action keys """ - atomic_actions = raw_data[0]["atomic_actions"].keys() if raw_data else [] + atomic_actions = [] + for row in raw_data: + # find first non-error result to get atomic actions names + if not row["error"] and "atomic_actions" in row: + atomic_actions = row["atomic_actions"].keys() + break actions_data = {} for atomic_action in atomic_actions: actions_data[atomic_action] = [ r["atomic_actions"][atomic_action] for r in raw_data - if r["atomic_actions"][atomic_action] is not None] + if r["atomic_actions"].get(atomic_action) is not None] actions_data["total"] = [r["duration"] for r in raw_data if not r["error"]] return actions_data diff --git a/rally/benchmark/scenarios/base.py b/rally/benchmark/scenarios/base.py index 8829430bd5..a480db8153 100644 ---
a/rally/benchmark/scenarios/base.py +++ b/rally/benchmark/scenarios/base.py @@ -279,9 +279,6 @@ class AtomicAction(utils.Timer): else: name_template = name + " (%i)" atomic_action_iteration = 2 - with open("1.txt", "a") as f: - f.write("Enter\n") - f.write(str(dir(self.scenario_instance)) + "\n") while self.scenario_instance._atomic_action_registered( name_template % atomic_action_iteration): atomic_action_iteration += 1 diff --git a/rally/benchmark/scenarios/dummy/dummy.py b/rally/benchmark/scenarios/dummy/dummy.py index a2d7f66845..fc047c8a47 100644 --- a/rally/benchmark/scenarios/dummy/dummy.py +++ b/rally/benchmark/scenarios/dummy/dummy.py @@ -76,3 +76,13 @@ class Dummy(base.Scenario): } err = "" return {"data": out, "errors": err} + + @base.atomic_action_timer("dummy_fail_test") + def _random_fail_emitter(self, exception_probability): + if random.random() < exception_probability: + raise KeyError("Dummy test exception") + + @base.scenario() + def dummy_random_fail_in_atomic(self, exception_probability=0.5): + self._random_fail_emitter(exception_probability) + self._random_fail_emitter(exception_probability) diff --git a/rally/cmd/commands/task.py b/rally/cmd/commands/task.py index ec09a2f8c9..308c1e4bbc 100644 --- a/rally/cmd/commands/task.py +++ b/rally/cmd/commands/task.py @@ -131,13 +131,18 @@ class TaskCommands(object): :param iterations_data: print detailed results for each iteration Prints detailed information of task. 
""" - def _print_iterations_data(raw): - headers = ['iteration', "full duration"] - float_cols = ['full duration'] - for i in range(0, len(raw)): - if raw[i]['atomic_actions']: - for (c, a) in enumerate(raw[i]['atomic_actions'], 1): - action = str(c) + "-" + a['action'] + def _print_iterations_data(raw_data): + headers = ["iteration", "full duration"] + float_cols = ["full duration"] + atomic_actions = [] + for row in raw_data: + # find first non-error result to get atomic actions names + if not row["error"] and "atomic_actions" in row: + atomic_actions = row["atomic_actions"].keys() + for row in raw_data: + if row["atomic_actions"]: + for (c, a) in enumerate(atomic_actions, 1): + action = "%(no)i. %(action)s" % {"no": c, "action": a} headers.append(action) float_cols.append(action) break @@ -145,18 +150,16 @@ class TaskCommands(object): formatters = dict(zip(float_cols, [cliutils.pretty_float_formatter(col, 3) for col in float_cols])) - for (c, r) in enumerate(raw, 1): + for (c, r) in enumerate(raw_data, 1): dlist = [c] - d = [] - if r['atomic_actions']: - for l in r['atomic_actions']: - d.append(l['duration']) - dlist.append(sum(d)) - dlist = dlist + d + if r["atomic_actions"]: + dlist.append(r["duration"]) + for action in atomic_actions: + dlist.append(r["atomic_actions"].get(action) or 0) table_rows.append(rutils.Struct(**dict(zip(headers, dlist)))) else: - data = dlist + ["N/A" for i in range(1, len(headers))] + data = dlist + [None for i in range(1, len(headers))] table_rows.append(rutils.Struct(**dict(zip(headers, data)))) common_cliutils.print_list(table_rows, @@ -227,7 +230,8 @@ class TaskCommands(object): "%.1f%%" % (len(durations) * 100.0 / len(raw)), len(raw)] else: - data = [action, None, None, None, None, None, 0, len(raw)] + data = [action, None, None, None, None, None, + "0.0%", len(raw)] table_rows.append(rutils.Struct(**dict(zip(table_cols, data)))) common_cliutils.print_list(table_rows, fields=table_cols, diff --git 
a/tests/benchmark/scenarios/dummy/test_dummy.py b/tests/benchmark/scenarios/dummy/test_dummy.py index 26e67677d3..17ccae0095 100644 --- a/tests/benchmark/scenarios/dummy/test_dummy.py +++ b/tests/benchmark/scenarios/dummy/test_dummy.py @@ -57,3 +57,14 @@ class DummyTestCase(test.TestCase): # Since the data is generated in random, # checking for not None self.assertNotEqual(result['data'], None) + + def test_dummy_random_fail_in_atomic(self): + scenario = dummy.Dummy() + + for i in range(10): + scenario.dummy_random_fail_in_atomic(exception_probability=0) + + for i in range(10): + self.assertRaises(KeyError, + scenario.dummy_random_fail_in_atomic, + exception_probability=1) diff --git a/tests_ci/rally-gate.sh b/tests_ci/rally-gate.sh index 5bc7604b00..51603a31db 100755 --- a/tests_ci/rally-gate.sh +++ b/tests_ci/rally-gate.sh @@ -54,4 +54,6 @@ rally task results | python -m json.tool > rally-plot/results.json gzip -9 rally-plot/results.json rally task detailed > rally-plot/detailed.txt gzip -9 rally-plot/detailed.txt +rally task detailed --iterations-data > rally-plot/detailed_with_iterations.txt +gzip -9 rally-plot/detailed_with_iterations.txt rally task sla_check | tee rally-plot/sla.txt diff --git a/tests_ci/rally-gate/index.html b/tests_ci/rally-gate/index.html index ebc02ca77f..b8431dba5f 100644 --- a/tests_ci/rally-gate/index.html +++ b/tests_ci/rally-gate/index.html @@ -34,6 +34,7 @@
  • Rally input task
  • HTML report with graphs $ rally task plot2html
  • Plain text aggregated data $ rally task detailed
  • +
  • Plain text aggregated data with detailed iterations $ rally task detailed --iterations-data
  • SLA checks $ rally task sla_check
  • Full result in json $ rally task results
  • diff --git a/tests_ci/test_cli_task.py b/tests_ci/test_cli_task.py index 23cf56e63a..ee19fd712e 100644 --- a/tests_ci/test_cli_task.py +++ b/tests_ci/test_cli_task.py @@ -24,14 +24,14 @@ class TaskTestCase(unittest.TestCase): def _get_sample_task_config(self): return { - "KeystoneBasic.create_and_list_users": [ + "Dummy.dummy_random_fail_in_atomic": [ { "args": { - "name_length": 10 + "exception_probability": 0.5 }, "runner": { "type": "constant", - "times": 5, + "times": 100, "concurrency": 5 } } ] @@ -50,8 +50,11 @@ cfg = self._get_sample_task_config() config = utils.TaskConfig(cfg) rally("task start --task %s" % config.filename) - self.assertIn("KeystoneBasic.create_and_list_users", - rally("task detailed")) + detailed = rally("task detailed") + self.assertIn("Dummy.dummy_random_fail_in_atomic", detailed) + self.assertIn("dummy_fail_test (2)", detailed) + detailed_iterations_data = rally("task detailed --iterations-data") + self.assertIn("2. dummy_fail_test (2)", detailed_iterations_data) def test_results(self): rally = utils.Rally()