All scenario runs time measurement

Partially implements: blueprint collect-runtime-duration

Change-Id: I2708a5f915d5b13948f50dfad26b2463b9d5249b
Oleh Anufriiev
2014-07-08 02:19:13 +03:00
parent 3609d51a14
commit 7a3ed21049
6 changed files with 43 additions and 10 deletions

View File

@@ -197,7 +197,8 @@ class BenchmarkEngine(object):
                 target=self.consume_results,
                 args=(key, self.task, runner.result_queue, is_done))
             consumer.start()
-            runner.run(name, kw.get("context", {}), kw.get("args", {}))
+            self.duration = runner.run(
+                name, kw.get("context", {}), kw.get("args", {}))
             is_done.set()
             consumer.join()
         self.task.update_status(consts.TaskStatus.FINISHED)
@@ -216,8 +217,7 @@ class BenchmarkEngine(object):
             clients.verified_keystone()
         return self
 
-    @staticmethod
-    def consume_results(key, task, result_queue, is_done):
+    def consume_results(self, key, task, result_queue, is_done):
         """Consume scenario runner results from queue and send them to db.
 
         Has to be run from different thread simultaneously with the runner.run
@@ -238,4 +238,6 @@ class BenchmarkEngine(object):
                 break
             else:
                 time.sleep(0.1)
-        task.append_results(key, {"raw": results})
+
+        task.append_results(key, {"raw": results,
+                                  "scenario_duration": self.duration})

View File

@@ -197,6 +197,13 @@ class ScenarioRunner(object):
                  where each result is a dictionary
         """
 
+    def _wrap_run_scenario(self, cls, method_name, context, args):
+        """Whole scenario time measurement without context preparation."""
+        with rutils.Timer() as timer:
+            self._run_scenario(cls, method_name, context, args)
+
+        return timer.duration()
+
     def run(self, name, context, args):
         cls_name, method_name = name.split(".", 1)
         cls = scenario_base.Scenario.get_by_name(cls_name)
@@ -217,8 +224,10 @@ class ScenarioRunner(object):
         }
 
         args = cls.preprocess(method_name, context_obj, args)
-        base_ctx.ContextManager.run(context_obj, self._run_scenario,
-                                    cls, method_name, args)
+
+        return base_ctx.ContextManager.run(context_obj,
+                                           self._wrap_run_scenario,
+                                           cls, method_name, args)
 
     def _send_result(self, result):
         """Send partial result to consumer.

View File

@@ -198,6 +198,7 @@ class TaskCommands(object):
             print("args values:")
             pprint.pprint(key["kw"])
 
+            scenario_time = result["data"]["scenario_duration"]
             raw = result["data"]["raw"]
             table_cols = ["action", "min (sec)", "avg (sec)", "max (sec)",
                           "90 percentile", "95 percentile", "success",
@@ -235,6 +236,9 @@ class TaskCommands(object):
             if iterations_data:
                 _print_iterations_data(raw)
 
+            print(_("Whole scenario time without context preparation: "),
+                  scenario_time)
+
             # NOTE(hughsaunders): ssrs=scenario specific results
             ssrs = []
             for result in raw:
@@ -330,7 +334,10 @@ class TaskCommands(object):
                    help='Open it in browser.')
     @envutils.with_default_task_id
     def plot2html(self, task_id=None, out=None, open_it=False):
-        results = map(lambda x: {"key": x["key"], 'result': x['data']['raw']},
+        results = map(lambda x: {
+            "key": x["key"],
+            'result': x['data']['raw']
+        },
                       db.task_result_get_all_by_uuid(task_id))
 
         output_file = out or ("%s.html" % task_id)
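With this change, rally task detailed ends each scenario block with one extra line; illustrative output (value invented):

Whole scenario time without context preparation:  4.537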

View File

@@ -246,7 +246,7 @@ class ScenarioRunnerTestCase(test.TestCase):
             }
         }
 
-        expected = [context_obj, runner._run_scenario, cls,
+        expected = [context_obj, runner._wrap_run_scenario, cls,
                     method_name, config_kwargs]
         mock_ctx_manager.run.assert_called_once_with(*expected)

View File

@@ -102,7 +102,19 @@ class TaskCommandsTestCase(test.TestCase):
             "id": "task",
             "uuid": test_uuid,
             "status": "status",
-            "results": [],
+            "results": [
+                {
+                    "key": {
+                        "name": "fake_name",
+                        "pos": "fake_pos",
+                        "kw": "fake_kw"
+                    },
+                    "data": {
+                        "scenario_duration": 1.0,
+                        "raw": []
+                    }
+                }
+            ],
             "failed": False
         }
         mock_db.task_get_detailed = mock.MagicMock(return_value=value)

View File

@@ -128,6 +128,8 @@ class APITestCase(test.TestCase):
         mock_utils_runner.return_value = mock_runner = mock.Mock()
         mock_runner.result_queue = collections.deque(['fake_result'])
 
+        mock_runner.run.return_value = 42
+
         mock_osclients.Clients.return_value = fakes.FakeClients()
 
         api.start_task(self.deploy_uuid, self.task_config)
@@ -164,7 +166,8 @@ class APITestCase(test.TestCase):
                 'pos': 0,
             },
             {
-                'raw': ['fake_result'],
+                'raw': ['fake_result'],
+                'scenario_duration': 42
             }
         )