Merge "All scenario runs time measurement"

Jenkins 2014-08-08 14:04:09 +00:00 committed by Gerrit Code Review
commit 49e0bc9ffd
6 changed files with 43 additions and 10 deletions
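
Taken together, the diff below makes the runner time the scenario body (context preparation excluded), hand that duration back to the engine, and store it alongside the raw results as "scenario_duration". A minimal sketch of that flow, using hypothetical stand-in names rather than the actual Rally classes:

    import time


    def timed_run(scenario, *args):
        """Run the scenario body and return the elapsed wall-clock seconds."""
        start = time.time()
        scenario(*args)  # contexts are assumed to be prepared before this call
        return time.time() - start


    def build_result(raw_results, duration):
        """Shape the payload the engine now stores in the database."""
        return {"raw": raw_results, "scenario_duration": duration}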

View File

@@ -197,7 +197,8 @@ class BenchmarkEngine(object):
target=self.consume_results,
args=(key, self.task, runner.result_queue, is_done))
consumer.start()
runner.run(name, kw.get("context", {}), kw.get("args", {}))
self.duration = runner.run(
name, kw.get("context", {}), kw.get("args", {}))
is_done.set()
consumer.join()
self.task.update_status(consts.TaskStatus.FINISHED)
@@ -216,8 +217,7 @@ class BenchmarkEngine(object):
clients.verified_keystone()
return self
@staticmethod
def consume_results(key, task, result_queue, is_done):
def consume_results(self, key, task, result_queue, is_done):
"""Consume scenario runner results from queue and send them to db.
Has to be run in a different thread, simultaneously with the runner.run
@@ -238,4 +238,6 @@ class BenchmarkEngine(object):
break
else:
time.sleep(0.1)
task.append_results(key, {"raw": results})
task.append_results(key, {"raw": results,
"scenario_duration": self.duration})

View File

@@ -197,6 +197,13 @@ class ScenarioRunner(object):
where each result is a dictionary
"""
def _wrap_run_scenario(self, cls, method_name, context, args):
"""Whole scenario time measurement without context preparation."""
with rutils.Timer() as timer:
self._run_scenario(cls, method_name, context, args)
return timer.duration()
def run(self, name, context, args):
cls_name, method_name = name.split(".", 1)
cls = scenario_base.Scenario.get_by_name(cls_name)
@@ -217,8 +224,10 @@ class ScenarioRunner(object):
}
args = cls.preprocess(method_name, context_obj, args)
base_ctx.ContextManager.run(context_obj, self._run_scenario,
cls, method_name, args)
return base_ctx.ContextManager.run(context_obj,
self._wrap_run_scenario,
cls, method_name, args)
def _send_result(self, result):
"""Send partial result to consumer.

View File

@@ -198,6 +198,7 @@ class TaskCommands(object):
print("args values:")
pprint.pprint(key["kw"])
scenario_time = result["data"]["scenario_duration"]
raw = result["data"]["raw"]
table_cols = ["action", "min (sec)", "avg (sec)", "max (sec)",
"90 percentile", "95 percentile", "success",
@@ -235,6 +236,9 @@ class TaskCommands(object):
if iterations_data:
_print_iterations_data(raw)
print(_("Whole scenario time without context preparation: "),
scenario_time)
# NOTE(hughsaunders): ssrs=scenario specific results
ssrs = []
for result in raw:
@@ -330,7 +334,10 @@ class TaskCommands(object):
help='Open it in browser.')
@envutils.with_default_task_id
def plot2html(self, task_id=None, out=None, open_it=False):
results = map(lambda x: {"key": x["key"], 'result': x['data']['raw']},
results = map(lambda x: {
"key": x["key"],
'result': x['data']['raw']
},
db.task_result_get_all_by_uuid(task_id))
output_file = out or ("%s.html" % task_id)
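
Note that the reflowed plot2html mapping above only forwards "raw"; the new "scenario_duration" field is not passed to the HTML report. With a hypothetical row from db.task_result_get_all_by_uuid (field values are illustrative, not taken from the source), the reshaping looks like this:

    row = {
        "key": {"name": "Dummy.dummy", "pos": 0, "kw": {}},              # illustrative
        "data": {"raw": [{"duration": 1.2}], "scenario_duration": 4.2},  # illustrative
    }

    plot_input = {"key": row["key"], "result": row["data"]["raw"]}
    # plot_input carries no scenario_duration, matching the mapping in the diff.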

View File

@@ -246,7 +246,7 @@ class ScenarioRunnerTestCase(test.TestCase):
}
}
expected = [context_obj, runner._run_scenario, cls,
expected = [context_obj, runner._wrap_run_scenario, cls,
method_name, config_kwargs]
mock_ctx_manager.run.assert_called_once_with(*expected)

View File

@@ -102,7 +102,19 @@ class TaskCommandsTestCase(test.TestCase):
"id": "task",
"uuid": test_uuid,
"status": "status",
"results": [],
"results": [
{
"key": {
"name": "fake_name",
"pos": "fake_pos",
"kw": "fake_kw"
},
"data": {
"scenario_duration": 1.0,
"raw": []
}
}
],
"failed": False
}
mock_db.task_get_detailed = mock.MagicMock(return_value=value)

View File

@@ -128,6 +128,8 @@ class APITestCase(test.TestCase):
mock_utils_runner.return_value = mock_runner = mock.Mock()
mock_runner.result_queue = collections.deque(['fake_result'])
mock_runner.run.return_value = 42
mock_osclients.Clients.return_value = fakes.FakeClients()
api.start_task(self.deploy_uuid, self.task_config)
@@ -164,7 +166,8 @@ class APITestCase(test.TestCase):
'pos': 0,
},
{
'raw': ['fake_result']
'raw': ['fake_result'],
'scenario_duration': 42
}
)
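
The API test change closes the loop: the runner mock's run() returns 42, and the assertion expects that value to surface as "scenario_duration" in the stored payload. A condensed, hedged restatement of that wiring (the surrounding test setup is elided in the diff):

    import collections

    import mock  # the standalone mock package used by these 2014-era tests

    mock_runner = mock.Mock()
    mock_runner.result_queue = collections.deque(['fake_result'])
    mock_runner.run.return_value = 42  # becomes the measured scenario duration

    # After the engine drains the queue, the stored payload is expected to be:
    expected_payload = {'raw': ['fake_result'], 'scenario_duration': 42}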