diff --git a/rally/benchmark/processing/plot.py b/rally/benchmark/processing/plot.py
index 46fb0b68d5..4204ec1507 100644
--- a/rally/benchmark/processing/plot.py
+++ b/rally/benchmark/processing/plot.py
@@ -22,14 +22,14 @@ import mako.template
from rally.benchmark.processing.charts import histogram as histo
-def _process_main_time(result):
+def _process_main_duration(result):
pie = filter(lambda t: not t["error"], result["result"])
stacked_area = map(
- lambda t: {"idle_time": 0, "time": 0} if t["error"] else t,
+ lambda t: {"idle_duration": 0, "duration": 0} if t["error"] else t,
result["result"])
histogram_data = filter(None, map(
- lambda t: t["time"] if not t["error"] else None,
+ lambda t: t["duration"] if not t["error"] else None,
result["result"]))
histograms = []
@@ -48,12 +48,12 @@ def _process_main_time(result):
"iter": [
{
"key": "duration",
- "values": [[i + 1, v["time"]]
+ "values": [[i + 1, v["duration"]]
for i, v in enumerate(stacked_area)]
},
{
"key": "idle_duration",
- "values": [[i + 1, v["idle_time"]]
+ "values": [[i + 1, v["idle_duration"]]
for i, v in enumerate(stacked_area)]
}
],
@@ -68,7 +68,7 @@ def _process_main_time(result):
}
-def _process_atomic_time(result):
+def _process_atomic(result):
def avg(lst, key=None):
lst = lst if not key else map(lambda x: x[key], lst)
@@ -76,30 +76,30 @@ def _process_atomic_time(result):
# NOTE(boris-42): In our result["result"] we have next structure:
# {"error": NoneOrDict,
- # "atomic_actions_time": [
+ # "atomic_actions": [
# {"action": String, "duration": Float},
# ...
# ]}
# Our goal is to get next structure:
- # [{"key": $atomic_actions_time.action,
- # "values": [[order, $atomic_actions_time.duration
+ # [{"key": $atomic_actions.action,
+ # "values": [[order, $atomic_actions.duration
# if not $error else 0], ...}]
#
- # Order of actions in "atomic_action_time" is similiar for
+ # Order of actions in "atomic_actions" is similar for
# all iteration. So we should take first non "error"
# iteration. And get in atomitc_iter list:
# [{"key": "action", "values":[]}]
stacked_area = []
for r in result["result"]:
if not r["error"]:
- for action in r["atomic_actions_time"]:
+ for action in r["atomic_actions"]:
stacked_area.append({"key": action["action"], "values": []})
break
# NOTE(boris-42): pie is similiar to stacked_area, only difference is in
# structure of values. In case of $error we shouldn't put
# anything in pie. In case of non error we should put just
- # $atomic_actions_time.duration (without order)
+ # $atomic_actions.duration (without order)
pie = []
histogram_data = []
if stacked_area:
@@ -113,7 +113,7 @@ def _process_atomic_time(result):
continue
# in case of non error put real durations to pie and stacked area
- for j, action in enumerate(data["atomic_actions_time"]):
+ for j, action in enumerate(data["atomic_actions"]):
pie[j]["values"].append(action["duration"])
stacked_area[j]["values"].append([i + 1, action["duration"]])
histogram_data[j]["values"].append(action["duration"])
@@ -151,8 +151,8 @@ def _process_results(results):
output.append({
"name": "%s (task #%d)" % (info["name"], info["pos"]),
"config": info["kw"],
- "time": _process_main_time(result),
- "atomic": _process_atomic_time(result)
+ "duration": _process_main_duration(result),
+ "atomic": _process_atomic(result)
})
return output
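
The renamed keys flow straight into these chart builders. A minimal standalone sketch of the filtering that _process_main_duration applies to result["result"] (not part of the patch; the sample records are illustrative):

    # Illustrative per-iteration records using the renamed keys.
    raw = [
        {"error": [], "duration": 1.0, "idle_duration": 0.2},
        {"error": True, "duration": 1.0, "idle_duration": 1.0},
        {"error": [], "duration": 2.0, "idle_duration": 0.3},
    ]

    # Only successful iterations feed the pie chart.
    pie = [r for r in raw if not r["error"]]

    # Errored iterations are zeroed so the stacked area keeps every position.
    stacked_area = [{"idle_duration": 0, "duration": 0} if r["error"] else r
                    for r in raw]

    # Only successful durations feed the histogram.
    histogram_data = [r["duration"] for r in raw if not r["error"]]

    print(len(pie), len(stacked_area), histogram_data)  # 2 3 [1.0, 2.0]
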
diff --git a/rally/benchmark/processing/src/index.mako b/rally/benchmark/processing/src/index.mako
index 6df598f9eb..78c5207074 100644
--- a/rally/benchmark/processing/src/index.mako
+++ b/rally/benchmark/processing/src/index.mako
@@ -141,21 +141,21 @@
.end()
// Find
- var total_histogram_select = $("#results > .results > .total_time > .histogram_select");
+ var total_histogram_select = $("#results > .results > .total_duration > .histogram_select");
var atomic_histogram_select = $("#results > .results > .atomic > .histogram_select");
// Populate
- for (var i = 0; i < d.time.histogram.length; i++) {
+ for (var i = 0; i < d.duration.histogram.length; i++) {
var option = document.createElement('option');
- option.text = d.time.histogram[i].method;
+ option.text = d.duration.histogram[i].method;
total_histogram_select.get(0).add(option);
atomic_histogram_select.get(0).add(option.cloneNode(true));
}
// Bind onchange event
total_histogram_select.change(function(){
var i = total_histogram_select.get(0).selectedIndex;
- $("#results > .results > .total_time > .histogram").empty();
- draw_histogram("#results .total_time .histogram", function(){
- return [d["time"]["histogram"][i]];
+ $("#results > .results > .total_duration > .histogram").empty();
+ draw_histogram("#results .total_duration .histogram", function(){
+ return [d["duration"]["histogram"][i]];
});
});
atomic_histogram_select.change(function(){
@@ -170,16 +170,16 @@
});
});
- draw_stacked("#results .total_time .stackedarea", function(){
- return d["time"]["iter"]
+ draw_stacked("#results .total_duration .stackedarea", function(){
+ return d["duration"]["iter"]
})
- draw_pie("#results .total_time .pie", function(){
- return d["time"]["pie"]
+ draw_pie("#results .total_duration .pie", function(){
+ return d["duration"]["pie"]
})
- draw_histogram("#results .total_time .histogram", function(){
- return [d["time"]["histogram"][0]];
+ draw_histogram("#results .total_duration .histogram", function(){
+ return [d["duration"]["histogram"][0]];
})
draw_pie("#results .atomic .pie", function(){
@@ -223,7 +223,7 @@
+
diff --git a/rally/benchmark/runners/base.py b/rally/benchmark/runners/base.py
index 39fbc94e23..37542444d7 100644
--- a/rally/benchmark/runners/base.py
+++ b/rally/benchmark/runners/base.py
@@ -69,11 +69,11 @@ def _run_scenario_once(args):
{"task": context["task"]["uuid"], "iteration": iteration,
"status": status})
- return {"time": timer.duration() - scenario.idle_time(),
- "idle_time": scenario.idle_time(),
+ return {"duration": timer.duration() - scenario.idle_duration(),
+ "idle_duration": scenario.idle_duration(),
"error": error,
"scenario_output": scenario_output,
- "atomic_actions_time": scenario.atomic_actions_time()}
+ "atomic_actions": scenario.atomic_actions()}
class ScenarioRunnerResult(list):
@@ -85,10 +85,10 @@ class ScenarioRunnerResult(list):
"items": {
"type": "object",
"properties": {
- "time": {
+ "duration": {
"type": "number"
},
- "idle_time": {
+ "idle_duration": {
"type": "number"
},
"scenario_output": {
@@ -106,7 +106,7 @@ class ScenarioRunnerResult(list):
},
"additionalProperties": False
},
- "atomic_actions_time": {
+ "atomic_actions": {
"type": "array",
"items": {
"type": "object",
diff --git a/rally/benchmark/runners/constant.py b/rally/benchmark/runners/constant.py
index 4f837644a9..e9a80f99fb 100644
--- a/rally/benchmark/runners/constant.py
+++ b/rally/benchmark/runners/constant.py
@@ -89,7 +89,7 @@ class ConstantScenarioRunner(base.ScenarioRunner):
try:
result = iter_result.next(timeout)
except multiprocessing.TimeoutError as e:
- result = {"time": timeout, "idle_time": 0,
+ result = {"duration": timeout, "idle_duration": 0,
"error": utils.format_exc(e)}
results.append(result)
@@ -162,7 +162,7 @@ class ConstantForDurationScenarioRunner(base.ScenarioRunner):
try:
result = iter_result.next(timeout)
except multiprocessing.TimeoutError as e:
- result = {"time": timeout, "idle_time": 0,
+ result = {"duration": timeout, "idle_duration": 0,
"error": utils.format_exc(e)}
results_queue.append(result)
diff --git a/rally/benchmark/runners/periodic.py b/rally/benchmark/runners/periodic.py
index c5576c0a50..a502e04747 100644
--- a/rally/benchmark/runners/periodic.py
+++ b/rally/benchmark/runners/periodic.py
@@ -86,7 +86,7 @@ class PeriodicScenarioRunner(base.ScenarioRunner):
try:
result = async_result.get(timeout=timeout)
except multiprocessing.TimeoutError as e:
- result = {"time": timeout, "idle_time": 0,
+ result = {"duration": timeout, "idle_duration": 0,
"error": utils.format_exc(e)}
results.append(result)
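
The constant and periodic runners share the same timeout fallback, now emitting the renamed keys. A self-contained sketch of that path (traceback.format_exc stands in for rally's utils.format_exc, and the timeout value is illustrative):

    import multiprocessing
    import traceback

    timeout = 30
    results = []
    try:
        # Simulate an iteration that never returned within its deadline.
        raise multiprocessing.TimeoutError()
    except multiprocessing.TimeoutError:
        # The runner appends a synthetic record with the renamed keys.
        results.append({"duration": timeout, "idle_duration": 0,
                        "error": traceback.format_exc()})

    print(results[0]["duration"], results[0]["idle_duration"])  # 30 0
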
diff --git a/rally/benchmark/scenarios/base.py b/rally/benchmark/scenarios/base.py
index e49bad54de..21d6ca3f39 100644
--- a/rally/benchmark/scenarios/base.py
+++ b/rally/benchmark/scenarios/base.py
@@ -45,8 +45,8 @@ class Scenario(object):
self._context = context
self._admin_clients = admin_clients
self._clients = clients
- self._idle_time = 0
- self._atomic_actions_time = []
+ self._idle_duration = 0
+ self._atomic_actions = []
@staticmethod
def get_by_name(name):
@@ -150,7 +150,7 @@ class Scenario(object):
"""Performs a time.sleep() call for a random amount of seconds.
The exact time is chosen uniformly randomly from the interval
- [min_sleep; max_sleep). The method also updates the idle_time
+ [min_sleep; max_sleep). The method also updates the idle_duration
variable to take into account the overall time spent on sleeping.
:param min_sleep: Minimum sleep time in seconds (non-negative)
@@ -162,17 +162,17 @@ class Scenario(object):
sleep_time = random.uniform(min_sleep, max_sleep)
time.sleep(sleep_time)
- self._idle_time += sleep_time
+ self._idle_duration += sleep_time
- def idle_time(self):
+ def idle_duration(self):
"""Returns duration of all sleep_between."""
- return self._idle_time
+ return self._idle_duration
- def _add_atomic_actions_time(self, name, duration):
+ def _add_atomic_actions(self, name, duration):
"""Adds the duration of an atomic action by its 'name'."""
- self._atomic_actions_time.append(
+ self._atomic_actions.append(
{'action': name, 'duration': duration})
- def atomic_actions_time(self):
- """Returns the duration of each atomic action."""
- return self._atomic_actions_time
+ def atomic_actions(self):
+ """Returns the content of each atomic action."""
+ return self._atomic_actions
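
A compressed, self-contained sketch of the renamed idle-time bookkeeping (MiniScenario is a stand-in, not rally's Scenario; the random/time usage mirrors sleep_between above):

    import random
    import time


    class MiniScenario(object):
        """Stand-in reduced to the renamed idle_duration field."""

        def __init__(self):
            self._idle_duration = 0

        def sleep_between(self, min_sleep, max_sleep):
            sleep_time = random.uniform(min_sleep, max_sleep)
            time.sleep(sleep_time)
            self._idle_duration += sleep_time

        def idle_duration(self):
            return self._idle_duration


    s = MiniScenario()
    s.sleep_between(0.001, 0.002)
    s.sleep_between(0.001, 0.002)
    print(0.002 <= s.idle_duration() <= 0.004)  # True
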
diff --git a/rally/benchmark/scenarios/utils.py b/rally/benchmark/scenarios/utils.py
index 280f0baa87..dfb2341b6b 100644
--- a/rally/benchmark/scenarios/utils.py
+++ b/rally/benchmark/scenarios/utils.py
@@ -137,7 +137,7 @@ def atomic_action_timer(name):
def func_atomic_actions(self, *args, **kwargs):
with utils.Timer() as timer:
f = func(self, *args, **kwargs)
- self._add_atomic_actions_time(name, timer.duration())
+ self._add_atomic_actions(name, timer.duration())
return f
return func_atomic_actions
return wrap
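
For reference, a simplified, runnable sketch of how the decorator ends up calling the renamed _add_atomic_actions (time.time() stands in for utils.Timer, and DemoScenario is hypothetical):

    import functools
    import time


    def atomic_action_timer(name):
        def wrap(func):
            @functools.wraps(func)
            def func_atomic_actions(self, *args, **kwargs):
                start = time.time()
                result = func(self, *args, **kwargs)
                self._add_atomic_actions(name, time.time() - start)
                return result
            return func_atomic_actions
        return wrap


    class DemoScenario(object):
        def __init__(self):
            self._atomic_actions = []

        def _add_atomic_actions(self, name, duration):
            self._atomic_actions.append({"action": name, "duration": duration})

        @atomic_action_timer("demo.sleep")
        def do_sleep(self):
            time.sleep(0.01)


    s = DemoScenario()
    s.do_sleep()
    print(s._atomic_actions)  # [{'action': 'demo.sleep', 'duration': ~0.01}]
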
diff --git a/rally/cmd/commands/task.py b/rally/cmd/commands/task.py
index 28966edc4c..3ca2b91e8e 100644
--- a/rally/cmd/commands/task.py
+++ b/rally/cmd/commands/task.py
@@ -108,11 +108,11 @@ class TaskCommands(object):
:param no_aggregation: do not aggregate atomic operations
Prints detailed information of task.
"""
- def _print_atomic_actions_time_no_aggregation(raw):
+ def _print_atomic_actions_no_aggregation(raw):
headers = ['iteration', "full duration"]
for i in range(0, len(raw)):
- if raw[i]['atomic_actions_time']:
- for (c, a) in enumerate(raw[i]['atomic_actions_time'], 1):
+ if raw[i]['atomic_actions']:
+ for (c, a) in enumerate(raw[i]['atomic_actions'], 1):
action = str(c) + "-" + a['action']
headers.append(action)
break
@@ -120,8 +120,8 @@ class TaskCommands(object):
for (c, r) in enumerate(raw, 1):
dlist = [c]
d = []
- if r['atomic_actions_time']:
- for l in r['atomic_actions_time']:
+ if r['atomic_actions']:
+ for l in r['atomic_actions']:
d.append(l['duration'])
dlist.append(sum(d))
dlist = dlist + d
@@ -133,17 +133,17 @@ class TaskCommands(object):
print(atomic_action_table)
print()
- def _print_atomic_actions_time_aggregation(raw):
- atime_merged = []
+ def _print_atomic_actions_aggregation(raw):
+ aduration_merged = []
for r in raw:
- if 'atomic_actions_time' in r:
- for a in r['atomic_actions_time']:
- atime_merged.append(a)
+ if 'atomic_actions' in r:
+ for a in r['atomic_actions']:
+ aduration_merged.append(a)
- times_by_action = collections.defaultdict(list)
- for at in atime_merged:
- times_by_action[at['action']].append(at['duration'])
- if times_by_action:
+ durations_by_action = collections.defaultdict(list)
+ for at in aduration_merged:
+ durations_by_action[at['action']].append(at['duration'])
+ if durations_by_action:
atomic_action_table = prettytable.PrettyTable(
['action',
'count',
@@ -152,7 +152,7 @@ class TaskCommands(object):
'min (sec)',
'90 percentile',
'95 percentile'])
- for k, v in times_by_action.iteritems():
+ for k, v in durations_by_action.iteritems():
atomic_action_table.add_row([k,
len(v),
max(v),
@@ -163,11 +163,11 @@ class TaskCommands(object):
print(atomic_action_table)
print()
- def _print_atomic_actions_time(raw):
+ def _print_atomic_actions(raw):
if no_aggregation:
- _print_atomic_actions_time_no_aggregation(raw)
+ _print_atomic_actions_no_aggregation(raw)
else:
- _print_atomic_actions_time_aggregation(raw)
+ _print_atomic_actions_aggregation(raw)
if task_id == "last":
task = db.task_get_detailed_last()
@@ -207,11 +207,11 @@ class TaskCommands(object):
print("args values:")
pprint.pprint(key["kw"])
- _print_atomic_actions_time(result["data"]["raw"])
+ _print_atomic_actions(result["data"]["raw"])
raw = result["data"]["raw"]
- times = map(lambda x: x['time'],
- filter(lambda r: not r['error'], raw))
+ durations = map(lambda x: x['duration'],
+ filter(lambda r: not r['error'], raw))
table = prettytable.PrettyTable(["max (sec)",
"avg (sec)",
"min (sec)",
@@ -219,13 +219,13 @@ class TaskCommands(object):
"95 percentile",
"success/total",
"total times"])
- if times:
- table.add_row([max(times),
- sum(times) / len(times),
- min(times),
- percentile(times, 0.90),
- percentile(times, 0.95),
- float(len(times)) / len(raw),
+ if durations:
+ table.add_row([max(durations),
+ sum(durations) / len(durations),
+ min(durations),
+ percentile(durations, 0.90),
+ percentile(durations, 0.95),
+ float(len(durations)) / len(raw),
len(raw)])
else:
table.add_row(['n/a', 'n/a', 'n/a', 'n/a', 'n/a', 0, len(raw)])
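
The aggregation path boils down to grouping durations by action name. A minimal sketch of that step with the renamed key (sample data is illustrative, and the prettytable rendering is omitted):

    import collections

    raw = [
        {"atomic_actions": [{"action": "nova.boot_server", "duration": 3.1},
                            {"action": "nova.delete_server", "duration": 1.4}]},
        {"atomic_actions": [{"action": "nova.boot_server", "duration": 2.9},
                            {"action": "nova.delete_server", "duration": 1.6}]},
    ]

    durations_by_action = collections.defaultdict(list)
    for r in raw:
        for a in r.get("atomic_actions", []):
            durations_by_action[a["action"]].append(a["duration"])

    for action, values in durations_by_action.items():
        print(action, len(values), max(values), min(values),
              sum(values) / len(values))
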
diff --git a/tests/benchmark/processing/test_plot.py b/tests/benchmark/processing/test_plot.py
index 89ffa34c5d..5ccb199861 100644
--- a/tests/benchmark/processing/test_plot.py
+++ b/tests/benchmark/processing/test_plot.py
@@ -49,16 +49,16 @@ class PlotTestCase(test.TestCase):
mock_open.assert_called_once_with("%s/src/index.mako"
% mock_dirname.return_value)
- @mock.patch("rally.benchmark.processing.plot._process_atomic_time")
- @mock.patch("rally.benchmark.processing.plot._process_main_time")
- def test__process_results(self, mock_main_time, mock_atomic_time):
+ @mock.patch("rally.benchmark.processing.plot._process_atomic")
+ @mock.patch("rally.benchmark.processing.plot._process_main_duration")
+ def test__process_results(self, mock_main_duration, mock_atomic):
results = [
{"key": {"name": "n1", "pos": 1, "kw": "config1"}},
{"key": {"name": "n2", "pos": 2, "kw": "config2"}}
]
- mock_main_time.return_value = "main_time"
- mock_atomic_time.return_value = "main_atomic"
+ mock_main_duration.return_value = "main_duration"
+ mock_atomic.return_value = "main_atomic"
output = plot._process_results(results)
@@ -66,8 +66,8 @@ class PlotTestCase(test.TestCase):
self.assertEqual(output[i], {
"name": "%s (task #%d)" % (r["key"]["name"], r["key"]["pos"]),
"config": r["key"]["kw"],
- "time": mock_main_time.return_value,
- "atomic": mock_atomic_time.return_value
+ "duration": mock_main_duration.return_value,
+ "atomic": mock_atomic.return_value
})
def test__process_main_time(self):
@@ -75,23 +75,23 @@ class PlotTestCase(test.TestCase):
"result": [
{
"error": [],
- "time": 1,
- "idle_time": 2
+ "duration": 1,
+ "idle_duration": 2
},
{
"error": True,
- "time": 1,
- "idle_time": 1
+ "duration": 1,
+ "idle_duration": 1
},
{
"error": [],
- "time": 2,
- "idle_time": 3
+ "duration": 2,
+ "idle_duration": 3
}
]
}
- output = plot._process_main_time(result)
+ output = plot._process_main_duration(result)
self.assertEqual(output, {
"pie": [
@@ -138,21 +138,21 @@ class PlotTestCase(test.TestCase):
"result": [
{
"error": [],
- "atomic_actions_time": [
+ "atomic_actions": [
{"action": "action1", "duration": 1},
{"action": "action2", "duration": 2}
]
},
{
"error": ["some", "error", "occurred"],
- "atomic_actions_time": [
+ "atomic_actions": [
{"action": "action1", "duration": 1},
{"action": "action2", "duration": 2}
]
},
{
"error": [],
- "atomic_actions_time": [
+ "atomic_actions": [
{"action": "action1", "duration": 3},
{"action": "action2", "duration": 4}
]
@@ -160,7 +160,7 @@ class PlotTestCase(test.TestCase):
]
}
- output = plot._process_atomic_time(result)
+ output = plot._process_atomic(result)
self.assertEqual(output, {
"histogram": [
diff --git a/tests/benchmark/runners/test_base.py b/tests/benchmark/runners/test_base.py
index f1e1a1e082..4a95cb005c 100644
--- a/tests/benchmark/runners/test_base.py
+++ b/tests/benchmark/runners/test_base.py
@@ -59,9 +59,9 @@ class ScenarioHelpersTestCase(test.TestCase):
expected_calls = [
mock.call(context=context, admin_clients="cl", clients="cl"),
mock.call().test(),
- mock.call().idle_time(),
- mock.call().idle_time(),
- mock.call().atomic_actions_time()
+ mock.call().idle_duration(),
+ mock.call().idle_duration(),
+ mock.call().atomic_actions()
]
scenario_cls.assert_has_calls(expected_calls, any_order=True)
@@ -75,11 +75,11 @@ class ScenarioHelpersTestCase(test.TestCase):
result = base._run_scenario_once(args)
expected_reuslt = {
- "time": fakes.FakeTimer().duration(),
- "idle_time": 0,
+ "duration": fakes.FakeTimer().duration(),
+ "idle_duration": 0,
"error": [],
"scenario_output": {},
- "atomic_actions_time": []
+ "atomic_actions": []
}
self.assertEqual(expected_reuslt, result)
@@ -93,11 +93,11 @@ class ScenarioHelpersTestCase(test.TestCase):
result = base._run_scenario_once(args)
expected_reuslt = {
- "time": fakes.FakeTimer().duration(),
- "idle_time": 0,
+ "duration": fakes.FakeTimer().duration(),
+ "idle_duration": 0,
"error": [],
"scenario_output": fakes.FakeScenario().with_output(),
- "atomic_actions_time": []
+ "atomic_actions": []
}
self.assertEqual(expected_reuslt, result)
@@ -110,10 +110,10 @@ class ScenarioHelpersTestCase(test.TestCase):
result = base._run_scenario_once(args)
expected_error = result.pop("error")
expected_reuslt = {
- "time": fakes.FakeTimer().duration(),
- "idle_time": 0,
+ "duration": fakes.FakeTimer().duration(),
+ "idle_duration": 0,
"scenario_output": {},
- "atomic_actions_time": []
+ "atomic_actions": []
}
self.assertEqual(expected_reuslt, result)
self.assertEqual(expected_error[:2],
@@ -125,23 +125,23 @@ class ScenarioRunnerResultTestCase(test.TestCase):
def test_validate(self):
config = [
{
- "time": 1.0,
- "idle_time": 1.0,
+ "duration": 1.0,
+ "idle_duration": 1.0,
"scenario_output": {
"data": {"test": 1.0},
"errors": "test error string 1"
},
- "atomic_actions_time": [{"action": "test1", "duration": 1.0}],
+ "atomic_actions": [{"action": "test1", "duration": 1.0}],
"error": []
},
{
- "time": 2.0,
- "idle_time": 2.0,
+ "duration": 2.0,
+ "idle_duration": 2.0,
"scenario_output": {
"data": {"test": 2.0},
"errors": "test error string 2"
},
- "atomic_actions_time": [{"action": "test2", "duration": 2.0}],
+ "atomic_actions": [{"action": "test2", "duration": 2.0}],
"error": ["a", "b", "c"]
}
]
diff --git a/tests/benchmark/runners/test_serial.py b/tests/benchmark/runners/test_serial.py
index c73b13395d..905a6d6ab0 100644
--- a/tests/benchmark/runners/test_serial.py
+++ b/tests/benchmark/runners/test_serial.py
@@ -34,8 +34,8 @@ class SerialScenarioRunnerTestCase(test.TestCase):
@mock.patch("rally.benchmark.runners.base._run_scenario_once")
def test_run_scenario(self, mock_run_once):
times = 5
- result = {"time": 10, "idle_time": 0, "error": [],
- "scenario_output": {}, "atomic_actions_time": []}
+ result = {"duration": 10, "idle_duration": 0, "error": [],
+ "scenario_output": {}, "atomic_actions": []}
mock_run_once.return_value = result
expected_results = [result for i in range(times)]
diff --git a/tests/benchmark/scenarios/cinder/test_utils.py b/tests/benchmark/scenarios/cinder/test_utils.py
index a97135e2ca..bbc63db37e 100644
--- a/tests/benchmark/scenarios/cinder/test_utils.py
+++ b/tests/benchmark/scenarios/cinder/test_utils.py
@@ -24,9 +24,9 @@ CINDER_UTILS = "rally.benchmark.scenarios.cinder.utils"
class CinderScenarioTestCase(test.TestCase):
- def _test_atomic_action_timer(self, atomic_actions_time, name):
+ def _test_atomic_action_timer(self, atomic_actions, name):
action_duration = test_utils.get_atomic_action_timer_value_by_name(
- atomic_actions_time, name)
+ atomic_actions, name)
self.assertIsNotNone(action_duration)
self.assertIsInstance(action_duration, float)
@@ -37,5 +37,5 @@ class CinderScenarioTestCase(test.TestCase):
scenario = utils.CinderScenario()
return_volumes_list = scenario._list_volumes()
self.assertEqual(volumes_list, return_volumes_list)
- self._test_atomic_action_timer(scenario.atomic_actions_time(),
+ self._test_atomic_action_timer(scenario.atomic_actions(),
'cinder.list_volumes')
diff --git a/tests/benchmark/scenarios/glance/test_utils.py b/tests/benchmark/scenarios/glance/test_utils.py
index a4c2edbe64..4e6096c45e 100644
--- a/tests/benchmark/scenarios/glance/test_utils.py
+++ b/tests/benchmark/scenarios/glance/test_utils.py
@@ -57,9 +57,9 @@ class GlanceScenarioTestCase(test.TestCase):
butils.get_from_manager(),
image_manager.create('fails', 'url', 'cf', 'df'))
- def _test_atomic_action_timer(self, atomic_actions_time, name):
+ def _test_atomic_action_timer(self, atomic_actions, name):
action_duration = test_utils.get_atomic_action_timer_value_by_name(
- atomic_actions_time, name)
+ atomic_actions, name)
self.assertIsNotNone(action_duration)
self.assertIsInstance(action_duration, float)
@@ -70,7 +70,7 @@ class GlanceScenarioTestCase(test.TestCase):
scenario = utils.GlanceScenario()
return_images_list = scenario._list_images()
self.assertEqual(images_list, return_images_list)
- self._test_atomic_action_timer(scenario.atomic_actions_time(),
+ self._test_atomic_action_timer(scenario.atomic_actions(),
'glance.list_images')
@mock.patch(GLANCE_UTILS + '.GlanceScenario.clients')
@@ -88,7 +88,7 @@ class GlanceScenarioTestCase(test.TestCase):
timeout=120)
self.res_is.mock.assert_has_calls(mock.call('active'))
self.assertEqual(self.wait_for.mock(), return_image)
- self._test_atomic_action_timer(scenario.atomic_actions_time(),
+ self._test_atomic_action_timer(scenario.atomic_actions(),
'glance.create_image')
def test_delete_image(self):
@@ -100,5 +100,5 @@ class GlanceScenarioTestCase(test.TestCase):
update_resource=self.gfm(),
check_interval=1,
timeout=120)
- self._test_atomic_action_timer(scenario.atomic_actions_time(),
+ self._test_atomic_action_timer(scenario.atomic_actions(),
'glance.delete_image')
diff --git a/tests/benchmark/scenarios/keystone/test_utils.py b/tests/benchmark/scenarios/keystone/test_utils.py
index d6df534cf6..4aab41d3ad 100644
--- a/tests/benchmark/scenarios/keystone/test_utils.py
+++ b/tests/benchmark/scenarios/keystone/test_utils.py
@@ -48,9 +48,9 @@ class KeystoneUtilsTestCase(test.TestCase):
class KeystoneScenarioTestCase(test.TestCase):
- def _test_atomic_action_timer(self, atomic_actions_time, name):
+ def _test_atomic_action_timer(self, atomic_actions, name):
action_duration = test_utils.get_atomic_action_timer_value_by_name(
- atomic_actions_time, name)
+ atomic_actions, name)
self.assertIsNotNone(action_duration)
self.assertIsInstance(action_duration, float)
@@ -71,7 +71,7 @@ class KeystoneScenarioTestCase(test.TestCase):
self.assertEqual(user, result)
fake_keystone.users.create.assert_called_once_with(name, name,
name + "@rally.me")
- self._test_atomic_action_timer(scenario.atomic_actions_time(),
+ self._test_atomic_action_timer(scenario.atomic_actions(),
'keystone.create_user')
def test_user_delete(self):
@@ -81,7 +81,7 @@ class KeystoneScenarioTestCase(test.TestCase):
scenario = utils.KeystoneScenario()
scenario._resource_delete(resource)
resource.delete.assert_called_once_with()
- self._test_atomic_action_timer(scenario.atomic_actions_time(),
+ self._test_atomic_action_timer(scenario.atomic_actions(),
'keystone.delete_resource')
@mock.patch(UTILS + "generate_keystone_name")
@@ -100,7 +100,7 @@ class KeystoneScenarioTestCase(test.TestCase):
self.assertEqual(tenant, result)
fake_keystone.tenants.create.assert_called_once_with(name)
- self._test_atomic_action_timer(scenario.atomic_actions_time(),
+ self._test_atomic_action_timer(scenario.atomic_actions(),
'keystone.create_tenant')
@mock.patch(UTILS + "generate_keystone_name")
@@ -120,7 +120,7 @@ class KeystoneScenarioTestCase(test.TestCase):
fake_keystone.users.create.assert_called_once_with(name, name,
name + "@rally.me",
tenant_id=tenant.id)
- self._test_atomic_action_timer(scenario.atomic_actions_time(),
+ self._test_atomic_action_timer(scenario.atomic_actions(),
'keystone.create_users')
def test_list_users(self):
@@ -131,7 +131,7 @@ class KeystoneScenarioTestCase(test.TestCase):
scenario = utils.KeystoneScenario(admin_clients=fake_clients)
scenario._list_users()
fake_keystone.users.list.assert_called_once()
- self._test_atomic_action_timer(scenario.atomic_actions_time(),
+ self._test_atomic_action_timer(scenario.atomic_actions(),
'keystone.list_users')
def test_list_tenants(self):
@@ -142,5 +142,5 @@ class KeystoneScenarioTestCase(test.TestCase):
scenario = utils.KeystoneScenario(admin_clients=fake_clients)
scenario._list_tenants()
fake_keystone.tenants.list.assert_called_once()
- self._test_atomic_action_timer(scenario.atomic_actions_time(),
+ self._test_atomic_action_timer(scenario.atomic_actions(),
'keystone.list_tenants')
diff --git a/tests/benchmark/scenarios/nova/test_utils.py b/tests/benchmark/scenarios/nova/test_utils.py
index ceeb37d412..603f99b66c 100644
--- a/tests/benchmark/scenarios/nova/test_utils.py
+++ b/tests/benchmark/scenarios/nova/test_utils.py
@@ -48,9 +48,9 @@ class NovaScenarioTestCase(test.TestCase):
self.gfm = self.get_fm.mock
self.useFixture(mockpatch.Patch('time.sleep'))
- def _test_atomic_action_timer(self, atomic_actions_time, name):
+ def _test_atomic_action_timer(self, atomic_actions, name):
action_duration = test_utils.get_atomic_action_timer_value_by_name(
- atomic_actions_time, name)
+ atomic_actions, name)
self.assertIsNotNone(action_duration)
self.assertIsInstance(action_duration, float)
@@ -74,7 +74,7 @@ class NovaScenarioTestCase(test.TestCase):
nova_scenario = utils.NovaScenario()
return_servers_list = nova_scenario._list_servers(True)
self.assertEqual(servers_list, return_servers_list)
- self._test_atomic_action_timer(nova_scenario.atomic_actions_time(),
+ self._test_atomic_action_timer(nova_scenario.atomic_actions(),
'nova.list_servers')
@mock.patch(NOVA_UTILS + '.NovaScenario.clients')
@@ -92,7 +92,7 @@ class NovaScenarioTestCase(test.TestCase):
)
self.res_is.mock.assert_has_calls(mock.call('ACTIVE'))
self.assertEqual(self.wait_for.mock(), return_server)
- self._test_atomic_action_timer(nova_scenario.atomic_actions_time(),
+ self._test_atomic_action_timer(nova_scenario.atomic_actions(),
'nova.boot_server')
def test__suspend_server(self):
@@ -107,7 +107,7 @@ class NovaScenarioTestCase(test.TestCase):
timeout=CONF.benchmark.nova_server_suspend_timeout
)
self.res_is.mock.assert_has_calls(mock.call('SUSPENDED'))
- self._test_atomic_action_timer(nova_scenario.atomic_actions_time(),
+ self._test_atomic_action_timer(nova_scenario.atomic_actions(),
'nova.suspend_server')
@mock.patch(NOVA_UTILS + '.NovaScenario.clients')
@@ -125,7 +125,7 @@ class NovaScenarioTestCase(test.TestCase):
)
self.res_is.mock.assert_has_calls(mock.call('ACTIVE'))
self.assertEqual(self.wait_for.mock(), return_image)
- self._test_atomic_action_timer(nova_scenario.atomic_actions_time(),
+ self._test_atomic_action_timer(nova_scenario.atomic_actions(),
'nova.create_image')
def test__delete_server(self):
@@ -138,7 +138,7 @@ class NovaScenarioTestCase(test.TestCase):
check_interval=CONF.benchmark.nova_server_delete_poll_interval,
timeout=CONF.benchmark.nova_server_delete_timeout
)
- self._test_atomic_action_timer(nova_scenario.atomic_actions_time(),
+ self._test_atomic_action_timer(nova_scenario.atomic_actions(),
'nova.delete_server')
def test__reboot_server(self):
@@ -153,7 +153,7 @@ class NovaScenarioTestCase(test.TestCase):
timeout=CONF.benchmark.nova_server_reboot_timeout
)
self.res_is.mock.assert_has_calls(mock.call('ACTIVE'))
- self._test_atomic_action_timer(nova_scenario.atomic_actions_time(),
+ self._test_atomic_action_timer(nova_scenario.atomic_actions(),
'nova.reboot_server')
def test__start_server(self):
@@ -168,7 +168,7 @@ class NovaScenarioTestCase(test.TestCase):
timeout=CONF.benchmark.nova_server_start_timeout
)
self.res_is.mock.assert_has_calls(mock.call('ACTIVE'))
- self._test_atomic_action_timer(nova_scenario.atomic_actions_time(),
+ self._test_atomic_action_timer(nova_scenario.atomic_actions(),
'nova.start_server')
def test__stop_server(self):
@@ -183,7 +183,7 @@ class NovaScenarioTestCase(test.TestCase):
timeout=CONF.benchmark.nova_server_stop_timeout
)
self.res_is.mock.assert_has_calls(mock.call('SHUTOFF'))
- self._test_atomic_action_timer(nova_scenario.atomic_actions_time(),
+ self._test_atomic_action_timer(nova_scenario.atomic_actions(),
'nova.stop_server')
def test__rescue_server(self):
@@ -198,7 +198,7 @@ class NovaScenarioTestCase(test.TestCase):
timeout=CONF.benchmark.nova_server_rescue_timeout
)
self.res_is.mock.assert_has_calls(mock.call('RESCUE'))
- self._test_atomic_action_timer(nova_scenario.atomic_actions_time(),
+ self._test_atomic_action_timer(nova_scenario.atomic_actions(),
'nova.rescue_server')
def test__unrescue_server(self):
@@ -213,7 +213,7 @@ class NovaScenarioTestCase(test.TestCase):
timeout=CONF.benchmark.nova_server_unrescue_timeout
)
self.res_is.mock.assert_has_calls(mock.call('ACTIVE'))
- self._test_atomic_action_timer(nova_scenario.atomic_actions_time(),
+ self._test_atomic_action_timer(nova_scenario.atomic_actions(),
'nova.unrescue_server')
@mock.patch(NOVA_UTILS + '.NovaScenario.clients')
@@ -237,7 +237,7 @@ class NovaScenarioTestCase(test.TestCase):
)
]
self.assertEqual(expected, self.wait_for_delete.mock.mock_calls)
- self._test_atomic_action_timer(nova_scenario.atomic_actions_time(),
+ self._test_atomic_action_timer(nova_scenario.atomic_actions(),
'nova.delete_all_servers')
def test__delete_image(self):
@@ -250,7 +250,7 @@ class NovaScenarioTestCase(test.TestCase):
CONF.benchmark.nova_server_image_delete_poll_interval,
timeout=CONF.benchmark.nova_server_image_delete_timeout
)
- self._test_atomic_action_timer(nova_scenario.atomic_actions_time(),
+ self._test_atomic_action_timer(nova_scenario.atomic_actions(),
'nova.delete_image')
@mock.patch(NOVA_UTILS + '.NovaScenario.clients')
@@ -275,5 +275,5 @@ class NovaScenarioTestCase(test.TestCase):
]
self.assertEqual(expected, self.wait_for.mock.mock_calls)
self.res_is.mock.assert_has_calls(mock.call('ACTIVE'))
- self._test_atomic_action_timer(nova_scenario.atomic_actions_time(),
+ self._test_atomic_action_timer(nova_scenario.atomic_actions(),
'nova.boot_servers')
diff --git a/tests/benchmark/scenarios/test_base.py b/tests/benchmark/scenarios/test_base.py
index 9390df18a0..2700b47408 100644
--- a/tests/benchmark/scenarios/test_base.py
+++ b/tests/benchmark/scenarios/test_base.py
@@ -186,13 +186,13 @@ class ScenarioTestCase(test.TestCase):
def test_sleep_between(self):
scenario = base.Scenario()
scenario.sleep_between(0.001, 0.002)
- self.assertTrue(0.001 <= scenario.idle_time() <= 0.002)
+ self.assertTrue(0.001 <= scenario.idle_duration() <= 0.002)
def test_sleep_beetween_multi(self):
scenario = base.Scenario()
scenario.sleep_between(0.001, 0.001)
scenario.sleep_between(0.004, 0.004)
- self.assertEqual(scenario.idle_time(), 0.005)
+ self.assertEqual(scenario.idle_duration(), 0.005)
@mock.patch("rally.benchmark.scenarios.base.time.sleep")
@mock.patch("rally.benchmark.scenarios.base.random.uniform")
@@ -203,7 +203,7 @@ class ScenarioTestCase(test.TestCase):
scenario.sleep_between(1, 2)
mock_sleep.assert_called_once_with(mock_uniform.return_value)
- self.assertEqual(scenario.idle_time(), mock_uniform.return_value)
+ self.assertEqual(scenario.idle_duration(), mock_uniform.return_value)
def test_context(self):
context = mock.MagicMock()
diff --git a/tests/benchmark/scenarios/test_utils.py b/tests/benchmark/scenarios/test_utils.py
index 658fb3e6cf..0c8b3c0cea 100644
--- a/tests/benchmark/scenarios/test_utils.py
+++ b/tests/benchmark/scenarios/test_utils.py
@@ -186,8 +186,8 @@ class ActionBuilderTestCase(test.TestCase):
mock_action_two.assert_has_calls(mock_calls)
-def get_atomic_action_timer_value_by_name(atomic_actions_times, name):
- for action_time in atomic_actions_times:
- if action_time['action'] == name:
- return action_time['duration']
+def get_atomic_action_timer_value_by_name(atomic_actions, name):
+ for action in atomic_actions:
+ if action['action'] == name:
+ return action['duration']
return None