Report claim and delete latency separately in the benchmark tool
The consumer worker actually performs two different operations: first it claims a batch of messages, then it deletes each one before getting the next batch. To make the latency of enqueuing a message easier to compare against those other operations, this patch changes the reporting to provide the individual latency of each of them.

Change-Id: Iff7f6054904a03dc240d3f7a2a0529bd990f973b
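For context, the change in the diff below boils down to timing the two calls independently and normalizing each total by its own request count. A minimal sketch of that pattern follows; it is not the tool's code: it assumes a python-marconiclient-style queue whose claim() takes the same ttl, grace, and limit parameters that claim_delete() receives, and it uses time.time() in place of the tool's marktime helper.

    import time

    def consume(queue, end_time, ttl=300, grace=60, limit=5):
        # Accumulate elapsed time and request counts per operation,
        # instead of one combined total.
        claim_elapsed = delete_elapsed = 0.0
        claim_requests = delete_requests = 0

        while time.time() < end_time:
            start = time.time()
            claim = queue.claim(ttl=ttl, grace=grace, limit=limit)  # assumed client API
            claim_elapsed += time.time() - start
            claim_requests += 1

            for msg in claim:
                start = time.time()
                msg.delete()
                delete_elapsed += time.time() - start
                delete_requests += 1

        # Per-operation latency in milliseconds, matching ms_per_claim / ms_per_delete.
        ms_per_claim = 1000 * claim_elapsed / claim_requests if claim_requests else 0
        ms_per_delete = 1000 * delete_elapsed / delete_requests if delete_requests else 0
        return ms_per_claim, ms_per_delete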
parent 04fa909282
commit 6c09339a2a

README.rst | 19
@@ -132,19 +132,20 @@ Verbose output looks similar to the following::
 
     Consumer
     ========
-    duration_sec: 10.1
-    ms_per_req: 77.1
-    total_reqs: 160.0
-    successful_reqs: 160.0
-    reqs_per_sec: 15.8
+    duration_sec: 10.2
+    ms_per_claim: 37.6
+    ms_per_delete: 11.8
+    reqs_per_sec: 82.0
+    successful_reqs: 833.0
+    total_reqs: 833.0
 
     Producer
     ========
     duration_sec: 10.2
-    ms_per_req: 4.6
-    total_reqs: 8866.0
-    successful_reqs: 8866.0
-    reqs_per_sec: 870.5
+    ms_per_req: 3.8
+    reqs_per_sec: 1033.6
+    successful_reqs: 10523.0
+    total_reqs: 10523.0
 
 
 .. _`OpenStack` : http://openstack.org/
@@ -40,11 +40,16 @@ def main():
     if conf.verbose:
         print()
 
-        for name, stat in stats.items():
+        for name, stats_group in stats.items():
             print(name.capitalize())
             print('=' * len(name))
-            print("\n".join("{}: {:.1f}".format(*it) for it in stat.items()))
+
+            values = sorted(stats_group.items(), key=lambda v: v[0])
+            formatted_vals = ["{}: {:.1f}".format(*v) for v in values]
+
+            print("\n".join(formatted_vals))
             print('') # Blank line
 
     else:
         stats['params'] = {
             'producer': {
@@ -40,7 +40,8 @@ def claim_delete(stats, test_duration, ttl, grace, limit):
     cli = client.Client(conf.server_url)
     queue = cli.queue(conf.queue_prefix + '1')
     end = time.time() + test_duration
-    total_elapsed = 0
+    claim_total_elapsed = 0
+    delete_total_elapsed = 0
     total_requests = 0
     claim_total_requests = 0
     delete_total_requests = 0
@@ -54,7 +55,7 @@ def claim_delete(stats, test_duration, ttl, grace, limit):
             sys.stderr.write("Could not claim messages : {0}\n".format(ex))
 
         else:
-            total_elapsed += marktime.stop('claim_message').seconds
+            claim_total_elapsed += marktime.stop('claim_message').seconds
             claim_total_requests += 1
 
             try:
@@ -62,10 +63,10 @@ def claim_delete(stats, test_duration, ttl, grace, limit):
 
                 for msg in claim:
                     # TODO(TheSriram): Simulate actual work before deletion
+                    delete_total_requests += 1
                     msg.delete()
 
-                    total_elapsed += marktime.stop('delete_message').seconds
-                    delete_total_requests += 1
+                    delete_total_elapsed += marktime.stop('delete_message').seconds
 
             except TransportError as ex:
                 sys.stderr.write("Could not delete messages: {0}\n".format(ex))
@@ -75,10 +76,13 @@ def claim_delete(stats, test_duration, ttl, grace, limit):
         finally:
             total_requests += 1
 
-    stats.put({'total_requests': total_requests,
-               'claim_total_requests': claim_total_requests,
-               'delete_total_requests': delete_total_requests,
-               'total_elapsed': total_elapsed})
+    stats.put({
+        'total_requests': total_requests,
+        'claim_total_requests': claim_total_requests,
+        'delete_total_requests': delete_total_requests,
+        'claim_total_elapsed': claim_total_elapsed,
+        'delete_total_elapsed': delete_total_elapsed,
+    })
 
 
 def load_generator(stats, num_workers, test_duration, url, ttl, grace, limit):
@@ -91,19 +95,21 @@ def load_generator(stats, num_workers, test_duration, url, ttl, grace, limit):
 
 
 def crunch(stats):
     total_requests = 0
-    total_latency = 0.0
+    claim_total_elapsed = 0.0
+    delete_total_elapsed = 0.0
     claim_total_requests = 0
     delete_total_requests = 0
 
     while not stats.empty():
         entry = stats.get_nowait()
         total_requests += entry['total_requests']
-        total_latency += entry['total_elapsed']
+        claim_total_elapsed += entry['claim_total_elapsed']
+        delete_total_elapsed += entry['delete_total_elapsed']
         claim_total_requests += entry['claim_total_requests']
         delete_total_requests += entry['delete_total_requests']
 
-    return (total_requests, total_latency, claim_total_requests,
-            delete_total_requests)
+    return (total_requests, claim_total_elapsed, delete_total_elapsed,
+            claim_total_requests, delete_total_requests)
 
 
 def run(upstream_queue):
@@ -131,24 +137,30 @@ def run(upstream_queue):
         for each_proc in procs:
             each_proc.join()
 
-        (total_requests, total_latency, claim_total_requests,
-         delete_total_requests) = crunch(stats)
+        (total_requests, claim_total_elapsed, delete_total_elapsed,
+         claim_total_requests, delete_total_requests) = crunch(stats)
 
         successful_requests = claim_total_requests + delete_total_requests
         duration = time.time() - start
         throughput = successful_requests / duration
-        latency = 1000 * total_latency / successful_requests
+        claim_latency = 1000 * claim_total_elapsed / claim_total_requests
+        delete_latency = 1000 * delete_total_elapsed / delete_total_requests
 
     else:
         duration = 0
         total_requests = 0
         successful_requests = 0
         throughput = 0
-        latency = 0
+        claim_latency = 0
+        delete_latency = 0
 
-    upstream_queue.put({'consumer': {
-        'duration_sec': duration,
-        'total_reqs': total_requests,
-        'successful_reqs': successful_requests,
-        'reqs_per_sec': throughput,
-        'ms_per_req': latency}})
+    upstream_queue.put({
+        'consumer': {
+            'duration_sec': duration,
+            'total_reqs': total_requests,
+            'successful_reqs': successful_requests,
+            'reqs_per_sec': throughput,
+            'ms_per_claim': claim_latency,
+            'ms_per_delete': delete_latency,
+        }
+    })