Merge "Fix divide by zero bug in benchmark workers"

This commit is contained in:
Jenkins 2014-08-26 19:22:20 +00:00 committed by Gerrit Code Review
commit 822624a730
2 changed files with 42 additions and 23 deletions

View File

@@ -119,6 +119,17 @@ def run(upstream_queue):
num_procs = conf.consumer_processes
num_workers = conf.consumer_workers
# Stats that will be reported
duration = 0
total_requests = 0
successful_requests = 0
claim_total_requests = 0
delete_total_requests = 0
throughput = 0
claim_latency = 0
delete_latency = 0
# Performance test
if num_procs and num_workers:
test_duration = conf.time
stats = mp.Queue()
@@ -145,23 +156,25 @@ def run(upstream_queue):
successful_requests = claim_total_requests + delete_total_requests
duration = time.time() - start
throughput = successful_requests / duration
claim_latency = 1000 * claim_total_elapsed / claim_total_requests
delete_latency = 1000 * delete_total_elapsed / delete_total_requests
else:
duration = 0
total_requests = 0
successful_requests = 0
throughput = 0
claim_latency = 0
delete_latency = 0
# NOTE(kgriffs): Duration should never be zero
throughput = successful_requests / duration
if claim_total_requests:
claim_latency = (1000 * claim_total_elapsed /
claim_total_requests)
if delete_total_requests:
delete_latency = (1000 * delete_total_elapsed /
delete_total_requests)
upstream_queue.put({
'consumer': {
'duration_sec': duration,
'total_reqs': total_requests,
'claim_total_requests': claim_total_requests,
'successful_reqs': successful_requests,
'messages_processed': delete_total_requests,
'reqs_per_sec': throughput,
'ms_per_claim': claim_latency,
'ms_per_delete': delete_latency,

View File

@@ -133,6 +133,12 @@ def run(upstream_queue):
num_procs = conf.producer_processes
num_workers = conf.producer_workers
duration = 0
total_requests = 0
successful_requests = 0
throughput = 0
latency = 0
if num_procs and num_workers:
test_duration = conf.time
stats = mp.Queue()
@@ -161,19 +167,19 @@ def run(upstream_queue):
successful_requests, total_requests, total_latency = crunch(stats)
duration = time.time() - start
# NOTE(kgriffs): Duration should never be zero
throughput = successful_requests / duration
latency = 1000 * total_latency / successful_requests
else:
duration = 0
total_requests = 0
successful_requests = 0
throughput = 0
latency = 0
if successful_requests:
latency = 1000 * total_latency / successful_requests
upstream_queue.put({'producer': {
'duration_sec': duration,
'total_reqs': total_requests,
'successful_reqs': successful_requests,
'reqs_per_sec': throughput,
'ms_per_req': latency}})
upstream_queue.put({
'producer': {
'duration_sec': duration,
'total_reqs': total_requests,
'successful_reqs': successful_requests,
'reqs_per_sec': throughput,
'ms_per_req': latency
}
})