Add percent change to duration on subunit-trace output

This commit adds a percent change to the duration on individual test
output lines. This is conditionally displayed based on the presence
of a testrepository times.dbm file and data in that file for the test
being run. If there is usable data, subunit-trace will use the
run times recorded in the dbm file to display how the current run's
time has changed relative to the recorded average. A new threshold
option is added to optionally specify a minimum percent change
required before the value is displayed.

Change-Id: I3d68425f48114531a78cab08c353111648ce3911
This commit is contained in:
Matthew Treinish 2015-02-18 17:43:45 -05:00
parent 6e1bb16cab
commit bbc8b8f541

View File

@ -28,6 +28,13 @@ import sys
import subunit import subunit
import testtools import testtools
# NOTE(mtreinish) on python3 anydbm was renamed dbm and the python2 dbm module
# was renamed to dbm.ndbm, this block takes that into account
try:
import anydbm as dbm
except ImportError:
import dbm
DAY_SECONDS = 60 * 60 * 24 DAY_SECONDS = 60 * 60 * 24
FAILS = [] FAILS = []
RESULTS = {} RESULTS = {}
@ -116,7 +123,24 @@ def print_attachments(stream, test, all_channels=False):
stream.write(" %s\n" % line) stream.write(" %s\n" % line)
def find_test_run_time_diff(test_id, run_time):
    """Return the percent change of a test's run time vs its recorded average.

    Looks up the average run time for *test_id* in testrepository's
    ``.testrepository/times.dbm`` database relative to the current working
    directory. This is a best-effort helper: any missing, unreadable, or
    unusable data simply disables the percent-change display.

    :param test_id: the test id to look up in the times database.
    :param run_time: the current run time as a string such as ``'1.234s'``
        (a trailing ``s`` suffix is stripped before conversion).
    :returns: the percent difference as a float, or ``False`` when the times
        database is absent, unreadable, or has no usable entry for the test.
    """
    times_db_path = os.path.join(os.getcwd(), '.testrepository', 'times.dbm')
    if not os.path.isfile(times_db_path):
        return False
    try:
        test_times = dbm.open(times_db_path)
    except Exception:
        # Corrupt or otherwise unreadable db: silently skip the feature.
        return False
    try:
        # dbm values are bytes; float() handles both bytes and the False
        # sentinel returned for a missing key (float(False) == 0.0).
        avg_runtime = float(test_times.get(str(test_id), False))
    except (ValueError, TypeError):
        # Non-numeric stored value: treat as "no usable data".
        avg_runtime = 0.0
    finally:
        # The original leaked this handle; always close the db.
        test_times.close()
    if avg_runtime > 0:
        current = float(run_time.rstrip('s'))
        return ((current - avg_runtime) / avg_runtime) * 100
    return False
def show_outcome(stream, test, print_failures=False, failonly=False,
threshold='0'):
global RESULTS global RESULTS
status = test['status'] status = test['status']
# TODO(sdague): ask lifeless why on this? # TODO(sdague): ask lifeless why on this?
@ -143,8 +167,14 @@ def show_outcome(stream, test, print_failures=False, failonly=False):
print_attachments(stream, test, all_channels=True) print_attachments(stream, test, all_channels=True)
elif not failonly: elif not failonly:
if status == 'success': if status == 'success':
stream.write('{%s} %s [%s] ... ok\n' % ( out_string = '{%s} %s [%s' % (worker, name, duration)
worker, name, duration)) perc_diff = find_test_run_time_diff(test['id'], duration)
if perc_diff and abs(perc_diff) >= abs(float(threshold)):
if perc_diff > 0:
out_string = out_string + ' +%.2f%%' % perc_diff
else:
out_string = out_string + ' %.2f%%' % perc_diff
stream.write(out_string + '] ... ok\n')
print_attachments(stream, test) print_attachments(stream, test)
elif status == 'skip': elif status == 'skip':
stream.write('{%s} %s ... SKIPPED: %s\n' % ( stream.write('{%s} %s ... SKIPPED: %s\n' % (
@ -241,6 +271,10 @@ def parse_args():
default=( default=(
os.environ.get('TRACE_FAILONLY', False) os.environ.get('TRACE_FAILONLY', False)
is not False)) is not False))
parser.add_argument('--diff-threshold', '-t', dest='threshold',
help="Threshold to use for displaying percent change "
"from the avg run time. If one is not specified "
"the percent change will always be displayed")
return parser.parse_args() return parser.parse_args()