From 1d5f32b48e7b4e5c666e4ac0de05091cbc9e407b Mon Sep 17 00:00:00 2001 From: Matthew Treinish Date: Wed, 4 Jun 2014 15:37:58 -0400 Subject: [PATCH] Change how tempest debug logs are displayed This commit cleans up how tempest failure logs are displayed. We'll no longer dump the failure information to the console as a test fails. Instead all the failure logs will be printed after the tests are run. Change-Id: I7ecdc349d913b43f4fb0505d5c17c66f811774b4 --- tools/pretty_tox.sh | 2 +- tools/pretty_tox_serial.sh | 3 ++- tools/subunit-trace.py | 26 ++++++++++++++++++++++---- 3 files changed, 25 insertions(+), 6 deletions(-) diff --git a/tools/pretty_tox.sh b/tools/pretty_tox.sh index f3c88f30fc..0a04ce6768 100755 --- a/tools/pretty_tox.sh +++ b/tools/pretty_tox.sh @@ -3,4 +3,4 @@ set -o pipefail TESTRARGS=$1 -python setup.py testr --slowest --testr-args="--subunit $TESTRARGS" | $(dirname $0)/subunit-trace.py +python setup.py testr --slowest --testr-args="--subunit $TESTRARGS" | $(dirname $0)/subunit-trace.py --no-failure-debug -f diff --git a/tools/pretty_tox_serial.sh b/tools/pretty_tox_serial.sh index 1634b8e5b3..db70890942 100755 --- a/tools/pretty_tox_serial.sh +++ b/tools/pretty_tox_serial.sh @@ -7,7 +7,8 @@ TESTRARGS=$@ if [ ! -d .testrepository ]; then testr init fi -testr run --subunit $TESTRARGS | $(dirname $0)/subunit-trace.py +testr run --subunit $TESTRARGS | $(dirname $0)/subunit-trace.py -f -n retval=$? 
testr slowest + exit $retval diff --git a/tools/subunit-trace.py b/tools/subunit-trace.py index 7bb88a4c78..9bfefe1ee2 100755 --- a/tools/subunit-trace.py +++ b/tools/subunit-trace.py @@ -18,6 +18,7 @@ """Trace a subunit stream in reasonable detail and high accuracy.""" +import argparse import functools import re import sys @@ -151,7 +152,7 @@ def print_attachments(stream, test, all_channels=False): stream.write(" %s\n" % line) -def show_outcome(stream, test): +def show_outcome(stream, test, print_failures=False): global RESULTS status = test['status'] # TODO(sdague): ask lifeless why on this? @@ -178,14 +179,16 @@ def show_outcome(stream, test): FAILS.append(test) stream.write('{%s} %s [%s] ... FAILED\n' % ( worker, name, duration)) - print_attachments(stream, test, all_channels=True) + if not print_failures: + print_attachments(stream, test, all_channels=True) elif status == 'skip': stream.write('{%s} %s ... SKIPPED: %s\n' % ( worker, name, test['details']['reason'].as_text())) else: stream.write('{%s} %s [%s] ... 
%s\n' % ( worker, name, duration, test['status'])) - print_attachments(stream, test, all_channels=True) + if not print_failures: + print_attachments(stream, test, all_channels=True) stream.flush() @@ -247,12 +250,25 @@ def print_summary(stream): (w, num, time)) +def parse_args(): + parser = argparse.ArgumentParser() + parser.add_argument('--no-failure-debug', '-n', action='store_true', + dest='print_failures', help='Disable printing failure ' + 'debug information in realtime') + parser.add_argument('--fails', '-f', action='store_true', + dest='post_fails', help='Print failure debug ' + 'information after the stream is processed') + return parser.parse_args() + + def main(): + args = parse_args() stream = subunit.ByteStreamToStreamResult( sys.stdin, non_subunit_name='stdout') starts = Starts(sys.stdout) outcomes = testtools.StreamToDict( - functools.partial(show_outcome, sys.stdout)) + functools.partial(show_outcome, sys.stdout, + print_failures=args.print_failures)) summary = testtools.StreamSummary() result = testtools.CopyStreamResult([starts, outcomes, summary]) result.startTestRun() @@ -260,6 +276,8 @@ def main(): stream.run(result) finally: result.stopTestRun() + if args.post_fails: + print_fails(sys.stdout) print_summary(sys.stdout) return (0 if summary.wasSuccessful() else 1)