Added code to propagate the tags associated with tests all the way into test result reporting

Change-Id: I1a63a8d826b9b8e08d314ecc73698ce3b30d4e16
Nitin Mehra
2014-02-20 14:27:02 -06:00
parent 17bf6611e2
commit 8a467a04f2
9 changed files with 351 additions and 43 deletions
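
Note: the metadata being propagated here comes from the @tags decorator. A minimal sketch of a tagged test (ExampleFixture and test_login are hypothetical names):

from cafe.drivers.unittest.decorators import tags
from cafe.drivers.unittest.fixtures import BaseTestFixture

class ExampleFixture(BaseTestFixture):

    # Positional arguments become tags; keyword arguments become attributes
    @tags('smoke', 'regression', priority='high')
    def test_login(self):
        self.assertTrue(True)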

View File

@@ -47,9 +47,9 @@ def get_object_namespace(obj):
except:
pass
-#mro name wasn't availble, generate a unique name
-#By default, name is set to the memory address of the passed in object
-#since it's guaranteed to work.
+# mro name wasn't available, generate a unique name
+# By default, name is set to the memory address of the passed-in object
+# since it's guaranteed to work.
name = str(id(obj))
try:
@@ -76,17 +76,17 @@ def getLogger(log_name, log_level=None):
Log level defaults to logging.DEBUG
'''
-#Create new log
+# Create new log
new_log = logging.getLogger(name=log_name)
new_log.setLevel(log_level or logging.DEBUG)
verbosity = os.getenv('CAFE_LOGGING_VERBOSITY')
if verbosity == 'VERBOSE':
if logging.getLogger(log_name).handlers == []:
-#Special case for root log handler
+# Special case for root log handler
if log_name == "":
log_name = os.getenv('CAFE_MASTER_LOG_FILE_NAME')
-#Add handler by default for all new loggers
+# Add handler by default for all new loggers
new_log.addHandler(setup_new_cchandler(log_name))
# Add support for adding null log handlers by default when
@@ -120,7 +120,7 @@ def setup_new_cchandler(
log_path = os.path.join(log_dir, "{0}.log".format(log_file_name))
-#Set up handler with encoding and msg formatter in log directory
+# Set up handler with encoding and msg formatter in log directory
log_handler = logging.FileHandler(log_path, "a+",
encoding=encoding or "UTF-8", delay=True)
@@ -130,7 +130,7 @@ def setup_new_cchandler(
return log_handler
-def log_results(result):
+def log_results(result, test_id=None, verbosity=0):
"""
@summary: Replicates the printing functionality of unittest's
runner.run(), but logs the results instead of printing them
@@ -179,6 +179,14 @@ def log_results(result):
os.getenv("CAFE_TEST_LOG_PATH"))
print '-' * 150
+    # Print the tag-to-test mapping if available and verbosity is > 2
+    if verbosity > 2 and hasattr(result, 'mapping'):
+        if test_id is not None:
+            result.mapping.write_to_stream(
+                "Test Suite ID: {0}\n".format(test_id))
+        result.mapping.print_tag_to_test_mapping()
+        result.mapping.print_attribute_to_test_mapping()
def log_errors(label, result, errors):
border1 = '=' * 45
@@ -190,7 +198,7 @@ def log_errors(label, result, errors):
def init_root_log_handler():
-#Setup root log handler if the root logger doesn't already have one
+# Set up root log handler if the root logger doesn't already have one
if not getLogger('').handlers:
master_log_file_name = os.getenv('CAFE_MASTER_LOG_FILE_NAME')
getLogger('').addHandler(
@@ -220,12 +228,12 @@ def log_info_block(
try:
info = info if isinstance(info, OrderedDict) else OrderedDict(info)
except:
-#Something went wrong, log what can be logged
+# Something went wrong, log what can be logged
output.append(str(info))
return
separator = str(separator or "{0}".format('=' * 56))
-max_length = len(max([k for k in info.keys() if info.get(k)], key=len))+3
+max_length = len(max([k for k in info.keys() if info.get(k)], key=len)) + 3
output.append(separator)
if heading:
@@ -236,7 +244,7 @@ def log_info_block(
value = str(info.get(k, None))
if value:
output.append(
"{0}{1}: {2}".format(k, "." * (max_length-len(k)), value))
"{0}{1}: {2}".format(k, "." * (max_length - len(k)), value))
else:
output.append("{0}".format(k))
output.append(separator)
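
With the new signature, a caller that has a tag-aware result object can request the mapping printout explicitly; a sketch, assuming result came from a runner built with the TaggedTextTestResult class introduced below, and 'suite-1' is a made-up suite ID:

result = test_runner.run(suite)
# verbosity > 2 triggers the tag/attribute-to-test mapping dump
log_results(result, test_id='suite-1', verbosity=3)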

View File

@@ -19,6 +19,9 @@ from cafe.common.reporting.xml_report import XMLReport
class Reporter:
+    JSON_REPORT = 'json'
+    XML_REPORT = 'xml'
def __init__(self, result_parser, all_results):
self.result_parser = result_parser
self.all_results = all_results
@@ -27,9 +30,9 @@ class Reporter:
""" Creates a report object based on what type is given and generates
the report in the specified directory.
"""
-if result_type == 'json':
+if result_type == Reporter.JSON_REPORT:
report = JSONReport()
-elif result_type == 'xml':
+elif result_type == Reporter.XML_REPORT:
report = XMLReport()
report.generate_report(result_parser=self.result_parser,
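
Callers can now select the report format by constant instead of a bare string; a sketch (parser and results are placeholders):

reporter = Reporter(result_parser=parser, all_results=results)
reporter.generate_report(result_type=Reporter.XML_REPORT,
                         path='/tmp/results')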

View File

@@ -66,6 +66,14 @@ class XMLReport(BaseReport):
else:
testcase_tag.attrib['result'] = "PASSED"
comment = ""
if testcase.tags is not None:
comment += "Test Tags: {tags}".format(tags=testcase.tags)
if testcase.attributes is not None:
comment += " Attribute Tags: {attributes}".format(
attributes=testcase.attributes)
testcase_tag.attrib['comment'] = comment
result_path = path or os.getcwd()
if os.path.isdir(result_path):
result_path += "/results.xml"

View File

@@ -36,9 +36,8 @@ class DataDrivenFixtureError(Exception):
def tags(*tags, **attrs):
"""Adds tags and attributes to tests, which are interpreted by the
-cafe-runner at run time
+cafe-runner at run time and by the result generator during reporting
"""
def decorator(func):
setattr(func, TAGS_DECORATOR_TAG_LIST_NAME, [])
setattr(func, TAGS_DECORATOR_ATTR_DICT_NAME, {})
@@ -135,6 +134,7 @@ def skip_open_issue(type, bug_id):
class memoized(object):
"""
Decorator.
@see: https://wiki.python.org/moin/PythonDecoratorLibrary#Memoize
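
The decorator records its arguments on the test function itself, which is what the new result class below reads back; a sketch of inspecting the hypothetical test_login method from the example near the top:

from cafe.drivers.unittest.decorators import (
    TAGS_DECORATOR_TAG_LIST_NAME, TAGS_DECORATOR_ATTR_DICT_NAME)

method = ExampleFixture.test_login
getattr(method, TAGS_DECORATOR_TAG_LIST_NAME)   # presumably ['smoke', 'regression']
getattr(method, TAGS_DECORATOR_ATTR_DICT_NAME)  # presumably {'priority': 'high'}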

View File

@@ -97,6 +97,25 @@ class SummarizeResults(object):
return failure_obj_list
+    def update_tags(self, executed_tests):
+        """ Annotates each executed test with its tags and attributes when a
+        tag mapping is available
+        """
+        if not hasattr(self, 'mapping'):
+            return executed_tests
+        for test in executed_tests:
+            test.tags = self.mapping.test_to_tag_mapping.get(
+                test.test_method_name) or []
+            test.attributes = self.mapping.test_to_attribute_mapping.get(
+                test.test_method_name) or []
+        return executed_tests
def summary_result(self):
summary_res = {'tests': str(self.testsRun),
'errors': str(len(self.errors)),
@@ -107,17 +126,22 @@ class SummarizeResults(object):
def gather_results(self):
executed_tests = (self.get_passed_tests() + self.parse_failures() +
self.get_errored_tests() + self.get_skipped_tests())
-        return executed_tests
+        return self.update_tags(executed_tests)
class Result(object):
def __init__(self, test_class_name, test_method_name, failure_trace=None,
-                 skipped_msg=None, error_trace=None):
+                 skipped_msg=None, error_trace=None, tags=None,
+                 attributes=None):
self.test_class_name = test_class_name
self.test_method_name = test_method_name
self.failure_trace = failure_trace
self.skipped_msg = skipped_msg
self.error_trace = error_trace
+        self.tags = tags
+        self.attributes = attributes
def __repr__(self):
values = []
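
A sketch of a Result carrying the new metadata (all values hypothetical):

res = Result(test_class_name='ExampleFixture',
             test_method_name='test_login',
             tags=['smoke', 'regression'],
             attributes={'priority': 'high'})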

View File

@@ -0,0 +1,200 @@
"""
Copyright 2014 Rackspace
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from unittest import TextTestResult
from cafe.drivers.unittest.decorators import TAGS_DECORATOR_TAG_LIST_NAME
from cafe.drivers.unittest.decorators import TAGS_DECORATOR_ATTR_DICT_NAME
class TaggedTextTestResult(TextTestResult):
""" Extended TextTestResult object to include support for tagged methods"""
def __init__(self, stream, descriptions, verbosity):
super(TaggedTextTestResult, self).__init__(
stream, descriptions, verbosity)
self.mapping = TestCaseTagMapping(self)
def stopTest(self, test):
""" Override stopTest method to capture test object and extract tags"""
super(TaggedTextTestResult, self).stopTest(test)
test_method = getattr(test, test._testMethodName)
if hasattr(test_method, TAGS_DECORATOR_TAG_LIST_NAME):
self.mapping.update_mapping(test._testMethodName, getattr(
test_method, TAGS_DECORATOR_TAG_LIST_NAME))
if hasattr(test_method, TAGS_DECORATOR_ATTR_DICT_NAME):
self.mapping.update_attribute_mapping(
test._testMethodName, getattr(test_method,
TAGS_DECORATOR_ATTR_DICT_NAME))

class TestCaseTagMapping(object):
    """ Test case mapping class which keeps track of the test-to-tag and
    tag-to-test mappings
    """

    def __init__(self, test_result):
        self.test_ref = test_result
        self.test_to_tag_mapping = dict()
        self.tag_to_test_mapping = dict()
        self.test_to_attribute_mapping = dict()
        self.attribute_to_test_mapping = dict()

    def update_mapping(self, test_name, tag_list):
        """ Takes the test name and the list of associated tags and updates
        the mappings
        """
        if test_name not in self.test_to_tag_mapping:
            self.test_to_tag_mapping[test_name] = tag_list
        for tag in tag_list:
            if tag not in self.tag_to_test_mapping:
                self.tag_to_test_mapping[tag] = [test_name]
            elif test_name not in self.tag_to_test_mapping[tag]:
                # Append so previously recorded tests for this tag
                # are not overwritten
                self.tag_to_test_mapping[tag].append(test_name)

    def update_attribute_mapping(self, test_name, attribute_list):
        """ Takes the test name and its attribute dict and updates
        the attribute mappings
        """
        if test_name not in self.test_to_attribute_mapping:
            self.test_to_attribute_mapping[test_name] = attribute_list
        for attribute, entries in attribute_list.items():
            for entry in entries.split(","):
                attribute_tuple = (attribute, entry.strip())
                if attribute_tuple not in self.attribute_to_test_mapping:
                    self.attribute_to_test_mapping[attribute_tuple] = \
                        [test_name]
                elif test_name not in self.attribute_to_test_mapping[
                        attribute_tuple]:
                    # Append so previously recorded tests are kept
                    self.attribute_to_test_mapping[
                        attribute_tuple].append(test_name)

    def print_test_to_tag_mapping(self):
        """ Prints the test-to-tag dict mapping to the result stream """
        self.test_ref.stream.writeln()
        self.test_ref.stream.writeln(
            "Tags and attributes associated with tests")
        self.test_ref.stream.writeln(self.test_ref.separator1)
        max_len = self.__get_max_entry_length(self.test_to_tag_mapping.keys())
        for entry in self.test_to_tag_mapping.keys():
            self.test_ref.stream.write("{entry}{spacer}: ".format(
                entry=entry, spacer=(" " * (max_len - len(entry)))))
            self.test_ref.stream.write(
                str(self.test_to_tag_mapping.get(entry)))
            if entry in self.test_to_attribute_mapping:
                self.test_ref.stream.write(" Attributes: {attributes}".format(
                    attributes=str(self.test_to_attribute_mapping.get(entry))))
            self.test_ref.stream.write("\n")
        self.test_ref.stream.writeln(self.test_ref.separator1)
        self.test_ref.stream.flush()

    def print_tag_to_test_mapping(self):
        """ Prints the tag-to-test dict mapping to the result stream """
        self.test_ref.stream.writeln("Tests associated with tags")
        self.test_ref.stream.writeln(self.test_ref.separator1)
        max_len = self.__get_max_entry_length(self.tag_to_test_mapping.keys())
        for entry in self.tag_to_test_mapping.keys():
            self.test_ref.stream.write("{entry}{spacer} : ".format(
                entry=entry, spacer=(" " * (max_len - len(entry)))))
            self.test_ref.stream.writeln(self.__generate_summary(
                entry, self.tag_to_test_mapping))
            self.test_ref.stream.writeln(
                str(self.tag_to_test_mapping.get(entry)))
            self.test_ref.stream.writeln("\n")
        self.test_ref.stream.writeln(self.test_ref.separator1)
        self.test_ref.stream.flush()

    def print_attribute_to_test_mapping(self):
        """ Prints the attribute-to-test dict mapping to the result stream """
        self.test_ref.stream.writeln("Tests associated with attributes")
        self.test_ref.stream.writeln(self.test_ref.separator1)
        max_len = self.__get_max_entry_length(
            self.attribute_to_test_mapping.keys())
        for entry in self.attribute_to_test_mapping.keys():
            self.test_ref.stream.write("{entry}{spacer} : ".format(
                entry=entry, spacer=(" " * (max_len - len(str(entry))))))
            self.test_ref.stream.writeln(self.__generate_summary(
                entry, self.attribute_to_test_mapping))
            self.test_ref.stream.writeln(
                str(self.attribute_to_test_mapping.get(entry)))
            self.test_ref.stream.writeln("\n")
        self.test_ref.stream.writeln(self.test_ref.separator1)
        self.test_ref.stream.flush()

    def write_to_stream(self, data):
        """ Writes to the stream object passed to the result object """
        self.test_ref.stream.write(data)
        self.test_ref.stream.flush()

    @staticmethod
    def __tuple_contains(test, test_ref_list):
        """ Checks whether a (testcase, traceback) tuple list contains the
        named test method
        """
        if not test_ref_list:
            return False
        for item in test_ref_list:
            if getattr(item[0], '_testMethodName', None) == test:
                return True
        return False

    @staticmethod
    def __get_max_entry_length(listing):
        """ Returns the length of the longest entry, rendered as a string """
        max_len = 0
        for entry in listing:
            max_len = max(max_len, len(str(entry)))
        return max_len

    def __generate_summary(self, tag, listing):
        """ Generates a run summary for a given tag """
        pass_count = 0
        fail_count = 0
        skip_count = 0
        error_count = 0
        for test in listing.get(tag):
            if self.__tuple_contains(test, self.test_ref.failures):
                fail_count += 1
            elif self.__tuple_contains(test, self.test_ref.errors):
                error_count += 1
            elif self.__tuple_contains(test, self.test_ref.skipped):
                skip_count += 1
            else:
                pass_count += 1
        total_count = pass_count + fail_count + skip_count + error_count
        if pass_count == 0:
            pass_rate = float(0)
        else:
            pass_rate = 100 * float(pass_count) / float(total_count)
        return ("Pass: {0} Fail: {1} Error: {2} Skipped: {3} Total: {4} "
                "Pass Rate: {5}%").format(pass_count, fail_count, error_count,
                                          skip_count, total_count, pass_rate)
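
A sketch of wiring the new result class into a runner directly, mirroring what the runner changes below do (suite is a placeholder):

import unittest2 as unittest

runner = unittest.TextTestRunner(verbosity=2,
                                 resultclass=TaggedTextTestResult)
result = runner.run(suite)
result.mapping.print_tag_to_test_mapping()
result.mapping.print_attribute_to_test_mapping()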

View File

@@ -28,6 +28,7 @@ from traceback import extract_tb
import unittest2 as unittest
import uuid
+from result import TaggedTextTestResult
from cafe.drivers.unittest.fixtures import BaseTestFixture
from cafe.common.reporting.cclogging import log_results
from cafe.drivers.unittest.parsers import SummarizeResults
@@ -97,6 +98,7 @@ def print_traceback():
class _WritelnDecorator(object):
"""Used to decorate file-like objects with a handy "writeln" method"""
def __init__(self, stream):
@@ -115,7 +117,11 @@ class _WritelnDecorator(object):
class OpenCafeParallelTextTestRunner(unittest.TextTestRunner):
-    def __init__(self, stream=sys.stderr, descriptions=1, verbosity=1):
+    def __init__(self, stream=sys.stderr, descriptions=1, verbosity=1,
+                 resultclass=None):
+        super(OpenCafeParallelTextTestRunner, self).__init__(
+            stream, descriptions, verbosity, resultclass=resultclass)
        self.stream = _WritelnDecorator(stream)
        self.descriptions = descriptions
        self.verbosity = verbosity
@@ -133,6 +139,7 @@ class OpenCafeParallelTextTestRunner(unittest.TextTestRunner):
class LoadedTestClass(object):
def __init__(self, loaded_module):
self.module = loaded_module
self.module_path = self._get_module_path(loaded_module)
@@ -183,6 +190,7 @@ class LoadedTestClass(object):
class SuiteBuilder(object):
def __init__(self, module_regex, method_regex, cl_tags, supress_flag):
self.module_regex = module_regex
self.method_regex = method_regex
@@ -440,6 +448,7 @@ class SuiteBuilder(object):
class _UnittestRunnerCLI(object):
class ListAction(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
product = namespace.product or ""
test_env_mgr = TestEnvManager(product, None)
@@ -468,7 +477,7 @@ class _UnittestRunnerCLI(object):
[" +-{0}/".format(dirname) for dirname in os.listdir(
product_config_dir)])
-#If no values passed, print a default
+# If no values passed, print a default
if not values:
if namespace.product and namespace.config:
_print_test_tree()
@@ -493,11 +502,13 @@ class _UnittestRunnerCLI(object):
exit(0)
class ProductAction(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
# Add the product to the namespace
setattr(namespace, self.dest, values)
class ConfigAction(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
# Make sure user provided config name ends with '.config'
if values is not None:
@@ -514,6 +525,7 @@ class _UnittestRunnerCLI(object):
setattr(namespace, self.dest, values)
class ModuleRegexAction(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
# Make sure user-specified module name has a .py at the end of it
if ".py" not in str(values):
@@ -522,6 +534,7 @@ class _UnittestRunnerCLI(object):
setattr(namespace, self.dest, values)
class MethodRegexAction(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
# Make sure user-specified method name has test_ at the start of it
@@ -531,6 +544,7 @@ class _UnittestRunnerCLI(object):
setattr(namespace, self.dest, values)
class DataAction(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
dict_string = ""
data_range = len(values)
@@ -545,6 +559,7 @@ class _UnittestRunnerCLI(object):
setattr(namespace, self.dest, values)
class DataDirectoryAction(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
if not os.path.exists(values):
print (
@@ -554,6 +569,7 @@ class _UnittestRunnerCLI(object):
setattr(namespace, self.dest, values)
class VerboseAction(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
msg = None
@@ -796,6 +812,7 @@ class _UnittestRunnerCLI(object):
class UnittestRunner(object):
def __init__(self):
self.cl_args = _UnittestRunnerCLI().get_cl_args()
self.test_env = TestEnvManager(
@@ -834,8 +851,13 @@ class UnittestRunner(object):
print "=" * 150
@staticmethod
-def execute_test(runner, test_id, test, results):
+def execute_test(runner, test_id, test, results, verbosity):
result = runner.run(test)
+# Inject tag mapping and log results to console
+UnittestRunner._inject_tag_mapping(result)
+log_results(result, test_id, verbosity=verbosity)
results.update({test_id: result})
@staticmethod
@@ -843,11 +865,13 @@ class UnittestRunner(object):
test_runner = None
# Use the parallel text runner so the console logs look correct
+# Use custom test result object to keep track of tags
if parallel:
test_runner = OpenCafeParallelTextTestRunner(
-verbosity=int(verbosity))
+verbosity=int(verbosity), resultclass=TaggedTextTestResult)
else:
-test_runner = unittest.TextTestRunner(verbosity=int(verbosity))
+test_runner = unittest.TextTestRunner(
+    verbosity=int(verbosity), resultclass=TaggedTextTestResult)
test_runner.failfast = fail_fast
return test_runner
@@ -875,6 +899,24 @@ class UnittestRunner(object):
return errors, failures, tests_run
+    @staticmethod
+    def _inject_tag_mapping(result):
+        """Inject tag mapping into the result's __dict__ if available"""
+        if hasattr(result, 'mapping'):
+            mapping = result.mapping.test_to_tag_mapping
+            result.tags = mapping or []
+            attributes = result.mapping.test_to_attribute_mapping
+            result.attributes = attributes or []
def run(self):
"""
loops through all the packages, modules, and methods sent in from
@@ -894,7 +936,7 @@ class UnittestRunner(object):
self.cl_args.fail_fast,
self.cl_args.verbose)
-#Build master test suite
+# Build master test suite
if self.cl_args.packages:
for package_name in self.cl_args.packages:
path = builder.find_subdir(
@@ -918,19 +960,21 @@ class UnittestRunner(object):
parallel_test_list,
test_runner,
result_type=self.cl_args.result,
-results_path=self.cl_args.result_directory)
+results_path=self.cl_args.result_directory,
+verbosity=self.cl_args.verbose)
exit(exit_code)
else:
exit_code = self.run_serialized(
master_suite,
test_runner,
result_type=self.cl_args.result,
-results_path=self.cl_args.result_directory)
+results_path=self.cl_args.result_directory,
+verbosity=self.cl_args.verbose)
exit(exit_code)
-    def run_parallel(
-            self, test_suites, test_runner, result_type=None,
-            results_path=None):
+    @staticmethod
+    def run_parallel(test_suites, test_runner, result_type=None,
+                     results_path=None, verbosity=0):
exit_code = 0
proc = None
@@ -949,8 +993,8 @@ class UnittestRunner(object):
test_mapping[test_id] = test_suite
proc = Process(
-target=self.execute_test,
-args=(test_runner, test_id, test_suite, results))
+target=UnittestRunner.execute_test,
+args=(test_runner, test_id, test_suite, results, verbosity))
processes.append(proc)
proc.start()
@@ -959,7 +1003,8 @@ class UnittestRunner(object):
finish = time.time()
-errors, failures, _ = self.dump_results(start, finish, results)
+errors, failures, _ = UnittestRunner.dump_results(
+    start, finish, results)
if result_type is not None:
all_results = []
@@ -979,9 +1024,10 @@ class UnittestRunner(object):
return exit_code
+    @staticmethod
    def run_serialized(
-            self, master_suite, test_runner, result_type=None,
-            results_path=None):
+            master_suite, test_runner, result_type=None,
+            results_path=None, verbosity=0):
exit_code = 0
unittest.installHandler()
@@ -989,6 +1035,9 @@ class UnittestRunner(object):
result = test_runner.run(master_suite)
total_execution_time = time.time() - start_time
+# Inject tag mapping
+UnittestRunner._inject_tag_mapping(result)
if result_type is not None:
result_parser = SummarizeResults(vars(result), master_suite,
total_execution_time)
@@ -998,7 +1047,7 @@ class UnittestRunner(object):
reporter.generate_report(result_type=result_type,
path=results_path)
-log_results(result)
+log_results(result, verbosity=verbosity)
if not result.wasSuccessful():
exit_code = 1
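
After injection, downstream consumers such as SummarizeResults.update_tags can read the mappings straight off the result object; a sketch (values hypothetical):

result = test_runner.run(master_suite)
UnittestRunner._inject_tag_mapping(result)
result.tags        # e.g. {'test_login': ['smoke', 'regression']}
result.attributes  # e.g. {'test_login': {'priority': 'high'}}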

View File

@@ -35,7 +35,8 @@ class OpenCafeUnittestTestSuite(TestSuite):
# Monkeypatch: run class cleanup tasks regardless of whether
# tearDownClass succeeds or not
finally:
-previousClass._do_class_cleanup_tasks()
+if hasattr(previousClass, '_do_class_cleanup_tasks'):
+    previousClass._do_class_cleanup_tasks()
# Monkeypatch: run class cleanup tasks regardless of whether
# tearDownClass exists or not