Revamped results schema

Results are now formatted according to the schema defined here:
https://gist.github.com/cneill/a511451284a0c5f33295477150bd94d4

Furthermore, the JSON formatter is no longer responsible for aggregating
issues; that logic has been moved to the IssueTestResult class.
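
For reference, the aggregated object that IssueTestResult now builds and
hands to the formatter has the following top-level shape. This is a rough
Python sketch inferred from the diff below, with placeholder values; the
gist above remains the authoritative schema:

    # Sketch (inferred, not authoritative) of the aggregated result object
    output = {
        "errors": [                    # one entry per test error
            {"test": "...", "error": "..."}
        ],
        "failures": [                  # one per unique url/defect_type/description
            {
                "url": "host.com/blah",
                "defect_type": "500_error",
                "description": "...",
                "failure_id": 0,
                "instances": []        # merged payload/signal instances
            }
        ],
        "stats": {
            "severity": {"UNDEFINED": 0, "LOW": 0, "MEDIUM": 0, "HIGH": 0}
        }
    }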

Change-Id: Id39e122b2b4c1c9cafab09fdbc5d172dec012d22
michael.dong@rackspace.com committed 2016-08-26 17:28:56 -05:00
parent 4876101b55
commit adca69a272
5 changed files with 192 additions and 140 deletions


@@ -13,123 +13,14 @@
 # limitations under the License.
 import json
 
 import syntribos
 
 
 class JSONFormatter(object):
 
     def __init__(self, results):
         self.results = results
 
-    def report(self, min_severity, min_confidence, exclude_results):
-        min_sev = syntribos.RANKING_VALUES[min_severity]
-        min_conf = syntribos.RANKING_VALUES[min_confidence]
-        machine_output = dict({'failures': {}, 'errors': [], 'stats': {}})
-        machine_output['stats']['severity'] = {
-            'UNDEFINED': 0, 'LOW': 0, 'MEDIUM': 0, 'HIGH': 0
-        }
-        severity_counter_dict = {}
-
-        # reports errors
-        for test, error in self.results.errors:
-            machine_output['errors'].append(
-                {
-                    'test': self.results.getDescription(test),
-                    'error': error
-                })
-
-        # reports failures
-        # Gets list of [issues] by flattening list of [(test, [issues])]
-        issues = [issue for test, failures in self.results.failures
-                  for issue in failures]
-        for issue in issues:
-            target = issue.target
-            path = issue.path
-            url = "{0}{1}".format(target, path)
-            defect_type = issue.defect_type
-            sev_rating = syntribos.RANKING[issue.severity]
-            conf_rating = syntribos.RANKING[issue.confidence]
-            if any([True for x in exclude_results if x and x in defect_type]):
-                continue
-            defect_obj = {
-                'description': issue.description,
-                'severity': sev_rating,
-                'signals': {
-                    'init_signals': [s.slug for s in issue.init_signals],
-                    'test_signals': [s.slug for s in issue.test_signals],
-                    'diff_signals': [s.slug for s in issue.diff_signals]
-                }
-            }
-            if defect_type not in severity_counter_dict:
-                severity_counter_dict[defect_type] = defect_obj
-                machine_output['stats']['severity'][sev_rating] += 1
-            if url not in machine_output['failures']:
-                if issue.severity >= min_sev and issue.confidence >= min_conf:
-                    machine_output['failures'][url] = {}
-                else:
-                    continue
-            issues_by_url = machine_output['failures'][url]
-            if defect_type not in issues_by_url:
-                if issue.severity >= min_sev and issue.confidence >= min_conf:
-                    issues_by_url[defect_type] = defect_obj
-                else:
-                    continue
-            issues_by_defect = issues_by_url[defect_type]
-            if issue.impacted_parameter:
-                # Only fuzz tests have an ImpactedParameter
-                method = issue.impacted_parameter.method
-                loc = issue.impacted_parameter.location
-                name = issue.impacted_parameter.name
-                content_type = issue.content_type
-                payload_string = issue.impacted_parameter.trunc_fuzz_string
-                param = {
-                    'method': method,
-                    'location': loc,
-                    'variables': [name],
-                }
-                if loc == "data":
-                    param['type'] = content_type
-                payload_obj = {
-                    'strings': [payload_string],
-                    'param': param,
-                    'confidence': conf_rating
-                }
-                if 'payloads' not in issues_by_defect:
-                    issues_by_defect['payloads'] = [payload_obj]
-                else:
-                    is_not_duplicate_payload = True
-                    for p in issues_by_defect['payloads']:
-                        if (p['param']['method'] == method and
-                                p['param']['location'] == loc):
-                            if payload_string not in p['strings']:
-                                p['strings'].append(payload_string)
-                            if name not in p['param']['variables']:
-                                p['param']['variables'].append(name)
-                            is_not_duplicate_payload = False
-                            break
-                    if is_not_duplicate_payload:
-                        issues_by_defect['payloads'].append(payload_obj)
-            else:
-                issues_by_defect['confidence'] = conf_rating
-        output = json.dumps(machine_output, sort_keys=True,
+    def report(self, output):
+        output = json.dumps(output, sort_keys=True, cls=SetEncoder,
                             indent=2, separators=(',', ': '))
         self.results.stream.write(output)
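
Note that the new report() passes cls=SetEncoder to json.dumps, since the
aggregated signal and string collections are Python sets, which json cannot
serialize natively. SetEncoder is not defined in this hunk; a minimal sketch
of what such an encoder would look like (hypothetical reconstruction):

    import json

    class SetEncoder(json.JSONEncoder):
        """Serialize set objects as JSON lists."""
        def default(self, obj):
            if isinstance(obj, set):
                return list(obj)  # sets are not JSON-serializable; lists are
            return json.JSONEncoder.default(self, obj)

    # Example: json.dumps({"signals": {"HTTP_STATUS_CODE_5XX"}}, cls=SetEncoder)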


@@ -30,21 +30,168 @@ class IssueTestResult(unittest.TextTestResult):
     This class aggregates :class:`syntribos.issue.Issue` objects from all the
     tests as they run
     """
+    output = {"failures": {}, "errors": [], "stats": {}}
+    output["stats"]["severity"] = {
+        "UNDEFINED": 0, "LOW": 0, "MEDIUM": 0, "HIGH": 0
+    }
     stats = {"errors": 0, "failures": 0, "successes": 0}
+    severity_counter_dict = {}
     testsRunSinceLastPrint = 0
+    failure_id = 0
 
     def addFailure(self, test, err):
         """Adds issues to data structures
 
-        Appends issues to the result's list of failures, as well as
-        to a dict of {url: {method: {test_name: issue}}} structure.
+        Appends issues to the result's list of failures, as well as updates
+        the stats for the result. Each failure in the list of failures takes
+        the form:
+
+        .. code-block:: json
+
+            {
+                "url": "host.com/blah",
+                "type": "500_error",
+                "description": "500 errors r bad, mkay?",
+                "failure_id": 1234,
+                "instances": [
+                    {
+                        "confidence": "HIGH",
+                        "param": {
+                            "location": "headers",
+                            "method": "POST",
+                            "variables": [
+                                "Content-Type"
+                            ]
+                        },
+                        "strings": [
+                            "derp"
+                        ],
+                        "severity": "LOW",
+                        "signals": {
+                            "diff_signals": [],
+                            "init_signals": [],
+                            "test_signals": []
+                        }
+                    }
+                ]
+            }
 
         :param test: The test that has failed
         :type test: :class:`syntribos.tests.base.BaseTestCase`
         :param tuple err: Tuple of format ``(type, value, traceback)``
         """
-        self.failures.append((test, test.failures))
-        self.stats["failures"] += len(test.failures)
+        for issue in test.failures:
+            defect_type = issue.defect_type
+            if any([True for x in CONF.syntribos.exclude_results
+                    if x and x in defect_type]):
+                continue
+            min_sev = syntribos.RANKING_VALUES[CONF.min_severity]
+            min_conf = syntribos.RANKING_VALUES[CONF.min_confidence]
+            if issue.severity < min_sev or issue.confidence < min_conf:
+                continue
+            target = issue.target
+            path = issue.path
+            url = "{0}{1}".format(target, path)
+            description = issue.description
+            failure_obj = None
+            for f in self.failures:
+                if (f["url"] == url and f["defect_type"] == defect_type and
+                        f["description"] == description):
+                    failure_obj = f
+                    break
+            if not failure_obj:
+                failure_obj = {
+                    "url": url,
+                    "defect_type": defect_type,
+                    "description": description,
+                    "failure_id": self.failure_id,
+                    "instances": []
+                }
+                self.failures.append(failure_obj)
+                self.failure_id += 1
+            signals = {}
+            if issue.init_signals:
+                signals["init_signals"] = set(
+                    [s.slug for s in issue.init_signals])
+            if issue.test_signals:
+                signals["test_signals"] = set(
+                    [s.slug for s in issue.test_signals])
+            if issue.diff_signals:
+                signals["diff_signals"] = set(
+                    [s.slug for s in issue.diff_signals])
+            sev_rating = syntribos.RANKING[issue.severity]
+            conf_rating = syntribos.RANKING[issue.confidence]
+            if issue.impacted_parameter:
+                method = issue.impacted_parameter.method
+                loc = issue.impacted_parameter.location
+                name = issue.impacted_parameter.name
+                content_type = issue.content_type
+                payload_string = issue.impacted_parameter.trunc_fuzz_string
+                param = {
+                    "method": method,
+                    "location": loc,
+                }
+                if loc == "data":
+                    param["type"] = content_type
+                instance_obj = None
+                for i in failure_obj["instances"]:
+                    if (i["confidence"] == conf_rating and
+                            i["severity"] == sev_rating and
+                            i["param"]["method"] == method and
+                            i["param"]["location"] == loc):
+                        i["param"]["variables"].add(name)
+                        for sig_type in signals:
+                            if sig_type in i["signals"]:
+                                i["signals"][sig_type].update(
+                                    signals[sig_type])
+                            else:
+                                i["signals"][sig_type] = signals[sig_type]
+                        i["strings"].add(payload_string)
+                        instance_obj = i
+                        break
+                if not instance_obj:
+                    param["variables"] = set([name])
+                    instance_obj = {
+                        "confidence": conf_rating,
+                        "severity": sev_rating,
+                        "param": param,
+                        "strings": set([payload_string]),
+                        "signals": signals
+                    }
+                    failure_obj["instances"].append(instance_obj)
+                    self.stats["failures"] += 1
+                    self.output["stats"]["severity"][sev_rating] += 1
+            else:
+                instance_obj = None
+                for i in failure_obj["instances"]:
+                    if (i["confidence"] == conf_rating and
+                            i["severity"] == sev_rating):
+                        for sig_type in signals:
+                            if sig_type in i["signals"]:
+                                i["signals"][sig_type].update(
+                                    signals[sig_type])
+                            else:
+                                i["signals"][sig_type] = signals[sig_type]
+                        instance_obj = i
+                        break
+                if not instance_obj:
+                    instance_obj = {
+                        "confidence": conf_rating,
+                        "severity": sev_rating,
+                        "signals": signals
+                    }
+                    failure_obj["instances"].append(instance_obj)
+                    self.stats["failures"] += 1
+                    self.output["stats"]["severity"][sev_rating] += 1
 
     def addError(self, test, err):
         """Duplicates parent class addError functionality.
@@ -54,7 +201,11 @@ class IssueTestResult(unittest.TextTestResult):
         :param err:
         :type tuple: Tuple of format ``(type, value, traceback)``
         """
-        self.errors.append((test, self._exc_info_to_string(err, test)))
+        self.errors.append(
+            {
+                "test": self.getDescription(test),
+                "error": self._exc_info_to_string(err, test)
+            })
         self.stats["errors"] += 1
 
     def addSuccess(self, test):
@@ -65,36 +216,37 @@ class IssueTestResult(unittest.TextTestResult):
         """
         self.stats["successes"] += 1
 
-    def printErrors(self, output_format, min_severity, min_confidence,
-                    exclude_results):
+    def printErrors(self, output_format):
         """Print out each :class:`syntribos.issue.Issue` that was encountered
 
         :param str output_format: Either "json" or "xml"
         """
+        self.output["errors"] = self.errors
+        self.output["failures"] = self.failures
         formatter_types = {
             "json": JSONFormatter(self)
         }
         formatter = formatter_types[output_format]
-        formatter.report(min_severity, min_confidence, exclude_results)
+        formatter.report(self.output)
 
     def print_result(self, start_time):
         """Prints test summary/stats (e.g. # failures) to stdout."""
-        self.printErrors(
-            CONF.output_format, CONF.min_severity, CONF.min_confidence,
-            CONF.syntribos.exclude_results)
+        self.printErrors(CONF.output_format)
         self.print_log_path_and_stats(start_time)
 
     def print_log_path_and_stats(self, start_time):
         """Print the path to the log folder for this run."""
         test_log = Runner.get_log_file_name()
         run_time = time.time() - start_time
+        num_fail = self.stats["failures"]
+        num_err = self.stats["errors"]
         print("\n{sep}\nTotal: Ran {num} test{suff} in {time:.3f}s".format(
             sep=syntribos.SEP, num=self.testsRun,
             suff="s" * bool(self.testsRun - 1), time=run_time))
         print("Total: {f} failure{fsuff} and {e} error{esuff}".format(
-            f=len(self.failures), e=len(self.errors),
-            fsuff="s" * bool(len(self.failures) - 1),
-            esuff="s" * bool(len(self.errors) - 1)))
+            f=num_fail, e=num_err,
+            fsuff="s" * bool(num_fail - 1),
+            esuff="s" * bool(num_err - 1)))
         if test_log:
             print(syntribos.SEP)
             print("LOG PATH...: {path}".format(path=test_log))


@@ -233,8 +233,8 @@ class Runner(object):
             LOG.debug(test_time)
             bar.increment(1)
             bar.print_bar()
-            failures = len(result.failures) - last_failures
-            errors = len(result.errors) - last_errors
+            failures = result.stats["failures"] - last_failures
+            errors = result.stats["errors"] - last_errors
             total_tests = len(test_cases)
             if failures > total_tests * 0.90:
                 # More than 90 percent failure
@@ -246,8 +246,8 @@
                 # More than 15 percent failure
                 failures = cli.colorize(failures, "blue")
             if errors:
-                last_failures = len(result.failures)
-                last_errors = len(result.errors)
+                last_failures = result.stats["failures"]
+                last_errors = result.stats["errors"]
                 errors = cli.colorize(errors, "red")
                 print (" : {0} Failure(s), {1} Error(s)\r".format(
                     failures, errors))
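
Because result.failures now holds one aggregated dict per unique failure
rather than one (test, issues) tuple per failing test, len(result.failures)
no longer tracks the raw failure count, so the progress output reads
result.stats instead. A minimal sketch of the delta computation (assumed
values for illustration):

    stats = {"failures": 7, "errors": 1}  # cumulative counts from IssueTestResult
    last_failures, last_errors = 5, 0     # counts at the previous progress tick
    new_failures = stats["failures"] - last_failures  # 2 failures this section
    new_errors = stats["errors"] - last_errors        # 1 error this section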


@@ -208,10 +208,10 @@ class ImpactedParameter(object):
     def __init__(self, method, location, name, value):
         self.method = method
         self.location = location
-        if len(value) >= 512:
+        if len(value) >= 128:
             self.trunc_fuzz_string = "{0}...({1} chars)...{2}".format(
-                value[:256], len(value),
-                value[-256:])
+                value[:64], len(value),
+                value[-64:])
         else:
             self.trunc_fuzz_string = value
         self.fuzz_string = value
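
The truncation threshold for recorded fuzz strings drops from 512 to 128
characters, keeping the first and last 64 characters around a length marker.
A quick sketch of the new behavior:

    # Mirrors the updated ImpactedParameter truncation rule
    def trunc(value):
        if len(value) >= 128:
            return "{0}...({1} chars)...{2}".format(
                value[:64], len(value), value[-64:])
        return value

    trunc("A" * 300)  # -> 64 A's, "...(300 chars)...", then 64 A's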


@@ -11,21 +11,35 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-import sys
-
 import testtools
 
 import syntribos
+from syntribos.issue import Issue
 from syntribos.result import IssueTestResult
 
 
 class FakeTest(object):
 
     def __init__(self, name):
-        self.failures = [1, 2]
-        self.errors = [3, 4]
-        self.successes = [5, 6]
         self.name = name
         self.failureException = Exception
+        issue1 = Issue(defect_type="fake",
+                       severity=syntribos.LOW,
+                       description="x",
+                       confidence=syntribos.LOW)
+        issue1.target = "example.com"
+        issue1.path = "/test"
+        issue2 = Issue(defect_type="fake2",
+                       severity=syntribos.MEDIUM,
+                       description="x",
+                       confidence=syntribos.LOW)
+        issue2.target = "example.com"
+        issue2.path = "/test2"
+        self.failures = [issue1, issue2]
 
     def __str__(self):
         return self.name
@@ -40,11 +54,6 @@ class TestIssueTestResult(testtools.TestCase):
         self.issue_result.addFailure(test, ())
         self.assertEqual(self.issue_result.stats["failures"], 2)
 
-    def test_addError(self):
-        test = FakeTest("error")
-        self.issue_result.addError(test, sys.exc_info())
-        self.assertEqual(self.issue_result.stats["errors"], 1)
-
     def test_addSuccess(self):
         test = FakeTest("success")
         self.issue_result.addSuccess(test)