Adds JSON output functionality

Adds a --format option to select JSON-formatted output

Change-Id: Ib35e9788e9076d3dd6158a8dfbd87a50c981ba56
Rob Fletcher 2015-02-19 10:07:45 -08:00
parent 4f5633abe3
commit 0123f82d5c
5 changed files with 297 additions and 87 deletions

.gitignore

@@ -1,3 +1,5 @@
env*
venv*
*.pyc
.DS_Store
*.egg


@@ -31,6 +31,10 @@ To test the new installation::
$ pip install tox
$ tox -epy27
To run PEP8 tests on diffs::
$ tox -v -epep8
Usage
-----
@@ -73,6 +77,8 @@ Usage::
test set profile in config to use (defaults to all
tests)
-l, --level results level filter
-f {txt,json}, --format {txt,json}
output format for STDOUT or file
-o OUTPUT_FILE, --output OUTPUT_FILE
write report to filename
-d, --debug turn on debug mode
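For example, a JSON report could be written to a file with something like the following (a hedged example: the ``bandit`` entry-point name and the target path are illustrative, not part of this diff)::

    $ bandit examples/*.py -f json -o report.json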


@@ -57,6 +57,11 @@ def main():
'-l', '--level', dest='level', action='count',
default=1, help='results level filter'
)
parser.add_argument(
'-f', '--format', dest='output_format', action='store',
default='txt', help='specify output format',
choices=['txt', 'json']
)
parser.add_argument(
'-o', '--output', dest='output_file', action='store',
default=None, help='write report to filename'
@@ -74,7 +79,8 @@ def main():
b_mgr.run_scope(args.files)
if args.debug:
b_mgr.output_metaast()
b_mgr.output_results(args.context_lines, args.level - 1, args.output_file)
b_mgr.output_results(args.context_lines, args.level - 1, args.output_file,
args.output_format)
# return an exit code of 1 if there are results, 0 otherwise
if b_mgr.results_count > 0:
@@ -82,5 +88,6 @@ def main():
else:
sys.exit(0)
if __name__ == '__main__':
main()
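As a minimal standalone sketch of how the new flag parses (mirroring the argparse call in the hunk above; illustrative only, not part of the commit):

    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument('-f', '--format', dest='output_format', action='store',
                        default='txt', help='specify output format',
                        choices=['txt', 'json'])

    # 'json' is accepted because it is listed in choices
    print(parser.parse_args(['-f', 'json']).output_format)  # -> json
    # an unlisted value such as 'xml' would make argparse exit with an
    # "invalid choice" error before main() ever sees it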


@@ -92,17 +92,20 @@ class BanditManager():
'''
return self.b_rs.count
def output_results(self, lines, level, output_filename):
def output_results(self, lines, level, output_filename, output_format):
'''Outputs results from the result store
:param lines: How many surrounding lines to show per result
:param level: Which levels to show (info, warning, error)
:param output_filename: File to store results
:param output_format: output format, either 'json' or 'txt'
:return: -
'''
self.b_rs.report(
scope=self.scope, scores=self.scores, lines=lines,
level=level, output_filename=output_filename
level=level, output_filename=output_filename,
output_format=output_format
)
def output_metaast(self):
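Stitched together from the two hunks above, the plumbing for the new parameter looks roughly like this (argument values are placeholders):

    # bandit's main(), first file:
    b_mgr.output_results(args.context_lines, args.level - 1,
                         args.output_file, args.output_format)

    # BanditManager.output_results(), this file, forwards to the store:
    self.b_rs.report(scope=self.scope, scores=self.scores, lines=lines,
                     level=level, output_filename=output_filename,
                     output_format=output_format)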


@@ -19,9 +19,10 @@
from collections import OrderedDict
from datetime import datetime
import json
import linecache
from operator import itemgetter
import re
from sys import stdout
import constants
import utils
@@ -67,6 +68,8 @@ class BanditResultStore():
filename, lineno = context['filename'], context['lineno']
(issue_type, issue_text) = issue
# XXX(fletcher): tuple usage is fragile because ordering changes on
# agg_type; ordering is important for reporting
if self.agg_type == 'vuln':
if test in self.resstore:
self.resstore[test].append((filename, lineno, issue_type,
@@ -83,77 +86,148 @@ class BanditResultStore():
issue_text), ]
self.count += 1
def report(self, scope, scores, lines=0, level=1, output_filename=None):
'''Prints the contents of the result store
def report_json(self, output_filename, stats=None, lines=1):
'''Prints/returns warnings in JSON format
:param output_filename: File to output the results (optional)
:param stats: dictionary of stats for each file
:param lines: number of lines around code to print
:return: JSON string
'''
machine_output = dict({'results': [], 'errors': [], 'stats': []})
collector = list()
for (fname, reason) in self.skipped:
machine_output['errors'].append({'filename': fname,
'reason': reason})
for filer, score in stats.iteritems():
machine_output['stats'].append({'filename': filer,
'score': score})
# array indices are determined by order of tuples defined in add()
if self.agg_type == 'file':
"""
XXX(fletcher): We currently pass around tuples, whose order changes
depending on agg_type, which leads to confusing code like what's
below.
In this context an item in resstore looks like:
('examples/imports-telnetlib.py', [(1, 'blacklist_imports',
'ERROR','Telnet...other encrypted protocol.')])
So the list/tuple associated with the file
'examples/imports-telnetlib.py' looks like:
(1, 'blacklist_imports', 'ERROR', 'Telnet...other encrypted
protocol.')
This means:
line number = [0] = 1
error label = [1] = 'blacklist_imports'
error type = [2] = 'ERROR'
reason = [3] = 'Telnet...other encrypted protocol.'
"""
for item in self.resstore.items():
filename = item[0]
filelist = item[1]
for x in filelist:
line_num = str(x[0])
error_label = str(x[1]).strip()
error_type = str(x[2]).strip()
reason = str(x[3]).strip()
code = ""
for i in utils.mid_range(int(line_num), lines):
code += linecache.getline(filename, i)
holder = dict({"filename": filename,
"line_num": line_num,
"error_label": error_label,
"error_type": error_type,
"code": code,
"reason": reason})
collector.append(holder)
else:
"""
XXX(fletcher): We currently pass around tuples, whose order changes
depending on agg_type, which leads to confusing code like what's
below.
In this context an item in resstore looks like:
('random_lib_imports', [('examples/random.py', 1, 'INFO', 'Random
library should...cryptographic purposes')])
So the list/tuple associated with error label 'random_lib_imports'
looks like:
('examples/random.py', 1, 'INFO', 'Random library
should...cryptographic purposes')
This means:
filename = [0] = 'examples/random.py'
line number = [1] = 1
error type = [2] = 'INFO'
reason = [3] = 'Random library should...cryptographic purposes'
"""
for item in self.resstore.items():
vuln_label = item[0]
filelist = item[1]
for x in filelist:
filename = str(x[0])
line_num = str(x[1])
error_type = str(x[2]).strip()
reason = str(x[3]).strip()
code = ""
for i in utils.mid_range(int(line_num), lines):
code += linecache.getline(filename, i)
holder = dict({"filename": filename,
"line_num": line_num,
"error_label": vuln_label.strip(),
"error_type": error_type,
"code": code,
"reason": reason})
collector.append(holder)
if self.agg_type == 'vuln':
machine_output['results'] = sorted(collector,
key=itemgetter('error_type'))
else:
machine_output['results'] = sorted(collector,
key=itemgetter('filename'))
return json.dumps(machine_output, sort_keys=True,
indent=2, separators=(',', ': '))
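Given the structure assembled above and json.dumps(sort_keys=True, indent=2), the emitted document should look roughly like this (values borrowed from the XXX comment's example; illustrative only):

    {
      "errors": [],
      "results": [
        {
          "code": "import telnetlib\n",
          "error_label": "blacklist_imports",
          "error_type": "ERROR",
          "filename": "examples/imports-telnetlib.py",
          "line_num": "1",
          "reason": "Telnet...other encrypted protocol."
        }
      ],
      "stats": [
        {
          "filename": "examples/imports-telnetlib.py",
          "score": 5
        }
      ]
    }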
def report_txt(self, scope, scores, lines=0, level=1):
'''Returns TXT string of results
:param scope: Which files were inspected
:param scores: The scores awarded to each file in the scope
:param lines: # of lines around the issue line to display (optional)
:param level: What level of severity to display (optional)
:param output_filename: File to output the results (optional)
:return: -
:return: TXT string
'''
# display output using colors if not writing to a file
is_tty = False if output_filename is not None else stdout.isatty()
if level >= len(constants.SEVERITY):
level = len(constants.SEVERITY) - 1
tmpstr = ""
# get text colors from settings
color = dict()
color['HEADER'] = self.config.get_setting('color_HEADER')
color['DEFAULT'] = self.config.get_setting('color_DEFAULT')
color['INFO'] = self.config.get_setting('color_INFO')
color['WARN'] = self.config.get_setting('color_WARN')
color['ERROR'] = self.config.get_setting('color_ERROR')
tmpstr_list = []
# print header
if is_tty:
tmpstr += "%sRun started:%s\n\t%s\n" % (
color['HEADER'],
color['DEFAULT'],
datetime.utcnow()
)
else:
tmpstr += "Run started:\n\t%s\n" % datetime.utcnow()
tmpstr_list.append("Run started:\n\t%s\n" % datetime.utcnow())
# print which files were inspected
if is_tty:
tmpstr += "%sFiles in scope (%s):%s\n" % (
color['HEADER'], len(scope),
color['DEFAULT']
)
else:
tmpstr += "Files in scope (%s):\n" % (len(scope))
tmpstr_list.append("Files in scope (%s):\n" % (len(scope)))
for item in zip(scope, scores):
tmpstr += "\t%s (score: %i)\n" % item
tmpstr_list.append("\t%s (score: %i)\n" % item)
# print which files were skipped and why
if is_tty:
tmpstr += "%sFiles skipped (%s):%s" % (
color['HEADER'], len(self.skipped),
color['DEFAULT']
)
else:
tmpstr += "Files skipped (%s):" % len(self.skipped)
tmpstr_list.append("Files skipped (%s):" % len(self.skipped))
for (fname, reason) in self.skipped:
tmpstr += "\n\t%s (%s)" % (fname, reason)
tmpstr_list.append("\n\t%s (%s)" % (fname, reason))
# print the results
if is_tty:
tmpstr += "\n%sTest results:%s\n" % (
color['HEADER'], color['DEFAULT']
)
else:
tmpstr += "\nTest results:\n"
tmpstr_list.append("\nTest results:\n")
if self.count == 0:
tmpstr += "\tNo issues identified.\n"
tmpstr_list.append("\tNo issues identified.\n")
# if aggregating by vulnerability type
elif self.agg_type == 'vuln':
for test, issues in self.resstore.items():
@@ -164,24 +238,17 @@ class BanditResultStore():
continue
# if the result isn't filtered out by severity
if constants.SEVERITY.index(issue_type) >= level:
if is_tty:
tmpstr += "%s>> %s\n - %s::%s%s\n" % (
color.get(issue_type, color['DEFAULT']),
issue_text, filename, lineno,
color['DEFAULT']
)
else:
tmpstr += ">> %s\n - %s::%s\n" % (
issue_text, filename, lineno
)
tmpstr_list.append(">> %s\n - %s::%s\n" % (
issue_text, filename, lineno
))
for i in utils.mid_range(lineno, lines):
line = linecache.getline(filename, i)
# linecache returns '' if line does not exist
if line != '':
tmpstr += "\t%3d %s" % (
tmpstr_list.append("\t%3d %s" % (
i, linecache.getline(filename, i)
)
))
# otherwise, aggregating by filename
else:
for filename, issues in self.resstore.items():
@@ -192,30 +259,155 @@ class BanditResultStore():
continue
# if the result isn't filtered out by severity
if constants.SEVERITY.index(issue_type) >= level:
if is_tty:
tmpstr += "%s>> %s\n - %s::%s%s\n" % (
color.get(
issue_type, color['DEFAULT']
),
issue_text, filename, lineno,
color['DEFAULT']
)
else:
tmpstr += ">> %s\n - %s::%s\n" % (
issue_text, filename, lineno
)
tmpstr_list.append(">> %s\n - %s::%s\n" % (
issue_text, filename, lineno
))
for i in utils.mid_range(lineno, lines):
line = linecache.getline(filename, i)
# linecache returns '' if line does not exist
if line != '':
tmpstr += "\t%3d %s" % (
tmpstr_list.append("\t%3d %s" % (
i, linecache.getline(filename, i)
)
# output to a file,
if output_filename is not None:
with open(output_filename, 'w') as fout:
fout.write(tmpstr)
print("Output written to file: %s" % output_filename)
# or print the results on screen
))
return "".join(tmpstr_list)
def report_tty(self, scope, scores, lines=0, level=1):
'''Prints the contents of the result store
:param scope: Which files were inspected
:param scores: The scores awarded to each file in the scope
:param lines: # of lines around the issue line to display (optional)
:param level: What level of severity to display (optional)
:return: TXT string with appropriate TTY coloring for terminals
'''
if level >= len(constants.SEVERITY):
level = len(constants.SEVERITY) - 1
tmpstr_list = []
# get text colors from settings
get_setting = self.config.get_setting
color = {'HEADER': get_setting('color_HEADER'),
'DEFAULT': get_setting('color_DEFAULT'),
'INFO': get_setting('color_INFO'),
'WARN': get_setting('color_WARN'),
'ERROR': get_setting('color_ERROR')
}
# print header
tmpstr_list.append("%sRun started:%s\n\t%s\n" % (
color['HEADER'],
color['DEFAULT'],
datetime.utcnow()
))
# print which files were inspected
tmpstr_list.append("%sFiles in scope (%s):%s\n" % (
color['HEADER'], len(scope),
color['DEFAULT']
))
for item in zip(scope, scores):
tmpstr_list.append("\t%s (score: %i)\n" % item)
# print which files were skipped and why
tmpstr_list.append("%sFiles skipped (%s):%s" % (
color['HEADER'], len(self.skipped),
color['DEFAULT']
))
for (fname, reason) in self.skipped:
tmpstr_list.append("\n\t%s (%s)" % (fname, reason))
# print the results
tmpstr_list.append("\n%sTest results:%s\n" % (
color['HEADER'], color['DEFAULT']
))
if self.count == 0:
tmpstr_list.append("\tNo issues identified.\n")
# if aggregating by vulnerability type
elif self.agg_type == 'vuln':
for test, issues in self.resstore.items():
for filename, lineno, issue_type, issue_text in issues:
issue_line = linecache.getline(filename, lineno)
# if the line doesn't have one of the skip tags, keep going
if re.search(constants.SKIP_RE, issue_line):
continue
# if the result isn't filtered out by severity
if constants.SEVERITY.index(issue_type) >= level:
tmpstr_list.append("%s>> %s\n - %s::%s%s\n" % (
color.get(issue_type, color['DEFAULT']),
issue_text, filename, lineno,
color['DEFAULT']
))
for i in utils.mid_range(lineno, lines):
line = linecache.getline(filename, i)
# linecache returns '' if line does not exist
if line != '':
tmpstr_list.append("\t%3d %s" % (
i, linecache.getline(filename, i)
))
# otherwise, aggregating by filename
else:
print(tmpstr)
for filename, issues in self.resstore.items():
for lineno, test, issue_type, issue_text in issues:
issue_line = linecache.getline(filename, lineno)
# if the line has one of the skip tags, skip this result
if re.search(constants.SKIP_RE, issue_line):
continue
# if the result isn't filtered out by severity
if constants.SEVERITY.index(issue_type) >= level:
tmpstr_list.append("%s>> %s\n - %s::%s%s\n" % (
color.get(
issue_type, color['DEFAULT']
),
issue_text, filename, lineno,
color['DEFAULT']
))
for i in utils.mid_range(lineno, lines):
line = linecache.getline(filename, i)
# linecache returns '' if line does not exist
if line != '':
tmpstr_list.append("\t%3d %s" % (
i, linecache.getline(filename, i)
))
return ''.join(tmpstr_list)
def report(self, scope, scores, lines=0, level=1, output_filename=None,
output_format=None):
'''Prints the contents of the result store
:param scope: Which files were inspected
:param scores: The scores awarded to each file in the scope
:param lines: # of lines around the issue line to display (optional)
:param level: What level of severity to display (optional)
:param output_filename: File to output the results (optional)
:param output_format: File type to output (json|txt)
:return: -
'''
scores_dict = dict(zip(scope, scores))
if output_filename is None and output_format == 'txt':
print self.report_tty(scope, scores, lines, level) # noqa
return
if output_filename is None and output_format == 'json':
print self.report_json(output_filename, scores_dict) # noqa
return
if output_format == 'txt':
outer = self.report_txt(scope, scores, lines, level)
with open(output_filename, 'w') as fout:
fout.write(outer)
print("TXT output written to file: %s" % output_filename)
return
else:
outer = self.report_json(output_filename, scores_dict)
with open(output_filename, 'w') as fout:
fout.write(outer)
print("JSON output written to file: %s" % output_filename)
return
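In effect, report() now dispatches on the (output_filename, output_format) pair; a condensed sketch of the same four paths (not the committed code, which spells each branch out):

    if output_filename is None:
        # print to stdout: colored TTY text, or the JSON document
        print(self.report_tty(scope, scores, lines, level)
              if output_format == 'txt'
              else self.report_json(output_filename, scores_dict))
    else:
        # write plain text or JSON to the requested file
        data = (self.report_txt(scope, scores, lines, level)
                if output_format == 'txt'
                else self.report_json(output_filename, scores_dict))
        with open(output_filename, 'w') as fout:
            fout.write(data)
        print("%s output written to file: %s"
              % (output_format.upper(), output_filename))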