Improve performance by multithreading test calls
This change:

1) rewrites the runner to spawn a thread pool for each template and assigns a
   worker for each test case
2) makes the output colorized by default
3) makes minor changes to the output

Change-Id: I49906f5daaa339ca9429913680203c762a0ad9fe
parent cb458c03db
commit c4586a374b
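For context, the concurrency pattern this change introduces is a worker pool per template: a multiprocessing.dummy.Pool (thread-backed) maps every generated test case to a worker, while shared state such as the result counters and the progress bar is serialized behind a threading.Lock. A minimal, self-contained sketch of that pattern follows; it is illustrative only, not syntribos code (run_one, CASES and the counter are stand-ins):

    import threading
    from multiprocessing.dummy import Pool as ThreadPool  # threads, not processes

    lock = threading.Lock()
    completed = 0

    def run_one(case):
        """Run a single stand-in test case, then update shared progress under the lock."""
        global completed
        outcome = case()      # stands in for running a unittest suite
        with lock:            # shared counters/progress output are lock-protected
            completed += 1
        return outcome

    CASES = [lambda i=i: i * i for i in range(32)]  # stand-in "test cases"

    pool = ThreadPool(16)                # 16 mirrors the new threads option's default
    results = pool.map(run_one, CASES)   # one worker per test case, as the runner now does
    pool.close()
    pool.join()
    print("ran %d cases" % completed)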
@@ -80,4 +80,5 @@ class HTTPClient(object):
                 'data': data}, **requestslib_kwargs)

         # Make the request
-        return requests.request(method, url, **requestslib_kwargs)
+        return requests.request(method, url, allow_redirects=False,
+                                **requestslib_kwargs)
@@ -53,7 +53,7 @@ class SynHTTPClient(HTTPClient):
         return (response, signals)

     def send_request(self, request_obj):
-        """This sends a request based on a RequestOjbect.
+        """This sends a request based on a RequestObject.

        RequestObjects are generated by a parser (e.g.
        :class:`syntribos.clients.http.parser.RequestCreator`) from request
@@ -17,6 +17,7 @@
 # limitations under the License.
 from copy import deepcopy
 import logging
+import threading
 from time import time

 import requests
@@ -27,6 +28,8 @@ import syntribos.checks.http as http_checks
 import syntribos.signal
 from syntribos.utils import string_utils

+lock = threading.Lock()
+

 def log_http_transaction(log, level=logging.DEBUG):
     """Decorator used for logging requests/response in clients.
@@ -59,20 +62,13 @@ def log_http_transaction(log, level=logging.DEBUG):
             sent to the request() method, to the provided log at the provided
             log level.
             """

             kwargs_copy = deepcopy(kwargs)
             if kwargs_copy.get("sanitize"):
                 kwargs_copy = string_utils.sanitize_secrets(kwargs_copy)
-            logline = '{0} {1}'.format(args, string_utils.compress(
+            logline_obj = '{0} {1}'.format(args, string_utils.compress(
                 kwargs_copy))

-            try:
-                log.debug(_safe_decode(logline))
-            except Exception as exception:
-                # Ignore all exceptions that happen in logging, then log them
-                log.info('Exception occurred while logging signature of '
-                         'calling method in http client')
-                log.exception(exception)
             # Make the request and time its execution
             response = None
             no_resp_time = None
@@ -132,7 +128,7 @@ def log_http_transaction(log, level=logging.DEBUG):
                 request_headers = string_utils.sanitize_secrets(
                     request_headers)
                 request_body = string_utils.sanitize_secrets(request_body)
-            logline = ''.join([
+            logline_req = ''.join([
                 '\n{0}\nREQUEST SENT\n{0}\n'.format('-' * 12),
                 'request method.......: {0}\n'.format(response.request.method),
                 'request url..........: {0}\n'.format(string_utils.compress(
@@ -145,15 +141,7 @@ def log_http_transaction(log, level=logging.DEBUG):
                 'request body size....: {0}\n'.format(req_body_len),
                 'request body.........: {0}\n'.format(string_utils.compress
                                                       (request_body))])
-
-            try:
-                log.log(level, _safe_decode(logline))
-            except Exception as exception:
-                # Ignore all exceptions that happen in logging, then log them
-                log.log(level, '\n{0}\nREQUEST INFO\n{0}\n'.format('-' * 12))
-                log.exception(exception)
-
-            logline = ''.join([
+            logline_rsp = ''.join([
                 '\n{0}\nRESPONSE RECEIVED\n{0}\n'.format('-' * 17),
                 'response status..: {0}\n'.format(response),
                 'response headers.: {0}\n'.format(response.headers),
@@ -162,12 +150,27 @@ def log_http_transaction(log, level=logging.DEBUG):
                 'response size....: {0}\n'.format(len(response.content)),
                 'response body....: {0}\n'.format(response_content),
                 '-' * 79])
+            lock.acquire()
             try:
-                log.log(level, _safe_decode(logline))
+                log.log(level, _safe_decode(logline_req))
+            except Exception as exception:
+                # Ignore all exceptions that happen in logging, then log them
+                log.log(level, '\n{0}\nREQUEST INFO\n{0}\n'.format('-' * 12))
+                log.exception(exception)
+            try:
+                log.log(level, _safe_decode(logline_rsp))
             except Exception as exception:
                 # Ignore all exceptions that happen in logging, then log them
                 log.log(level, '\n{0}\nRESPONSE INFO\n{0}\n'.format('-' * 13))
                 log.exception(exception)
+            try:
+                log.debug(_safe_decode(logline_obj))
+            except Exception as exception:
+                # Ignore all exceptions that happen in logging, then log them
+                log.info('Exception occurred while logging signature of '
+                         'calling method in http client')
+                log.exception(exception)
+            lock.release()
             return (response, signals)
         return _wrapper
     return _decorator
@@ -157,7 +157,8 @@ def list_cli_opts():
                    default=[""], sample_default=["SQL", "XSS"],
                    help=_("Test types to be excluded from "
                           "current run against the target API")),
-        cfg.BoolOpt("colorize", dest="colorize", short="cl", default=False,
+        cfg.BoolOpt("no_colorize", dest="no_colorize", short="ncl",
+                    default=False,
                     help=_("Enable color in syntribos terminal output")),
         cfg.StrOpt("outfile", short="o",
                    sample_default="out.json", help=_("File to print "
@@ -194,6 +195,10 @@ def list_syntribos_opts():
         cfg.StrOpt("endpoint", default="",
                    sample_default="http://localhost/app",
                    help=_("The target host to be tested")),
+        cfg.IntOpt("threads", default=16,
+                   sample_default="16",
+                   help=_("Maximum number of threads syntribos spawns "
+                          "(experimental)")),
         cfg.Opt("templates", type=ContentType("r", 0),
                 default="",
                 sample_default="~/.syntribos/templates",
@@ -11,6 +11,7 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+import threading
 import time
 import unittest

@@ -23,6 +24,7 @@ from syntribos.runner import Runner
 import syntribos.utils.remotes

 CONF = cfg.CONF
+lock = threading.Lock()


 class IssueTestResult(unittest.TextTestResult):
@@ -38,7 +40,7 @@ class IssueTestResult(unittest.TextTestResult):
         "MEDIUM": 0,
         "HIGH": 0
     }
-    stats = {"errors": 0, "failures": 0, "successes": 0}
+    stats = {"errors": 0, "unique_failures": 0, "successes": 0}
     severity_counter_dict = {}
     testsRunSinceLastPrint = 0
     failure_id = 0
@@ -84,6 +86,7 @@ class IssueTestResult(unittest.TextTestResult):
         :type test: :class:`syntribos.tests.base.BaseTestCase`
         :param tuple err: Tuple of format ``(type, value, traceback)``
         """
+        lock.acquire()
         for issue in test.failures:
             defect_type = issue.defect_type
             if any([
@@ -174,7 +177,7 @@ class IssueTestResult(unittest.TextTestResult):
                         "signals": signals
                     }
                     failure_obj["instances"].append(instance_obj)
-                    self.stats["failures"] += 1
+                    self.stats["unique_failures"] += 1
                     self.output["stats"]["severity"][sev_rating] += 1
                 else:
                     instance_obj = None
@@ -196,8 +199,9 @@ class IssueTestResult(unittest.TextTestResult):
                     "signals": signals
                 }
                 failure_obj["instances"].append(instance_obj)
-                self.stats["failures"] += 1
+                self.stats["unique_failures"] += 1
                 self.output["stats"]["severity"][sev_rating] += 1
+        lock.release()

     def addError(self, test, err):
         """Duplicates parent class addError functionality.
@@ -207,11 +211,19 @@ class IssueTestResult(unittest.TextTestResult):
         :param err:
         :type tuple: Tuple of format ``(type, value, traceback)``
         """
-        self.errors.append({
-            "test": self.getDescription(test),
-            "error": self._exc_info_to_string(err, test)
-        })
-        self.stats["errors"] += 1
+        with lock:
+            for e in self.errors:
+                if e['error'] == self._exc_info_to_string(err, test):
+                    if self.getDescription(test) in e['test']:
+                        return
+                    e['test'].append(self.getDescription(test))
+                    self.stats["errors"] += 1
+                    return
+            self.errors.append({
+                "test": [self.getDescription(test)],
+                "error": self._exc_info_to_string(err, test)
+            })
+            self.stats["errors"] += 1

     def addSuccess(self, test):
         """Duplicates parent class addSuccess functionality.
@@ -219,7 +231,8 @@ class IssueTestResult(unittest.TextTestResult):
         :param test: The test that was run
         :type test: :class:`syntribos.tests.base.BaseTestCase`
         """
-        self.stats["successes"] += 1
+        with lock:
+            self.stats["successes"] += 1

     def printErrors(self, output_format):
         """Print out each :class:`syntribos.issue.Issue` that was encountered
@@ -241,18 +254,19 @@ class IssueTestResult(unittest.TextTestResult):
         """Print the path to the log folder for this run."""
         test_log = Runner.log_path
         run_time = time.time() - start_time
-        num_fail = self.stats["failures"]
+        num_fail = self.stats["unique_failures"]
         num_err = self.stats["errors"]
         print("\n{sep}\nTotal: Ran {num} test{suff} in {time:.3f}s".format(
             sep=syntribos.SEP,
             num=self.testsRun,
             suff="s" * bool(self.testsRun - 1),
             time=run_time))
-        print("Total: {f} failure{fsuff} and {e} error{esuff}".format(
-            f=num_fail,
-            e=num_err,
-            fsuff="s" * bool(num_fail - 1),
-            esuff="s" * bool(num_err - 1)))
+        print("Total: {f} unique failure{fsuff} "
+              "and {e} unique error{esuff}".format(
+                  f=num_fail,
+                  e=num_err,
+                  fsuff="s" * bool(num_fail - 1),
+                  esuff="s" * bool(num_err - 1)))
         if test_log:
             print(syntribos.SEP)
             print(_("LOG PATH...: %s") % test_log)
@@ -13,9 +13,11 @@
 # limitations under the License.
 import json
 import logging
+from multiprocessing.dummy import Pool as ThreadPool
 import os
 import pkgutil
 import sys
+import threading
 import time
 import traceback
 import unittest
@@ -39,6 +41,7 @@ result = None
 user_base_dir = None
 CONF = cfg.CONF
 LOG = logging.getLogger(__name__)
+lock = threading.Lock()


 class Runner(object):
@@ -125,6 +128,7 @@ class Runner(object):
         LOG = logging.getLogger()
         LOG.handlers = [log_handle]
         LOG.setLevel(logging.DEBUG)
+        logging.getLogger("urllib3").setLevel(logging.WARNING)
         return LOG

     @classmethod
@@ -258,7 +262,6 @@ class Runner(object):
                     cls.meta_dir_dict[meta_path] = json.loads(file_content)
                 except Exception:
                     print("Unable to parse %s, skipping..." % file_path)
-
         for file_path, req_str in templates_dir:
             if "meta.json" in file_path:
                 continue
@@ -331,7 +334,7 @@ class Runner(object):
             if len(test_cases) > 0:
                 for test in test_cases:
                     if test:
-                        cls.run_test(test, result)
+                        cls.run_test(test)

     @classmethod
     def dry_run_report(cls, output):
@@ -359,6 +362,7 @@ class Runner(object):

         :return: None
         """
+        pool = ThreadPool(CONF.syntribos.threads)
         try:
             template_start_time = time.time()
             failures = 0
@@ -367,19 +371,18 @@ class Runner(object):
             for test_name, test_class in list_of_tests:
                 test_class.test_id = cls.current_test_id
                 cls.current_test_id += 5
-                log_string = "[{test_id}] : {name}".format(
-                    test_id=test_class.test_id, name=test_name)
+
                 result_string = "[{test_id}] : {name}".format(
                     test_id=cli.colorize(
                         test_class.test_id, color="green"),
                     name=test_name.replace("_", " ").capitalize())
-                if not CONF.colorize:
+                if CONF.no_colorize:
                     result_string = result_string.ljust(55)
                 else:
                     result_string = result_string.ljust(60)
-                LOG.debug(log_string)
                 try:
-                    test_class.send_init_request(file_path, req_str, meta_vars)
+                    test_class.create_init_request(file_path, req_str,
+                                                   meta_vars)
                 except Exception:
                     print(_(
                         "Error in parsing template:\n %s\n"
@@ -388,40 +391,33 @@ class Runner(object):
                     break
                 test_cases = list(
                     test_class.get_test_cases(file_path, req_str))
-                if len(test_cases) > 0:
+                total_tests = len(test_cases)
+                if total_tests > 0:
+                    log_string = "[{test_id}] : {name}".format(
+                        test_id=test_class.test_id, name=test_name)
+                    LOG.debug(log_string)
+                    last_failures = result.stats['unique_failures']
+                    last_errors = result.stats['errors']
                     p_bar = cli.ProgressBar(
-                        message=result_string, total_len=len(test_cases))
-                    last_failures = result.stats["failures"]
-                    last_errors = result.stats["errors"]
-                    for test in test_cases:
-                        if test:
-                            cls.run_test(test, result)
-                        p_bar.increment(1)
-                        p_bar.print_bar()
-                    failures = result.stats["failures"] - last_failures
-                    errors = result.stats["errors"] - last_errors
-                    total_tests = len(test_cases)
-                    if failures > total_tests * 0.90:
-                        # More than 90 percent failure
-                        failures = cli.colorize(failures, "red")
-                    elif failures > total_tests * 0.45:
-                        # More than 45 percent failure
-                        failures = cli.colorize(failures, "yellow")
-                    elif failures > total_tests * 0.15:
-                        # More than 15 percent failure
-                        failures = cli.colorize(failures, "blue")
+                        message=result_string, total_len=total_tests)
+                    test_class.send_init_request(file_path, req_str, meta_vars)
+
+                    # This line runs the tests
+                    pool.map(lambda t: cls.run_test(t, p_bar), test_cases)
+
+                    failures = result.stats['unique_failures'] - last_failures
+                    errors = result.stats['errors'] - last_errors
+                    failures_str = cli.colorize_by_percent(
+                        failures, total_tests, "red")
+
                     if errors:
-                        last_failures = result.stats["failures"]
-                        last_errors = result.stats["errors"]
-                        errors = cli.colorize(errors, "red")
+                        errors_str = cli.colorize(errors, "red")
                         print(_(
                             " : %(fail)s Failure(s), %(err)s Error(s)\r") % {
-                                "fail": failures, "err": errors})
+                                "fail": failures_str, "err": errors_str})
                     else:
-                        last_failures = result.stats["failures"]
-                        print(
-                            _(
-                                " : %s Failure(s), 0 Error(s)\r") % failures)
+                        print(_(
+                            " : %s Failure(s), 0 Error(s)\r") % failures_str)

             run_time = time.time() - template_start_time
             LOG.info(_("Run time: %s sec."), run_time)
@@ -440,26 +436,34 @@ class Runner(object):
                 result.print_result(cls.start_time)
                 cleanup.delete_temps()
                 print(_("Exiting..."))
+                pool.close()
+                pool.join()
                 exit(0)
             print(_('Resuming...'))
         except KeyboardInterrupt:
             result.print_result(cls.start_time)
             cleanup.delete_temps()
             print(_("Exiting..."))
+            pool.close()
+            pool.join()
             exit(0)

     @classmethod
-    def run_test(cls, test, result):
+    def run_test(cls, test, p_bar=None):
         """Create a new test suite, add a test, and run it

         :param test: The test to add to the suite
-        :param result: The result object to append to
-        :type result: :class:`syntribos.result.IssueTestResult`
-        :param bool dry_run: (OPTIONAL) Only print out test names
         """
-        suite = unittest.TestSuite()
-        suite.addTest(test("run_test_case"))
-        suite.run(result)
+        if test:
+            suite = unittest.TestSuite()
+            suite.addTest(test("run_test_case"))
+            suite.run(result)
+        if p_bar:
+            with lock:
+                p_bar.increment(1)
+                p_bar.print_bar()


 def entry_point():
@@ -148,10 +148,11 @@ class BaseTestCase(unittest.TestCase):
         :param str filename: name of template file
         :param str file_content: content of template file as string
         """
-        cls.init_req = parser.create_request(
-            file_content, CONF.syntribos.endpoint, meta_vars)
-
+        if not cls.init_req:
+            cls.init_req = parser.create_request(
+                file_content, CONF.syntribos.endpoint, meta_vars)
         prepared_copy = cls.init_req.get_prepared_copy()
+        cls.prepared_init_req = prepared_copy
         cls.init_resp, cls.init_signals = cls.client.send_request(
             prepared_copy)
         if cls.init_resp is not None:
@@ -67,6 +67,9 @@ class BaseFuzzTestCase(base.BaseTestCase):
                 headers=cls.request.headers,
                 params=cls.request.params,
                 data=cls.request.data)
+
+            if not hasattr(cls.request, 'body'):
+                cls.request.body = cls.request.data
             cls.test_req = cls.request

             if cls.test_resp is None or "EXCEPTION_RAISED" in cls.test_signals:
@@ -88,7 +91,6 @@ class BaseFuzzTestCase(base.BaseTestCase):
         self.run_default_checks() in order to test for the Issues
         defined here
         """
-
        if "HTTP_STATUS_CODE_5XX" in self.test_signals:
            self.register_issue(
                defect_type="500_errors",
@@ -125,9 +127,6 @@ class BaseFuzzTestCase(base.BaseTestCase):
     def get_test_cases(cls, filename, file_content):
         """Generates new TestCases for each fuzz string

-        First, sends a baseline (non-fuzzed) request, storing it in
-        cls.init_resp.
-
         For each string returned by cls._get_strings(), yield a TestCase class
         for the string as an extension to the current TestCase class. Every
         string used as a fuzz test payload entails the generation of a new
@@ -197,8 +196,7 @@ class BaseFuzzTestCase(base.BaseTestCase):
         issue.request = self.test_req
         issue.response = self.test_resp
         issue.test_type = self.test_name
-        prepared_copy = self.init_req.get_prepared_copy()
-        url_components = urlparse(prepared_copy.url)
+        url_components = urlparse(self.prepared_init_req.url)
         issue.target = url_components.netloc
         issue.path = url_components.path
         issue.init_signals = self.init_signals
@@ -34,7 +34,6 @@ class BufferOverflowBody(base_fuzz.BaseFuzzTestCase):
         return [
             "A" * (2 ** 16 + 1),
             "a" * 10 ** 5,
-            "a" * 10 ** 6,
             '\x00' * (2 ** 16 + 1),
             "%%s" * 513,
         ]
@@ -26,22 +26,6 @@ CONF = cfg.CONF
 def print_symbol():
     """Syntribos radiation symbol."""
     symbol = """ Syntribos
-                  xxxxxxx
-             x  xxxxxxxxxxxxx  x
-               x  xxxxxxxxxxx  x
-                    xxxxxxxxx
-                 x   xxxxxxx   x
-                       xxxxx
-                  x     xxx     x
-                         x
-       xxxxxxxxxxxxxxx       xxxxxxxxxxxxxxx
-        xxxxxxxxxxxxx         xxxxxxxxxxxxx
-         xxxxxxxxxxx           xxxxxxxxxxx
-          xxxxxxxxx             xxxxxxxxx
-             xxxxxx             xxxxxx
-                xxx             xxx
-                   x           x
-                         x
     === Automated API Scanning ==="""
     print(syntribos.SEP)
     print(symbol)
@@ -55,15 +39,24 @@ def colorize(string, color="nocolor"):
    colors = dict(list(zip(color_names, list(range(31, 35)))))
    colors["nocolor"] = 0  # No Color

-    if not CONF.colorize:
+    if CONF.no_colorize:
        return string
    return "\033[0;{color}m{string}\033[0;m".format(string=string,
                                                    color=colors.setdefault(
                                                        color, 0))


+def colorize_by_percent(amount, total, high=0.5, medium=0):
+    if amount > total * high:
+        return colorize(amount, "red")
+    elif amount > total * medium:
+        return colorize(amount, "yellow")
+    else:
+        return str(amount)
+
+
 class ProgressBar(object):
-    """A simple progressBar.
+    """A simple progressBar. Written as a singleton.

     A simple generic progress bar like many others.
     :param int total_len: total_len value, when progress is 100 %
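The colorize_by_percent helper added above picks a color from the fraction amount/total: with the defaults, more than 50 % comes back red, anything above zero comes back yellow, and zero is returned as a plain string. A small usage sketch follows, assuming the syntribos configuration has been loaded so CONF carries the no_colorize option (the values and the attribute assignment mirror the unit tests and are illustrative only):

    from syntribos.utils import cli

    cli.CONF.no_colorize = False             # emit ANSI codes, as the unit tests do

    print(cli.colorize_by_percent(9, 10))    # 9 > 10 * 0.5 -> wrapped in red
    print(cli.colorize_by_percent(3, 10))    # 3 > 10 * 0   -> wrapped in yellow
    print(cli.colorize_by_percent(0, 10))    # not above either threshold -> plain "0"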
@@ -64,7 +64,7 @@ class ConfFixture(config_fixture.Config):
         """config values for CLI options(default group)."""
         # TODO(unrahul): Add mock file path for outfile
         self.conf.set_default("test_types", [""])
-        self.conf.set_default("colorize", False)
+        self.conf.set_default("no_colorize", True)
         self.conf.set_default("output_format", "json")
         self.conf.set_default("min_severity", "LOW")
         self.conf.set_default("min_confidence", "LOW")
@@ -20,7 +20,7 @@ from syntribos.utils.cli import CONF
 class TestColorize(testtools.TestCase):

     def test_colorize(self):
-        CONF.colorize = True
+        CONF.no_colorize = False
         string = "color this string"
         colors = {"red": 31,
                   "green": 32,
@@ -34,6 +34,6 @@ class TestColorize(testtools.TestCase):
                              colorize(string, color))

     def test_no_colorize(self):
-        CONF.colorize = False
+        CONF.no_colorize = True
         string = "No color"
         self.assertEqual(string, colorize(string))
@@ -52,7 +52,7 @@ class TestIssueTestResult(testtools.TestCase):
     def test_addFailure(self):
         test = FakeTest("failure")
         self.issue_result.addFailure(test, ())
-        self.assertEqual(self.issue_result.stats["failures"], 2)
+        self.assertEqual(self.issue_result.stats["unique_failures"], 2)

     def test_addSuccess(self):
         test = FakeTest("success")