Adding sub commands to Syntribos
This change adds the following sub-commands to Syntribos:

- list_tests
- run
- dry_run

It also refactors runner.py and adds utils/cli.py, which collects the utility methods used for CLI output.

Change-Id: Ieed2e06e0fb6eec34be640ae1db86785403546df
README.rst (80 changed lines)
@@ -143,24 +143,78 @@ Example configuration file:
http_request_compression=True


Syntribos Commands
------------------

Below are the commands that can be specified when
using Syntribos.


### run


This command runs Syntribos with the given config options.

::

    $ syntribos --config-file keystone.config -t SQL run


### dry_run

This command prepares all the test cases that would be executed by
the ```run``` command based on the configuration options passed to
Syntribos, but simply prints their details to the screen instead
of actually running them.

::

    $ syntribos --config-file keystone.config -t SQL dry_run


### list_tests


This command will list the names and descriptions of all the tests
that can be executed by the ```run``` command.


::

    $ syntribos --config-file keystone.config list_tests


All these commands will only work if the config file or directory is
specified.


Running Syntribos
-----------------
(**This section will be updated shortly**)

To execute a Syntribos test, run ``syntribos`` specifying the configuration
file and the type of test you want to use.

::

    $ syntribos --config-file keystone.config -t SQL

To run ``syntribos`` against all available tests, just specify command
To run Syntribos against all the available tests, just specify command
```syntribos``` with the configuration file without specifying any
test type.
test type.

::

    $ syntribos --config-file keystone.config
    $ syntribos --config-file keystone.config run

####Fuzzy-matching test names

It is possible to limit Syntribos to run a specific test type using
the ```-t``` flag.

For example,

::

    $ syntribos --config-file keystone.config -t SQL run


This will match all tests that contain the string 'SQL' in their name,
like SQL_INJECTION_HEADERS, SQL_INJECTION_BODY, etc.

Syntribos Logging
-----------------
@@ -273,19 +327,19 @@ For SQL injection tests, use:

::

    $ syntribos --config-file keystone.config -t SQL
    $ syntribos --config-file keystone.config -t SQL run

For SQL injection tests against the template body only, use:

::

    $ syntribos --config-file keystone.config -t SQL_INJECTION_BODY
    $ syntribos --config-file keystone.config -t SQL_INJECTION_BODY run

For all tests against HTTP headers only, use:

::

    $ syntribos --config-file keystone.config -t HEADERS
    $ syntribos --config-file keystone.config -t HEADERS run

**Call External**

doc/source/commands.rst (new file, 43 lines)
@@ -0,0 +1,43 @@
Syntribos Commands
------------------

Below are the commands that can be specified when
using Syntribos.


### run


This command runs Syntribos with the given config options.

::

    $ syntribos --config-file keystone.config -t SQL run


### dry_run

This command prepares all the test cases that would be executed by
the ```run``` command based on the configuration options passed to
Syntribos, but simply prints their details to the screen instead
of actually running them.

::

    $ syntribos --config-file keystone.config -t SQL dry_run


### list_tests


This command will list the names and descriptions of all the tests
that can be executed by the ```run``` command.


::

    $ syntribos --config-file keystone.config list_tests


All these commands will only work if the config file or directory is
specified.
@@ -1,16 +1,27 @@
Running syntribos
Running Syntribos
=================

To execute a Syntribos test, run ``syntribos`` specifying the configuration
file and the test you want to run:

To run Syntribos against all the available tests, just specify command
```syntribos``` with the configuration file without specifying any
test type.

::

    $ syntribos --config-file keystone.config -t SQL
    $ syntribos --config-file keystone.config run

####Fuzzy-matching test names

It is possible to limit Syntribos to run a specific test type using
the ```-t``` flag.

For example,

To run ``syntribos`` against all available tests, just run ``syntribos``
specifying the configuration file:

::

    $ syntribos --config-file keystone.config
    $ syntribos --config-file keystone.config -t SQL run


This will match all tests that contain the string 'SQL' in their name,
like SQL_INJECTION_HEADERS, SQL_INJECTION_BODY, etc.
@@ -122,6 +122,15 @@ test_group = cfg.OptGroup(name="test", title="Test Config")
logger_group = cfg.OptGroup(name="logging", title="Logger config")


def sub_commands(sub_parser):
    sub_parser.add_parser('list_tests',
                          help="List all available tests")
    sub_parser.add_parser('run',
                          help="Run Syntribos with given config options")
    sub_parser.add_parser('dry_run',
                          help="Dry run Syntribos with given config options")


def list_opts():
    results = []
    results.append((None, list_cli_opts()))
@@ -151,6 +160,10 @@ def register_opts():


def list_cli_opts():
    return [
        cfg.SubCommandOpt(name="sub_command",
                          handler=sub_commands,
                          help="Available commands",
                          title="Syntribos Commands"),
        cfg.MultiStrOpt("test-types", dest="test_types", short="t",
                        default=[""],
                        help="Test types to run against the target API"),
@@ -158,13 +171,8 @@ def list_cli_opts():
                        default=[""],
                        help="Test types to be excluded from current run"
                             "against the target API"),
        cfg.BoolOpt("list-tests", dest="list_tests", short="L", default=False,
                    help="List all available test types that can be run"
                         " against the target API"),
        cfg.BoolOpt("colorize", dest="colorize", short="cl", default=False,
                    help="Enable color in Syntribos terminal output"),
        cfg.BoolOpt("dry-run", dest="dry_run", short="D", default=False,
                    help="Don't run tests, just print them out to console"),
        cfg.StrOpt("outfile", short="o", default=None,
                   help="File to print output to"),
        cfg.StrOpt("format", dest="output_format", short="f", default="json",
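The sub-commands are wired up through oslo.config's ``cfg.SubCommandOpt``: the ``handler`` callback registers each command on an argparse-style sub-parser, and after parsing, the chosen command is exposed as ``CONF.sub_command.name``, which is what ``runner.py`` dispatches on further down in this change. A minimal stand-alone sketch of the same pattern, with illustrative names rather than the project's real module layout, is::

    import sys

    from oslo_config import cfg

    CONF = cfg.CONF


    def add_commands(subparsers):
        # The handler receives an argparse-style sub-parsers object.
        subparsers.add_parser('list_tests', help="List all available tests")
        subparsers.add_parser('run', help="Run with the given config options")
        subparsers.add_parser('dry_run', help="Print the tests without running them")


    # Register the sub-command opt plus one ordinary CLI opt, then parse argv.
    CONF.register_cli_opts([
        cfg.SubCommandOpt("sub_command", handler=add_commands,
                          title="Commands", help="Available commands"),
        cfg.MultiStrOpt("test-types", dest="test_types", short="t",
                        default=[""]),
    ])
    CONF(sys.argv[1:], project="example")

    # e.g. "python sketch.py -t SQL run" prints "run"
    print(CONF.sub_command.name)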
@@ -11,9 +11,16 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import unittest

from oslo_config import cfg

import syntribos
from syntribos.formatters.json_formatter import JSONFormatter
from syntribos.runner import Runner

CONF = cfg.CONF


class IssueTestResult(unittest.TextTestResult):
@@ -72,3 +79,38 @@ class IssueTestResult(unittest.TextTestResult):
        """Print errors when the test run is complete."""
        super(IssueTestResult, self).stopTestRun()
        self.printErrors()


def print_log_file_path():
    """Print the path to the log folder for this run."""
    test_log = Runner.get_log_file_name()
    if test_log:
        print(syntribos.SEP)
        print("LOG PATH...: {path}".format(path=test_log))
        print(syntribos.SEP)


def print_result(result, start_time):
    """Prints test summary/stats (e.g. # failures) to stdout

    :param result: Global result object with all issues/etc.
    :type result: :class:`syntribos.result.IssueTestResult`
    :param float start_time: Time this run started
    """
    result.printErrors(
        CONF.output_format, CONF.min_severity, CONF.min_confidence)
    run_time = time.time() - start_time
    tests = result.testsRun
    failures = len(result.failures)
    errors = len(result.errors)

    print("\n{sep}\nRan {num} test{suff} in {time:.3f}s".format(
        sep=syntribos.SEP, num=tests, suff="s" * bool(tests - 1),
        time=run_time))
    if failures or errors:
        print("\nFAILED ({0}{1}{2})".format(
            "failures={0}".format(failures) if failures else "",
            ", " if failures and errors else "",
            "errors={0}".format(errors) if errors else ""))
    print_log_file_path()
    return tests, errors, failures
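Since ``print_result`` is now a module-level helper rather than a ``Runner`` classmethod, its summary formatting is easy to check in isolation; the sketch below reproduces just the formatting logic, with a stand-in for ``syntribos.SEP``::

    SEP = "=" * 40  # stand-in for syntribos.SEP

    def summary(tests, failures, errors, run_time):
        print("\n{sep}\nRan {num} test{suff} in {time:.3f}s".format(
            sep=SEP, num=tests, suff="s" * bool(tests - 1), time=run_time))
        if failures or errors:
            print("\nFAILED ({0}{1}{2})".format(
                "failures={0}".format(failures) if failures else "",
                ", " if failures and errors else "",
                "errors={0}".format(errors) if errors else ""))

    summary(tests=1, failures=0, errors=0, run_time=0.123)  # "Ran 1 test in 0.123s"
    summary(tests=12, failures=3, errors=1, run_time=4.2)   # adds "FAILED (failures=3, errors=1)"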
@@ -22,11 +22,10 @@ import unittest

from oslo_config import cfg

import syntribos.config
from syntribos.result import IssueTestResult
import syntribos.result
import syntribos.tests as tests
import syntribos.tests.base
from syntribos.utils.ascii_colors import colorize
from syntribos.utils.progress_bar import ProgressBar
from syntribos.utils import cli as cli

result = None
CONF = cfg.CONF
@@ -38,12 +37,20 @@ class Runner(object):
    log_file = ""

    @classmethod
    def print_tests(cls):
    def list_tests(cls):
        """Print out the list of available tests types that can be run."""
        testlist = []
        print("Test types...:")
        testlist = [name for name, _ in cls.get_tests()]
        print(testlist)
        print("List of available tests...:\n")
        print("{:<50}{}\n".format("[Test Name]", "[Description]"))
        testdict = {name: clss.__doc__ for name, clss in cls.get_tests()}
        for test in testdict:
            if testdict[test] is None:
                raise Exception(("No Test description provided"
                                 " as doc string for: ".format(test)))
            else:
                test_description = testdict[test].split(".")[0]
                print("{test:<50}{desc}\r".format(
                    test=test, desc=test_description))
        print("\n")
        exit(0)

    @classmethod
@@ -60,7 +67,7 @@ class Runner(object):

    @classmethod
    def get_tests(cls, test_types=None, excluded_types=None):
        """Yields relevant tests based on test type (from ```syntribos.arguments```)
        """Yields relevant tests based on test type

        :param list test_types: Test types to be run

@@ -79,41 +86,6 @@ class Runner(object):
                if t in k:
                    yield k, v

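The ``if t in k`` check above is the substring matching that the documentation describes as fuzzy-matching test names: ``-t SQL`` selects every registered test whose name contains ``SQL``. A small stand-alone sketch of the same filtering, using a hypothetical name list, is::

    # Hypothetical registered test names, filtered the way get_tests filters.
    registered = ["SQL_INJECTION_BODY", "SQL_INJECTION_HEADERS", "XSS_BODY"]

    def matching(test_types):
        for name in registered:
            # Plain substring membership, so "-t SQL" matches every SQL_* test.
            if any(t in name for t in test_types):
                yield name

    print(list(matching(["SQL"])))  # ['SQL_INJECTION_BODY', 'SQL_INJECTION_HEADERS']
    print(list(matching([""])))     # the default [""] matches everything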
    @staticmethod
    def print_symbol():
        """Syntribos radiation symbol."""
        symbol = """ Syntribos
xxxxxxx
x xxxxxxxxxxxxx x
x xxxxxxxxxxx x
xxxxxxxxx
x xxxxxxx x
xxxxx
x xxx x
x
xxxxxxxxxxxxxxx xxxxxxxxxxxxxxx
xxxxxxxxxxxxx xxxxxxxxxxxxx
xxxxxxxxxxx xxxxxxxxxxx
xxxxxxxxx xxxxxxxxx
xxxxxx xxxxxx
xxx xxx
x x
x
=== Automated API Scanning ==="""

        print(syntribos.SEP)
        print(symbol)
        print(syntribos.SEP)

    @classmethod
    def print_log(cls):
        """Print the path to the log folder for this run."""
        test_log = cls.get_log_file_name()
        if test_log:
            print(syntribos.SEP)
            print("LOG PATH...: {path}".format(path=test_log))
            print(syntribos.SEP)

    @classmethod
    def get_default_conf_files(cls):
        return ["~/.syntribos/syntribos.conf"]
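``get_default_conf_files`` only supplies a fallback path; presumably it is handed to oslo.config when argv is parsed (the actual ``CONF(...)`` call is truncated in the hunk below, so the parameters in this sketch are an assumption)::

    import os
    import sys

    from oslo_config import cfg

    CONF = cfg.CONF

    # Assumed wiring: keep only files that actually exist, so a missing
    # default path does not trigger a config-files-not-found error.
    defaults = [os.path.expanduser("~/.syntribos/syntribos.conf")]
    defaults = [f for f in defaults if os.path.isfile(f)]

    CONF(sys.argv[1:], project="syntribos",
         default_config_files=defaults or None)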
@@ -129,9 +101,13 @@ class Runner(object):

    @classmethod
    def run(cls):
        """Method sets up logger and decides on Syntribos control flow

        This is the method where control flow of Syntribos is decided
        based on the commands entered. Depending upon commands such
        as ```list_tests``` or ```run``` the respective method is called.
        """
        global result
        test_id = 1000
        try:
            try:
                syntribos.config.register_opts()
                CONF(sys.argv[1:],
@@ -142,33 +118,79 @@ class Runner(object):
            except Exception as exc:
                syntribos.config.handle_config_exception(exc)

        cls.print_symbol()

        # 2 == higher verbosity, 1 == normal
        verbosity = 0
        cli.print_symbol()
        if not CONF.outfile:
            decorator = unittest.runner._WritelnDecorator(sys.stdout)
        else:
            decorator = unittest.runner._WritelnDecorator(
                open(CONF.outfile, 'w'))
        result = IssueTestResult(decorator, True, verbosity)
        start_time = time.time()
        if CONF.list_tests:
            cls.print_tests()
        result = syntribos.result.IssueTestResult(decorator, True, verbosity=1)
        if CONF.sub_command.name == "list_tests":
            cls.list_tests()
        else:
            list_of_tests = list(cls.get_tests(CONF.test_types,
                                               CONF.excluded_types))
            print("\nRunning Tests...:")
            for file_path, req_str in CONF.syntribos.templates:
                print(syntribos.SEP)
                print("Template File...: {}".format(file_path))
                print(syntribos.SEP)
                if CONF.sub_command.name == "run":
                    cls.run_all_tests(list_of_tests, file_path, req_str)
                elif CONF.sub_command.name == "dry_run":
                    cls.dry_run(list_of_tests, file_path, req_str)
    @classmethod
    def dry_run(cls, list_of_tests, file_path, req_str):
        """Loads all the template and data files and prints out the tests

        This method does not run any tests, but loads all the templates
        and payload data files and prints all the loaded tests.

        :param list list_of_tests: A list of all the tests loaded
        :param str file_path: Path of the payload file
        :param str req_str: Request string of each template

        :return: None
        """
        for test_name, test_class in list_of_tests:
            log_string = "Dry ran : {name}".format(name=test_name)
            LOG.debug(log_string)
            test_class.send_init_request(file_path, req_str)
            test_cases = list(
                test_class.get_test_cases(file_path, req_str))
            if len(test_cases) > 0:
                for test in test_cases:
                    if test:
                        test_time = cls.run_test(test, result,
                                                 dry_run=True)
                        test_time = "Run time: {} sec.".format(
                            test_time)
                        LOG.debug(test_time)
    @classmethod
    def run_all_tests(cls, list_of_tests, file_path, req_str):
        """Loads all the payload data and templates runs all the tests

        This method call run_test method to run each of the tests one
        by one.

        :param list list_of_tests: A list of all the tests loaded
        :param str file_path: Path of the payload file
        :param str req_str: Request string of each template

        :return: None
        """
        try:
            start_time = time.time()
            test_id = 1000
            print("\n ID \t\tTest Name \t\t\t\t\t\tProgress")
            list_of_tests = list(cls.get_tests(CONF.test_types,
                                               CONF.excluded_types))
            for test_name, test_class in list_of_tests:
                test_id += 5
                log_string = "[{test_id}] : {name}".format(
                    test_id=test_id, name=test_name)
                result_string = "[{test_id}] : {name}".format(
                    test_id=colorize(test_id, color="green"),
                    test_id=cli.colorize(test_id, color="green"),
                    name=test_name.replace("_", " ").capitalize())
                if not CONF.colorize:
                    result_string = result_string.ljust(55)
@@ -179,13 +201,12 @@ class Runner(object):
                test_cases = list(
                    test_class.get_test_cases(file_path, req_str))
                if len(test_cases) > 0:
                    bar = ProgressBar(message=result_string,
                    bar = cli.ProgressBar(message=result_string,
                                          max=len(test_cases))
                    for test in test_cases:
                        if test:
                            test_time = cls.run_test(test, result,
                                                     CONF.dry_run)
                            test_time = "Test run time: {} sec.".format(
                            test_time = cls.run_test(test, result)
                            test_time = "Run time: {} sec.".format(
                                test_time)
                            LOG.debug(test_time)
                            bar.increment(1)
@@ -194,19 +215,19 @@
                    total_tests = len(test_cases)
                    if failures > total_tests * 0.90:
                        # More than 90 percent failure
                        failures = colorize(failures, "red")
                        failures = cli.colorize(failures, "red")
                    elif failures > total_tests * 0.45:
                        # More than 45 percent failure
                        failures = colorize(failures, "yellow")
                        failures = cli.colorize(failures, "yellow")
                    elif failures > total_tests * 0.15:
                        # More than 15 percent failure
                        failures = colorize(failures, "blue")
                        failures = cli.colorize(failures, "blue")
                    print(" : {} Failure(s)\r".format(failures))
                    print(syntribos.SEP)
            print("\nResults...:\n")
            cls.print_result(result, start_time)
            syntribos.result.print_result(result, start_time)
        except KeyboardInterrupt:
            cls.print_result(result, start_time)
            syntribos.result.print_result(result, start_time)
            print("Keyboard interrupt, exiting...")
            exit(0)
@@ -229,33 +250,6 @@ class Runner(object):
        suite.run(result)
        test_end_time = time.time() - test_start_time
        test_end_time = '%.5f' % test_end_time
        return test_end_time

    @classmethod
    def print_result(cls, result, start_time):
        """Prints test summary/stats (e.g. # failures) to stdout

        :param result: Global result object with all issues/etc.
        :type result: :class:`syntribos.result.IssueTestResult`
        :param float start_time: Time this run started
        """
        result.printErrors(
            CONF.output_format, CONF.min_severity, CONF.min_confidence)
        run_time = time.time() - start_time
        tests = result.testsRun
        failures = len(result.failures)
        errors = len(result.errors)

        print("\n{sep}\nRan {num} test{suff} in {time:.3f}s".format(
            sep=syntribos.SEP, num=tests, suff="s" * bool(tests - 1),
            time=run_time))
        if failures or errors:
            print("\nFAILED ({0}{1}{2})".format(
                "failures={0}".format(failures) if failures else "",
                ", " if failures and errors else "",
                "errors={0}".format(errors) if errors else ""))
        cls.print_log()
        return tests, errors, failures


def entry_point():
@@ -22,6 +22,7 @@ CONF = cfg.CONF


class AuthTestCase(base.BaseTestCase):
    """Test for possible token misuse in keystone."""
    test_name = "AUTH"
    test_type = "headers"


@@ -18,6 +18,8 @@ from syntribos.tests.fuzz import base_fuzz


class BufferOverflowBody(base_fuzz.BaseFuzzTestCase):
    """Test for buffer overflow vulnerabilities in HTTP body."""

    test_name = "BUFFER_OVERFLOW_BODY"
    test_type = "data"
    data_key = "buffer-overflow.txt"
@@ -66,16 +68,22 @@ class BufferOverflowBody(base_fuzz.BaseFuzzTestCase):


class BufferOverflowParams(BufferOverflowBody):
    """Test for buffer overflow vulnerabilities in HTTP params."""

    test_name = "BUFFER_OVERFLOW_PARAMS"
    test_type = "params"


class BufferOverflowHeaders(BufferOverflowBody):
    """Test for buffer overflow vulnerabilities in HTTP header."""

    test_name = "BUFFER_OVERFLOW_HEADERS"
    test_type = "headers"


class BufferOverflowURL(BufferOverflowBody):
    """Test for buffer overflow vulnerabilities in HTTP URL."""

    test_name = "BUFFER_OVERFLOW_URL"
    test_type = "url"
    url_var = "FUZZ"

@@ -19,6 +19,8 @@ from syntribos.tests.fuzz import base_fuzz


class CommandInjectionBody(base_fuzz.BaseFuzzTestCase):
    """Test for command injection vulnerabilities in HTTP body."""

    test_name = "COMMAND_INJECTION_BODY"
    test_type = "data"
    data_key = "command_injection.txt"
@@ -57,16 +59,22 @@ class CommandInjectionBody(base_fuzz.BaseFuzzTestCase):


class CommandInjectionParams(CommandInjectionBody):
    """Test for command injection vulnerabilities in HTTP params."""

    test_name = "COMMAND_INJECTION_PARAMS"
    test_type = "params"


class CommandInjectionHeaders(CommandInjectionBody):
    """Test for command injection vulnerabilities in HTTP header."""

    test_name = "COMMAND_INJECTION_HEADERS"
    test_type = "headers"


class CommandInjectionURL(CommandInjectionBody):
    """Test for command injection vulnerabilities in HTTP URL."""

    test_name = "COMMAND_INJECTION_URL"
    test_type = "url"
    url_var = "FUZZ"

@@ -17,6 +17,8 @@ from syntribos.tests.fuzz import base_fuzz


class IntOverflowBody(base_fuzz.BaseFuzzTestCase):
    """Test for integer overflow vulnerabilities in HTTP body."""

    test_name = "INTEGER_OVERFLOW_BODY"
    test_type = "data"
    data_key = "integer-overflow.txt"
@@ -35,16 +37,22 @@ class IntOverflowBody(base_fuzz.BaseFuzzTestCase):


class IntOverflowParams(IntOverflowBody):
    """Test for integer overflow vulnerabilities in HTTP params."""

    test_name = "INTEGER_OVERFLOW_PARAMS"
    test_type = "params"


class IntOverflowHeaders(IntOverflowBody):
    """Test for integer overflow vulnerabilities in HTTP header."""

    test_name = "INTEGER_OVERFLOW_HEADERS"
    test_type = "headers"


class IntOverflowURL(IntOverflowBody):
    """Test for integer overflow vulnerabilities in HTTP URL."""

    test_name = "INTEGER_OVERFLOW_URL"
    test_type = "url"
    url_var = "FUZZ"

@@ -15,22 +15,30 @@ from syntribos.tests.fuzz import base_fuzz


class LDAPInjectionBody(base_fuzz.BaseFuzzTestCase):
    """Test for LDAP injection vulnerabilities in HTTP body."""

    test_name = "LDAP_INJECTION_BODY"
    test_type = "data"
    data_key = "ldap.txt"


class LDAPInjectionParams(LDAPInjectionBody):
    """Test for LDAP injection vulnerabilities in HTTP params."""

    test_name = "LDAP_INJECTION_PARAMS"
    test_type = "params"


class LDAPInjectionHeaders(LDAPInjectionBody):
    """Test for LDAP injection vulnerabilities in HTTP header."""

    test_name = "LDAP_INJECTION_HEADERS"
    test_type = "headers"


class LDAPInjectionURL(LDAPInjectionBody):
    """Test for LDAP injection vulnerabilities in HTTP URL."""

    test_name = "LDAP_INJECTION_URL"
    test_type = "url"
    url_var = "FUZZ"

@@ -18,6 +18,8 @@ from syntribos.tests.fuzz import base_fuzz


class SQLInjectionBody(base_fuzz.BaseFuzzTestCase):
    """Test for SQL injection vulnerabilities in HTTP body."""

    test_name = "SQL_INJECTION_BODY"
    test_type = "data"
    data_key = "sql-injection.txt"
@@ -71,16 +73,22 @@ class SQLInjectionBody(base_fuzz.BaseFuzzTestCase):


class SQLInjectionParams(SQLInjectionBody):
    """Test for SQL injection vulnerabilities in HTTP params."""

    test_name = "SQL_INJECTION_PARAMS"
    test_type = "params"


class SQLInjectionHeaders(SQLInjectionBody):
    """Test for SQL injection vulnerabilities in HTTP header."""

    test_name = "SQL_INJECTION_HEADERS"
    test_type = "headers"


class SQLInjectionURL(SQLInjectionBody):
    """Test for SQL injection vulnerabilities in HTTP URL."""

    test_name = "SQL_INJECTION_URL"
    test_type = "url"
    url_var = "FUZZ"

@@ -16,22 +16,30 @@ from syntribos.tests.fuzz import base_fuzz


class StringValidationBody(base_fuzz.BaseFuzzTestCase):
    """Test for string validation vulnerabilities in HTTP body."""

    test_name = "STRING_VALIDATION_BODY"
    test_type = "data"
    data_key = "string_validation.txt"


class StringValidationParams(StringValidationBody):
    """Test for string validation vulnerabilities in HTTP params."""

    test_name = "STRING_VALIDATION_PARAMS"
    test_type = "params"


class StringValidationHeaders(StringValidationBody):
    """Test for string validation vulnerabilities in HTTP header."""

    test_name = "STRING_VALIDATION_HEADERS"
    test_type = "headers"


class StringValidationURL(StringValidationBody):
    """Test for string validation vulnerabilities in HTTP URL."""

    test_name = "STRING_VALIDATION_URL"
    test_type = "url"
    url_var = "FUZZ"

@@ -24,6 +24,8 @@ CONF = cfg.CONF


class XMLExternalEntityBody(base_fuzz.BaseFuzzTestCase):
    """Test for XML-external-entity injection vulnerabilities in HTTP body."""

    test_name = "XML_EXTERNAL_ENTITY_BODY"
    test_type = "data"
    dtds_data_key = "xml-external.txt"

@@ -17,6 +17,8 @@ from syntribos.tests.fuzz import base_fuzz


class XSSBody(base_fuzz.BaseFuzzTestCase):
    """Test for cross-site-scripting vulnerabilities in HTTP body."""

    test_name = "XSS_BODY"
    test_type = "data"
    data_key = "xss.txt"

@@ -25,12 +25,7 @@ CONF = cfg.CONF


class CorsHeader(base.BaseTestCase):

    """Adds the CORS header response to test_signals (a list of signals)

    If any Cross Origin Resource Sharing (CORS) header check fails then
    it is registered as a signal and an issue is raised.
    """
    """Test for CORS wild character vulnerabilities in HTTP header."""

    test_name = "CORS_WILDCARD_HEADERS"
    test_type = "headers"

@@ -24,6 +24,7 @@ CONF = cfg.CONF


class SSLTestCase(base.BaseTestCase):
    """Test if response body contains non-https links."""

    test_name = "SSL_ENDPOINT_BODY"
    test_type = "body"

@@ -1,30 +0,0 @@
# Copyright 2016 Intel
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_config import cfg

CONF = cfg.CONF


def colorize(string, color="nocolor"):
    """A simple method to add ascii colors to the terminal."""

    color_names = ["red", "green", "yellow", "blue"]
    colors = dict(zip(color_names, range(31, 35)))
    colors["nocolor"] = 0  # No Color

    if not CONF.colorize:
        return string
    return "\033[0;{color}m{string}\033[0;m".format(string=string,
                                                    color=colors.setdefault(
                                                        color, 0))
@@ -12,12 +12,56 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import division
from __future__ import unicode_literals
from math import ceil
import sys

from oslo_config import cfg

import syntribos

CONF = cfg.CONF


def print_symbol():
    """Syntribos radiation symbol."""
    symbol = """ Syntribos
xxxxxxx
x xxxxxxxxxxxxx x
x xxxxxxxxxxx x
xxxxxxxxx
x xxxxxxx x
xxxxx
x xxx x
x
xxxxxxxxxxxxxxx xxxxxxxxxxxxxxx
xxxxxxxxxxxxx xxxxxxxxxxxxx
xxxxxxxxxxx xxxxxxxxxxx
xxxxxxxxx xxxxxxxxx
xxxxxx xxxxxx
xxx xxx
x x
x
=== Automated API Scanning ==="""
    print(syntribos.SEP)
    print(symbol)
    print(syntribos.SEP)


def colorize(string, color="nocolor"):
    """Method to add ascii colors to the terminal."""

    color_names = ["red", "green", "yellow", "blue"]
    colors = dict(zip(color_names, range(31, 35)))
    colors["nocolor"] = 0  # No Color

    if not CONF.colorize:
        return string
    return "\033[0;{color}m{string}\033[0;m".format(string=string,
                                                    color=colors.setdefault(
                                                        color, 0))
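``colorize`` wraps a string in an ANSI escape sequence only when the ``colorize`` option is enabled; otherwise it returns the string untouched. A stand-alone sketch of the same mapping, with the oslo.config flag replaced by a plain ``enabled`` argument, behaves as follows::

    def colorize(string, color="nocolor", enabled=True):
        # Same color table as above: red=31, green=32, yellow=33, blue=34.
        colors = dict(zip(["red", "green", "yellow", "blue"], range(31, 35)))
        colors["nocolor"] = 0
        if not enabled:
            return string
        return "\033[0;{color}m{string}\033[0;m".format(
            string=string, color=colors.setdefault(color, 0))

    print(repr(colorize("FAIL", "red")))         # '\x1b[0;31mFAIL\x1b[0;m'
    print(repr(colorize("FAIL", "red", False)))  # 'FAIL'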


class ProgressBar(object):
    """A simple progressBar.
@@ -13,8 +13,8 @@
# limitations under the License.
import testtools

from syntribos.utils.ascii_colors import colorize
from syntribos.utils.ascii_colors import CONF
from syntribos.utils.cli import colorize
from syntribos.utils.cli import CONF


class TestColorize(testtools.TestCase):

@@ -13,7 +13,7 @@
# limitations under the License.
import testtools

from syntribos.utils.progress_bar import ProgressBar
from syntribos.utils.cli import ProgressBar


class TestProgressBar(testtools.TestCase):
