Add benchmark to chibitest and misaka.
commit 2e0b10f565
parent e5164bc7c5

setup.py | 4 ++++
setup.py
@@ -14,12 +14,14 @@ class TestCommand(Command):
     user_options = [
         ('include=', 'i', 'comma separated list of testcases'),
         ('exclude=', 'e', 'comma separated list of testcases'),
+        ('benchmark', 'b', 'run benchmarks'),
         ('list', 'l', 'list all testcases'),
     ]

     def initialize_options(self):
         self.include = ''
         self.exclude = ''
+        self.benchmark = 0
         self.list = 0

     def finalize_options(self):
@@ -30,6 +32,8 @@ class TestCommand(Command):
         if self.list:
             args.append('--list')
         else:
+            if self.benchmark:
+                args.append('--benchmark')
             if self.include:
                 args.append('--include')
                 args.extend(self.include.split(','))
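These four added lines thread the new flag through setuptools. Assuming TestCommand is registered as the `test` command in setup() (the registration is outside this diff), benchmarks would presumably be invoked as:

    python setup.py test --benchmark

with the flag forwarded verbatim to the test runner's argument list.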
chibitest.py
@@ -10,14 +10,15 @@ Inspired by Oktest, http://www.kuwata-lab.com/oktest/.

 from __future__ import print_function

-import sys
 import inspect
+import sys
 import traceback
 from difflib import unified_diff
-from collections import namedtuple, defaultdict
+from collections import defaultdict
+from timeit import default_timer


-Result = namedtuple('Result', ('func', 'name', 'failure'))
+LINE = '*' * 72


 def _get_doc_line(obj):
@@ -40,6 +41,19 @@ def _exc_name(exception_class):
         exception_class.__name__)


+def readable_duration(s, suffix=''):
+    if s >= 1:
+        f = '{:.2f} s'.format(s)
+    elif s < 1:
+        ms = 1000 * s
+        if ms >= 1:
+            f = '{:.2f} ms'.format(ms)
+        elif ms < 1:
+            f = '{:.2f} us'.format(ms * 1000)
+
+    return f + suffix
+
+
 class AssertionObject(object):
     def __init__(self, target):
         self._target = target
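readable_duration picks the largest unit that keeps the value at or above one, then appends the suffix. A few sample values, derived directly from the definition above:

    readable_duration(1.5)             # '1.50 s'
    readable_duration(0.25)            # '250.00 ms'
    readable_duration(0.0005, '/op')   # '500.00 us/op'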
@@ -111,7 +125,6 @@ class AssertionObject(object):
         else:
             raise AssertionError('{} not raised'.format(name))

-
     def not_raises(self, exception_class=Exception):
         name = _exc_name(exception_class)

@@ -130,6 +143,52 @@ class AssertionObject(object):
 ok = AssertionObject


+class TestResult(object):
+    __slots__ = ('func', 'doc_name', 'passed', 'message')
+
+    def __init__(self, func, doc_name=None, passed=False, message=None):
+        self.func = func
+        self.doc_name = doc_name
+        self.passed = passed
+        self.message = message
+
+    def name(self):
+        return self.doc_name or self.func
+
+    def status(self):
+        return 'PASSED' if self.passed else 'FAILED'
+
+    def __str__(self):
+        s = '{} ... {}'.format(self.name(), self.status())
+        if self.message:
+            s += '\n{}\n{}\n{}'.format(LINE, self.message, LINE)
+
+        return s
+
+
+class BenchmarkResult(TestResult):
+    def __init__(self, func, doc_name=None, passed=False, message=None,
+                 repeated=0, timing=0.0):
+        self.repeated = repeated
+        self.timing = timing
+        TestResult.__init__(self, func, doc_name, passed, message)
+
+    def __str__(self):
+        if self.passed:
+            s = '{:<25} {:>8} {:>16} {:>16}'.format(
+                self.name(),
+                self.repeated,
+                readable_duration(self.timing, suffix='/t'),
+                readable_duration(self.timing / self.repeated, suffix='/op'))
+        else:
+            s = '{} ... FAILED'.format(self.name())
+
+        if self.message:
+            s += '\n{}\n{}\n{}'.format(LINE, self.message, LINE)
+
+        return s
+
+
 class TestCase(object):
     def __init__(self, config):
         self.config = config
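BenchmarkResult keeps TestResult's bookkeeping but prints a column-aligned row: name, repetition count, total time ('/t') and time per operation ('/op'). A sketch of what printing one yields, following the __str__ above (column padding elided in the comment):

    r = BenchmarkResult('test_misaka', passed=True, repeated=1000, timing=2.0)
    print(r)  # test_misaka    1000    2.00 s/t    2.00 ms/op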
@@ -137,7 +196,7 @@ class TestCase(object):

         for t in dir(self):
             if t.startswith('test_'):
-                self._tests.append(self._wrap_test(getattr(self, t)))
+                self.add_test(getattr(self, t))

     @classmethod
     def name(cls):
@@ -148,21 +207,27 @@ class TestCase(object):
         return cls.__name__

     def add_test(self, func):
-        self._tests.append(self._wrap_test(func))
+        self._tests.append(self.wrap_test(func))

-    def _wrap_test(self, func):
+    def wrap_test(self, func):
         def catch_exception():
-            failure = None
+            message = None
+            passed = False

             try:
                 func()
+                passed = True
             except AssertionError as e:  # Expected exception
-                failure = str(e)
+                message = str(e)
             except Exception as e:  # Unexpected exception
-                failure = ''.join(traceback.format_exception(
+                message = ''.join(traceback.format_exception(
                     *sys.exc_info())).strip()

-            return Result(func.__name__, _get_doc_line(func) or None, failure)
+            return TestResult(
+                func.__name__,
+                _get_doc_line(func) or None,
+                passed,
+                message)

         return catch_exception

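The renamed wrap_test (formerly _wrap_test, made public so subclasses can override it) still returns a closure that never propagates exceptions: an AssertionError becomes the failure message, any other exception becomes a full traceback, and the outcome is now carried by a TestResult instead of the old (func, name, failure) tuple.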
@@ -179,8 +244,52 @@ class TestCase(object):
             self.teardown()


+class Benchmark(TestCase):
+    def __init__(self, config):
+        self.duration = config.get('duration', 1.0)
+        TestCase.__init__(self, config)
+
+    def wrap_test(self, func):
+        def catch_exception():
+            message = None
+            passed = False
+            repeated = 10
+            timing = 0.0
+
+            try:
+                start = default_timer()
+                repeat = 10
+
+                while True:
+                    while repeat > 0:
+                        func()
+                        repeat -= 1
+
+                    if default_timer() - start >= self.duration:
+                        break
+                    else:
+                        repeat = 10
+                        repeated += 10
+
+                timing = default_timer() - start
+                passed = True
+            except AssertionError as e:  # Expected exception
+                message = str(e)
+            except Exception as e:  # Unexpected exception
+                message = ''.join(traceback.format_exception(
+                    *sys.exc_info())).strip()
+
+            return BenchmarkResult(
+                func.__name__,
+                _get_doc_line(func) or None,
+                passed,
+                message,
+                repeated,
+                timing)
+
+        return catch_exception
+
+
 def runner(testcases, setup_func=None, teardown_func=None, config={}):
-    line = '*' * 80
     passed = failed = 0
     config = defaultdict(lambda: None, config)

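Benchmark reuses TestCase's collection machinery and only swaps the wrapper, which is why the constructor now routes through add_test/wrap_test. The timing loop calls the target in batches of ten and consults default_timer() only between batches, keeping timer overhead out of the measured inner loop; it stops once `duration` seconds (1.0 by default, overridable through the runner's config) have elapsed. A minimal, hypothetical subclass as a sketch, assuming chibitest is importable:

    from chibitest import Benchmark, runner

    class JoinBenchmark(Benchmark):
        def test_join(self):
            ''.join(str(i) for i in range(1000))

    runner([JoinBenchmark], config={'duration': 0.5})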
@@ -193,18 +302,11 @@ def runner(testcases, setup_func=None, teardown_func=None, config={}):
         print('>> {}'.format(testcase.name()))

         for result in tests.run():
-            name = result.name or result.func
-
-            if result.failure is not None:
-                failed += 1
-
-                if result.failure:
-                    print('{} ... FAILED\n{}\n{}\n{}'
-                          .format(name, line, result.failure, line))
-                else:
-                    print('{} ... FAILED'.format(name))
-            else:
+            if result.passed:
                 passed += 1
-                print('{} ... PASSED'.format(name))
+            else:
+                failed += 1
+
+            print(result)

         print()
test runner script
@@ -10,7 +10,7 @@ from os.path import dirname, join as jp, splitext
 CWD = dirname(sys.modules[__name__].__file__)
 sys.path.insert(0, jp(CWD, '..'))

-from chibitest import runner, TestCase
+from chibitest import runner, TestCase, Benchmark


 help_message = """\
@@ -40,32 +40,41 @@ def get_test_modules():
     return modules


-def is_test(n):
-    return inspect.isclass(n) and issubclass(n, TestCase) and not n is TestCase
+def is_testcase(n):
+    return inspect.isclass(n) \
+        and issubclass(n, TestCase) \
+        and not n is TestCase \
+        and not n is Benchmark


-def get_tests(module):
+def is_benchmark(n):
+    return inspect.isclass(n) \
+        and issubclass(n, Benchmark) \
+        and not n is Benchmark
+
+
+def get_testcases(module):
     return [(testcase.name(), testcase) \
-        for _, testcase in inspect.getmembers(module, is_test)]
+        for _, testcase in inspect.getmembers(module, is_testcase)]


-def run_tests(tests, include=[], exclude=[]):
+def run_testcases(testcases, include=[], exclude=[]):
     if include:
-        tests = [n for n in tests if n[0] in include]
+        testcases = [n for n in testcases if n[0] in include]
     if exclude:
-        tests = [n for n in tests if not n[0] in exclude]
+        testcases = [n for n in testcases if not n[0] in exclude]

-    runner([n[1] for n in tests])
+    runner([n[1] for n in testcases])


 if __name__ == '__main__':
-    tests = list(chain(*map(get_tests, get_test_modules())))
+    testcases = list(chain(*map(get_testcases, get_test_modules())))
     include = []
     exclude = []

     if len(sys.argv) >= 2:
         if sys.argv[1] == '--list':
-            for name, testcase in tests:
+            for name, testcase in testcases:
                 print(name)
             sys.exit(0)
         elif sys.argv[1] == '--help':
@@ -83,4 +92,9 @@ if __name__ == '__main__':
         elif last_arg == '--exclude':
             exclude.append(arg)

-    run_tests(tests, include, exclude)
+    if '--benchmark' in sys.argv[1:]:
+        testcases = list(filter(lambda n: is_benchmark(n[1]), testcases))
+    else:
+        testcases = list(filter(lambda n: not is_benchmark(n[1]), testcases))
+
+    run_testcases(testcases, include, exclude)
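The two filters make the flag an either/or switch: with --benchmark only Benchmark subclasses are collected, and without it they are dropped, so a regular test pass never pays for the timing loops.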
new file: benchmark suite
@@ -0,0 +1,64 @@
+# -*- coding: utf-8 -*-
+
+import sys
+import time
+from os.path import dirname, join as join_path
+
+from chibitest import TestCase, Benchmark, ok
+
+
+class BenchmarkLibraries(Benchmark):
+    def setup(self):
+        fp = join_path(dirname(__file__), 'data', 'markdown-syntax.md')
+        with open(fp, 'r') as f:
+            self.text = f.read()
+
+        if sys.version_info[0] == 2:
+            self.hoep_text = unicode(self.text)
+        else:
+            self.hoep_text = self.text
+
+    def test_misaka(self):
+        import misaka
+        extensions = (
+            'no-intra-emphasis',
+            'fenced-code',
+            'autolink',
+            'tables',
+            'strikethrough',
+        )
+        misaka.html(self.text, extensions)
+
+    def test_misaka_classes(self):
+        import misaka
+        extensions = (
+            'no-intra-emphasis',
+            'fenced-code',
+            'autolink',
+            'tables',
+            'strikethrough',
+        )
+        r = misaka.HtmlRenderer()
+        p = misaka.Markdown(r, extensions)
+        p(self.text)
+
+    def test_mistune(self):
+        import mistune
+        mistune.markdown(self.text)
+
+    def test_markdown(self):
+        import markdown
+        markdown.markdown(self.text, ['extra'])
+
+    def test_markdown2(self):
+        import markdown2
+        extras = ['code-friendly', 'fenced-code-blocks', 'footnotes']
+        markdown2.markdown(self.text, extras=extras)
+
+    def test_hoep(self):
+        import hoep as m
+        extensions = (
+            m.EXT_NO_INTRA_EMPHASIS | m.EXT_FENCED_CODE | m.EXT_AUTOLINK |
+            m.EXT_TABLES | m.EXT_STRIKETHROUGH | m.EXT_FOOTNOTES)
+        md = m.Hoep(extensions=extensions)
+        md.render(self.hoep_text)
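Each library is imported inside its own test method, so a missing package fails only that one benchmark instead of the whole module scan. The suite can also be driven directly; a minimal sketch, assuming the file sits next to chibitest and its data directory:

    from chibitest import runner
    runner([BenchmarkLibraries])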