Commit bcab1246 authored by Kirill Smelkov

Prepare to benchmark python code with py.test

For this, a small wrapper over py.test is developed (to discover/collect
functions to benchmark, etc.) which then runs each such function several
times in a boxed environment.

Benchmarks should be named bench_*.py
parent 90d32e51
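For illustration, a file matched by this convention could look like the following (hypothetical example; the wrapper collects bench_*.py files, Bench* classes and bench_*() functions):

    # bench_example.py
    def bench_noop():
        # the wrapper times each call as a whole and reports the best
        # of several runs (3 by default, see --benchruns)
        pass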
...@@ -19,6 +19,7 @@ all :
PYTHON ?= python
PYTEST ?= $(PYTHON) -m pytest
PYBENCH ?= $(PYTHON) t/py.bench
VALGRIND?= valgrind
# use the same C compiler as python
...@@ -174,3 +175,8 @@ test.fault : $(FAULTS:%=%.tfault)
%.tfault : $(tfault).t
t/tfault-run $< $* $(shell grep '{"$*"' $(tfault).c | awk '{print $$NF}')
# -*- benchmarking -*-
bench : bigfile/_bigfile.so
$(PYBENCH) $(PYTEST_IGNORE)
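With this target in place benchmarks can be run either through make or by invoking the wrapper directly, e.g.:

    $ make bench
    $ python t/py.bench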
...@@ -201,5 +201,6 @@ setup(
'll_build_ext': _build_ext, # original build_ext for Makefile
'build_py': build_py,
'test': viamake('test', 'run tests'),
'bench': viamake('bench', 'run benchmarks'),
},
)
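The added distutils command delegates to the Makefile target above via viamake, so the benchmarks are also reachable as

    $ python setup.py bench

What follows is the wrapper itself, t/py.bench: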
#!/usr/bin/env python
# benchmarking via py.test
# TODO intro / license
from __future__ import print_function
import pytest
import _pytest.runner
from _pytest.terminal import TerminalReporter as _pytest_TerminalReporter
from time import time
from py.process import ForkedFunc
import sys
from six.moves import range as xrange
# XXX hack for ForkedFunc not to capture stdout/stderr,
# so that test prints can be seen when run under --capture=no.
# OTOH py.test already captures the stdout/stderr fds, so we don't have to here.
import py._process.forkedfunc
class XForkedFunc(ForkedFunc):
def _child(self, nice, on_start, on_exit):
# we are monkeypatching only in the child, so there is no effect on the parent (safe)
py._process.forkedfunc.get_unbuffered_io = self._fake_get_unbuffered_io
return ForkedFunc._child(self, nice, on_start, on_exit)
@staticmethod
def _fake_get_unbuffered_io(fd, _):
if fd == 1: return sys.stdout
if fd == 2: return sys.stderr
raise RuntimeError("only stdout/stderr expected")
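# Example (illustration only): XForkedFunc is used like py.process.ForkedFunc,
# but the child's stdout/stderr go straight to the inherited file descriptors,
# so prints from benchmarked code stay visible under --capture=no:
#
#   def hello():
#       print('hello from child')   # reaches the real stdout
#       return 42
#
#   result = XForkedFunc(hello).waitfinish()
#   assert result.retval == 42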
# plugin to collect & run benchmarks
class BenchPlugin:
# redirect python collector to bench_*.py and bench_*()
def pytest_configure(self, config):
# XXX a bit hacky
ini = config.inicfg
ini['python_files'] = 'bench_*.py'
ini['python_classes'] = 'Bench'
ini['python_functions'] = 'bench_'
def pytest_addoption(self, parser):
g = parser.getgroup('benchmarking')
g.addoption('--benchruns', action='store', type=int, dest='benchruns', default=3,
help="number of time to run each benchmark")
g.addoption('--dontfork', action='store_true', dest='dontfork', default=False,
help="run all benchmarks in the same process")
# run each benchmark several times, and take the best timing
# each benchmark run is executed in a separate process (unless told otherwise)
def pytest_runtest_call(self, item, __multicall__):
# tell MultiCall not to execute other runtest_calls (e.g. runner.pytest_runtest_call)
__multicall__.firstresult = True
def run():
tstart = time()
item.runtest()
tend = time()
return tend-tstart
tv = []
for i in xrange(item.config.option.benchruns):
if item.config.option.dontfork:
t = run()
else:
runf = XForkedFunc(run)
result = runf.waitfinish()
if result.exitstatus == XForkedFunc.EXITSTATUS_EXCEPTION:
print(result.err, file=sys.stderr) # XXX vs runf doesn't capture stderr
1/0 # TODO re-raise properly
elif result.exitstatus != 0:
print(result.err, file=sys.stderr) # XXX vs runf doesn't capture stderr
1/0 # TODO handle properly
t = result.retval
tv.append(t)
return tv
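# NOTE the list of per-run timings returned here becomes call.result, which
# pytest_runtest_makereport below attaches to the report as .bench_times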
# set benchmarking times in the report, if the run was ok
def pytest_runtest_makereport(self, item, call):
report = _pytest.runner.pytest_runtest_makereport(item, call)
if call.when == 'call' and not call.excinfo:
report.bench_times = call.result
return report
# colors to use when printing !passed
# XXX somewhat dup from _pytest/terminal.py
def report_markup(report):
m = {}
if report.failed:
m = {'red': True, 'bold': True}
elif report.skipped:
m = {'yellow': True}
return m
# max(seq) or 0 if !seq
def max0(seq):
seq = list(seq) # if generator -> generate
if not seq:
return 0
else:
return max(seq)
# Adjusted terminal reporting to benchmarking needs
class XTerminalReporter(_pytest_TerminalReporter):
# determine largest item name (to right-align timings)
def pytest_collection_finish(self, session):
_pytest_TerminalReporter.pytest_collection_finish(self, session)
# FIXME Item.nodeid is not strictly the same as ._locationline()
self.benchname_maxlen = max0(len(_.nodeid) for _ in session.items)
def pytest_runtest_logstart(self, nodeid, location):
return # nothing
def pytest_runtest_logreport(self, report):
_ = self.config.hook.pytest_report_teststatus(report=report)
cat, letter, word = _
self.stats.setdefault(cat, []).append(report) # XXX needed for bench?
if not letter and not word:
# passed setup/teardown
return
# TODO right-align timing
# TODO shorter benchname?
benchname = self._locationline(report.fspath, *report.location)
self._tw.line()
self._tw.write('%-*s\t' % (self.benchname_maxlen, benchname))
if report.passed:
self._tw.write('%.2f' % min(report.bench_times))
if not self.config.option.quiet:
self._tw.write(' (%s)' %
' '.join('%.2f' % _ for _ in report.bench_times))
else:
self._tw.write('[%s]' % word, **report_markup(report))
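# a passing benchmark is thus printed roughly as (hypothetical numbers):
#
#   bench_x.py::bench_something    0.35 (0.35 0.38 0.41)
#
# i.e. the best (minimum) time, then all per-run times unless -q is given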
# there is no way to override it otherwise - it is hardcoded in
# _pytest.terminal's pytest_configure()
_pytest.terminal.TerminalReporter = XTerminalReporter
def main():
pytest.main(plugins=[BenchPlugin()])
if __name__ == '__main__':
main()