Commit 074ce24d authored by Kirill Smelkov's avatar Kirill Smelkov

py.bench: Less noisy output

For several benchmarks in a Python module, instead of always printing the
whole benchmark function path (nodeid in pytest speak), first print a

	pymod: modulename

header, and then only the function names.

Before patch:

============================= test session starts ==============================
platform linux2 -- Python 2.7.14, pytest-3.3.0, py-1.5.2, pluggy-0.6.0
rootdir: /home/kirr/src/wendelin/wendelin.core, inifile:
collected 14 items

bigfile/tests/bench_0virtmem.py::bench_file_mmapread_hole 	0.22  (0.39 0.22 0.22)
bigfile/tests/bench_0virtmem.py::bench_file_read_hole    	0.24  (0.24 0.24 0.24)
bigfile/tests/bench_0virtmem.py::bench_file_readbig_hole 	0.30  (0.30 0.30 0.30)
bigfile/tests/bench_0virtmem.py::bench_bigf_read_hole    	0.43  (0.43 0.43 0.43)
bigfile/tests/bench_0virtmem.py::bench_file_mmapwrite0   	0.12  (0.13 0.12 0.13)
bigfile/tests/bench_0virtmem.py::bench_file_write55      	0.08  (0.08 0.08 0.08)
bigfile/tests/bench_0virtmem.py::bench_bigf_writeff      	0.45  (0.45 0.45 0.45)
bigfile/tests/bench_0virtmem.py::bench_file_mmapread     	0.21  (0.21 0.21 0.22)
bigfile/tests/bench_0virtmem.py::bench_file_read         	0.23  (0.23 0.24 0.24)
bigfile/tests/bench_0virtmem.py::bench_file_readbig      	0.30  (0.30 0.30 0.30)
bigfile/tests/bench_0virtmem.py::bench_bigf_read         	0.43  (0.44 0.43 0.44)
bigfile/tests/bench_1filezodb.py::bench_bigz_readhole    	0.42  (0.42 0.43 0.43)
bigfile/tests/bench_1filezodb.py::bench_bigz_writeff     	0.84  (0.84 1.25 1.25)
bigfile/tests/bench_1filezodb.py::bench_bigz_read        	0.60  (0.64 0.60 0.60)
========================== 14 passed in 21.78 seconds ==========================

After patch:

============================= test session starts ==============================
platform linux2 -- Python 2.7.14, pytest-3.3.0, py-1.5.2, pluggy-0.6.0
rootdir: /home/kirr/src/wendelin/wendelin.core, inifile:
collected 14 items

pymod: bigfile/tests/bench_0virtmem.py
bench_file_mmapread_hole	0.21  (0.39 0.22 0.21)
bench_file_read_hole    	0.24  (0.24 0.24 0.24)
bench_file_readbig_hole 	0.30  (0.30 0.30 0.30)
bench_bigf_read_hole    	0.43  (0.43 0.43 0.43)
bench_file_mmapwrite0   	0.12  (0.13 0.12 0.13)
bench_file_write55      	0.08  (0.08 0.08 0.08)
bench_bigf_writeff      	0.45  (0.52 0.51 0.45)
bench_file_mmapread     	0.21  (0.21 0.21 0.21)
bench_file_read         	0.24  (0.24 0.24 0.24)
bench_file_readbig      	0.30  (0.30 0.30 0.30)
bench_bigf_read         	0.43  (0.43 0.43 0.43)

pymod: bigfile/tests/bench_1filezodb.py
bench_bigz_readhole     	0.43  (0.43 0.43 0.43)
bench_bigz_writeff      	0.83  (0.83 1.20 1.21)
bench_bigz_read         	0.60  (0.65 0.60 0.60)

========================== 14 passed in 21.80 seconds ==========================

The `key: value` header is compatible with the Go benchmark format[1]. As a
next step we will teach py.bench to emit its results in that format, so that
Go benchmarking tools such as benchstat and perf.golang.org can be used
for free.

[1] https://github.com/golang/proposal/blob/master/design/14313-benchmark-format.md
parent ed13c3f9
...@@ -134,6 +134,10 @@ def max0(seq): ...@@ -134,6 +134,10 @@ def max0(seq):
else: else:
return max(seq) return max(seq)
# benchname(nodeid) returns name of a benchmark from a function nodeid
def benchname(nodeid):
    # a nodeid has the form "path/to/file.py::benchfunc"; the benchmark
    # name is everything after the fspath component
    parts = nodeid.split("::", 1)
    return parts[1]
# Adjujsted terminal reporting to benchmarking needs # Adjujsted terminal reporting to benchmarking needs
class XTerminalReporter(_pytest_TerminalReporter): class XTerminalReporter(_pytest_TerminalReporter):
...@@ -141,12 +145,22 @@ class XTerminalReporter(_pytest_TerminalReporter): ...@@ -141,12 +145,22 @@ class XTerminalReporter(_pytest_TerminalReporter):
def pytest_collection_finish(self, session):
    """After collection: remember the widest benchmark name for column alignment."""
    _pytest_TerminalReporter.pytest_collection_finish(self, session)
    # width of the longest benchmark name; max0 handles an empty collection
    widths = (len(benchname(item.nodeid)) for item in session.items)
    self.benchname_maxlen = max0(widths)
def pytest_runtest_logstart(self, nodeid, location):
    """Print a `pymod: <module>` header once per benchmark module.

    Individual benchmark lines then show only the function name; the
    `key: value` header form is compatible with the Go benchmark format.
    """
    # fspath of the module this benchmark item belongs to
    fspath = self.config.rootdir.join(nodeid.split("::")[0])
    if fspath == self.currentfspath:
        return  # same module as previous item - header already printed
    # `is None`, not `== None`: identity test for the None singleton (PEP 8)
    first = (self.currentfspath is None)
    self.currentfspath = fspath
    fspath = self.startdir.bestrelpath(fspath)
    self._tw.line()
    # vskip in between modules
    if not first:
        self._tw.line()
    self.write("pymod: %s" % fspath)
def pytest_runtest_logreport(self, report): def pytest_runtest_logreport(self, report):
_ =self.config.hook.pytest_report_teststatus(report=report) _ =self.config.hook.pytest_report_teststatus(report=report)
...@@ -157,10 +171,8 @@ class XTerminalReporter(_pytest_TerminalReporter): ...@@ -157,10 +171,8 @@ class XTerminalReporter(_pytest_TerminalReporter):
return return
# TODO ralign timing # TODO ralign timing
# TODO shorter benchname?
benchname = self._locationline(report.nodeid, *report.location)
self._tw.line() self._tw.line()
self._tw.write('%-*s\t' % (self.benchname_maxlen, benchname)) self._tw.write('%-*s\t' % (self.benchname_maxlen, benchname(report.nodeid)))
if report.passed: if report.passed:
self._tw.write('%.2f' % min(report.bench_times)) self._tw.write('%.2f' % min(report.bench_times))
if not self.config.option.quiet: if not self.config.option.quiet:
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment