Commit f1ac4eb7 authored by Marc-André Lemburg

Updated to pybench 2.0.

See svn.python.org/external/pybench-2.0 for the original import of that
version.

Note that platform.py was not copied over from pybench-2.0 since
it is already part of Python 2.5.
parent 5131a4e5
...@@ -2,7 +2,7 @@ from pybench import Test ...@@ -2,7 +2,7 @@ from pybench import Test
class SimpleIntegerArithmetic(Test): class SimpleIntegerArithmetic(Test):
version = 0.3 version = 2.0
operations = 5 * (3 + 5 + 5 + 3 + 3 + 3) operations = 5 * (3 + 5 + 5 + 3 + 3 + 3)
rounds = 120000 rounds = 120000
...@@ -157,9 +157,9 @@ class SimpleIntegerArithmetic(Test): ...@@ -157,9 +157,9 @@ class SimpleIntegerArithmetic(Test):
class SimpleFloatArithmetic(Test): class SimpleFloatArithmetic(Test):
version = 0.3 version = 2.0
operations = 5 * (3 + 5 + 5 + 3 + 3 + 3) operations = 5 * (3 + 5 + 5 + 3 + 3 + 3)
rounds = 100000 rounds = 120000
def test(self): def test(self):
...@@ -312,7 +312,7 @@ class SimpleFloatArithmetic(Test): ...@@ -312,7 +312,7 @@ class SimpleFloatArithmetic(Test):
class SimpleIntFloatArithmetic(Test): class SimpleIntFloatArithmetic(Test):
version = 0.3 version = 2.0
operations = 5 * (3 + 5 + 5 + 3 + 3 + 3) operations = 5 * (3 + 5 + 5 + 3 + 3 + 3)
rounds = 120000 rounds = 120000
...@@ -468,9 +468,9 @@ class SimpleIntFloatArithmetic(Test): ...@@ -468,9 +468,9 @@ class SimpleIntFloatArithmetic(Test):
class SimpleLongArithmetic(Test): class SimpleLongArithmetic(Test):
version = 0.3 version = 2.0
operations = 5 * (3 + 5 + 5 + 3 + 3 + 3) operations = 5 * (3 + 5 + 5 + 3 + 3 + 3)
rounds = 30000 rounds = 60000
def test(self): def test(self):
...@@ -623,9 +623,9 @@ class SimpleLongArithmetic(Test): ...@@ -623,9 +623,9 @@ class SimpleLongArithmetic(Test):
class SimpleComplexArithmetic(Test): class SimpleComplexArithmetic(Test):
version = 0.3 version = 2.0
operations = 5 * (3 + 5 + 5 + 3 + 3 + 3) operations = 5 * (3 + 5 + 5 + 3 + 3 + 3)
rounds = 40000 rounds = 80000
def test(self): def test(self):
......
...@@ -2,7 +2,7 @@ from pybench import Test ...@@ -2,7 +2,7 @@ from pybench import Test
class PythonFunctionCalls(Test): class PythonFunctionCalls(Test):
version = 0.3 version = 2.0
operations = 5*(1+4+4+2) operations = 5*(1+4+4+2)
rounds = 60000 rounds = 60000
...@@ -111,9 +111,9 @@ class PythonFunctionCalls(Test): ...@@ -111,9 +111,9 @@ class PythonFunctionCalls(Test):
class BuiltinFunctionCalls(Test): class BuiltinFunctionCalls(Test):
version = 0.4 version = 2.0
operations = 5*(2+5+5+5) operations = 5*(2+5+5+5)
rounds = 30000 rounds = 60000
def test(self): def test(self):
...@@ -232,9 +232,9 @@ class BuiltinFunctionCalls(Test): ...@@ -232,9 +232,9 @@ class BuiltinFunctionCalls(Test):
class PythonMethodCalls(Test): class PythonMethodCalls(Test):
version = 0.3 version = 2.0
operations = 5*(6 + 5 + 4) operations = 5*(6 + 5 + 4)
rounds = 20000 rounds = 30000
def test(self): def test(self):
...@@ -374,9 +374,9 @@ class PythonMethodCalls(Test): ...@@ -374,9 +374,9 @@ class PythonMethodCalls(Test):
class Recursion(Test): class Recursion(Test):
version = 0.3 version = 2.0
operations = 5 operations = 5
rounds = 50000 rounds = 100000
def test(self): def test(self):
...@@ -407,3 +407,98 @@ class Recursion(Test): ...@@ -407,3 +407,98 @@ class Recursion(Test):
for i in xrange(self.rounds): for i in xrange(self.rounds):
pass pass
### Test to make Fredrik happy...
if __name__ == '__main__':
import timeit
if 0:
timeit.TestClass = PythonFunctionCalls
timeit.main(['-s', 'test = TestClass(); test.rounds = 1000',
'test.test()'])
else:
setup = """\
global f,f1,g,h
# define functions
def f():
pass
def f1(x):
pass
def g(a,b,c):
return a,b,c
def h(a,b,c,d=1,e=2,f=3):
return d,e,f
i = 1
"""
test = """\
f()
f1(i)
f1(i)
f1(i)
f1(i)
g(i,i,i)
g(i,i,i)
g(i,i,i)
g(i,i,i)
h(i,i,3,i,i)
h(i,i,i,2,i,3)
f()
f1(i)
f1(i)
f1(i)
f1(i)
g(i,i,i)
g(i,i,i)
g(i,i,i)
g(i,i,i)
h(i,i,3,i,i)
h(i,i,i,2,i,3)
f()
f1(i)
f1(i)
f1(i)
f1(i)
g(i,i,i)
g(i,i,i)
g(i,i,i)
g(i,i,i)
h(i,i,3,i,i)
h(i,i,i,2,i,3)
f()
f1(i)
f1(i)
f1(i)
f1(i)
g(i,i,i)
g(i,i,i)
g(i,i,i)
g(i,i,i)
h(i,i,3,i,i)
h(i,i,i,2,i,3)
f()
f1(i)
f1(i)
f1(i)
f1(i)
g(i,i,i)
g(i,i,i)
g(i,i,i)
g(i,i,i)
h(i,i,3,i,i)
h(i,i,i,2,i,3)
"""
timeit.main(['-s', setup,
test])
...@@ -358,7 +358,7 @@ class Application: ...@@ -358,7 +358,7 @@ class Application:
except self.InternalError: except self.InternalError:
print print
print '* Internal Error' print '* Internal Error (use --debug to display the traceback)'
if self.debug: if self.debug:
print print
traceback.print_exc(20, sys.stdout) traceback.print_exc(20, sys.stdout)
......
...@@ -2,7 +2,7 @@ from pybench import Test ...@@ -2,7 +2,7 @@ from pybench import Test
class IfThenElse(Test): class IfThenElse(Test):
version = 0.31 version = 2.0
operations = 30*3 # hard to say... operations = 30*3 # hard to say...
rounds = 150000 rounds = 150000
...@@ -469,9 +469,9 @@ class IfThenElse(Test): ...@@ -469,9 +469,9 @@ class IfThenElse(Test):
class NestedForLoops(Test): class NestedForLoops(Test):
version = 0.3 version = 2.0
operations = 1000*10*5 operations = 1000*10*5
rounds = 150 rounds = 300
def test(self): def test(self):
...@@ -494,9 +494,9 @@ class NestedForLoops(Test): ...@@ -494,9 +494,9 @@ class NestedForLoops(Test):
class ForLoops(Test): class ForLoops(Test):
version = 0.1 version = 2.0
operations = 5 * 5 operations = 5 * 5
rounds = 8000 rounds = 10000
def test(self): def test(self):
......
...@@ -2,9 +2,9 @@ from pybench import Test ...@@ -2,9 +2,9 @@ from pybench import Test
class DictCreation(Test): class DictCreation(Test):
version = 0.3 version = 2.0
operations = 5*(5 + 5) operations = 5*(5 + 5)
rounds = 60000 rounds = 80000
def test(self): def test(self):
...@@ -77,7 +77,7 @@ class DictCreation(Test): ...@@ -77,7 +77,7 @@ class DictCreation(Test):
class DictWithStringKeys(Test): class DictWithStringKeys(Test):
version = 0.1 version = 2.0
operations = 5*(6 + 6) operations = 5*(6 + 6)
rounds = 200000 rounds = 200000
...@@ -166,9 +166,9 @@ class DictWithStringKeys(Test): ...@@ -166,9 +166,9 @@ class DictWithStringKeys(Test):
class DictWithFloatKeys(Test): class DictWithFloatKeys(Test):
version = 0.1 version = 2.0
operations = 5*(6 + 6) operations = 5*(6 + 6)
rounds = 200000 rounds = 150000
def test(self): def test(self):
...@@ -255,7 +255,7 @@ class DictWithFloatKeys(Test): ...@@ -255,7 +255,7 @@ class DictWithFloatKeys(Test):
class DictWithIntegerKeys(Test): class DictWithIntegerKeys(Test):
version = 0.1 version = 2.0
operations = 5*(6 + 6) operations = 5*(6 + 6)
rounds = 200000 rounds = 200000
...@@ -344,13 +344,14 @@ class DictWithIntegerKeys(Test): ...@@ -344,13 +344,14 @@ class DictWithIntegerKeys(Test):
class SimpleDictManipulation(Test): class SimpleDictManipulation(Test):
version = 0.3 version = 2.0
operations = 5*(6 + 6 + 6 + 6) operations = 5*(6 + 6 + 6 + 6)
rounds = 50000 rounds = 100000
def test(self): def test(self):
d = {} d = {}
has_key = d.has_key
for i in xrange(self.rounds): for i in xrange(self.rounds):
...@@ -368,12 +369,12 @@ class SimpleDictManipulation(Test): ...@@ -368,12 +369,12 @@ class SimpleDictManipulation(Test):
x = d[4] x = d[4]
x = d[5] x = d[5]
d.has_key(0) has_key(0)
d.has_key(2) has_key(2)
d.has_key(4) has_key(4)
d.has_key(6) has_key(6)
d.has_key(8) has_key(8)
d.has_key(10) has_key(10)
del d[0] del d[0]
del d[1] del d[1]
...@@ -396,12 +397,12 @@ class SimpleDictManipulation(Test): ...@@ -396,12 +397,12 @@ class SimpleDictManipulation(Test):
x = d[4] x = d[4]
x = d[5] x = d[5]
d.has_key(0) has_key(0)
d.has_key(2) has_key(2)
d.has_key(4) has_key(4)
d.has_key(6) has_key(6)
d.has_key(8) has_key(8)
d.has_key(10) has_key(10)
del d[0] del d[0]
del d[1] del d[1]
...@@ -424,12 +425,12 @@ class SimpleDictManipulation(Test): ...@@ -424,12 +425,12 @@ class SimpleDictManipulation(Test):
x = d[4] x = d[4]
x = d[5] x = d[5]
d.has_key(0) has_key(0)
d.has_key(2) has_key(2)
d.has_key(4) has_key(4)
d.has_key(6) has_key(6)
d.has_key(8) has_key(8)
d.has_key(10) has_key(10)
del d[0] del d[0]
del d[1] del d[1]
...@@ -452,12 +453,12 @@ class SimpleDictManipulation(Test): ...@@ -452,12 +453,12 @@ class SimpleDictManipulation(Test):
x = d[4] x = d[4]
x = d[5] x = d[5]
d.has_key(0) has_key(0)
d.has_key(2) has_key(2)
d.has_key(4) has_key(4)
d.has_key(6) has_key(6)
d.has_key(8) has_key(8)
d.has_key(10) has_key(10)
del d[0] del d[0]
del d[1] del d[1]
...@@ -480,12 +481,12 @@ class SimpleDictManipulation(Test): ...@@ -480,12 +481,12 @@ class SimpleDictManipulation(Test):
x = d[4] x = d[4]
x = d[5] x = d[5]
d.has_key(0) has_key(0)
d.has_key(2) has_key(2)
d.has_key(4) has_key(4)
d.has_key(6) has_key(6)
d.has_key(8) has_key(8)
d.has_key(10) has_key(10)
del d[0] del d[0]
del d[1] del d[1]
...@@ -497,6 +498,7 @@ class SimpleDictManipulation(Test): ...@@ -497,6 +498,7 @@ class SimpleDictManipulation(Test):
def calibrate(self): def calibrate(self):
d = {} d = {}
has_key = d.has_key
for i in xrange(self.rounds): for i in xrange(self.rounds):
pass pass
...@@ -2,9 +2,9 @@ from pybench import Test ...@@ -2,9 +2,9 @@ from pybench import Test
class TryRaiseExcept(Test): class TryRaiseExcept(Test):
version = 0.1 version = 2.0
operations = 2 + 3 operations = 2 + 3 + 3
rounds = 60000 rounds = 80000
def test(self): def test(self):
...@@ -31,6 +31,18 @@ class TryRaiseExcept(Test): ...@@ -31,6 +31,18 @@ class TryRaiseExcept(Test):
raise error,"something" raise error,"something"
except: except:
pass pass
try:
raise error("something")
except:
pass
try:
raise error("something")
except:
pass
try:
raise error("something")
except:
pass
def calibrate(self): def calibrate(self):
...@@ -42,9 +54,9 @@ class TryRaiseExcept(Test): ...@@ -42,9 +54,9 @@ class TryRaiseExcept(Test):
class TryExcept(Test): class TryExcept(Test):
version = 0.1 version = 2.0
operations = 15 * 10 operations = 15 * 10
rounds = 200000 rounds = 150000
def test(self): def test(self):
...@@ -677,3 +689,11 @@ class TryExcept(Test): ...@@ -677,3 +689,11 @@ class TryExcept(Test):
for i in xrange(self.rounds): for i in xrange(self.rounds):
pass pass
### Test to make Fredrik happy...
if __name__ == '__main__':
import timeit
timeit.TestClass = TryRaiseExcept
timeit.main(['-s', 'test = TestClass(); test.rounds = 1000',
'test.test()'])
...@@ -6,9 +6,9 @@ import package.submodule ...@@ -6,9 +6,9 @@ import package.submodule
class SecondImport(Test): class SecondImport(Test):
version = 0.1 version = 2.0
operations = 5 * 5 operations = 5 * 5
rounds = 20000 rounds = 40000
def test(self): def test(self):
...@@ -51,9 +51,9 @@ class SecondImport(Test): ...@@ -51,9 +51,9 @@ class SecondImport(Test):
class SecondPackageImport(Test): class SecondPackageImport(Test):
version = 0.1 version = 2.0
operations = 5 * 5 operations = 5 * 5
rounds = 20000 rounds = 40000
def test(self): def test(self):
...@@ -95,9 +95,9 @@ class SecondPackageImport(Test): ...@@ -95,9 +95,9 @@ class SecondPackageImport(Test):
class SecondSubmoduleImport(Test): class SecondSubmoduleImport(Test):
version = 0.1 version = 2.0
operations = 5 * 5 operations = 5 * 5
rounds = 20000 rounds = 40000
def test(self): def test(self):
......
...@@ -2,9 +2,9 @@ from pybench import Test ...@@ -2,9 +2,9 @@ from pybench import Test
class CreateInstances(Test): class CreateInstances(Test):
version = 0.2 version = 2.0
operations = 3 + 7 + 4 operations = 3 + 7 + 4
rounds = 60000 rounds = 80000
def test(self): def test(self):
......
...@@ -2,22 +2,23 @@ from pybench import Test ...@@ -2,22 +2,23 @@ from pybench import Test
class SimpleListManipulation(Test): class SimpleListManipulation(Test):
version = 0.3 version = 2.0
operations = 5* (6 + 6 + 6) operations = 5* (6 + 6 + 6)
rounds = 60000 rounds = 130000
def test(self): def test(self):
l = [] l = []
append = l.append
for i in xrange(self.rounds): for i in xrange(self.rounds):
l.append(2) append(2)
l.append(3) append(3)
l.append(4) append(4)
l.append(2) append(2)
l.append(3) append(3)
l.append(4) append(4)
l[0] = 3 l[0] = 3
l[1] = 4 l[1] = 4
...@@ -33,12 +34,12 @@ class SimpleListManipulation(Test): ...@@ -33,12 +34,12 @@ class SimpleListManipulation(Test):
x = l[4] x = l[4]
x = l[5] x = l[5]
l.append(2) append(2)
l.append(3) append(3)
l.append(4) append(4)
l.append(2) append(2)
l.append(3) append(3)
l.append(4) append(4)
l[0] = 3 l[0] = 3
l[1] = 4 l[1] = 4
...@@ -54,12 +55,12 @@ class SimpleListManipulation(Test): ...@@ -54,12 +55,12 @@ class SimpleListManipulation(Test):
x = l[4] x = l[4]
x = l[5] x = l[5]
l.append(2) append(2)
l.append(3) append(3)
l.append(4) append(4)
l.append(2) append(2)
l.append(3) append(3)
l.append(4) append(4)
l[0] = 3 l[0] = 3
l[1] = 4 l[1] = 4
...@@ -75,12 +76,12 @@ class SimpleListManipulation(Test): ...@@ -75,12 +76,12 @@ class SimpleListManipulation(Test):
x = l[4] x = l[4]
x = l[5] x = l[5]
l.append(2) append(2)
l.append(3) append(3)
l.append(4) append(4)
l.append(2) append(2)
l.append(3) append(3)
l.append(4) append(4)
l[0] = 3 l[0] = 3
l[1] = 4 l[1] = 4
...@@ -96,12 +97,12 @@ class SimpleListManipulation(Test): ...@@ -96,12 +97,12 @@ class SimpleListManipulation(Test):
x = l[4] x = l[4]
x = l[5] x = l[5]
l.append(2) append(2)
l.append(3) append(3)
l.append(4) append(4)
l.append(2) append(2)
l.append(3) append(3)
l.append(4) append(4)
l[0] = 3 l[0] = 3
l[1] = 4 l[1] = 4
...@@ -124,15 +125,16 @@ class SimpleListManipulation(Test): ...@@ -124,15 +125,16 @@ class SimpleListManipulation(Test):
def calibrate(self): def calibrate(self):
l = [] l = []
append = l.append
for i in xrange(self.rounds): for i in xrange(self.rounds):
pass pass
class ListSlicing(Test): class ListSlicing(Test):
version = 0.4 version = 2.0
operations = 25*(3+1+2+1) operations = 25*(3+1+2+1)
rounds = 400 rounds = 800
def test(self): def test(self):
...@@ -141,7 +143,7 @@ class ListSlicing(Test): ...@@ -141,7 +143,7 @@ class ListSlicing(Test):
for i in xrange(self.rounds): for i in xrange(self.rounds):
l = range(100) l = n[:]
for j in r: for j in r:
...@@ -159,17 +161,14 @@ class ListSlicing(Test): ...@@ -159,17 +161,14 @@ class ListSlicing(Test):
r = range(25) r = range(25)
for i in xrange(self.rounds): for i in xrange(self.rounds):
l = range(100)
for j in r: for j in r:
pass pass
class SmallLists(Test): class SmallLists(Test):
version = 0.3 version = 2.0
operations = 5*(1+ 6 + 6 + 3 + 1) operations = 5*(1+ 6 + 6 + 3 + 1)
rounds = 60000 rounds = 80000
def test(self): def test(self):
...@@ -177,12 +176,13 @@ class SmallLists(Test): ...@@ -177,12 +176,13 @@ class SmallLists(Test):
l = [] l = []
l.append(2) append = l.append
l.append(3) append(2)
l.append(4) append(3)
l.append(2) append(4)
l.append(3) append(2)
l.append(4) append(3)
append(4)
l[0] = 3 l[0] = 3
l[1] = 4 l[1] = 4
...@@ -199,12 +199,13 @@ class SmallLists(Test): ...@@ -199,12 +199,13 @@ class SmallLists(Test):
l = [] l = []
l.append(2) append = l.append
l.append(3) append(2)
l.append(4) append(3)
l.append(2) append(4)
l.append(3) append(2)
l.append(4) append(3)
append(4)
l[0] = 3 l[0] = 3
l[1] = 4 l[1] = 4
...@@ -221,12 +222,13 @@ class SmallLists(Test): ...@@ -221,12 +222,13 @@ class SmallLists(Test):
l = [] l = []
l.append(2) append = l.append
l.append(3) append(2)
l.append(4) append(3)
l.append(2) append(4)
l.append(3) append(2)
l.append(4) append(3)
append(4)
l[0] = 3 l[0] = 3
l[1] = 4 l[1] = 4
...@@ -243,12 +245,13 @@ class SmallLists(Test): ...@@ -243,12 +245,13 @@ class SmallLists(Test):
l = [] l = []
l.append(2) append = l.append
l.append(3) append(2)
l.append(4) append(3)
l.append(2) append(4)
l.append(3) append(2)
l.append(4) append(3)
append(4)
l[0] = 3 l[0] = 3
l[1] = 4 l[1] = 4
...@@ -265,12 +268,13 @@ class SmallLists(Test): ...@@ -265,12 +268,13 @@ class SmallLists(Test):
l = [] l = []
l.append(2) append = l.append
l.append(3) append(2)
l.append(4) append(3)
l.append(2) append(4)
l.append(3) append(2)
l.append(4) append(3)
append(4)
l[0] = 3 l[0] = 3
l[1] = 4 l[1] = 4
...@@ -288,4 +292,4 @@ class SmallLists(Test): ...@@ -288,4 +292,4 @@ class SmallLists(Test):
def calibrate(self): def calibrate(self):
for i in xrange(self.rounds): for i in xrange(self.rounds):
l = [] pass
...@@ -2,7 +2,7 @@ from pybench import Test ...@@ -2,7 +2,7 @@ from pybench import Test
class SpecialClassAttribute(Test): class SpecialClassAttribute(Test):
version = 0.3 version = 2.0
operations = 5*(12 + 12) operations = 5*(12 + 12)
rounds = 100000 rounds = 100000
...@@ -183,7 +183,7 @@ class SpecialClassAttribute(Test): ...@@ -183,7 +183,7 @@ class SpecialClassAttribute(Test):
class NormalClassAttribute(Test): class NormalClassAttribute(Test):
version = 0.3 version = 2.0
operations = 5*(12 + 12) operations = 5*(12 + 12)
rounds = 100000 rounds = 100000
...@@ -369,7 +369,7 @@ class NormalClassAttribute(Test): ...@@ -369,7 +369,7 @@ class NormalClassAttribute(Test):
class SpecialInstanceAttribute(Test): class SpecialInstanceAttribute(Test):
version = 0.3 version = 2.0
operations = 5*(12 + 12) operations = 5*(12 + 12)
rounds = 100000 rounds = 100000
...@@ -557,7 +557,7 @@ class SpecialInstanceAttribute(Test): ...@@ -557,7 +557,7 @@ class SpecialInstanceAttribute(Test):
class NormalInstanceAttribute(Test): class NormalInstanceAttribute(Test):
version = 0.3 version = 2.0
operations = 5*(12 + 12) operations = 5*(12 + 12)
rounds = 100000 rounds = 100000
...@@ -745,7 +745,7 @@ class NormalInstanceAttribute(Test): ...@@ -745,7 +745,7 @@ class NormalInstanceAttribute(Test):
class BuiltinMethodLookup(Test): class BuiltinMethodLookup(Test):
version = 0.3 version = 2.0
operations = 5*(3*5 + 3*5) operations = 5*(3*5 + 3*5)
rounds = 70000 rounds = 70000
......
from pybench import Test from pybench import Test
# Check for new-style class support:
try:
class c(object):
pass
except NameError:
raise ImportError
###
class CreateNewInstances(Test): class CreateNewInstances(Test):
version = 0.1 version = 2.0
operations = 3 + 7 + 4 operations = 3 + 7 + 4
rounds = 60000 rounds = 60000
......
...@@ -2,7 +2,7 @@ from pybench import Test ...@@ -2,7 +2,7 @@ from pybench import Test
class CompareIntegers(Test): class CompareIntegers(Test):
version = 0.1 version = 2.0
operations = 30 * 5 operations = 30 * 5
rounds = 120000 rounds = 120000
...@@ -198,9 +198,9 @@ class CompareIntegers(Test): ...@@ -198,9 +198,9 @@ class CompareIntegers(Test):
class CompareFloats(Test): class CompareFloats(Test):
version = 0.1 version = 2.0
operations = 30 * 5 operations = 30 * 5
rounds = 60000 rounds = 80000
def test(self): def test(self):
...@@ -394,7 +394,7 @@ class CompareFloats(Test): ...@@ -394,7 +394,7 @@ class CompareFloats(Test):
class CompareFloatsIntegers(Test): class CompareFloatsIntegers(Test):
version = 0.1 version = 2.0
operations = 30 * 5 operations = 30 * 5
rounds = 60000 rounds = 60000
...@@ -590,9 +590,9 @@ class CompareFloatsIntegers(Test): ...@@ -590,9 +590,9 @@ class CompareFloatsIntegers(Test):
class CompareLongs(Test): class CompareLongs(Test):
version = 0.1 version = 2.0
operations = 30 * 5 operations = 30 * 5
rounds = 60000 rounds = 70000
def test(self): def test(self):
......
...@@ -28,12 +28,37 @@ and then print out a report to stdout. ...@@ -28,12 +28,37 @@ and then print out a report to stdout.
Micro-Manual Micro-Manual
------------ ------------
Run 'pybench.py -h' to see the help screen. Run 'pybench.py -h' to see the help screen. Run 'pybench.py' to run
Run 'pybench.py' to just let the benchmark suite do it's thing and the benchmark suite using default settings and 'pybench.py -f <file>'
'pybench.py -f <file>' to have it store the results in a file too. to have it store the results in a file too.
It is usually a good idea to run pybench.py multiple times to see
whether the environment, timers and benchmark run-times are suitable
for doing benchmark tests.
You can use the comparison feature of pybench.py ('pybench.py -c
<file>') to check how well the system behaves in comparison to a
reference run.
If the differences are well below 10% for each test, then you have a
system that is good for doing benchmark tests. If you get random
differences of more than 10% or significant differences between the
values for minimum and average time, then you likely have some
background processes running which cause the readings to become
inconsistent. Examples include: web-browsers, email clients, RSS
readers, music players, backup programs, etc.
If you are only interested in a few tests of the whole suite, you can
use the filtering option, e.g. 'pybench.py -t string' will only
run/show the tests that have 'string' in their name.
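As a rough illustration of the 10% guideline above (this sketch is not
part of pybench; the numbers and names are made up), the relative
difference between two minimum timings can be computed like this,
normalizing both runs to warp factor 1 the way pybench's comparison
output does:

    # Sketch only: express the difference between this run and a
    # reference run as a percentage of the reference timing.
    def percent_diff(min_time, other_min_time, warp=10, other_warp=10):
        return ((min_time * warp) /
                (other_min_time * other_warp) - 1.0) * 100.0

    if abs(percent_diff(0.126, 0.121)) > 10.0:
        print 'Timings differ by more than 10% - check background load'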
This is the current output of pybench.py --help: This is the current output of pybench.py --help:
"""
------------------------------------------------------------------------
PYBENCH - a benchmark test suite for Python interpreters/compilers.
------------------------------------------------------------------------
Synopsis: Synopsis:
pybench.py [option] files... pybench.py [option] files...
...@@ -42,14 +67,14 @@ Options and default settings: ...@@ -42,14 +67,14 @@ Options and default settings:
-f arg save benchmark to file arg () -f arg save benchmark to file arg ()
-c arg compare benchmark with the one in file arg () -c arg compare benchmark with the one in file arg ()
-s arg show benchmark in file arg, then exit () -s arg show benchmark in file arg, then exit ()
-S show statistics of benchmarks (0) -w arg set warp factor to arg (10)
-w arg set warp factor to arg (20) -t arg run only tests with names matching arg ()
-d hide noise in compares (0) -C arg set the number of calibration runs to arg (20)
--no-gc disable garbage collection (0) -d hide noise in comparisons (0)
--no-syscheck "disable" sys check interval (set to sys.maxint) (0) -v verbose output (not recommended) (0)
-t arg tests containing substring () --with-gc enable garbage collection (0)
-C arg number of calibration runs (20) --with-syscheck use default sys check interval (0)
-v generate verbose output --timer arg use given timer (time.time)
-h show this help text -h show this help text
--help show this help text --help show this help text
--debug enable debugging --debug enable debugging
...@@ -57,17 +82,23 @@ Options and default settings: ...@@ -57,17 +82,23 @@ Options and default settings:
--examples show examples of usage --examples show examples of usage
Version: Version:
1.3 2.0
The normal operation is to run the suite and display the The normal operation is to run the suite and display the
results. Use -f to save them for later reuse or comparisms. results. Use -f to save them for later reuse or comparisons.
Examples: Available timers:
python1.5 pybench.py -w 100 -f p15 time.time
python1.4 pybench.py -w 100 -f p14 time.clock
python pybench.py -s p15 -c p14 systimes.processtime
Examples:
python2.1 pybench.py -f p21.pybench
python2.5 pybench.py -f p25.pybench
python pybench.py -s p25.pybench -c p21.pybench
"""
License License
------- -------
...@@ -78,184 +109,103 @@ See LICENSE file. ...@@ -78,184 +109,103 @@ See LICENSE file.
Sample output Sample output
------------- -------------
PYBENCH 1.3 """
-------------------------------------------------------------------------------
PYBENCH 2.0
-------------------------------------------------------------------------------
* using Python 2.4.2
* disabled garbage collection
* system check interval set to maximum: 2147483647
* using timer: time.time
Machine Details: Calibrating tests. Please wait...
Platform ID: Linux-2.6.8-24.19-default-x86_64-with-SuSE-9.2-x86-64
Executable: /home/lemburg/projects/Python/Installation/bin/python Running 10 round(s) of the suite at warp factor 10:
Python: 2.5a1.0
Compiler: GCC 3.3.4 (pre 3.3.5 20040809)
Build: Apr 9 2006 01:50:57 (#trunk)
Searching for tests...
BuiltinFunctionCalls
BuiltinMethodLookup
CompareFloats
CompareFloatsIntegers
CompareIntegers
CompareInternedStrings
CompareLongs
CompareStrings
CompareUnicode
ConcatStrings
ConcatUnicode
CreateInstances
CreateStringsWithConcat
CreateUnicodeWithConcat
DictCreation
DictWithFloatKeys
DictWithIntegerKeys
DictWithStringKeys
ForLoops
IfThenElse
ListSlicing
NestedForLoops
NormalClassAttribute
NormalInstanceAttribute
PythonFunctionCalls
PythonMethodCalls
Recursion
SecondImport
SecondPackageImport
SecondSubmoduleImport
SimpleComplexArithmetic
SimpleDictManipulation
SimpleFloatArithmetic
SimpleIntFloatArithmetic
SimpleIntegerArithmetic
SimpleListManipulation
SimpleLongArithmetic
SmallLists
SmallTuples
SpecialClassAttribute
SpecialInstanceAttribute
StringMappings
StringPredicates
StringSlicing
TryExcept
TryRaiseExcept
TupleSlicing
UnicodeMappings
UnicodePredicates
UnicodeProperties
UnicodeSlicing
Running 10 round(s) of the suite:
* Round 1 done in 6.388 seconds.
* Round 2 done in 6.485 seconds.
* Round 3 done in 6.786 seconds.
... ...
* Round 10 done in 6.546 seconds.
Round 10 real abs overhead -------------------------------------------------------------------------------
BuiltinFunctionCalls: 0.030r 0.030a 0.000o Benchmark: 2006-06-12 12:09:25
BuiltinMethodLookup: 0.059r 0.060a 0.001o -------------------------------------------------------------------------------
CompareFloats: 0.050r 0.050a 0.000o
CompareFloatsIntegers: 0.050r 0.050a 0.000o Rounds: 10
CompareIntegers: 0.070r 0.070a 0.000o Warp: 10
CompareInternedStrings: 0.039r 0.040a 0.001o Timer: time.time
CompareLongs: 0.050r 0.050a 0.000o
CompareStrings: 0.060r 0.060a 0.000o Machine Details:
CompareUnicode: 0.060r 0.060a 0.000o Platform ID: Linux-2.6.8-24.19-default-x86_64-with-SuSE-9.2-x86-64
ConcatStrings: 0.040r 0.040a 0.000o Processor: x86_64
ConcatUnicode: 0.050r 0.050a 0.000o
CreateInstances: 0.050r 0.050a 0.000o
CreateStringsWithConcat: 0.029r 0.030a 0.001o
CreateUnicodeWithConcat: 0.060r 0.060a 0.000o
DictCreation: 0.040r 0.040a 0.000o
DictWithFloatKeys: 0.089r 0.090a 0.000o
DictWithIntegerKeys: 0.059r 0.060a 0.001o
DictWithStringKeys: 0.070r 0.070a 0.001o
ForLoops: 0.050r 0.050a 0.000o
IfThenElse: 0.070r 0.070a 0.000o
ListSlicing: 0.030r 0.030a 0.000o
NestedForLoops: 0.030r 0.030a 0.000o
NormalClassAttribute: 0.060r 0.060a 0.000o
NormalInstanceAttribute: 0.060r 0.060a 0.000o
PythonFunctionCalls: 0.060r 0.060a 0.000o
PythonMethodCalls: 0.050r 0.050a 0.000o
Recursion: 0.050r 0.050a 0.000o
SecondImport: 0.030r 0.030a 0.000o
SecondPackageImport: 0.030r 0.030a 0.000o
SecondSubmoduleImport: 0.040r 0.040a 0.000o
SimpleComplexArithmetic: 0.030r 0.030a 0.000o
SimpleDictManipulation: 0.040r 0.040a 0.000o
SimpleFloatArithmetic: 0.050r 0.050a 0.001o
SimpleIntFloatArithmetic: 0.060r 0.060a 0.000o
SimpleIntegerArithmetic: 0.060r 0.060a 0.000o
SimpleListManipulation: 0.030r 0.030a 0.000o
SimpleLongArithmetic: 0.030r 0.030a 0.000o
SmallLists: 0.050r 0.050a 0.000o
SmallTuples: 0.050r 0.050a 0.000o
SpecialClassAttribute: 0.060r 0.060a 0.000o
SpecialInstanceAttribute: 0.079r 0.080a 0.001o
StringMappings: 0.060r 0.060a 0.000o
StringPredicates: 0.049r 0.050a 0.001o
StringSlicing: 0.039r 0.040a 0.000o
TryExcept: 0.079r 0.080a 0.001o
TryRaiseExcept: 0.059r 0.060a 0.001o
TupleSlicing: 0.050r 0.050a 0.000o
UnicodeMappings: 0.070r 0.070a 0.001o
UnicodePredicates: 0.059r 0.060a 0.001o
UnicodeProperties: 0.059r 0.060a 0.001o
UnicodeSlicing: 0.050r 0.050a 0.000o
----------------------
Average round time: 2.937 seconds
Tests: per run per oper. overhead
------------------------------------------------------------------------
BuiltinFunctionCalls: 29.85 ms 0.23 us 0.00 ms
BuiltinMethodLookup: 66.85 ms 0.13 us 0.50 ms
CompareFloats: 43.00 ms 0.10 us 0.00 ms
CompareFloatsIntegers: 51.80 ms 0.12 us 0.00 ms
CompareIntegers: 70.70 ms 0.08 us 0.50 ms
CompareInternedStrings: 41.40 ms 0.08 us 0.50 ms
CompareLongs: 47.90 ms 0.11 us 0.00 ms
CompareStrings: 58.50 ms 0.12 us 0.50 ms
CompareUnicode: 56.55 ms 0.15 us 0.50 ms
ConcatStrings: 44.75 ms 0.30 us 0.00 ms
ConcatUnicode: 54.55 ms 0.36 us 0.50 ms
CreateInstances: 50.95 ms 1.21 us 0.00 ms
CreateStringsWithConcat: 28.85 ms 0.14 us 0.50 ms
CreateUnicodeWithConcat: 53.75 ms 0.27 us 0.00 ms
DictCreation: 41.90 ms 0.28 us 0.00 ms
DictWithFloatKeys: 88.50 ms 0.15 us 0.50 ms
DictWithIntegerKeys: 62.55 ms 0.10 us 0.50 ms
DictWithStringKeys: 60.50 ms 0.10 us 0.50 ms
ForLoops: 46.90 ms 4.69 us 0.00 ms
IfThenElse: 60.55 ms 0.09 us 0.00 ms
ListSlicing: 29.90 ms 8.54 us 0.00 ms
NestedForLoops: 33.95 ms 0.10 us 0.00 ms
NormalClassAttribute: 62.75 ms 0.10 us 0.50 ms
NormalInstanceAttribute: 61.80 ms 0.10 us 0.50 ms
PythonFunctionCalls: 60.00 ms 0.36 us 0.00 ms
PythonMethodCalls: 50.00 ms 0.67 us 0.00 ms
Recursion: 46.85 ms 3.75 us 0.00 ms
SecondImport: 35.00 ms 1.40 us 0.00 ms
SecondPackageImport: 32.00 ms 1.28 us 0.00 ms
SecondSubmoduleImport: 38.00 ms 1.52 us 0.00 ms
SimpleComplexArithmetic: 26.85 ms 0.12 us 0.00 ms
SimpleDictManipulation: 40.85 ms 0.14 us 0.00 ms
SimpleFloatArithmetic: 48.70 ms 0.09 us 0.50 ms
SimpleIntFloatArithmetic: 57.70 ms 0.09 us 0.00 ms
SimpleIntegerArithmetic: 58.75 ms 0.09 us 0.50 ms
SimpleListManipulation: 34.80 ms 0.13 us 0.00 ms
SimpleLongArithmetic: 30.95 ms 0.19 us 0.50 ms
SmallLists: 47.60 ms 0.19 us 0.00 ms
SmallTuples: 48.80 ms 0.20 us 0.50 ms
SpecialClassAttribute: 61.70 ms 0.10 us 0.00 ms
SpecialInstanceAttribute: 76.70 ms 0.13 us 0.50 ms
StringMappings: 58.70 ms 0.47 us 0.00 ms
StringPredicates: 50.00 ms 0.18 us 1.00 ms
StringSlicing: 39.65 ms 0.23 us 0.50 ms
TryExcept: 84.45 ms 0.06 us 0.50 ms
TryRaiseExcept: 61.75 ms 4.12 us 0.50 ms
TupleSlicing: 48.95 ms 0.47 us 0.00 ms
UnicodeMappings: 71.50 ms 3.97 us 0.50 ms
UnicodePredicates: 52.75 ms 0.23 us 1.00 ms
UnicodeProperties: 61.90 ms 0.31 us 1.00 ms
UnicodeSlicing: 53.75 ms 0.31 us 0.50 ms
------------------------------------------------------------------------
Average round time: 2937.00 ms
Python:
Executable: /usr/local/bin/python
Version: 2.4.2
Compiler: GCC 3.3.4 (pre 3.3.5 20040809)
Bits: 64bit
Build: Oct 1 2005 15:24:35 (#1)
Unicode: UCS2
Test minimum average operation overhead
-------------------------------------------------------------------------------
BuiltinFunctionCalls: 126ms 145ms 0.28us 0.274ms
BuiltinMethodLookup: 124ms 130ms 0.12us 0.316ms
CompareFloats: 109ms 110ms 0.09us 0.361ms
CompareFloatsIntegers: 100ms 104ms 0.12us 0.271ms
CompareIntegers: 137ms 138ms 0.08us 0.542ms
CompareInternedStrings: 124ms 127ms 0.08us 1.367ms
CompareLongs: 100ms 104ms 0.10us 0.316ms
CompareStrings: 111ms 115ms 0.12us 0.929ms
CompareUnicode: 108ms 128ms 0.17us 0.693ms
ConcatStrings: 142ms 155ms 0.31us 0.562ms
ConcatUnicode: 119ms 127ms 0.42us 0.384ms
CreateInstances: 123ms 128ms 1.14us 0.367ms
CreateNewInstances: 121ms 126ms 1.49us 0.335ms
CreateStringsWithConcat: 130ms 135ms 0.14us 0.916ms
CreateUnicodeWithConcat: 130ms 135ms 0.34us 0.361ms
DictCreation: 108ms 109ms 0.27us 0.361ms
DictWithFloatKeys: 149ms 153ms 0.17us 0.678ms
DictWithIntegerKeys: 124ms 126ms 0.11us 0.915ms
DictWithStringKeys: 114ms 117ms 0.10us 0.905ms
ForLoops: 110ms 111ms 4.46us 0.063ms
IfThenElse: 118ms 119ms 0.09us 0.685ms
ListSlicing: 116ms 120ms 8.59us 0.103ms
NestedForLoops: 125ms 137ms 0.09us 0.019ms
NormalClassAttribute: 124ms 136ms 0.11us 0.457ms
NormalInstanceAttribute: 110ms 117ms 0.10us 0.454ms
PythonFunctionCalls: 107ms 113ms 0.34us 0.271ms
PythonMethodCalls: 140ms 149ms 0.66us 0.141ms
Recursion: 156ms 166ms 3.32us 0.452ms
SecondImport: 112ms 118ms 1.18us 0.180ms
SecondPackageImport: 118ms 127ms 1.27us 0.180ms
SecondSubmoduleImport: 140ms 151ms 1.51us 0.180ms
SimpleComplexArithmetic: 128ms 139ms 0.16us 0.361ms
SimpleDictManipulation: 134ms 136ms 0.11us 0.452ms
SimpleFloatArithmetic: 110ms 113ms 0.09us 0.571ms
SimpleIntFloatArithmetic: 106ms 111ms 0.08us 0.548ms
SimpleIntegerArithmetic: 106ms 109ms 0.08us 0.544ms
SimpleListManipulation: 103ms 113ms 0.10us 0.587ms
SimpleLongArithmetic: 112ms 118ms 0.18us 0.271ms
SmallLists: 105ms 116ms 0.17us 0.366ms
SmallTuples: 108ms 128ms 0.24us 0.406ms
SpecialClassAttribute: 119ms 136ms 0.11us 0.453ms
SpecialInstanceAttribute: 143ms 155ms 0.13us 0.454ms
StringMappings: 115ms 121ms 0.48us 0.405ms
StringPredicates: 120ms 129ms 0.18us 2.064ms
StringSlicing: 111ms 127ms 0.23us 0.781ms
TryExcept: 125ms 126ms 0.06us 0.681ms
TryRaiseExcept: 133ms 137ms 2.14us 0.361ms
TupleSlicing: 117ms 120ms 0.46us 0.066ms
UnicodeMappings: 156ms 160ms 4.44us 0.429ms
UnicodePredicates: 117ms 121ms 0.22us 2.487ms
UnicodeProperties: 115ms 153ms 0.38us 2.070ms
UnicodeSlicing: 126ms 129ms 0.26us 0.689ms
-------------------------------------------------------------------------------
Totals: 6283ms 6673ms
"""
________________________________________________________________________ ________________________________________________________________________
Writing New Tests Writing New Tests
...@@ -293,7 +243,7 @@ class IntegerCounting(Test): ...@@ -293,7 +243,7 @@ class IntegerCounting(Test):
# Number of rounds to execute per test run. This should be # Number of rounds to execute per test run. This should be
# adjusted to a figure that results in a test run-time of between # adjusted to a figure that results in a test run-time of between
# 20-50 seconds. # 1-2 seconds (at warp 1).
rounds = 100000 rounds = 100000
def test(self): def test(self):
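For orientation, a complete minimal test following these conventions
could look as follows. This is only a sketch (the class name, the data
and the operation counts are made up); it assumes the Test base class
from pybench.py:

    from pybench import Test

    class DictLookupExample(Test):

        # Bump this whenever the test body changes (see the note on
        # versions below).
        version = 2.0

        # Abstract operations per round: 5 dictionary lookups.
        operations = 5

        # Chosen so that one run takes roughly 1-2 seconds at warp 1.
        rounds = 100000

        def test(self):
            d = {'a': 1}
            for i in xrange(self.rounds):
                x = d['a']
                x = d['a']
                x = d['a']
                x = d['a']
                x = d['a']

        def calibrate(self):
            # Same setup and loop, but without the timed operations,
            # so that the loop overhead can be subtracted.
            d = {'a': 1}
            for i in xrange(self.rounds):
                pass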
...@@ -377,6 +327,41 @@ longer strictly comparable with previous runs, the '.version' class ...@@ -377,6 +327,41 @@ longer strictly comparable with previous runs, the '.version' class
variable should be updated. Thereafter, comparisons with previous variable should be updated. Thereafter, comparisons with previous
versions of the test will list as "n/a" to reflect the change. versions of the test will list as "n/a" to reflect the change.
Version History
---------------
2.0: rewrote parts of pybench which resulted in more repeatable
timings:
- made timer a parameter
- changed the platform default timer to use high-resolution
timers rather than process timers (which have a much lower
resolution)
- added option to select timer
- added process time timer (using systimes.py)
- changed to use min() as timing estimator (average
is still taken as well to provide an idea of the difference)
- garbage collection is turned off per default
- sys check interval is set to the highest possible value
- calibration is now a separate step and done using
a different strategy that allows measuring the test
overhead more accurately
- modified the tests to each give a run-time of between
100-200ms using warp 10
- changed default warp factor to 10 (from 20)
- compared results with timeit.py and confirmed measurements
- bumped all test versions to 2.0
- updated platform.py to the latest version
- changed the output format a bit to make it look
nicer
- refactored the APIs somewhat
1.3+: Steve Holden added the NewInstances test and the filtering
option during the NeedForSpeed sprint; this also triggered a long
discussion on how to improve benchmark timing and finally
resulted in the release of 2.0
1.3: initial checkin into the Python SVN repository
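The min() based timing estimation added in 2.0 can be summarized in a
few lines. This is an illustrative sketch only, not the pybench
implementation (which also subtracts a separately calibrated loop
overhead):

    import time

    def time_test(test, repeat=10):
        # Run the test several times and keep every timing.
        timings = []
        for i in range(repeat):
            t = time.time()
            test()
            timings.append(time.time() - t)
        # min() estimates the undisturbed run-time; the average is
        # kept as well to show how noisy the environment was.
        return min(timings), sum(timings) / len(timings)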
Have fun, Have fun,
-- --
Marc-Andre Lemburg Marc-Andre Lemburg
......
...@@ -14,7 +14,7 @@ ...@@ -14,7 +14,7 @@
# Defaults # Defaults
Number_of_rounds = 10 Number_of_rounds = 10
Warp_factor = 20 Warp_factor = 10
# Import tests # Import tests
from Arithmetic import * from Arithmetic import *
...@@ -24,8 +24,8 @@ from Lookups import * ...@@ -24,8 +24,8 @@ from Lookups import *
from Instances import * from Instances import *
try: try:
from NewInstances import * from NewInstances import *
except: except ImportError:
print "Cannot test new-style objects" pass
from Lists import * from Lists import *
from Tuples import * from Tuples import *
from Dict import * from Dict import *
......
...@@ -3,9 +3,9 @@ from string import join ...@@ -3,9 +3,9 @@ from string import join
class ConcatStrings(Test): class ConcatStrings(Test):
version = 0.1 version = 2.0
operations = 10 * 5 operations = 10 * 5
rounds = 60000 rounds = 100000
def test(self): def test(self):
...@@ -85,7 +85,7 @@ class ConcatStrings(Test): ...@@ -85,7 +85,7 @@ class ConcatStrings(Test):
class CompareStrings(Test): class CompareStrings(Test):
version = 0.2 version = 2.0
operations = 10 * 5 operations = 10 * 5
rounds = 200000 rounds = 200000
...@@ -167,9 +167,9 @@ class CompareStrings(Test): ...@@ -167,9 +167,9 @@ class CompareStrings(Test):
class CompareInternedStrings(Test): class CompareInternedStrings(Test):
version = 0.1 version = 2.0
operations = 10 * 5 operations = 10 * 5
rounds = 200000 rounds = 300000
def test(self): def test(self):
...@@ -249,9 +249,9 @@ class CompareInternedStrings(Test): ...@@ -249,9 +249,9 @@ class CompareInternedStrings(Test):
class CreateStringsWithConcat(Test): class CreateStringsWithConcat(Test):
version = 0.1 version = 2.0
operations = 10 * 5 operations = 10 * 5
rounds = 80000 rounds = 200000
def test(self): def test(self):
...@@ -324,9 +324,9 @@ class CreateStringsWithConcat(Test): ...@@ -324,9 +324,9 @@ class CreateStringsWithConcat(Test):
class StringSlicing(Test): class StringSlicing(Test):
version = 0.1 version = 2.0
operations = 5 * 7 operations = 5 * 7
rounds = 100000 rounds = 160000
def test(self): def test(self):
...@@ -387,7 +387,7 @@ if hasattr('', 'lower'): ...@@ -387,7 +387,7 @@ if hasattr('', 'lower'):
class StringMappings(Test): class StringMappings(Test):
version = 0.1 version = 2.0
operations = 3 * (5 + 4 + 2 + 1) operations = 3 * (5 + 4 + 2 + 1)
rounds = 70000 rounds = 70000
...@@ -460,9 +460,9 @@ if hasattr('', 'lower'): ...@@ -460,9 +460,9 @@ if hasattr('', 'lower'):
class StringPredicates(Test): class StringPredicates(Test):
version = 0.1 version = 2.0
operations = 10 * 7 operations = 10 * 7
rounds = 80000 rounds = 100000
def test(self): def test(self):
......
...@@ -2,18 +2,17 @@ from pybench import Test ...@@ -2,18 +2,17 @@ from pybench import Test
class TupleSlicing(Test): class TupleSlicing(Test):
version = 0.31 version = 2.0
operations = 3 * 25 * 10 * 7 operations = 3 * 25 * 10 * 7
rounds = 400 rounds = 500
def test(self): def test(self):
r = range(25) r = range(25)
t = tuple(range(100))
for i in xrange(self.rounds): for i in xrange(self.rounds):
t = tuple(range(100))
for j in r: for j in r:
m = t[50:] m = t[50:]
...@@ -259,20 +258,17 @@ class TupleSlicing(Test): ...@@ -259,20 +258,17 @@ class TupleSlicing(Test):
def calibrate(self): def calibrate(self):
r = range(25) r = range(25)
for i in xrange(self.rounds):
t = tuple(range(100)) t = tuple(range(100))
for i in xrange(self.rounds):
for j in r: for j in r:
pass pass
class SmallTuples(Test): class SmallTuples(Test):
version = 0.3 version = 2.0
operations = 5*(1 + 3 + 6 + 2) operations = 5*(1 + 3 + 6 + 2)
rounds = 80000 rounds = 90000
def test(self): def test(self):
......
...@@ -8,7 +8,7 @@ from string import join ...@@ -8,7 +8,7 @@ from string import join
class ConcatUnicode(Test): class ConcatUnicode(Test):
version = 0.1 version = 2.0
operations = 10 * 5 operations = 10 * 5
rounds = 60000 rounds = 60000
...@@ -90,7 +90,7 @@ class ConcatUnicode(Test): ...@@ -90,7 +90,7 @@ class ConcatUnicode(Test):
class CompareUnicode(Test): class CompareUnicode(Test):
version = 0.1 version = 2.0
operations = 10 * 5 operations = 10 * 5
rounds = 150000 rounds = 150000
...@@ -172,7 +172,7 @@ class CompareUnicode(Test): ...@@ -172,7 +172,7 @@ class CompareUnicode(Test):
class CreateUnicodeWithConcat(Test): class CreateUnicodeWithConcat(Test):
version = 0.1 version = 2.0
operations = 10 * 5 operations = 10 * 5
rounds = 80000 rounds = 80000
...@@ -247,9 +247,9 @@ class CreateUnicodeWithConcat(Test): ...@@ -247,9 +247,9 @@ class CreateUnicodeWithConcat(Test):
class UnicodeSlicing(Test): class UnicodeSlicing(Test):
version = 0.1 version = 2.0
operations = 5 * 7 operations = 5 * 7
rounds = 100000 rounds = 140000
def test(self): def test(self):
...@@ -308,7 +308,7 @@ class UnicodeSlicing(Test): ...@@ -308,7 +308,7 @@ class UnicodeSlicing(Test):
class UnicodeMappings(Test): class UnicodeMappings(Test):
version = 0.1 version = 2.0
operations = 3 * (5 + 4 + 2 + 1) operations = 3 * (5 + 4 + 2 + 1)
rounds = 10000 rounds = 10000
...@@ -381,9 +381,9 @@ class UnicodeMappings(Test): ...@@ -381,9 +381,9 @@ class UnicodeMappings(Test):
class UnicodePredicates(Test): class UnicodePredicates(Test):
version = 0.1 version = 2.0
operations = 5 * 9 operations = 5 * 9
rounds = 100000 rounds = 120000
def test(self): def test(self):
...@@ -458,7 +458,7 @@ except ImportError: ...@@ -458,7 +458,7 @@ except ImportError:
else: else:
class UnicodeProperties(Test): class UnicodeProperties(Test):
version = 0.1 version = 2.0
operations = 5 * 8 operations = 5 * 8
rounds = 100000 rounds = 100000
......
#!/usr/bin/env python
""" clockres - calculates the resolution in seconds of a given timer.
Copyright (c) 2006, Marc-Andre Lemburg (mal@egenix.com). See the
documentation for further information on copyrights, or contact
the author. All Rights Reserved.
"""
import time
TEST_TIME = 1.0
def clockres(timer):
d = {}
wallclock = time.time
start = wallclock()
stop = wallclock() + TEST_TIME
spin_loops = range(1000)
while 1:
now = wallclock()
if now >= stop:
break
for i in spin_loops:
d[timer()] = 1
values = d.keys()
values.sort()
min_diff = TEST_TIME
for i in range(len(values) - 1):
diff = values[i+1] - values[i]
if diff < min_diff:
min_diff = diff
return min_diff
if __name__ == '__main__':
print 'Clock resolution of various timer implementations:'
print 'time.clock: %10.3fus' % (clockres(time.clock) * 1e6)
print 'time.time: %10.3fus' % (clockres(time.time) * 1e6)
try:
import systimes
print 'systimes.processtime: %10.3fus' % (clockres(systimes.processtime) * 1e6)
except ImportError:
pass
...@@ -34,20 +34,7 @@ NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION ...@@ -34,20 +34,7 @@ NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
WITH THE USE OR PERFORMANCE OF THIS SOFTWARE ! WITH THE USE OR PERFORMANCE OF THIS SOFTWARE !
""" """
# Version number import sys, time, operator, string
__version__ = '1.3'
#
# NOTE: Use xrange for all test loops unless you want to face
# a 20MB process !
#
# All tests should have rounds set to values so that a run()
# takes between 20-50 seconds. This is to get fairly good
# clock() values. You can use option -w to speedup the tests
# by a fixed integer factor (the "warp factor").
#
import sys,time,operator
from CommandLine import * from CommandLine import *
try: try:
...@@ -56,6 +43,111 @@ try: ...@@ -56,6 +43,111 @@ try:
except ImportError: except ImportError:
import pickle import pickle
# Version number; version history: see README file !
__version__ = '2.0'
### Constants
# Second fractions
MILLI_SECONDS = 1e3
MICRO_SECONDS = 1e6
# Percent unit
PERCENT = 100
# Horizontal line length
LINE = 79
# Minimum test run-time
MIN_TEST_RUNTIME = 1e-3
# Number of calibration runs to use for calibrating the tests
CALIBRATION_RUNS = 20
# Number of calibration loops to run for each calibration run
CALIBRATION_LOOPS = 20
# Allow skipping calibration ?
ALLOW_SKIPPING_CALIBRATION = 1
# Timer types
TIMER_TIME_TIME = 'time.time'
TIMER_TIME_CLOCK = 'time.clock'
TIMER_SYSTIMES_PROCESSTIME = 'systimes.processtime'
# Choose platform default timer
if sys.platform[:3] == 'win':
# On WinXP this has 2.5ms resolution
TIMER_PLATFORM_DEFAULT = TIMER_TIME_CLOCK
else:
# On Linux this has 1ms resolution
TIMER_PLATFORM_DEFAULT = TIMER_TIME_TIME
# Print debug information ?
_debug = 0
### Helpers
def get_timer(timertype):
if timertype == TIMER_TIME_TIME:
return time.time
elif timertype == TIMER_TIME_CLOCK:
return time.clock
elif timertype == TIMER_SYSTIMES_PROCESSTIME:
import systimes
return systimes.processtime
else:
raise TypeError('unknown timer type: %s' % timertype)
def get_machine_details():
import platform
if _debug:
print 'Getting machine details...'
buildno, builddate = platform.python_build()
python = platform.python_version()
if python > '2.0':
try:
unichr(100000)
except ValueError:
# UCS2 build (standard)
unicode = 'UCS2'
else:
# UCS4 build (most recent Linux distros)
unicode = 'UCS4'
else:
unicode = None
bits, linkage = platform.architecture()
return {
'platform': platform.platform(),
'processor': platform.processor(),
'executable': sys.executable,
'python': platform.python_version(),
'compiler': platform.python_compiler(),
'buildno': buildno,
'builddate': builddate,
'unicode': unicode,
'bits': bits,
}
def print_machine_details(d, indent=''):
l = ['Machine Details:',
' Platform ID: %s' % d.get('platform', 'n/a'),
' Processor: %s' % d.get('processor', 'n/a'),
'',
'Python:',
' Executable: %s' % d.get('executable', 'n/a'),
' Version: %s' % d.get('python', 'n/a'),
' Compiler: %s' % d.get('compiler', 'n/a'),
' Bits: %s' % d.get('bits', 'n/a'),
' Build: %s (#%s)' % (d.get('builddate', 'n/a'),
d.get('buildno', 'n/a')),
' Unicode: %s' % d.get('unicode', 'n/a'),
]
print indent + string.join(l, '\n' + indent) + '\n'
### Test baseclass ### Test baseclass
class Test: class Test:
...@@ -84,7 +176,7 @@ class Test: ...@@ -84,7 +176,7 @@ class Test:
# Version number of the test as float (x.yy); this is important # Version number of the test as float (x.yy); this is important
# for comparisons of benchmark runs - tests with unequal version # for comparisons of benchmark runs - tests with unequal version
# number will not get compared. # number will not get compared.
version = 1.0 version = 2.0
# The number of abstract operations done in each round of the # The number of abstract operations done in each round of the
# test. An operation is the basic unit of what you want to # test. An operation is the basic unit of what you want to
...@@ -97,36 +189,125 @@ class Test: ...@@ -97,36 +189,125 @@ class Test:
# Number of rounds to execute per test run. This should be # Number of rounds to execute per test run. This should be
# adjusted to a figure that results in a test run-time of between # adjusted to a figure that results in a test run-time of between
# 20-50 seconds. # 1-2 seconds.
rounds = 10000 rounds = 100000
### Internal variables ### Internal variables
# Mark this class as implementing a test # Mark this class as implementing a test
is_a_test = 1 is_a_test = 1
# Misc. internal variables # Last timing: (real, run, overhead)
last_timing = (0,0,0) # last timing (real,run,calibration) last_timing = (0.0, 0.0, 0.0)
warp = 1 # warp factor this test uses
cruns = 20 # number of calibration runs
overhead = None # list of calibration timings
def __init__(self,warp=1): # Warp factor to use for this test
warp = 1
if warp > 1: # Number of calibration runs to use
self.rounds = self.rounds / warp calibration_runs = CALIBRATION_RUNS
# List of calibration timings
overhead_times = None
# List of test run timings
times = []
# Timer used for the benchmark
timer = TIMER_PLATFORM_DEFAULT
def __init__(self, warp=None, calibration_runs=None, timer=None):
# Set parameters
if warp is not None:
self.rounds = int(self.rounds / warp)
if self.rounds == 0: if self.rounds == 0:
self.rounds = 1 raise ValueError('warp factor set too high')
self.warp = warp self.warp = warp
if calibration_runs is not None:
if (not ALLOW_SKIPPING_CALIBRATION and
calibration_runs < 1):
raise ValueError('at least one calibration run is required')
self.calibration_runs = calibration_runs
if timer is not None:
timer = timer
# Init variables
self.times = [] self.times = []
self.overhead = [] self.overhead_times = []
# We want these to be in the instance dict, so that pickle # We want these to be in the instance dict, so that pickle
# saves them # saves them
self.version = self.version self.version = self.version
self.operations = self.operations self.operations = self.operations
self.rounds = self.rounds self.rounds = self.rounds
def run(self, cruns): def get_timer(self):
""" Return the timer function to use for the test.
"""
return get_timer(self.timer)
def compatible(self, other):
""" Return 1/0 depending on whether the test is compatible
with the other Test instance or not.
"""
if self.version != other.version:
return 0
if self.rounds != other.rounds:
return 0
return 1
def calibrate_test(self):
if self.calibration_runs == 0:
self.overhead_times = [0.0]
return
calibrate = self.calibrate
timer = self.get_timer()
calibration_loops = range(CALIBRATION_LOOPS)
# Time the calibration loop overhead
prep_times = []
for i in range(self.calibration_runs):
t = timer()
for i in calibration_loops:
pass
t = timer() - t
prep_times.append(t)
min_prep_time = min(prep_times)
if _debug:
print
print 'Calib. prep time = %.6fms' % (
min_prep_time * MILLI_SECONDS)
# Time the calibration runs (doing CALIBRATION_LOOPS loops of
# .calibrate() method calls each)
for i in range(self.calibration_runs):
t = timer()
for i in calibration_loops:
calibrate()
t = timer() - t
self.overhead_times.append(t / CALIBRATION_LOOPS
- min_prep_time)
# Check the measured times
min_overhead = min(self.overhead_times)
max_overhead = max(self.overhead_times)
if _debug:
print 'Calib. overhead time = %.6fms' % (
min_overhead * MILLI_SECONDS)
if min_overhead < 0.0:
raise ValueError('calibration setup did not work')
if max_overhead - min_overhead > 0.1:
raise ValueError(
'overhead calibration timing range too inaccurate: '
'%r - %r' % (min_overhead, max_overhead))
def run(self):
""" Run the test in two phases: first calibrate, then """ Run the test in two phases: first calibrate, then
do the actual test. Be careful to keep the calibration do the actual test. Be careful to keep the calibration
...@@ -134,27 +315,23 @@ class Test: ...@@ -134,27 +315,23 @@ class Test:
""" """
test = self.test test = self.test
calibrate = self.calibrate timer = self.get_timer()
clock = time.clock
# first calibrate # Get calibration
t = clock() min_overhead = min(self.overhead_times)
calibrate()
offset = clock() - t # Test run
if cruns: t = timer()
for i in range(cruns-1):
t = clock()
calibrate()
t = clock() - t
if t < offset:
offset = t
# now the real thing
t = clock()
test() test()
t = clock() - t t = timer() - t
if t < 0.01: if t < MIN_TEST_RUNTIME:
sys.exit("Lower warp required: test times < 10 ms are unreliable") raise ValueError('warp factor too high: '
self.last_timing = (t-offset,t,offset) 'test times are < 10ms')
self.times.append(t-offset) eff_time = t - min_overhead
if eff_time < 0:
raise ValueError('wrong calibration')
self.last_timing = (eff_time, t, min_overhead)
self.times.append(eff_time)
def calibrate(self): def calibrate(self):
...@@ -176,33 +353,33 @@ class Test: ...@@ -176,33 +353,33 @@ class Test:
self.operations number of operations each. self.operations number of operations each.
""" """
# do some tests
return return
def stat(self): def stat(self):
""" Returns four values: """ Return test run statistics as tuple:
minimum round time
average time per round (minimum run time,
average time per operation average run time,
average overhead time total run time,
average time per operation,
minimum overhead time)
XXX Should this take warp factors into account?
""" """
runs = len(self.times) runs = len(self.times)
if runs == 0: if runs == 0:
return 0,0 return 0.0, 0.0, 0.0, 0.0
mintime = min(self.times) min_time = min(self.times)
totaltime = reduce(operator.add,self.times,0.0) total_time = reduce(operator.add, self.times, 0.0)
avg = totaltime / float(runs) avg_time = total_time / float(runs)
op_avg = totaltime / float(runs * self.rounds * self.operations) operation_avg = total_time / float(runs
if self.overhead: * self.rounds
totaloverhead = reduce(operator.add,self.overhead,0.0) * self.operations)
ov_avg = totaloverhead / float(runs) if self.overhead_times:
min_overhead = min(self.overhead_times)
else: else:
# use self.last_timing - not too accurate min_overhead = self.last_timing[2]
ov_avg = self.last_timing[2] return min_time, avg_time, total_time, operation_avg, min_overhead
return mintime, avg, op_avg, ov_avg
### Load Setup ### Load Setup
...@@ -215,153 +392,353 @@ import Setup ...@@ -215,153 +392,353 @@ import Setup
class Benchmark: class Benchmark:
name = '?' # Name of the benchmark # Name of the benchmark
rounds = 1 # Number of rounds to run name = ''
# Number of benchmark rounds to run
rounds = 1
# Warp factor use to run the tests
warp = 1 # Warp factor warp = 1 # Warp factor
roundtime = 0 # Average round time
version = None # Benchmark version number (see __init__)
# as float x.yy
def __init__(self): # Average benchmark round time
roundtime = 0
self.tests = {} # Benchmark version number as float x.yy
self.version = 0.31 version = 2.0
def load_tests(self, setupmod, warp=1, limitnames="", verbose=0): # Produce verbose output ?
verbose = 0
self.warp = warp # Dictionary with the machine details
if limitnames: machine_details = None
limitnames = re.compile(limitnames, re.I)
# Timer used for the benchmark
timer = TIMER_PLATFORM_DEFAULT
def __init__(self, name, verbose=None, timer=None, warp=None,
calibration_runs=None):
if name:
self.name = name
else: else:
limitnames = None self.name = '%04i-%02i-%02i %02i:%02i:%02i' % \
tests = self.tests (time.localtime(time.time())[:6])
if verbose: if verbose is not None:
print 'Searching for tests ...', self.verbose = verbose
setupmod.__dict__.values() if timer is not None:
for c in setupmod.__dict__.values(): self.timer = timer
if not hasattr(c,'is_a_test'): if warp is not None:
self.warp = warp
if calibration_runs is not None:
self.calibration_runs = calibration_runs
# Init vars
self.tests = {}
if _debug:
print 'Getting machine details...'
self.machine_details = get_machine_details()
# Make .version an instance attribute to have it saved in the
# Benchmark pickle
self.version = self.version
def get_timer(self):
""" Return the timer function to use for the test.
"""
return get_timer(self.timer)
def compatible(self, other):
""" Return 1/0 depending on whether the benchmark is
compatible with the other Benchmark instance or not.
"""
if self.version != other.version:
return 0
if (self.machine_details == other.machine_details and
self.timer != other.timer):
return 0
if (self.calibration_runs == 0 and
other.calibration_runs != 0):
return 0
if (self.calibration_runs != 0 and
other.calibration_runs == 0):
return 0
return 1
def load_tests(self, setupmod, limitnames=None):
# Add tests
if self.verbose:
print 'Searching for tests ...'
print '--------------------------------------'
for testclass in setupmod.__dict__.values():
if not hasattr(testclass, 'is_a_test'):
continue continue
name = c.__name__ name = testclass.__name__
if name == 'Test': if name == 'Test':
continue continue
if limitnames is not None and limitnames.search(name) is None: if (limitnames is not None and
limitnames.search(name) is None):
continue continue
tests[name] = c(warp) self.tests[name] = testclass(
l = tests.keys() warp=self.warp,
calibration_runs=self.calibration_runs,
timer=self.timer)
l = self.tests.keys()
l.sort() l.sort()
if verbose: if self.verbose:
for name in l:
print ' %s' % name
print '--------------------------------------'
print ' %i tests found' % len(l)
print print
for t in l:
print ' ', t def calibrate(self):
print len(l), "tests found"
print 'Calibrating tests. Please wait...'
if self.verbose:
print
print 'Test min max'
print '-' * LINE
tests = self.tests.items()
tests.sort()
for i in range(len(tests)):
name, test = tests[i]
test.calibrate_test()
if self.verbose:
print '%30s: %6.3fms %6.3fms' % \
(name,
min(test.overhead_times) * MILLI_SECONDS,
max(test.overhead_times) * MILLI_SECONDS)
print print
def run(self, verbose, cruns): def run(self):
tests = self.tests.items() tests = self.tests.items()
tests.sort() tests.sort()
clock = time.clock timer = self.get_timer()
print 'Running %i round(s) of the suite at warp factor %i:' % (self.rounds, self.warp) print 'Running %i round(s) of the suite at warp factor %i:' % \
(self.rounds, self.warp)
print print
roundtime = clock() self.roundtimes = []
for i in range(self.rounds): for i in range(self.rounds):
roundstarttime = clock() if self.verbose:
if verbose: print ' Round %-25i effective absolute overhead' % (i+1)
print ' Round %-25i real abs overhead' % (i+1) total_eff_time = 0.0
for j in range(len(tests)): for j in range(len(tests)):
name, t = tests[j] name, test = tests[j]
if verbose: if self.verbose:
print '%30s:' % name, print '%30s:' % name,
t.run(cruns) test.run()
if verbose: (eff_time, abs_time, min_overhead) = test.last_timing
print ' %.3fr %.3fa %.3fo' % t.last_timing total_eff_time = total_eff_time + eff_time
if verbose: if self.verbose:
print ' ----------------------' print ' %5.0fms %5.0fms %7.3fms' % \
print ' Average round time: %.3f seconds' % \ (eff_time * MILLI_SECONDS,
((clock() - roundtime)/(i+1)) abs_time * MILLI_SECONDS,
min_overhead * MILLI_SECONDS)
self.roundtimes.append(total_eff_time)
if self.verbose:
print (' '
' ------------------------------')
print (' '
' Totals: %6.0fms' %
(total_eff_time * MILLI_SECONDS))
print print
else: else:
print '%d done in %.3f seconds' % (i+1, (clock() - roundstarttime)) print '* Round %i done in %.3f seconds.' % (i+1,
self.roundtime = (clock() - roundtime) / self.rounds total_eff_time)
print
def stat(self):
""" Return benchmark run statistics as tuple:
(minimum round time,
average round time,
maximum round time)
XXX Currently not used, since the benchmark does test
statistics across all rounds.
"""
runs = len(self.roundtimes)
if runs == 0:
return 0.0, 0.0
min_time = min(self.roundtimes)
total_time = reduce(operator.add, self.roundtimes, 0.0)
avg_time = total_time / float(runs)
max_time = max(self.roundtimes)
return (min_time, avg_time, max_time)
def print_header(self, title='Benchmark'):
print '-' * LINE
print '%s: %s' % (title, self.name)
print '-' * LINE
print
print ' Rounds: %s' % self.rounds
print ' Warp: %s' % self.warp
print ' Timer: %s' % self.timer
print
if self.machine_details:
print_machine_details(self.machine_details, indent=' ')
        print
def print_stat(self, compare_to=None, hidenoise=0): def print_benchmark(self, hidenoise=0, limitnames=None):
if not compare_to: print ('Test '
print '%-30s min run avg run per oprn overhead' % 'Tests:' ' minimum average operation overhead')
print '-'*77 print '-' * LINE
tests = self.tests.items() tests = self.tests.items()
tests.sort() tests.sort()
totalmintime = 0 total_min_time = 0.0
for name,t in tests: total_avg_time = 0.0
mintime,avg,op_avg,ov_avg = t.stat() for name, test in tests:
totalmintime += mintime if (limitnames is not None and
print '%30s: %9.2f ms %9.2f ms %6.2f us %6.2f' % \ limitnames.search(name) is None):
(name,mintime*1000.0,avg*1000.0,op_avg*1000000.0,ov_avg*1000.0) continue
print '-'*77 (min_time,
print '%30s: %9.2f ms' % \ avg_time,
('Notional minimum round time', totalmintime * 1000.0) total_time,
op_avg,
min_overhead) = test.stat()
total_min_time = total_min_time + min_time
total_avg_time = total_avg_time + avg_time
print '%30s: %5.0fms %5.0fms %6.2fus %7.3fms' % \
(name,
min_time * MILLI_SECONDS,
avg_time * MILLI_SECONDS,
op_avg * MICRO_SECONDS,
min_overhead *MILLI_SECONDS)
print '-' * LINE
print ('Totals: '
' %6.0fms %6.0fms' %
(total_min_time * MILLI_SECONDS,
total_avg_time * MILLI_SECONDS,
))
print
    def print_comparison(self, compare_to, hidenoise=0, limitnames=None):

        # Check benchmark versions
        if compare_to.version != self.version:
            print ('* Benchmark versions differ: '
                   'cannot compare this benchmark to "%s" !' %
                   compare_to.name)
            print
            self.print_benchmark(hidenoise=hidenoise,
                                 limitnames=limitnames)
            return

        # Print header
        compare_to.print_header('Comparing with')
        print ('Test                          '
               '   minimum run-time    average run-time')
        print ('                              '
               '   this    other   diff    this    other   diff')
        print '-' * LINE

        # Print test comparisons
        tests = self.tests.items()
        tests.sort()
        total_min_time = other_total_min_time = 0.0
        total_avg_time = other_total_avg_time = 0.0
        benchmarks_compatible = self.compatible(compare_to)
        tests_compatible = 1
        for name, test in tests:
            if (limitnames is not None and
                limitnames.search(name) is None):
                continue
            (min_time,
             avg_time,
             total_time,
             op_avg,
             min_overhead) = test.stat()
            total_min_time = total_min_time + min_time
            total_avg_time = total_avg_time + avg_time
            try:
                other = compare_to.tests[name]
            except KeyError:
                other = None
            if other is None:
                # Other benchmark doesn't include the given test
                min_diff, avg_diff = 'n/a', 'n/a'
                other_min_time = 0.0
                other_avg_time = 0.0
                tests_compatible = 0
            else:
                (other_min_time,
                 other_avg_time,
                 other_total_time,
                 other_op_avg,
                 other_min_overhead) = other.stat()
                other_total_min_time = other_total_min_time + other_min_time
                other_total_avg_time = other_total_avg_time + other_avg_time
                if (benchmarks_compatible and
                    test.compatible(other)):
                    # Both benchmark and tests are comparable
                    min_diff = ((min_time * self.warp) /
                                (other_min_time * other.warp) - 1.0)
                    avg_diff = ((avg_time * self.warp) /
                                (other_avg_time * other.warp) - 1.0)
                    if hidenoise and abs(min_diff) < 10.0:
                        min_diff = ''
                    else:
                        min_diff = '%+5.1f%%' % (min_diff * PERCENT)
                    if hidenoise and abs(avg_diff) < 10.0:
                        avg_diff = ''
                    else:
                        avg_diff = '%+5.1f%%' % (avg_diff * PERCENT)
                else:
                    # Benchmark or tests are not comparable
                    min_diff, avg_diff = 'n/a', 'n/a'
                    tests_compatible = 0
            print '%30s: %5.0fms %5.0fms %7s %5.0fms %5.0fms %7s' % \
                  (name,
                   min_time * MILLI_SECONDS,
                   other_min_time * MILLI_SECONDS * compare_to.warp / self.warp,
                   min_diff,
                   avg_time * MILLI_SECONDS,
                   other_avg_time * MILLI_SECONDS * compare_to.warp / self.warp,
                   avg_diff)
        print '-' * LINE

        # Summarise test results
        if not benchmarks_compatible or not tests_compatible:
            min_diff, avg_diff = 'n/a', 'n/a'
        else:
            if other_total_min_time != 0.0:
                min_diff = '%+5.1f%%' % (
                    ((total_min_time * self.warp) /
                     (other_total_min_time * compare_to.warp) - 1.0) * PERCENT)
            else:
                min_diff = 'n/a'
            if other_total_avg_time != 0.0:
                avg_diff = '%+5.1f%%' % (
                    ((total_avg_time * self.warp) /
                     (other_total_avg_time * compare_to.warp) - 1.0) * PERCENT)
            else:
                avg_diff = 'n/a'
        print ('Totals:                       '
               '  %5.0fms %5.0fms %7s %5.0fms %5.0fms %7s' %
               (total_min_time * MILLI_SECONDS,
                (other_total_min_time * compare_to.warp/self.warp
                 * MILLI_SECONDS),
                min_diff,
                total_avg_time * MILLI_SECONDS,
                (other_total_avg_time * compare_to.warp/self.warp
                 * MILLI_SECONDS),
                avg_diff
               ))
        print
        print '(this=%s, other=%s)' % (self.name,
                                       compare_to.name)
        print
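The diff columns printed above are normalized by each benchmark's warp factor before being compared, so runs recorded at different warp settings stay comparable. The following is only an illustrative sketch of that calculation with made-up timings; it assumes PERCENT is the usual factor of 100 (which the '%+5.1f%%' format implies) and defines it locally to stay self-contained.

    # Illustrative only: warp-normalized percentage difference, computed
    # the same way as in print_comparison() above, with made-up numbers.
    PERCENT = 100

    this_min_time, this_warp = 0.050, 10      # hypothetical current run
    other_min_time, other_warp = 0.060, 20    # hypothetical reference run

    min_diff = ((this_min_time * this_warp) /
                (other_min_time * other_warp) - 1.0) * PERCENT
    print '%+5.1f%%' % min_diff               # prints -58.3% (faster run)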
def print_machine():
import platform
print 'Machine Details:'
print ' Platform ID: %s' % platform.platform()
print ' Executable: %s' % sys.executable
# There's a bug in Python 2.2b1+...
if sys.version[:6] == '2.2b1+':
return
print ' Python: %s' % platform.python_version()
print ' Compiler: %s' % platform.python_compiler()
buildno, builddate = platform.python_build()
print ' Build: %s (#%s)' % (builddate, buildno)
class PyBenchCmdline(Application):

...@@ -370,50 +747,64 @@ class PyBenchCmdline(Application):
    version = __version__
    debug = _debug

    options = [ArgumentOption('-n',
                              'number of rounds',
                              Setup.Number_of_rounds),
               ArgumentOption('-f',
                              'save benchmark to file arg',
                              ''),
               ArgumentOption('-c',
                              'compare benchmark with the one in file arg',
                              ''),
               ArgumentOption('-s',
                              'show benchmark in file arg, then exit',
                              ''),
               ArgumentOption('-w',
                              'set warp factor to arg',
                              Setup.Warp_factor),
               ArgumentOption('-t',
                              'run only tests with names matching arg',
                              ''),
               ArgumentOption('-C',
                              'set the number of calibration runs to arg',
                              CALIBRATION_RUNS),
               SwitchOption('-d',
                            'hide noise in comparisons',
                            0),
               SwitchOption('-v',
                            'verbose output (not recommended)',
                            0),
               SwitchOption('--with-gc',
                            'enable garbage collection',
                            0),
               SwitchOption('--with-syscheck',
                            'use default sys check interval',
                            0),
               ArgumentOption('--timer',
                              'use given timer',
                              TIMER_PLATFORM_DEFAULT),
               ]
    about = """\
The normal operation is to run the suite and display the
results. Use -f to save them for later reuse or comparisons.

Available timers:

   time.time
   time.clock
   systimes.processtime

Examples:

python2.1 pybench.py -f p21.pybench
python2.5 pybench.py -f p25.pybench
python pybench.py -s p25.pybench -c p21.pybench
"""

    copyright = __copyright__
def handle_S(self, value):
""" Display one line stats for each benchmark file given on the
command line.
"""
for benchmark in self.files:
try:
f = open(benchmark, 'rb')
bench = pickle.load(f)
f.close()
except IOError:
print '* Error opening/reading file %s' % repr(benchmark)
else:
print '%s,%-.2f,ms' % (benchmark, bench.roundtime*1000.0)
return 0
    def main(self):

        rounds = self.values['-n']
...@@ -421,36 +812,50 @@ python pybench.py -s p15 -c p14
        show_bench = self.values['-s']
        compare_to = self.values['-c']
        hidenoise = self.values['-d']
        warp = int(self.values['-w'])
        withgc = self.values['--with-gc']
        limitnames = self.values['-t']
if limitnames:
if _debug:
print '* limiting test names to one with substring "%s"' % \
limitnames
limitnames = re.compile(limitnames, re.I)
else:
limitnames = None
        verbose = self.verbose
        withsyscheck = self.values['--with-syscheck']
        calibration_runs = self.values['-C']
        timer = self.values['--timer']

        print '-' * LINE
print 'PYBENCH %s' % __version__
print '-' * LINE
print '* using Python %s' % (string.split(sys.version)[0])
        # Switch off garbage collection
        if not withgc:
            try:
                import gc
            except ImportError:
                print '* Python version doesn\'t support garbage collection'
            else:
                gc.disable()
                print '* disabled garbage collection'

        # "Disable" sys check interval
        if not withsyscheck:
            # Too bad the check interval uses an int instead of a long...
            value = 2147483647
            sys.setcheckinterval(value)
            print '* system check interval set to maximum: %s' % value
if timer == TIMER_SYSTIMES_PROCESSTIME:
import systimes
print '* using timer: systimes.processtime (%s)' % \
systimes.SYSTIMES_IMPLEMENTATION
else:
print '* using timer: %s' % timer
if not compare_to:
print_machine()
        print
        if compare_to:
...@@ -460,8 +865,10 @@ python pybench.py -s p15 -c p14
                bench.name = compare_to
                f.close()
                compare_to = bench
            except IOError, reason:
                print '* Error opening/reading file %s: %s' % (
                    repr(compare_to),
                    reason)
                compare_to = None
        if show_bench:
...@@ -470,37 +877,52 @@ python pybench.py -s p15 -c p14
                bench = pickle.load(f)
                bench.name = show_bench
                f.close()
                bench.print_header()
                if compare_to:
                    bench.print_comparison(compare_to,
                                           hidenoise=hidenoise,
                                           limitnames=limitnames)
                else:
                    bench.print_benchmark(hidenoise=hidenoise,
                                          limitnames=limitnames)
            except IOError, reason:
                print '* Error opening/reading file %s: %s' % (
                    repr(show_bench),
                    reason)
            print
            return
        if reportfile:
            print 'Creating benchmark: %s (rounds=%i, warp=%i)' % \
                  (reportfile, rounds, warp)
            print

        # Create benchmark object
        bench = Benchmark(reportfile,
                          verbose=verbose,
                          timer=timer,
                          warp=warp,
                          calibration_runs=calibration_runs)
        bench.rounds = rounds
        bench.load_tests(Setup, limitnames=limitnames)
        try:
            bench.calibrate()
            bench.run()
        except KeyboardInterrupt:
            print
            print '*** KeyboardInterrupt -- Aborting'
            print
            return
        bench.print_header()
        if compare_to:
            bench.print_comparison(compare_to,
                                   hidenoise=hidenoise,
                                   limitnames=limitnames)
        else:
            bench.print_benchmark(hidenoise=hidenoise,
                                  limitnames=limitnames)

        # Ring bell
        sys.stderr.write('\007')

        if reportfile:
...
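The -t handling above compiles the given substring into a case-insensitive regular expression, and load_tests(), print_benchmark() and print_comparison() then skip every test whose class name it does not match. A small self-contained sketch of that filtering; the test-name list is just a sample:

    # Illustrative only: how the -t name filter selects tests.
    import re

    test_names = ['BuiltinFunctionCalls', 'PythonMethodCalls',
                  'SimpleIntegerArithmetic', 'Recursion']

    limitnames = re.compile('calls', re.I)    # e.g. pybench.py ... -t calls
    selected = [name for name in test_names
                if limitnames.search(name) is not None]
    print selected    # prints ['BuiltinFunctionCalls', 'PythonMethodCalls']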
...@@ -16,7 +16,7 @@
    platforms.

    If no supported timing methods based on process time can be found,
    the module reverts to the highest resolution wall-clock timer
    instead. The system time part will then always be 0.0.

    The module exports one public API:
...@@ -52,8 +52,8 @@ USE_CTYPES_GETPROCESSTIMES = 'cytpes GetProcessTimes() wrapper'
USE_WIN32PROCESS_GETPROCESSTIMES = 'win32process.GetProcessTimes()'
USE_RESOURCE_GETRUSAGE = 'resource.getrusage()'
USE_PROCESS_TIME_CLOCK = 'time.clock() (process time)'
USE_WALL_TIME_CLOCK = 'time.clock() (wall-clock)'
USE_WALL_TIME_TIME = 'time.time() (wall-clock)'

if sys.platform[:3] == 'win':
    # Windows platform
...@@ -63,7 +63,7 @@ if sys.platform[:3] == 'win':
    try:
        import ctypes
    except ImportError:
        # Use the wall-clock implementation time.clock(), since this
        # is the highest resolution clock available on Windows
        SYSTIMES_IMPLEMENTATION = USE_WALL_TIME_CLOCK
    else:
...@@ -91,7 +91,7 @@ if SYSTIMES_IMPLEMENTATION is None:
        # time)
        SYSTIMES_IMPLEMENTATION = USE_PROCESS_TIME_CLOCK
    else:
        # Use wall-clock implementation time.time() since this provides
        # the highest resolution clock on most systems
        SYSTIMES_IMPLEMENTATION = USE_WALL_TIME_TIME
...@@ -103,24 +103,27 @@ def getrusage_systimes():
def process_time_clock_systimes():
    return (time.clock(), 0.0)

def wall_clock_clock_systimes():
    return (time.clock(), 0.0)

def wall_clock_time_systimes():
    return (time.time(), 0.0)
# Number of clock ticks per second for the values returned
# by GetProcessTimes() on Windows.
#
# Note: Ticks returned by GetProcessTimes() are 100ns intervals on
# Windows XP. However, the process times are only updated with every
# clock tick and the frequency of these is somewhat lower: depending
# on the OS version, between 10ms and 15ms. Even worse, the process
# time seems to be allocated to the process currently running when the
# clock interrupt arrives, i.e. it is possible that the current time
# slice gets accounted to a different process.
WIN32_PROCESS_TIMES_TICKS_PER_SECOND = 1e7
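The constant above is just the unit conversion described in the comment: GetProcessTimes() counts 100 ns ticks, and one second contains 1e7 of them, so dividing a tick count by the constant yields seconds. A tiny illustrative check with a made-up tick value:

    # Illustrative only: convert a made-up 100ns tick count to seconds.
    WIN32_PROCESS_TIMES_TICKS_PER_SECOND = 1e7

    user_ticks = 150000                       # 150000 * 100ns = 15ms
    print user_ticks / WIN32_PROCESS_TIMES_TICKS_PER_SECOND   # prints 0.015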
def win32process_getprocesstimes_systimes():
    d = win32process.GetProcessTimes(win32process.GetCurrentProcess())
# Note: I'm not sure whether KernelTime on Windows is the same as
# system time on Unix - I've yet to see a non-zero value for
# KernelTime on Windows.
    return (d['UserTime'] / WIN32_PROCESS_TIMES_TICKS_PER_SECOND,
            d['KernelTime'] / WIN32_PROCESS_TIMES_TICKS_PER_SECOND)
...@@ -149,10 +152,10 @@ elif SYSTIMES_IMPLEMENTATION is USE_PROCESS_TIME_CLOCK:
    systimes = process_time_clock_systimes

elif SYSTIMES_IMPLEMENTATION is USE_WALL_TIME_CLOCK:
    systimes = wall_clock_clock_systimes

elif SYSTIMES_IMPLEMENTATION is USE_WALL_TIME_TIME:
    systimes = wall_clock_time_systimes

elif SYSTIMES_IMPLEMENTATION is USE_WIN32PROCESS_GETPROCESSTIMES:
    systimes = win32process_getprocesstimes_systimes

...@@ -163,6 +166,17 @@ elif SYSTIMES_IMPLEMENTATION is USE_CTYPES_GETPROCESSTIMES:
else:
    raise TypeError('no suitable systimes() implementation found')
def processtime():
""" Return the total time spent on the process.
This is the sum of user and system time as returned by
systimes().
"""
user, system = systimes()
return user + system
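processtime() above simply adds the user and system components returned by systimes(), which makes before/after measurements straightforward. A usage sketch, assuming the module is importable as systimes from the pybench directory; hypothetical_workload is a made-up stand-in, and very short workloads may read as 0.0 because process-time granularity is coarse:

    # Sketch: measure approximate process time spent in a piece of code
    # using the module's public API (systimes() / processtime()).
    import systimes

    def hypothetical_workload():
        # stand-in for the code being measured
        total = 0
        for i in range(100000):
            total = total + i
        return total

    start = systimes.processtime()
    hypothetical_workload()
    stop = systimes.processtime()
    print 'process time used: %.6f seconds' % (stop - start)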
### Testing

def some_workload():
...