nexedi / neoppod

Commit a5f2f604
authored Jan 16, 2014 by Julien Muchembled

    tests: review report and mark known failures as expected

parent d250deca

Showing 7 changed files with 105 additions and 134 deletions
neo/scripts/runner.py                 +63  -118
neo/tests/__init__.py                 +20    -6
neo/tests/functional/testClient.py     +2    -0
neo/tests/threaded/test.py             +8    -2
neo/tests/zodb/testPack.py             +7    -6
neo/tests/zodb/testUndo.py             +4    -1
tools/test_bot                         +1    -1
neo/scripts/runner.py
@@ -22,7 +22,9 @@ import time
 import sys
 import neo
 import os
+from collections import Counter, defaultdict
+from cStringIO import StringIO
+from unittest.runner import _WritelnDecorator
 from neo.tests import getTempDirectory, __dict__ as neo_tests__dict__
 from neo.tests.benchmark import BenchmarkRunner
@@ -91,17 +93,22 @@ ZODB_TEST_MODULES = [
 ]

-class NeoTestRunner(unittest.TestResult):
+class NeoTestRunner(unittest.TextTestResult):
     """ Custom result class to build report with statistics per module """

-    def __init__(self, title):
-        unittest.TestResult.__init__(self)
+    def __init__(self, title, verbosity):
+        super(NeoTestRunner, self).__init__(
+            _WritelnDecorator(sys.stderr), False, verbosity)
         self._title = title
-        self.modulesStats = {}
         self.failedImports = {}
-        self.lastStart = None
+        self.run_dict = defaultdict(int)
+        self.time_dict = defaultdict(int)
         self.temp_directory = getTempDirectory()

+    def wasSuccessful(self):
+        return not (self.failures or self.errors or self.unexpectedSuccesses)
+
     def run(self, name, modules):
         print '\n', name
         suite = unittest.TestSuite()
@@ -123,137 +130,74 @@ class NeoTestRunner(unittest.TestResult):
             suite.addTests(loader.loadTestsFromModule(test_module))
         suite.run(self)

-    class ModuleStats(object):
-        run = 0
-        errors = 0
-        success = 0
-        failures = 0
-        time = 0.0
-
-    def _getModuleStats(self, test):
-        module = test.__class__.__module__
-        module = tuple(module.split('.'))
-        try:
-            return self.modulesStats[module]
-        except KeyError:
-            self.modulesStats[module] = self.ModuleStats()
-            return self.modulesStats[module]
-
-    def _updateTimer(self, stats):
-        stats.time += time.time() - self.lastStart
-
     def startTest(self, test):
-        unittest.TestResult.startTest(self, test)
-        logging.info(" * TEST %s", test)
-        stats = self._getModuleStats(test)
-        stats.run += 1
-        self.lastStart = time.time()
-
-    def addSuccess(self, test):
-        print "OK"
-        unittest.TestResult.addSuccess(self, test)
-        stats = self._getModuleStats(test)
-        stats.success += 1
-        self._updateTimer(stats)
-
-    def addError(self, test, err):
-        print "ERROR"
-        unittest.TestResult.addError(self, test, err)
-        stats = self._getModuleStats(test)
-        stats.errors += 1
-        self._updateTimer(stats)
-
-    def addFailure(self, test, err):
-        print "FAIL"
-        unittest.TestResult.addFailure(self, test, err)
-        stats = self._getModuleStats(test)
-        stats.failures += 1
-        self._updateTimer(stats)
+        super(NeoTestRunner, self).startTest(test)
+        self.run_dict[test.__class__.__module__] += 1
+        self.start_time = time.time()
+
+    def stopTest(self, test):
+        self.time_dict[test.__class__.__module__] += \
+            time.time() - self.start_time
+        super(NeoTestRunner, self).stopTest(test)

     def _buildSummary(self, add_status):
-        success = self.testsRun - len(self.errors) - len(self.failures)
+        unexpected_count = len(self.errors) + len(self.failures) \
+                         + len(self.unexpectedSuccesses)
+        expected_count = len(self.expectedFailures)
+        success = self.testsRun - unexpected_count - expected_count
         add_status('Directory', self.temp_directory)
         if self.testsRun:
             add_status('Status', '%.3f%%' % (success * 100.0 / self.testsRun))
-        for var in os.environ.iterkeys():
+        for var in os.environ:
             if var.startswith('NEO_TEST'):
                 add_status(var, os.environ[var])
         # visual
-        header = "%25s | run | success | errors | fails | time\n" % 'Test Module'
-        separator = "%25s-+---------+---------+---------+---------+----------\n" % ('-' * 25)
+        header = "%25s | run | unexpected | expected | skipped | time\n" % 'Test Module'
+        separator = "%25s-+-------+------------+----------+---------+----------\n" % ('-' * 25)
         format = "%25s | %3s | %3s | %3s | %3s | %6.2fs\n"
         group_f = "%25s | | | | |\n"
         # header
-        s = ' ' * 30 + ' NEO TESTS REPORT'
-        s += '\n'
-        s += '\n' + header + separator
+        s = ' ' * 30 + ' NEO TESTS REPORT\n\n' + header + separator
         group = None
-        t_success = 0
+        unexpected = Counter(x[0].__class__.__module__
+            for x in (self.errors, self.failures) for x in x)
+        unexpected.update(x.__class__.__module__
+            for x in self.unexpectedSuccesses)
+        expected = Counter(x[0].__class__.__module__
+            for x in self.expectedFailures)
+        skipped = Counter(x[0].__class__.__module__ for x in self.skipped)
+        total_time = 0
         # for each test case
-        for k, v in sorted(self.modulesStats.items()):
+        for k, v in sorted(self.run_dict.iteritems()):
             # display group below its content
-            _group = '.'.join(k[:-1])
-            if group is None:
-                group = _group
+            _group, name = k.rsplit('.', 1)
             if _group != group:
                 if group:
                     s += separator + group_f % group + separator
                 group = _group
             # test case stats
-            t_success += v.success
-            run, success = v.run or '.', v.success or '.'
-            errors, failures = v.errors or '.', v.failures or '.'
-            name = k[-1].lstrip('test')
-            args = (name, run, success, errors, failures, v.time)
-            s += format % args
+            t = self.time_dict[k]
+            total_time += t
+            s += format % (name.lstrip('test'), v, unexpected.get(k, '.'),
+                expected.get(k, '.'), skipped.get(k, '.'), t)
         # the last group
         s += separator + group_f % group + separator
         # the final summary
-        errors, failures = len(self.errors) or '.', len(self.failures) or '.'
-        args = ("Summary", self.testsRun, t_success, errors, failures, self.time)
-        s += format % args + separator + '\n'
-        return s
-
-    def _buildErrors(self):
-        s = ''
-        test_formatter = lambda t: t.id()
-        if len(self.errors):
-            s += '\nERRORS:\n'
-            for test, trace in self.errors:
-                s += "%s\n" % test_formatter(test)
-                s += "-------------------------------------------------------------\n"
-                s += trace
-                s += "-------------------------------------------------------------\n"
-                s += '\n'
-        if len(self.failures):
-            s += '\nFAILURES:\n'
-            for test, trace in self.failures:
-                s += "%s\n" % test_formatter(test)
-                s += "-------------------------------------------------------------\n"
-                s += trace
-                s += "-------------------------------------------------------------\n"
-                s += '\n'
-        return s
-
-    def _buildWarnings(self):
-        s = '\n'
-        if self.failedImports:
-            s += 'Failed imports :\n'
-            for module, err in self.failedImports.items():
-                s += '%s:\n%s' % (module, err)
-        s += '\n'
-        return s
+        s += format % ("Summary", self.testsRun, unexpected_count or '.',
+            expected_count or '.', len(self.skipped) or '.', total_time) \
+            + separator + '\n'
+        return "%s Tests, %s Failed" % (self.testsRun, unexpected_count), s

     def buildReport(self, add_status):
-        self.time = sum([s.time for s in self.modulesStats.values()])
-        self.subject = "%s Tests, %s Failed" % (self.testsRun,
-            len(self.errors) + len(self.failures))
-        summary = self._buildSummary(add_status)
-        errors = self._buildErrors()
-        warnings = self._buildWarnings()
-        report = '\n'.join([summary, errors, warnings])
-        return (self.subject, report)
+        # TODO: Add 'Broken' for known failures (not a regression)
+        #       and 'Fixed' for unexpected successes.
+        subject, summary = self._buildSummary(add_status)
+        body = StringIO()
+        body.write(summary)
+        for test in self.unexpectedSuccesses:
+            body.write("UNEXPECTED SUCCESS: %s\n" % self.getDescription(test))
+        self.stream = _WritelnDecorator(body)
+        self.printErrors()
+        return subject, body.getvalue()

 class TestRunner(BenchmarkRunner):
@@ -264,6 +208,8 @@ class TestRunner(BenchmarkRunner):
             help='Unit & threaded tests')
         parser.add_option('-z', '--zodb', action='store_true',
             help='ZODB test suite running on a NEO')
+        parser.add_option('-v', '--verbose', action='store_true',
+            help='Verbose output')
         parser.format_epilog = lambda _: """
 Environment Variables:
   NEO_TESTS_ADAPTER         Default is SQLite for threaded clusters,
@@ -291,14 +237,13 @@ Environment Variables:
             unit = options.unit,
             functional = options.functional,
             zodb = options.zodb,
+            verbosity = 2 if options.verbose else 1,
         )

     def start(self):
         config = self._config
         # run requested tests
-        runner = NeoTestRunner(
-            title = config.title or 'Neo',
-        )
+        runner = NeoTestRunner(config.title or 'Neo', config.verbosity)
         try:
             if config.unit:
                 runner.run('Unit tests', UNIT_TEST_MODULES)
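A note on the reworked report above: _buildSummary now tallies results per test module with collections.Counter and defaultdict instead of the old ModuleStats objects. A standalone sketch of that aggregation idea, with made-up module names (in the real code the entries are (test, traceback) pairs and the keys come from test.__class__.__module__):

    from collections import Counter

    # simplified stand-ins for TestResult.errors / TestResult.failures
    errors = [('neo.tests.testFoo', '<traceback>'),
              ('neo.tests.testBar', '<traceback>')]
    failures = [('neo.tests.testFoo', '<traceback>')]

    # count unexpected results per module, as the new _buildSummary does
    unexpected = Counter(module for module, _ in errors)
    unexpected.update(module for module, _ in failures)

    print(unexpected)
    # Counter({'neo.tests.testFoo': 2, 'neo.tests.testBar': 1})
    print(unexpected.get('neo.tests.testBaz', '.'))
    # '.' -- what the report prints for a module with nothing unexpected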
neo/tests/__init__.py
@@ -16,6 +16,7 @@
 import __builtin__
 import errno
+import functools
 import os
 import random
 import socket
@@ -31,11 +32,28 @@ from neo.lib.protocol import NodeTypes, Packets, UUID_NAMESPACES
 from neo.lib.util import getAddressType
 from time import time
 from struct import pack, unpack
+from unittest.case import _ExpectedFailure, _UnexpectedSuccess

 try:
     from ZODB.utils import newTid
 except ImportError:
     pass

+def expectedFailure(exception=AssertionError):
+    def decorator(func):
+        def wrapper(*args, **kw):
+            try:
+                func(*args, **kw)
+            except exception, e:
+                # XXX: passing sys.exc_info() causes deadlocks
+                raise _ExpectedFailure((type(e), None, None))
+            raise _UnexpectedSuccess
+        return functools.wraps(func)(wrapper)
+    if callable(exception) and not isinstance(exception, type):
+        func = exception
+        exception = Exception
+        return decorator(func)
+    return decorator
+
 DB_PREFIX = os.getenv('NEO_DB_PREFIX', 'test_neo')
 DB_ADMIN = os.getenv('NEO_DB_ADMIN', 'root')
 DB_PASSWD = os.getenv('NEO_DB_PASSWD', '')
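The expectedFailure helper defined above is what the test files changed below import. A minimal, self-contained sketch of the two usage patterns that appear in this commit (decorator form as in testClient.py and threaded/test.py, class-level wrapping of an inherited test as in testPack.py and testUndo.py); the class and test names here are hypothetical, and it assumes Python 2.7 with the neo package importable:

    import unittest
    from neo.tests import expectedFailure   # helper added by this commit

    class UpstreamMixin(object):
        # stands in for a test mix-in inherited from the ZODB test suite
        def testInherited(self):
            raise AssertionError('known NEO limitation')

    class ExampleTests(UpstreamMixin, unittest.TestCase):

        # decorator form: name the exception the known bug currently raises
        @expectedFailure(AttributeError)
        def testKnownBug(self):
            raise AttributeError('not fixed yet')

        # class-level form: re-bind an inherited test as an expected failure
        testInherited = expectedFailure()(UpstreamMixin.testInherited)

    if __name__ == '__main__':
        # both tests are recorded as expected failures, not as errors
        unittest.main()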
@@ -117,8 +135,6 @@ def setupMySQLdb(db_list, user=DB_USER, password='', clear_databases=True):

 class NeoTestBase(unittest.TestCase):

     def setUp(self):
-        sys.stdout.write(' * %s ' % (self.id(), ))
-        sys.stdout.flush()
         logging.name = self.setupLog()
         unittest.TestCase.setUp(self)
@@ -126,17 +142,15 @@ class NeoTestBase(unittest.TestCase):
         test_case, logging.name = self.id().rsplit('.', 1)
         logging.setup(os.path.join(getTempDirectory(), test_case + '.log'))

-    def tearDown(self,
-            success='ok' if sys.version_info < (2, 7) else 'success'):
-        self._tearDown(sys._getframe(1).f_locals[success])
+    def tearDown(self):
+        assert self.tearDown.im_func is NeoTestBase.tearDown.im_func
+        self._tearDown(sys._getframe(1).f_locals['success'])

     def _tearDown(self, success):
         # Kill all unfinished transactions for next test.
         # Note we don't even abort them because it may require a valid
         # connection to a master node (see Storage.sync()).
         transaction.manager.__init__()
-        print

     class failureException(AssertionError):
         def __init__(self, msg=None):
neo/tests/functional/testClient.py
@@ -26,6 +26,7 @@ from ZODB.FileStorage import FileStorage
 from ZODB.POSException import ConflictError
 from ZODB.tests.StorageTestBase import zodb_pickle
 from persistent import Persistent
+from .. import expectedFailure
 from . import NEOCluster, NEOFunctionalTest

 TREE_SIZE = 6
@@ -220,6 +221,7 @@ class ClientTests(NEOFunctionalTest):
         self.__checkTree(root['trees'])

+    @expectedFailure(AttributeError)
     def testExportFileStorageBug(self):
         # currently fails due to a bug in ZODB.FileStorage
         self.testExport(True)
neo/tests/threaded/test.py
@@ -26,6 +26,7 @@ from neo.storage.transactions import TransactionManager, \
 from neo.lib.connection import ConnectionClosed, MTClientConnection
 from neo.lib.protocol import CellStates, ClusterStates, NodeStates, Packets, \
     ZERO_TID
+from .. import expectedFailure, _UnexpectedSuccess
 from . import ClientApplication, NEOCluster, NEOThreadedTest, Patch
 from neo.lib.util import add64, makeChecksum
 from neo.client.pool import CELL_CONNECTED, CELL_GOOD
@@ -237,6 +238,7 @@ class Test(NEOThreadedTest):
         self.assertEqual(self._testDeadlockAvoidance([2, 4]),
             [DelayedError, DelayedError, ConflictError, ConflictError])

+    @expectedFailure(POSException.ConflictError)
     def testDeadlockAvoidance(self):
         # This test fail because deadlock avoidance is not fully implemented.
         # 0: C1 -> S1
@@ -717,9 +719,13 @@ class Test(NEOThreadedTest):
             # XXX: This is an expected failure. A ttid column was added to
             #      'trans' table to permit recovery, by checking that the
             #      transaction was really committed.
-            self.assertRaises(ConnectionClosed, t.commit)
+            try:
+                t.commit()
+                raise _UnexpectedSuccess
+            except ConnectionClosed:
+                pass
             t.begin()
-            c.root()['x']
+            expectedFailure(self.assertIn)('x', c.root())
         finally:
             cluster.stop()
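One detail in the last hunk above: expectedFailure(self.assertIn)('x', c.root()) relies on the helper's callable branch, where a function passed instead of an exception class is wrapped immediately and any Exception counts as the expected failure. A standalone toy of that form (the assertion function is made up; assumes Python 2.7 and an importable neo package):

    from unittest.case import _ExpectedFailure
    from neo.tests import expectedFailure   # helper added by this commit

    def known_bad_assertion():
        # stand-in for an assertion that is known to fail today
        raise AssertionError('known bug')

    try:
        # wrap and call in one expression; Exception is the expected error
        expectedFailure(known_bad_assertion)()
    except _ExpectedFailure:
        # inside a running TestCase, unittest itself catches this and records
        # an expected failure instead of an error
        print('recorded as an expected failure')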
neo/tests/zodb/testPack.py
@@ -16,14 +16,11 @@
 # along with this program. If not, see <http://www.gnu.org/licenses/>.

 import unittest
-try:
-    from ZODB.tests.PackableStorage import PackableStorageWithOptionalGC
-except ImportError:
-    from ZODB.tests.PackableStorage import PackableStorage as \
-        PackableStorageWithOptionalGC
-from ZODB.tests.PackableStorage import PackableUndoStorage
+from ZODB.tests.PackableStorage import \
+    PackableStorageWithOptionalGC, PackableUndoStorage
 from ZODB.tests.StorageTestBase import StorageTestBase
+from .. import expectedFailure
 from . import ZODBTestCase

 class PackableTests(ZODBTestCase, StorageTestBase,
@@ -32,6 +29,10 @@ class PackableTests(ZODBTestCase, StorageTestBase,
     def setUp(self):
         super(PackableTests, self).setUp(cluster_kw={'adapter': 'MySQL'})

+    checkPackAllRevisions = expectedFailure()(
+        PackableStorageWithOptionalGC.checkPackAllRevisions)
+    checkPackUndoLog = expectedFailure()(PackableUndoStorage.checkPackUndoLog)
+
 if __name__ == "__main__":
     suite = unittest.makeSuite(PackableTests, 'check')
     unittest.main(defaultTest='suite')
neo/tests/zodb/testUndo.py
@@ -19,11 +19,14 @@ from ZODB.tests.StorageTestBase import StorageTestBase
 from ZODB.tests.TransactionalUndoStorage import TransactionalUndoStorage
 from ZODB.tests.ConflictResolution import ConflictResolvingTransUndoStorage

+from .. import expectedFailure
 from . import ZODBTestCase

 class UndoTests(ZODBTestCase, StorageTestBase,
         TransactionalUndoStorage, ConflictResolvingTransUndoStorage):
-    pass
+
+    checkTransactionalUndoAfterPack = expectedFailure()(
+        TransactionalUndoStorage.checkTransactionalUndoAfterPack)

 # Don't run this test. It cannot run with pipelined store, and is not executed
 # on Zeo - but because Zeo doesn't have an iterator, while Neo has.
tools/test_bot
@@ -85,7 +85,7 @@ def main():
                 revision[:7], os.path.basename(test_home), backend)
             if tests:
                 subprocess.call([os.path.join(bin, 'neotestrunner'),
-                    '-' + tests, '--title', 'NEO tests ' + title,
+                    '-v' + tests, '--title', 'NEO tests ' + title,
                     ] + sys.argv[1:arg_count])
             if 'm' in tasks:
                 subprocess.call([os.path.join(bin, 'python'),