Kirill Smelkov / ZEO · Commits

Commit 19db5a3a
Authored Mar 27, 2018 by Jason Madden, committed by GitHub on Mar 27, 2018

Merge pull request #109 from zopefoundation/issue107

Add zodbpickle to server_find_global.

Parents: 711232ff 52fb8d46
Showing 16 changed files with 124 additions and 107 deletions (+124 -107):

    .travis.yml                              +4   -2
    CHANGES.rst                              +3   -1
    src/ZEO/asyncio/client.py                +1   -1
    src/ZEO/asyncio/marshal.py               +3   -2
    src/ZEO/asyncio/mtacceptor.py            +1   -1
    src/ZEO/asyncio/server.py                +3   -3
    src/ZEO/asyncio/tests.py                 +21  -21
    src/ZEO/tests/ConnectionTests.py         +12  -11
    src/ZEO/tests/IterationTests.py          +11  -11
    src/ZEO/tests/ThreadTests.py             +1   -1
    src/ZEO/tests/testConversionSupport.py   +4   -0
    src/ZEO/tests/testTransactionBuffer.py   +1   -0
    src/ZEO/tests/testZEO.py                 +26  -26
    src/ZEO/tests/testZEO2.py                +4   -1
    src/ZEO/tests/test_cache.py              +28  -25
    src/ZEO/tests/testssl.py                 +1   -1
.travis.yml

@@ -5,11 +5,13 @@ matrix:
   - os: linux
     python: 2.7
   - os: linux
-    python: pypy-5.6.0
+    python: pypy
   - os: linux
     python: 3.4
   - os: linux
     python: 3.5
   - os: linux
     python: 3.6
+  - os: linux
+    python: 3.4
+    env: ZEO_MTACCEPTOR=1

@@ -35,6 +37,6 @@ cache:
   directories:
   - eggs
 script:
-  - bin/test -v1 -j99
+  - bin/test -vv -j99
 notifications:
   email: false
CHANGES.rst

@@ -4,7 +4,9 @@ Changelog
 5.1.2 (unreleased)
 ------------------

-- Nothing changed yet.
+- Allow ``zodbpickle.binary`` to be used in RPC requests, which is
+  necessary for compatibility with ZODB 5.4.0 on Python 2. See `issue
+  107 <https://github.com/zopefoundation/ZEO/issues/107>`_.

 5.1.1 (2017-12-18)
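For context on the entry above: on Python 2, ZODB 5.4.0 wraps oids and other bytes values in zodbpickle.binary, so pickled RPC arguments reference the zodbpickle module and the server-side unpickler has to accept that module. A minimal illustrative sketch, not part of this commit, assuming zodbpickle is installed:

    import pickle
    from zodbpickle import binary  # bytes subclass used by ZODB 5.4.0 on Python 2

    # Pickling a binary value records a reference to the global
    # 'zodbpickle.binary' in the stream.
    payload = pickle.dumps(binary(b'\x00' * 8), protocol=2)
    # Unpickling such a payload on the server ends up calling
    # server_find_global('zodbpickle', 'binary'); before this change that
    # raised ImportError("Module not allowed: zodbpickle").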
src/ZEO/asyncio/client.py

@@ -122,7 +122,7 @@ class Protocol(base.Protocol):
             cr = self.loop.create_unix_connection(
                 self.protocol_factory, self.addr, ssl=self.ssl)

-        self._connecting = cr = asyncio.async(cr, loop=self.loop)
+        self._connecting = cr = asyncio.ensure_future(cr, loop=self.loop)

         @cr.add_done_callback
         def done_connecting(future):
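The asyncio.async to asyncio.ensure_future substitutions in this and the following files are a pure rename: asyncio.async() is the older, deprecated alias of asyncio.ensure_future(), and the old name became untenable once async turned into a keyword. A standalone sketch of the new spelling, not taken from ZEO itself (the connect() coroutine is a placeholder):

    import asyncio

    async def connect():
        await asyncio.sleep(0)  # stands in for the real connection coroutine

    loop = asyncio.new_event_loop()
    # old spelling: task = asyncio.async(connect(), loop=loop)
    task = asyncio.ensure_future(connect(), loop=loop)  # wraps the coroutine in a Task
    loop.run_until_complete(task)
    loop.close()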
src/ZEO/asyncio/marshal.py

@@ -156,9 +156,10 @@ def find_global(module, name):

 def server_find_global(module, name):
     """Helper for message unpickler"""
+    if module not in ('ZopeUndo.Prefix', 'copy_reg', '__builtin__',
+                      'zodbpickle'):
+        raise ImportError("Module not allowed: %s" % (module,))
     try:
-        if module not in ('ZopeUndo.Prefix', 'copy_reg', '__builtin__'):
-            raise ImportError
         m = __import__(module, _globals, _globals, _silly)
     except ImportError as msg:
         raise ImportError("import error %s: %s" % (module, msg))
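The net effect of the hunk above: the module allow-list check now happens up front and includes 'zodbpickle', instead of living inside the try block. A rough standalone sketch of that behaviour, assumed from the hunk rather than copied from ZEO (the real function also passes _globals and _silly to __import__ and does further checks on the resulting global):

    ALLOWED_MODULES = ('ZopeUndo.Prefix', 'copy_reg', '__builtin__', 'zodbpickle')

    def restricted_find_global(module, name):
        # Reject anything outside the allow-list before importing it.
        if module not in ALLOWED_MODULES:
            raise ImportError("Module not allowed: %s" % (module,))
        m = __import__(module, {}, {}, ['__doc__'])
        return getattr(m, name)

    # restricted_find_global('zodbpickle', 'binary') now resolves, while e.g.
    # restricted_find_global('os', 'system') is still rejected.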
src/ZEO/asyncio/mtacceptor.py

@@ -191,7 +191,7 @@ class Acceptor(asyncore.dispatcher):
                 server_hostname=''
                 )

-            asyncio.async(cr, loop=loop)
+            asyncio.ensure_future(cr, loop=loop)
             loop.run_forever()
             loop.close()
src/ZEO/asyncio/server.py

@@ -152,7 +152,7 @@ assert best_protocol_version in ServerProtocol.protocols
 def new_connection(loop, addr, socket, zeo_storage, msgpack):
     protocol = ServerProtocol(loop, addr, zeo_storage, msgpack)
     cr = loop.create_connection((lambda: protocol), sock=socket)
-    asyncio.async(cr, loop=loop)
+    asyncio.ensure_future(cr, loop=loop)

 class Delay(object):
     """Used to delay response to client for synchronous calls.

@@ -231,7 +231,7 @@ class Acceptor(object):
         else:
             cr = loop.create_unix_server(self.factory, addr, ssl=ssl)

-        f = asyncio.async(cr, loop=loop)
+        f = asyncio.ensure_future(cr, loop=loop)
         server = loop.run_until_complete(f)
         self.server = server

@@ -271,7 +271,7 @@ class Acceptor(object):
         self.server.close()
-        f = asyncio.async(self.server.wait_closed(), loop=loop)
+        f = asyncio.ensure_future(self.server.wait_closed(), loop=loop)

         @f.add_done_callback
         def server_closed(f):
             # stop the loop when the server closes:
src/ZEO/asyncio/tests.py

@@ -180,7 +180,7 @@ class ClientTests(Base, setupstack.TestCase, ClientRunner):
         # Now we're connected, the cache was initialized, and the
         # queued message has been sent:
-        self.assert_(client.connected.done())
+        self.assertTrue(client.connected.done())
         self.assertEqual(cache.getLastTid(), 'a'*8)
         self.assertEqual(self.pop(), (4, False, 'foo', (1, 2)))

@@ -192,7 +192,7 @@ class ClientTests(Base, setupstack.TestCase, ClientRunner):
         # Now we can make async calls:
         f2 = self.async('bar', 3, 4)
-        self.assert_(f2.done() and f2.exception() is None)
+        self.assertTrue(f2.done() and f2.exception() is None)
         self.assertEqual(self.pop(), (0, True, 'bar', (3, 4)))

         # Loading objects gets special handling to leverage the cache.

@@ -289,8 +289,8 @@ class ClientTests(Base, setupstack.TestCase, ClientRunner):
         self.assertEqual(f1.exception().args, (exc,))

         # Because we reconnected, a new protocol and transport were created:
-        self.assert_(protocol is not loop.protocol)
-        self.assert_(transport is not loop.transport)
+        self.assertTrue(protocol is not loop.protocol)
+        self.assertTrue(transport is not loop.transport)
         protocol = loop.protocol
         transport = loop.transport

@@ -313,7 +313,7 @@ class ClientTests(Base, setupstack.TestCase, ClientRunner):
         # Because the server tid matches the cache tid, we're done connecting
         wrapper.notify_connected.assert_called_with(client, {'length': 42})
-        self.assert_(client.connected.done() and not transport.data)
+        self.assertTrue(client.connected.done() and not transport.data)
         self.assertEqual(cache.getLastTid(), b'e'*8)

         # Because we were able to update the cache, we didn't have to

@@ -322,7 +322,7 @@ class ClientTests(Base, setupstack.TestCase, ClientRunner):
         # The close method closes the connection and cache:
         client.close()
-        self.assert_(transport.closed and cache.closed)
+        self.assertTrue(transport.closed and cache.closed)

         # The client doesn't reconnect
         self.assertEqual(loop.protocol, protocol)

@@ -351,7 +351,7 @@ class ClientTests(Base, setupstack.TestCase, ClientRunner):
         self.respond(4, dict(length=42))

         # Now that verification is done, we're done connecting
-        self.assert_(client.connected.done() and not transport.data)
+        self.assertTrue(client.connected.done() and not transport.data)
         self.assertEqual(cache.getLastTid(), b'e'*8)

         # And the cache has been updated:

@@ -388,7 +388,7 @@ class ClientTests(Base, setupstack.TestCase, ClientRunner):
         self.respond(4, dict(length=42))

         # Now that verification is done, we're done connecting
-        self.assert_(client.connected.done() and not transport.data)
+        self.assertTrue(client.connected.done() and not transport.data)
         self.assertEqual(cache.getLastTid(), b'e'*8)

         # But the cache is now empty and we invalidated the database cache

@@ -402,7 +402,7 @@ class ClientTests(Base, setupstack.TestCase, ClientRunner):
             addrs, ())

         # We haven't connected yet
-        self.assert_(protocol is None and transport is None)
+        self.assertTrue(protocol is None and transport is None)

         # There are 2 connection attempts outstanding:
         self.assertEqual(sorted(loop.connecting), addrs)

@@ -413,7 +413,7 @@ class ClientTests(Base, setupstack.TestCase, ClientRunner):
         # The failed connection is attempted in the future:
         delay, func, args, _ = loop.later.pop(0)
-        self.assert_(1 <= delay <= 2)
+        self.assertTrue(1 <= delay <= 2)
         func(*args)
         self.assertEqual(sorted(loop.connecting), addrs)

@@ -447,7 +447,7 @@ class ClientTests(Base, setupstack.TestCase, ClientRunner):
         self.pop()
         self.assertFalse(client.connected.done() or transport.data)
         delay, func, args, _ = loop.later.pop(1)  # first in later is heartbeat
-        self.assert_(8 < delay < 10)
+        self.assertTrue(8 < delay < 10)
         self.assertEqual(len(loop.later), 1)  # first in later is heartbeat
         func(*args)  # connect again
         self.assertFalse(protocol is loop.protocol)

@@ -461,8 +461,8 @@ class ClientTests(Base, setupstack.TestCase, ClientRunner):
         self.pop(4)
         self.assertEqual(self.pop(), (3, False, 'get_info', ()))
         self.respond(3, dict(length=42))
-        self.assert_(client.connected.done() and not transport.data)
-        self.assert_(client.ready)
+        self.assertTrue(client.connected.done() and not transport.data)
+        self.assertTrue(client.ready)

     def test_readonly_fallback(self):
         addrs = [('1.2.3.4', 8200), ('2.2.3.4', 8200)]

@@ -493,7 +493,7 @@ class ClientTests(Base, setupstack.TestCase, ClientRunner):
         # At this point, the client is ready and using the protocol,
         # and the protocol is read-only:
-        self.assert_(client.ready)
+        self.assertTrue(client.ready)
         self.assertEqual(client.protocol, protocol)
         self.assertEqual(protocol.read_only, True)
         connected = client.connected

@@ -502,7 +502,7 @@ class ClientTests(Base, setupstack.TestCase, ClientRunner):
         self.assertEqual(self.pop(), (4, False, 'get_info', ()))
         self.respond(4, dict(length=42))
-        self.assert_(connected.done())
+        self.assertTrue(connected.done())

         # We connect the second address:
         loop.connect_connecting(addrs[1])

@@ -527,7 +527,7 @@ class ClientTests(Base, setupstack.TestCase, ClientRunner):
         self.assertFalse(client.protocol is protocol)
         self.assertEqual(client.protocol, loop.protocol)
         self.assertEqual(protocol.closed, True)
-        self.assert_(client.connected is not connected)
+        self.assertTrue(client.connected is not connected)
         self.assertFalse(client.connected.done())
         protocol, transport = loop.protocol, loop.transport
         self.assertEqual(protocol.read_only, False)

@@ -535,8 +535,8 @@ class ClientTests(Base, setupstack.TestCase, ClientRunner):
         # Now, we finish verification
         self.respond(2, 'b'*8)
         self.respond(3, dict(length=42))
-        self.assert_(client.ready)
-        self.assert_(client.connected.done())
+        self.assertTrue(client.ready)
+        self.assertTrue(client.connected.done())

     def test_invalidations_while_verifying(self):
         # While we're verifying, invalidations are ignored

@@ -553,8 +553,8 @@ class ClientTests(Base, setupstack.TestCase, ClientRunner):
         # We'll disconnect:
         protocol.connection_lost(Exception("lost"))
-        self.assert_(protocol is not loop.protocol)
-        self.assert_(transport is not loop.transport)
+        self.assertTrue(protocol is not loop.protocol)
+        self.assertTrue(transport is not loop.transport)
         protocol = loop.protocol
         transport = loop.transport

@@ -606,7 +606,7 @@ class ClientTests(Base, setupstack.TestCase, ClientRunner):
         with mock.patch("ZEO.asyncio.client.logger.error") as error:
             self.assertFalse(error.called)
             protocol.data_received(sized(self.enc + b'200'))
-            self.assert_(isinstance(error.call_args[0][1], ProtocolError))
+            self.assertTrue(isinstance(error.call_args[0][1], ProtocolError))

     def test_get_peername(self):
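The test-file hunks above and below all follow one pattern: unittest's deprecated assertion aliases are replaced with their current names (assert_ and failUnless become assertTrue, failIf becomes assertFalse, assertEquals becomes assertEqual, assertNotEquals becomes assertNotEqual, and membership checks move to assertIn). A tiny self-contained example of the preferred spellings, standard unittest and not ZEO-specific:

    import unittest

    class ModernAssertions(unittest.TestCase):
        def test_preferred_names(self):
            self.assertTrue(1 < 2)        # instead of assert_ / failUnless
            self.assertFalse([])          # instead of failIf
            self.assertEqual(2 + 2, 4)    # instead of assertEquals
            self.assertNotEqual(1, 2)     # instead of assertNotEquals
            self.assertIn('a', 'abc')     # instead of failUnless('a' in 'abc')

    if __name__ == '__main__':
        unittest.main()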
src/ZEO/tests/ConnectionTests.py

@@ -266,7 +266,7 @@ class ConnectionTests(CommonSetupTearDown):
         self.startServer(create=0, index=0, ro_svr=1)
         # Start a read-only-fallback client
         self._storage = self.openClientStorage(read_only_fallback=1)
-        self.assert_(self._storage.isReadOnly())
+        self.assertTrue(self._storage.isReadOnly())
         # Stores should fail here
         self.assertRaises(ReadOnlyError, self._dostore)
         self._storage.close()

@@ -493,7 +493,7 @@ class ConnectionTests(CommonSetupTearDown):
             # Wait for all threads to finish
             for t in threads:
                 t.join(60)
-                self.failIf(t.isAlive(), "%s didn't die" % t.getName())
+                self.assertFalse(t.isAlive(), "%s didn't die" % t.getName())
         finally:
             for t in threads:
                 t.closeclients()

@@ -949,7 +949,7 @@ class ReconnectionTests(CommonSetupTearDown):
                 break
             except ClientDisconnected:
                 time.sleep(0.5)
-        self.assert_(did_a_store)
+        self.assertTrue(did_a_store)
         self._storage.close()

 class TimeoutTests(CommonSetupTearDown):

@@ -971,7 +971,7 @@ class TimeoutTests(CommonSetupTearDown):
                     ):
                     break
             else:
-                self.assert_(False, 'bad logging')
+                self.assertTrue(False, 'bad logging')

         storage.close()

@@ -993,7 +993,7 @@ class TimeoutTests(CommonSetupTearDown):
     def checkTimeoutAfterVote(self):
         self._storage = storage = self.openClientStorage()
         # Assert that the zeo cache is empty
-        self.assert_(not list(storage._cache.contents()))
+        self.assertTrue(not list(storage._cache.contents()))
         # Create the object
         oid = storage.new_oid()
         obj = MinPO(7)

@@ -1005,17 +1005,17 @@ class TimeoutTests(CommonSetupTearDown):
         storage.tpc_vote(t)
         # Now sleep long enough for the storage to time out
         time.sleep(3)
-        self.assert_(
+        self.assertTrue(
             (not storage.is_connected())
             or
             (storage.connection_count_for_tests > old_connection_count)
             )
         storage._wait()
-        self.assert_(storage.is_connected())
+        self.assertTrue(storage.is_connected())
         # We expect finish to fail
         self.assertRaises(ClientDisconnected, storage.tpc_finish, t)
         # The cache should still be empty
-        self.assert_(not list(storage._cache.contents()))
+        self.assertTrue(not list(storage._cache.contents()))
         # Load should fail since the object should not be in either the cache
         # or the server.
         self.assertRaises(KeyError, storage.load, oid, '')

@@ -1079,10 +1079,10 @@ class MSTThread(threading.Thread):
         for c in clients:
             # Check that we got serials for all oids
             for oid in c.__oids:
-                testcase.failUnless(oid in c.__serials)
+                testcase.assertIn(oid, c.__serials)
             # Check that we got serials for no other oids
             for oid in c.__serials.keys():
-                testcase.failUnless(oid in c.__oids)
+                testcase.assertIn(oid, c.__oids)

     def closeclients(self):
         # Close clients opened by run()

@@ -1102,7 +1102,8 @@ def short_timeout(self):
 # Run IPv6 tests if V6 sockets are supported
 try:
-    socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
+    with socket.socket(socket.AF_INET6, socket.SOCK_STREAM) as s:
+        pass
 except (socket.error, AttributeError):
     pass
 else:
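One change in ConnectionTests.py that is not an assertion rename is the IPv6 capability probe at the bottom of the file: the throwaway socket is now opened in a with block so it gets closed instead of leaking a file descriptor. A standalone sketch of the same probe (Python 3; the ipv6_supported name is illustrative, not from ZEO):

    import socket

    try:
        # Opening and immediately closing an AF_INET6 socket checks for IPv6 support.
        with socket.socket(socket.AF_INET6, socket.SOCK_STREAM):
            pass
    except (socket.error, AttributeError):
        ipv6_supported = False
    else:
        ipv6_supported = True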
src/ZEO/tests/IterationTests.py

@@ -33,7 +33,7 @@ class IterationTests(object):
         # make sure there's no race conditions cleaning out the weak refs
         gc.disable()
         try:
-            self.assertEquals(0, len(self._storage._iterator_ids))
+            self.assertEqual(0, len(self._storage._iterator_ids))
         except AssertionError:
             # Ok, we have ids. That should also mean that the
             # weak dictionary has the same length.

@@ -50,7 +50,7 @@ class IterationTests(object):
             self.assertEqual(len(self._storage._iterators),
                              len(self._storage._iterator_ids))
-            self.assertEquals(0, len(self._storage._iterator_ids))
+            self.assertEqual(0, len(self._storage._iterator_ids))
         finally:
             if gc_enabled:
                 gc.enable()

@@ -63,7 +63,7 @@ class IterationTests(object):
         iid = server.iterator_start(None, None)
         # None signals the end of iteration.
-        self.assertEquals(None, server.iterator_next(iid))
+        self.assertEqual(None, server.iterator_next(iid))
         # The server has disposed the iterator already.
         self.assertRaises(KeyError, server.iterator_next, iid)

@@ -80,10 +80,10 @@ class IterationTests(object):
         # At this point, a wrapping iterator might not have called the CS
         # iterator yet. We'll consume one item to make sure this happens.
         six.advance_iterator(iterator)
-        self.assertEquals(1, len(self._storage._iterator_ids))
+        self.assertEqual(1, len(self._storage._iterator_ids))
         iid = list(self._storage._iterator_ids)[0]
-        self.assertEquals([], list(iterator))
-        self.assertEquals(0, len(self._storage._iterator_ids))
+        self.assertEqual([], list(iterator))
+        self.assertEqual(0, len(self._storage._iterator_ids))
         # The iterator has run through, so the server has already disposed it.
         self.assertRaises(KeyError, self._storage._call, 'iterator_next', iid)

@@ -98,7 +98,7 @@ class IterationTests(object):
         # don't see the transaction we just wrote being picked up, because
         # iterators only see the state from the point in time when they were
         # created.)
-        self.assert_(list(iterator))
+        self.assertTrue(list(iterator))

     def checkIteratorGCStorageCommitting(self):
         # We want the iterator to be garbage-collected, so we don't keep any

@@ -111,7 +111,7 @@ class IterationTests(object):
         self._dostore()
         six.advance_iterator(self._storage.iterator())

-        self.assertEquals(1, len(self._storage._iterator_ids))
+        self.assertEqual(1, len(self._storage._iterator_ids))
         iid = list(self._storage._iterator_ids)[0]

         # GC happens at the transaction boundary. After that, both the storage

@@ -154,7 +154,7 @@ class IterationTests(object):
         # as well. I'm calling this directly to avoid accidentally
         # calling tpc_abort implicitly.
         self._storage.notify_disconnected()
-        self.assertEquals(0, len(self._storage._iterator_ids))
+        self.assertEqual(0, len(self._storage._iterator_ids))

     def checkIteratorParallel(self):
         self._dostore()

@@ -163,10 +163,10 @@ class IterationTests(object):
         iter2 = self._storage.iterator()
         txn_info1 = six.advance_iterator(iter1)
         txn_info2 = six.advance_iterator(iter2)
-        self.assertEquals(txn_info1.tid, txn_info2.tid)
+        self.assertEqual(txn_info1.tid, txn_info2.tid)
         txn_info1 = six.advance_iterator(iter1)
         txn_info2 = six.advance_iterator(iter2)
-        self.assertEquals(txn_info1.tid, txn_info2.tid)
+        self.assertEqual(txn_info1.tid, txn_info2.tid)
         self.assertRaises(StopIteration, next, iter1)
         self.assertRaises(StopIteration, next, iter2)
src/ZEO/tests/ThreadTests.py

@@ -119,7 +119,7 @@ class ThreadTests(object):
         for t in threads:
             t.join(30)
         for i in threads:
-            self.failUnless(not t.isAlive())
+            self.assertFalse(t.isAlive())

     # Helper for checkMTStores
     def mtstorehelper(self):
src/ZEO/tests/testConversionSupport.py

@@ -122,6 +122,9 @@ First, fake out the connection manager so we can make a connection:
     ...             next = None
     ...
     ...         return oid, oid*8, 'data ' + oid, next
+    ...
+    ...     def close(self):
+    ...         pass

     >>> client = ZEO.client(
     ...     '', wait=False, _client_factory=Client)

@@ -138,6 +141,7 @@ Now we'll have our way with it's private _server attr:
     2
     3
     4
+    >>> client.close()
     """
src/ZEO/tests/testTransactionBuffer.py

@@ -51,6 +51,7 @@ class TransBufTests(unittest.TestCase):
         for i, (oid, d, resolved) in enumerate(tbuf):
             self.assertEqual((oid, d), data[i][0])
             self.assertEqual(resolved, data[i][1])
+        tbuf.close()

 def test_suite():
     return unittest.makeSuite(TransBufTests, 'check')
src/ZEO/tests/testZEO.py

@@ -221,15 +221,15 @@ class MiscZEOTests(object):
         # available right after successful connection, this is required now.
         addr = self._storage._addr
         storage2 = ClientStorage(addr, **self._client_options())
-        self.assert_(storage2.is_connected())
-        self.assertEquals(ZODB.utils.z64, storage2.lastTransaction())
+        self.assertTrue(storage2.is_connected())
+        self.assertEqual(ZODB.utils.z64, storage2.lastTransaction())
         storage2.close()
         self._dostore()
         storage3 = ClientStorage(addr, **self._client_options())
-        self.assert_(storage3.is_connected())
-        self.assertEquals(8, len(storage3.lastTransaction()))
-        self.assertNotEquals(ZODB.utils.z64, storage3.lastTransaction())
+        self.assertTrue(storage3.is_connected())
+        self.assertEqual(8, len(storage3.lastTransaction()))
+        self.assertNotEqual(ZODB.utils.z64, storage3.lastTransaction())
         storage3.close()

 class GenericTestBase(

@@ -422,12 +422,12 @@ class FileStorageTests(FullGenericTests):
         # ClientStorage itself doesn't implement IStorageIteration, but the
         # FileStorage on the other end does, and thus the ClientStorage
         # instance that is connected to it reflects this.
-        self.failIf(ZODB.interfaces.IStorageIteration.implementedBy(
+        self.assertFalse(ZODB.interfaces.IStorageIteration.implementedBy(
             ZEO.ClientStorage.ClientStorage))
-        self.failUnless(ZODB.interfaces.IStorageIteration.providedBy(
+        self.assertTrue(ZODB.interfaces.IStorageIteration.providedBy(
             self._storage))
         # This is communicated using ClientStorage's _info object:
-        self.assertEquals(self._expected_interfaces,
+        self.assertEqual(self._expected_interfaces,
             self._storage._info['interfaces']
             )

@@ -552,7 +552,7 @@ class ZRPCConnectionTests(ZEO.tests.ConnectionTests.CommonSetupTearDown):
         log = str(handler)
         handler.uninstall()
-        self.assert_("Client loop stopped unexpectedly" in log)
+        self.assertTrue("Client loop stopped unexpectedly" in log)

     def checkExceptionLogsAtError(self):
         # Test the exceptions are logged at error

@@ -570,7 +570,7 @@ class ZRPCConnectionTests(ZEO.tests.ConnectionTests.CommonSetupTearDown):
         self.assertRaises(ZODB.POSException.POSKeyError,
                           self._storage.history, None, None)
         handler.uninstall()
-        self.assertEquals(str(handler), '')
+        self.assertEqual(str(handler), '')

     def checkConnectionInvalidationOnReconnect(self):

@@ -639,7 +639,7 @@ class CommonBlobTests(object):
         tfname = bd_fh.name
         oid = self._storage.new_oid()
         data = zodb_pickle(blob)
-        self.assert_(os.path.exists(tfname))
+        self.assertTrue(os.path.exists(tfname))

         t = TransactionMetaData()
         try:

@@ -650,9 +650,9 @@ class CommonBlobTests(object):
         except:
             self._storage.tpc_abort(t)
             raise
-        self.assert_(not os.path.exists(tfname))
+        self.assertTrue(not os.path.exists(tfname))
         filename = self._storage.fshelper.getBlobFilename(oid, revid)
-        self.assert_(os.path.exists(filename))
+        self.assertTrue(os.path.exists(filename))
         with open(filename, 'rb') as f:
             self.assertEqual(somedata, f.read())

@@ -693,11 +693,11 @@ class CommonBlobTests(object):
         filename = self._storage.loadBlob(oid, serial)
         with open(filename, 'rb') as f:
             self.assertEqual(somedata, f.read())
-        self.assert_(not(os.stat(filename).st_mode & stat.S_IWRITE))
-        self.assert_((os.stat(filename).st_mode & stat.S_IREAD))
+        self.assertTrue(not(os.stat(filename).st_mode & stat.S_IWRITE))
+        self.assertTrue((os.stat(filename).st_mode & stat.S_IREAD))

     def checkTemporaryDirectory(self):
-        self.assertEquals(os.path.join(self.blob_cache_dir, 'tmp'),
+        self.assertEqual(os.path.join(self.blob_cache_dir, 'tmp'),
                           self._storage.temporaryDirectory())

     def checkTransactionBufferCleanup(self):

@@ -726,10 +726,10 @@ class BlobAdaptedFileStorageTests(FullGenericTests, CommonBlobTests):
             somedata.write(("%s\n" % i).encode('ascii'))

         def check_data(path):
-            self.assert_(os.path.exists(path))
-            f = open(path, 'rb')
+            self.assertTrue(os.path.exists(path))
             somedata.seek(0)
             d1 = d2 = 1
+            with open(path, 'rb') as f:
                 while d1 or d2:
                     d1 = f.read(8096)
                     d2 = somedata.read(8096)

@@ -743,7 +743,7 @@ class BlobAdaptedFileStorageTests(FullGenericTests, CommonBlobTests):
         tfname = bd_fh.name
         oid = self._storage.new_oid()
         data = zodb_pickle(blob)
-        self.assert_(os.path.exists(tfname))
+        self.assertTrue(os.path.exists(tfname))

         t = TransactionMetaData()
         try:

@@ -756,7 +756,7 @@ class BlobAdaptedFileStorageTests(FullGenericTests, CommonBlobTests):
             raise

         # The uncommitted data file should have been removed
-        self.assert_(not os.path.exists(tfname))
+        self.assertTrue(not os.path.exists(tfname))

         # The file should be in the cache ...
         filename = self._storage.fshelper.getBlobFilename(oid, revid)

@@ -768,7 +768,7 @@ class BlobAdaptedFileStorageTests(FullGenericTests, CommonBlobTests):
             ZODB.blob.BushyLayout().getBlobFilePath(oid, revid),
             )
-        self.assert_(server_filename.startswith(self.blobdir))
+        self.assertTrue(server_filename.startswith(self.blobdir))
         check_data(server_filename)

         # If we remove it from the cache and call loadBlob, it should

@@ -1203,7 +1203,7 @@ def runzeo_without_configfile():
     ... ''' % sys.path)

     >>> import subprocess, re
-    >>> print(re.sub(b'\d\d+|[:]', b'', subprocess.Popen(
+    >>> print(re.sub(br'\d\d+|[:]', b'', subprocess.Popen(
     ...     [sys.executable, 'runzeo', '-a:0', '-ft', '--test'],
     ...     stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
     ...     ).stdout.read()).decode('ascii'))
src/ZEO/tests/testZEO2.py

@@ -149,6 +149,7 @@ We can start another client and get the storage lock.
     >>> zs1.tpc_finish('1').set_sender(0, zs1.connection)

     >>> fs.close()
+    >>> server.close()
     """

 def errors_in_vote_should_clear_lock():

@@ -408,6 +409,7 @@ If clients disconnect while waiting, they will be dequeued:
     >>> logging.getLogger('ZEO').setLevel(logging.NOTSET)
     >>> logging.getLogger('ZEO').removeHandler(handler)
+    >>> server.close()
     """

 def lock_sanity_check():

@@ -489,6 +491,8 @@ ZEOStorage as closed and see if trying to get a lock cleans it up:
     >>> logging.getLogger('ZEO').setLevel(logging.NOTSET)
     >>> logging.getLogger('ZEO').removeHandler(handler)
+    >>> server.close()
+
     """

 def test_suite():

@@ -507,4 +511,3 @@ def test_suite():
 if __name__ == '__main__':
     unittest.main(defaultTest='test_suite')
-
src/ZEO/tests/test_cache.py

@@ -141,12 +141,12 @@ class CacheTests(ZODB.tests.util.TestCase):
         for i in range(50):
             n = p64(i)
             cache.store(n, n, None, data[i])
-            self.assertEquals(len(cache), i + 1)
+            self.assertEqual(len(cache), i + 1)
         # The cache is now almost full. The next insert
         # should delete some objects.
         n = p64(50)
         cache.store(n, n, None, data[51])
-        self.assert_(len(cache) < 51)
+        self.assertTrue(len(cache) < 51)

         # TODO: Need to make sure eviction of non-current data
         # are handled correctly.

@@ -174,41 +174,44 @@ class CacheTests(ZODB.tests.util.TestCase):
         eq(dict([(k, dict(v)) for (k, v) in copy.noncurrent.items()]),
            dict([(k, dict(v)) for (k, v) in self.cache.noncurrent.items()]),
            )
+        copy.close()

     def testCurrentObjectLargerThanCache(self):
         if self.cache.path:
             os.remove(self.cache.path)
+        self.cache.close()
         self.cache = ZEO.cache.ClientCache(size=50)

         # We store an object that is a bit larger than the cache can handle.
         self.cache.store(n1, n2, None, "x"*64)
         # We can see that it was not stored.
-        self.assertEquals(None, self.cache.load(n1))
+        self.assertEqual(None, self.cache.load(n1))
         # If an object cannot be stored in the cache, it must not be
         # recorded as current.
-        self.assert_(n1 not in self.cache.current)
+        self.assertTrue(n1 not in self.cache.current)
         # Regression test: invalidation must still work.
         self.cache.invalidate(n1, n2)

     def testOldObjectLargerThanCache(self):
         if self.cache.path:
             os.remove(self.cache.path)
+        self.cache.close()
         cache = ZEO.cache.ClientCache(size=50)

         # We store an object that is a bit larger than the cache can handle.
         cache.store(n1, n2, n3, "x"*64)
         # We can see that it was not stored.
-        self.assertEquals(None, cache.load(n1))
+        self.assertEqual(None, cache.load(n1))
         # If an object cannot be stored in the cache, it must not be
         # recorded as non-current.
-        self.assert_(1 not in cache.noncurrent)
+        self.assertTrue(1 not in cache.noncurrent)

     def testVeryLargeCaches(self):
         cache = ZEO.cache.ClientCache('cache', size=(1<<32)+(1<<20))
         cache.store(n1, n2, None, b"x")
         cache.close()
         cache = ZEO.cache.ClientCache('cache', size=(1<<33)+(1<<20))
-        self.assertEquals(cache.load(n1), (b'x', n2))
+        self.assertEqual(cache.load(n1), (b'x', n2))
         cache.close()

     def testConversionOfLargeFreeBlocks(self):

@@ -225,8 +228,8 @@ class CacheTests(ZODB.tests.util.TestCase):
         cache.close()
         with open('cache', 'rb') as f:
             f.seek(12)
-            self.assertEquals(f.read(1), b'f')
-            self.assertEquals(struct.unpack(">I", f.read(4))[0],
+            self.assertEqual(f.read(1), b'f')
+            self.assertEqual(struct.unpack(">I", f.read(4))[0],
                               ZEO.cache.max_block_size)

         if not sys.platform.startswith('linux'):

@@ -261,8 +264,8 @@ class CacheTests(ZODB.tests.util.TestCase):
             'cache', size=ZEO.cache.ZEC_HEADER_SIZE+100*recsize+extra)
         for i in range(100):
             cache.store(p64(i), n1, None, data)
-        self.assertEquals(len(cache), 100)
-        self.assertEquals(os.path.getsize(
+        self.assertEqual(len(cache), 100)
+        self.assertEqual(os.path.getsize(
             'cache'), ZEO.cache.ZEC_HEADER_SIZE+100*recsize+extra)

         # Now make it smaller

@@ -270,10 +273,10 @@ class CacheTests(ZODB.tests.util.TestCase):
         small = 50
         cache = ZEO.cache.ClientCache(
             'cache', size=ZEO.cache.ZEC_HEADER_SIZE+small*recsize+extra)
-        self.assertEquals(len(cache), small)
-        self.assertEquals(os.path.getsize(
+        self.assertEqual(len(cache), small)
+        self.assertEqual(os.path.getsize(
             'cache'), ZEO.cache.ZEC_HEADER_SIZE+small*recsize+extra)
-        self.assertEquals(set(u64(oid) for (oid, tid) in cache.contents()),
+        self.assertEqual(set(u64(oid) for (oid, tid) in cache.contents()),
                           set(range(small)))
         for i in range(100, 110):
             cache.store(p64(i), n1, None, data)

@@ -282,9 +285,9 @@ class CacheTests(ZODB.tests.util.TestCase):
         # evicted because of the optimization to assure that we
         # always get a free block after a new allocated block.
         expected_len = small - 1
-        self.assertEquals(len(cache), expected_len)
+        self.assertEqual(len(cache), expected_len)
         expected_oids = set(list(range(11, 50))+list(range(100, 110)))
-        self.assertEquals(
+        self.assertEqual(
             set(u64(oid) for (oid, tid) in cache.contents()),
             expected_oids)

@@ -292,8 +295,8 @@ class CacheTests(ZODB.tests.util.TestCase):
         cache.close()
         cache = ZEO.cache.ClientCache(
             'cache', size=ZEO.cache.ZEC_HEADER_SIZE+small*recsize+extra)
-        self.assertEquals(len(cache), expected_len)
-        self.assertEquals(set(u64(oid) for (oid, tid) in cache.contents()),
+        self.assertEqual(len(cache), expected_len)
+        self.assertEqual(set(u64(oid) for (oid, tid) in cache.contents()),
                           expected_oids)

         # Now make it bigger

@@ -301,10 +304,10 @@ class CacheTests(ZODB.tests.util.TestCase):
         large = 150
         cache = ZEO.cache.ClientCache(
             'cache', size=ZEO.cache.ZEC_HEADER_SIZE+large*recsize+extra)
-        self.assertEquals(len(cache), expected_len)
-        self.assertEquals(os.path.getsize(
+        self.assertEqual(len(cache), expected_len)
+        self.assertEqual(os.path.getsize(
             'cache'), ZEO.cache.ZEC_HEADER_SIZE+large*recsize+extra)
-        self.assertEquals(set(u64(oid) for (oid, tid) in cache.contents()),
+        self.assertEqual(set(u64(oid) for (oid, tid) in cache.contents()),
                           expected_oids)

@@ -313,19 +316,19 @@ class CacheTests(ZODB.tests.util.TestCase):
         # We use large-2 for the same reason we used small-1 above.
         expected_len = large - 2
-        self.assertEquals(len(cache), expected_len)
+        self.assertEqual(len(cache), expected_len)
         expected_oids = set(list(range(11, 50))
                             + list(range(106, 110))
                             + list(range(200, 305)))
-        self.assertEquals(set(u64(oid) for (oid, tid) in cache.contents()),
+        self.assertEqual(set(u64(oid) for (oid, tid) in cache.contents()),
                           expected_oids)

         # Make sure we can reopen with same size
         cache.close()
         cache = ZEO.cache.ClientCache(
             'cache', size=ZEO.cache.ZEC_HEADER_SIZE+large*recsize+extra)
-        self.assertEquals(len(cache), expected_len)
-        self.assertEquals(set(u64(oid) for (oid, tid) in cache.contents()),
+        self.assertEqual(len(cache), expected_len)
+        self.assertEqual(set(u64(oid) for (oid, tid) in cache.contents()),
                           expected_oids)

         # Cleanup
src/ZEO/tests/testssl.py

@@ -118,7 +118,7 @@ class SSLConfigTest(ZEOConfigTestBase):
         stop()

 @unittest.skipIf(forker.ZEO4_SERVER, "ZEO4 servers don't support SSL")
-@mock.patch(('asyncio' if PY3 else 'trollius') + '.async')
+@mock.patch(('asyncio' if PY3 else 'trollius') + '.ensure_future')
 @mock.patch(('asyncio' if PY3 else 'trollius') + '.set_event_loop')
 @mock.patch(('asyncio' if PY3 else 'trollius') + '.new_event_loop')
 @mock.patch('ZEO.asyncio.client.new_event_loop')