Commit 227953b9 authored by Jim Fulton's avatar Jim Fulton

Simplify MVCC by determining transaction start time using lastTransaction.

This implements: https://github.com/zopefoundation/ZODB/issues/50

Rather than watching invalidations, simply use 1 + the storage's
lastTransaction(), which is equivalent to, but much simpler than,
waiting for the first invalidation after a transaction starts.
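
A minimal sketch of the idea (a hypothetical helper; the patch inlines
this in Connection.newTransaction()):

    from ZODB.utils import p64, u64

    def transaction_start_time(storage):
        # Everything with tid <= lastTransaction() is already committed
        # and should be visible, so 1 + lastTransaction() is the
        # smallest loadBefore() cutoff that hides nothing committed.
        return p64(u64(storage.lastTransaction()) + 1)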

More importantly, it means we can always use loadBefore() and get away
from load().  We no longer have to worry about the ordering of
invalidations and load() results.
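
For example, the read path reduces to roughly this sketch (simplified;
see Connection._setstate in the diff below for the real error
handling):

    def load_state(storage, oid, txn_time):
        # loadBefore returns (data, serial, end_tid), or None if no
        # revision before txn_time survives (e.g. it was packed away).
        r = storage.loadBefore(oid, txn_time)
        if r is None:
            raise KeyError(oid)  # the patch raises ReadConflictError
        data, serial, end_tid = r
        return data, serial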

Many thanks to NEO for pointing the way toward this simplification!

Implementing this initially caused a deadlock, because DB.open()
called Connection.open() while holding a database lock, and
Connection.open() now calls IStorage.lastTransaction(), which acquires
a storage lock. (It's not clear that lastTransaction() really needs a
storage lock.)  Meanwhile, IStorage.tpc_finish() calls a DB method
that requires the DB lock while holding the storage lock.  Fixing this
required moving the call to Connection.open() outside the region where
the DB lock was held.
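
Schematically, the fix looks like this (lock and pool names are
assumed for illustration, not the actual attribute names):

    import threading

    db_lock = threading.Lock()  # stand-in for the DB allocation lock

    def db_open(pool, transaction_manager):
        # Do only pool bookkeeping while the DB lock is held ...
        with db_lock:
            conn = pool.pop()
        # ... and call Connection.open() -- which now calls
        # storage.lastTransaction() and so takes the storage lock --
        # only after the DB lock has been released.
        conn.open(transaction_manager)
        return conn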

To debug the problem above, I greatly improved lock-debugging
support. Now all of the ZODB code imports Lock, RLock, and Condition
from ZODB.utils. If the DEBUG_LOCKING environment variable is set to a
non-empty value, these are wrapped so that debugging information is
printed as they are used. This made spotting the nature of the
deadlock easier.
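
The wrappers look roughly like this (a sketch only; the real code in
ZODB.utils differs in detail):

    import os
    import sys
    import threading

    def Lock(lock_class=threading.Lock):
        lock = lock_class()
        if not os.environ.get('DEBUG_LOCKING'):
            return lock
        return _DebugLock(lock)

    class _DebugLock(object):
        def __init__(self, lock):
            self._lock = lock

        def acquire(self, *args):
            # Report the caller's file and line around each acquire.
            f = sys._getframe(1)
            print('[acquire %s:%s' % (f.f_code.co_filename, f.f_lineno))
            result = self._lock.acquire(*args)
            print('acquire %s:%s]' % (f.f_code.co_filename, f.f_lineno))
            return result

        def release(self):
            self._lock.release()

        __enter__ = acquire

        def __exit__(self, *exc_info):
            self.release()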

Of course, a change this basic broke lots of tests. Most of the
breakage arises from the fact that connections now call
lastTransaction on storages at transaction boundaries.  Lots of tests
didn't clean up databases and connections properly.  I fixed many
tests, but ultimately gave up and added some extra cleanup code that
violated transaction-manager underware (and the underware's privates)
to clear transaction synchronizers in test setup and tear-down.  I plan
to add a transaction manager API for this and to use it in a
subsequent PR.

This change makes database and connection hygiene a bit more
important, especially for tests, because a connection will continue to
interact with storages if it isn't properly closed, which can lead to
errors once the storage is closed.  I chose not to swallow these
errors in Connection, preferring instead to clean up the tests.

The thread debugging and test changes make this PR larger than I would
have liked. Apologies in advance to the reviewers.
parent 2ae41705
...@@ -12,12 +12,12 @@ ...@@ -12,12 +12,12 @@
# #
############################################################################## ##############################################################################
"""ZODB transfer activity monitoring """ZODB transfer activity monitoring
"""
$Id$"""
import threading
import time import time
from . import utils
class ActivityMonitor: class ActivityMonitor:
"""ZODB load/store activity monitor """ZODB load/store activity monitor
...@@ -31,7 +31,7 @@ class ActivityMonitor: ...@@ -31,7 +31,7 @@ class ActivityMonitor:
def __init__(self, history_length=3600): def __init__(self, history_length=3600):
self.history_length = history_length # Number of seconds self.history_length = history_length # Number of seconds
self.log = [] # [(time, loads, stores)] self.log = [] # [(time, loads, stores)]
self.trim_lock = threading.Lock() self.trim_lock = utils.Lock()
def closedConnection(self, conn): def closedConnection(self, conn):
log = self.log log = self.log
......
...@@ -18,7 +18,6 @@ its use is not recommended. It's still here for historical reasons. ...@@ -18,7 +18,6 @@ its use is not recommended. It's still here for historical reasons.
""" """
from __future__ import print_function from __future__ import print_function
import threading
import time import time
import logging import logging
import sys import sys
...@@ -28,10 +27,10 @@ import zope.interface ...@@ -28,10 +27,10 @@ import zope.interface
from persistent.TimeStamp import TimeStamp from persistent.TimeStamp import TimeStamp
import ZODB.interfaces import ZODB.interfaces
from ZODB import POSException from . import POSException, utils
from ZODB.utils import z64, oid_repr, byte_ord, byte_chr from .utils import z64, oid_repr, byte_ord, byte_chr
from ZODB.UndoLogCompatible import UndoLogCompatible from .UndoLogCompatible import UndoLogCompatible
from ZODB._compat import dumps, _protocol, py2_hasattr from ._compat import dumps, _protocol, py2_hasattr
log = logging.getLogger("ZODB.BaseStorage") log = logging.getLogger("ZODB.BaseStorage")
...@@ -85,8 +84,8 @@ class BaseStorage(UndoLogCompatible): ...@@ -85,8 +84,8 @@ class BaseStorage(UndoLogCompatible):
log.debug("create storage %s", self.__name__) log.debug("create storage %s", self.__name__)
# Allocate locks: # Allocate locks:
self._lock = threading.RLock() self._lock = utils.RLock()
self.__commit_lock = threading.Lock() self.__commit_lock = utils.Lock()
# Comment out the following 4 lines to debug locking: # Comment out the following 4 lines to debug locking:
self._lock_acquire = self._lock.acquire self._lock_acquire = self._lock.acquire
...@@ -108,45 +107,6 @@ class BaseStorage(UndoLogCompatible): ...@@ -108,45 +107,6 @@ class BaseStorage(UndoLogCompatible):
else: else:
self._oid = oid self._oid = oid
########################################################################
# The following methods are normally overridden on instances,
# except when debugging:
def _lock_acquire(self, *args):
f = sys._getframe(1)
sys.stdout.write("[la(%s:%s)\n" % (f.f_code.co_filename, f.f_lineno))
sys.stdout.flush()
self._lock.acquire(*args)
sys.stdout.write("la(%s:%s)]\n" % (f.f_code.co_filename, f.f_lineno))
sys.stdout.flush()
def _lock_release(self, *args):
f = sys._getframe(1)
sys.stdout.write("[lr(%s:%s)\n" % (f.f_code.co_filename, f.f_lineno))
sys.stdout.flush()
self._lock.release(*args)
sys.stdout.write("lr(%s:%s)]\n" % (f.f_code.co_filename, f.f_lineno))
sys.stdout.flush()
def _commit_lock_acquire(self, *args):
f = sys._getframe(1)
sys.stdout.write("[ca(%s:%s)\n" % (f.f_code.co_filename, f.f_lineno))
sys.stdout.flush()
self.__commit_lock.acquire(*args)
sys.stdout.write("ca(%s:%s)]\n" % (f.f_code.co_filename, f.f_lineno))
sys.stdout.flush()
def _commit_lock_release(self, *args):
f = sys._getframe(1)
sys.stdout.write("[cr(%s:%s)\n" % (f.f_code.co_filename, f.f_lineno))
sys.stdout.flush()
self.__commit_lock.release(*args)
sys.stdout.write("cr(%s:%s)]\n" % (f.f_code.co_filename, f.f_lineno))
sys.stdout.flush()
#
########################################################################
def sortKey(self): def sortKey(self):
"""Return a string that can be used to sort storage instances. """Return a string that can be used to sort storage instances.
......
...@@ -12,13 +12,11 @@ ...@@ -12,13 +12,11 @@
# #
############################################################################## ##############################################################################
"""Database connection support """Database connection support
"""
$Id$""" from __future__ import print_function
import logging import logging
import sys import sys
import tempfile import tempfile
import threading
import warnings import warnings
import os import os
import time import time
...@@ -54,6 +52,7 @@ import six ...@@ -54,6 +52,7 @@ import six
global_reset_counter = 0 global_reset_counter = 0
noop = lambda : None
def resetCaches(): def resetCaches():
"""Causes all connection caches to be reset as connections are reopened. """Causes all connection caches to be reset as connections are reopened.
...@@ -184,7 +183,7 @@ class Connection(ExportImport, object): ...@@ -184,7 +183,7 @@ class Connection(ExportImport, object):
# type of an oid is str. TODO: remove the related now-unnecessary # type of an oid is str. TODO: remove the related now-unnecessary
# critical sections (if any -- this needs careful thought). # critical sections (if any -- this needs careful thought).
self._inv_lock = threading.Lock() self._inv_lock = utils.Lock()
self._invalidated = set() self._invalidated = set()
# Flag indicating whether the cache has been invalidated: # Flag indicating whether the cache has been invalidated:
...@@ -253,7 +252,17 @@ class Connection(ExportImport, object): ...@@ -253,7 +252,17 @@ class Connection(ExportImport, object):
if obj is not None: if obj is not None:
return obj return obj
p, serial = self._storage.load(oid, '') before = self.before
if before is None:
# Normal case
before = self._txn_time
data = self._storage.loadBefore(oid, before)
if data is None:
# see the comment in setstate
raise ReadConflictError()
p, _, _ = data
obj = self._reader.getGhost(p) obj = self._reader.getGhost(p)
# Avoid infinite loop if obj tries to load its state before # Avoid infinite loop if obj tries to load its state before
...@@ -348,17 +357,9 @@ class Connection(ExportImport, object): ...@@ -348,17 +357,9 @@ class Connection(ExportImport, object):
if self.before is not None: if self.before is not None:
# This is a historical connection. Invalidations are irrelevant. # This is a historical connection. Invalidations are irrelevant.
return return
self._inv_lock.acquire()
try:
if self._txn_time is None:
self._txn_time = tid
elif (tid is not None) and (tid < self._txn_time):
raise AssertionError("invalidations out of order, %r < %r"
% (tid, self._txn_time))
with self._inv_lock:
self._invalidated.update(oids) self._invalidated.update(oids)
finally:
self._inv_lock.release()
def invalidateCache(self): def invalidateCache(self):
self._inv_lock.acquire() self._inv_lock.acquire()
...@@ -404,7 +405,6 @@ class Connection(ExportImport, object): ...@@ -404,7 +405,6 @@ class Connection(ExportImport, object):
def sync(self): def sync(self):
"""Manually update the view on the database.""" """Manually update the view on the database."""
self.transaction_manager.abort() self.transaction_manager.abort()
self._storage_sync()
def getDebugInfo(self): def getDebugInfo(self):
"""Returns a tuple with different items for debugging the """Returns a tuple with different items for debugging the
...@@ -534,7 +534,6 @@ class Connection(ExportImport, object): ...@@ -534,7 +534,6 @@ class Connection(ExportImport, object):
invalidated = dict.fromkeys(self._invalidated) invalidated = dict.fromkeys(self._invalidated)
self._invalidated = set() self._invalidated = set()
self._txn_time = None
if self._invalidatedCache: if self._invalidatedCache:
self._invalidatedCache = False self._invalidatedCache = False
invalidated = self._cache.cache_data.copy() invalidated = self._cache.cache_data.copy()
...@@ -825,18 +824,18 @@ class Connection(ExportImport, object): ...@@ -825,18 +824,18 @@ class Connection(ExportImport, object):
# We don't do anything before a commit starts. # We don't do anything before a commit starts.
pass pass
# Call the underlying storage's sync() method (if any), and process def newTransaction(self, transaction=None):
# pending invalidations regardless. Of course this should only be
# called at transaction boundaries.
def _storage_sync(self, *ignored):
self._readCurrent.clear() self._readCurrent.clear()
sync = getattr(self._storage, 'sync', 0) getattr(self._storage, 'sync', noop)()
if sync: if self.opened:
sync() self._txn_time = p64(u64(self._storage.lastTransaction()) + 1)
# Note that we flush invalidations *after* setting the transaction
# time, because invalidating persistent classes causes data to
# be loaded.
self._flush_invalidations() self._flush_invalidations()
afterCompletion = _storage_sync afterCompletion = newTransaction
newTransaction = _storage_sync
# Transaction-manager synchronization -- ISynchronizer # Transaction-manager synchronization -- ISynchronizer
########################################################################## ##########################################################################
...@@ -866,63 +865,26 @@ class Connection(ExportImport, object): ...@@ -866,63 +865,26 @@ class Connection(ExportImport, object):
raise raise
try: try:
self._setstate(obj, oid)
except ConflictError:
raise
except:
self._log.exception("Couldn't load state for %s %s",
className(obj), oid_repr(oid))
raise
def _setstate(self, obj, oid):
# Helper for setstate(), which provides logging of failures.
# We accept the oid param, which must be the same as obj._p_oid,
# as a performance optimization for the pure-Python persistent implementation
# where accessing an attribute involves __getattribute__ calls
# The control flow is complicated here to avoid loading an
# object revision that we are sure we aren't going to use. As
# a result, invalidation tests occur before and after the
# load. We can only be sure about invalidations after the
# load.
# If an object has been invalidated, among the cases to consider:
# - Try MVCC
# - Raise ConflictError.
if self.before is not None:
# Load data that was current before the time we have.
before = self.before before = self.before
t = self._storage.loadBefore(oid, before) if before is None:
if t is None: # Normal case
raise POSKeyError() # historical connection!
p, serial, end = t
else:
# There is a harmless data race with self._invalidated. A
# dict update could go on in another thread, but we don't care
# because we have to check again after the load anyway.
if self._invalidatedCache: if self._invalidatedCache:
raise ReadConflictError() raise ReadConflictError()
before = self._txn_time
data = self._storage.loadBefore(oid, before)
if data is None:
# We had data (by definition, since we have a
# reference to it), but it's gone. It must have
# updated since this transaction, and been packed
# away, cuz the tests are mean. The best we can do is
# raise a ReadConflictError and try again
raise ReadConflictError()
if (oid in self._invalidated): p, serial, _ = data
self._load_before_or_conflict(obj)
return
p, serial = self._storage.load(oid, '')
self._load_count += 1 self._load_count += 1
self._inv_lock.acquire()
try:
invalid = oid in self._invalidated
finally:
self._inv_lock.release()
if invalid:
self._load_before_or_conflict(obj)
return
self._reader.setGhostState(obj, p) self._reader.setGhostState(obj, p)
obj._p_serial = serial obj._p_serial = serial
self._cache.update_object_size_estimation(oid, len(p)) self._cache.update_object_size_estimation(oid, len(p))
...@@ -933,42 +895,12 @@ class Connection(ExportImport, object): ...@@ -933,42 +895,12 @@ class Connection(ExportImport, object):
obj._p_blob_uncommitted = None obj._p_blob_uncommitted = None
obj._p_blob_committed = self._storage.loadBlob(oid, serial) obj._p_blob_committed = self._storage.loadBlob(oid, serial)
def _load_before_or_conflict(self, obj): except ConflictError:
"""Load non-current state for obj or raise ReadConflictError.""" raise
if not self._setstate_noncurrent(obj): except:
self._register(obj) self._log.exception("Couldn't load state for %s %s",
self._conflicts[obj._p_oid] = True className(obj), oid_repr(oid))
raise ReadConflictError(object=obj) raise
def _setstate_noncurrent(self, obj):
"""Set state using non-current data.
Return True if state was available, False if not.
"""
try:
# Load data that was current before the commit at txn_time.
t = self._storage.loadBefore(obj._p_oid, self._txn_time)
except KeyError:
return False
if t is None:
return False
data, start, end = t
# The non-current transaction must have been written before
# txn_time. It must be current at txn_time, but could have
# been modified at txn_time.
assert start < self._txn_time, (u64(start), u64(self._txn_time))
assert end is not None
assert self._txn_time <= end, (u64(self._txn_time), u64(end))
self._reader.setGhostState(obj, data)
obj._p_serial = start
# MVCC Blob support
if isinstance(obj, Blob):
obj._p_blob_uncommitted = None
obj._p_blob_committed = self._storage.loadBlob(obj._p_oid, start)
return True
def register(self, obj): def register(self, obj):
"""Register obj with the current transaction manager. """Register obj with the current transaction manager.
...@@ -1044,20 +976,20 @@ class Connection(ExportImport, object): ...@@ -1044,20 +976,20 @@ class Connection(ExportImport, object):
register for afterCompletion() calls. register for afterCompletion() calls.
""" """
self.opened = time.time()
if transaction_manager is None: if transaction_manager is None:
transaction_manager = transaction.manager transaction_manager = transaction.manager
self.transaction_manager = transaction_manager self.transaction_manager = transaction_manager
transaction_manager.registerSynch(self)
self.opened = time.time()
if self._reset_counter != global_reset_counter: if self._reset_counter != global_reset_counter:
# New code is in place. Start a new cache. # New code is in place. Start a new cache.
self._resetCache() self._resetCache()
else:
self._flush_invalidations()
transaction_manager.registerSynch(self) self.newTransaction()
if self._cache is not None: if self._cache is not None:
self._cache.incrgc() # This is a good time to do some GC self._cache.incrgc() # This is a good time to do some GC
...@@ -1083,6 +1015,7 @@ class Connection(ExportImport, object): ...@@ -1083,6 +1015,7 @@ class Connection(ExportImport, object):
self._reader._cache = cache self._reader._cache = cache
def _release_resources(self): def _release_resources(self):
assert not self.opened
for c in six.itervalues(self.connections): for c in six.itervalues(self.connections):
if c._mvcc_storage: if c._mvcc_storage:
if c._storage is not None: if c._storage is not None:
...@@ -1129,7 +1062,7 @@ class Connection(ExportImport, object): ...@@ -1129,7 +1062,7 @@ class Connection(ExportImport, object):
def savepoint(self): def savepoint(self):
if self._savepoint_storage is None: if self._savepoint_storage is None:
tmpstore = TmpStore(self._normal_storage) tmpstore = TmpStore(self._normal_storage, self._txn_time)
self._savepoint_storage = tmpstore self._savepoint_storage = tmpstore
self._storage = self._savepoint_storage self._storage = self._savepoint_storage
...@@ -1178,7 +1111,7 @@ class Connection(ExportImport, object): ...@@ -1178,7 +1111,7 @@ class Connection(ExportImport, object):
self._creating.update(src.creating) self._creating.update(src.creating)
for oid in oids: for oid in oids:
data, serial = src.load(oid, src) data, serial, _ = src.loadBefore(oid, self._txn_time)
obj = self._cache.get(oid, None) obj = self._cache.get(oid, None)
if obj is not None: if obj is not None:
self._cache.update_object_size_estimation( self._cache.update_object_size_estimation(
...@@ -1251,12 +1184,10 @@ class TmpStore: ...@@ -1251,12 +1184,10 @@ class TmpStore:
"""A storage-like thing to support savepoints.""" """A storage-like thing to support savepoints."""
def __init__(self, storage): def __init__(self, storage, before):
self._storage = storage self._storage = storage
for method in ( self._before = before
'getName', 'new_oid', 'getSize', 'sortKey', 'loadBefore', for method in 'getName', 'new_oid', 'getSize', 'sortKey', 'isReadOnly':
'isReadOnly'
):
setattr(self, method, getattr(storage, method)) setattr(self, method, getattr(storage, method))
self._file = tempfile.TemporaryFile(prefix='TmpStore') self._file = tempfile.TemporaryFile(prefix='TmpStore')
...@@ -1278,10 +1209,11 @@ class TmpStore: ...@@ -1278,10 +1209,11 @@ class TmpStore:
remove_committed_dir(self._blob_dir) remove_committed_dir(self._blob_dir)
self._blob_dir = None self._blob_dir = None
def load(self, oid, version): def loadBefore(self, oid, before):
assert before == self._before
pos = self.index.get(oid) pos = self.index.get(oid)
if pos is None: if pos is None:
return self._storage.load(oid, '') return self._storage.loadBefore(oid, before)
self._file.seek(pos) self._file.seek(pos)
h = self._file.read(8) h = self._file.read(8)
oidlen = u64(h) oidlen = u64(h)
...@@ -1291,7 +1223,7 @@ class TmpStore: ...@@ -1291,7 +1223,7 @@ class TmpStore:
h = self._file.read(16) h = self._file.read(16)
size = u64(h[8:]) size = u64(h[8:])
serial = h[:8] serial = h[:8]
return self._file.read(size), serial return self._file.read(size), serial, None
def store(self, oid, serial, data, version, transaction): def store(self, oid, serial, data, version, transaction):
# we have this funny signature so we can reuse the normal non-commit # we have this funny signature so we can reuse the normal non-commit
......
...@@ -13,13 +13,15 @@ ...@@ -13,13 +13,15 @@
############################################################################## ##############################################################################
"""Database objects """Database objects
""" """
from __future__ import print_function
import sys import sys
import threading
import logging import logging
import datetime import datetime
import time import time
import warnings import warnings
from . import utils
from ZODB.broken import find_global from ZODB.broken import find_global
from ZODB.utils import z64 from ZODB.utils import z64
from ZODB.Connection import Connection from ZODB.Connection import Connection
...@@ -179,6 +181,7 @@ class ConnectionPool(AbstractConnectionPool): ...@@ -179,6 +181,7 @@ class ConnectionPool(AbstractConnectionPool):
(available and available[0][0] < threshhold) (available and available[0][0] < threshhold)
): ):
t, c = available.pop(0) t, c = available.pop(0)
assert not c.opened
self.all.remove(c) self.all.remove(c)
c._release_resources() c._release_resources()
...@@ -213,6 +216,7 @@ class ConnectionPool(AbstractConnectionPool): ...@@ -213,6 +216,7 @@ class ConnectionPool(AbstractConnectionPool):
to_remove = () to_remove = ()
for (t, c) in self.available: for (t, c) in self.available:
assert not c.opened
if t < threshhold: if t < threshhold:
to_remove += (c,) to_remove += (c,)
self.all.remove(c) self.all.remove(c)
...@@ -405,7 +409,7 @@ class DB(object): ...@@ -405,7 +409,7 @@ class DB(object):
storage = ZODB.MappingStorage.MappingStorage(**storage_args) storage = ZODB.MappingStorage.MappingStorage(**storage_args)
# Allocate lock. # Allocate lock.
x = threading.RLock() x = utils.RLock()
self._a = x.acquire self._a = x.acquire
self._r = x.release self._r = x.release
...@@ -559,15 +563,17 @@ class DB(object): ...@@ -559,15 +563,17 @@ class DB(object):
# sys.getrefcount(ob) returns. But, in addition to that, # sys.getrefcount(ob) returns. But, in addition to that,
# the cache holds an extra reference on non-ghost objects, # the cache holds an extra reference on non-ghost objects,
# and we also want to pretend that doesn't exist. # and we also want to pretend that doesn't exist.
# If we have no way to get a refcount, we return False to symbolize # If we have no way to get a refcount, we return False
# that. As opposed to None, this has the advantage of being usable # to symbolize that. As opposed to None, this has the
# as a number (0) in case clients depended on that. # advantage of being usable as a number (0) in case
# clients depended on that.
detail.append({ detail.append({
'conn_no': cn, 'conn_no': cn,
'oid': oid, 'oid': oid,
'id': id, 'id': id,
'klass': "%s%s" % (module, ob.__class__.__name__), 'klass': "%s%s" % (module, ob.__class__.__name__),
'rc': rc(ob) - 3 - (ob._p_changed is not None) if rc else False, 'rc': (rc(ob) - 3 - (ob._p_changed is not None)
if rc else False),
'state': ob._p_changed, 'state': ob._p_changed,
#'references': con.references(oid), #'references': con.references(oid),
}) })
...@@ -628,7 +634,11 @@ class DB(object): ...@@ -628,7 +634,11 @@ class DB(object):
@self._connectionMap @self._connectionMap
def _(c): def _(c):
if c.opened:
c.transaction_manager.abort() c.transaction_manager.abort()
# Note that this will modify our pool, but this is safe, because
# _connectionMap makes a list of the pool to iterate over
c.close()
c.afterCompletion = c.newTransaction = c.close = noop c.afterCompletion = c.newTransaction = c.close = noop
c._release_resources() c._release_resources()
...@@ -753,18 +763,18 @@ class DB(object): ...@@ -753,18 +763,18 @@ class DB(object):
assert result is not None assert result is not None
# open the connection. # open the connection.
result.open(transaction_manager)
# A good time to do some cache cleanup. # A good time to do some cache cleanup.
# (note we already have the lock) # (note we already have the lock)
self.pool.availableGC() self.pool.availableGC()
self.historical_pool.availableGC() self.historical_pool.availableGC()
return result
finally: finally:
self._r() self._r()
result.open(transaction_manager)
return result
def connectionDebugInfo(self): def connectionDebugInfo(self):
result = [] result = []
t = time.time() t = time.time()
...@@ -986,7 +996,7 @@ class ContextManager: ...@@ -986,7 +996,7 @@ class ContextManager:
self.tm.abort() self.tm.abort()
self.conn.close() self.conn.close()
resource_counter_lock = threading.Lock() resource_counter_lock = utils.Lock()
resource_counter = 0 resource_counter = 0
class TransactionalUndo(object): class TransactionalUndo(object):
......
...@@ -19,11 +19,11 @@ to be layered over a base database. ...@@ -19,11 +19,11 @@ to be layered over a base database.
The base storage must not change. The base storage must not change.
""" """
from __future__ import print_function
import os import os
import random import random
import weakref import weakref
import tempfile import tempfile
import threading
import ZODB.BaseStorage import ZODB.BaseStorage
import ZODB.blob import ZODB.blob
import ZODB.interfaces import ZODB.interfaces
...@@ -71,7 +71,7 @@ class DemoStorage(object): ...@@ -71,7 +71,7 @@ class DemoStorage(object):
self._issued_oids = set() self._issued_oids = set()
self._stored_oids = set() self._stored_oids = set()
self._commit_lock = threading.Lock() self._commit_lock = ZODB.utils.Lock()
self._transaction = None self._transaction = None
if name is None: if name is None:
...@@ -319,6 +319,7 @@ class DemoStorage(object): ...@@ -319,6 +319,7 @@ class DemoStorage(object):
@ZODB.utils.locked @ZODB.utils.locked
def tpc_abort(self, transaction): def tpc_abort(self, transaction):
if transaction is not self._transaction: if transaction is not self._transaction:
print('WTF', transaction, self._transaction)
return return
self._stored_oids = set() self._stored_oids = set()
self._transaction = None self._transaction = None
......
...@@ -20,7 +20,6 @@ import contextlib ...@@ -20,7 +20,6 @@ import contextlib
import errno import errno
import logging import logging
import os import os
import threading
import time import time
from struct import pack from struct import pack
from struct import unpack from struct import unpack
...@@ -32,6 +31,8 @@ from zc.lockfile import LockFile ...@@ -32,6 +31,8 @@ from zc.lockfile import LockFile
from zope.interface import alsoProvides from zope.interface import alsoProvides
from zope.interface import implementer from zope.interface import implementer
from .. import utils
from ZODB.blob import BlobStorageMixin from ZODB.blob import BlobStorageMixin
from ZODB.blob import link_or_copy from ZODB.blob import link_or_copy
from ZODB.blob import remove_committed from ZODB.blob import remove_committed
...@@ -2046,7 +2047,7 @@ class FilePool: ...@@ -2046,7 +2047,7 @@ class FilePool:
self.name = file_name self.name = file_name
self._files = [] self._files = []
self._out = [] self._out = []
self._cond = threading.Condition() self._cond = utils.Condition()
@contextlib.contextmanager @contextlib.contextmanager
def write_lock(self): def write_lock(self):
......
...@@ -19,7 +19,6 @@ storage without distracting storage details. ...@@ -19,7 +19,6 @@ storage without distracting storage details.
import BTrees import BTrees
import time import time
import threading
import ZODB.BaseStorage import ZODB.BaseStorage
import ZODB.interfaces import ZODB.interfaces
import ZODB.POSException import ZODB.POSException
...@@ -40,10 +39,10 @@ class MappingStorage(object): ...@@ -40,10 +39,10 @@ class MappingStorage(object):
self._transactions = BTrees.OOBTree.OOBTree() # {tid->TransactionRecord} self._transactions = BTrees.OOBTree.OOBTree() # {tid->TransactionRecord}
self._ltid = ZODB.utils.z64 self._ltid = ZODB.utils.z64
self._last_pack = None self._last_pack = None
_lock = threading.RLock() _lock = ZODB.utils.RLock()
self._lock_acquire = _lock.acquire self._lock_acquire = _lock.acquire
self._lock_release = _lock.release self._lock_release = _lock.release
self._commit_lock = threading.Lock() self._commit_lock = ZODB.utils.Lock()
self._opened = True self._opened = True
self._transaction = None self._transaction = None
self._oid = 0 self._oid = 0
......
...@@ -29,6 +29,8 @@ import transaction ...@@ -29,6 +29,8 @@ import transaction
import zope.interface import zope.interface
import zope.interface.verify import zope.interface.verify
from .. import utils
ZERO = b'\0'*8 ZERO = b'\0'*8
class BasicStorage: class BasicStorage:
...@@ -345,7 +347,7 @@ class BasicStorage: ...@@ -345,7 +347,7 @@ class BasicStorage:
results = {} results = {}
started.wait() started.wait()
attempts = [] attempts = []
attempts_cond = threading.Condition() attempts_cond = utils.Condition()
def update_attempts(): def update_attempts():
with attempts_cond: with attempts_cond:
......
...@@ -65,6 +65,7 @@ class ZODBClientThread(TestThread): ...@@ -65,6 +65,7 @@ class ZODBClientThread(TestThread):
for i in range(self.commits): for i in range(self.commits):
self.commit(d, i) self.commit(d, i)
self.test.assertEqual(sorted(d.keys()), list(range(self.commits))) self.test.assertEqual(sorted(d.keys()), list(range(self.commits)))
conn.close()
def commit(self, d, num): def commit(self, d, num):
d[num] = time.time() d[num] = time.time()
......
...@@ -130,3 +130,10 @@ class MVCCMappingStorage(MappingStorage): ...@@ -130,3 +130,10 @@ class MVCCMappingStorage(MappingStorage):
MappingStorage.pack(self, t, referencesf, gc) MappingStorage.pack(self, t, referencesf, gc)
finally: finally:
self._commit_lock.release() self._commit_lock.release()
@ZODB.utils.locked(MappingStorage.opened)
def lastTransaction(self):
if self._transactions:
return self._transactions.maxKey()
else:
return ZODB.utils.z64
...@@ -25,7 +25,8 @@ from ZODB.serialize import referencesf ...@@ -25,7 +25,8 @@ from ZODB.serialize import referencesf
from ZODB.tests.MinPO import MinPO from ZODB.tests.MinPO import MinPO
from ZODB.tests.MTStorage import TestThread from ZODB.tests.MTStorage import TestThread
from ZODB.tests.StorageTestBase import snooze from ZODB.tests.StorageTestBase import snooze
from ZODB._compat import loads, PersistentPickler, Pickler, Unpickler, BytesIO, _protocol from ZODB._compat import (loads, PersistentPickler, Pickler, Unpickler,
BytesIO, _protocol)
import transaction import transaction
import ZODB.interfaces import ZODB.interfaces
import ZODB.tests.util import ZODB.tests.util
...@@ -270,6 +271,8 @@ class PackableStorage(PackableStorageBase): ...@@ -270,6 +271,8 @@ class PackableStorage(PackableStorageBase):
self._sanity_check() self._sanity_check()
db.close()
def checkPackWhileWriting(self): def checkPackWhileWriting(self):
self._PackWhileWriting(pack_now=False) self._PackWhileWriting(pack_now=False)
...@@ -312,6 +315,8 @@ class PackableStorage(PackableStorageBase): ...@@ -312,6 +315,8 @@ class PackableStorage(PackableStorageBase):
self._sanity_check() self._sanity_check()
db.close()
def checkPackWithMultiDatabaseReferences(self): def checkPackWithMultiDatabaseReferences(self):
databases = {} databases = {}
db = DB(self._storage, databases=databases, database_name='') db = DB(self._storage, databases=databases, database_name='')
...@@ -327,6 +332,9 @@ class PackableStorage(PackableStorageBase): ...@@ -327,6 +332,9 @@ class PackableStorage(PackableStorageBase):
db.pack(time.time()+1) db.pack(time.time()+1)
# some valid storages always return 0 for len() # some valid storages always return 0 for len()
self.assertTrue(len(self._storage) in (0, 1)) self.assertTrue(len(self._storage) in (0, 1))
conn.close()
otherdb.close()
db.close()
def checkPackAllRevisions(self): def checkPackAllRevisions(self):
self._initroot() self._initroot()
...@@ -718,7 +726,7 @@ class ClientThread(TestThread): ...@@ -718,7 +726,7 @@ class ClientThread(TestThread):
def __init__(self, db, choices, loop_trip, timer, thread_id): def __init__(self, db, choices, loop_trip, timer, thread_id):
TestThread.__init__(self) TestThread.__init__(self)
self.root = db.open().root() self.db = db
self.choices = choices self.choices = choices
self.loop_trip = loop_trip self.loop_trip = loop_trip
self.millis = timer.elapsed_millis self.millis = timer.elapsed_millis
...@@ -737,6 +745,8 @@ class ClientThread(TestThread): ...@@ -737,6 +745,8 @@ class ClientThread(TestThread):
def runtest(self): def runtest(self):
from random import choice from random import choice
conn = self.db.open()
root = conn.root()
for j in range(self.loop_trip): for j in range(self.loop_trip):
assign_worked = False assign_worked = False
...@@ -745,7 +755,7 @@ class ClientThread(TestThread): ...@@ -745,7 +755,7 @@ class ClientThread(TestThread):
try: try:
index = choice(self.choices) index = choice(self.choices)
alist.extend([self.millis(), index]) alist.extend([self.millis(), index])
self.root[index].value = MinPO(j) root[index].value = MinPO(j)
assign_worked = True assign_worked = True
transaction.commit() transaction.commit()
alist.append(self.millis()) alist.append(self.millis())
...@@ -756,6 +766,8 @@ class ClientThread(TestThread): ...@@ -756,6 +766,8 @@ class ClientThread(TestThread):
transaction.abort() transaction.abort()
alist.append(assign_worked) alist.append(assign_worked)
conn.close()
class ElapsedTimer: class ElapsedTimer:
def __init__(self, start_time): def __init__(self, start_time):
self.start_time = start_time self.start_time = start_time
...@@ -776,5 +788,5 @@ def IExternalGC_suite(factory): ...@@ -776,5 +788,5 @@ def IExternalGC_suite(factory):
return doctest.DocFileSuite( return doctest.DocFileSuite(
'IExternalGC.test', 'IExternalGC.test',
setUp=setup, tearDown=zope.testing.setupstack.tearDown, setUp=setup, tearDown=ZODB.tests.util.tearDown,
checker=ZODB.tests.util.checker) checker=ZODB.tests.util.checker)
...@@ -23,7 +23,7 @@ import sys ...@@ -23,7 +23,7 @@ import sys
import time import time
import transaction import transaction
from ZODB.utils import u64 from ZODB.utils import u64, z64
from ZODB.tests.MinPO import MinPO from ZODB.tests.MinPO import MinPO
from ZODB._compat import PersistentPickler, Unpickler, BytesIO, _protocol from ZODB._compat import PersistentPickler, Unpickler, BytesIO, _protocol
import ZODB.tests.util import ZODB.tests.util
...@@ -153,8 +153,8 @@ class StorageTestBase(ZODB.tests.util.TestCase): ...@@ -153,8 +153,8 @@ class StorageTestBase(ZODB.tests.util.TestCase):
self._storage.close() self._storage.close()
def tearDown(self): def tearDown(self):
self._close()
ZODB.tests.util.TestCase.tearDown(self) ZODB.tests.util.TestCase.tearDown(self)
self._close()
def _dostore(self, oid=None, revid=None, data=None, def _dostore(self, oid=None, revid=None, data=None,
already_pickled=0, user=None, description=None): already_pickled=0, user=None, description=None):
......
...@@ -130,4 +130,4 @@ revision as well as the entire directory: ...@@ -130,4 +130,4 @@ revision as well as the entire directory:
Clean up our blob directory and database: Clean up our blob directory and database:
>>> blob_storage.close() >>> database.close()
...@@ -49,3 +49,5 @@ writing and expect the file to be in the blob temporary directory:: ...@@ -49,3 +49,5 @@ writing and expect the file to be in the blob temporary directory::
True True
>>> w.close() >>> w.close()
>>> database.close()
...@@ -160,3 +160,5 @@ knowledge that the underlying storage's pack method is also called: ...@@ -160,3 +160,5 @@ knowledge that the underlying storage's pack method is also called:
>>> blob_storage._blobs_pack_is_in_progress >>> blob_storage._blobs_pack_is_in_progress
False False
>>> base_storage.pack = base_pack >>> base_storage.pack = base_pack
>>> database.close()
...@@ -61,7 +61,7 @@ While it's boring, it's important to verify that the same relationships ...@@ -61,7 +61,7 @@ While it's boring, it's important to verify that the same relationships
hold if the default pool size is overridden. hold if the default pool size is overridden.
>>> handler.clear() >>> handler.clear()
>>> st.close() >>> db.close()
>>> st = Storage() >>> st = Storage()
>>> PS = 2 # smaller pool size >>> PS = 2 # smaller pool size
>>> db = DB(st, pool_size=PS) >>> db = DB(st, pool_size=PS)
...@@ -117,7 +117,7 @@ We can change the pool size on the fly: ...@@ -117,7 +117,7 @@ We can change the pool size on the fly:
Enough of that. Enough of that.
>>> handler.clear() >>> handler.clear()
>>> st.close() >>> db.close()
More interesting is the stack-like nature of connection reuse. So long as More interesting is the stack-like nature of connection reuse. So long as
we keep opening new connections, and keep them alive, all connections we keep opening new connections, and keep them alive, all connections
...@@ -256,7 +256,7 @@ Nothing in that last block should have logged any msgs: ...@@ -256,7 +256,7 @@ Nothing in that last block should have logged any msgs:
If "too many" connections are open, then closing one may kick an older If "too many" connections are open, then closing one may kick an older
closed one out of the available connection stack. closed one out of the available connection stack.
>>> st.close() >>> db.close()
>>> st = Storage() >>> st = Storage()
>>> db = DB(st, pool_size=3) >>> db = DB(st, pool_size=3)
>>> conns = [db.open() for dummy in range(6)] >>> conns = [db.open() for dummy in range(6)]
...@@ -324,7 +324,7 @@ gc to reclaim the Connection and its cache eventually works, but that can ...@@ -324,7 +324,7 @@ gc to reclaim the Connection and its cache eventually works, but that can
take "a long time" and caches can hold on to many objects, and limited take "a long time" and caches can hold on to many objects, and limited
resources (like RDB connections), for the duration. resources (like RDB connections), for the duration.
>>> st.close() >>> db.close()
>>> st = Storage() >>> st = Storage()
>>> db = DB(st, pool_size=2) >>> db = DB(st, pool_size=2)
>>> conn0 = db.open() >>> conn0 = db.open()
......
...@@ -25,10 +25,11 @@ Make a change locally: ...@@ -25,10 +25,11 @@ Make a change locally:
>>> rt = cn.root() >>> rt = cn.root()
>>> rt['a'] = 1 >>> rt['a'] = 1
Sync should not have been called yet. Sync is called when a connection is open, as that starts a new transaction:
>>> st.sync_called # False before 3.4 >>> st.sync_called
False True
>>> st.sync_called = False
``sync()`` is called by the Connection's ``afterCompletion()`` hook after the ``sync()`` is called by the Connection's ``afterCompletion()`` hook after the
...@@ -77,7 +78,9 @@ path at some point when serving pages. ...@@ -77,7 +78,9 @@ path at some point when serving pages.
>>> rt = cn.root() # make a change >>> rt = cn.root() # make a change
>>> rt['c'] = 3 >>> rt['c'] = 3
>>> st.sync_called >>> st.sync_called
False True
>>> st.sync_called = False
Now ensure that ``cn.afterCompletion() -> st.sync()`` gets called by commit Now ensure that ``cn.afterCompletion() -> st.sync()`` gets called by commit
despite that the `Connection` registered after the transaction began: despite that the `Connection` registered after the transaction began:
...@@ -96,7 +99,8 @@ And try the same thing with a non-threaded transaction manager: ...@@ -96,7 +99,8 @@ And try the same thing with a non-threaded transaction manager:
>>> rt = cn.root() # make a change >>> rt = cn.root() # make a change
>>> rt['d'] = 4 >>> rt['d'] = 4
>>> st.sync_called >>> st.sync_called
False True
>>> st.sync_called = False
>>> tm.commit() >>> tm.commit()
>>> st.sync_called >>> st.sync_called
True True
......
...@@ -22,7 +22,7 @@ import unittest ...@@ -22,7 +22,7 @@ import unittest
import transaction import transaction
import ZODB.tests.util import ZODB.tests.util
from ZODB.config import databaseFromString from ZODB.config import databaseFromString
from ZODB.utils import p64 from ZODB.utils import p64, u64, z64
from persistent import Persistent from persistent import Persistent
from zope.interface.verify import verifyObject from zope.interface.verify import verifyObject
from zope.testing import loggingsupport, renormalizing from zope.testing import loggingsupport, renormalizing
...@@ -522,16 +522,24 @@ class InvalidationTests(unittest.TestCase): ...@@ -522,16 +522,24 @@ class InvalidationTests(unittest.TestCase):
create one from an int. create one from an int.
>>> cn.invalidate(p64(1), {p1._p_oid: 1}) >>> cn.invalidate(p64(1), {p1._p_oid: 1})
>>> cn._txn_time
'\x00\x00\x00\x00\x00\x00\x00\x01' Transaction start times are based on storage's last
transaction. (Previously, they were based on the first
invalidation seen in a transaction.)
>>> cn._txn_time == p64(u64(db.storage.lastTransaction()) + 1)
True
>>> p1._p_oid in cn._invalidated >>> p1._p_oid in cn._invalidated
True True
>>> p2._p_oid in cn._invalidated >>> p2._p_oid in cn._invalidated
False False
>>> cn.invalidate(p64(10), {p2._p_oid: 1, p64(76): 1}) >>> cn.invalidate(p64(10), {p2._p_oid: 1, p64(76): 1})
>>> cn._txn_time
'\x00\x00\x00\x00\x00\x00\x00\x01' >>> cn._txn_time == p64(u64(db.storage.lastTransaction()) + 1)
True
>>> p1._p_oid in cn._invalidated >>> p1._p_oid in cn._invalidated
True True
>>> p2._p_oid in cn._invalidated >>> p2._p_oid in cn._invalidated
...@@ -560,6 +568,7 @@ class InvalidationTests(unittest.TestCase): ...@@ -560,6 +568,7 @@ class InvalidationTests(unittest.TestCase):
>>> cn._invalidated >>> cn._invalidated
set([]) set([])
>>> db.close()
""" """
def doctest_invalidateCache(): def doctest_invalidateCache():
...@@ -1289,6 +1298,9 @@ class StubStorage: ...@@ -1289,6 +1298,9 @@ class StubStorage:
raise TypeError('StubStorage does not support versions.') raise TypeError('StubStorage does not support versions.')
return self._data[oid] return self._data[oid]
def loadBefore(self, oid, tid):
return self._data[oid] + (None, )
def store(self, oid, serial, p, version, transaction): def store(self, oid, serial, p, version, transaction):
if version != '': if version != '':
raise TypeError('StubStorage does not support versions.') raise TypeError('StubStorage does not support versions.')
...@@ -1304,6 +1316,9 @@ class StubStorage: ...@@ -1304,6 +1316,9 @@ class StubStorage:
# storage # storage
return None return None
def lastTransaction(self):
return z64
class TestConnectionInterface(unittest.TestCase): class TestConnectionInterface(unittest.TestCase):
......
...@@ -125,7 +125,7 @@ def connectionDebugInfo(): ...@@ -125,7 +125,7 @@ def connectionDebugInfo():
... now += .1 ... now += .1
... return now ... return now
>>> real_time = time.time >>> real_time = time.time
>>> if isinstance(time,type): >>> if isinstance(time, type):
... time.time = staticmethod(faux_time) # Jython ... time.time = staticmethod(faux_time) # Jython
... else: ... else:
... time.time = faux_time ... time.time = faux_time
...@@ -151,7 +151,7 @@ def connectionDebugInfo(): ...@@ -151,7 +151,7 @@ def connectionDebugInfo():
>>> before >>> before
[None, '\x03zY\xd8\xc0m9\xdd', None] [None, '\x03zY\xd8\xc0m9\xdd', None]
>>> opened >>> opened
['2008-12-04T20:40:44Z (1.40s)', '2008-12-04T20:40:45Z (0.30s)', None] ['2008-12-04T20:40:44Z (1.30s)', '2008-12-04T20:40:46Z (0.10s)', None]
>>> infos >>> infos
['test info (2)', ' (0)', ' (0)'] ['test info (2)', ' (0)', ' (0)']
......
...@@ -75,12 +75,12 @@ class DemoStorageTests( ...@@ -75,12 +75,12 @@ class DemoStorageTests(
db = DB(self._storage) # creates object 0. :) db = DB(self._storage) # creates object 0. :)
self.assertEqual(len(self._storage), 1) self.assertEqual(len(self._storage), 1)
self.assertTrue(self._storage) self.assertTrue(self._storage)
conn = db.open() with db.transaction() as conn:
for i in range(10): for i in range(10):
conn.root()[i] = conn.root().__class__() conn.root()[i] = conn.root().__class__()
transaction.commit()
self.assertEqual(len(self._storage), 11) self.assertEqual(len(self._storage), 11)
self.assertTrue(self._storage) self.assertTrue(self._storage)
db.close()
def checkLoadBeforeUndo(self): def checkLoadBeforeUndo(self):
pass # we don't support undo yet pass # we don't support undo yet
......
...@@ -35,6 +35,7 @@ from ZODB.tests import ReadOnlyStorage, RecoveryStorage ...@@ -35,6 +35,7 @@ from ZODB.tests import ReadOnlyStorage, RecoveryStorage
from ZODB.tests.StorageTestBase import MinPO, zodb_pickle from ZODB.tests.StorageTestBase import MinPO, zodb_pickle
from ZODB._compat import dump, dumps, _protocol from ZODB._compat import dump, dumps, _protocol
from . import util
class FileStorageTests( class FileStorageTests(
StorageTestBase.StorageTestBase, StorageTestBase.StorageTestBase,
...@@ -696,7 +697,7 @@ def test_suite(): ...@@ -696,7 +697,7 @@ def test_suite():
suite.addTest(unittest.makeSuite(klass, "check")) suite.addTest(unittest.makeSuite(klass, "check"))
suite.addTest(doctest.DocTestSuite( suite.addTest(doctest.DocTestSuite(
setUp=zope.testing.setupstack.setUpDirectory, setUp=zope.testing.setupstack.setUpDirectory,
tearDown=zope.testing.setupstack.tearDown, tearDown=util.tearDown,
checker=ZODB.tests.util.checker)) checker=ZODB.tests.util.checker))
suite.addTest(ZODB.tests.testblob.storage_reusable_suite( suite.addTest(ZODB.tests.testblob.storage_reusable_suite(
'BlobFileStorage', 'BlobFileStorage',
......
...@@ -33,6 +33,7 @@ from ZODB.tests import ( ...@@ -33,6 +33,7 @@ from ZODB.tests import (
) )
class MVCCTests: class MVCCTests:
def checkClosingNestedDatabasesWorks(self): def checkClosingNestedDatabasesWorks(self):
# This tests for the error described in # This tests for the error described in
# https://github.com/zopefoundation/ZODB/issues/45 # https://github.com/zopefoundation/ZODB/issues/45
...@@ -42,7 +43,6 @@ class MVCCTests: ...@@ -42,7 +43,6 @@ class MVCCTests:
db1.close() db1.close()
db2.close() db2.close()
def checkCrossConnectionInvalidation(self): def checkCrossConnectionInvalidation(self):
# Verify connections see updated state at txn boundaries. # Verify connections see updated state at txn boundaries.
# This will fail if the Connection doesn't poll for changes. # This will fail if the Connection doesn't poll for changes.
......
...@@ -38,6 +38,8 @@ __test__ = dict( ...@@ -38,6 +38,8 @@ __test__ = dict(
>>> list(conn2.root()[0].keys()) >>> list(conn2.root()[0].keys())
[] []
>>> db2.close()
>>> db1.close()
""", """,
) )
......
...@@ -64,7 +64,7 @@ Now we see two transactions and two changed objects. ...@@ -64,7 +64,7 @@ Now we see two transactions and two changed objects.
Clean up. Clean up.
>>> st.close() >>> db.close()
""" """
import re import re
...@@ -87,6 +87,6 @@ checker = renormalizing.RENormalizing([ ...@@ -87,6 +87,6 @@ checker = renormalizing.RENormalizing([
def test_suite(): def test_suite():
return doctest.DocTestSuite( return doctest.DocTestSuite(
setUp=zope.testing.setupstack.setUpDirectory, setUp=zope.testing.setupstack.setUpDirectory,
tearDown=zope.testing.setupstack.tearDown, tearDown=ZODB.tests.util.tearDown,
optionflags=doctest.REPORT_NDIFF, optionflags=doctest.REPORT_NDIFF,
checker=ZODB.tests.util.checker + checker) checker=ZODB.tests.util.checker + checker)
...@@ -99,10 +99,13 @@ class MinimalMemoryStorage(BaseStorage, object): ...@@ -99,10 +99,13 @@ class MinimalMemoryStorage(BaseStorage, object):
del self._txn del self._txn
def _finish(self, tid, u, d, e): def _finish(self, tid, u, d, e):
with self._lock: self._lock_acquire()
try:
self._index.update(self._txn.index) self._index.update(self._txn.index)
self._cur.update(self._txn.cur()) self._cur.update(self._txn.cur())
self._ltid = self._tid self._ltid = self._tid
finally:
self._lock_release()
def loadBefore(self, the_oid, the_tid): def loadBefore(self, the_oid, the_tid):
# It's okay if loadBefore() is really expensive, because this # It's okay if loadBefore() is really expensive, because this
...@@ -121,6 +124,9 @@ class MinimalMemoryStorage(BaseStorage, object): ...@@ -121,6 +124,9 @@ class MinimalMemoryStorage(BaseStorage, object):
end_tid = None end_tid = None
else: else:
end_tid = tids[j] end_tid = tids[j]
self.hook(the_oid, self._cur[the_oid], '')
return self._index[(the_oid, tid)], tid, end_tid return self._index[(the_oid, tid)], tid, end_tid
def loadSerial(self, oid, serial): def loadSerial(self, oid, serial):
......
...@@ -54,6 +54,8 @@ except NameError: ...@@ -54,6 +54,8 @@ except NameError:
import io import io
file_type = io.BufferedReader file_type = io.BufferedReader
from . import util
def new_time(): def new_time():
"""Create a _new_ time stamp. """Create a _new_ time stamp.
...@@ -334,6 +336,7 @@ class RecoveryBlobStorage(BlobTestBase, ...@@ -334,6 +336,7 @@ class RecoveryBlobStorage(BlobTestBase,
transaction.commit() transaction.commit()
self._dst.copyTransactionsFrom(self._storage) self._dst.copyTransactionsFrom(self._storage)
self.compare(self._storage, self._dst) self.compare(self._storage, self._dst)
db.close()
def gc_blob_removes_uncommitted_data(): def gc_blob_removes_uncommitted_data():
...@@ -446,7 +449,6 @@ def packing_with_uncommitted_data_non_undoing(): ...@@ -446,7 +449,6 @@ def packing_with_uncommitted_data_non_undoing():
Clean up: Clean up:
>>> database.close() >>> database.close()
""" """
def packing_with_uncommitted_data_undoing(): def packing_with_uncommitted_data_undoing():
...@@ -545,13 +547,14 @@ def loadblob_tmpstore(): ...@@ -545,13 +547,14 @@ def loadblob_tmpstore():
>>> transaction.commit() >>> transaction.commit()
>>> blob_oid = root['blob']._p_oid >>> blob_oid = root['blob']._p_oid
>>> tid = connection._storage.lastTransaction() >>> tid = connection._storage.lastTransaction()
>>> _txn_time = connection._txn_time
Now we open a database with a TmpStore in front: Now we open a database with a TmpStore in front:
>>> database.close() >>> database.close()
>>> from ZODB.Connection import TmpStore >>> from ZODB.Connection import TmpStore
>>> tmpstore = TmpStore(blob_storage) >>> tmpstore = TmpStore(blob_storage, _txn_time)
We can access the blob correctly: We can access the blob correctly:
...@@ -609,7 +612,7 @@ def do_not_depend_on_cwd(): ...@@ -609,7 +612,7 @@ def do_not_depend_on_cwd():
>>> with conn.root()['blob'].open() as fp: fp.read() >>> with conn.root()['blob'].open() as fp: fp.read()
'data' 'data'
>>> bs.close() >>> db.close()
""" """
def savepoint_isolation(): def savepoint_isolation():
...@@ -700,9 +703,11 @@ def savepoint_cleanup(): ...@@ -700,9 +703,11 @@ def savepoint_cleanup():
>>> db.close() >>> db.close()
""" """
def lp440234_Setting__p_changed_of_a_Blob_w_no_uncomitted_changes_is_noop(): def lp440234_Setting__p_changed_of_a_Blob_w_no_uncomitted_changes_is_noop():
r""" r"""
>>> conn = ZODB.connection('data.fs', blob_dir='blobs') >>> db = ZODB.DB('data.fs', blob_dir='blobs')
>>> conn = db.open()
>>> blob = ZODB.blob.Blob(b'blah') >>> blob = ZODB.blob.Blob(b'blah')
>>> conn.add(blob) >>> conn.add(blob)
>>> transaction.commit() >>> transaction.commit()
...@@ -714,7 +719,7 @@ def lp440234_Setting__p_changed_of_a_Blob_w_no_uncomitted_changes_is_noop(): ...@@ -714,7 +719,7 @@ def lp440234_Setting__p_changed_of_a_Blob_w_no_uncomitted_changes_is_noop():
>>> old_serial == blob._p_serial >>> old_serial == blob._p_serial
True True
>>> conn.close() >>> db.close()
""" """
def setUp(test): def setUp(test):
...@@ -757,7 +762,7 @@ def storage_reusable_suite(prefix, factory, ...@@ -757,7 +762,7 @@ def storage_reusable_suite(prefix, factory,
"blob_connection.txt", "blob_connection.txt",
"blob_importexport.txt", "blob_importexport.txt",
"blob_transaction.txt", "blob_transaction.txt",
setUp=setup, tearDown=zope.testing.setupstack.tearDown, setUp=setup, tearDown=util.tearDown,
checker=zope.testing.renormalizing.RENormalizing([ checker=zope.testing.renormalizing.RENormalizing([
# Py3k renders bytes where Python2 used native strings... # Py3k renders bytes where Python2 used native strings...
(re.compile(r"^b'"), "'"), (re.compile(r"^b'"), "'"),
...@@ -780,15 +785,16 @@ def storage_reusable_suite(prefix, factory, ...@@ -780,15 +785,16 @@ def storage_reusable_suite(prefix, factory,
if test_packing: if test_packing:
suite.addTest(doctest.DocFileSuite( suite.addTest(doctest.DocFileSuite(
"blob_packing.txt", "blob_packing.txt",
setUp=setup, tearDown=zope.testing.setupstack.tearDown, setUp=setup, tearDown=util.tearDown,
)) ))
suite.addTest(doctest.DocTestSuite( suite.addTest(doctest.DocTestSuite(
setUp=setup, tearDown=zope.testing.setupstack.tearDown, setUp=setup, tearDown=util.tearDown,
checker = ZODB.tests.util.checker + \ checker = (
ZODB.tests.util.checker +
zope.testing.renormalizing.RENormalizing([ zope.testing.renormalizing.RENormalizing([
(re.compile(r'\%(sep)s\%(sep)s' % dict(sep=os.path.sep)), '/'), (re.compile(r'\%(sep)s\%(sep)s' % dict(sep=os.path.sep)), '/'),
(re.compile(r'\%(sep)s' % dict(sep=os.path.sep)), '/'), (re.compile(r'\%(sep)s' % dict(sep=os.path.sep)), '/'),
]), ])),
)) ))
def create_storage(self, name='data', blob_dir=None): def create_storage(self, name='data', blob_dir=None):
...@@ -823,7 +829,7 @@ def test_suite(): ...@@ -823,7 +829,7 @@ def test_suite():
"blob_tempdir.txt", "blob_tempdir.txt",
"blobstorage_packing.txt", "blobstorage_packing.txt",
setUp=setUp, setUp=setUp,
tearDown=zope.testing.setupstack.tearDown, tearDown=util.tearDown,
optionflags=doctest.ELLIPSIS, optionflags=doctest.ELLIPSIS,
checker=ZODB.tests.util.checker, checker=ZODB.tests.util.checker,
)) ))
...@@ -831,7 +837,7 @@ def test_suite(): ...@@ -831,7 +837,7 @@ def test_suite():
"blob_layout.txt", "blob_layout.txt",
optionflags=doctest.ELLIPSIS|doctest.NORMALIZE_WHITESPACE, optionflags=doctest.ELLIPSIS|doctest.NORMALIZE_WHITESPACE,
setUp=setUp, setUp=setUp,
tearDown=zope.testing.setupstack.tearDown, tearDown=util.tearDown,
checker=ZODB.tests.util.checker + checker=ZODB.tests.util.checker +
zope.testing.renormalizing.RENormalizing([ zope.testing.renormalizing.RENormalizing([
(re.compile(r'\%(sep)s\%(sep)s' % dict(sep=os.path.sep)), '/'), (re.compile(r'\%(sep)s\%(sep)s' % dict(sep=os.path.sep)), '/'),
......
...@@ -33,9 +33,17 @@ This note includes doctests that explain how MVCC is implemented (and ...@@ -33,9 +33,17 @@ This note includes doctests that explain how MVCC is implemented (and
test that the implementation is correct). The tests use a test that the implementation is correct). The tests use a
MinimalMemoryStorage that implements MVCC support, but not much else. MinimalMemoryStorage that implements MVCC support, but not much else.
***IMPORTANT***: The MVCC approach has changed since these tests were
originally written. The new approach is much simpler because we no
longer call load to get the current state of an object. We call
loadBefore instead, having gotten a transaction time at the start of a
transaction. As a result, the rhythm of the tests is a little odd,
because they probe a complex dance that no longer exists.
>>> from ZODB.tests.test_storage import MinimalMemoryStorage >>> from ZODB.tests.test_storage import MinimalMemoryStorage
>>> from ZODB import DB >>> from ZODB import DB
>>> db = DB(MinimalMemoryStorage()) >>> st = MinimalMemoryStorage()
>>> db = DB(st)
We will use two different connections with different transaction managers We will use two different connections with different transaction managers
to make sure that the connections act independently, even though they'll to make sure that the connections act independently, even though they'll
...@@ -59,6 +67,10 @@ Now open a second connection. ...@@ -59,6 +67,10 @@ Now open a second connection.
>>> tm2 = transaction.TransactionManager() >>> tm2 = transaction.TransactionManager()
>>> cn2 = db.open(transaction_manager=tm2) >>> cn2 = db.open(transaction_manager=tm2)
>>> from ZODB.utils import p64, u64
>>> cn2._txn_time == p64(u64(st.lastTransaction()) + 1)
True
>>> txn_time2 = cn2._txn_time
Connection high-water mark Connection high-water mark
-------------------------- --------------------------
...@@ -67,22 +79,20 @@ The ZODB Connection tracks a transaction high-water mark, which ...@@ -67,22 +79,20 @@ The ZODB Connection tracks a transaction high-water mark, which
bounds the latest transaction id that can be read by the current bounds the latest transaction id that can be read by the current
transaction and still present a consistent view of the database. transaction and still present a consistent view of the database.
Transactions with ids up to but not including the high-water mark Transactions with ids up to but not including the high-water mark
are OK to read. When a transaction commits, the database sends are OK to read. At the beginning of a transaction, a connection
invalidations to all the other connections; the invalidation contains sets the high-water mark to just over the last transaction time the
the transaction id and the oids of modified objects. The Connection storage has seen.
stores the high-water mark in _txn_time, which is set to None until
an invalidation arrives.
>>> cn = db.open()
>>> cn._txn_time == p64(u64(st.lastTransaction()) + 1)
True
>>> cn.invalidate(100, dict.fromkeys([1, 2]))
>>> cn._txn_time == p64(u64(st.lastTransaction()) + 1)
True
>>> cn.invalidate(200, dict.fromkeys([1, 2]))
>>> cn._txn_time == p64(u64(st.lastTransaction()) + 1)
True

A connection's high-water mark is set to the transaction id taken from
the first invalidation processed by the connection.  Transaction ids are
@@ -95,8 +105,8 @@ but that doesn't work unless an object is modified. sync() will abort
a transaction and process invalidations.

>>> cn.sync()
>>> cn._txn_time == p64(u64(st.lastTransaction()) + 1)
True
Basic functionality
-------------------
@@ -109,9 +119,9 @@ will modify "a." The other transaction will then modify "b" and commit.

>>> tm1.get().commit()
>>> txn = db.lastTransaction()

The second connection already has its high-water mark set.

>>> cn2._txn_time == txn_time2
True

It is safe to read "b," because it was not modified by the concurrent
@@ -153,22 +163,23 @@ ConflictError: database conflict error (oid 0x01, class ZODB.tests.MinPO.MinPO)

This example will demonstrate that we can commit a transaction if we only
modify current revisions.

>>> cn2._txn_time == p64(u64(st.lastTransaction()) + 1)
True
>>> txn_time2 = cn2._txn_time
>>> r1 = cn1.root()
>>> r1["a"].value = 3
>>> tm1.get().commit()
>>> txn = db.lastTransaction()
>>> cn2._txn_time == txn_time2
True
>>> r2["b"].value = r2["a"].value + 1
>>> r2["b"].value
3
>>> tm2.get().commit()
>>> cn2._txn_time == p64(u64(st.lastTransaction()) + 1)
True
Object cache
------------
@@ -302,22 +313,18 @@ same things now.

>>> r2["a"].value, r2["b"].value
(42, 43)
>>> db.close()
Late invalidation
-----------------

The combination of ZEO and MVCC used to add more complexity.  That's
why ZODB no longer calls load. :)

Rather than add all the complexity of ZEO to these tests, the
MinimalMemoryStorage has a hook.  We'll write a subclass that will
deliver an invalidation when it loads (or loadBefore's) an object.
The hook allows us to test the Connection code.
>>> class TestStorage(MinimalMemoryStorage):
...     def __init__(self):
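
The diff elides the rest of TestStorage.  A hypothetical sketch of the
idea (HookedStorage, on_load, and this loadBefore override are
illustrative, not the real TestStorage body):

    class HookedStorage(MinimalMemoryStorage):
        """Fire a callback whenever a hooked oid is read."""

        def __init__(self, on_load=None):
            MinimalMemoryStorage.__init__(self)
            self.hooked = {}        # oids that should trigger the callback
            self.count = 0          # how many times the callback fired
            self.on_load = on_load  # e.g. deliver an invalidation

        def loadBefore(self, oid, tid):
            if oid in self.hooked and self.on_load is not None:
                self.count += 1
                self.on_load(oid)   # invalidate before returning the data
            return MinimalMemoryStorage.loadBefore(self, oid, tid)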
@@ -351,6 +358,12 @@ non-current revision to load.

>>> oid = r1["b"]._p_oid
>>> ts.hooked[oid] = 1
This test is kinda screwy because it depends on an old approach that
has changed. We'll hack the _txn_time to get the original expected
result, even though what's going on now is much simpler.
>>> cn1._txn_time = ts.lastTransaction()
Once the oid is hooked, an invalidation will be delivered the next
time it is activated.  The code below activates the object, then
confirms that the hook worked and that the old state was retrieved.
@@ -367,6 +380,8 @@ True

>>> r1["b"].value
0
>>> db.close()
No earlier revision available
-----------------------------
@@ -395,14 +410,13 @@ section above, there is no older state to retrieve.

False
>>> r1["b"]._p_state
-1
>>> cn1._txn_time = ts.lastTransaction()
>>> r1["b"]._p_activate() >>> r1["b"]._p_activate()
Traceback (most recent call last): Traceback (most recent call last):
... ...
ReadConflictError: database read conflict error (oid 0x02, class ZODB.tests.MinPO.MinPO) ReadConflictError: database read conflict error
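
A minimal sketch of why this raises (a hypothetical helper, assuming
loadBefore returns None when no revision exists before the given tid):

    from ZODB.POSException import ReadConflictError

    def load_for_read(storage, oid, txn_time):
        r = storage.loadBefore(oid, txn_time)
        if r is None:
            # Nothing was committed for oid before txn_time, so there is
            # no state consistent with this transaction's snapshot.
            raise ReadConflictError(oid=oid)
        data, serial, next_serial = r
        return data, serial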
>>> db.close()

"""
import doctest
import re
...
@@ -61,6 +61,7 @@ checker = renormalizing.RENormalizing([
    ])

def setUp(test, name='test'):
    clear_transaction_syncs()
    transaction.abort()
    d = tempfile.mkdtemp(prefix=name)
    zope.testing.setupstack.register(test, zope.testing.setupstack.rmtree, d)
@@ -71,7 +72,9 @@ def setUp(test, name='test'):
    os.chdir(d)
    zope.testing.setupstack.register(test, transaction.abort)

def tearDown(test):
    zope.testing.setupstack.tearDown(test)
    clear_transaction_syncs()

class TestCase(unittest.TestCase):
@@ -186,3 +189,18 @@ def mess_with_time(test=None, globs=None, now=1278864701.5):
        time.time = staticmethod(faux_time)  # jython
    else:
        time.time = faux_time
def clear_transaction_syncs():
    """Clear data managers registered with the global transaction manager

    Many tests don't clean up synchronizers registered with the
    global transaction manager, which can wreak havoc with subsequent
    tests, now that connections interact with their storages at
    transaction boundaries.  We need to make sure that we clear any
    registered data managers.

    For now, we'll use the transaction manager's underware.
    Eventually, transaction managers need to grow an API for this.
    """
    transaction.manager._synchs.data.clear()
...
@@ -11,10 +11,12 @@
# FOR A PARTICULAR PURPOSE
#
##############################################################################
from __future__ import print_function
import os
import struct
import sys
import time
import threading
import warnings
from binascii import hexlify, unhexlify
from struct import pack, unpack
@@ -308,3 +310,65 @@ class locked(object):

    def __call__(self, func):
        return Locked(func, preconditions=self.preconditions)
if os.environ.get('DEBUG_LOCKING'):
    # Wrap the threading synchronization primitives with variants that
    # print a trace line (lock id, thread id, calling frame, operation)
    # on every use, to help track down deadlocks.

    class Lock:

        lock_class = threading.Lock

        def __init__(self):
            self._lock = self.lock_class()

        def pr(self, name, a=None, kw=None):
            # Report the caller's file and line, skipping over frames
            # in ZODB/utils.py itself (the wrapper methods below).
            f = sys._getframe(2)
            if f.f_code.co_filename.endswith('ZODB/utils.py'):
                f = sys._getframe(3)
            f = '%s:%s' % (f.f_code.co_filename, f.f_lineno)
            print(id(self), self._lock, threading.get_ident(), f, name,
                  a if a else '', kw if kw else '')

        def acquire(self, *a, **kw):
            self.pr('acquire', a, kw)
            return self._lock.acquire(*a, **kw)

        def release(self):
            self.pr('release')
            return self._lock.release()

        def __enter__(self):
            self.pr('acquire')
            return self._lock.acquire()

        def __exit__(self, *ignored):
            self.pr('release')
            return self._lock.release()

    class RLock(Lock):
        lock_class = threading.RLock

    class Condition(Lock):

        lock_class = threading.Condition

        def wait(self, *a, **kw):
            self.pr('wait', a, kw)
            return self._lock.wait(*a, **kw)

        def wait_for(self, *a, **kw):
            self.pr('wait_for', a, kw)
            return self._lock.wait_for(*a, **kw)

        def notify(self, *a, **kw):
            self.pr('notify', a, kw)
            return self._lock.notify(*a, **kw)

        def notify_all(self):
            self.pr('notify_all')
            return self._lock.notify_all()

        notifyAll = notify_all  # old-style alias

else:
    from threading import Condition, Lock, RLock
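
For example, to turn the tracing on (the variable must be set before
ZODB.utils is first imported; the output is whatever pr() prints above):

    import os
    os.environ['DEBUG_LOCKING'] = '1'  # any non-empty value enables tracing

    from ZODB.utils import Lock

    l = Lock()
    with l:  # prints an "acquire" line; a "release" line follows on exit
        pass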