Commit 7d81f21a authored by Jim Fulton's avatar Jim Fulton Committed by GitHub

Merge pull request #56 from zopefoundation/no-more-load

Simplify MVCC by determining transaction start time using lastTransac…
parents 4905bb85 7ed1a8f9
...@@ -20,13 +20,12 @@ to application logic. ZODB includes features such as a plugable storage ...@@ -20,13 +20,12 @@ to application logic. ZODB includes features such as a plugable storage
interface, rich transaction support, and undo. interface, rich transaction support, and undo.
""" """
version = "4.3.1" version = "5.0.dev0"
import os import os
from setuptools import setup, find_packages from setuptools import setup, find_packages
classifiers = """\ classifiers = """\
Development Status :: 4 - Beta
Intended Audience :: Developers Intended Audience :: Developers
License :: OSI Approved :: Zope Public License License :: OSI Approved :: Zope Public License
Programming Language :: Python Programming Language :: Python
...@@ -142,7 +141,7 @@ setup(name="ZODB", ...@@ -142,7 +141,7 @@ setup(name="ZODB",
'persistent >= 4.2.0', 'persistent >= 4.2.0',
'BTrees >= 4.2.0', 'BTrees >= 4.2.0',
'ZConfig', 'ZConfig',
'transaction >= 1.5.0', 'transaction >= 1.6.1',
'six', 'six',
'zc.lockfile', 'zc.lockfile',
'zope.interface', 'zope.interface',
......
...@@ -12,12 +12,12 @@ ...@@ -12,12 +12,12 @@
# #
############################################################################## ##############################################################################
"""ZODB transfer activity monitoring """ZODB transfer activity monitoring
"""
$Id$"""
import threading
import time import time
from . import utils
class ActivityMonitor: class ActivityMonitor:
"""ZODB load/store activity monitor """ZODB load/store activity monitor
...@@ -31,7 +31,7 @@ class ActivityMonitor: ...@@ -31,7 +31,7 @@ class ActivityMonitor:
def __init__(self, history_length=3600): def __init__(self, history_length=3600):
self.history_length = history_length # Number of seconds self.history_length = history_length # Number of seconds
self.log = [] # [(time, loads, stores)] self.log = [] # [(time, loads, stores)]
self.trim_lock = threading.Lock() self.trim_lock = utils.Lock()
def closedConnection(self, conn): def closedConnection(self, conn):
log = self.log log = self.log
...@@ -42,7 +42,7 @@ class ActivityMonitor: ...@@ -42,7 +42,7 @@ class ActivityMonitor:
def trim(self, now): def trim(self, now):
self.trim_lock.acquire() self.trim_lock.acquire()
log = self.log log = self.log
cutoff = now - self.history_length cutoff = now - self.history_length
n = 0 n = 0
...@@ -51,7 +51,7 @@ class ActivityMonitor: ...@@ -51,7 +51,7 @@ class ActivityMonitor:
n = n + 1 n = n + 1
if n: if n:
del log[:n] del log[:n]
self.trim_lock.release() self.trim_lock.release()
def setHistoryLength(self, history_length): def setHistoryLength(self, history_length):
......
...@@ -18,7 +18,6 @@ its use is not recommended. It's still here for historical reasons. ...@@ -18,7 +18,6 @@ its use is not recommended. It's still here for historical reasons.
""" """
from __future__ import print_function from __future__ import print_function
import threading
import time import time
import logging import logging
import sys import sys
...@@ -28,10 +27,10 @@ import zope.interface ...@@ -28,10 +27,10 @@ import zope.interface
from persistent.TimeStamp import TimeStamp from persistent.TimeStamp import TimeStamp
import ZODB.interfaces import ZODB.interfaces
from ZODB import POSException from . import POSException, utils
from ZODB.utils import z64, oid_repr, byte_ord, byte_chr from .utils import z64, oid_repr, byte_ord, byte_chr
from ZODB.UndoLogCompatible import UndoLogCompatible from .UndoLogCompatible import UndoLogCompatible
from ZODB._compat import dumps, _protocol, py2_hasattr from ._compat import dumps, _protocol, py2_hasattr
log = logging.getLogger("ZODB.BaseStorage") log = logging.getLogger("ZODB.BaseStorage")
...@@ -85,8 +84,8 @@ class BaseStorage(UndoLogCompatible): ...@@ -85,8 +84,8 @@ class BaseStorage(UndoLogCompatible):
log.debug("create storage %s", self.__name__) log.debug("create storage %s", self.__name__)
# Allocate locks: # Allocate locks:
self._lock = threading.RLock() self._lock = utils.RLock()
self.__commit_lock = threading.Lock() self.__commit_lock = utils.Lock()
# Comment out the following 4 lines to debug locking: # Comment out the following 4 lines to debug locking:
self._lock_acquire = self._lock.acquire self._lock_acquire = self._lock.acquire
...@@ -108,45 +107,6 @@ class BaseStorage(UndoLogCompatible): ...@@ -108,45 +107,6 @@ class BaseStorage(UndoLogCompatible):
else: else:
self._oid = oid self._oid = oid
########################################################################
# The following methods are normally overridden on instances,
# except when debugging:
def _lock_acquire(self, *args):
f = sys._getframe(1)
sys.stdout.write("[la(%s:%s)\n" % (f.f_code.co_filename, f.f_lineno))
sys.stdout.flush()
self._lock.acquire(*args)
sys.stdout.write("la(%s:%s)]\n" % (f.f_code.co_filename, f.f_lineno))
sys.stdout.flush()
def _lock_release(self, *args):
f = sys._getframe(1)
sys.stdout.write("[lr(%s:%s)\n" % (f.f_code.co_filename, f.f_lineno))
sys.stdout.flush()
self._lock.release(*args)
sys.stdout.write("lr(%s:%s)]\n" % (f.f_code.co_filename, f.f_lineno))
sys.stdout.flush()
def _commit_lock_acquire(self, *args):
f = sys._getframe(1)
sys.stdout.write("[ca(%s:%s)\n" % (f.f_code.co_filename, f.f_lineno))
sys.stdout.flush()
self.__commit_lock.acquire(*args)
sys.stdout.write("ca(%s:%s)]\n" % (f.f_code.co_filename, f.f_lineno))
sys.stdout.flush()
def _commit_lock_release(self, *args):
f = sys._getframe(1)
sys.stdout.write("[cr(%s:%s)\n" % (f.f_code.co_filename, f.f_lineno))
sys.stdout.flush()
self.__commit_lock.release(*args)
sys.stdout.write("cr(%s:%s)]\n" % (f.f_code.co_filename, f.f_lineno))
sys.stdout.flush()
#
########################################################################
def sortKey(self): def sortKey(self):
"""Return a string that can be used to sort storage instances. """Return a string that can be used to sort storage instances.
......
This diff is collapsed.
...@@ -13,13 +13,15 @@ ...@@ -13,13 +13,15 @@
############################################################################## ##############################################################################
"""Database objects """Database objects
""" """
from __future__ import print_function
import sys import sys
import threading
import logging import logging
import datetime import datetime
import time import time
import warnings import warnings
from . import utils
from ZODB.broken import find_global from ZODB.broken import find_global
from ZODB.utils import z64 from ZODB.utils import z64
from ZODB.Connection import Connection from ZODB.Connection import Connection
...@@ -179,6 +181,7 @@ class ConnectionPool(AbstractConnectionPool): ...@@ -179,6 +181,7 @@ class ConnectionPool(AbstractConnectionPool):
(available and available[0][0] < threshhold) (available and available[0][0] < threshhold)
): ):
t, c = available.pop(0) t, c = available.pop(0)
assert not c.opened
self.all.remove(c) self.all.remove(c)
c._release_resources() c._release_resources()
...@@ -213,6 +216,7 @@ class ConnectionPool(AbstractConnectionPool): ...@@ -213,6 +216,7 @@ class ConnectionPool(AbstractConnectionPool):
to_remove = () to_remove = ()
for (t, c) in self.available: for (t, c) in self.available:
assert not c.opened
if t < threshhold: if t < threshhold:
to_remove += (c,) to_remove += (c,)
self.all.remove(c) self.all.remove(c)
...@@ -397,15 +401,9 @@ class DB(object): ...@@ -397,15 +401,9 @@ class DB(object):
- `xrefs` - Boolean flag indicating whether implicit cross-database - `xrefs` - Boolean flag indicating whether implicit cross-database
references are allowed references are allowed
""" """
if isinstance(storage, six.string_types):
from ZODB import FileStorage
storage = ZODB.FileStorage.FileStorage(storage, **storage_args)
elif storage is None:
from ZODB import MappingStorage
storage = ZODB.MappingStorage.MappingStorage(**storage_args)
# Allocate lock. # Allocate lock.
x = threading.RLock() x = utils.RLock()
self._a = x.acquire self._a = x.acquire
self._r = x.release self._r = x.release
...@@ -419,12 +417,24 @@ class DB(object): ...@@ -419,12 +417,24 @@ class DB(object):
self._historical_cache_size_bytes = historical_cache_size_bytes self._historical_cache_size_bytes = historical_cache_size_bytes
# Setup storage # Setup storage
if isinstance(storage, six.string_types):
from ZODB import FileStorage
storage = ZODB.FileStorage.FileStorage(storage, **storage_args)
elif storage is None:
from ZODB import MappingStorage
storage = ZODB.MappingStorage.MappingStorage(**storage_args)
else:
assert not storage_args
self.storage = storage self.storage = storage
if IMVCCStorage.providedBy(storage):
self._mvcc_storage = storage
else:
from .mvccadapter import MVCCAdapter
self._mvcc_storage = MVCCAdapter(storage)
self.references = ZODB.serialize.referencesf self.references = ZODB.serialize.referencesf
try:
storage.registerDB(self)
except TypeError:
storage.registerDB(self, None) # Backward compat
if (not hasattr(storage, 'tpc_vote')) and not storage.isReadOnly(): if (not hasattr(storage, 'tpc_vote')) and not storage.isReadOnly():
warnings.warn( warnings.warn(
...@@ -434,12 +444,10 @@ class DB(object): ...@@ -434,12 +444,10 @@ class DB(object):
DeprecationWarning, 2) DeprecationWarning, 2)
storage.tpc_vote = lambda *args: None storage.tpc_vote = lambda *args: None
if IMVCCStorage.providedBy(storage): temp_storage = self._mvcc_storage.new_instance()
temp_storage = storage.new_instance()
else:
temp_storage = storage
try: try:
try: try:
temp_storage.poll_invalidations()
temp_storage.load(z64, '') temp_storage.load(z64, '')
except KeyError: except KeyError:
# Create the database's root in the storage if it doesn't exist # Create the database's root in the storage if it doesn't exist
...@@ -458,8 +466,7 @@ class DB(object): ...@@ -458,8 +466,7 @@ class DB(object):
temp_storage.tpc_vote(t) temp_storage.tpc_vote(t)
temp_storage.tpc_finish(t) temp_storage.tpc_finish(t)
finally: finally:
if IMVCCStorage.providedBy(temp_storage): temp_storage.release()
temp_storage.release()
# Multi-database setup. # Multi-database setup.
if databases is None: if databases is None:
...@@ -559,15 +566,17 @@ class DB(object): ...@@ -559,15 +566,17 @@ class DB(object):
# sys.getrefcount(ob) returns. But, in addition to that, # sys.getrefcount(ob) returns. But, in addition to that,
# the cache holds an extra reference on non-ghost objects, # the cache holds an extra reference on non-ghost objects,
# and we also want to pretend that doesn't exist. # and we also want to pretend that doesn't exist.
# If we have no way to get a refcount, we return False to symbolize # If we have no way to get a refcount, we return False
# that. As opposed to None, this has the advantage of being usable # to symbolize that. As opposed to None, this has the
# as a number (0) in case clients depended on that. # advantage of being usable as a number (0) in case
# clients depended on that.
detail.append({ detail.append({
'conn_no': cn, 'conn_no': cn,
'oid': oid, 'oid': oid,
'id': id, 'id': id,
'klass': "%s%s" % (module, ob.__class__.__name__), 'klass': "%s%s" % (module, ob.__class__.__name__),
'rc': rc(ob) - 3 - (ob._p_changed is not None) if rc else False, 'rc': (rc(ob) - 3 - (ob._p_changed is not None)
if rc else False),
'state': ob._p_changed, 'state': ob._p_changed,
#'references': con.references(oid), #'references': con.references(oid),
}) })
...@@ -632,8 +641,9 @@ class DB(object): ...@@ -632,8 +641,9 @@ class DB(object):
c.afterCompletion = c.newTransaction = c.close = noop c.afterCompletion = c.newTransaction = c.close = noop
c._release_resources() c._release_resources()
self.storage.close() self._mvcc_storage.close()
del self.storage del self.storage
del self._mvcc_storage
def getCacheSize(self): def getCacheSize(self):
return self._cache_size return self._cache_size
...@@ -665,27 +675,6 @@ class DB(object): ...@@ -665,27 +675,6 @@ class DB(object):
def getHistoricalTimeout(self): def getHistoricalTimeout(self):
return self.historical_pool.timeout return self.historical_pool.timeout
def invalidate(self, tid, oids, connection=None, version=''):
"""Invalidate references to a given oid.
This is used to indicate that one of the connections has committed a
change to the object. The connection commiting the change should be
passed in to prevent useless (but harmless) messages to the
connection.
"""
# Storages, esp. ZEO tests, need the version argument still. :-/
assert version==''
# Notify connections.
def inval(c):
if c is not connection:
c.invalidate(tid, oids)
self._connectionMap(inval)
def invalidateCache(self):
"""Invalidate each of the connection caches
"""
self._connectionMap(lambda c: c.invalidateCache())
transform_record_data = untransform_record_data = lambda self, data: data transform_record_data = untransform_record_data = lambda self, data: data
def objectCount(self): def objectCount(self):
...@@ -752,19 +741,17 @@ class DB(object): ...@@ -752,19 +741,17 @@ class DB(object):
result = self.pool.pop() result = self.pool.pop()
assert result is not None assert result is not None
# open the connection.
result.open(transaction_manager)
# A good time to do some cache cleanup. # A good time to do some cache cleanup.
# (note we already have the lock) # (note we already have the lock)
self.pool.availableGC() self.pool.availableGC()
self.historical_pool.availableGC() self.historical_pool.availableGC()
return result
finally: finally:
self._r() self._r()
result.open(transaction_manager)
return result
def connectionDebugInfo(self): def connectionDebugInfo(self):
result = [] result = []
t = time.time() t = time.time()
...@@ -986,16 +973,16 @@ class ContextManager: ...@@ -986,16 +973,16 @@ class ContextManager:
self.tm.abort() self.tm.abort()
self.conn.close() self.conn.close()
resource_counter_lock = threading.Lock() resource_counter_lock = utils.Lock()
resource_counter = 0 resource_counter = 0
class TransactionalUndo(object): class TransactionalUndo(object):
def __init__(self, db, tids): def __init__(self, db, tids):
self._db = db self._db = db
self._storage = db.storage self._storage = getattr(
db._mvcc_storage, 'undo_instance', db._mvcc_storage.new_instance)()
self._tids = tids self._tids = tids
self._oids = set()
def abort(self, transaction): def abort(self, transaction):
pass pass
...@@ -1005,19 +992,13 @@ class TransactionalUndo(object): ...@@ -1005,19 +992,13 @@ class TransactionalUndo(object):
def commit(self, transaction): def commit(self, transaction):
for tid in self._tids: for tid in self._tids:
result = self._storage.undo(tid, transaction) self._storage.undo(tid, transaction)
if result:
self._oids.update(result[1])
def tpc_vote(self, transaction): def tpc_vote(self, transaction):
for oid, _ in self._storage.tpc_vote(transaction) or (): self._storage.tpc_vote(transaction)
self._oids.add(oid)
def tpc_finish(self, transaction): def tpc_finish(self, transaction):
self._storage.tpc_finish( self._storage.tpc_finish(transaction)
transaction,
lambda tid: self._db.invalidate(tid, self._oids)
)
def tpc_abort(self, transaction): def tpc_abort(self, transaction):
self._storage.tpc_abort(transaction) self._storage.tpc_abort(transaction)
......
...@@ -19,11 +19,11 @@ to be layered over a base database. ...@@ -19,11 +19,11 @@ to be layered over a base database.
The base storage must not change. The base storage must not change.
""" """
from __future__ import print_function
import os import os
import random import random
import weakref import weakref
import tempfile import tempfile
import threading
import ZODB.BaseStorage import ZODB.BaseStorage
import ZODB.blob import ZODB.blob
import ZODB.interfaces import ZODB.interfaces
...@@ -72,7 +72,7 @@ class DemoStorage(ConflictResolvingStorage): ...@@ -72,7 +72,7 @@ class DemoStorage(ConflictResolvingStorage):
self._issued_oids = set() self._issued_oids = set()
self._stored_oids = set() self._stored_oids = set()
self._commit_lock = threading.Lock() self._commit_lock = ZODB.utils.Lock()
self._transaction = None self._transaction = None
if name is None: if name is None:
......
...@@ -20,7 +20,6 @@ import contextlib ...@@ -20,7 +20,6 @@ import contextlib
import errno import errno
import logging import logging
import os import os
import threading
import time import time
from struct import pack from struct import pack
from struct import unpack from struct import unpack
...@@ -31,6 +30,8 @@ from zc.lockfile import LockFile ...@@ -31,6 +30,8 @@ from zc.lockfile import LockFile
from zope.interface import alsoProvides from zope.interface import alsoProvides
from zope.interface import implementer from zope.interface import implementer
from .. import utils
from ZODB.blob import BlobStorageMixin from ZODB.blob import BlobStorageMixin
from ZODB.blob import link_or_copy from ZODB.blob import link_or_copy
from ZODB.blob import remove_committed from ZODB.blob import remove_committed
...@@ -2047,7 +2048,7 @@ class FilePool: ...@@ -2047,7 +2048,7 @@ class FilePool:
self.name = file_name self.name = file_name
self._files = [] self._files = []
self._out = [] self._out = []
self._cond = threading.Condition() self._cond = utils.Condition()
@contextlib.contextmanager @contextlib.contextmanager
def write_lock(self): def write_lock(self):
......
...@@ -19,7 +19,6 @@ storage without distracting storage details. ...@@ -19,7 +19,6 @@ storage without distracting storage details.
import BTrees import BTrees
import time import time
import threading
import ZODB.BaseStorage import ZODB.BaseStorage
import ZODB.interfaces import ZODB.interfaces
import ZODB.POSException import ZODB.POSException
...@@ -40,10 +39,10 @@ class MappingStorage(object): ...@@ -40,10 +39,10 @@ class MappingStorage(object):
self._transactions = BTrees.OOBTree.OOBTree() # {tid->TransactionRecord} self._transactions = BTrees.OOBTree.OOBTree() # {tid->TransactionRecord}
self._ltid = ZODB.utils.z64 self._ltid = ZODB.utils.z64
self._last_pack = None self._last_pack = None
_lock = threading.RLock() _lock = ZODB.utils.RLock()
self._lock_acquire = _lock.acquire self._lock_acquire = _lock.acquire
self._lock_release = _lock.release self._lock_release = _lock.release
self._commit_lock = threading.Lock() self._commit_lock = ZODB.utils.Lock()
self._opened = True self._opened = True
self._transaction = None self._transaction = None
self._oid = 0 self._oid = 0
......
...@@ -273,8 +273,6 @@ test. ...@@ -273,8 +273,6 @@ test.
>>> conn.root()['first']['count'] += 1 >>> conn.root()['first']['count'] += 1
>>> conn.root()['third'] = persistent.mapping.PersistentMapping() >>> conn.root()['third'] = persistent.mapping.PersistentMapping()
>>> transaction.commit() >>> transaction.commit()
>>> len(historical_conn._invalidated)
0
>>> historical_conn.close() >>> historical_conn.close()
Note that if you try to open an historical connection to a time in the future, Note that if you try to open an historical connection to a time in the future,
......
...@@ -201,21 +201,6 @@ class IConnection(Interface): ...@@ -201,21 +201,6 @@ class IConnection(Interface):
def isReadOnly(): def isReadOnly():
"""Returns True if the storage for this connection is read only.""" """Returns True if the storage for this connection is read only."""
def invalidate(tid, oids):
"""Notify the Connection that transaction 'tid' invalidated oids.
When the next transaction boundary is reached, objects will be
invalidated. If any of the invalidated objects are accessed by the
current transaction, the revision written before Connection.tid will be
used.
The DB calls this method, even when the Connection is closed.
Parameters:
tid: the storage-level id of the transaction that committed
oids: oids is an iterable of oids.
"""
def root(): def root():
"""Return the database root object. """Return the database root object.
...@@ -278,14 +263,6 @@ class IConnection(Interface): ...@@ -278,14 +263,6 @@ class IConnection(Interface):
If clear is True, reset the counters. If clear is True, reset the counters.
""" """
def invalidateCache():
"""Invalidate the connection cache
This invalidates *all* objects in the cache. If the connection
is open, subsequent reads will fail until a new transaction
begins or until the connection is reopened. begins or until the connection is reopened.
"""
def readCurrent(obj): def readCurrent(obj):
"""Make sure an object being read is current """Make sure an object being read is current
...@@ -577,6 +554,20 @@ class IStorage(Interface): ...@@ -577,6 +554,20 @@ class IStorage(Interface):
def load(oid, version): def load(oid, version):
"""Load data for an object id """Load data for an object id
NOTE: This method is deprecated and may be removed in the
future. It is no longer used by ZODB, although it may still
be used in some tests or scripts. Previously, there was a
requirement that load results be properly ordered with
invalidations so that at any point in time, clients have a
consistent view of what version of an object is current. This
restriction has been relaxed and some storages will be
simplified as a result of the removal of this requirement.
An alternative to calling load is calling loadBefore passing
ZODB.utils.maxtid::
store.loadBefore(oid, ZODB.utils.maxtid)
The version argument should always be an empty string. It The version argument should always be an empty string. It
exists solely for backward compatibility with older storage exists solely for backward compatibility with older storage
implementations. implementations.
...@@ -611,19 +602,6 @@ class IStorage(Interface): ...@@ -611,19 +602,6 @@ class IStorage(Interface):
otherwise, POSKeyError is raised. otherwise, POSKeyError is raised.
""" """
# The following two methods are effectively part of the interface,
# as they are generally needed when one storage wraps
# another. This deserves some thought, at probably debate, before
# adding them.
#
# def _lock_acquire():
# """Acquire the storage lock
# """
# def _lock_release():
# """Release the storage lock
# """
def new_oid(): def new_oid():
"""Allocate a new object id. """Allocate a new object id.
...@@ -661,11 +639,7 @@ class IStorage(Interface): ...@@ -661,11 +639,7 @@ class IStorage(Interface):
The passed object is a wrapper object that provides an upcall The passed object is a wrapper object that provides an upcall
interface to support composition. interface to support composition.
Note that, for historical reasons, an implementation may Note that, for historical reasons, this is called registerDB rather
require a second argument, however, if required, the None will
be passed as the second argument.
Also, for historical reasons, this is called registerDB rather
than register_wrapper. than register_wrapper.
""" """
...@@ -804,7 +778,6 @@ class IStorage(Interface): ...@@ -804,7 +778,6 @@ class IStorage(Interface):
""" """
class IStorageRestoreable(IStorage): class IStorageRestoreable(IStorage):
"""Copying Transactions """Copying Transactions
...@@ -1096,11 +1069,9 @@ class IMVCCStorage(IStorage): ...@@ -1096,11 +1069,9 @@ class IMVCCStorage(IStorage):
""" """
def release(): def release():
"""Release all persistent sessions used by this storage instance. """Release resources held by the storage instance.
After this call, the storage instance can still be used; The storage instance won't be used again after this call.
calling methods that use persistent sessions will cause the
persistent sessions to be reopened.
""" """
def poll_invalidations(): def poll_invalidations():
......
"""Adapt IStorage objects to IMVCCStorage
This is a largely internal implementation of ZODB, especially DB and
Connection. It takes the MVCC implementation involving invalidations
and start time and moves it into a storage adapter. This allows ZODB
to treat RelStorage and other storages in pretty much the same way and
also simplifies the implementation of the DB and Connection classes.
"""
import zope.interface
from . import interfaces, serialize, POSException
from .utils import p64, u64, Lock
class Base(object):
    """Shared plumbing for the MVCC adapter objects.

    Delegates a fixed whitelist of read-only storage methods to the
    wrapped storage, caching each bound method on the instance on
    first access.
    """

    _copy_methods = (
        'getName', 'getSize', 'history', 'lastTransaction', 'sortKey',
        'loadBlob', 'openCommittedBlobFile',
        'isReadOnly', 'supportsUndo', 'undoLog', 'undoInfo',
        'temporaryDirectory',
        )

    def __init__(self, storage):
        self._storage = storage
        # Advertise blob support only when the wrapped storage has it.
        if interfaces.IBlobStorage.providedBy(storage):
            zope.interface.alsoProvides(self, interfaces.IBlobStorage)

    def __getattr__(self, name):
        # Only reached when normal attribute lookup fails.  Forward a
        # whitelisted method and memoize it on the instance so later
        # lookups bypass this hook entirely.
        if name in self._copy_methods and hasattr(self._storage, name):
            method = getattr(self._storage, name)
            setattr(self, name, method)
            return method
        raise AttributeError(name)

    def __len__(self):
        return len(self._storage)
class MVCCAdapter(Base):
    """Adapt a (non-MVCC) IStorage to the IMVCCStorage interface.

    Wraps the shared underlying storage and hands out per-use views via
    new_instance() / before_instance() / undo_instance().  Invalidations
    are fanned out to every registered instance.
    """

    def __init__(self, storage):
        Base.__init__(self, storage)
        self._instances = set()  # live MVCCAdapterInstance objects
        self._lock = Lock()      # guards self._instances
        # Register for invalidate()/invalidateCache() upcalls when the
        # storage supports registration.
        if hasattr(storage, 'registerDB'):
            storage.registerDB(self)

    def new_instance(self):
        # Create and track a new instance so it receives invalidations.
        instance = MVCCAdapterInstance(self)
        with self._lock:
            self._instances.add(instance)
        return instance

    def before_instance(self, before=None):
        # Read-only view of the state strictly before `before`.
        return HistoricalStorageAdapter(self._storage, before)

    def undo_instance(self):
        return UndoAdapterInstance(self)

    def _release(self, instance):
        # Called by an instance's release(); stop sending it invalidations.
        with self._lock:
            self._instances.remove(instance)

    closed = False  # class-level default; flipped once by close()

    def close(self):
        if not self.closed:
            self.closed = True
            self._storage.close()
            # Drop references so further use fails fast.
            del self._instances
            del self._storage

    def invalidateCache(self):
        # Upcall from the storage: all cached state is suspect.
        with self._lock:
            for instance in self._instances:
                instance._invalidateCache()

    def invalidate(self, transaction_id, oids, version=''):
        # Upcall from the storage after an external commit.
        # transaction_id and version are unused here; version exists for
        # backward compatibility with older storages.
        with self._lock:
            for instance in self._instances:
                instance._invalidate(oids)

    def _invalidate_finish(self, oids, committing_instance):
        # Fan out invalidations after a local commit, skipping the
        # instance that performed the commit (it already knows).
        with self._lock:
            for instance in self._instances:
                if instance is not committing_instance:
                    instance._invalidate(oids)

    references = serialize.referencesf

    # Record-data transforms are identity by default; wrappers may override.
    transform_record_data = untransform_record_data = lambda self, data: data

    def pack(self, pack_time, referencesf):
        return self._storage.pack(pack_time, referencesf)
class MVCCAdapterInstance(Base):
    """Per-connection view of the storage with its own start time.

    load() answers with data as of ``self._start``, which is advanced by
    poll_invalidations(); invalidated oids arrive via _invalidate().
    """

    _copy_methods = Base._copy_methods + (
        'loadSerial', 'new_oid', 'tpc_vote',
        'checkCurrentSerialInTransaction', 'tpc_abort',
        )

    def __init__(self, base):
        self._base = base
        Base.__init__(self, base._storage)
        self._lock = Lock()          # guards self._invalidations
        self._invalidations = set()  # oids invalidated since last poll;
                                     # None after a full cache invalidation
        self._start = None # Transaction start time
        # Not every storage has sync(); fall back to a no-op.
        self._sync = getattr(self._storage, 'sync', lambda : None)

    def release(self):
        # Deregister from the adapter; invalidations stop arriving.
        self._base._release(self)

    close = release

    def _invalidateCache(self):
        # Mark everything invalid; the next poll will report None.
        with self._lock:
            self._invalidations = None

    def _invalidate(self, oids):
        with self._lock:
            try:
                self._invalidations.update(oids)
            except AttributeError:
                # _invalidations is None after a full cache invalidation;
                # recording individual oids is pointless then.
                if self._invalidations is not None:
                    raise

    def sync(self, force=True):
        if force:
            self._sync()

    def poll_invalidations(self):
        # New start time: just past the last committed transaction.
        # NOTE(review): lastTransaction() is read before taking the lock;
        # presumably an invalidation racing in here is simply reported on
        # this or the next poll -- confirm.
        self._start = p64(u64(self._storage.lastTransaction()) + 1)
        with self._lock:
            if self._invalidations is None:
                self._invalidations = set()
                return None  # None tells the caller to drop its whole cache
            else:
                result = list(self._invalidations)
                self._invalidations.clear()
                return result

    def load(self, oid, version=''):
        assert self._start is not None
        r = self._storage.loadBefore(oid, self._start)
        if r is None:
            raise POSException.ReadConflictError(repr(oid))
        return r[:2]  # first two fields (data, serial); drop the rest

    _modified = None # Used to keep track of oids modified within a
                     # transaction, so we can invalidate them later.

    def tpc_begin(self, transaction):
        self._storage.tpc_begin(transaction)
        self._modified = set()

    def store(self, oid, serial, data, version, transaction):
        s = self._storage.store(oid, serial, data, version, transaction)
        self._modified.add(oid)
        return s

    def storeBlob(self, oid, serial, data, blobfilename, version, transaction):
        # Note: an empty string is passed for version here.
        s = self._storage.storeBlob(
            oid, serial, data, blobfilename, '', transaction)
        self._modified.add(oid)
        return s

    def tpc_finish(self, transaction, func = lambda tid: None):
        # Capture and reset the modified set before finishing, then
        # notify sibling instances from within the storage's finish
        # callback, followed by the caller's callback.
        modified = self._modified
        self._modified = None

        def invalidate_finish(tid):
            self._base._invalidate_finish(modified, self)
            func(tid)

        self._storage.tpc_finish(transaction, invalidate_finish)
def read_only_writer(self, *args, **kw):
    """Stand-in for write methods on read-only adapters: always refuses."""
    raise POSException.ReadOnlyError
class HistoricalStorageAdapter(Base):
    """Expose a storage as a read-only view of a historical moment.

    Reads are answered as of the ``before`` tid; every write path is
    routed through read_only_writer and refused.
    """

    _copy_methods = Base._copy_methods + (
        'loadSerial', 'tpc_begin', 'tpc_finish', 'tpc_abort', 'tpc_vote',
        'checkCurrentSerialInTransaction',
        )

    def __init__(self, storage, before=None):
        Base.__init__(self, storage)
        self._before = before

    def isReadOnly(self):
        return True

    def supportsUndo(self):
        return False

    def sync(self, force=True):
        pass

    def poll_invalidations(self):
        # A fixed historical view never sees new commits.
        return []

    def release(self):
        pass

    close = release

    new_oid = pack = store = read_only_writer

    def load(self, oid, version=''):
        data = self._storage.loadBefore(oid, self._before)
        if data is None:
            raise POSException.POSKeyError(oid)
        return data[:2]
class UndoAdapterInstance(Base):
    """Storage view used to process undo.

    Accumulates the oids affected by undone transactions so sibling
    instances can be invalidated when the undo commits.
    """

    _copy_methods = Base._copy_methods + (
        'tpc_abort',
        )

    def __init__(self, base):
        self._base = base
        Base.__init__(self, base._storage)

    def release(self):
        pass

    close = release

    def tpc_begin(self, transaction):
        self._storage.tpc_begin(transaction)
        self._undone = set()

    def undo(self, transaction_id, transaction):
        result = self._storage.undo(transaction_id, transaction)
        if result:
            # result[1] holds the oids touched by the undo.
            self._undone.update(result[1])
        return result

    def tpc_vote(self, transaction):
        vote_result = self._storage.tpc_vote(transaction)
        if vote_result:
            # Some storages report (oid, serial) pairs from vote;
            # fold those oids in as well.
            self._undone.update(oid for oid, serial in vote_result)

    def tpc_finish(self, transaction, func=lambda tid: None):
        def notify_finish(tid):
            # No committing instance to exempt: invalidate everywhere,
            # then run the caller's callback.
            self._base._invalidate_finish(self._undone, None)
            func(tid)

        self._storage.tpc_finish(transaction, notify_finish)
...@@ -29,6 +29,8 @@ import transaction ...@@ -29,6 +29,8 @@ import transaction
import zope.interface import zope.interface
import zope.interface.verify import zope.interface.verify
from .. import utils
ZERO = b'\0'*8 ZERO = b'\0'*8
class BasicStorage: class BasicStorage:
...@@ -345,7 +347,7 @@ class BasicStorage: ...@@ -345,7 +347,7 @@ class BasicStorage:
results = {} results = {}
started.wait() started.wait()
attempts = [] attempts = []
attempts_cond = threading.Condition() attempts_cond = utils.Condition()
def update_attempts(): def update_attempts():
with attempts_cond: with attempts_cond:
......
...@@ -65,6 +65,7 @@ class ZODBClientThread(TestThread): ...@@ -65,6 +65,7 @@ class ZODBClientThread(TestThread):
for i in range(self.commits): for i in range(self.commits):
self.commit(d, i) self.commit(d, i)
self.test.assertEqual(sorted(d.keys()), list(range(self.commits))) self.test.assertEqual(sorted(d.keys()), list(range(self.commits)))
conn.close()
def commit(self, d, num): def commit(self, d, num):
d[num] = time.time() d[num] = time.time()
......
...@@ -46,6 +46,8 @@ class MVCCMappingStorage(MappingStorage): ...@@ -46,6 +46,8 @@ class MVCCMappingStorage(MappingStorage):
inst._commit_lock = self._commit_lock inst._commit_lock = self._commit_lock
inst.new_oid = self.new_oid inst.new_oid = self.new_oid
inst.pack = self.pack inst.pack = self.pack
inst.loadBefore = self.loadBefore
inst._ltid = self._ltid
inst._main_lock_acquire = self._lock_acquire inst._main_lock_acquire = self._lock_acquire
inst._main_lock_release = self._lock_release inst._main_lock_release = self._lock_release
return inst return inst
...@@ -73,11 +75,10 @@ class MVCCMappingStorage(MappingStorage): ...@@ -73,11 +75,10 @@ class MVCCMappingStorage(MappingStorage):
# prevent changes to _transactions and _data during analysis # prevent changes to _transactions and _data during analysis
self._main_lock_acquire() self._main_lock_acquire()
try: try:
if self._transactions: if self._transactions:
new_tid = self._transactions.maxKey() new_tid = self._transactions.maxKey()
else: else:
new_tid = b'' new_tid = ZODB.utils.z64
# Copy the current data into a snapshot. This is obviously # Copy the current data into a snapshot. This is obviously
# very inefficient for large storages, but it's good for # very inefficient for large storages, but it's good for
...@@ -112,7 +113,7 @@ class MVCCMappingStorage(MappingStorage): ...@@ -112,7 +113,7 @@ class MVCCMappingStorage(MappingStorage):
finally: finally:
self._main_lock_release() self._main_lock_release()
self._polled_tid = new_tid self._polled_tid = self._ltid = new_tid
return list(changed_oids) return list(changed_oids)
def tpc_finish(self, transaction, func = lambda tid: None): def tpc_finish(self, transaction, func = lambda tid: None):
......
...@@ -25,7 +25,8 @@ from ZODB.serialize import referencesf ...@@ -25,7 +25,8 @@ from ZODB.serialize import referencesf
from ZODB.tests.MinPO import MinPO from ZODB.tests.MinPO import MinPO
from ZODB.tests.MTStorage import TestThread from ZODB.tests.MTStorage import TestThread
from ZODB.tests.StorageTestBase import snooze from ZODB.tests.StorageTestBase import snooze
from ZODB._compat import loads, PersistentPickler, Pickler, Unpickler, BytesIO, _protocol from ZODB._compat import (loads, PersistentPickler, Pickler, Unpickler,
BytesIO, _protocol)
import transaction import transaction
import ZODB.interfaces import ZODB.interfaces
import ZODB.tests.util import ZODB.tests.util
...@@ -270,6 +271,8 @@ class PackableStorage(PackableStorageBase): ...@@ -270,6 +271,8 @@ class PackableStorage(PackableStorageBase):
self._sanity_check() self._sanity_check()
db.close()
def checkPackWhileWriting(self): def checkPackWhileWriting(self):
self._PackWhileWriting(pack_now=False) self._PackWhileWriting(pack_now=False)
...@@ -312,6 +315,8 @@ class PackableStorage(PackableStorageBase): ...@@ -312,6 +315,8 @@ class PackableStorage(PackableStorageBase):
self._sanity_check() self._sanity_check()
db.close()
def checkPackWithMultiDatabaseReferences(self): def checkPackWithMultiDatabaseReferences(self):
databases = {} databases = {}
db = DB(self._storage, databases=databases, database_name='') db = DB(self._storage, databases=databases, database_name='')
...@@ -327,6 +332,9 @@ class PackableStorage(PackableStorageBase): ...@@ -327,6 +332,9 @@ class PackableStorage(PackableStorageBase):
db.pack(time.time()+1) db.pack(time.time()+1)
# some valid storages always return 0 for len() # some valid storages always return 0 for len()
self.assertTrue(len(self._storage) in (0, 1)) self.assertTrue(len(self._storage) in (0, 1))
conn.close()
otherdb.close()
db.close()
def checkPackAllRevisions(self): def checkPackAllRevisions(self):
self._initroot() self._initroot()
...@@ -718,7 +726,7 @@ class ClientThread(TestThread): ...@@ -718,7 +726,7 @@ class ClientThread(TestThread):
def __init__(self, db, choices, loop_trip, timer, thread_id): def __init__(self, db, choices, loop_trip, timer, thread_id):
TestThread.__init__(self) TestThread.__init__(self)
self.root = db.open().root() self.db = db
self.choices = choices self.choices = choices
self.loop_trip = loop_trip self.loop_trip = loop_trip
self.millis = timer.elapsed_millis self.millis = timer.elapsed_millis
...@@ -737,6 +745,7 @@ class ClientThread(TestThread): ...@@ -737,6 +745,7 @@ class ClientThread(TestThread):
def runtest(self): def runtest(self):
from random import choice from random import choice
conn = self.db.open()
for j in range(self.loop_trip): for j in range(self.loop_trip):
assign_worked = False assign_worked = False
...@@ -745,7 +754,7 @@ class ClientThread(TestThread): ...@@ -745,7 +754,7 @@ class ClientThread(TestThread):
try: try:
index = choice(self.choices) index = choice(self.choices)
alist.extend([self.millis(), index]) alist.extend([self.millis(), index])
self.root[index].value = MinPO(j) conn.root()[index].value = MinPO(j)
assign_worked = True assign_worked = True
transaction.commit() transaction.commit()
alist.append(self.millis()) alist.append(self.millis())
...@@ -756,6 +765,8 @@ class ClientThread(TestThread): ...@@ -756,6 +765,8 @@ class ClientThread(TestThread):
transaction.abort() transaction.abort()
alist.append(assign_worked) alist.append(assign_worked)
conn.close()
class ElapsedTimer: class ElapsedTimer:
def __init__(self, start_time): def __init__(self, start_time):
self.start_time = start_time self.start_time = start_time
...@@ -776,5 +787,5 @@ def IExternalGC_suite(factory): ...@@ -776,5 +787,5 @@ def IExternalGC_suite(factory):
return doctest.DocFileSuite( return doctest.DocFileSuite(
'IExternalGC.test', 'IExternalGC.test',
setUp=setup, tearDown=zope.testing.setupstack.tearDown, setUp=setup, tearDown=ZODB.tests.util.tearDown,
checker=ZODB.tests.util.checker) checker=ZODB.tests.util.checker)
...@@ -23,7 +23,7 @@ import sys ...@@ -23,7 +23,7 @@ import sys
import time import time
import transaction import transaction
from ZODB.utils import u64 from ZODB.utils import u64, z64
from ZODB.tests.MinPO import MinPO from ZODB.tests.MinPO import MinPO
from ZODB._compat import PersistentPickler, Unpickler, BytesIO, _protocol from ZODB._compat import PersistentPickler, Unpickler, BytesIO, _protocol
import ZODB.tests.util import ZODB.tests.util
...@@ -153,8 +153,8 @@ class StorageTestBase(ZODB.tests.util.TestCase): ...@@ -153,8 +153,8 @@ class StorageTestBase(ZODB.tests.util.TestCase):
self._storage.close() self._storage.close()
def tearDown(self): def tearDown(self):
self._close()
ZODB.tests.util.TestCase.tearDown(self) ZODB.tests.util.TestCase.tearDown(self)
self._close()
def _dostore(self, oid=None, revid=None, data=None, def _dostore(self, oid=None, revid=None, data=None,
already_pickled=0, user=None, description=None): already_pickled=0, user=None, description=None):
......
...@@ -74,7 +74,7 @@ You can't put blobs into a database that has uses a Non-Blob-Storage, though: ...@@ -74,7 +74,7 @@ You can't put blobs into a database that has uses a Non-Blob-Storage, though:
>>> transaction2.commit() # doctest: +ELLIPSIS >>> transaction2.commit() # doctest: +ELLIPSIS
Traceback (most recent call last): Traceback (most recent call last):
... ...
Unsupported: Storing Blobs in <ZODB.MappingStorage.MappingStorage object at ...> is not supported. Unsupported: Storing Blobs in ...
>>> transaction2.abort() >>> transaction2.abort()
>>> connection2.close() >>> connection2.close()
......
...@@ -130,4 +130,4 @@ revision as well as the entire directory: ...@@ -130,4 +130,4 @@ revision as well as the entire directory:
Clean up our blob directory and database: Clean up our blob directory and database:
>>> blob_storage.close() >>> database.close()
...@@ -49,3 +49,5 @@ writing and expect the file to be in the blob temporary directory:: ...@@ -49,3 +49,5 @@ writing and expect the file to be in the blob temporary directory::
True True
>>> w.close() >>> w.close()
>>> database.close()
...@@ -160,3 +160,5 @@ knowledge that the underlying storage's pack method is also called: ...@@ -160,3 +160,5 @@ knowledge that the underlying storage's pack method is also called:
>>> blob_storage._blobs_pack_is_in_progress >>> blob_storage._blobs_pack_is_in_progress
False False
>>> base_storage.pack = base_pack >>> base_storage.pack = base_pack
>>> database.close()
...@@ -61,7 +61,7 @@ While it's boring, it's important to verify that the same relationships ...@@ -61,7 +61,7 @@ While it's boring, it's important to verify that the same relationships
hold if the default pool size is overridden. hold if the default pool size is overridden.
>>> handler.clear() >>> handler.clear()
>>> st.close() >>> db.close()
>>> st = Storage() >>> st = Storage()
>>> PS = 2 # smaller pool size >>> PS = 2 # smaller pool size
>>> db = DB(st, pool_size=PS) >>> db = DB(st, pool_size=PS)
...@@ -117,7 +117,7 @@ We can change the pool size on the fly: ...@@ -117,7 +117,7 @@ We can change the pool size on the fly:
Enough of that. Enough of that.
>>> handler.clear() >>> handler.clear()
>>> st.close() >>> db.close()
More interesting is the stack-like nature of connection reuse. So long as More interesting is the stack-like nature of connection reuse. So long as
we keep opening new connections, and keep them alive, all connections we keep opening new connections, and keep them alive, all connections
...@@ -256,7 +256,7 @@ Nothing in that last block should have logged any msgs: ...@@ -256,7 +256,7 @@ Nothing in that last block should have logged any msgs:
If "too many" connections are open, then closing one may kick an older If "too many" connections are open, then closing one may kick an older
closed one out of the available connection stack. closed one out of the available connection stack.
>>> st.close() >>> db.close()
>>> st = Storage() >>> st = Storage()
>>> db = DB(st, pool_size=3) >>> db = DB(st, pool_size=3)
>>> conns = [db.open() for dummy in range(6)] >>> conns = [db.open() for dummy in range(6)]
...@@ -324,7 +324,7 @@ gc to reclaim the Connection and its cache eventually works, but that can ...@@ -324,7 +324,7 @@ gc to reclaim the Connection and its cache eventually works, but that can
take "a long time" and caches can hold on to many objects, and limited take "a long time" and caches can hold on to many objects, and limited
resources (like RDB connections), for the duration. resources (like RDB connections), for the duration.
>>> st.close() >>> db.close()
>>> st = Storage() >>> st = Storage()
>>> db = DB(st, pool_size=2) >>> db = DB(st, pool_size=2)
>>> conn0 = db.open() >>> conn0 = db.open()
......
...@@ -25,26 +25,43 @@ Make a change locally: ...@@ -25,26 +25,43 @@ Make a change locally:
>>> rt = cn.root() >>> rt = cn.root()
>>> rt['a'] = 1 >>> rt['a'] = 1
Sync should not have been called yet. Sync isn't called when a connectiin is opened, even though that
implicitly starts a new transaction:
>>> st.sync_called # False before 3.4 >>> st.sync_called
False False
Sync is only called when we explicitly start a new transaction:
>>> _ = transaction.begin()
>>> st.sync_called
True
>>> st.sync_called = False
BTW, calling ``sync()`` on a connectin starts a new transaction, which
caused ``sync()`` to be called on the storage:
``sync()`` is called by the Connection's ``afterCompletion()`` hook after the >>> cn.sync()
commit completes. >>> st.sync_called
True
>>> st.sync_called = False
``sync()`` is not called by the Connection's ``afterCompletion()``
hook after the commit completes, because we'll sunc when a new
transaction begins:
>>> transaction.commit() >>> transaction.commit()
>>> st.sync_called # False before 3.4 >>> st.sync_called # False before 3.4
True False
``sync()`` is also called by the ``afterCompletion()`` hook after an abort. ``sync()`` is also not called by the ``afterCompletion()`` hook after an abort.
>>> st.sync_called = False >>> st.sync_called = False
>>> rt['b'] = 2 >>> rt['b'] = 2
>>> transaction.abort() >>> transaction.abort()
>>> st.sync_called # False before 3.4 >>> st.sync_called # False before 3.4
True False
And ``sync()`` is called whenever we explicitly start a new transaction, via And ``sync()`` is called whenever we explicitly start a new transaction, via
the ``newTransaction()`` hook. the ``newTransaction()`` hook.
...@@ -62,48 +79,14 @@ traceback then ;-) ...@@ -62,48 +79,14 @@ traceback then ;-)
>>> cn.close() >>> cn.close()
One more, very obscure. It was the case that if the first action a new As a special case, if a synchronizer registers while a transaction is
threaded transaction manager saw was a ``begin()`` call, then synchronizers in flight, then newTransaction and this the storage sync method is
registered after that in the same transaction weren't communicated to the called:
`Transaction` object, and so the synchronizers' ``afterCompletion()`` hooks
weren't called when the transaction commited. None of the test suites
(ZODB's, Zope 2.8's, or Zope3's) caught that, but apparently Zope 3 takes this
path at some point when serving pages.
UPDATE: transaction 1.6.1 introduced a change that causes
newTransaction to be called when a synchronizer registeres with a
transaction manager with an active transaction.
>>> tm = transaction.ThreadTransactionManager()
>>> st.sync_called = False
>>> dummy = tm.begin() # we're doing this _before_ opening a connection
>>> cn = db.open(transaction_manager=tm)
>>> rt = cn.root() # make a change
>>> rt['c'] = 3
>>> st.sync_called
True
>>> st.sync_called = False
Now ensure that ``cn.afterCompletion() -> st.sync()`` gets called by commit
despite that the `Connection` registered after the transaction began:
>>> tm.commit()
>>> st.sync_called
True
And try the same thing with a non-threaded transaction manager:
>>> cn.close()
>>> tm = transaction.TransactionManager() >>> tm = transaction.TransactionManager()
>>> st.sync_called = False >>> st.sync_called = False
>>> dummy = tm.begin() # we're doing this _before_ opening a connection >>> _ = tm.begin() # we're doing this _before_ opening a connection
>>> cn = db.open(transaction_manager=tm) >>> cn = db.open(transaction_manager=tm)
>>> rt = cn.root() # make a change
>>> rt['d'] = 4
>>> st.sync_called
True
>>> st.sync_called = False
>>> tm.commit()
>>> st.sync_called >>> st.sync_called
True True
......
...@@ -22,11 +22,13 @@ import unittest ...@@ -22,11 +22,13 @@ import unittest
import transaction import transaction
import ZODB.tests.util import ZODB.tests.util
from ZODB.config import databaseFromString from ZODB.config import databaseFromString
from ZODB.utils import p64 from ZODB.utils import p64, u64, z64
from persistent import Persistent from persistent import Persistent
from zope.interface.verify import verifyObject from zope.interface.verify import verifyObject
from zope.testing import loggingsupport, renormalizing from zope.testing import loggingsupport, renormalizing
from .. import mvccadapter
checker = renormalizing.RENormalizing([ checker = renormalizing.RENormalizing([
# Python 3 bytes add a "b". # Python 3 bytes add a "b".
(re.compile("b('.*?')"), r"\1"), (re.compile("b('.*?')"), r"\1"),
...@@ -154,7 +156,8 @@ class ConnectionDotAdd(ZODB.tests.util.TestCase): ...@@ -154,7 +156,8 @@ class ConnectionDotAdd(ZODB.tests.util.TestCase):
self.datamgr.add(obj) self.datamgr.add(obj)
self.datamgr.tpc_begin(self.transaction) self.datamgr.tpc_begin(self.transaction)
self.datamgr.tpc_finish(self.transaction) self.datamgr.tpc_finish(self.transaction)
self.assertTrue(obj._p_oid not in self.datamgr._storage._stored) self.assertTrue(obj._p_oid not in
self.datamgr._storage._storage._stored)
def test__resetCacheResetsReader(self): def test__resetCacheResetsReader(self):
# https://bugs.launchpad.net/zodb/+bug/142667 # https://bugs.launchpad.net/zodb/+bug/142667
...@@ -435,8 +438,11 @@ class UserMethodTests(unittest.TestCase): ...@@ -435,8 +438,11 @@ class UserMethodTests(unittest.TestCase):
... ...
ConnectionStateError: The database connection is closed ConnectionStateError: The database connection is closed
>>> db.close()
An expedient way to create a read-only storage: An expedient way to create a read-only storage:
>>> db = databaseFromString("<zodb>\n<mappingstorage/>\n</zodb>")
>>> db.storage.isReadOnly = lambda: True >>> db.storage.isReadOnly = lambda: True
>>> cn = db.open() >>> cn = db.open()
>>> cn.isReadOnly() >>> cn.isReadOnly()
...@@ -510,7 +516,9 @@ class InvalidationTests(unittest.TestCase): ...@@ -510,7 +516,9 @@ class InvalidationTests(unittest.TestCase):
they have the expected effect. they have the expected effect.
>>> db = databaseFromString("<zodb>\n<mappingstorage/>\n</zodb>") >>> db = databaseFromString("<zodb>\n<mappingstorage/>\n</zodb>")
>>> mvcc_storage = db._mvcc_storage
>>> cn = db.open() >>> cn = db.open()
>>> mvcc_instance = cn._storage
>>> p1 = Persistent() >>> p1 = Persistent()
>>> p2 = Persistent() >>> p2 = Persistent()
>>> p3 = Persistent() >>> p3 = Persistent()
...@@ -521,22 +529,19 @@ class InvalidationTests(unittest.TestCase): ...@@ -521,22 +529,19 @@ class InvalidationTests(unittest.TestCase):
Transaction ids are 8-byte strings, just like oids; p64() will Transaction ids are 8-byte strings, just like oids; p64() will
create one from an int. create one from an int.
>>> cn.invalidate(p64(1), {p1._p_oid: 1}) >>> mvcc_storage.invalidate(p64(1), {p1._p_oid: 1})
>>> cn._txn_time
'\x00\x00\x00\x00\x00\x00\x00\x01'
>>> p1._p_oid in cn._invalidated
True
>>> p2._p_oid in cn._invalidated
False
>>> cn.invalidate(p64(10), {p2._p_oid: 1, p64(76): 1}) Transaction start times are based on storage's last
>>> cn._txn_time transaction. (Previousely, they were based on the first
'\x00\x00\x00\x00\x00\x00\x00\x01' invalidation seen in a transaction.)
>>> p1._p_oid in cn._invalidated
>>> mvcc_instance.poll_invalidations() == [p1._p_oid]
True True
>>> p2._p_oid in cn._invalidated >>> mvcc_instance._start == p64(u64(db.storage.lastTransaction()) + 1)
True True
>>> mvcc_storage.invalidate(p64(10), {p2._p_oid: 1, p64(76): 1})
Calling invalidate() doesn't affect the object state until Calling invalidate() doesn't affect the object state until
a transaction boundary. a transaction boundary.
...@@ -552,23 +557,24 @@ class InvalidationTests(unittest.TestCase): ...@@ -552,23 +557,24 @@ class InvalidationTests(unittest.TestCase):
>>> cn.sync() >>> cn.sync()
>>> p1._p_state >>> p1._p_state
-1 0
>>> p2._p_state >>> p2._p_state
-1 -1
>>> p3._p_state >>> p3._p_state
0 0
>>> cn._invalidated
set([])
>>> db.close()
""" """
def doctest_invalidateCache(): def doctest_invalidateCache():
"""The invalidateCache method invalidates a connection's cache. It also """The invalidateCache method invalidates a connection's cache.
prevents reads until the end of a transaction::
It also prevents reads until the end of a transaction::
>>> from ZODB.tests.util import DB >>> from ZODB.tests.util import DB
>>> import transaction >>> import transaction
>>> db = DB() >>> db = DB()
>>> mvcc_storage = db._mvcc_storage
>>> tm = transaction.TransactionManager() >>> tm = transaction.TransactionManager()
>>> connection = db.open(transaction_manager=tm) >>> connection = db.open(transaction_manager=tm)
>>> connection.root()['a'] = StubObject() >>> connection.root()['a'] = StubObject()
...@@ -584,53 +590,33 @@ def doctest_invalidateCache(): ...@@ -584,53 +590,33 @@ def doctest_invalidateCache():
So we have a connection and an active transaction with some modifications. So we have a connection and an active transaction with some modifications.
Lets call invalidateCache: Lets call invalidateCache:
>>> connection.invalidateCache() >>> mvcc_storage.invalidateCache()
Now, if we try to load an object, we'll get a read conflict:
>>> connection.root()['b'].x
Traceback (most recent call last):
...
ReadConflictError: database read conflict error
If we try to commit the transaction, we'll get a conflict error:
>>> tm.commit() This won't have any effect until the next transaction:
Traceback (most recent call last):
...
ConflictError: database conflict error
and the cache will have been cleared: >>> connection.root()['a']._p_changed
0
>>> connection.root()['b']._p_changed
>>> connection.root()['c']._p_changed
1
>>> print(connection.root()['a']._p_changed) But if we sync():
None
>>> print(connection.root()['b']._p_changed)
None
>>> print(connection.root()['c']._p_changed)
None
But we'll be able to access data again: >>> connection.sync()
>>> connection.root()['b'].x All of our data was invalidated:
1
Aborting a transaction after a read conflict also lets us read data and go >>> connection.root()['a']._p_changed
on about our business: >>> connection.root()['b']._p_changed
>>> connection.root()['c']._p_changed
>>> connection.invalidateCache() But we can load data as usual:
>>> connection.root()['c'].x Now, if we try to load an object, we'll get a read conflict:
Traceback (most recent call last):
...
ReadConflictError: database read conflict error
>>> tm.abort() >>> connection.root()['b'].x
>>> connection.root()['c'].x
1 1
>>> connection.root()['c'].x = 2
>>> tm.commit()
>>> db.close() >>> db.close()
""" """
...@@ -1289,6 +1275,9 @@ class StubStorage: ...@@ -1289,6 +1275,9 @@ class StubStorage:
raise TypeError('StubStorage does not support versions.') raise TypeError('StubStorage does not support versions.')
return self._data[oid] return self._data[oid]
def loadBefore(self, oid, tid):
return self._data[oid] + (None, )
def store(self, oid, serial, p, version, transaction): def store(self, oid, serial, p, version, transaction):
if version != '': if version != '':
raise TypeError('StubStorage does not support versions.') raise TypeError('StubStorage does not support versions.')
...@@ -1304,6 +1293,9 @@ class StubStorage: ...@@ -1304,6 +1293,9 @@ class StubStorage:
# storage # storage
return None return None
def lastTransaction(self):
return z64
class TestConnectionInterface(unittest.TestCase): class TestConnectionInterface(unittest.TestCase):
...@@ -1318,6 +1310,7 @@ class StubDatabase: ...@@ -1318,6 +1310,7 @@ class StubDatabase:
def __init__(self): def __init__(self):
self.storage = StubStorage() self.storage = StubStorage()
self._mvcc_storage = mvccadapter.MVCCAdapter(self.storage)
self.new_oid = self.storage.new_oid self.new_oid = self.storage.new_oid
classFactory = None classFactory = None
......
...@@ -83,34 +83,40 @@ def test_invalidateCache(): ...@@ -83,34 +83,40 @@ def test_invalidateCache():
>>> from ZODB.tests.util import DB >>> from ZODB.tests.util import DB
>>> import transaction >>> import transaction
>>> db = DB() >>> db = DB()
>>> mvcc_storage = db._mvcc_storage
>>> tm1 = transaction.TransactionManager() >>> tm1 = transaction.TransactionManager()
>>> c1 = db.open(transaction_manager=tm1) >>> c1 = db.open(transaction_manager=tm1)
>>> c1.root()['a'] = MinPO(1) >>> c1.root()['a'] = MinPO(1)
>>> tm1.commit() >>> tm1.commit()
>>> tm2 = transaction.TransactionManager() >>> tm2 = transaction.TransactionManager()
>>> c2 = db.open(transaction_manager=tm2) >>> c2 = db.open(transaction_manager=tm2)
>>> c1.root()['a']._p_deactivate() >>> c2.root()['a'].value
1
>>> tm3 = transaction.TransactionManager() >>> tm3 = transaction.TransactionManager()
>>> c3 = db.open(transaction_manager=tm3) >>> c3 = db.open(transaction_manager=tm3)
>>> c3.root()['a'].value >>> c3.root()['a'].value
1 1
>>> c3.close() >>> c3.close()
>>> db.invalidateCache()
>>> c1.root()['a'].value
Traceback (most recent call last):
...
ReadConflictError: database read conflict error
>>> c2.root()['a'].value
Traceback (most recent call last):
...
ReadConflictError: database read conflict error
>>> mvcc_storage.invalidateCache()
>>> c1.root.a._p_changed
0
>>> c1.sync()
>>> c1.root.a._p_changed
>>> c2.root.a._p_changed
0
>>> c2.sync()
>>> c2.root.a._p_changed
>>> c3 is db.open(transaction_manager=tm3) >>> c3 is db.open(transaction_manager=tm3)
True True
>>> print(c3.root()['a']._p_changed) >>> c3.root.a._p_changed
None
>>> c1.root()['a'].value
1
>>> c2.root()['a'].value
1
>>> c3.root()['a'].value
1
>>> db.close() >>> db.close()
""" """
...@@ -125,7 +131,7 @@ def connectionDebugInfo(): ...@@ -125,7 +131,7 @@ def connectionDebugInfo():
... now += .1 ... now += .1
... return now ... return now
>>> real_time = time.time >>> real_time = time.time
>>> if isinstance(time,type): >>> if isinstance(time, type):
... time.time = staticmethod(faux_time) # Jython ... time.time = staticmethod(faux_time) # Jython
... else: ... else:
... time.time = faux_time ... time.time = faux_time
...@@ -151,7 +157,7 @@ def connectionDebugInfo(): ...@@ -151,7 +157,7 @@ def connectionDebugInfo():
>>> before >>> before
[None, '\x03zY\xd8\xc0m9\xdd', None] [None, '\x03zY\xd8\xc0m9\xdd', None]
>>> opened >>> opened
['2008-12-04T20:40:44Z (1.40s)', '2008-12-04T20:40:45Z (0.30s)', None] ['2008-12-04T20:40:44Z (1.30s)', '2008-12-04T20:40:46Z (0.10s)', None]
>>> infos >>> infos
['test info (2)', ' (0)', ' (0)'] ['test info (2)', ' (0)', ' (0)']
......
...@@ -76,12 +76,12 @@ class DemoStorageTests( ...@@ -76,12 +76,12 @@ class DemoStorageTests(
db = DB(self._storage) # creates object 0. :) db = DB(self._storage) # creates object 0. :)
self.assertEqual(len(self._storage), 1) self.assertEqual(len(self._storage), 1)
self.assertTrue(self._storage) self.assertTrue(self._storage)
conn = db.open() with db.transaction() as conn:
for i in range(10): for i in range(10):
conn.root()[i] = conn.root().__class__() conn.root()[i] = conn.root().__class__()
transaction.commit()
self.assertEqual(len(self._storage), 11) self.assertEqual(len(self._storage), 11)
self.assertTrue(self._storage) self.assertTrue(self._storage)
db.close()
def checkLoadBeforeUndo(self): def checkLoadBeforeUndo(self):
pass # we don't support undo yet pass # we don't support undo yet
......
...@@ -36,6 +36,7 @@ from ZODB.tests import ReadOnlyStorage, RecoveryStorage ...@@ -36,6 +36,7 @@ from ZODB.tests import ReadOnlyStorage, RecoveryStorage
from ZODB.tests.StorageTestBase import MinPO, zodb_pickle from ZODB.tests.StorageTestBase import MinPO, zodb_pickle
from ZODB._compat import dump, dumps, _protocol from ZODB._compat import dump, dumps, _protocol
from . import util
class FileStorageTests( class FileStorageTests(
StorageTestBase.StorageTestBase, StorageTestBase.StorageTestBase,
...@@ -706,7 +707,7 @@ def test_suite(): ...@@ -706,7 +707,7 @@ def test_suite():
suite.addTest(unittest.makeSuite(klass, "check")) suite.addTest(unittest.makeSuite(klass, "check"))
suite.addTest(doctest.DocTestSuite( suite.addTest(doctest.DocTestSuite(
setUp=zope.testing.setupstack.setUpDirectory, setUp=zope.testing.setupstack.setUpDirectory,
tearDown=zope.testing.setupstack.tearDown, tearDown=util.tearDown,
checker=ZODB.tests.util.checker)) checker=ZODB.tests.util.checker))
suite.addTest(ZODB.tests.testblob.storage_reusable_suite( suite.addTest(ZODB.tests.testblob.storage_reusable_suite(
'BlobFileStorage', 'BlobFileStorage',
......
...@@ -33,6 +33,7 @@ from ZODB.tests import ( ...@@ -33,6 +33,7 @@ from ZODB.tests import (
) )
class MVCCTests: class MVCCTests:
def checkClosingNestedDatabasesWorks(self): def checkClosingNestedDatabasesWorks(self):
# This tests for the error described in # This tests for the error described in
# https://github.com/zopefoundation/ZODB/issues/45 # https://github.com/zopefoundation/ZODB/issues/45
...@@ -42,7 +43,6 @@ class MVCCTests: ...@@ -42,7 +43,6 @@ class MVCCTests:
db1.close() db1.close()
db2.close() db2.close()
def checkCrossConnectionInvalidation(self): def checkCrossConnectionInvalidation(self):
# Verify connections see updated state at txn boundaries. # Verify connections see updated state at txn boundaries.
# This will fail if the Connection doesn't poll for changes. # This will fail if the Connection doesn't poll for changes.
......
...@@ -38,6 +38,8 @@ __test__ = dict( ...@@ -38,6 +38,8 @@ __test__ = dict(
>>> list(conn2.root()[0].keys()) >>> list(conn2.root()[0].keys())
[] []
>>> db2.close()
>>> db1.close()
""", """,
) )
......
...@@ -64,7 +64,7 @@ Now we see two transactions and two changed objects. ...@@ -64,7 +64,7 @@ Now we see two transactions and two changed objects.
Clean up. Clean up.
>>> st.close() >>> db.close()
""" """
import re import re
...@@ -87,6 +87,6 @@ checker = renormalizing.RENormalizing([ ...@@ -87,6 +87,6 @@ checker = renormalizing.RENormalizing([
def test_suite(): def test_suite():
return doctest.DocTestSuite( return doctest.DocTestSuite(
setUp=zope.testing.setupstack.setUpDirectory, setUp=zope.testing.setupstack.setUpDirectory,
tearDown=zope.testing.setupstack.tearDown, tearDown=ZODB.tests.util.tearDown,
optionflags=doctest.REPORT_NDIFF, optionflags=doctest.REPORT_NDIFF,
checker=ZODB.tests.util.checker + checker) checker=ZODB.tests.util.checker + checker)
...@@ -99,10 +99,13 @@ class MinimalMemoryStorage(BaseStorage, object): ...@@ -99,10 +99,13 @@ class MinimalMemoryStorage(BaseStorage, object):
del self._txn del self._txn
def _finish(self, tid, u, d, e): def _finish(self, tid, u, d, e):
with self._lock: self._lock_acquire()
try:
self._index.update(self._txn.index) self._index.update(self._txn.index)
self._cur.update(self._txn.cur()) self._cur.update(self._txn.cur())
self._ltid = self._tid self._ltid = self._tid
finally:
self._lock_release()
def loadBefore(self, the_oid, the_tid): def loadBefore(self, the_oid, the_tid):
# It's okay if loadBefore() is really expensive, because this # It's okay if loadBefore() is really expensive, because this
...@@ -121,6 +124,9 @@ class MinimalMemoryStorage(BaseStorage, object): ...@@ -121,6 +124,9 @@ class MinimalMemoryStorage(BaseStorage, object):
end_tid = None end_tid = None
else: else:
end_tid = tids[j] end_tid = tids[j]
self.hook(the_oid, self._cur[the_oid], '')
return self._index[(the_oid, tid)], tid, end_tid return self._index[(the_oid, tid)], tid, end_tid
def loadSerial(self, oid, serial): def loadSerial(self, oid, serial):
......
...@@ -54,6 +54,8 @@ except NameError: ...@@ -54,6 +54,8 @@ except NameError:
import io import io
file_type = io.BufferedReader file_type = io.BufferedReader
from . import util
def new_time(): def new_time():
"""Create a _new_ time stamp. """Create a _new_ time stamp.
...@@ -334,6 +336,7 @@ class RecoveryBlobStorage(BlobTestBase, ...@@ -334,6 +336,7 @@ class RecoveryBlobStorage(BlobTestBase,
transaction.commit() transaction.commit()
self._dst.copyTransactionsFrom(self._storage) self._dst.copyTransactionsFrom(self._storage)
self.compare(self._storage, self._dst) self.compare(self._storage, self._dst)
db.close()
def gc_blob_removes_uncommitted_data(): def gc_blob_removes_uncommitted_data():
...@@ -446,7 +449,6 @@ def packing_with_uncommitted_data_non_undoing(): ...@@ -446,7 +449,6 @@ def packing_with_uncommitted_data_non_undoing():
Clean up: Clean up:
>>> database.close() >>> database.close()
""" """
def packing_with_uncommitted_data_undoing(): def packing_with_uncommitted_data_undoing():
...@@ -609,7 +611,7 @@ def do_not_depend_on_cwd(): ...@@ -609,7 +611,7 @@ def do_not_depend_on_cwd():
>>> with conn.root()['blob'].open() as fp: fp.read() >>> with conn.root()['blob'].open() as fp: fp.read()
'data' 'data'
>>> bs.close() >>> db.close()
""" """
def savepoint_isolation(): def savepoint_isolation():
...@@ -700,9 +702,11 @@ def savepoint_cleanup(): ...@@ -700,9 +702,11 @@ def savepoint_cleanup():
>>> db.close() >>> db.close()
""" """
def lp440234_Setting__p_changed_of_a_Blob_w_no_uncomitted_changes_is_noop(): def lp440234_Setting__p_changed_of_a_Blob_w_no_uncomitted_changes_is_noop():
r""" r"""
>>> conn = ZODB.connection('data.fs', blob_dir='blobs') >>> db = ZODB.DB('data.fs', blob_dir='blobs')
>>> conn = db.open()
>>> blob = ZODB.blob.Blob(b'blah') >>> blob = ZODB.blob.Blob(b'blah')
>>> conn.add(blob) >>> conn.add(blob)
>>> transaction.commit() >>> transaction.commit()
...@@ -714,7 +718,7 @@ def lp440234_Setting__p_changed_of_a_Blob_w_no_uncomitted_changes_is_noop(): ...@@ -714,7 +718,7 @@ def lp440234_Setting__p_changed_of_a_Blob_w_no_uncomitted_changes_is_noop():
>>> old_serial == blob._p_serial >>> old_serial == blob._p_serial
True True
>>> conn.close() >>> db.close()
""" """
def setUp(test): def setUp(test):
...@@ -757,7 +761,7 @@ def storage_reusable_suite(prefix, factory, ...@@ -757,7 +761,7 @@ def storage_reusable_suite(prefix, factory,
"blob_connection.txt", "blob_connection.txt",
"blob_importexport.txt", "blob_importexport.txt",
"blob_transaction.txt", "blob_transaction.txt",
setUp=setup, tearDown=zope.testing.setupstack.tearDown, setUp=setup, tearDown=util.tearDown,
checker=zope.testing.renormalizing.RENormalizing([ checker=zope.testing.renormalizing.RENormalizing([
# Py3k renders bytes where Python2 used native strings... # Py3k renders bytes where Python2 used native strings...
(re.compile(r"^b'"), "'"), (re.compile(r"^b'"), "'"),
...@@ -780,15 +784,16 @@ def storage_reusable_suite(prefix, factory, ...@@ -780,15 +784,16 @@ def storage_reusable_suite(prefix, factory,
if test_packing: if test_packing:
suite.addTest(doctest.DocFileSuite( suite.addTest(doctest.DocFileSuite(
"blob_packing.txt", "blob_packing.txt",
setUp=setup, tearDown=zope.testing.setupstack.tearDown, setUp=setup, tearDown=util.tearDown,
)) ))
suite.addTest(doctest.DocTestSuite( suite.addTest(doctest.DocTestSuite(
setUp=setup, tearDown=zope.testing.setupstack.tearDown, setUp=setup, tearDown=util.tearDown,
checker = ZODB.tests.util.checker + \ checker = (
ZODB.tests.util.checker +
zope.testing.renormalizing.RENormalizing([ zope.testing.renormalizing.RENormalizing([
(re.compile(r'\%(sep)s\%(sep)s' % dict(sep=os.path.sep)), '/'), (re.compile(r'\%(sep)s\%(sep)s' % dict(sep=os.path.sep)), '/'),
(re.compile(r'\%(sep)s' % dict(sep=os.path.sep)), '/'), (re.compile(r'\%(sep)s' % dict(sep=os.path.sep)), '/'),
]), ])),
)) ))
def create_storage(self, name='data', blob_dir=None): def create_storage(self, name='data', blob_dir=None):
...@@ -823,7 +828,7 @@ def test_suite(): ...@@ -823,7 +828,7 @@ def test_suite():
"blob_tempdir.txt", "blob_tempdir.txt",
"blobstorage_packing.txt", "blobstorage_packing.txt",
setUp=setUp, setUp=setUp,
tearDown=zope.testing.setupstack.tearDown, tearDown=util.tearDown,
optionflags=doctest.ELLIPSIS, optionflags=doctest.ELLIPSIS,
checker=ZODB.tests.util.checker, checker=ZODB.tests.util.checker,
)) ))
...@@ -831,7 +836,7 @@ def test_suite(): ...@@ -831,7 +836,7 @@ def test_suite():
"blob_layout.txt", "blob_layout.txt",
optionflags=doctest.ELLIPSIS|doctest.NORMALIZE_WHITESPACE, optionflags=doctest.ELLIPSIS|doctest.NORMALIZE_WHITESPACE,
setUp=setUp, setUp=setUp,
tearDown=zope.testing.setupstack.tearDown, tearDown=util.tearDown,
checker=ZODB.tests.util.checker + checker=ZODB.tests.util.checker +
zope.testing.renormalizing.RENormalizing([ zope.testing.renormalizing.RENormalizing([
(re.compile(r'\%(sep)s\%(sep)s' % dict(sep=os.path.sep)), '/'), (re.compile(r'\%(sep)s\%(sep)s' % dict(sep=os.path.sep)), '/'),
......
...@@ -33,9 +33,17 @@ This note includes doctests that explain how MVCC is implemented (and ...@@ -33,9 +33,17 @@ This note includes doctests that explain how MVCC is implemented (and
test that the implementation is correct). The tests use a test that the implementation is correct). The tests use a
MinimalMemoryStorage that implements MVCC support, but not much else. MinimalMemoryStorage that implements MVCC support, but not much else.
***IMPORTANT***: The MVCC approach has changed since these tests were
originally written. The new approach is much simpler because we no
longer call load to get the current state of an object. We call
loadBefore instead, having gotten a transaction time at the start of a
transaction. As a result, the rhythm of the tests is a little odd,
because we no longer need to probe a complex dance that doesn't exist any more.
>>> from ZODB.tests.test_storage import MinimalMemoryStorage >>> from ZODB.tests.test_storage import MinimalMemoryStorage
>>> from ZODB import DB >>> from ZODB import DB
>>> db = DB(MinimalMemoryStorage()) >>> st = MinimalMemoryStorage()
>>> db = DB(st)
We will use two different connections with different transaction managers We will use two different connections with different transaction managers
to make sure that the connections act independently, even though they'll to make sure that the connections act independently, even though they'll
...@@ -59,6 +67,10 @@ Now open a second connection. ...@@ -59,6 +67,10 @@ Now open a second connection.
>>> tm2 = transaction.TransactionManager() >>> tm2 = transaction.TransactionManager()
>>> cn2 = db.open(transaction_manager=tm2) >>> cn2 = db.open(transaction_manager=tm2)
>>> from ZODB.utils import p64, u64
>>> cn2._storage._start == p64(u64(st.lastTransaction()) + 1)
True
>>> txn_time2 = cn2._storage._start
Connection high-water mark Connection high-water mark
-------------------------- --------------------------
...@@ -67,22 +79,20 @@ The ZODB Connection tracks a transaction high-water mark, which ...@@ -67,22 +79,20 @@ The ZODB Connection tracks a transaction high-water mark, which
bounds the latest transaction id that can be read by the current bounds the latest transaction id that can be read by the current
transaction and still present a consistent view of the database. transaction and still present a consistent view of the database.
Transactions with ids up to but not including the high-water mark Transactions with ids up to but not including the high-water mark
are OK to read. When a transaction commits, the database sends are OK to read. At the beginning of a transaction, a connection
invalidations to all the other connections; the invalidation contains sets the high-water mark to just over the last transaction time the
the transaction id and the oids of modified objects. The Connection storage has seen.
stores the high-water mark in _txn_time, which is set to None until
an invalidation arrives.
>>> cn = db.open() >>> cn = db.open()
>>> print(cn._txn_time) >>> cn._storage._start == p64(u64(st.lastTransaction()) + 1)
None True
>>> cn.invalidate(100, dict.fromkeys([1, 2])) >>> cn.db()._mvcc_storage.invalidate(100, dict.fromkeys([1, 2]))
>>> cn._txn_time >>> cn._storage._start == p64(u64(st.lastTransaction()) + 1)
100 True
>>> cn.invalidate(200, dict.fromkeys([1, 2])) >>> cn.db()._mvcc_storage.invalidate(200, dict.fromkeys([1, 2]))
>>> cn._txn_time >>> cn._storage._start == p64(u64(st.lastTransaction()) + 1)
100 True
A connection's high-water mark is set to the transaction id taken from A connection's high-water mark is set to the transaction id taken from
the first invalidation processed by the connection. Transaction ids are the first invalidation processed by the connection. Transaction ids are
...@@ -95,8 +105,8 @@ but that doesn't work unless an object is modified. sync() will abort ...@@ -95,8 +105,8 @@ but that doesn't work unless an object is modified. sync() will abort
a transaction and process invalidations. a transaction and process invalidations.
>>> cn.sync() >>> cn.sync()
>>> print(cn._txn_time) # the high-water mark got reset to None >>> cn._storage._start == p64(u64(st.lastTransaction()) + 1)
None True
Basic functionality Basic functionality
------------------- -------------------
...@@ -109,16 +119,16 @@ will modify "a." The other transaction will then modify "b" and commit. ...@@ -109,16 +119,16 @@ will modify "a." The other transaction will then modify "b" and commit.
>>> tm1.get().commit() >>> tm1.get().commit()
>>> txn = db.lastTransaction() >>> txn = db.lastTransaction()
The second connection has its high-water mark set now. The second connection already has its high-water mark set.
>>> cn2._txn_time == txn >>> cn2._storage._start == txn_time2
True True
It is safe to read "b," because it was not modified by the concurrent It is safe to read "b," because it was not modified by the concurrent
transaction. transaction.
>>> r2 = cn2.root() >>> r2 = cn2.root()
>>> r2["b"]._p_serial < cn2._txn_time >>> r2["b"]._p_serial < cn2._storage._start
True True
>>> r2["b"].value >>> r2["b"].value
1 1
...@@ -130,7 +140,7 @@ non-current version. ...@@ -130,7 +140,7 @@ non-current version.
>>> r2["a"].value >>> r2["a"].value
1 1
>>> r2["a"]._p_serial < cn2._txn_time >>> r2["a"]._p_serial < cn2._storage._start
True True
We can confirm that we have a non-current revision by asking the We can confirm that we have a non-current revision by asking the
...@@ -143,32 +153,33 @@ It's possible to modify "a", but we get a conflict error when we ...@@ -143,32 +153,33 @@ It's possible to modify "a", but we get a conflict error when we
commit the transaction. commit the transaction.
>>> r2["a"].value = 3 >>> r2["a"].value = 3
>>> tm2.get().commit() >>> tm2.get().commit() # doctest: +ELLIPSIS
Traceback (most recent call last): Traceback (most recent call last):
... ...
ConflictError: database conflict error (oid 0x01, class ZODB.tests.MinPO.MinPO) ConflictError: database conflict error (oid 0x01, class ZODB.tests.MinPO...
>>> tm2.get().abort() >>> tm2.get().abort()
This example will demonstrate that we can commit a transaction if we only This example will demonstrate that we can commit a transaction if we only
modify current revisions. modify current revisions.
>>> print(cn2._txn_time) >>> cn2._storage._start == p64(u64(st.lastTransaction()) + 1)
None True
>>> txn_time2 = cn2._storage._start
>>> r1 = cn1.root() >>> r1 = cn1.root()
>>> r1["a"].value = 3 >>> r1["a"].value = 3
>>> tm1.get().commit() >>> tm1.get().commit()
>>> txn = db.lastTransaction() >>> txn = db.lastTransaction()
>>> cn2._txn_time == txn >>> cn2._storage._start == txn_time2
True True
>>> r2["b"].value = r2["a"].value + 1 >>> r2["b"].value = r2["a"].value + 1
>>> r2["b"].value >>> r2["b"].value
3 3
>>> tm2.get().commit() >>> tm2.get().commit()
>>> print(cn2._txn_time) >>> cn2._storage._start == p64(u64(st.lastTransaction()) + 1)
None True
Object cache Object cache
------------ ------------
...@@ -302,22 +313,18 @@ same things now. ...@@ -302,22 +313,18 @@ same things now.
>>> r2["a"].value, r2["b"].value >>> r2["a"].value, r2["b"].value
(42, 43) (42, 43)
>>> db.close()
Late invalidation Late invalidation
----------------- -----------------
The combination of ZEO and MVCC adds more complexity. Since The combination of ZEO and MVCC used to add more complexity. That's
invalidations are delivered asynchronously by ZEO, it is possible for why ZODB no longer calls load. :)
an invalidation to arrive just after a request to load the invalidated
object is sent. The connection can't use the just-loaded data,
because the invalidation arrived first. The complexity for MVCC is
that it must check for invalidated objects after it has loaded them,
just in case.
Rather than add all the complexity of ZEO to these tests, the Rather than add all the complexity of ZEO to these tests, the
MinimalMemoryStorage has a hook. We'll write a subclass that will MinimalMemoryStorage has a hook. We'll write a subclass that will
deliver an invalidation when it loads an object. The hook allows us deliver an invalidation when it loads (or loadBefore's) an object.
to test the Connection code. The hook allows us to test the Connection code.
>>> class TestStorage(MinimalMemoryStorage): >>> class TestStorage(MinimalMemoryStorage):
... def __init__(self): ... def __init__(self):
...@@ -351,22 +358,30 @@ non-current revision to load. ...@@ -351,22 +358,30 @@ non-current revision to load.
>>> oid = r1["b"]._p_oid >>> oid = r1["b"]._p_oid
>>> ts.hooked[oid] = 1 >>> ts.hooked[oid] = 1
This test is kinda screwy because it depends on an old approach that
has changed. We'll hack the _txn_time to get the original expected
result, even though what's going on now is much simpler.
>>> cn1._storage._start = ts.lastTransaction()
Once the oid is hooked, an invalidation will be delivered the next Once the oid is hooked, an invalidation will be delivered the next
time it is activated. The code below activates the object, then time it is activated. The code below activates the object, then
confirms that the hook worked and that the old state was retrieved. confirms that the hook worked and that the old state was retrieved.
>>> oid in cn1._invalidated >>> oid in cn1._storage._invalidations
False False
>>> r1["b"]._p_state >>> r1["b"]._p_state
-1 -1
>>> r1["b"]._p_activate() >>> r1["b"]._p_activate()
>>> oid in cn1._invalidated >>> oid in cn1._storage._invalidations
True True
>>> ts.count >>> ts.count
1 1
>>> r1["b"].value >>> r1["b"].value
0 0
>>> db.close()
No earlier revision available No earlier revision available
----------------------------- -----------------------------
...@@ -391,18 +406,17 @@ Again, once the oid is hooked, an invalidation will be delivered the next ...@@ -391,18 +406,17 @@ Again, once the oid is hooked, an invalidation will be delivered the next
time it is activated. The code below activates the object, but unlike the time it is activated. The code below activates the object, but unlike the
section above, there is no older state to retrieve. section above, there is no older state to retrieve.
>>> oid in cn1._invalidated >>> oid in cn1._storage._invalidations
False False
>>> r1["b"]._p_state >>> r1["b"]._p_state
-1 -1
>>> r1["b"]._p_activate() >>> cn1._storage._start = ts.lastTransaction()
>>> r1["b"]._p_activate() # doctest: +ELLIPSIS
Traceback (most recent call last): Traceback (most recent call last):
... ...
ReadConflictError: database read conflict error (oid 0x02, class ZODB.tests.MinPO.MinPO) ReadConflictError: ...
>>> oid in cn1._invalidated
True >>> db.close()
>>> ts.count
1
""" """
import doctest import doctest
import re import re
......
...@@ -61,6 +61,7 @@ checker = renormalizing.RENormalizing([ ...@@ -61,6 +61,7 @@ checker = renormalizing.RENormalizing([
]) ])
def setUp(test, name='test'): def setUp(test, name='test'):
clear_transaction_syncs()
transaction.abort() transaction.abort()
d = tempfile.mkdtemp(prefix=name) d = tempfile.mkdtemp(prefix=name)
zope.testing.setupstack.register(test, zope.testing.setupstack.rmtree, d) zope.testing.setupstack.register(test, zope.testing.setupstack.rmtree, d)
...@@ -71,7 +72,9 @@ def setUp(test, name='test'): ...@@ -71,7 +72,9 @@ def setUp(test, name='test'):
os.chdir(d) os.chdir(d)
zope.testing.setupstack.register(test, transaction.abort) zope.testing.setupstack.register(test, transaction.abort)
tearDown = zope.testing.setupstack.tearDown def tearDown(test):
clear_transaction_syncs()
zope.testing.setupstack.tearDown(test)
class TestCase(unittest.TestCase): class TestCase(unittest.TestCase):
...@@ -186,3 +189,18 @@ def mess_with_time(test=None, globs=None, now=1278864701.5): ...@@ -186,3 +189,18 @@ def mess_with_time(test=None, globs=None, now=1278864701.5):
time.time = staticmethod(faux_time) # jython time.time = staticmethod(faux_time) # jython
else: else:
time.time = faux_time time.time = faux_time
def clear_transaction_syncs():
    """Clear synchronizers registered with the global transaction manager.

    Many tests don't clean up the synchronizers they register with the
    global transaction manager, which can wreak havoc with following
    tests, now that connections interact with their storages at
    transaction boundaries.  We need to make sure that any registered
    synchronizers are cleared between tests.

    For now, we reach into the transaction manager's underware (the
    non-public ``clearSynchs`` method).  Eventually, transaction
    managers need to grow a public API for this.
    """
    transaction.manager.clearSynchs()
...@@ -11,10 +11,12 @@ ...@@ -11,10 +11,12 @@
# FOR A PARTICULAR PURPOSE # FOR A PARTICULAR PURPOSE
# #
############################################################################## ##############################################################################
from __future__ import print_function
import os import os
import struct import struct
import sys import sys
import time import time
import threading
import warnings import warnings
from binascii import hexlify, unhexlify from binascii import hexlify, unhexlify
from struct import pack, unpack from struct import pack, unpack
...@@ -32,6 +34,7 @@ __all__ = ['z64', ...@@ -32,6 +34,7 @@ __all__ = ['z64',
'u64', 'u64',
'U64', 'U64',
'cp', 'cp',
'maxtid',
'newTid', 'newTid',
'oid_repr', 'oid_repr',
'serial_repr', 'serial_repr',
...@@ -100,6 +103,8 @@ else: ...@@ -100,6 +103,8 @@ else:
z64 = b'\0' * 8 z64 = b'\0' * 8
maxtid = b'\x7f\xff\xff\xff\xff\xff\xff\xff'
assert sys.hexversion >= 0x02030000 assert sys.hexversion >= 0x02030000
# The distinction between ints and longs is blurred in Python 2.2, # The distinction between ints and longs is blurred in Python 2.2,
...@@ -308,3 +313,65 @@ class locked(object): ...@@ -308,3 +313,65 @@ class locked(object):
def __call__(self, func): def __call__(self, func):
return Locked(func, preconditions=self.preconditions) return Locked(func, preconditions=self.preconditions)
if os.environ.get('DEBUG_LOCKING'):
    # Debug variants of the threading synchronization primitives.  Each
    # operation (acquire/release/wait/notify/...) prints the lock's id,
    # the calling thread, and the caller's source location, to help
    # track down deadlocks.  Enabled only via the DEBUG_LOCKING
    # environment variable; otherwise the real threading classes are
    # re-exported unchanged (see the ``else`` branch below).

    # threading.get_ident is Python 3 only; Python 2 spells it
    # threading._get_ident.  Resolve it once so pr() works on both.
    try:
        _get_ident = threading.get_ident
    except AttributeError:  # pragma: no cover  (Python 2)
        _get_ident = threading._get_ident

    class Lock:
        """Traced wrapper around a ``threading.Lock``."""

        lock_class = threading.Lock  # overridden by subclasses

        def __init__(self):
            self._lock = self.lock_class()

        def pr(self, name, a=None, kw=None):
            """Print one trace line naming the operation and its caller."""
            # Caller is normally 2 frames up; skip one more frame if the
            # intermediate frame is this module itself (e.g. __enter__).
            f = sys._getframe(2)
            if f.f_code.co_filename.endswith('ZODB/utils.py'):
                f = sys._getframe(3)
            f = '%s:%s' % (f.f_code.co_filename, f.f_lineno)
            print(id(self), self._lock, _get_ident(), f, name,
                  a if a else '', kw if kw else '')

        def acquire(self, *a, **kw):
            self.pr('acquire', a, kw)
            return self._lock.acquire(*a, **kw)

        def release(self):
            self.pr('release')
            return self._lock.release()

        def __enter__(self):
            self.pr('acquire')
            return self._lock.acquire()

        def __exit__(self, *ignored):
            self.pr('release')
            return self._lock.release()

    class RLock(Lock):
        """Traced wrapper around a ``threading.RLock``."""
        lock_class = threading.RLock

    class Condition(Lock):
        """Traced wrapper around a ``threading.Condition``.

        NOTE(review): ``wait_for`` delegates to a method that only
        exists on Python 3 condition objects; the traced wrapper simply
        mirrors whatever the underlying object supports.
        """
        lock_class = threading.Condition

        def wait(self, *a, **kw):
            self.pr('wait', a, kw)
            return self._lock.wait(*a, **kw)

        def wait_for(self, *a, **kw):
            self.pr('wait_for', a, kw)
            return self._lock.wait_for(*a, **kw)

        def notify(self, *a, **kw):
            self.pr('notify', a, kw)
            return self._lock.notify(*a, **kw)

        def notify_all(self):
            self.pr('notify_all')
            return self._lock.notify_all()

        notifyAll = notify_all

else:
    from threading import Condition, Lock, RLock
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment