Commit 2739fae0 authored by Tres Seaver

Play with svn:externals stitching.

parent 508ace53
##############################################################################
#
# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################
"""ZODB transfer activity monitoring
$Id$"""
import time
class ActivityMonitor:
"""ZODB load/store activity monitor
This simple implementation just keeps a small log in memory
and iterates over the log when getActivityAnalysis() is called.
It assumes that log entries are added in chronological sequence,
which is only guaranteed because DB.py holds a lock when calling
the closedConnection() method.
"""
def __init__(self, history_length=3600):
self.history_length = history_length # Number of seconds
self.log = [] # [(time, loads, stores)]
def closedConnection(self, conn):
log = self.log
now = time.time()
loads, stores = conn.getTransferCounts(1)
log.append((now, loads, stores))
self.trim(now)
def trim(self, now):
log = self.log
cutoff = now - self.history_length
n = 0
loglen = len(log)
while n < loglen and log[n][0] < cutoff:
n = n + 1
if n:
del log[:n]
def setHistoryLength(self, history_length):
self.history_length = history_length
self.trim(time.time())
def getHistoryLength(self):
return self.history_length
def getActivityAnalysis(self, start=0, end=0, divisions=10):
res = []
now = time.time()
if start == 0:
start = now - self.history_length
if end == 0:
end = now
for n in range(divisions):
res.append({
'start': start + (end - start) * n / divisions,
'end': start + (end - start) * (n + 1) / divisions,
'loads': 0,
'stores': 0,
'connections': 0,
})
div = res[0]
div_end = div['end']
div_index = 0
connections = 0
total_loads = 0
total_stores = 0
for t, loads, stores in self.log:
if t < start:
# We could use a binary search to find the start.
continue
elif t > end:
# We could use a binary search to find the end also.
break
while t > div_end:
div['loads'] = total_loads
div['stores'] = total_stores
div['connections'] = connections
total_loads = 0
total_stores = 0
connections = 0
div_index = div_index + 1
if div_index < divisions:
div = res[div_index]
div_end = div['end']
connections = connections + 1
total_loads = total_loads + loads
total_stores = total_stores + stores
div['stores'] = div['stores'] + total_stores
div['loads'] = div['loads'] + total_loads
div['connections'] = div['connections'] + connections
return res
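# A minimal usage sketch (hypothetical; assumes `db` is an open ZODB.DB
# instance). Once the monitor is registered with the DB, the DB calls
# closedConnection() for us:
#
#   monitor = ActivityMonitor(history_length=600)
#   db.setActivityMonitor(monitor)
#   # ... run some transactions ...
#   for div in monitor.getActivityAnalysis(divisions=6):
#       print div['start'], div['end'], div['loads'], div['stores']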
##############################################################################
#
# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################
"""Handy standard storage machinery
$Id$
"""
import cPickle
import threading
import time
import logging
from persistent.TimeStamp import TimeStamp
from ZODB import POSException
from ZODB.utils import z64, oid_repr
from ZODB.UndoLogCompatible import UndoLogCompatible
log = logging.getLogger("ZODB.BaseStorage")
class BaseStorage(UndoLogCompatible):
"""Abstract base class that supports storage implementations.
A subclass must define the following methods:
load()
close()
cleanup()
lastSerial()
lastTransaction()
It must override these hooks:
_begin()
_vote()
_abort()
_finish()
_clear_temp()
If it stores multiple revisions, it should implement
loadSerial()
loadBefore()
iterator()
If the subclass wants to implement undo, it should implement the
multiple revision methods and:
loadSerial()
undo()
undoInfo()
undoLog()
If the subclass wants to implement versions, it must implement:
abortVersion()
commitVersion()
modifiedInVersion()
versionEmpty()
versions()
Each storage will have two locks that are accessed via lock
acquire and release methods bound to the instance. (Yuck.)
_lock_acquire / _lock_release (reentrant)
_commit_lock_acquire / _commit_lock_release
The commit lock is acquired in tpc_begin() and released in
tpc_abort() and tpc_finish(). It is never acquired with the other
lock held.
The other lock appears to protect _oid and _transaction and
perhaps other things. It is always held when load() is called, so
presumably the load() implementation should also acquire the lock.
"""
_transaction=None # Transaction that is being committed
_tstatus=' ' # Transaction status, used for copying data
_is_read_only = False
def __init__(self, name, base=None):
self.__name__= name
log.debug("create storage %s", self.__name__)
# Allocate locks:
l = threading.RLock()
self._lock_acquire = l.acquire
self._lock_release = l.release
l = threading.Lock()
self._commit_lock_acquire = l.acquire
self._commit_lock_release = l.release
        now = time.time()
        t = self._ts = TimeStamp(*(time.gmtime(now)[:5] + (now % 60,)))
        self._tid = `t`
if base is None:
self._oid=z64
else:
self._oid=base._oid
def abortVersion(self, src, transaction):
if transaction is not self._transaction:
raise POSException.StorageTransactionError(self, transaction)
return self._tid, []
def commitVersion(self, src, dest, transaction):
if transaction is not self._transaction:
raise POSException.StorageTransactionError(self, transaction)
return self._tid, []
def close(self):
pass
def cleanup(self):
pass
def sortKey(self):
"""Return a string that can be used to sort storage instances.
The key must uniquely identify a storage and must be the same
across multiple instantiations of the same storage.
"""
# name may not be sufficient, e.g. ZEO has a user-definable name.
return self.__name__
def getName(self):
return self.__name__
def getSize(self):
        return len(self) * 300  # WAG (wild-ass guess)!
def history(self, oid, version, length=1, filter=None):
pass
def modifiedInVersion(self, oid):
return ''
def new_oid(self, last=None):
# 'last' is only for internal use, not part of the public API
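        # Oids are 8-byte strings treated as big-endian counters: the
        # last byte is incremented, and a byte that has reached 255
        # carries into the preceding byte (recursively), resetting the
        # trailing bytes to zero.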
if self._is_read_only:
raise POSException.ReadOnlyError()
        if last is None:
            self._lock_acquire()
            try:
                last = self._oid
                d = ord(last[-1])
                if d < 255:
                    last = last[:-1] + chr(d + 1)
                else:
                    last = self.new_oid(last[:-1])
                self._oid = last
                return last
            finally:
                self._lock_release()
        else:
            d = ord(last[-1])
            if d < 255:
                return last[:-1] + chr(d + 1) + '\0' * (8 - len(last))
            else:
                return self.new_oid(last[:-1])
def registerDB(self, db, limit):
pass # we don't care
def isReadOnly(self):
return self._is_read_only
def supportsUndo(self):
return 0
def supportsVersions(self):
return 0
def tpc_abort(self, transaction):
self._lock_acquire()
try:
if transaction is not self._transaction:
return
self._abort()
self._clear_temp()
self._transaction = None
self._commit_lock_release()
finally:
self._lock_release()
def _abort(self):
"""Subclasses should redefine this to supply abort actions"""
pass
def tpc_begin(self, transaction, tid=None, status=' '):
if self._is_read_only:
raise POSException.ReadOnlyError()
self._lock_acquire()
try:
if self._transaction is transaction:
return
self._lock_release()
self._commit_lock_acquire()
self._lock_acquire()
self._transaction = transaction
self._clear_temp()
user = transaction.user
desc = transaction.description
ext = transaction._extension
if ext:
ext = cPickle.dumps(ext, 1)
else:
ext = ""
self._ude = user, desc, ext
if tid is None:
now = time.time()
t = TimeStamp(*(time.gmtime(now)[:5] + (now % 60,)))
self._ts = t = t.laterThan(self._ts)
self._tid = `t`
else:
self._ts = TimeStamp(tid)
self._tid = tid
self._tstatus = status
self._begin(self._tid, user, desc, ext)
finally:
self._lock_release()
def _begin(self, tid, u, d, e):
"""Subclasses should redefine this to supply transaction start actions.
"""
pass
def tpc_vote(self, transaction):
self._lock_acquire()
try:
if transaction is not self._transaction:
return
self._vote()
finally:
self._lock_release()
def _vote(self):
"""Subclasses should redefine this to supply transaction vote actions.
"""
pass
def tpc_finish(self, transaction, f=None):
self._lock_acquire()
try:
if transaction is not self._transaction:
return
try:
if f is not None:
f(self._tid)
u, d, e = self._ude
self._finish(self._tid, u, d, e)
self._clear_temp()
return self._tid
finally:
self._ude = None
self._transaction = None
self._commit_lock_release()
finally:
self._lock_release()
def _finish(self, tid, u, d, e):
"""Subclasses should redefine this to supply transaction finish actions
"""
pass
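    # Sketch of the two-phase commit sequence a client drives against a
    # storage (cf. the root-creation code in ZODB.DB); `txn` is a
    # transaction object and the other names are placeholders:
    #
    #   storage.tpc_begin(txn)
    #   storage.store(oid, serial, data, version, txn)
    #   storage.tpc_vote(txn)
    #   storage.tpc_finish(txn)
    #
    # tpc_abort(txn) may be called instead of tpc_finish(txn) to discard
    # the transaction.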
def undo(self, transaction_id, txn):
if self._is_read_only:
raise POSException.ReadOnlyError()
raise POSException.UndoError, 'non-undoable transaction'
def undoLog(self, first, last, filter=None):
return ()
def versionEmpty(self, version):
return 1
def versions(self, max=None):
return ()
def pack(self, t, referencesf):
if self._is_read_only:
raise POSException.ReadOnlyError()
def getSerial(self, oid):
self._lock_acquire()
try:
v = self.modifiedInVersion(oid)
pickledata, serial = self.load(oid, v)
return serial
finally:
self._lock_release()
def loadSerial(self, oid, serial):
raise POSException.Unsupported, (
"Retrieval of historical revisions is not supported")
def loadBefore(self, oid, tid):
"""Return most recent revision of oid before tid committed."""
# XXX Is it okay for loadBefore() to return current data?
# There doesn't seem to be a good reason to forbid it, even
# though the typical use of this method will never find
# current data. But maybe we should call it loadByTid()?
n = 2
start_time = None
end_time = None
while start_time is None:
# The history() approach is a hack, because the dict
# returned by history() doesn't contain a tid. It
# contains a serialno, which is often the same, but isn't
# required to be. We'll pretend it is for now.
            # A second problem is that history() doesn't say anything
            # about the transaction status. If the revision falls before
            # the pack time, we can't honor the MVCC request.
# Note: history() returns the most recent record first.
# XXX The filter argument to history() only appears to be
# supported by FileStorage. Perhaps it shouldn't be used.
L = self.history(oid, "", n, lambda d: not d["version"])
if not L:
return
for d in L:
if d["serial"] < tid:
start_time = d["serial"]
break
else:
end_time = d["serial"]
if len(L) < n:
break
n *= 2
if start_time is None:
return None
data = self.loadSerial(oid, start_time)
return data, start_time, end_time
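    # Illustration of the contract implied above: the returned triple is
    # (data, start, end), where `start` is the tid of the revision
    # returned and `end` is the tid of the next revision, or None if the
    # returned revision is still current. For non-current data,
    # start < tid <= end holds.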
def getExtensionMethods(self):
"""getExtensionMethods
This returns a dictionary whose keys are names of extra methods
provided by this storage. Storage proxies (such as ZEO) should
call this method to determine the extra methods that they need
to proxy in addition to the standard storage methods.
Dictionary values should be None; this will be a handy place
for extra marshalling information, should we need it
"""
return {}
def copyTransactionsFrom(self, other, verbose=0):
"""Copy transactions from another storage.
This is typically used for converting data from one storage to
another. `other` must have an .iterator() method.
"""
        _ts = None
        ok = 1
        preindex = {}
        preget = preindex.get  # micro-optimization: cache the bound method
# restore() is a new storage API method which has an identical
# signature to store() except that it does not return anything.
# Semantically, restore() is also identical to store() except that it
# doesn't do the ConflictError or VersionLockError consistency
# checks. The reason to use restore() over store() in this method is
# that store() cannot be used to copy transactions spanning a version
# commit or abort, or over transactional undos.
#
# We'll use restore() if it's available, otherwise we'll fall back to
# using store(). However, if we use store, then
# copyTransactionsFrom() may fail with VersionLockError or
# ConflictError.
if hasattr(self, 'restore'):
restoring = 1
else:
restoring = 0
fiter = other.iterator()
for transaction in fiter:
tid=transaction.tid
if _ts is None:
_ts=TimeStamp(tid)
else:
t=TimeStamp(tid)
if t <= _ts:
if ok: print ('Time stamps out of order %s, %s' % (_ts, t))
ok=0
_ts=t.laterThan(_ts)
tid=`_ts`
else:
_ts = t
if not ok:
print ('Time stamps back in order %s' % (t))
ok=1
if verbose:
print _ts
self.tpc_begin(transaction, tid, transaction.status)
for r in transaction:
oid=r.oid
if verbose:
print oid_repr(oid), r.version, len(r.data)
if restoring:
self.restore(oid, r.tid, r.data, r.version,
r.data_txn, transaction)
else:
pre=preget(oid, None)
s=self.store(oid, pre, r.data, r.version, transaction)
preindex[oid]=s
self.tpc_vote(transaction)
self.tpc_finish(transaction)
fiter.close()
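    # Usage sketch (hypothetical file names): converting one storage to
    # another, e.g. for a migration:
    #
    #   from ZODB.FileStorage import FileStorage
    #   src = FileStorage('old.fs', read_only=True)
    #   dst = FileStorage('new.fs')
    #   dst.copyTransactionsFrom(src)
    #   dst.close(); src.close()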
class TransactionRecord:
"""Abstract base class for iterator protocol"""
class DataRecord:
"""Abstract base class for iterator protocol"""
##############################################################################
#
# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################
import logging
from cStringIO import StringIO
from cPickle import Unpickler, Pickler
from pickle import PicklingError
from ZODB.POSException import ConflictError
from ZODB.loglevels import BLATHER
logger = logging.getLogger('ZODB.ConflictResolution')
ResolvedSerial = 'rs'
class BadClassName(Exception):
pass
_class_cache = {}
_class_cache_get = _class_cache.get
def find_global(*args):
cls = _class_cache_get(args, 0)
if cls == 0:
# Not cached. Try to import
try:
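            # Passing a non-empty fromlist makes __import__ return the
            # leaf module rather than the top-level package; the name in
            # the list need not actually exist.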
module = __import__(args[0], {}, {}, ['cluck'])
except ImportError:
cls = 1
else:
cls = getattr(module, args[1], 1)
_class_cache[args] = cls
if cls == 1:
logger.log(BLATHER, "Unable to load class", exc_info=True)
if cls == 1:
# Not importable
raise BadClassName(*args)
return cls
def state(self, oid, serial, prfactory, p=''):
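    # Note: despite the `self` argument this is a module-level function;
    # `self` is a storage instance (see ConflictResolvingStorage below,
    # where tryToResolveConflict is likewise grafted on as a method).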
p = p or self.loadSerial(oid, serial)
file = StringIO(p)
unpickler = Unpickler(file)
unpickler.find_global = find_global
unpickler.persistent_load = prfactory.persistent_load
unpickler.load() # skip the class tuple
return unpickler.load()
class PersistentReference:
def __repr__(self):
return "PR(%s %s)" % (id(self), self.data)
def __getstate__(self):
raise PicklingError, "Can't pickle PersistentReference"
class PersistentReferenceFactory:
data = None
def persistent_load(self, oid):
if self.data is None:
self.data = {}
r = self.data.get(oid, None)
if r is None:
r = PersistentReference()
r.data = oid
self.data[oid] = r
return r
def persistent_id(object):
if getattr(object, '__class__', 0) is not PersistentReference:
return None
return object.data
_unresolvable = {}
def tryToResolveConflict(self, oid, committedSerial, oldSerial, newpickle,
committedData=''):
try:
prfactory = PersistentReferenceFactory()
file = StringIO(newpickle)
unpickler = Unpickler(file)
unpickler.find_global = find_global
unpickler.persistent_load = prfactory.persistent_load
meta = unpickler.load()
if isinstance(meta, tuple):
klass = meta[0]
newargs = meta[1] or ()
if isinstance(klass, tuple):
klass = find_global(*klass)
else:
klass = meta
newargs = ()
if klass in _unresolvable:
return None
newstate = unpickler.load()
inst = klass.__new__(klass, *newargs)
try:
resolve = inst._p_resolveConflict
except AttributeError:
_unresolvable[klass] = 1
return None
old = state(self, oid, oldSerial, prfactory)
committed = state(self, oid, committedSerial, prfactory, committedData)
resolved = resolve(old, committed, newstate)
file = StringIO()
pickler = Pickler(file,1)
pickler.persistent_id = persistent_id
pickler.dump(meta)
pickler.dump(resolved)
return file.getvalue(1)
except (ConflictError, BadClassName):
return None
except:
# If anything else went wrong, catch it here and avoid passing an
# arbitrary exception back to the client. The error here will mask
# the original ConflictError. A client can recover from a
# ConflictError, but not necessarily from other errors. But log
# the error so that any problems can be fixed.
logger.error("Unexpected error", exc_info=True)
return None
class ConflictResolvingStorage:
"Mix-in class that provides conflict resolution handling for storages"
tryToResolveConflict = tryToResolveConflict
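# Sketch of a class that opts in to conflict resolution (hypothetical; a
# counter is the classic example). _p_resolveConflict() receives the
# three object states (plain dicts for ordinary objects) and returns the
# merged state:
#
#   from persistent import Persistent
#
#   class Counter(Persistent):
#       def __init__(self):
#           self.value = 0
#       def _p_resolveConflict(self, old, committed, new):
#           # Apply both concurrent increments relative to the old state.
#           resolved = dict(committed)
#           resolved['value'] = (committed['value'] + new['value']
#                                - old['value'])
#           return resolved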
##############################################################################
#
# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################
"""Database connection support
$Id$"""
import logging
import sys
import threading
import warnings
from time import time
from persistent import PickleCache
import transaction
from ZODB.ConflictResolution import ResolvedSerial
from ZODB.ExportImport import ExportImport
from ZODB.POSException \
import ConflictError, ReadConflictError, InvalidObjectReference, \
ConnectionStateError
from ZODB.TmpStore import TmpStore
from ZODB.utils import u64, oid_repr, z64, positive_id
from ZODB.serialize import ObjectWriter, ConnectionObjectReader, myhasattr
from ZODB.interfaces import IConnection
from ZODB.interfaces import implements
global_reset_counter = 0
def resetCaches():
"""Causes all connection caches to be reset as connections are reopened.
Zope's refresh feature uses this. When you reload Python modules,
instances of classes continue to use the old class definitions.
To use the new code immediately, the refresh feature asks ZODB to
clear caches by calling resetCaches(). When the instances are
loaded by subsequent connections, they will use the new class
definitions.
"""
global global_reset_counter
global_reset_counter += 1
class Connection(ExportImport, object):
"""Connection to ZODB for loading and storing objects.
The Connection object serves as a data manager. The root() method
on a Connection returns the root object for the database. This
object and all objects reachable from it are associated with the
Connection that loaded them. When a transaction commits, it uses
the Connection to store modified objects.
Typical use of ZODB is for each thread to have its own
Connection and that no thread should have more than one Connection
to the same database. A thread is associated with a Connection by
loading objects from that Connection. Objects loaded by one
thread should not be used by another thread.
A Connection can be associated with a single version when it is
created. By default, a Connection is not associated with a
version; it uses non-version data.
Each Connection provides an isolated, consistent view of the
database, by managing independent copies of objects in the
database. At transaction boundaries, these copies are updated to
reflect the current state of the database.
You should not instantiate this class directly; instead call the
open() method of a DB instance.
In many applications, root() is the only method of the Connection
that you will need to use.
Synchronization
---------------
A Connection instance is not thread-safe. It is designed to
support a thread model where each thread has its own transaction.
If an application has more than one thread that uses the
connection or the transaction the connection is registered with,
the application should provide locking.
The Connection manages movement of objects in and out of object
storage.
XXX We should document an intended API for using a Connection via
multiple threads.
XXX We should explain that the Connection has a cache and that
multiple calls to get() will return a reference to the same
object, provided that one of the earlier objects is still
referenced. Object identity is preserved within a connection, but
not across connections.
XXX Mention the database pool.
A database connection always presents a consistent view of the
objects in the database, although it may not always present the
most current revision of any particular object. Modifications
made by concurrent transactions are not visible until the next
transaction boundary (abort or commit).
    Two options affect consistency: mvcc and synch. Both are enabled
    by default.
If you pass mvcc=True to db.open(), the Connection will never read
non-current revisions of an object. Instead it will raise a
ReadConflictError to indicate that the current revision is
unavailable because it was written after the current transaction
began.
The logic for handling modifications assumes that the thread that
    opened a Connection (i.e., called db.open()) is the thread that will
    use the Connection. If this is not true, you should pass synch=False
to db.open(). When the synch option is disabled, some transaction
boundaries will be missed by the Connection; in particular, if a
transaction does not involve any modifications to objects loaded
from the Connection and synch is disabled, the Connection will
miss the transaction boundary. Two examples of this behavior are
db.undo() and read-only transactions.
:Groups:
- `User Methods`: root, get, add, close, db, sync, isReadOnly,
cacheGC, cacheFullSweep, cacheMinimize, getVersion,
modifiedInVersion
- `Experimental Methods`: setLocalTransaction, getTransaction,
onCloseCallbacks
- `Transaction Data Manager Methods`: tpc_begin, tpc_vote,
tpc_finish, tpc_abort, sortKey, abort, commit, commit_sub,
abort_sub
- `Database Invalidation Methods`: invalidate, _setDB
- `IPersistentDataManager Methods`: setstate, register,
setklassstate
- `Other Methods`: oldstate, exchange, getDebugInfo, setDebugInfo,
getTransferCounts
"""
implements(IConnection)
_tmp = None
_code_timestamp = 0
def __init__(self, version='', cache_size=400,
cache_deactivate_after=None, mvcc=True, txn_mgr=None,
synch=True):
"""Create a new Connection.
        A Connection instance should be instantiated by the DB
instance that it is connected to.
:Parameters:
- `version`: the "version" that all changes will be made
in, defaults to no version.
- `cache_size`: the target size of the in-memory object
cache, measured in objects.
- `cache_deactivate_after`: deprecated, ignored
- `mvcc`: boolean indicating whether MVCC is enabled
- `txn_mgr`: transaction manager to use. None means
              use the default transaction manager.
- `synch`: boolean indicating whether Connection should
register for afterCompletion() calls.
"""
self._log = logging.getLogger("ZODB.Connection")
self._storage = None
self._debug_info = ()
self._opened = None # time.time() when DB.open() opened us
self._version = version
self._cache = cache = PickleCache(self, cache_size)
if version:
# Caches for versions end up empty if the version
# is not used for a while. Non-version caches
# keep their content indefinitely.
# XXX Why do we want version caches to behave this way?
self._cache.cache_drain_resistance = 100
self._committed = []
self._added = {}
self._added_during_commit = None
self._reset_counter = global_reset_counter
self._load_count = 0 # Number of objects unghosted
self._store_count = 0 # Number of objects stored
# List of oids of modified objects (to be invalidated on an abort).
self._modified = []
# List of all objects (not oids) registered as modified by the
# persistence machinery.
self._registered_objects = []
# Do we need to join a txn manager?
self._needs_to_join = True
# If a transaction manager is passed to the constructor, use
# it instead of the global transaction manager. The instance
# variable will hold a TM instance.
self._txn_mgr = txn_mgr or transaction.manager
# _synch is a boolean; if True, the Connection will register
# with the TM to receive afterCompletion() calls.
self._synch = synch
# _invalidated queues invalidate messages delivered from the DB
# _inv_lock prevents one thread from modifying the set while
# another is processing invalidations. All the invalidations
# from a single transaction should be applied atomically, so
# the lock must be held when reading _invalidated.
# XXX It sucks that we have to hold the lock to read
# _invalidated. Normally, _invalidated is written by calling
# dict.update, which will execute atomically by virtue of the
# GIL. But some storage might generate oids where hash or
# compare invokes Python code. In that case, the GIL can't
# save us.
self._inv_lock = threading.Lock()
self._invalidated = d = {}
self._invalid = d.has_key
# We intend to prevent committing a transaction in which
# ReadConflictError occurs. _conflicts is the set of oids that
# experienced ReadConflictError. Any time we raise ReadConflictError,
# the oid should be added to this set, and we should be sure that the
# object is registered. Because it's registered, Connection.commit()
# will raise ReadConflictError again (because the oid is in
# _conflicts).
self._conflicts = {}
# If MVCC is enabled, then _mvcc is True and _txn_time stores
# the upper bound on transactions visible to this connection.
# That is, all object revisions must be written before _txn_time.
# If it is None, then the current revisions are acceptable.
# If the connection is in a version, mvcc will be disabled, because
# loadBefore() only returns non-version data.
self._mvcc = mvcc and not version
self._txn_time = None
# To support importFile(), implemented in the ExportImport base
# class, we need to run _importDuringCommit() from our commit()
# method. If _import is not None, it is a two-tuple of arguments
# to pass to _importDuringCommit().
self._import = None
def getTransaction(self):
"""Get the current transaction for this connection.
:deprecated:
The transaction manager's get method works the same as this
method. You can pass a transaction manager (TM) to DB.open()
to control which TM the Connection uses.
"""
warnings.warn("getTransaction() is deprecated. "
"Use the txn_mgr argument to DB.open() instead.",
DeprecationWarning)
return self._txn_mgr.get()
def setLocalTransaction(self):
"""Use a transaction bound to the connection rather than the thread.
:deprecated:
Returns the transaction manager used by the connection. You
can pass a transaction manager (TM) to DB.open() to control
which TM the Connection uses.
"""
warnings.warn("setLocalTransaction() is deprecated. "
"Use the txn_mgr argument to DB.open() instead.",
DeprecationWarning)
if self._txn_mgr is transaction.manager:
if self._synch:
self._txn_mgr.unregisterSynch(self)
self._txn_mgr = transaction.TransactionManager()
if self._synch:
self._txn_mgr.registerSynch(self)
return self._txn_mgr
def _cache_items(self):
# find all items on the lru list
items = self._cache.lru_items()
        # find everything: some on the lru list, some not
everything = self._cache.cache_data
# remove those items that are on the lru list
for k,v in items:
del everything[k]
# return a list of [ghosts....not recently used.....recently used]
return everything.items() + items
def __repr__(self):
if self._version:
ver = ' (in version %s)' % `self._version`
else:
ver = ''
return '<Connection at %08x%s>' % (positive_id(self), ver)
def get(self, oid):
"""Return the persistent object with oid 'oid'.
If the object was not in the cache and the object's class is
ghostable, then a ghost will be returned. If the object is
already in the cache, a reference to the cached object will be
returned.
Applications seldom need to call this method, because objects
are loaded transparently during attribute lookup.
:return: persistent object corresponding to `oid`
:Parameters:
- `oid`: an object id
:Exceptions:
- `KeyError`: if oid does not exist. It is possible that an
object does not exist as of the current transaction, but
existed in the past. It may even exist again in the
future, if the transaction that removed it is undone.
- `ConnectionStateError`: if the connection is closed.
"""
if self._storage is None:
# XXX Should this be a ZODB-specific exception?
raise ConnectionStateError("The database connection is closed")
obj = self._cache.get(oid, None)
if obj is not None:
return obj
obj = self._added.get(oid, None)
if obj is not None:
return obj
p, serial = self._storage.load(oid, self._version)
obj = self._reader.getGhost(p)
obj._p_oid = oid
obj._p_jar = self
obj._p_changed = None
obj._p_serial = serial
self._cache[oid] = obj
return obj
# deprecate this method?
__getitem__ = get
def add(self, obj):
"""Add a new object 'obj' to the database and assign it an oid.
A persistent object is normally added to the database and
assigned an oid when it becomes reachable to an object already in
the database. In some cases, it is useful to create a new
object and use its oid (_p_oid) in a single transaction.
This method assigns a new oid regardless of whether the object
is reachable.
The object is added when the transaction commits. The object
must implement the IPersistent interface and must not
already be associated with a Connection.
:Parameters:
- `obj`: a Persistent object
:Exceptions:
- `TypeError`: if obj is not a persistent object.
- `InvalidObjectReference`: if obj is already associated
with another connection.
- `ConnectionStateError`: if the connection is closed.
"""
if self._storage is None:
raise ConnectionStateError("The database connection is closed")
marker = object()
oid = getattr(obj, "_p_oid", marker)
if oid is marker:
raise TypeError("Only first-class persistent objects may be"
" added to a Connection.", obj)
elif obj._p_jar is None:
assert obj._p_oid is None
oid = obj._p_oid = self._storage.new_oid()
obj._p_jar = self
if self._added_during_commit is not None:
self._added_during_commit.append(obj)
self._register(obj)
# Add to _added after calling register(), so that _added
# can be used as a test for whether the object has been
# registered with the transaction.
self._added[oid] = obj
elif obj._p_jar is not self:
raise InvalidObjectReference(obj, obj._p_jar)
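    # Usage sketch (hypothetical class name): obtaining an oid before the
    # object becomes reachable from the root:
    #
    #   obj = MyPersistentClass()
    #   conn.add(obj)
    #   oid = obj._p_oid            # assigned immediately
    #   conn.root()['by_oid'] = obj
    #   transaction.get().commit()  # the object is stored at commit time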
def sortKey(self):
# If two connections use the same storage, give them a
# consistent order using id(). This is unique for the
# lifetime of a connection, which is good enough.
return "%s:%s" % (self._sortKey(), id(self))
def _setDB(self, odb, mvcc=None, txn_mgr=None, synch=None):
"""Register odb, the DB that this Connection uses.
This method is called by the DB every time a Connection
is opened. Any invalidations received while the Connection
was closed will be processed.
If the global module function resetCaches() was called, the
cache will be cleared.
:Parameters:
- `odb`: database that owns the Connection
- `mvcc`: boolean indicating whether MVCC is enabled
- `txn_mgr`: transaction manager to use. None means
              use the default transaction manager.
- `synch`: boolean indicating whether Connection should
register for afterCompletion() calls.
"""
# XXX Why do we go to all the trouble of setting _db and
# other attributes on open and clearing them on close?
# A Connection is only ever associated with a single DB
# and Storage.
self._db = odb
self._storage = odb._storage
self._sortKey = odb._storage.sortKey
self.new_oid = odb._storage.new_oid
self._opened = time()
if synch is not None:
self._synch = synch
if mvcc is not None:
self._mvcc = mvcc
self._txn_mgr = txn_mgr or transaction.manager
if self._reset_counter != global_reset_counter:
# New code is in place. Start a new cache.
self._resetCache()
else:
self._flush_invalidations()
if self._synch:
self._txn_mgr.registerSynch(self)
self._reader = ConnectionObjectReader(self, self._cache,
self._db.classFactory)
def _resetCache(self):
"""Creates a new cache, discarding the old one.
See the docstring for the resetCaches() function.
"""
self._reset_counter = global_reset_counter
self._invalidated.clear()
cache_size = self._cache.cache_size
self._cache = cache = PickleCache(self, cache_size)
def abort(self, transaction):
"""Abort the object in the transaction.
This just deactivates the thing.
"""
for obj in self._registered_objects:
oid = obj._p_oid
assert oid is not None
if oid in self._added:
del self._added[oid]
del obj._p_jar
del obj._p_oid
else:
self._cache.invalidate(oid)
self._tpc_cleanup()
# XXX should there be a way to call incrgc directly?
# perhaps "full sweep" should do that?
# XXX we should test what happens when these methods are called
# mid-transaction.
def cacheFullSweep(self, dt=None):
# XXX needs doc string
warnings.warn("cacheFullSweep is deprecated. "
"Use cacheMinimize instead.", DeprecationWarning)
if dt is None:
self._cache.full_sweep()
else:
self._cache.full_sweep(dt)
def cacheMinimize(self, dt=None):
"""Deactivate all unmodified objects in the cache.
Call _p_deactivate() on each cached object, attempting to turn
it into a ghost. It is possible for individual objects to
remain active.
:Parameters:
- `dt`: ignored. It is provided only for backwards compatibility.
"""
if dt is not None:
warnings.warn("The dt argument to cacheMinimize is ignored.",
DeprecationWarning)
self._cache.minimize()
def cacheGC(self):
"""Reduce cache size to target size.
Call _p_deactivate() on cached objects until the cache size
falls under the target size.
"""
self._cache.incrgc()
__onCloseCallbacks = None
def onCloseCallback(self, f):
"""Register a callable, f, to be called by close().
The callable, f, will be called at most once, the next time
the Connection is closed.
:Parameters:
- `f`: object that will be called on `close`
"""
if self.__onCloseCallbacks is None:
self.__onCloseCallbacks = []
self.__onCloseCallbacks.append(f)
def close(self):
"""Close the Connection.
A closed Connection should not be used by client code. It
can't load or store objects. Objects in the cache are not
        freed, because Connections are re-used and the cache is
        expected to be useful to the next client.
When the Connection is closed, all callbacks registered by
onCloseCallback() are invoked and the cache is scanned for
old objects.
"""
if not self._needs_to_join:
# We're currently joined to a transaction.
raise ConnectionStateError("Cannot close a connection joined to "
"a transaction")
if self._tmp is not None:
# There are no direct modifications pending, but a subtransaction
# is pending.
raise ConnectionStateError("Cannot close a connection with a "
"pending subtransaction")
if self._cache is not None:
self._cache.incrgc() # This is a good time to do some GC
# Call the close callbacks.
if self.__onCloseCallbacks is not None:
for f in self.__onCloseCallbacks:
try:
f()
except: # except what?
f = getattr(f, 'im_self', f)
self._log.error("Close callback failed for %s", f,
exc_info=sys.exc_info())
self.__onCloseCallbacks = None
self._storage = self._tmp = self.new_oid = None
self._debug_info = ()
self._opened = None
# Return the connection to the pool.
if self._db is not None:
if self._synch:
self._txn_mgr.unregisterSynch(self)
self._db._closeConnection(self)
# _closeConnection() set self._db to None. However, we can't
# assert that here, because self may have been reused (by
# another thread) by the time we get back here.
def commit(self, transaction):
if self._import:
# XXX eh?
self._importDuringCommit(transaction, *self._import)
self._import = None
# Just in case an object is added as a side-effect of storing
# a modified object. If, for example, a __getstate__() method
# calls add(), the newly added objects will show up in
# _added_during_commit. This sounds insane, but has actually
# happened.
self._added_during_commit = []
for obj in self._registered_objects:
oid = obj._p_oid
assert oid
if oid in self._conflicts:
raise ReadConflictError(object=obj)
if obj._p_jar is not self:
raise InvalidObjectReference(obj, obj._p_jar)
elif oid in self._added:
assert obj._p_serial == z64
elif obj._p_changed:
if oid in self._invalidated:
resolve = getattr(obj, "_p_resolveConflict", None)
if resolve is None:
raise ConflictError(object=obj)
self._modified.append(oid)
else:
# Nothing to do. It's been said that it's legal, e.g., for
# an object to set _p_changed to false after it's been
# changed and registered.
continue
self._store_objects(ObjectWriter(obj), transaction)
for obj in self._added_during_commit:
self._store_objects(ObjectWriter(obj), transaction)
self._added_during_commit = None
def _store_objects(self, writer, transaction):
for obj in writer:
oid = obj._p_oid
serial = getattr(obj, "_p_serial", z64)
if serial == z64:
# obj is a new object
self._creating.append(oid)
# Because obj was added, it is now in _creating, so it can
# be removed from _added.
self._added.pop(oid, None)
else:
if (oid in self._invalidated
and not hasattr(obj, '_p_resolveConflict')):
raise ConflictError(object=obj)
self._modified.append(oid)
p = writer.serialize(obj) # This calls __getstate__ of obj
s = self._storage.store(oid, serial, p, self._version, transaction)
self._store_count += 1
# Put the object in the cache before handling the
# response, just in case the response contains the
# serial number for a newly created object
try:
self._cache[oid] = obj
except:
# Dang, I bet it's wrapped:
if hasattr(obj, 'aq_base'):
self._cache[oid] = obj.aq_base
else:
raise
self._handle_serial(s, oid)
def commit_sub(self, t):
"""Commit all work done in all subtransactions for this transaction."""
if self._tmp is None:
return
src = self._storage
self._storage = self._tmp
self._tmp = None
self._log.debug("Commiting subtransaction of size %s", src.getSize())
oids = src._index.keys()
self._storage.tpc_begin(t)
# Copy invalidating and creating info from temporary storage:
self._modified.extend(oids)
self._creating.extend(src._creating)
for oid in oids:
data, serial = src.load(oid, src)
s = self._storage.store(oid, serial, data, self._version, t)
self._handle_serial(s, oid, change=False)
def abort_sub(self, t):
"""Abort work done in all subtransactions for this transaction."""
if self._tmp is None:
return
src = self._storage
self._storage = self._tmp
self._tmp = None
self._cache.invalidate(src._index.keys())
self._invalidate_creating(src._creating)
def _invalidate_creating(self, creating=None):
"""Dissown any objects newly saved in an uncommitted transaction."""
if creating is None:
creating = self._creating
self._creating = []
for oid in creating:
o = self._cache.get(oid)
if o is not None:
del self._cache[oid]
del o._p_jar
del o._p_oid
def db(self):
return self._db
def getVersion(self):
if self._storage is None:
raise ConnectionStateError("The database connection is closed")
return self._version
def isReadOnly(self):
if self._storage is None:
raise ConnectionStateError("The database connection is closed")
return self._storage.isReadOnly()
def invalidate(self, tid, oids):
"""Notify the Connection that transaction 'tid' invalidated oids.
When the next transaction boundary is reached, objects will be
invalidated. If any of the invalidated objects is accessed by
the current transaction, the revision written before C{tid}
will be used.
The DB calls this method, even when the Connection is closed.
:Parameters:
- `tid`: the storage-level id of the transaction that committed
- `oids`: oids is a set of oids, represented as a dict with oids
as keys.
"""
self._inv_lock.acquire()
try:
if self._txn_time is None:
self._txn_time = tid
self._invalidated.update(oids)
finally:
self._inv_lock.release()
# The next two methods are callbacks for transaction synchronization.
def beforeCompletion(self, txn):
# We don't do anything before a commit starts.
pass
def afterCompletion(self, txn):
self._flush_invalidations()
def _flush_invalidations(self):
self._inv_lock.acquire()
try:
self._cache.invalidate(self._invalidated)
self._invalidated.clear()
self._txn_time = None
finally:
self._inv_lock.release()
# Now is a good time to collect some garbage
self._cache.incrgc()
def modifiedInVersion(self, oid):
try:
return self._db.modifiedInVersion(oid)
except KeyError:
return self._version
def register(self, obj):
"""Register obj with the current transaction manager.
A subclass could override this method to customize the default
policy of one transaction manager for each thread.
obj must be an object loaded from this Connection.
"""
assert obj._p_jar is self
if obj._p_oid is None:
# There is some old Zope code that assigns _p_jar
# directly. That is no longer allowed, but we need to
# provide support for old code that still does it.
# XXX The actual complaint here is that an object without
# an oid is being registered. I can't think of any way to
# achieve that without assignment to _p_jar. If there is
# a way, this will be a very confusing warning.
warnings.warn("Assigning to _p_jar is deprecated",
DeprecationWarning)
elif obj._p_oid in self._added:
# It was registered before it was added to _added.
return
self._register(obj)
def _register(self, obj=None):
if obj is not None:
self._registered_objects.append(obj)
if self._needs_to_join:
self._txn_mgr.get().join(self)
self._needs_to_join = False
def root(self):
"""Return the database root object.
The root is a persistent.mapping.PersistentMapping.
"""
return self.get(z64)
def setstate(self, obj):
oid = obj._p_oid
if self._storage is None:
msg = ("Shouldn't load state for %s "
"when the connection is closed" % oid_repr(oid))
self._log.error(msg)
raise ConnectionStateError(msg)
try:
self._setstate(obj)
except ConflictError:
raise
except:
self._log.error("Couldn't load state for %s", oid_repr(oid),
exc_info=sys.exc_info())
raise
def _setstate(self, obj):
# Helper for setstate(), which provides logging of failures.
# The control flow is complicated here to avoid loading an
# object revision that we are sure we aren't going to use. As
# a result, invalidation tests occur before and after the
# load. We can only be sure about invalidations after the
# load.
# If an object has been invalidated, there are several cases
# to consider:
# 1. Check _p_independent()
# 2. Try MVCC
# 3. Raise ConflictError.
# Does anything actually use _p_independent()? It would simplify
# the code if we could drop support for it.
# There is a harmless data race with self._invalidated. A
# dict update could go on in another thread, but we don't care
# because we have to check again after the load anyway.
if (obj._p_oid in self._invalidated
and not myhasattr(obj, "_p_independent")):
# If the object has _p_independent(), we will handle it below.
self._load_before_or_conflict(obj)
return
p, serial = self._storage.load(obj._p_oid, self._version)
self._load_count += 1
self._inv_lock.acquire()
try:
invalid = obj._p_oid in self._invalidated
finally:
self._inv_lock.release()
if invalid:
if myhasattr(obj, "_p_independent"):
# This call will raise a ReadConflictError if something
# goes wrong
self._handle_independent(obj)
else:
self._load_before_or_conflict(obj)
return
self._reader.setGhostState(obj, p)
obj._p_serial = serial
def _load_before_or_conflict(self, obj):
"""Load non-current state for obj or raise ReadConflictError."""
if not (self._mvcc and self._setstate_noncurrent(obj)):
self._register(obj)
self._conflicts[obj._p_oid] = True
raise ReadConflictError(object=obj)
def _setstate_noncurrent(self, obj):
"""Set state using non-current data.
Return True if state was available, False if not.
"""
try:
# Load data that was current before the commit at txn_time.
t = self._storage.loadBefore(obj._p_oid, self._txn_time)
except KeyError:
return False
if t is None:
return False
data, start, end = t
# The non-current transaction must have been written before
# txn_time. It must be current at txn_time, but could have
# been modified at txn_time.
assert start < self._txn_time, (u64(start), u64(self._txn_time))
assert end is not None
assert self._txn_time <= end, (u64(self._txn_time), u64(end))
self._reader.setGhostState(obj, data)
obj._p_serial = start
return True
def _handle_independent(self, obj):
# Helper method for setstate() handles possibly independent objects
# Call _p_independent(), if it returns True, setstate() wins.
# Otherwise, raise a ConflictError.
if obj._p_independent():
self._inv_lock.acquire()
try:
try:
del self._invalidated[obj._p_oid]
except KeyError:
pass
finally:
self._inv_lock.release()
else:
self._conflicts[obj._p_oid] = 1
self._register(obj)
raise ReadConflictError(object=obj)
def oldstate(self, obj, tid):
"""Return copy of obj that was written by tid.
XXX The returned object does not have the typical metadata
(_p_jar, _p_oid, _p_serial) set. I'm not sure how references
        to other persistent objects are handled.
:return: a persistent object
:Parameters:
- `obj`: a persistent object from this Connection.
- `tid`: id of a transaction that wrote an earlier revision.
:Exceptions:
- `KeyError`: if tid does not exist or if tid deleted a revision
of obj.
"""
assert obj._p_jar is self
p = self._storage.loadSerial(obj._p_oid, tid)
return self._reader.getState(p)
def setklassstate(self, obj):
        # Special case code to handle ZClasses, I think.
        # Called by the cache when an object of type `type` (a class) is
        # invalidated.
try:
oid = obj._p_oid
p, serial = self._storage.load(oid, self._version)
# We call getGhost(), but we actually get a non-ghost back.
# The object is a class, which can't actually be ghosted.
copy = self._reader.getGhost(p)
obj.__dict__.clear()
obj.__dict__.update(copy.__dict__)
obj._p_oid = oid
obj._p_jar = self
obj._p_changed = 0
obj._p_serial = serial
except:
self._log.error("setklassstate failed", exc_info=sys.exc_info())
raise
def tpc_begin(self, transaction, sub=False):
self._modified = []
# _creating is a list of oids of new objects, which is used to
# remove them from the cache if a transaction aborts.
self._creating = []
if sub and self._tmp is None:
# Sub-transaction!
self._tmp = self._storage
self._storage = TmpStore(self._version, self._storage)
self._storage.tpc_begin(transaction)
def tpc_vote(self, transaction):
try:
vote = self._storage.tpc_vote
except AttributeError:
return
s = vote(transaction)
self._handle_serial(s)
def _handle_serial(self, store_return, oid=None, change=1):
"""Handle the returns from store() and tpc_vote() calls."""
# These calls can return different types depending on whether
# ZEO is used. ZEO uses asynchronous returns that may be
# returned in batches by the ClientStorage. ZEO1 can also
# return an exception object and expect that the Connection
# will raise the exception.
        # When commit_sub() executes a store, there is no need to
# update the _p_changed flag, because the subtransaction
# tpc_vote() calls already did this. The change=1 argument
# exists to allow commit_sub() to avoid setting the flag
# again.
# When conflict resolution occurs, the object state held by
# the connection does not match what is written to the
# database. Invalidate the object here to guarantee that
# the new state is read the next time the object is used.
if not store_return:
return
if isinstance(store_return, str):
assert oid is not None
self._handle_one_serial(oid, store_return, change)
else:
for oid, serial in store_return:
self._handle_one_serial(oid, serial, change)
def _handle_one_serial(self, oid, serial, change):
if not isinstance(serial, str):
raise serial
obj = self._cache.get(oid, None)
if obj is None:
return
if serial == ResolvedSerial:
del obj._p_changed # transition from changed to ghost
else:
if change:
obj._p_changed = 0 # transition from changed to up-to-date
obj._p_serial = serial
def tpc_finish(self, transaction):
# It's important that the storage calls the function we pass
# while it still has its lock. We don't want another thread
# to be able to read any updated data until we've had a chance
# to send an invalidation message to all of the other
# connections!
if self._tmp is not None:
            # Committing a subtransaction!
            # There is no need to invalidate anything.
            self._storage.tpc_finish(transaction)
            self._storage._creating[:0] = self._creating
del self._creating[:]
else:
def callback(tid):
d = {}
for oid in self._modified:
d[oid] = 1
self._db.invalidate(tid, d, self)
self._storage.tpc_finish(transaction, callback)
self._tpc_cleanup()
def tpc_abort(self, transaction):
if self._import:
self._import = None
self._storage.tpc_abort(transaction)
self._cache.invalidate(self._modified)
self._invalidate_creating()
while self._added:
oid, obj = self._added.popitem()
del obj._p_oid
del obj._p_jar
self._tpc_cleanup()
# Common cleanup actions after tpc_finish/tpc_abort.
def _tpc_cleanup(self):
self._conflicts.clear()
if not self._synch:
self._flush_invalidations()
self._needs_to_join = True
self._registered_objects = []
def sync(self):
self._txn_mgr.get().abort()
sync = getattr(self._storage, 'sync', 0)
if sync:
sync()
self._flush_invalidations()
def getDebugInfo(self):
return self._debug_info
def setDebugInfo(self, *args):
self._debug_info = self._debug_info + args
def getTransferCounts(self, clear=False):
"""Returns the number of objects loaded and stored.
If clear is True, reset the counters.
"""
res = self._load_count, self._store_count
if clear:
self._load_count = 0
self._store_count = 0
return res
def exchange(self, old, new):
# called by a ZClasses method that isn't executed by the test suite
oid = old._p_oid
new._p_oid = oid
new._p_jar = self
new._p_changed = 1
self._register(new)
self._cache[oid] = new
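# Typical usage sketch (hypothetical; `db` is a ZODB.DB instance):
#
#   import transaction
#   conn = db.open()
#   root = conn.root()
#   root['greeting'] = 'hello'
#   transaction.get().commit()
#   conn.close()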
##############################################################################
#
# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################
"""Database objects
$Id$"""
import cPickle, cStringIO, sys
from thread import allocate_lock
from time import time, ctime
import warnings
import logging
from ZODB.broken import find_global
from ZODB.utils import z64
from ZODB.Connection import Connection
from ZODB.serialize import referencesf
import transaction
logger = logging.getLogger('ZODB.DB')
class DB(object):
"""The Object Database
-------------------
The DB class coordinates the activities of multiple database
Connection instances. Most of the work is done by the
Connections created via the open method.
The DB instance manages a pool of connections. If a connection is
closed, it is returned to the pool and its object cache is
preserved. A subsequent call to open() will reuse the connection.
There is a limit to the pool size; if all its connections are in
use, calls to open() will block until one of the open connections
is closed.
The class variable 'klass' is used by open() to create database
connections. It is set to Connection, but a subclass could override
it to provide a different connection implementation.
The database provides a few methods intended for application code
-- open, close, undo, and pack -- and a large collection of
methods for inspecting the database and its connections' caches.
:Cvariables:
- `klass`: Class used by L{open} to create database connections
:Groups:
- `User Methods`: __init__, open, close, undo, pack, classFactory
- `Inspection Methods`: getName, getSize, objectCount,
getActivityMonitor, setActivityMonitor
- `Connection Pool Methods`: getPoolSize, getVersionPoolSize,
removeVersionPool, setPoolSize, setVersionPoolSize
- `Transaction Methods`: invalidate
- `Other Methods`: lastTransaction, connectionDebugInfo
- `Version Methods`: modifiedInVersion, abortVersion, commitVersion,
versionEmpty
- `Cache Inspection Methods`: cacheDetail, cacheExtremeDetail,
cacheFullSweep, cacheLastGCTime, cacheMinimize, cacheSize,
cacheDetailSize, getCacheSize, getVersionCacheSize, setCacheSize,
setVersionCacheSize
- `Deprecated Methods`: getCacheDeactivateAfter,
setCacheDeactivateAfter,
getVersionCacheDeactivateAfter, setVersionCacheDeactivateAfter
"""
klass = Connection # Class to use for connections
_activity_monitor = None
def __init__(self, storage,
pool_size=7,
cache_size=400,
cache_deactivate_after=None,
version_pool_size=3,
version_cache_size=100,
version_cache_deactivate_after=None,
):
"""Create an object database.
:Parameters:
- `storage`: the storage used by the database, e.g. FileStorage
- `pool_size`: maximum number of open connections
- `cache_size`: target size of Connection object cache
- `cache_deactivate_after`: ignored
- `version_pool_size`: maximum number of connections (per version)
- `version_cache_size`: target size of Connection object cache for
version connections
- `version_cache_deactivate_after`: ignored
"""
# Allocate locks:
l = allocate_lock()
self._a = l.acquire
self._r = l.release
# Setup connection pools and cache info
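        # _pools is a pair: a dict mapping version name to
        # (pool, allocated, pool_lock) triples, and a list of
        # (pool, allocated) pairs used for iteration.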
        self._pools = {}, []
self._temps = []
self._pool_size = pool_size
self._cache_size = cache_size
self._version_pool_size = version_pool_size
self._version_cache_size = version_cache_size
# warn about use of deprecated arguments
if (cache_deactivate_after is not None or
version_cache_deactivate_after is not None):
warnings.warn("cache_deactivate_after has no effect",
DeprecationWarning)
self._miv_cache = {}
# Setup storage
self._storage=storage
storage.registerDB(self, None)
if not hasattr(storage,'tpc_vote'):
storage.tpc_vote = lambda *args: None
try:
storage.load(z64,'')
except KeyError:
# Create the database's root in the storage if it doesn't exist
from persistent.mapping import PersistentMapping
root = PersistentMapping()
# Manually create a pickle for the root to put in the storage.
# The pickle must be in the special ZODB format.
file = cStringIO.StringIO()
p = cPickle.Pickler(file, 1)
p.dump((root.__class__, None))
p.dump(root.__getstate__())
t = transaction.Transaction()
t.description = 'initial database creation'
storage.tpc_begin(t)
storage.store(z64, None, file.getvalue(), '', t)
storage.tpc_vote(t)
storage.tpc_finish(t)
# Pass through methods:
for m in ['history', 'supportsUndo', 'supportsVersions', 'undoLog',
'versionEmpty', 'versions']:
setattr(self, m, getattr(storage, m))
if hasattr(storage, 'undoInfo'):
self.undoInfo = storage.undoInfo
def _closeConnection(self, connection):
"""Return a connection to the pool.
connection._db must be self on entry.
"""
self._a()
try:
assert connection._db is self
connection._db = None
am = self._activity_monitor
if am is not None:
am.closedConnection(connection)
version = connection._version
pools, pooll = self._pools
try:
pool, allocated, pool_lock = pools[version]
except KeyError:
# No such version. We must have deleted the pool.
# Just let the connection go.
# We need to break circular refs to make it really go.
# XXX What objects are involved in the cycle?
connection.__dict__.clear()
return
pool.append(connection)
if len(pool) == 1:
# Pool now usable again, unlock it.
pool_lock.release()
finally:
self._r()
def _connectionMap(self, f):
self._a()
try:
pools, pooll = self._pools
for pool, allocated in pooll:
for cc in allocated:
f(cc)
temps = self._temps
if temps:
t = []
rc = sys.getrefcount
                for cc in temps:
                    if rc(cc) > 3:
                        f(cc)
                        # Keep connections that are still referenced;
                        # unreferenced ones are dropped from _temps here.
                        t.append(cc)
                self._temps = t
finally:
self._r()
def abortVersion(self, version, txn=None):
if txn is None:
txn = transaction.get()
txn.register(AbortVersion(self, version))
def cacheDetail(self):
"""Return information on objects in the various caches
Organized by class.
"""
detail = {}
def f(con, detail=detail, have_detail=detail.has_key):
for oid, ob in con._cache.items():
module = getattr(ob.__class__, '__module__', '')
module = module and '%s.' % module or ''
c = "%s%s" % (module, ob.__class__.__name__)
if have_detail(c):
detail[c] += 1
else:
detail[c] = 1
self._connectionMap(f)
detail = detail.items()
detail.sort()
return detail
def cacheExtremeDetail(self):
detail = []
conn_no = [0] # A mutable reference to a counter
def f(con, detail=detail, rc=sys.getrefcount, conn_no=conn_no):
conn_no[0] += 1
cn = conn_no[0]
for oid, ob in con._cache_items():
id = ''
if hasattr(ob, '__dict__'):
d = ob.__dict__
if d.has_key('id'):
id = d['id']
elif d.has_key('__name__'):
id = d['__name__']
module = getattr(ob.__class__, '__module__', '')
module = module and ('%s.' % module) or ''
# What refcount ('rc') should we return? The intent is
# that we return the true Python refcount, but as if the
# cache didn't exist. This routine adds 3 to the true
# refcount: 1 for binding to name 'ob', another because
# ob lives in the con._cache_items() list we're iterating
# over, and calling sys.getrefcount(ob) boosts ob's
# count by 1 too. So the true refcount is 3 less than
# sys.getrefcount(ob) returns. But, in addition to that,
# the cache holds an extra reference on non-ghost objects,
# and we also want to pretend that doesn't exist.
detail.append({
'conn_no': cn,
'oid': oid,
'id': id,
'klass': "%s%s" % (module, ob.__class__.__name__),
'rc': rc(ob) - 3 - (ob._p_changed is not None),
'state': ob._p_changed,
#'references': con.references(oid),
})
self._connectionMap(f)
return detail
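# Worked example of the 'rc' arithmetic above (illustrative): if
# sys.getrefcount(ob) returns 7 for a non-ghost object (ob._p_changed
# is not None), the reported value is 7 - 3 - 1 = 3 -- the count with
# the 'ob' binding, the items() list entry, the getrefcount() argument,
# and the cache's strong non-ghost reference all discounted.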
def cacheFullSweep(self):
self._connectionMap(lambda c: c._cache.full_sweep())
def cacheLastGCTime(self):
m=[0]
def f(con, m=m):
t = con._cache.cache_last_gc_time
if t > m[0]:
m[0] = t
self._connectionMap(f)
return m[0]
def cacheMinimize(self):
self._connectionMap(lambda c: c._cache.minimize())
def cacheSize(self):
m=[0]
def f(con, m=m):
m[0] += con._cache.cache_non_ghost_count
self._connectionMap(f)
return m[0]
def cacheDetailSize(self):
m = []
def f(con, m=m):
m.append({'connection':repr(con),
'ngsize':con._cache.cache_non_ghost_count,
'size':len(con._cache)})
self._connectionMap(f)
m.sort()
return m
def close(self):
"""Close the database and its underlying storage.
It is important to close the database, because the storage may
flush in-memory data structures to disk when it is closed.
Leaving the storage open when the process exits can cause the
next open to be slow.
What effect does closing the database have on existing
connections? Technically, they remain open, but their storage
is closed, so they stop behaving usefully. Perhaps close()
should also close all the Connections.
"""
self._storage.close()
def commitVersion(self, source, destination='', txn=None):
if txn is None:
txn = transaction.get()
txn.register(CommitVersion(self, source, destination))
def getCacheSize(self):
return self._cache_size
def lastTransaction(self):
return self._storage.lastTransaction()
def getName(self):
return self._storage.getName()
def getPoolSize(self):
return self._pool_size
def getSize(self):
return self._storage.getSize()
def getVersionCacheSize(self):
return self._version_cache_size
def getVersionPoolSize(self):
return self._version_pool_size
def invalidate(self, tid, oids, connection=None, version=''):
"""Invalidate references to a given oid.
This is used to indicate that one of the connections has committed a
change to the object. The connection committing the change should be
passed in to prevent useless (but harmless) messages to the
connection.
"""
if connection is not None:
version = connection._version
# Update modified in version cache
# XXX must make this work with list or dict to backport to 2.6
for oid in oids.keys():
h = hash(oid) % 131
o = self._miv_cache.get(h, None)
if o is not None and o[0]==oid:
del self._miv_cache[h]
# Notify connections
for pool, allocated in self._pools[1]:
for cc in allocated:
if (cc is not connection and
(not version or cc._version==version)):
if sys.getrefcount(cc) <= 3:
cc.close()
cc.invalidate(tid, oids)
if self._temps:
t = []
for cc in self._temps:
if sys.getrefcount(cc) > 3:
if (cc is not connection and
(not version or cc._version == version)):
cc.invalidate(tid, oids)
t.append(cc)
else:
cc.close()
self._temps = t
def modifiedInVersion(self, oid):
h = hash(oid) % 131
cache = self._miv_cache
o=cache.get(h, None)
if o and o[0]==oid:
return o[1]
v = self._storage.modifiedInVersion(oid)
cache[h] = oid, v
return v
def objectCount(self):
return len(self._storage)
def open(self, version='', transaction=None, temporary=0, force=None,
waitflag=1, mvcc=True, txn_mgr=None, synch=True):
"""Return a database Connection for use by application code.
The optional version argument can be used to specify that a
version connection is desired.
The optional transaction argument can be provided to cause the
connection to be automatically closed when a transaction is
terminated. In addition, connections per transaction are
reused, if possible.
Note that the connection pool is managed as a stack, to
increase the likelihood that the connection's stack will
include useful objects.
:Parameters:
- `version`: the "version" that all changes will be made
in, defaults to no version.
- `transaction`: XXX
- `temporary`: XXX
- `force`: XXX
- `waitflag`: XXX
- `mvcc`: boolean indicating whether MVCC is enabled
- `txn_mgr`: transaction manager to use. None means
use the default transaction manager.
- `synch`: boolean indicating whether Connection should
register for afterCompletion() calls.
"""
self._a()
try:
if transaction is not None:
connections = transaction._connections
if connections:
if connections.has_key(version) and not temporary:
return connections[version]
else:
transaction._connections = connections = {}
transaction = transaction._connections
if temporary:
# This is a temporary connection.
# We won't bother with the pools. This will be
# a one-use connection.
c = self.klass(version=version,
cache_size=self._version_cache_size,
mvcc=mvcc, txn_mgr=txn_mgr, synch=synch)
c._setDB(self)
self._temps.append(c)
if transaction is not None:
transaction[id(c)] = c
return c
pools, pooll = self._pools
# pools is a mapping object:
#
# {version -> (pool, allocated, lock)}
#
# where:
#
# pool is the connection pool for the version,
# allocated is a list of all of the allocated
# connections, and
# lock is a lock that is used to block when a pool is
# empty and no more connections can be allocated.
#
# pooll is a list of all of the pools and allocated for
# use in cases where we need to iterate over all
# connections or all inactive connections.
# Pool locks are tricky. Basically, the lock needs to be
# set whenever the pool becomes empty so that threads are
# forced to wait until the pool gets a connection in it.
# The lock is acquired when the (empty) pool is
# created. The lock is acquired just prior to removing
# the last connection from the pool and released just after
# adding a connection to an empty pool.
if pools.has_key(version):
pool, allocated, pool_lock = pools[version]
else:
pool, allocated, pool_lock = pools[version] = (
[], [], allocate_lock())
pooll.append((pool, allocated))
pool_lock.acquire()
if not pool:
c = None
if version:
if self._version_pool_size > len(allocated) or force:
c = self.klass(version=version,
cache_size=self._version_cache_size,
mvcc=mvcc, txn_mgr=txn_mgr)
allocated.append(c)
pool.append(c)
elif self._pool_size > len(allocated) or force:
c = self.klass(version=version,
cache_size=self._cache_size,
mvcc=mvcc, txn_mgr=txn_mgr, synch=synch)
allocated.append(c)
pool.append(c)
if c is None:
if waitflag:
self._r()
pool_lock.acquire()
self._a()
if len(pool) > 1:
# Note that the pool size will normally be 1 here,
# but it could be higher due to a race condition.
pool_lock.release()
else:
return
elif len(pool)==1:
# Taking last one, lock the pool.
# Note that another thread might grab the lock
# before us, so we might actually block, however,
# when we get the lock back, there *will* be a
# connection in the pool. OTOH, there's no limit on
# how long we may need to wait: if the other thread
# grabbed the lock in this section too, we'll wait
# here until another connection is closed.
# checkConcurrentUpdates1Storage provoked this frequently
# on a hyperthreaded machine, with its second thread
# timing out after waiting 5 minutes for DB.open() to
# return. So, if we can't get the pool lock immediately,
# now we make a recursive call. This allows the current
# thread to allocate a new connection instead of waiting
# arbitrarily long for the single connection in the pool
# right now.
self._r()
if not pool_lock.acquire(0):
# Propagate the connection options through the recursive call.
result = DB.open(self, version, transaction, temporary,
force, waitflag, mvcc=mvcc,
txn_mgr=txn_mgr, synch=synch)
self._a()
return result
self._a()
if len(pool) > 1:
# Note that the pool size will normally be 1 here,
# but it could be higher due to a race condition.
pool_lock.release()
c = pool.pop()
c._setDB(self, mvcc=mvcc, txn_mgr=txn_mgr, synch=synch)
for pool, allocated in pooll:
for cc in pool:
cc.cacheGC()
if transaction is not None:
transaction[version] = c
return c
finally:
self._r()
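# Typical use of the pooling machinery above (a sketch; error handling
# elided):
#
#   conn = db.open()              # draws from the unversioned pool
#   try:
#       root = conn.root()
#       # ... use root ...
#   finally:
#       conn.close()              # _closeConnection() returns it to the pool
#
# A version connection comes from a separate per-version pool:
#
#   vconn = db.open(version='my-version')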
def removeVersionPool(self, version):
pools, pooll = self._pools
info = pools.get(version)
if info:
del pools[version]
pool, allocated, pool_lock = info
pooll.remove((pool, allocated))
try:
pool_lock.release()
except: # XXX Do we actually expect this to fail?
pass
del pool[:]
del allocated[:]
def connectionDebugInfo(self):
r = []
pools, pooll = self._pools
t = time()
for version, (pool, allocated, lock) in pools.items():
for c in allocated:
o = c._opened
d = c._debug_info
if d:
if len(d)==1:
d = d[0]
else:
d=''
d = "%s (%s)" % (d, len(c._cache))
r.append({
'opened': o and ("%s (%.2fs)" % (ctime(o), t-o)),
'info': d,
'version': version,
})
return r
def getActivityMonitor(self):
return self._activity_monitor
def pack(self, t=None, days=0):
"""Pack the storage, deleting unused object revisions.
A pack is always performed relative to a particular time, by
default the current time. All object revisions that are not
reachable as of the pack time are deleted from the storage.
The cost of this operation varies by storage, but it is
usually an expensive operation.
There are two optional arguments that can be used to set the
pack time: t, pack time in seconds since the epoch, and days,
the number of days to subtract from t or from the current
time if t is not specified.
"""
if t is None:
t = time()
t -= days * 86400
try:
self._storage.pack(t, referencesf)
except:
logger.error("packing", exc_info=True)
raise
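# Example of the pack-time arithmetic above (a sketch):
#
#   import time
#   db.pack(t=time.time() - 3 * 86400)   # equivalent to db.pack(days=3),
#                                        # up to the moment time() is read
#
# Both forms pack away object revisions unreachable as of three days ago.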
def setCacheSize(self, v):
self._cache_size = v
d = self._pools[0]
pool_info = d.get('')
if pool_info is not None:
for c in pool_info[1]:
c._cache.cache_size = v
def classFactory(self, connection, modulename, globalname):
# Zope will rebind this method to arbitrary user code at runtime.
return find_global(modulename, globalname)
def setPoolSize(self, v):
self._pool_size = v
def setActivityMonitor(self, am):
self._activity_monitor = am
def setVersionCacheSize(self, v):
self._version_cache_size = v
for ver in self._pools[0].keys():
if ver:
for c in self._pools[0][ver][1]:
c._cache.cache_size = v
def setVersionPoolSize(self, v):
self._version_pool_size=v
def undo(self, id, txn=None):
"""Undo a transaction identified by id.
A transaction can be undone if all of the objects involved in
the transaction were not modified subsequently, if any
modifications can be resolved by conflict resolution, or if
subsequent changes resulted in the same object state.
The value of id should be generated by calling undoLog()
or undoInfo(). The value of id is not the same as a
transaction id used by other methods; it is unique to undo().
:Parameters:
- `id`: a storage-specific transaction identifier
- `txn`: transaction context to use for undo().
By default, uses the current transaction.
"""
if txn is None:
txn = transaction.get()
txn.register(TransactionalUndo(self, id))
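# Illustrative undo round trip (a sketch; assumes an undo-capable
# storage such as FileStorage):
#
#   log = db.undoLog(0, -20)        # descriptions of recent transactions
#   db.undo(log[0]['id'])           # register an undo of the newest one
#   transaction.commit()            # the undo is applied at commit time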
def versionEmpty(self, version):
return self._storage.versionEmpty(version)
# The following methods are deprecated and have no effect
def getCacheDeactivateAfter(self):
"""Deprecated"""
warnings.warn("cache_deactivate_after has no effect",
DeprecationWarning)
def getVersionCacheDeactivateAfter(self):
"""Deprecated"""
warnings.warn("cache_deactivate_after has no effect",
DeprecationWarning)
def setCacheDeactivateAfter(self, v):
"""Deprecated"""
warnings.warn("cache_deactivate_after has no effect",
DeprecationWarning)
def setVersionCacheDeactivateAfter(self, v):
"""Deprecated"""
warnings.warn("cache_deactivate_after has no effect",
DeprecationWarning)
class ResourceManager(object):
"""Transaction participation for a version or undo resource."""
def __init__(self, db):
self._db = db
# Delegate the actual 2PC methods to the storage
self.tpc_vote = self._db._storage.tpc_vote
self.tpc_finish = self._db._storage.tpc_finish
self.tpc_abort = self._db._storage.tpc_abort
def sortKey(self):
return "%s:%s" % (self._db._storage.sortKey(), id(self))
def tpc_begin(self, txn, sub=False):
# XXX we should never be called with sub=True.
if sub:
raise ValueError, "doesn't supoprt sub-transactions"
self._db._storage.tpc_begin(txn)
# The object registers itself with the txn manager, so the ob
# argument to the methods below is self.
def abort(self, obj, txn):
pass
def commit(self, obj, txn):
pass
class CommitVersion(ResourceManager):
def __init__(self, db, version, dest=''):
super(CommitVersion, self).__init__(db)
self._version = version
self._dest = dest
def commit(self, ob, t):
dest = self._dest
tid, oids = self._db._storage.commitVersion(self._version,
self._dest,
t)
oids = dict.fromkeys(oids, 1)
self._db.invalidate(tid, oids, version=self._dest)
if self._dest:
# the code above just invalidated the dest version.
# now we need to invalidate the source!
self._db.invalidate(tid, oids, version=self._version)
class AbortVersion(ResourceManager):
def __init__(self, db, version):
super(AbortVersion, self).__init__(db)
self._version = version
def commit(self, ob, t):
tid, oids = self._db._storage.abortVersion(self._version, t)
self._db.invalidate(tid,
dict.fromkeys(oids, 1),
version=self._version)
class TransactionalUndo(ResourceManager):
def __init__(self, db, tid):
super(TransactionalUndo, self).__init__(db)
self._tid = tid
def commit(self, ob, t):
tid, oids = self._db._storage.undo(self._tid, t)
self._db.invalidate(tid, dict.fromkeys(oids, 1))
BTrees
ZConfig
persistent
transaction
# referenced by ZODB.config and related tests
ZEO
##############################################################################
#
# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################
"""Demo ZODB storage
The Demo storage serves two purposes:
- Provide an example implementation of a full storage without
distracting storage details,
- Provide a volatile storage that is useful for giving demonstrations.
The demo storage can have a "base" storage that is used in a
read-only fashion. The base storage must not contain version
data.
There are three main data structures:
_data -- Transaction logging information necessary for undo
This is a mapping from transaction id to transaction, where
a transaction is simply a 5-tuple:
packed, user, description, extension_data, records
where extension_data is a dictionary or None and records are the
actual records in chronological order. Packed is a flag
indicating whether the transaction has been packed or not
_index -- A mapping from oid to record
_vindex -- A mapping from version name to version data
where version data is a mapping from oid to record
A record is a tuple:
oid, pre, vdata, p, tid
where:
oid -- object id
pre -- The previous record for this object (or None)
vdata -- version data
None if not a version, otherwise:
version, non-version-record
p -- the pickle data or None
tid -- the transaction id that wrote the record
The pickle data will be None for a record for an object created in
an aborted version.
It is instructive to watch what happens to the internal data structures
as changes are made. For example, in Zope, you can create an external
method::
import Zope2
def info(RESPONSE):
RESPONSE['Content-type']= 'text/plain'
return Zope2.DB._storage._splat()
and call it to monitor the storage.
"""
import base64, time
from ZODB import POSException, BaseStorage
from ZODB.utils import z64, oid_repr
from persistent.TimeStamp import TimeStamp
from cPickle import loads
from BTrees import OOBTree
class DemoStorage(BaseStorage.BaseStorage):
def __init__(self, name='Demo Storage', base=None, quota=None):
BaseStorage.BaseStorage.__init__(self, name, base)
# We use a BTree because the items are sorted!
self._data = OOBTree.OOBTree()
self._index = {}
self._vindex = {}
self._base = base
self._size = 0
self._quota = quota
self._ltid = None
self._clear_temp()
if base is not None and base.versions():
raise POSException.StorageError, (
"Demo base storage has version data")
def __len__(self):
base=self._base
return (base and len(base) or 0) + len(self._index)
def getSize(self):
s=100
for tid, (p, u, d, e, t) in self._data.items():
s=s+16+24+12+4+16+len(u)+16+len(d)+16+len(e)+16
for oid, pre, vdata, p, tid in t:
s=s+16+24+24+4+4+(p and (16+len(p)) or 4)
if vdata: s=s+12+16+len(vdata[0])+4
s=s+16*len(self._index)
for v in self._vindex.values():
s=s+32+16*len(v)
self._size=s
return s
def abortVersion(self, src, transaction):
if transaction is not self._transaction:
raise POSException.StorageTransactionError(self, transaction)
if not src:
raise POSException.VersionCommitError("Invalid version")
self._lock_acquire()
try:
v = self._vindex.get(src, None)
if not v:
return
oids = []
for r in v.values():
oid, pre, (version, nv), p, tid = r
oids.append(oid)
if nv:
oid, pre, vdata, p, tid = nv
self._tindex.append([oid, r, None, p, self._tid])
else:
# effectively, delete the thing
self._tindex.append([oid, r, None, None, self._tid])
return self._tid, oids
finally: self._lock_release()
def commitVersion(self, src, dest, transaction):
if transaction is not self._transaction:
raise POSException.StorageTransactionError(self, transaction)
if not src:
raise POSException.VersionCommitError("Invalid source version")
if src == dest:
raise POSException.VersionCommitError(
"Can't commit to same version: %s" % repr(src))
self._lock_acquire()
try:
v = self._vindex.get(src)
if v is None:
return
newserial = self._tid
tindex = self._tindex
oids = []
for r in v.values():
oid, pre, vdata, p, tid = r
assert vdata is not None
oids.append(oid)
if dest:
new_vdata = dest, vdata[1]
else:
new_vdata = None
tindex.append([oid, r, new_vdata, p, self._tid])
return self._tid, oids
finally:
self._lock_release()
def loadEx(self, oid, version):
self._lock_acquire()
try:
try:
oid, pre, vdata, p, tid = self._index[oid]
except KeyError:
if self._base:
return self._base.load(oid, '')
raise KeyError, oid
ver = ""
if vdata:
oversion, nv = vdata
if oversion != version:
if nv:
# Return the current txn's tid with the non-version
# data.
oid, pre, vdata, p, skiptid = nv
else:
raise KeyError, oid
ver = oversion
if p is None:
raise KeyError, oid
return p, tid, ver
finally: self._lock_release()
def load(self, oid, version):
return self.loadEx(oid, version)[:2]
def modifiedInVersion(self, oid):
self._lock_acquire()
try:
try:
oid, pre, vdata, p, tid = self._index[oid]
if vdata: return vdata[0]
return ''
except: return ''
finally: self._lock_release()
def store(self, oid, serial, data, version, transaction):
if transaction is not self._transaction:
raise POSException.StorageTransactionError(self, transaction)
self._lock_acquire()
try:
old = self._index.get(oid, None)
if old is None:
# Hm, nothing here, check the base version:
if self._base:
try:
p, tid = self._base.load(oid, '')
except KeyError:
pass
else:
old = oid, None, None, p, tid
nv=None
if old:
oid, pre, vdata, p, tid = old
if vdata:
if vdata[0] != version:
raise POSException.VersionLockError, oid
nv=vdata[1]
else:
nv=old
if serial != tid:
raise POSException.ConflictError(
oid=oid, serials=(tid, serial), data=data)
r = [oid, old, version and (version, nv) or None, data, self._tid]
self._tindex.append(r)
s=self._tsize
s=s+72+(data and (16+len(data)) or 4)
if version: s=s+32+len(version)
if self._quota is not None and s > self._quota:
raise POSException.StorageError, (
'''<b>Quota Exceeded</b><br>
The maximum quota for this demonstration storage
has been exceeded.<br>Have a nice day.''')
finally: self._lock_release()
return self._tid
def supportsVersions(self):
return 1
def _clear_temp(self):
self._tindex = []
self._tsize = self._size + 160
def lastTransaction(self):
return self._ltid
def _begin(self, tid, u, d, e):
self._tsize = self._size + 120 + len(u) + len(d) + len(e)
def _finish(self, tid, user, desc, ext):
self._size = self._tsize
self._data[tid] = None, user, desc, ext, tuple(self._tindex)
for r in self._tindex:
oid, pre, vdata, p, tid = r
old = self._index.get(oid)
# If the object had version data, remove the version data.
if old is not None:
oldvdata = old[2]
if oldvdata:
v = self._vindex[oldvdata[0]]
del v[oid]
if not v:
# If the version info is now empty, remove it.
del self._vindex[oldvdata[0]]
self._index[oid] = r
# If there is version data, then update self._vindex, too.
if vdata:
version = vdata[0]
v = self._vindex.get(version)
if v is None:
v = self._vindex[version] = {}
v[oid] = r
self._ltid = self._tid
def undoLog(self, first, last, filter=None):
if last < 0:
last = first - last + 1
self._lock_acquire()
try:
# XXX Shouldn't this be sorted?
transactions = self._data.items()
pos = len(transactions)
r = []
i = 0
while i < last and pos:
pos = pos - 1
if i < first:
i = i + 1
continue
tid, (p, u, d, e, t) = transactions[pos]
if p:
continue
d = {'id': base64.encodestring(tid)[:-1],
'time': TimeStamp(tid).timeTime(),
'user_name': u, 'description': d}
if e:
d.update(loads(e))
if filter is None or filter(d):
r.append(d)
i = i + 1
return r
finally:
self._lock_release()
def versionEmpty(self, version):
return not self._vindex.get(version, None)
def versions(self, max=None):
r = []
for v in self._vindex.keys():
if self.versionEmpty(v):
continue
r.append(v)
if max is not None and len(r) >= max:
break
return r
def _build_indexes(self, stop='\377\377\377\377\377\377\377\377'):
# Rebuild index structures from transaction data
index = {}
vindex = {}
for tid, (p, u, d, e, t) in self._data.items():
if tid >= stop:
break
for r in t:
oid, pre, vdata, p, tid = r
old=index.get(oid, None)
if old is not None:
oldvdata=old[2]
if oldvdata:
v=vindex[oldvdata[0]]
del v[oid]
if not v: del vindex[oldvdata[0]]
index[oid]=r
if vdata:
version=vdata[0]
v=vindex.get(version, None)
if v is None: v=vindex[version]={}
vindex[vdata[0]][oid]=r
return index, vindex
def pack(self, t, referencesf):
# Packing is hard, at least when undo is supported.
# Even for a simple storage like this one, packing
# is pretty complex.
self._lock_acquire()
try:
stop=`TimeStamp(*time.gmtime(t)[:5]+(t%60,))`
# Build indexes up to the pack time:
index, vindex = self._build_indexes(stop)
# XXX This packing algorithm is flawed. It ignores
# references from non-current records after the pack
# time.
# Now build an index of *only* those objects reachable
# from the root.
rootl = [z64]
pindex = {}
while rootl:
oid = rootl.pop()
if oid in pindex:
continue
# Scan non-version pickle for references
r = index.get(oid, None)
if r is None:
if self._base:
p, s = self._base.load(oid, '')
referencesf(p, rootl)
else:
pindex[oid] = r
oid, pre, vdata, p, tid = r
referencesf(p, rootl)
if vdata:
nv = vdata[1]
if nv:
oid, pre, vdata, p, tid = nv
referencesf(p, rootl)
# Now we're ready to do the actual packing.
# We'll simply edit the transaction data in place.
# We'll defer deleting transactions till the end
# to avoid messing up the BTree items.
deleted = []
for tid, (p, u, d, e, records) in self._data.items():
if tid >= stop:
break
o = []
for r in records:
c = pindex.get(r[0])
if c is None:
# GC this record, no longer referenced
continue
if c == r:
# This is the most recent revision.
o.append(r)
else:
# This record is not the indexed record,
# so it may not be current. Let's see.
vdata = r[2]  # vdata is field 2 of (oid, pre, vdata, p, tid)
if vdata:
# Version records are current *only* if they
# are indexed
continue
else:
# OK, this isn't a version record, so it may be the
# non-version record for the indexed record.
vdata = c[2]
if vdata:
if vdata[1] != r:
# This record is not the non-version
# record for the indexed record
continue
else:
# The indexed record is not a version record,
# so this record cannot be the non-version
# record for it.
continue
o.append(r)
if o:
if len(o) != len(records):
self._data[tid] = 1, u, d, e, tuple(o) # Reset data
else:
deleted.append(tid)
# Now delete empty transactions
for tid in deleted:
del self._data[tid]
# Now reset previous pointers for "current" records:
for r in pindex.values():
r[1] = None # Previous record
if r[2] and r[2][1]: # vdata
# If this record contains version data and
# non-version data, then clear it out.
r[2][1][2] = None
# Finally, rebuild indexes from transaction data:
self._index, self._vindex = self._build_indexes()
finally:
self._lock_release()
self.getSize()
def _splat(self):
"""Spit out a string showing state.
"""
o=[]
o.append('Transactions:')
for tid, (p, u, d, e, t) in self._data.items():
o.append(" %s %s" % (TimeStamp(tid), p))
for r in t:
oid, pre, vdata, p, tid = r
oid = oid_repr(oid)
tid = oid_repr(tid)
## if serial is not None: serial=str(TimeStamp(serial))
pre=id(pre)
if vdata and vdata[1]: vdata=vdata[0], id(vdata[1])
if p: p=''
o.append(' %s: %s' %
(id(r), `(oid, pre, vdata, p, tid)`))
o.append('\nIndex:')
items=self._index.items()
items.sort()
for oid, r in items:
if r: r=id(r)
o.append(' %s: %s' % (oid_repr(oid), r))
o.append('\nVersion Index:')
items=self._vindex.items()
items.sort()
for version, v in items:
o.append(' '+version)
vitems=v.items()
vitems.sort()
for oid, r in vitems:
if r: r=id(r)
o.append(' %s: %s' % (oid_repr(oid), r))
return '\n'.join(o)
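# Minimal illustrative use of DemoStorage (a sketch; package paths are
# assumed from this source tree):
#
#   from ZODB.DemoStorage import DemoStorage
#   from ZODB.DB import DB
#   import transaction
#
#   db = DB(DemoStorage())          # volatile, in-memory storage
#   conn = db.open()
#   conn.root()['x'] = 1
#   transaction.commit()
#   print db._storage._splat()      # watch the internal structures change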
##############################################################################
#
# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################
"""Support for database export and import."""
from cStringIO import StringIO
from cPickle import Pickler, Unpickler
from tempfile import TemporaryFile
import logging
from ZODB.POSException import ExportError
from ZODB.utils import p64, u64
from ZODB.serialize import referencesf
logger = logging.getLogger('ZODB.ExportImport')
class ExportImport:
def exportFile(self, oid, f=None):
if f is None:
f = TemporaryFile()
elif isinstance(f, str):
f = open(f,'w+b')
f.write('ZEXP')
oids = [oid]
done_oids = {}
done=done_oids.has_key
load=self._storage.load
while oids:
oid = oids.pop(0)
if oid in done_oids:
continue
done_oids[oid] = True
try:
p, serial = load(oid, self._version)
except:
logger.debug("broken reference for oid %s", repr(oid),
exc_info=True)
else:
referencesf(p, oids)
f.writelines([oid, p64(len(p)), p])
f.write(export_end_marker)
return f
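# Shape of the export stream written above (a sketch):
#
#   'ZEXP'                                   4-byte magic
#   oid (8) + p64(len(pickle)) (8) + pickle  -- one per exported object
#   export_end_marker                        16 bytes of '\377'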
def importFile(self, f, clue='', customImporters=None):
# This is tricky, because we need to work in a transaction!
if isinstance(f, str):
f = open(f,'rb')
magic = f.read(4)
if magic != 'ZEXP':
if customImporters and customImporters.has_key(magic):
f.seek(0)
return customImporters[magic](self, f, clue)
raise ExportError("Invalid export header")
t = self._txn_mgr.get()
if clue:
t.note(clue)
return_oid_list = []
self._import = f, return_oid_list
self._register()
t.commit(1)
# Return the root imported object.
if return_oid_list:
return self.get(return_oid_list[0])
else:
return None
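# Illustrative export/import round trip (a sketch; 'conn' is an open
# Connection, into which this class is mixed, and 'ob' is any persistent
# object reachable from it):
#
#   f = conn.exportFile(ob._p_oid)   # export ob and everything it reaches
#   f.seek(0)
#   copy = conn.importFile(f)        # re-import under freshly issued oids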
def _importDuringCommit(self, transaction, f, return_oid_list):
"""Import data during two-phase commit.
Invoked by the transaction manager mid commit.
Appends one item, the OID of the first object created,
to return_oid_list.
"""
oids = {}
def persistent_load(ooid):
"""Remap a persistent id to a new ID and create a ghost for it."""
klass = None
if isinstance(ooid, tuple):
ooid, klass = ooid
if ooid in oids:
oid = oids[ooid]
else:
if klass is None:
oid = self._storage.new_oid()
else:
oid = self._storage.new_oid(), klass
oids[ooid] = oid
return Ghost(oid)
version = self._version
while 1:
h = f.read(16)
if h == export_end_marker:
break
if len(h) != 16:
raise ExportError("Truncated export file")
l = u64(h[8:16])
p = f.read(l)
if len(p) != l:
raise ExportError("Truncated export file")
ooid = h[:8]
if oids:
oid = oids[ooid]
if isinstance(oid, tuple):
oid = oid[0]
else:
oids[ooid] = oid = self._storage.new_oid()
return_oid_list.append(oid)
pfile = StringIO(p)
unpickler = Unpickler(pfile)
unpickler.persistent_load = persistent_load
newp = StringIO()
pickler = Pickler(newp, 1)
pickler.persistent_id = persistent_id
pickler.dump(unpickler.load())
pickler.dump(unpickler.load())
p = newp.getvalue()
self._storage.store(oid, None, p, version, transaction)
export_end_marker = '\377'*16
class Ghost(object):
__slots__ = ("oid",)
def __init__(self, oid):
self.oid = oid
def persistent_id(obj):
if isinstance(obj, Ghost):
return obj.oid
##############################################################################
#
# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################
"""Storage implementation using a log written to a single file.
$Revision: 1.16 $
"""
import base64
from cPickle import Pickler, Unpickler, loads
import errno
import os
import sys
import time
import logging
from types import StringType, DictType
from struct import pack, unpack
# Not all platforms have fsync
fsync = getattr(os, "fsync", None)
from ZODB import BaseStorage, ConflictResolution, POSException
from ZODB.POSException \
import UndoError, POSKeyError, MultipleUndoErrors, VersionLockError
from persistent.TimeStamp import TimeStamp
from ZODB.lock_file import LockFile
from ZODB.utils import p64, u64, cp, z64
from ZODB.FileStorage.fspack import FileStoragePacker
from ZODB.FileStorage.format \
import FileStorageFormatter, DataHeader, TxnHeader, DATA_HDR, \
DATA_HDR_LEN, TRANS_HDR, TRANS_HDR_LEN, CorruptedDataError
from ZODB.loglevels import BLATHER
try:
from ZODB.fsIndex import fsIndex
except ImportError:
def fsIndex():
return {}
t32 = 1L << 32
packed_version = "FS21"
logger = logging.getLogger('ZODB.FileStorage')
def panic(message, *data):
logger.critical(message, *data)
raise CorruptedTransactionError(message)
class FileStorageError(POSException.StorageError):
pass
class PackError(FileStorageError):
pass
class FileStorageFormatError(FileStorageError):
"""Invalid file format
The format of the given file is not valid.
"""
class CorruptedFileStorageError(FileStorageError,
POSException.StorageSystemError):
"""Corrupted file storage."""
class CorruptedTransactionError(CorruptedFileStorageError):
pass
class FileStorageQuotaError(FileStorageError,
POSException.StorageSystemError):
"""File storage quota exceeded."""
# Intended to be raised only in fspack.py, and ignored here.
class RedundantPackWarning(FileStorageError):
pass
class TempFormatter(FileStorageFormatter):
"""Helper class used to read formatted FileStorage data."""
def __init__(self, afile):
self._file = afile
class FileStorage(BaseStorage.BaseStorage,
ConflictResolution.ConflictResolvingStorage,
FileStorageFormatter):
# Set True while a pack is in progress; undo is blocked for the duration.
_pack_is_in_progress = False
_records_before_save = 10000
def __init__(self, file_name, create=False, read_only=False, stop=None,
quota=None):
if read_only:
self._is_read_only = True
if create:
raise ValueError("can't create a read-only file")
elif stop is not None:
raise ValueError("time-travel only supported in read-only mode")
if stop is None:
stop='\377'*8
# Lock the database and set up the temp file.
if not read_only:
# Create the lock file
self._lock_file = LockFile(file_name + '.lock')
self._tfile = open(file_name + '.tmp', 'w+b')
self._tfmt = TempFormatter(self._tfile)
else:
self._tfile = None
self._file_name = file_name
BaseStorage.BaseStorage.__init__(self, file_name)
(index, vindex, tindex, tvindex,
oid2tid, toid2tid, toid2tid_delete) = self._newIndexes()
self._initIndex(index, vindex, tindex, tvindex,
oid2tid, toid2tid, toid2tid_delete)
# Now open the file
self._file = None
if not create:
try:
self._file = open(file_name, read_only and 'rb' or 'r+b')
except IOError, exc:
if exc.errno == errno.EFBIG:
# The file is too big to open. Fail visibly.
raise
if exc.errno == errno.ENOENT:
# The file doesn't exist. Create it.
create = 1
# If something else went wrong, it's hard to guess
# what the problem was. If the file does not exist,
# create it. Otherwise, fail.
if os.path.exists(file_name):
raise
else:
create = 1
if self._file is None and create:
if os.path.exists(file_name):
os.remove(file_name)
self._file = open(file_name, 'w+b')
self._file.write(packed_version)
r = self._restore_index()
if r is not None:
self._used_index = 1 # Marker for testing
index, vindex, start, maxoid, ltid = r
self._initIndex(index, vindex, tindex, tvindex,
oid2tid, toid2tid, toid2tid_delete)
self._pos, self._oid, tid = read_index(
self._file, file_name, index, vindex, tindex, stop,
ltid=ltid, start=start, maxoid=maxoid,
read_only=read_only,
)
else:
self._used_index = 0 # Marker for testing
self._pos, self._oid, tid = read_index(
self._file, file_name, index, vindex, tindex, stop,
read_only=read_only,
)
self._save_index()
self._records_before_save = max(self._records_before_save,
len(self._index))
self._ltid = tid
# self._pos should always point just past the last
# transaction. During 2PC, data is written after _pos.
# invariant is restored at tpc_abort() or tpc_finish().
self._ts = tid = TimeStamp(tid)
t = time.time()
t = TimeStamp(*time.gmtime(t)[:5] + (t % 60,))
if tid > t:
seconds = tid.timeTime() - t.timeTime()
complainer = logger.warning
if seconds > 30 * 60: # 30 minutes -- way screwed up
complainer = logger.critical
complainer("%s Database records %d seconds in the future",
file_name, seconds)
self._quota = quota
# tid cache statistics.
self._oid2tid_nlookups = self._oid2tid_nhits = 0
def _initIndex(self, index, vindex, tindex, tvindex,
oid2tid, toid2tid, toid2tid_delete):
self._index=index
self._vindex=vindex
self._tindex=tindex
self._tvindex=tvindex
self._index_get=index.get
self._vindex_get=vindex.get
# .store() needs to compare the passed-in serial to the
# current tid in the database. _oid2tid caches the oid ->
# current tid mapping for non-version data (if the current
# record for oid is version data, the oid is not a key in
# _oid2tid). The point is that otherwise seeking into the
# storage is needed to extract the current tid, and that's
# an expensive operation. For example, if a transaction
# stores 4000 objects, and each random seek + read takes 7ms
# (that was approximately true on Linux and Windows tests in
# mid-2003), that's 28 seconds just to find the old tids.
# XXX Probably better to junk this and redefine _index as mapping
# XXX oid to (offset, tid) pair, via a new memory-efficient
# XXX BTree type.
self._oid2tid = oid2tid
# oid->tid map to transactionally add to _oid2tid.
self._toid2tid = toid2tid
# Set of oids to transactionally delete from _oid2tid (e.g.,
# oids reverted by undo, or for which the most recent record
# becomes version data).
self._toid2tid_delete = toid2tid_delete
def __len__(self):
return len(self._index)
def _newIndexes(self):
# hook to use something other than builtin dict
return fsIndex(), {}, {}, {}, {}, {}, {}
_saved = 0
def _save_index(self):
"""Write the database index to a file to support quick startup."""
index_name = self.__name__ + '.index'
tmp_name = index_name + '.index_tmp'
f=open(tmp_name,'wb')
p=Pickler(f,1)
info={'index': self._index, 'pos': self._pos,
'oid': self._oid, 'vindex': self._vindex}
p.dump(info)
f.flush()
f.close()
try:
try:
os.remove(index_name)
except OSError:
pass
os.rename(tmp_name, index_name)
except: pass
self._saved += 1
def _clear_index(self):
index_name = self.__name__ + '.index'
if os.path.exists(index_name):
try:
os.remove(index_name)
except OSError:
pass
def _sane(self, index, pos):
"""Sanity check saved index data by reading the last undone trans
Basically, we read the last not undone transaction and
check to see that the included records are consistent
with the index. Any invalid records or inconsistent
object positions cause zero to be returned.
"""
r = self._check_sanity(index, pos)
if not r:
logger.warning("Ignoring index for %s", self._file_name)
return r
def _check_sanity(self, index, pos):
if pos < 100:
return 0 # insane
self._file.seek(0, 2)
if self._file.tell() < pos:
return 0 # insane
ltid = None
max_checked = 5
checked = 0
while checked < max_checked:
self._file.seek(pos - 8)
rstl = self._file.read(8)
tl = u64(rstl)
pos = pos - tl - 8
if pos < 4:
return 0 # insane
h = self._read_txn_header(pos)
if not ltid:
ltid = h.tid
if h.tlen != tl:
return 0 # inconsistent lengths
if h.status == 'u':
continue # undone trans, search back
if h.status not in ' p':
return 0 # insane
if tl < h.headerlen():
return 0 # insane
tend = pos + tl
opos = pos + h.headerlen()
if opos == tend:
continue # empty trans
while opos < tend and checked < max_checked:
# Read the data records for this transaction
h = self._read_data_header(opos)
if opos + h.recordlen() > tend or h.tloc != pos:
return 0
if index.get(h.oid, 0) != opos:
return 0 # insane
checked += 1
opos = opos + h.recordlen()
return ltid
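# Layout note for the backward walk above: each transaction record ends
# with a redundant 8-byte copy of its length, so from a position 'pos'
# just past a transaction the scan effectively does:
#
#   self._file.seek(pos - 8)
#   tl = u64(self._file.read(8))   # total transaction length
#   pos = pos - tl - 8             # start of the previous transaction
#
# _txn_find() further down walks the file backwards the same way.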
def _restore_index(self):
"""Load database index to support quick startup."""
file_name=self.__name__
index_name=file_name+'.index'
try: f=open(index_name,'rb')
except: return None
p=Unpickler(f)
try:
info=p.load()
except:
exc, err = sys.exc_info()[:2]
logger.warning("Failed to load database index: %s: %s", exc, err)
return None
index = info.get('index')
pos = info.get('pos')
oid = info.get('oid')
vindex = info.get('vindex')
if index is None or pos is None or oid is None or vindex is None:
return None
pos = long(pos)
if isinstance(index, DictType) and not self._is_read_only:
# Convert to fsIndex
newindex = fsIndex()
if type(newindex) is not type(index):
# And we have fsIndex
newindex.update(index)
# Now save the index
f = open(index_name, 'wb')
p = Pickler(f, 1)
info['index'] = newindex
p.dump(info)
f.close()
# Now call this method again to get the new data
return self._restore_index()
tid = self._sane(index, pos)
if not tid:
return None
return index, vindex, pos, oid, tid
def close(self):
self._file.close()
if hasattr(self,'_lock_file'):
self._lock_file.close()
if self._tfile:
self._tfile.close()
try:
self._save_index()
except:
# Log the error and continue
logger.error("Error saving index on close()", exc_info=True)
# Return tid of most recent record for oid if that's in the
# _oid2tid cache. Else return None. It's important to use this
# instead of indexing _oid2tid directly so that cache statistics
# can be logged.
def _get_cached_tid(self, oid):
self._oid2tid_nlookups += 1
result = self._oid2tid.get(oid)
if result is not None:
self._oid2tid_nhits += 1
# Log a msg every ~8000 tries.
if self._oid2tid_nlookups & 0x1fff == 0:
logger.log(BLATHER,
"_oid2tid size %s lookups %s hits %s rate %.1f%%",
len(self._oid2tid),
self._oid2tid_nlookups,
self._oid2tid_nhits,
100.0 * self._oid2tid_nhits / self._oid2tid_nlookups)
return result
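# Note on the cadence above: 0x1fff is 8191, so the statistics line is
# emitted whenever the lookup count is a multiple of 8192 -- the
# "~8000 tries" the comment mentions.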
def abortVersion(self, src, transaction):
return self.commitVersion(src, '', transaction, abort=True)
def commitVersion(self, src, dest, transaction, abort=False):
# We are going to commit by simply storing back pointers.
if self._is_read_only:
raise POSException.ReadOnlyError()
if not (src and isinstance(src, StringType)
and isinstance(dest, StringType)):
raise POSException.VersionCommitError('Invalid source version')
if src == dest:
raise POSException.VersionCommitError(
"Can't commit to same version: %s" % repr(src))
if dest and abort:
raise POSException.VersionCommitError(
"Internal error, can't abort to a version")
if transaction is not self._transaction:
raise POSException.StorageTransactionError(self, transaction)
self._lock_acquire()
try:
return self._commitVersion(src, dest, transaction, abort)
finally:
self._lock_release()
def _commitVersion(self, src, dest, transaction, abort=False):
# call after checking arguments and acquiring lock
srcpos = self._vindex_get(src, 0)
spos = p64(srcpos)
# middle holds bytes 16:34 of a data record:
# pos of transaction, len of version name, data length
# commit version never writes data, so data length is always 0
middle = pack(">8sH8s", p64(self._pos), len(dest), z64)
if dest:
sd = p64(self._vindex_get(dest, 0))
heredelta = 66 + len(dest)
else:
sd = ''
heredelta = 50
here = self._pos + (self._tfile.tell() + self._thl)
oids = []
current_oids = {}
while srcpos:
h = self._read_data_header(srcpos)
if self._index.get(h.oid) == srcpos:
# This is a current record!
self._tindex[h.oid] = here
oids.append(h.oid)
self._tfile.write(h.oid + self._tid + spos + middle)
if dest:
self._tvindex[dest] = here
self._tfile.write(p64(h.pnv) + sd + dest)
sd = p64(here)
self._tfile.write(abort and p64(h.pnv) or spos)
# data backpointer to src data
here += heredelta
current_oids[h.oid] = 1
else:
# Hm. This is a non-current record. Is there a
# current record for this oid?
if not current_oids.has_key(h.oid):
break
srcpos = h.vprev
spos = p64(srcpos)
self._toid2tid_delete.update(current_oids)
return self._tid, oids
def getSize(self):
return self._pos
def _lookup_pos(self, oid):
try:
return self._index[oid]
except KeyError:
raise POSKeyError(oid)
except TypeError:
raise TypeError("invalid oid %r" % (oid,))
def loadEx(self, oid, version):
# A variant of load() that also returns a transaction id.
# ZEO wants this for managing its cache.
self._lock_acquire()
try:
pos = self._lookup_pos(oid)
h = self._read_data_header(pos, oid)
if h.version and h.version != version:
# Return data and tid from pnv (non-version data).
# If we return the old record's transaction id, then
# it will look to the cache like old data is current.
# The tid for the current data must always be greater
# than any non-current data.
data = self._loadBack_impl(oid, h.pnv)[0]
return data, h.tid, ""
if h.plen:
data = self._file.read(h.plen)
return data, h.tid, h.version
else:
# Get the data from the backpointer, but tid from
# current txn.
data, _, _, _ = self._loadBack_impl(oid, h.back)
th = self._read_txn_header(h.tloc)
return data, h.tid, h.version
finally:
self._lock_release()
def load(self, oid, version):
self._lock_acquire()
try:
pos = self._lookup_pos(oid)
h = self._read_data_header(pos, oid)
if h.version and h.version != version:
data = self._loadBack_impl(oid, h.pnv)[0]
return data, h.tid
if h.plen:
return self._file.read(h.plen), h.tid
else:
data = self._loadBack_impl(oid, h.back)[0]
return data, h.tid
finally:
self._lock_release()
def loadSerial(self, oid, serial):
# loadSerial must always return non-version data, because it
# is used by conflict resolution.
self._lock_acquire()
try:
pos = self._lookup_pos(oid)
while 1:
h = self._read_data_header(pos, oid)
if h.tid == serial:
break
pos = h.prev
if not pos:
raise POSKeyError(oid)
if h.version:
return self._loadBack_impl(oid, h.pnv)[0]
if h.plen:
return self._file.read(h.plen)
else:
return self._loadBack_impl(oid, h.back)[0]
finally:
self._lock_release()
def loadBefore(self, oid, tid):
self._lock_acquire()
try:
pos = self._lookup_pos(oid)
end_tid = None
while True:
h = self._read_data_header(pos, oid)
if h.version:
# Just follow the pnv pointer to the previous
# non-version data.
if not h.pnv:
# Object was created in version. There is no
# before data to find.
return None
pos = h.pnv
# The end_tid for the non-version data is not affected
# by versioned data records.
continue
if h.tid < tid:
break
pos = h.prev
end_tid = h.tid
if not pos:
return None
if h.back:
data, _, _, _ = self._loadBack_impl(oid, h.back)
return data, h.tid, end_tid
else:
return self._file.read(h.plen), h.tid, end_tid
finally:
self._lock_release()
def modifiedInVersion(self, oid):
self._lock_acquire()
try:
pos = self._lookup_pos(oid)
h = self._read_data_header(pos, oid)
return h.version
finally:
self._lock_release()
def store(self, oid, serial, data, version, transaction):
if self._is_read_only:
raise POSException.ReadOnlyError()
if transaction is not self._transaction:
raise POSException.StorageTransactionError(self, transaction)
self._lock_acquire()
try:
old = self._index_get(oid, 0)
cached_tid = None
pnv = None
if old:
cached_tid = self._get_cached_tid(oid)
if cached_tid is None:
h = self._read_data_header(old, oid)
if h.version:
if h.version != version:
raise VersionLockError(oid, h.version)
pnv = h.pnv
cached_tid = h.tid
if serial != cached_tid:
rdata = self.tryToResolveConflict(oid, cached_tid,
serial, data)
if rdata is None:
raise POSException.ConflictError(
oid=oid, serials=(cached_tid, serial), data=data)
else:
data = rdata
pos = self._pos
here = pos + self._tfile.tell() + self._thl
self._tindex[oid] = here
new = DataHeader(oid, self._tid, old, pos, len(version),
len(data))
if version:
# Link to last record for this version:
pv = (self._tvindex.get(version, 0)
or self._vindex.get(version, 0))
if pnv is None:
pnv = old
new.setVersion(version, pnv, pv)
self._tvindex[version] = here
self._toid2tid_delete[oid] = 1
else:
self._toid2tid[oid] = self._tid
self._tfile.write(new.asString())
self._tfile.write(data)
# Check quota
if self._quota is not None and here > self._quota:
raise FileStorageQuotaError(
"The storage quota has been exceeded.")
if old and serial != cached_tid:
return ConflictResolution.ResolvedSerial
else:
return self._tid
finally:
self._lock_release()
def _data_find(self, tpos, oid, data):
# Return backpointer for oid. Must call with the lock held.
# This is a file offset to oid's data record if found, else 0.
# The data records in the transaction at tpos are searched for oid.
# If a data record for oid isn't found, returns 0.
# Else if oid's data record contains a backpointer, that
# backpointer is returned.
# Else oid's data record contains the data, and the file offset of
# oid's data record is returned. This data record should contain
# a pickle identical to the 'data' argument.
# XXX If the length of the stored data doesn't match len(data),
# XXX an exception is raised. If the lengths match but the data
# XXX isn't the same, 0 is returned. Why the discrepancy?
self._file.seek(tpos)
h = self._file.read(TRANS_HDR_LEN)
tid, tl, status, ul, dl, el = unpack(TRANS_HDR, h)
self._file.read(ul + dl + el)
tend = tpos + tl + 8
pos = self._file.tell()
while pos < tend:
h = self._read_data_header(pos)
if h.oid == oid:
# Make sure this looks like the right data record
if h.plen == 0:
# This is also a backpointer. Gotta trust it.
return pos
if h.plen != len(data):
# The expected data doesn't match what's in the
# backpointer. Something is wrong.
logger.error("Mismatch between data and"
" backpointer at %d", pos)
return 0
_data = self._file.read(h.plen)
if data != _data:
return 0
return pos
pos += h.recordlen()
self._file.seek(pos)
return 0
def restore(self, oid, serial, data, version, prev_txn, transaction):
# A lot like store() but without all the consistency checks. This
# should only be used when we /know/ the data is good, hence the
# method name. While the signature looks like store() there are some
# differences:
#
# - serial is the serial number of /this/ revision, not of the
# previous revision. It is used instead of self._tid, which is
# ignored.
#
# - Nothing is returned
#
# - data can be None, which indicates a George Bailey object
# (i.e. one whose creation has been transactionally undone).
#
# prev_txn is a backpointer. In the original database, it's possible
# that the data was actually living in a previous transaction. This
# can happen for transactional undo and other operations, and is used
# as a space saving optimization. Under some circumstances the
# prev_txn may not actually exist in the target database (i.e. self)
# for example, if it's been packed away. In that case, the prev_txn
# should be considered just a hint, and is ignored if the transaction
# doesn't exist.
if self._is_read_only:
raise POSException.ReadOnlyError()
if transaction is not self._transaction:
raise POSException.StorageTransactionError(self, transaction)
self._lock_acquire()
try:
prev_pos = 0
if prev_txn is not None:
prev_txn_pos = self._txn_find(prev_txn, 0)
if prev_txn_pos:
prev_pos = self._data_find(prev_txn_pos, oid, data)
old = self._index_get(oid, 0)
# Calculate the file position in the temporary file
here = self._pos + self._tfile.tell() + self._thl
# And update the temp file index
self._tindex[oid] = here
if prev_pos:
# If there is a valid prev_pos, don't write data.
data = None
if data is None:
dlen = 0
else:
dlen = len(data)
# Write the recovery data record
new = DataHeader(oid, serial, old, self._pos, len(version), dlen)
if version:
pnv = self._restore_pnv(oid, old, version, prev_pos) or old
vprev = self._tvindex.get(version, 0)
if not vprev:
vprev = self._vindex.get(version, 0)
new.setVersion(version, pnv, vprev)
self._tvindex[version] = here
self._toid2tid_delete[oid] = 1
else:
self._toid2tid[oid] = serial
self._tfile.write(new.asString())
# Finally, write the data or a backpointer.
if data is None:
if prev_pos:
self._tfile.write(p64(prev_pos))
else:
# Write a zero backpointer, which indicates an
# un-creation transaction.
self._tfile.write(z64)
else:
self._tfile.write(data)
finally:
self._lock_release()
def _restore_pnv(self, oid, prev, version, bp):
# Find a valid pnv (previous non-version) pointer for this version.
# If there is no previous record, there can't be a pnv.
if not prev:
return None
# Load the record pointed to be prev
h = self._read_data_header(prev, oid)
if h.version:
return h.pnv
if h.back:
# XXX Not sure the following is always true:
# The previous record is not for this version, yet we
# have a backpointer to it. The current record must
# be an undo of an abort or commit, so the backpointer
# must be to a version record with a pnv.
h2 = self._read_data_header(h.back, oid)
if h2.version:
return h2.pnv
return None
def supportsUndo(self):
return 1
def supportsVersions(self):
return 1
def _clear_temp(self):
self._tindex.clear()
self._tvindex.clear()
self._toid2tid.clear()
self._toid2tid_delete.clear()
if self._tfile is not None:
self._tfile.seek(0)
def _begin(self, tid, u, d, e):
self._nextpos = 0
self._thl = TRANS_HDR_LEN + len(u) + len(d) + len(e)
if self._thl > 65535:
# one of u, d, or e may be > 65535
# We have to check lengths here because struct.pack
# doesn't raise an exception on overflow!
if len(u) > 65535:
raise FileStorageError('user name too long')
if len(d) > 65535:
raise FileStorageError('description too long')
if len(e) > 65535:
raise FileStorageError('too much extension data')
def tpc_vote(self, transaction):
self._lock_acquire()
try:
if transaction is not self._transaction:
return
dlen = self._tfile.tell()
if not dlen:
return # No data in this trans
self._tfile.seek(0)
user, descr, ext = self._ude
self._file.seek(self._pos)
tl = self._thl + dlen
try:
h = TxnHeader(self._tid, tl, "c", len(user),
len(descr), len(ext))
h.user = user
h.descr = descr
h.ext = ext
self._file.write(h.asString())
cp(self._tfile, self._file, dlen)
self._file.write(p64(tl))
self._file.flush()
except:
# Hm, an error occurred writing out the data. Maybe the
# disk is full. We don't want any turd at the end.
self._file.truncate(self._pos)
raise
self._nextpos = self._pos + (tl + 8)
finally:
self._lock_release()
# Keep track of the number of records that we've written
_records_written = 0
def _finish(self, tid, u, d, e):
nextpos=self._nextpos
if nextpos:
file=self._file
# Clear the checkpoint flag
file.seek(self._pos+16)
file.write(self._tstatus)
file.flush()
if fsync is not None: fsync(file.fileno())
self._pos = nextpos
self._index.update(self._tindex)
self._vindex.update(self._tvindex)
self._oid2tid.update(self._toid2tid)
for oid in self._toid2tid_delete.keys():
try:
del self._oid2tid[oid]
except KeyError:
pass
# Update the number of records that we've written
# +1 for the transaction record
self._records_written += len(self._tindex) + 1
if self._records_written >= self._records_before_save:
self._save_index()
self._records_written = 0
self._records_before_save = max(self._records_before_save,
len(self._index))
self._ltid = tid
def _abort(self):
if self._nextpos:
self._file.truncate(self._pos)
self._nextpos=0
def supportsTransactionalUndo(self):
return 1
def _undoDataInfo(self, oid, pos, tpos):
"""Return the tid, data pointer, data, and version for the oid
record at pos"""
if tpos:
pos = tpos - self._pos - self._thl
tpos = self._tfile.tell()
h = self._tfmt._read_data_header(pos, oid)
afile = self._tfile
else:
h = self._read_data_header(pos, oid)
afile = self._file
if h.oid != oid:
raise UndoError("Invalid undo transaction id", oid)
if h.plen:
data = afile.read(h.plen)
else:
data = ''
pos = h.back
if tpos:
self._tfile.seek(tpos) # Restore temp file to end
return h.tid, pos, data, h.version
def getTid(self, oid):
self._lock_acquire()
try:
result = self._get_cached_tid(oid)
if result is None:
pos = self._lookup_pos(oid)
result = self._getTid(oid, pos)
return result
finally:
self._lock_release()
def _getTid(self, oid, pos):
self._file.seek(pos)
h = self._file.read(16)
assert oid == h[:8]
return h[8:]
def _getVersion(self, oid, pos):
h = self._read_data_header(pos, oid)
if h.version:
return h.version, h.pnv
else:
return "", None
def _transactionalUndoRecord(self, oid, pos, tid, pre, version):
"""Get the indo information for a data record
Return a 5-tuple consisting of a pickle, data pointer,
version, packed non-version data pointer, and current
position. If the pickle is true, then the data pointer must
be 0, but the pickle can be empty *and* the pointer 0.
"""
copy = 1 # Can we just copy a data pointer
# First check if it is possible to undo this record.
tpos = self._tindex.get(oid, 0)
ipos = self._index.get(oid, 0)
tipos = tpos or ipos
if tipos != pos:
# Eek, a later transaction modified the data, but,
# maybe it is pointing at the same data we are.
ctid, cdataptr, cdata, cver = self._undoDataInfo(oid, ipos, tpos)
# Versions of undone record and current record *must* match!
if cver != version:
raise UndoError('Current and undone versions differ', oid)
if cdataptr != pos:
# We aren't sure if we are talking about the same data
try:
if (
# The current record wrote a new pickle
cdataptr == tipos
or
# Backpointers are different
self._loadBackPOS(oid, pos) !=
self._loadBackPOS(oid, cdataptr)
):
if pre and not tpos:
copy = 0 # we'll try to do conflict resolution
else:
# We bail if:
# - We don't have a previous record, which should
# be impossible.
raise UndoError("no previous record", oid)
except KeyError:
# LoadBack gave us a key error. Bail.
raise UndoError("_loadBack() failed", oid)
# Return the data that should be written in the undo record.
if not pre:
# There is no previous revision, because the object creation
# is being undone.
return "", 0, "", "", ipos
version, snv = self._getVersion(oid, pre)
if copy:
# we can just copy our previous-record pointer forward
return "", pre, version, snv, ipos
try:
bdata = self._loadBack_impl(oid, pre)[0]
except KeyError:
# couldn't find oid; what's the real explanation for this?
raise UndoError("_loadBack() failed for %s", oid)
data = self.tryToResolveConflict(oid, ctid, tid, bdata, cdata)
if data:
return data, 0, version, snv, ipos
raise UndoError("Some data were modified by a later transaction", oid)
# undoLog() returns a description dict that includes an id entry.
# The id is opaque to the client, but contains the transaction id.
# The transactionalUndo() implementation does a simple linear
# search through the file (from the end) to find the transaction.
def undoLog(self, first=0, last=-20, filter=None):
if last < 0:
last = first - last + 1
self._lock_acquire()
try:
if self._pack_is_in_progress:
raise UndoError(
'Undo is currently disabled for database maintenance.<p>')
us = UndoSearch(self._file, self._pos, first, last, filter)
while not us.finished():
# Hold lock for batches of 20 searches, so default search
# parameters will finish without letting another thread run.
for i in range(20):
if us.finished():
break
us.search()
# Give another thread a chance, so that a long undoLog()
# operation doesn't block all other activity.
self._lock_release()
self._lock_acquire()
return us.results
finally:
self._lock_release()
def undo(self, transaction_id, transaction):
"""Undo a transaction, given by transaction_id.
Do so by writing new data that reverses the action taken by
the transaction.
Usually, we can get by with just copying a data pointer, by
writing a file position rather than a pickle. Sometimes, we
may do conflict resolution, in which case we actually copy
new data that results from resolution.
"""
if self._is_read_only:
raise POSException.ReadOnlyError()
if transaction is not self._transaction:
raise POSException.StorageTransactionError(self, transaction)
self._lock_acquire()
try:
return self._txn_undo(transaction_id)
finally:
self._lock_release()
def _txn_undo(self, transaction_id):
# Find the right transaction to undo and call _txn_undo_write().
tid = base64.decodestring(transaction_id + '\n')
assert len(tid) == 8
tpos = self._txn_find(tid, 1)
tindex = self._txn_undo_write(tpos)
self._tindex.update(tindex)
# Arrange to clear the affected oids from the oid2tid cache.
# It's too painful to try to update them to correct current
# values instead.
self._toid2tid_delete.update(tindex)
return self._tid, tindex.keys()
def _txn_find(self, tid, stop_at_pack):
pos = self._pos
while pos > 39:
self._file.seek(pos - 8)
pos = pos - u64(self._file.read(8)) - 8
self._file.seek(pos)
h = self._file.read(TRANS_HDR_LEN)
_tid = h[:8]
if _tid == tid:
return pos
if stop_at_pack:
# check the status field of the transaction header
if h[16] == 'p':
break
raise UndoError("Invalid transaction id")
def _txn_undo_write(self, tpos):
# a helper function to write the data records for transactional undo
otloc = self._pos
here = self._pos + self._tfile.tell() + self._thl
base = here - self._tfile.tell()
# Let's move the file pointer back to the start of the txn record.
th = self._read_txn_header(tpos)
if th.status != " ":
raise UndoError('non-undoable transaction')
tend = tpos + th.tlen
pos = tpos + th.headerlen()
tindex = {}
# keep track of failures, because we may succeed later
failures = {}
# Read the data records for this transaction
while pos < tend:
h = self._read_data_header(pos)
if h.oid in failures:
del failures[h.oid] # second chance!
assert base + self._tfile.tell() == here, (here, base,
self._tfile.tell())
try:
p, prev, v, snv, ipos = self._transactionalUndoRecord(
h.oid, pos, h.tid, h.prev, h.version)
except UndoError, v:
# Don't fail right away. We may be redeemed later!
failures[h.oid] = v
else:
new = DataHeader(h.oid, self._tid, ipos, otloc, len(v),
len(p))
if v:
vprev = self._tvindex.get(v, 0) or self._vindex.get(v, 0)
new.setVersion(v, snv, vprev)
self._tvindex[v] = here
# XXX This seek shouldn't be necessary, but some other
# bit of code is messing with the file pointer.
assert self._tfile.tell() == here - base, (here, base,
self._tfile.tell())
self._tfile.write(new.asString())
if p:
self._tfile.write(p)
else:
self._tfile.write(p64(prev))
tindex[h.oid] = here
here += new.recordlen()
pos += h.recordlen()
if pos > tend:
raise UndoError("non-undoable transaction")
if failures:
raise MultipleUndoErrors(failures.items())
return tindex
def versionEmpty(self, version):
if not version:
# The interface is silent on this case. I think that this should
# be an error, but Barry thinks this should return 1 if we have
# any non-version data. This would be excruciatingly painful to
# test, so I must be right. ;)
raise POSException.VersionError(
'The version must be a non-empty string')
self._lock_acquire()
try:
index=self._index
file=self._file
seek=file.seek
read=file.read
srcpos=self._vindex_get(version, 0)
t=tstatus=None
while srcpos:
seek(srcpos)
oid=read(8)
if index[oid]==srcpos: return 0
h=read(50) # serial, prev(oid), tloc, vlen, plen, pnv, pv
tloc=h[16:24]
if t != tloc:
# We haven't checked this transaction before,
# get its status.
t=tloc
seek(u64(t)+16)
tstatus=read(1)
if tstatus != 'u': return 1
spos=h[-8:]
srcpos=u64(spos)
return 1
finally: self._lock_release()
def versions(self, max=None):
r=[]
a=r.append
keys=self._vindex.keys()
if max is not None: keys=keys[:max]
for version in keys:
if self.versionEmpty(version): continue
a(version)
if max and len(r) >= max: return r
return r
def history(self, oid, version=None, size=1, filter=None):
self._lock_acquire()
try:
r = []
pos = self._lookup_pos(oid)
wantver = version
while 1:
if len(r) >= size: return r
h = self._read_data_header(pos)
if h.version:
if wantver is not None and h.version != wantver:
if h.prev:
pos = h.prev
continue
else:
return r
else:
version = ""
wantver = None
th = self._read_txn_header(h.tloc)
if th.ext:
d = loads(th.ext)
else:
d = {}
d.update({"time": TimeStamp(h.tid).timeTime(),
"user_name": th.user,
"description": th.descr,
"tid": h.tid,
"version": h.version,
"size": h.plen,
})
if filter is None or filter(d):
r.append(d)
if h.prev:
pos = h.prev
else:
return r
finally:
self._lock_release()
def _redundant_pack(self, file, pos):
assert pos > 8, pos
file.seek(pos - 8)
p = u64(file.read(8))
file.seek(pos - p + 8)
return file.read(1) not in ' u'
def pack(self, t, referencesf):
"""Copy data from the current database file to a packed file
Non-current records from transactions with time stamps earlier
than the pack time are omitted, as are all undone records.
Also, data back pointers that point before the pack time are resolved and
the associated data are copied, since the old records are not copied.
"""
if self._is_read_only:
raise POSException.ReadOnlyError()
stop=`TimeStamp(*time.gmtime(t)[:5]+(t%60,))`
if stop==z64: raise FileStorageError, 'Invalid pack time'
# If the storage is empty, there's nothing to do.
if not self._index:
return
self._lock_acquire()
try:
if self._pack_is_in_progress:
raise FileStorageError, 'Already packing'
self._pack_is_in_progress = True
current_size = self.getSize()
finally:
self._lock_release()
p = FileStoragePacker(self._file_name, stop,
self._lock_acquire, self._lock_release,
self._commit_lock_acquire,
self._commit_lock_release,
current_size)
try:
opos = None
try:
opos = p.pack()
except RedundantPackWarning, detail:
logger.info(str(detail))
if opos is None:
return
oldpath = self._file_name + ".old"
self._lock_acquire()
try:
self._file.close()
try:
if os.path.exists(oldpath):
os.remove(oldpath)
os.rename(self._file_name, oldpath)
except Exception:
self._file = open(self._file_name, 'r+b')
raise
# OK, we're beyond the point of no return
os.rename(self._file_name + '.pack', self._file_name)
self._file = open(self._file_name, 'r+b')
self._initIndex(p.index, p.vindex, p.tindex, p.tvindex,
p.oid2tid, p.toid2tid,
p.toid2tid_delete)
self._pos = opos
self._save_index()
finally:
self._lock_release()
finally:
if p.locked:
self._commit_lock_release()
self._lock_acquire()
self._pack_is_in_progress = False
self._lock_release()
def iterator(self, start=None, stop=None):
return FileIterator(self._file_name, start, stop)
def lastTransaction(self):
"""Return transaction id for last committed transaction"""
return self._ltid
def lastTid(self, oid):
"""Return last serialno committed for object oid.
If there is no serialno for this oid -- which can only occur
if it is a new object -- return None.
"""
try:
return self.getTid(oid)
except KeyError:
return None
def cleanup(self):
"""Remove all files created by this storage."""
for ext in '', '.old', '.tmp', '.lock', '.index', '.pack':
try:
os.remove(self._file_name + ext)
except OSError, e:
if e.errno != errno.ENOENT:
raise
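# A hedged usage sketch (not part of FileStorage itself): consume the
# history() records implemented above.  "storage" and "oid" are assumed to
# be supplied by the caller; each entry is a dict carrying "time",
# "user_name", "description", "tid", "version", and "size" keys, newest
# first.
def _example_print_history(storage, oid, size=3):
    for d in storage.history(oid, size=size):
        print d["time"], d["size"], d["description"]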
def shift_transactions_forward(index, vindex, tindex, file, pos, opos):
"""Copy transactions forward in the data file
This might be done as part of a recovery effort
"""
# Cache a bunch of methods
seek=file.seek
read=file.read
write=file.write
index_get=index.get
vindex_get=vindex.get
# Initialize.
pv=z64
p1=opos
p2=pos
offset=p2-p1
# Copy the data in two stages. In the packing stage,
# we skip records that are non-current or that are for
# unreferenced objects. We also skip undone transactions.
#
# After the packing stage, we copy everything but undone
# transactions, however, we have to update various back pointers.
# We have to have the storage lock in the second phase to keep
# data from being changed while we're copying.
pnv=None
while 1:
# Read the transaction record
seek(pos)
h=read(TRANS_HDR_LEN)
if len(h) < TRANS_HDR_LEN: break
tid, stl, status, ul, dl, el = unpack(TRANS_HDR,h)
if status=='c': break # Oops, we found a checkpoint flag.
tl=u64(stl)
tpos=pos
tend=tpos+tl
otpos=opos # start pos of output trans
thl=ul+dl+el
h2=read(thl)
if len(h2) != thl:
raise PackError(opos)
# write out the transaction record
seek(opos)
write(h)
write(h2)
thl=TRANS_HDR_LEN+thl
pos=tpos+thl
opos=otpos+thl
while pos < tend:
# Read the data records for this transaction
seek(pos)
h=read(DATA_HDR_LEN)
oid,serial,sprev,stloc,vlen,splen = unpack(DATA_HDR, h)
plen=u64(splen)
dlen=DATA_HDR_LEN+(plen or 8)
if vlen:
dlen=dlen+(16+vlen)
pnv=u64(read(8))
# skip position of previous version record
seek(8,1)
version=read(vlen)
pv=p64(vindex_get(version, 0))
if status != 'u': vindex[version]=opos
tindex[oid]=opos
if plen: p=read(plen)
else:
p=read(8)
p=u64(p)
if p >= p2: p=p-offset
elif p >= p1:
# Ick, we're in trouble. Let's bail
# to the index and hope for the best
p=index_get(oid, 0)
p=p64(p)
# WRITE
seek(opos)
sprev=p64(index_get(oid, 0))
write(pack(DATA_HDR,
oid,serial,sprev,p64(otpos),vlen,splen))
if vlen:
if not pnv: write(z64)
else:
if pnv >= p2: pnv=pnv-offset
elif pnv >= p1:
pnv=index_get(oid, 0)
write(p64(pnv))
write(pv)
write(version)
write(p)
opos=opos+dlen
pos=pos+dlen
# skip the (intentionally redundant) transaction length
pos=pos+8
if status != 'u':
index.update(tindex) # Record the position
tindex.clear()
write(stl)
opos=opos+8
return opos
def search_back(file, pos):
seek=file.seek
read=file.read
seek(0,2)
s=p=file.tell()
while p > pos:
seek(p-8)
l=u64(read(8))
if l <= 0: break
p=p-l-8
return p, s
def recover(file_name):
file=open(file_name, 'r+b')
index={}
vindex={}
tindex={}
pos, oid, tid = read_index(
file, file_name, index, vindex, tindex, recover=1)
if oid is not None:
print "Nothing to recover"
return
opos=pos
pos, sz = search_back(file, pos)
if pos < sz:
npos = shift_transactions_forward(
index, vindex, tindex, file, pos, opos,
)
file.truncate(npos)
print "Recovered file, lost %s, ended up with %s bytes" % (
pos-opos, npos)
def read_index(file, name, index, vindex, tindex, stop='\377'*8,
ltid=z64, start=4L, maxoid=z64, recover=0, read_only=0):
"""Scan the entire file storage and recreate the index.
Returns file position, max oid, and last transaction id. It also
stores index information in the three dictionary arguments.
Arguments:
file -- a file object (the Data.fs)
name -- the name of the file (presumably file.name)
index -- dictionary, oid -> data record
vindex -- dictionary, oid -> data record for version data
tindex -- dictionary, oid -> data record
XXX tindex is cleared before return, so it will be empty
There are several default arguments that affect the scan or the
return values. XXX should document them.
The file position returned is the position just after the last
valid transaction record. The oid returned is the maximum object
id in the data. The transaction id is the tid of the last
transaction.
"""
read = file.read
seek = file.seek
seek(0, 2)
file_size=file.tell()
fmt = TempFormatter(file)
if file_size:
if file_size < start: raise FileStorageFormatError, file.name
seek(0)
if read(4) != packed_version:
raise FileStorageFormatError, name
else:
if not read_only:
file.write(packed_version)
return 4L, maxoid, ltid
index_get=index.get
pos=start
seek(start)
tid='\0'*7+'\1'
while 1:
# Read the transaction record
h=read(TRANS_HDR_LEN)
if not h: break
if len(h) != TRANS_HDR_LEN:
if not read_only:
logger.warning('%s truncated at %s', name, pos)
seek(pos)
file.truncate()
break
tid, tl, status, ul, dl, el = unpack(TRANS_HDR,h)
if el < 0: el=t32-el
if tid <= ltid:
logger.warning("%s time-stamp reduction at %s", name, pos)
ltid = tid
if pos+(tl+8) > file_size or status=='c':
# Hm, the data were truncated or the checkpoint flag wasn't
# cleared. They may also be corrupted,
# in which case, we don't want to totally lose the data.
if not read_only:
logger.warning("%s truncated, possibly due to damaged"
" records at %s", name, pos)
_truncate(file, name, pos)
break
if status not in ' up':
logger.warning('%s has invalid status, %s, at %s',
name, status, pos)
if tl < (TRANS_HDR_LEN+ul+dl+el):
# We're in trouble. Find out if this is bad data in the
# middle of the file, or just a turd that Win 9x dropped
# at the end when the system crashed.
# Skip to the end and read what should be the transaction length
# of the last transaction.
seek(-8, 2)
rtl=u64(read(8))
# Now check to see if the redundant transaction length is
# reasonable:
if file_size - rtl < pos or rtl < TRANS_HDR_LEN:
logger.critical('%s has invalid transaction header at %s',
name, pos)
if not read_only:
logger.warning(
"It appears that there is invalid data at the end of "
"the file, possibly due to a system crash. %s "
"truncated to recover from bad data at end." % name)
_truncate(file, name, pos)
break
else:
if recover: return pos, None, None
panic('%s has invalid transaction header at %s', name, pos)
if tid >= stop:
break
tpos=pos
tend=tpos+tl
if status=='u':
# Undone transaction, skip it
seek(tend)
h=u64(read(8))
if h != tl:
if recover: return tpos, None, None
panic('%s has inconsistent transaction length at %s',
name, pos)
pos=tend+8
continue
pos = tpos+ TRANS_HDR_LEN + ul + dl + el
while pos < tend:
# Read the data records for this transaction
h = fmt._read_data_header(pos)
dlen = h.recordlen()
tindex[h.oid] = pos
if h.version:
vindex[h.version] = pos
if pos + dlen > tend or h.tloc != tpos:
if recover:
return tpos, None, None
panic("%s data record exceeds transaction record at %s",
name, pos)
if index_get(h.oid, 0) != h.prev:
if h.prev:
if recover: return tpos, None, None
logger.error("%s incorrect previous pointer at %s",
name, pos)
else:
logger.warning("%s incorrect previous pointer at %s",
name, pos)
pos=pos+dlen
if pos != tend:
if recover: return tpos, None, None
panic("%s data records don't add up at %s",name,tpos)
# Read the (intentionally redundant) transaction length
seek(pos)
h = u64(read(8))
if h != tl:
if recover: return tpos, None, None
panic("%s redundant transaction length check failed at %s",
name, pos)
pos=pos+8
if tindex: # avoid the pathological empty transaction case
_maxoid = max(tindex.keys()) # in 2.2, just max(tindex)
maxoid = max(_maxoid, maxoid)
index.update(tindex)
tindex.clear()
return pos, maxoid, ltid
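# A hedged sketch of driving read_index() to rebuild the in-memory indexes
# for an existing Data.fs, roughly as FileStorage does when it cannot use a
# saved .index file.  "path" is a hypothetical file name.
def _example_rebuild_index(path):
    index = {}
    vindex = {}
    tindex = {}
    f = open(path, 'rb')
    try:
        pos, maxoid, ltid = read_index(f, path, index, vindex, tindex,
                                       read_only=1)
    finally:
        f.close()
    return pos, maxoid, ltid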
def _truncate(file, name, pos):
file.seek(0, 2)
file_size = file.tell()
try:
i = 0
while 1:
oname='%s.tr%s' % (name, i)
if os.path.exists(oname):
i += 1
else:
logger.warning("Writing truncated data from %s to %s",
name, oname)
o = open(oname,'wb')
file.seek(pos)
cp(file, o, file_size-pos)
o.close()
break
except:
logger.error("couldn\'t write truncated data for %s", name,
exc_info=True)
raise POSException.StorageSystemError, (
"Couldn't save truncated data")
file.seek(pos)
file.truncate()
class Iterator:
"""A General simple iterator that uses the Python for-loop index protocol
"""
__index=-1
__current=None
def __getitem__(self, i):
__index=self.__index
while i > __index:
__index=__index+1
self.__current=self.next(__index)
self.__index=__index
return self.__current
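# A minimal sketch of the pre-iterator for-loop protocol that Iterator
# relies on: Python 2 calls __getitem__(0), __getitem__(1), ... until an
# IndexError ends the loop.  Count3 is a hypothetical subclass.
def _example_for_loop_protocol():
    class Count3(Iterator):
        def next(self, index):
            if index >= 3:
                raise IndexError(index)
            return index
    return [i for i in Count3()]    # -> [0, 1, 2]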
class FileIterator(Iterator, FileStorageFormatter):
"""Iterate over the transactions in a FileStorage file.
"""
_ltid = z64
_file = None
def __init__(self, file, start=None, stop=None):
if isinstance(file, str):
file = open(file, 'rb')
self._file = file
if file.read(4) != packed_version:
raise FileStorageFormatError, file.name
file.seek(0,2)
self._file_size = file.tell()
self._pos = 4L
assert start is None or isinstance(start, str)
assert stop is None or isinstance(stop, str)
if start:
self._skip_to_start(start)
self._stop = stop
def __len__(self):
# Define a bogus __len__() to make the iterator work
# with code like builtin list() and tuple() in Python 2.1.
# There's a lot of C code that expects a sequence to have
# an __len__() but can cope with any sort of mistake in its
# implementation. So just return 0.
return 0
# This allows us to pass an iterator as the `other' argument to
# copyTransactionsFrom() in BaseStorage. The advantage here is that we
# can create the iterator manually, e.g. setting start and stop, and then
# just let copyTransactionsFrom() do its thing.
def iterator(self):
return self
def close(self):
file = self._file
if file is not None:
self._file = None
file.close()
def _skip_to_start(self, start):
# Scan through the transaction records doing almost no sanity
# checks.
while 1:
self._file.seek(self._pos)
h = self._file.read(16)
if len(h) < 16:
return
tid, stl = unpack(">8s8s", h)
if tid >= start:
return
tl = u64(stl)
try:
self._pos += tl + 8
except OverflowError:
self._pos = long(self._pos) + tl + 8
if __debug__:
# Sanity check
self._file.seek(self._pos - 8, 0)
rtl = self._file.read(8)
if rtl != stl:
pos = self._file.tell() - 8
panic("%s has inconsistent transaction length at %s "
"(%s != %s)",
self._file.name, pos, u64(rtl), u64(stl))
def next(self, index=0):
if self._file is None:
# A closed iterator. XXX: Is IOError the best we can do? For
# now, mimic a read on a closed file.
raise IOError, 'iterator is closed'
pos = self._pos
while 1:
# Read the transaction record
try:
h = self._read_txn_header(pos)
except CorruptedDataError, err:
# If buf is empty, we've reached EOF.
if not err.buf:
break
raise
if h.tid <= self._ltid:
logger.warning("%s time-stamp reduction at %s",
self._file.name, pos)
self._ltid = h.tid
if self._stop is not None and h.tid > self._stop:
raise IndexError, index
if h.status == "c":
# Assume we've hit the last, in-progress transaction
raise IndexError, index
if pos + h.tlen + 8 > self._file_size:
# Hm, the data were truncated or the checkpoint flag wasn't
# cleared. They may also be corrupted,
# in which case, we don't want to totally lose the data.
logger.warning("%s truncated, possibly due to"
" damaged records at %s", self._file.name, pos)
break
if h.status not in " up":
logger.warning('%s has invalid status,'
' %s, at %s', self._file.name, h.status, pos)
if h.tlen < h.headerlen():
# We're in trouble. Find out if this is bad data in
# the middle of the file, or just a turd that Win 9x
# dropped at the end when the system crashed. Skip to
# the end and read what should be the transaction
# length of the last transaction.
self._file.seek(-8, 2)
rtl = u64(self._file.read(8))
# Now check to see if the redundant transaction length is
# reasonable:
if self._file_size - rtl < pos or rtl < TRANS_HDR_LEN:
logger.critical("%s has invalid transaction header at %s",
self._file.name, pos)
logger.warning(
"It appears that there is invalid data at the end of "
"the file, possibly due to a system crash. %s "
"truncated to recover from bad data at end."
% self._file.name)
break
else:
logger.warning("%s has invalid transaction header at %s",
self._file.name, pos)
break
tpos = pos
tend = tpos + h.tlen
if h.status != "u":
pos = tpos + h.headerlen()
e = {}
if h.elen:
try:
e = loads(h.ext)
except:
pass
result = RecordIterator(h.tid, h.status, h.user, h.descr,
e, pos, tend, self._file, tpos)
# Read the (intentionally redundant) transaction length
self._file.seek(tend)
rtl = u64(self._file.read(8))
if rtl != h.tlen:
logger.warning("%s redundant transaction length check"
" failed at %s", self._file.name, tend)
break
self._pos = tend + 8
return result
raise IndexError, index
class RecordIterator(Iterator, BaseStorage.TransactionRecord,
FileStorageFormatter):
"""Iterate over the transactions in a FileStorage file."""
def __init__(self, tid, status, user, desc, ext, pos, tend, file, tpos):
self.tid = tid
self.status = status
self.user = user
self.description = desc
self._extension = ext
self._pos = pos
self._tend = tend
self._file = file
self._tpos = tpos
def next(self, index=0):
pos = self._pos
while pos < self._tend:
# Read the data records for this transaction
h = self._read_data_header(pos)
dlen = h.recordlen()
if pos + dlen > self._tend or h.tloc != self._tpos:
logger.warning("%s data record exceeds transaction"
" record at %s", file.name, pos)
break
self._pos = pos + dlen
prev_txn = None
if h.plen:
data = self._file.read(h.plen)
else:
if h.back == 0:
# If the backpointer is 0, then this transaction
# undoes the object creation. It either aborts
# the version that created the object or undid the
# transaction that created it. Return None
# instead of a pickle to indicate this.
data = None
else:
data, tid = self._loadBackTxn(h.oid, h.back, False)
# XXX looks like this only goes one link back, should
# it go to the original data like BDBFullStorage?
prev_txn = self.getTxnFromData(h.oid, h.back)
r = Record(h.oid, h.tid, h.version, data, prev_txn, pos)
return r
raise IndexError, index
class Record(BaseStorage.DataRecord):
"""An abstract database record."""
def __init__(self, oid, tid, version, data, prev, pos):
self.oid = oid
self.tid = tid
self.version = version
self.data = data
self.data_txn = prev
self.pos = pos
class UndoSearch:
def __init__(self, file, pos, first, last, filter=None):
self.file = file
self.pos = pos
self.first = first
self.last = last
self.filter = filter
self.i = 0
self.results = []
self.stop = 0
def finished(self):
"""Return True if UndoSearch has found enough records."""
# BAW: Why 39 please? This makes no sense (see also below).
return self.i >= self.last or self.pos < 39 or self.stop
def search(self):
"""Search for another record."""
dict = self._readnext()
if dict is not None and (self.filter is None or self.filter(dict)):
if self.i >= self.first:
self.results.append(dict)
self.i += 1
def _readnext(self):
"""Read the next record from the storage."""
self.file.seek(self.pos - 8)
self.pos -= u64(self.file.read(8)) + 8
self.file.seek(self.pos)
h = self.file.read(TRANS_HDR_LEN)
tid, tl, status, ul, dl, el = unpack(TRANS_HDR, h)
if status == 'p':
self.stop = 1
return None
if status != ' ':
return None
d = u = ''
if ul:
u = self.file.read(ul)
if dl:
d = self.file.read(dl)
e = {}
if el:
try:
e = loads(self.file.read(el))
except:
pass
d = {'id': base64.encodestring(tid).rstrip(),
'time': TimeStamp(tid).timeTime(),
'user_name': u,
'description': d}
d.update(e)
return d
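# A small sketch of the undo-id encoding shared by undoLog() and undo()
# above: the raw 8-byte tid is base64-encoded with the trailing newline
# stripped, and undo() decodes it symmetrically.  The tid here is an
# arbitrary sample value.
def _example_undo_id_roundtrip():
    import base64
    tid = '\0' * 7 + '\x03'
    undo_id = base64.encodestring(tid).rstrip()
    assert base64.decodestring(undo_id + '\n') == tid
    return undo_id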
# this is a package
from ZODB.FileStorage.FileStorage \
import FileStorage, RecordIterator, FileIterator, Record, packed_version
##############################################################################
#
# Copyright (c) 2003 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################
#
# File-based ZODB storage
#
# Files are arranged as follows.
#
# - The first 4 bytes are a file identifier.
#
# - The rest of the file consists of a sequence of transaction
# "records".
#
# A transaction record consists of:
#
# - 8-byte transaction id, which is also a time stamp.
#
# - 8-byte transaction record length - 8.
#
# - 1-byte status code
# ' ' (a blank) completed transaction that hasn't been packed
# 'p' completed transaction that has been packed
# 'c' checkpoint -- a transaction in progress, at the end of the file;
# it's been thru vote() but not finish(); if finish() completes
# normally, it will be overwritten with a blank; if finish() dies
# (e.g., out of disk space), cleanup code will try to truncate
# the file to chop off this incomplete transaction
# 'u' uncertain; no longer used; was previously used to record something
# about non-transactional undo
#
# - 2-byte length of user name
#
# - 2-byte length of description
#
# - 2-byte length of extension attributes
#
# - user name
#
# - description
#
# - extension attributes
#
# * A sequence of data records
#
# - 8-byte redundant transaction length -8
#
# A data record consists of
#
# - 8-byte oid.
#
# - 8-byte tid, which matches the transaction id in the transaction record.
#
# - 8-byte previous-record file-position.
#
# - 8-byte beginning of transaction record file position.
#
# - 2-byte version length
#
# - 8-byte data length
#
# ? 8-byte position of non-version data
# (if version length > 0)
#
# ? 8-byte position of previous record in this version
# (if version length > 0)
#
# ? version string
# (if version length > 0)
#
# ? data
# (data length > 0)
#
# ? 8-byte position of data record containing data
# (data length == 0)
#
# Note that the lengths and positions are all big-endian.
# Also, the object ids and time stamps are big-endian, so comparisons
# are meaningful.
#
# Version handling
#
# There isn't a separate store for versions. Each record has a
# version field, indicating what version it is in. The records in a
# version form a linked list. Each record that has a non-empty
# version string has a pointer to the previous record in the version.
# Version back pointers are retained *even* when versions are
# committed or aborted or when transactions are undone.
#
# There is a notion of "current" version records, which are the
# records in a version that are the current records for their
# respective objects. When a version is committed, the current records
# are committed to the destination version. When a version is
# aborted, the current records are aborted.
#
# When committing or aborting, we search backward through the linked
# list until we find a record for an object that does not have a
# current record in the version. If we find a record for which the
# non-version pointer is the same as the previous pointer, then we
# forget that the corresponding object had a current record in the
# version. This strategy allows us to avoid searching backward through
# previously committed or aborted version records.
#
# Of course, we ignore records in undone transactions when committing
# or aborting.
#
# Backpointers
#
# When we commit or abort a version, we don't copy (or delete)
# any data. Instead, we write records with back pointers.
#
# A version record *never* has a back pointer to a non-version
# record, because we never abort to a version. A non-version record
# may have a back pointer to a version record or to a non-version
# record.
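# A minimal, self-contained sketch (not part of this module) of walking the
# layout documented above: read the 4-byte file identifier, then unpack the
# fixed part of the first transaction header and its variable-length
# metadata.  "path" is a hypothetical file name for a well-formed Data.fs.
def _example_first_txn_header(path):
    import struct
    f = open(path, 'rb')
    try:
        fid = f.read(4)                 # 4-byte file identifier
        h = f.read(23)                  # fixed-size transaction header
        tid, tlen, status, ul, dl, el = struct.unpack(">8sQcHHH", h)
        user = f.read(ul)
        description = f.read(dl)
    finally:
        f.close()
    return fid, tid, tlen, status, user, description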
import struct
import logging
from ZODB.POSException import POSKeyError
from ZODB.utils import u64, oid_repr, t32
class CorruptedError(Exception):
pass
class CorruptedDataError(CorruptedError):
def __init__(self, oid=None, buf=None, pos=None):
self.oid = oid
self.buf = buf
self.pos = pos
def __str__(self):
if self.oid:
msg = "Error reading oid %s. Found %r" % (oid_repr(self.oid),
self.buf)
else:
msg = "Error reading unknown oid. Found %r" % self.buf
if self.pos:
msg += " at %d" % self.pos
return msg
# the struct formats for the headers
TRANS_HDR = ">8sQcHHH"
DATA_HDR = ">8s8sQQHQ"
# constants to support various header sizes
TRANS_HDR_LEN = 23
DATA_HDR_LEN = 42
DATA_VERSION_HDR_LEN = 58
assert struct.calcsize(TRANS_HDR) == TRANS_HDR_LEN
assert struct.calcsize(DATA_HDR) == DATA_HDR_LEN
logger = logging.getLogger('ZODB.FileStorage.format')
class FileStorageFormatter(object):
"""Mixin class that can read and write the low-level format."""
# subclasses must provide _file
_metadata_size = 4L
_format_version = "21"
def _read_num(self, pos):
"""Read an 8-byte number."""
self._file.seek(pos)
return u64(self._file.read(8))
def _read_data_header(self, pos, oid=None):
"""Return a DataHeader object for data record at pos.
If oid is not None, raise CorruptedDataError if the oid passed
does not match the oid in the file.
If there is version data, reads the version part of the header.
If there is no pickle data, reads the back pointer.
"""
self._file.seek(pos)
s = self._file.read(DATA_HDR_LEN)
if len(s) != DATA_HDR_LEN:
raise CorruptedDataError(oid, s, pos)
h = DataHeaderFromString(s)
if oid is not None and oid != h.oid:
raise CorruptedDataError(oid, s, pos)
if h.vlen:
s = self._file.read(16 + h.vlen)
h.parseVersion(s)
if not h.plen:
h.back = u64(self._file.read(8))
return h
def _write_version_header(self, file, pnv, vprev, version):
s = struct.pack(">8s8s", pnv, vprev)
file.write(s + version)
def _read_txn_header(self, pos, tid=None):
self._file.seek(pos)
s = self._file.read(TRANS_HDR_LEN)
if len(s) != TRANS_HDR_LEN:
raise CorruptedDataError(tid, s, pos)
h = TxnHeaderFromString(s)
if tid is not None and tid != h.tid:
raise CorruptedDataError(tid, s, pos)
h.user = self._file.read(h.ulen)
h.descr = self._file.read(h.dlen)
h.ext = self._file.read(h.elen)
return h
def _loadBack_impl(self, oid, back, fail=True):
# shared implementation used by various _loadBack methods
#
# If the backpointer ultimately resolves to 0:
# If fail is True, raise KeyError for zero backpointer.
# If fail is False, return the empty data from the record
# with no backpointer.
while 1:
if not back:
# If backpointer is 0, object does not currently exist.
raise POSKeyError(oid)
h = self._read_data_header(back)
if h.plen:
return self._file.read(h.plen), h.tid, back, h.tloc
if h.back == 0 and not fail:
return None, h.tid, back, h.tloc
back = h.back
def _loadBackTxn(self, oid, back, fail=True):
"""Return data and txn id for backpointer."""
return self._loadBack_impl(oid, back, fail)[:2]
def _loadBackPOS(self, oid, back):
return self._loadBack_impl(oid, back)[2]
def getTxnFromData(self, oid, back):
"""Return transaction id for data at back."""
h = self._read_data_header(back, oid)
return h.tid
def fail(self, pos, msg, *args):
s = ("%s:%s:" + msg) % ((self._name, pos) + args)
logger.error(s)
raise CorruptedError(s)
def checkTxn(self, th, pos):
if th.tid <= self.ltid:
self.fail(pos, "time-stamp reduction: %s <= %s",
oid_repr(th.tid), oid_repr(self.ltid))
self.ltid = th.tid
if th.status == "c":
self.fail(pos, "transaction with checkpoint flag set")
if th.status not in " pu": # recognize " ", "p", and "u" as valid
self.fail(pos, "invalid transaction status: %r", th.status)
if th.tlen < th.headerlen():
self.fail(pos, "invalid transaction header: "
"txnlen (%d) < headerlen(%d)", th.tlen, th.headerlen())
def checkData(self, th, tpos, dh, pos):
if dh.tloc != tpos:
self.fail(pos, "data record does not point to transaction header"
": %d != %d", dh.tloc, tpos)
if pos + dh.recordlen() > tpos + th.tlen:
self.fail(pos, "data record size exceeds transaction size: "
"%d > %d", pos + dh.recordlen(), tpos + th.tlen)
if dh.prev >= pos:
self.fail(pos, "invalid previous pointer: %d", dh.prev)
if dh.back:
if dh.back >= pos:
self.fail(pos, "invalid back pointer: %d", dh.back)
if dh.plen:
self.fail(pos, "data record has back pointer and data")
def DataHeaderFromString(s):
return DataHeader(*struct.unpack(DATA_HDR, s))
class DataHeader(object):
"""Header for a data record."""
__slots__ = (
"oid", "tid", "prev", "tloc", "vlen", "plen", "back",
# These three attributes are only defined when vlen > 0
"pnv", "vprev", "version")
def __init__(self, oid, tid, prev, tloc, vlen, plen):
self.back = 0 # default
self.version = "" # default
self.oid = oid
self.tid = tid
self.prev = prev
self.tloc = tloc
self.vlen = vlen
self.plen = plen
def asString(self):
s = struct.pack(DATA_HDR, self.oid, self.tid, self.prev,
self.tloc, self.vlen, self.plen)
if self.version:
v = struct.pack(">QQ", self.pnv, self.vprev)
return s + v + self.version
else:
return s
def setVersion(self, version, pnv, vprev):
self.version = version
self.vlen = len(version)
self.pnv = pnv
self.vprev = vprev
def parseVersion(self, buf):
pnv, vprev = struct.unpack(">QQ", buf[:16])
self.pnv = pnv
self.vprev = vprev
self.version = buf[16:]
def recordlen(self):
rlen = DATA_HDR_LEN + (self.plen or 8)
if self.version:
rlen += 16 + self.vlen
return rlen
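# A small round-trip sketch for the data-record header machinery above; the
# oid, tid, and plen are arbitrary sample values.
def _example_data_header_roundtrip():
    h = DataHeader('\0' * 7 + '\x01', '\0' * 8, 0, 4, 0, 42)
    s = h.asString()
    assert len(s) == DATA_HDR_LEN
    h2 = DataHeaderFromString(s)
    assert (h2.oid, h2.plen) == (h.oid, h.plen)
    return h2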
def TxnHeaderFromString(s):
return TxnHeader(*struct.unpack(TRANS_HDR, s))
class TxnHeader(object):
"""Header for a transaction record."""
__slots__ = ("tid", "tlen", "status", "user", "descr", "ext",
"ulen", "dlen", "elen")
def __init__(self, tid, tlen, status, ulen, dlen, elen):
self.tid = tid
self.tlen = tlen
self.status = status
self.ulen = ulen
self.dlen = dlen
self.elen = elen
if elen < 0:
self.elen = t32 - elen
def asString(self):
s = struct.pack(TRANS_HDR, self.tid, self.tlen, self.status,
self.ulen, self.dlen, self.elen)
return "".join(map(str, [s, self.user, self.descr, self.ext]))
def headerlen(self):
return TRANS_HDR_LEN + self.ulen + self.dlen + self.elen
##############################################################################
#
# Copyright (c) 2003 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
import md5
import struct
from ZODB.FileStorage import FileIterator
from ZODB.FileStorage.format \
import TRANS_HDR, TRANS_HDR_LEN, DATA_HDR, DATA_HDR_LEN
from ZODB.TimeStamp import TimeStamp
from ZODB.utils import u64, get_pickle_metadata
from ZODB.tests.StorageTestBase import zodb_unpickle
def fsdump(path, file=None, with_offset=1):
i = 0
iter = FileIterator(path)
for trans in iter:
if with_offset:
print >> file, "Trans #%05d tid=%016x time=%s offset=%d" % \
(i, u64(trans.tid), str(TimeStamp(trans.tid)), trans._pos)
else:
print >> file, "Trans #%05d tid=%016x time=%s" % \
(i, u64(trans.tid), str(TimeStamp(trans.tid)))
print >> file, "\tstatus=%s user=%s description=%s" % \
(`trans.status`, trans.user, trans.description)
j = 0
for rec in trans:
if rec.data is None:
fullclass = "undo or abort of object creation"
else:
modname, classname = get_pickle_metadata(rec.data)
dig = md5.new(rec.data).hexdigest()
fullclass = "%s.%s" % (modname, classname)
# special case for testing purposes
if fullclass == "ZODB.tests.MinPO.MinPO":
obj = zodb_unpickle(rec.data)
fullclass = "%s %s" % (fullclass, obj.value)
if rec.version:
version = "version=%s " % rec.version
else:
version = ''
if rec.data_txn:
# XXX It would be nice to print the transaction number
# (i) but it would be too expensive to keep track of.
bp = "bp=%016x" % u64(rec.data_txn)
else:
bp = ""
print >> file, " data #%05d oid=%016x %sclass=%s %s" % \
(j, u64(rec.oid), version, fullclass, bp)
j += 1
print >> file
i += 1
iter.close()
def fmt(p64):
# Return a nicely formatted string for a packed 64-bit value
return "%016x" % u64(p64)
class Dumper:
"""A very verbose dumper for debuggin FileStorage problems."""
# XXX Should revise this class to use FileStorageFormatter.
def __init__(self, path, dest=None):
self.file = open(path, "rb")
self.dest = dest
def dump(self):
fid = self.file.read(4)
print >> self.dest, "*" * 60
print >> self.dest, "file identifier: %r" % fid
while self.dump_txn():
pass
def dump_txn(self):
pos = self.file.tell()
h = self.file.read(TRANS_HDR_LEN)
if not h:
return False
tid, tlen, status, ul, dl, el = struct.unpack(TRANS_HDR, h)
end = pos + tlen
print >> self.dest, "=" * 60
print >> self.dest, "offset: %d" % pos
print >> self.dest, "end pos: %d" % end
print >> self.dest, "transaction id: %s" % fmt(tid)
print >> self.dest, "trec len: %d" % tlen
print >> self.dest, "status: %r" % status
user = descr = extra = ""
if ul:
user = self.file.read(ul)
if dl:
descr = self.file.read(dl)
if el:
extra = self.file.read(el)
print >> self.dest, "user: %r" % user
print >> self.dest, "description: %r" % descr
print >> self.dest, "len(extra): %d" % el
while self.file.tell() < end:
self.dump_data(pos)
stlen = self.file.read(8)
print >> self.dest, "redundant trec len: %d" % u64(stlen)
return 1
def dump_data(self, tloc):
pos = self.file.tell()
h = self.file.read(DATA_HDR_LEN)
assert len(h) == DATA_HDR_LEN
oid, revid, prev, tloc, vlen, dlen = struct.unpack(DATA_HDR, h)
print >> self.dest, "-" * 60
print >> self.dest, "offset: %d" % pos
print >> self.dest, "oid: %s" % fmt(oid)
print >> self.dest, "revid: %s" % fmt(revid)
print >> self.dest, "previous record offset: %d" % prev
print >> self.dest, "transaction offset: %d" % tloc
if vlen:
pnv = self.file.read(8)
sprevdata = self.file.read(8)
version = self.file.read(vlen)
print >> self.dest, "version: %r" % version
print >> self.dest, "non-version data offset: %d" % u64(pnv)
print >> self.dest, \
"previous version data offset: %d" % u64(sprevdata)
print >> self.dest, "len(data): %d" % dlen
self.file.read(dlen)
if not dlen:
sbp = self.file.read(8)
print >> self.dest, "backpointer: %d" % u64(sbp)
##############################################################################
#
# Copyright (c) 2003 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""FileStorage helper to perform pack.
A storage contains an ordered set of object revisions. When a storage
is packed, object revisions that are not reachable as of the pack time
are deleted. The notion of reachability is complicated by
backpointers -- object revisions that point to earlier revisions of
the same object.
An object revision is reachable at a certain time if it is reachable
from the revision of the root at that time or if it is reachable from
a backpointer after that time.
"""
import os
import logging
from ZODB.POSException import UndoError
from ZODB.serialize import referencesf
from ZODB.utils import p64, u64, z64
from ZODB.fsIndex import fsIndex
from ZODB.FileStorage.format \
import FileStorageFormatter, CorruptedDataError, DataHeader, \
TRANS_HDR_LEN
logger = logging.getLogger("ZODB.FileStorage.fspack")
class DataCopier(FileStorageFormatter):
"""Mixin class for copying transactions into a storage.
The restore() and pack() methods share a need to copy data records
and update pointers to data in earlier transaction records. This
class provides the shared logic.
The mixin extends the FileStorageFormatter with a copy() method.
It also requires that the concrete class provides the following
attributes:
_file -- file with earlier destination data
_tfile -- destination file for copied data
_pos -- file pos of destination transaction
_tindex -- maps oid to data record file pos
_tvindex -- maps version name to data record file pos
_tindex and _tvindex are updated by copy().
The copy() method does not do any locking.
"""
def _txn_find(self, tid, stop_at_pack):
# _pos always points just past the last transaction
pos = self._pos
while pos > 4:
self._file.seek(pos - 8)
pos = pos - u64(self._file.read(8)) - 8
self._file.seek(pos)
h = self._file.read(TRANS_HDR_LEN)
_tid = h[:8]
if _tid == tid:
return pos
if stop_at_pack:
if h[16] == 'p':
break
raise UndoError(None, "Invalid transaction id")
def _data_find(self, tpos, oid, data):
# Return backpointer for oid. Must call with the lock held.
# This is a file offset to oid's data record if found, else 0.
# The data records in the transaction at tpos are searched for oid.
# If a data record for oid isn't found, returns 0.
# Else if oid's data record contains a backpointer, that
# backpointer is returned.
# Else oid's data record contains the data, and the file offset of
# oid's data record is returned. This data record should contain
# a pickle identical to the 'data' argument.
# XXX If the length of the stored data doesn't match len(data),
# XXX an exception is raised. If the lengths match but the data
# XXX isn't the same, 0 is returned. Why the discrepancy?
h = self._read_txn_header(tpos)
tend = tpos + h.tlen
pos = self._file.tell()
while pos < tend:
h = self._read_data_header(pos)
if h.oid == oid:
# Make sure this looks like the right data record
if h.plen == 0:
# This is also a backpointer. Gotta trust it.
return pos
if h.plen != len(data):
# The expected data doesn't match what's in the
# backpointer. Something is wrong.
error("Mismatch between data and backpointer at %d", pos)
return 0
_data = self._file.read(h.plen)
if data != _data:
return 0
return pos
pos += h.recordlen()
return 0
def _restore_pnv(self, oid, prev, version, bp):
# Find a valid pnv (previous non-version) pointer for this version.
# If there is no previous record, there can't be a pnv.
if not prev:
return None
pnv = None
h = self._read_data_header(prev, oid)
# If the previous record is for a version, it must have
# a valid pnv.
if h.version:
return h.pnv
elif bp:
# XXX Not sure the following is always true:
# The previous record is not for this version, yet we
# have a backpointer to it. The current record must
# be an undo of an abort or commit, so the backpointer
# must be to a version record with a pnv.
h2 = self._read_data_header(bp, oid)
if h2.version:
return h2.pnv
else:
warn("restore could not find previous non-version data "
"at %d or %d", prev, bp)
return None
def _resolve_backpointer(self, prev_txn, oid, data):
prev_pos = 0
if prev_txn is not None:
prev_txn_pos = self._txn_find(prev_txn, 0)
if prev_txn_pos:
prev_pos = self._data_find(prev_txn_pos, oid, data)
return prev_pos
def copy(self, oid, serial, data, version, prev_txn,
txnpos, datapos):
prev_pos = self._resolve_backpointer(prev_txn, oid, data)
old = self._index.get(oid, 0)
# Calculate the pos the record will have in the storage.
here = datapos
# And update the temp file index
self._tindex[oid] = here
if prev_pos:
# If there is a valid prev_pos, don't write data.
data = None
if data is None:
dlen = 0
else:
dlen = len(data)
# Write the recovery data record
h = DataHeader(oid, serial, old, txnpos, len(version), dlen)
if version:
h.version = version
pnv = self._restore_pnv(oid, old, version, prev_pos)
if pnv is not None:
h.pnv = pnv
else:
h.pnv = old
# Link to the last record for this version
h.vprev = self._tvindex.get(version, 0)
if not h.vprev:
h.vprev = self._vindex.get(version, 0)
self._tvindex[version] = here
self._tfile.write(h.asString())
# Write the data or a backpointer
if data is None:
if prev_pos:
self._tfile.write(p64(prev_pos))
else:
# Write a zero backpointer, which indicates an
# un-creation transaction.
self._tfile.write(z64)
else:
self._tfile.write(data)
class GC(FileStorageFormatter):
def __init__(self, file, eof, packtime):
self._file = file
self._name = file.name
self.eof = eof
self.packtime = packtime
# packpos: position of first txn header after pack time
self.packpos = None
self.oid2curpos = fsIndex() # maps oid to current data record position
self.oid2verpos = fsIndex() # maps oid to current version data
# The set of reachable revisions of each object.
#
# This set as managed using two data structures. The first is
# an fsIndex mapping oids to one data record pos. Since only
# a few objects will have more than one revision, we use this
# efficient data structure to handle the common case. The
# second is a dictionary mapping objects to lists of
# positions; it is used to handle the small number of objects
# for which we must keep multiple revisions.
self.reachable = fsIndex()
self.reach_ex = {}
# keep ltid for consistency checks during initial scan
self.ltid = z64
def isReachable(self, oid, pos):
"""Return 1 if revision of `oid` at `pos` is reachable."""
rpos = self.reachable.get(oid)
if rpos is None:
return 0
if rpos == pos:
return 1
return pos in self.reach_ex.get(oid, [])
def findReachable(self):
self.buildPackIndex()
self.findReachableAtPacktime([z64])
self.findReachableFromFuture()
# These mappings are no longer needed and may consume a lot
# of space.
del self.oid2verpos
del self.oid2curpos
def buildPackIndex(self):
pos = 4L
# We make the initial assumption that the database has been
# packed before and set unpacked to True only after seeing the
# first record with a status == " ". If we get to the packtime
# and unpacked is still False, we need to watch for a redundant
# pack.
unpacked = False
while pos < self.eof:
th = self._read_txn_header(pos)
if th.tid > self.packtime:
break
self.checkTxn(th, pos)
if th.status != "p":
unpacked = True
tpos = pos
end = pos + th.tlen
pos += th.headerlen()
while pos < end:
dh = self._read_data_header(pos)
self.checkData(th, tpos, dh, pos)
if dh.version:
self.oid2verpos[dh.oid] = pos
else:
self.oid2curpos[dh.oid] = pos
pos += dh.recordlen()
tlen = self._read_num(pos)
if tlen != th.tlen:
self.fail(pos, "redundant transaction length does not "
"match initial transaction length: %d != %d",
tlen, th.tlen)
pos += 8
self.packpos = pos
if unpacked:
return
# check for a redundant pack. If the first record following
# the newly computed packpos has status 'p', then it was
# packed earlier and the current pack is redundant.
try:
th = self._read_txn_header(pos)
except CorruptedDataError, err:
if err.buf != "":
raise
if th.status == 'p':
# Delay import to cope with circular imports.
# XXX put exceptions in a separate module
from ZODB.FileStorage.FileStorage import RedundantPackWarning
raise RedundantPackWarning(
"The database has already been packed to a later time"
" or no changes have been made since the last pack")
def findReachableAtPacktime(self, roots):
"""Mark all objects reachable from the oids in roots as reachable."""
todo = list(roots)
while todo:
oid = todo.pop()
if self.reachable.has_key(oid):
continue
L = []
pos = self.oid2curpos.get(oid)
if pos is not None:
L.append(pos)
todo.extend(self.findrefs(pos))
pos = self.oid2verpos.get(oid)
if pos is not None:
L.append(pos)
todo.extend(self.findrefs(pos))
if not L:
continue
pos = L.pop()
self.reachable[oid] = pos
if L:
self.reach_ex[oid] = L
def findReachableFromFuture(self):
# In this pass, the roots are positions of object revisions.
# We add a pos to extra_roots when there is a backpointer to a
# revision that was not current at the packtime. The
# non-current revision could refer to objects that were
# otherwise unreachable at the packtime.
extra_roots = []
pos = self.packpos
while pos < self.eof:
th = self._read_txn_header(pos)
self.checkTxn(th, pos)
tpos = pos
end = pos + th.tlen
pos += th.headerlen()
while pos < end:
dh = self._read_data_header(pos)
self.checkData(th, tpos, dh, pos)
if dh.back and dh.back < self.packpos:
if self.reachable.has_key(dh.oid):
L = self.reach_ex.setdefault(dh.oid, [])
if dh.back not in L:
L.append(dh.back)
extra_roots.append(dh.back)
else:
self.reachable[dh.oid] = dh.back
if dh.version and dh.pnv:
if self.reachable.has_key(dh.oid):
L = self.reach_ex.setdefault(dh.oid, [])
if dh.pnv not in L:
L.append(dh.pnv)
extra_roots.append(dh.pnv)
else:
self.reachable[dh.oid] = dh.back
pos += dh.recordlen()
tlen = self._read_num(pos)
if tlen != th.tlen:
self.fail(pos, "redundant transaction length does not "
"match initial transaction length: %d != %d",
tlen, th.tlen)
pos += 8
for pos in extra_roots:
refs = self.findrefs(pos)
self.findReachableAtPacktime(refs)
def findrefs(self, pos):
"""Return a list of oids referenced as of packtime."""
dh = self._read_data_header(pos)
# Chase backpointers until we get to the record with the refs
while dh.back:
dh = self._read_data_header(dh.back)
if dh.plen:
return referencesf(self._file.read(dh.plen))
else:
return []
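# A hedged, self-contained sketch of the two-level "reachable" bookkeeping
# described in GC.__init__ above: an fsIndex entry covers the common
# single-revision case, and a dict of position lists covers the rare objects
# with several reachable revisions.  The oid and positions are sample values.
def _example_reachable_bookkeeping():
    reachable = fsIndex()
    reach_ex = {}
    oid = '\0' * 7 + '\x02'
    reachable[oid] = 100                        # common case: one revision
    reach_ex.setdefault(oid, []).append(200)    # extra reachable revision
    def is_reachable(oid, pos):
        return reachable.get(oid) == pos or pos in reach_ex.get(oid, [])
    assert is_reachable(oid, 100) and is_reachable(oid, 200)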
class PackCopier(DataCopier):
# PackCopier has to cope with _file and _tfile being the
# same file. The copy() implementation is written assuming
# that they are different, so that using one object doesn't
# mess up the file pointer for the other object.
# PackCopier overrides _resolve_backpointer() and _restore_pnv()
# to guarantee that they keep the file pointer for _tfile in
# the right place.
def __init__(self, f, index, vindex, tindex, tvindex):
self._file = f
self._tfile = f
self._index = index
self._vindex = vindex
self._tindex = tindex
self._tvindex = tvindex
self._pos = None
def setTxnPos(self, pos):
self._pos = pos
def _resolve_backpointer(self, prev_txn, oid, data):
pos = self._tfile.tell()
try:
return DataCopier._resolve_backpointer(self, prev_txn, oid, data)
finally:
self._tfile.seek(pos)
def _restore_pnv(self, oid, prev, version, bp):
pos = self._tfile.tell()
try:
return DataCopier._restore_pnv(self, oid, prev, version, bp)
finally:
self._tfile.seek(pos)
class FileStoragePacker(FileStorageFormatter):
# path is the storage file path.
# stop is the pack time, as a TimeStamp.
# la and lr are the acquire() and release() methods of the storage's lock.
# cla and clr similarly, for the storage's commit lock.
# current_size is the storage's _pos. All valid data at the start
# lives before that offset (there may be a checkpoint transaction in
# progress after it).
def __init__(self, path, stop, la, lr, cla, clr, current_size):
self._name = path
# We open our own handle on the storage so that much of pack can
# proceed in parallel. It's important to close this file at every
# return point, else on Windows the caller won't be able to rename
# or remove the storage file.
self._file = open(path, "rb")
self._path = path
self._stop = stop
self.locked = 0
self.file_end = current_size
self.gc = GC(self._file, self.file_end, self._stop)
# The packer needs to acquire the parent's commit lock
# during the copying stage, so the two sets of lock acquire
# and release methods are passed to the constructor.
self._lock_acquire = la
self._lock_release = lr
self._commit_lock_acquire = cla
self._commit_lock_release = clr
# The packer will use several indexes.
# index: oid -> pos
# vindex: version -> pos of XXX
# tindex: oid -> pos, for current txn
# tvindex: version -> pos of XXX, for current txn
# oid2tid: not used by the packer
self.index = fsIndex()
self.vindex = {}
self.tindex = {}
self.tvindex = {}
self.oid2tid = {}
self.toid2tid = {}
self.toid2tid_delete = {}
# Index for non-version data. This is a temporary structure
# to reduce I/O during packing
self.nvindex = fsIndex()
def pack(self):
# Pack copies all data reachable at the pack time or later.
#
# Copying occurs in two phases. In the first phase, txns
# before the pack time are copied if they contain any reachable
# data. In the second phase, all txns after the pack time
# are copied.
#
# Txn and data records contain pointers to previous records.
# Because these pointers are stored as file offsets, they
# must be updated when we copy data.
# XXX Need to add sanity checking to pack
self.gc.findReachable()
# Setup the destination file and copy the metadata.
# XXX rename from _tfile to something clearer
self._tfile = open(self._name + ".pack", "w+b")
self._file.seek(0)
self._tfile.write(self._file.read(self._metadata_size))
self._copier = PackCopier(self._tfile, self.index, self.vindex,
self.tindex, self.tvindex)
ipos, opos = self.copyToPacktime()
assert ipos == self.gc.packpos
if ipos == opos:
# pack didn't free any data. there's no point in continuing.
self._tfile.close()
self._file.close()
os.remove(self._name + ".pack")
return None
self._commit_lock_acquire()
self.locked = 1
self._lock_acquire()
try:
# Re-open the file in unbuffered mode.
# The main thread may write new transactions to the file,
# which creates the possibility that we will read a status
# 'c' transaction into the pack thread's stdio buffer even
# though we're acquiring the commit lock. Transactions
# can still be in progress throughout much of packing, and
# are written to the same physical file but via a distinct
# Python file object. The code used to leave off the
# trailing 0 argument, and then on every platform except
# native Windows it was observed that we could read stale
# data from the tail end of the file.
self._file.close() # else self.gc keeps the original alive & open
self._file = open(self._path, "rb", 0)
self._file.seek(0, 2)
self.file_end = self._file.tell()
finally:
self._lock_release()
if ipos < self.file_end:
self.copyRest(ipos)
# OK, we've copied everything. Now we need to wrap things up.
pos = self._tfile.tell()
self._tfile.flush()
self._tfile.close()
self._file.close()
return pos
def copyToPacktime(self):
offset = 0L # the amount of space freed by packing
pos = self._metadata_size
new_pos = pos
while pos < self.gc.packpos:
th = self._read_txn_header(pos)
new_tpos, pos = self.copyDataRecords(pos, th)
if new_tpos:
new_pos = self._tfile.tell() + 8
tlen = new_pos - new_tpos - 8
# Update the transaction length
self._tfile.seek(new_tpos + 8)
self._tfile.write(p64(tlen))
self._tfile.seek(new_pos - 8)
self._tfile.write(p64(tlen))
tlen = self._read_num(pos)
if tlen != th.tlen:
self.fail(pos, "redundant transaction length does not "
"match initial transaction length: %d != %d",
tlen, th.tlen)
pos += 8
return pos, new_pos
def fetchBackpointer(self, oid, back):
"""Return data and refs backpointer `back` to object `oid.
If `back` is 0 or ultimately resolves to 0, return None
and None. In this case, the transaction undoes the object
creation.
"""
if back == 0:
return None
data, tid = self._loadBackTxn(oid, back, 0)
return data
def copyDataRecords(self, pos, th):
"""Copy any current data records between pos and tend.
Returns position of txn header in output file and position
of next record in the input file.
If any data records are copied, also write txn header (th).
"""
copy = 0
new_tpos = 0L
tend = pos + th.tlen
pos += th.headerlen()
while pos < tend:
h = self._read_data_header(pos)
if not self.gc.isReachable(h.oid, pos):
pos += h.recordlen()
continue
pos += h.recordlen()
# If we are going to copy any data, we need to copy
# the transaction header. Note that we will need to
# patch up the transaction length when we are done.
if not copy:
th.status = "p"
s = th.asString()
new_tpos = self._tfile.tell()
self._tfile.write(s)
new_pos = new_tpos + len(s)
copy = 1
if h.plen:
data = self._file.read(h.plen)
else:
# If a current record has a backpointer, fetch
# refs and data from the backpointer. We need
# to write the data in the new record.
data = self.fetchBackpointer(h.oid, h.back)
self.writePackedDataRecord(h, data, new_tpos)
new_pos = self._tfile.tell()
return new_tpos, pos
def writePackedDataRecord(self, h, data, new_tpos):
# Update the header to reflect current information, then write
# it to the output file.
if data is None:
data = ""
h.prev = 0
h.back = 0
h.plen = len(data)
h.tloc = new_tpos
pos = self._tfile.tell()
if h.version:
h.pnv = self.index.get(h.oid, 0)
h.vprev = self.vindex.get(h.version, 0)
self.vindex[h.version] = pos
self.index[h.oid] = pos
if h.version:
self.vindex[h.version] = pos
self._tfile.write(h.asString())
self._tfile.write(data)
if not data:
# Packed records never have backpointers (?).
# If there is no data, write a z64 backpointer.
# This is a George Bailey event.
self._tfile.write(z64)
def copyRest(self, ipos):
# After the pack time, all data records are copied.
# Copy one txn at a time, using copy() for data.
# Release the commit lock every 20 copies
self._lock_counter = 0
try:
while 1:
ipos = self.copyOne(ipos)
except CorruptedDataError, err:
# The last call to copyOne() will raise
# CorruptedDataError, because it will attempt to read past
# the end of the file. Double-check that the exception
# occurred for this reason.
self._file.seek(0, 2)
endpos = self._file.tell()
if endpos != err.pos:
raise
def copyOne(self, ipos):
# The call below will raise CorruptedDataError at EOF.
th = self._read_txn_header(ipos)
self._lock_counter += 1
if self._lock_counter % 20 == 0:
self._commit_lock_release()
pos = self._tfile.tell()
self._copier.setTxnPos(pos)
self._tfile.write(th.asString())
tend = ipos + th.tlen
ipos += th.headerlen()
while ipos < tend:
h = self._read_data_header(ipos)
ipos += h.recordlen()
prev_txn = None
if h.plen:
data = self._file.read(h.plen)
else:
data = self.fetchBackpointer(h.oid, h.back)
if h.back:
prev_txn = self.getTxnFromData(h.oid, h.back)
self._copier.copy(h.oid, h.tid, data, h.version,
prev_txn, pos, self._tfile.tell())
tlen = self._tfile.tell() - pos
assert tlen == th.tlen
self._tfile.write(p64(tlen))
ipos += 8
self.index.update(self.tindex)
self.tindex.clear()
self.vindex.update(self.tvindex)
self.tvindex.clear()
if self._lock_counter % 20 == 0:
self._commit_lock_acquire()
return ipos
##############################################################################
#
# Copyright (c) 2001, 2002, 2003 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################
"""Very Simple Mapping ZODB storage
The Mapping storage provides an extremely simple storage implementation that
doesn't provide undo or version support.
It is meant to illustrate the simplest possible storage.
The Mapping storage uses a single data structure to map object ids to data.
"""
from ZODB.utils import u64, z64
from ZODB.BaseStorage import BaseStorage
from ZODB import POSException
from persistent.TimeStamp import TimeStamp
class MappingStorage(BaseStorage):
def __init__(self, name='Mapping Storage'):
BaseStorage.__init__(self, name)
self._index = {}
        # FIXME: Why don't we use a dict for _tindex?
self._tindex = []
self._ltid = None
# Note: If you subclass this and use a persistent mapping facility
# (e.g. a dbm file), you will need to get the maximum key and save it
# as self._oid. See dbmStorage.
def __len__(self):
return len(self._index)
def getSize(self):
self._lock_acquire()
try:
# These constants are for Python object memory overheads
s = 32
for p in self._index.itervalues():
s += 56 + len(p)
return s
finally:
self._lock_release()
def load(self, oid, version):
self._lock_acquire()
try:
p = self._index[oid]
return p[8:], p[:8] # pickle, serial
finally:
self._lock_release()
def loadEx(self, oid, version):
self._lock_acquire()
try:
# Since this storage doesn't support versions, tid and
# serial will always be the same.
p = self._index[oid]
return p[8:], p[:8], "" # pickle, tid, version
finally:
self._lock_release()
def getTid(self, oid):
self._lock_acquire()
try:
# The tid is the first 8 bytes of the buffer.
return self._index[oid][:8]
finally:
self._lock_release()
def store(self, oid, serial, data, version, transaction):
if transaction is not self._transaction:
raise POSException.StorageTransactionError(self, transaction)
if version:
raise POSException.Unsupported("Versions aren't supported")
self._lock_acquire()
try:
if oid in self._index:
oserial = self._index[oid][:8]
if serial != oserial:
raise POSException.ConflictError(oid=oid,
serials=(oserial, serial),
data=data)
self._tindex.append((oid, self._tid + data))
finally:
self._lock_release()
return self._tid
def _clear_temp(self):
self._tindex = []
def _finish(self, tid, user, desc, ext):
self._index.update(dict(self._tindex))
self._ltid = self._tid
def lastTransaction(self):
return self._ltid
def pack(self, t, referencesf):
self._lock_acquire()
try:
if not self._index:
return
# Build an index of *only* those objects reachable from the root.
rootl = [z64]
pindex = {}
while rootl:
oid = rootl.pop()
if oid in pindex:
continue
# Scan non-version pickle for references
r = self._index[oid]
pindex[oid] = r
referencesf(r[8:], rootl)
# Now delete any unreferenced entries:
for oid in self._index.keys():
if oid not in pindex:
del self._index[oid]
finally:
self._lock_release()
def _splat(self):
"""Spit out a string showing state."""
o = ['Index:']
keys = self._index.keys()
keys.sort()
for oid in keys:
r = self._index[oid]
o.append(' %s: %s, %s' %
(u64(oid), TimeStamp(r[:8]), repr(r[8:])))
return '\n'.join(o)
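if __name__ == '__main__':
    # Hedged demonstration, not part of the original module: exercise the
    # storage through a full ZODB database, which supplies the transaction
    # bookkeeping that store() expects.
    import transaction
    from ZODB import DB
    db = DB(MappingStorage())
    conn = db.open()
    conn.root()['counter'] = 1
    transaction.commit()
    print conn.root()['counter']  # -> 1
    conn.close()
    db.close()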
##############################################################################
#
# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################
"""Mounted database support
$Id$"""
import sys
import time
import thread
import logging
import persistent
import Acquisition
from Acquisition import aq_base
from POSException import MountedStorageError
logger = logging.getLogger('ZODB.Mount')
# dbs is a holder for all DB objects, needed to overcome
# threading issues. It maps connection params to a DB object
# and a mapping of mount points.
dbs = {}
# dblock is locked every time dbs is accessed.
dblock=thread.allocate_lock()
def parentClassFactory(jar, module, name):
# Use the class factory from the parent database.
parent_conn = getattr(jar, '_mount_parent_jar', None)
parent_db = getattr(parent_conn, '_db', None)
if parent_db is None:
_globals = {}
_silly = ('__doc__',)
return getattr(__import__(
module, _globals, _globals, _silly), name)
else:
return parent_db.classFactory(parent_conn, module, name)
class MountPoint(persistent.Persistent, Acquisition.Implicit):
'''The base class for a Zope object which, when traversed,
accesses a different database.
'''
# Default values for non-persistent variables.
_v_db = None
_v_data = None
_v_connect_error = None
def __init__(self, path, params=None, classDefsFromRoot=1):
'''
@arg path The path within the mounted database from which
to derive the root.
@arg params The parameters used to connect to the database.
No particular format required.
If there is more than one mount point referring to a
database, MountPoint will detect the matching params
and use the existing database. Include the class name of
the storage. For example,
ZEO params might be "ZODB.ZEOClient localhost 1081".
@arg classDefsFromRoot If true (the default), MountPoint will
try to get ZClass definitions from the root database rather
than the mounted database.
'''
# The only reason we need a __mountpoint_id is to
# be sure we don't close a database prematurely when
# it is mounted more than once and one of the points
# is unmounted.
self.__mountpoint_id = '%s_%f' % (id(self), time.time())
if params is None:
# We still need something to use as a hash in
# the "dbs" dictionary.
params = self.__mountpoint_id
self._params = repr(params)
self._path = path
self._classDefsFromRoot = classDefsFromRoot
def _createDB(self):
'''Gets the database object, usually by creating a Storage object
and returning ZODB.DB(storage).
'''
raise NotImplementedError
def _getDB(self):
'''Creates or opens a DB object.
'''
newMount = 0
dblock.acquire()
try:
params = self._params
dbInfo = dbs.get(params, None)
if dbInfo is None:
logger.info('Opening database for mounting: %s', params)
db = self._createDB()
newMount = 1
dbs[params] = (db, {self.__mountpoint_id:1})
if getattr(self, '_classDefsFromRoot', 1):
db.classFactory = parentClassFactory
else:
db, mounts = dbInfo
# Be sure this object is in the list of mount points.
if not mounts.has_key(self.__mountpoint_id):
newMount = 1
mounts[self.__mountpoint_id] = 1
self._v_db = db
finally:
dblock.release()
return db, newMount
def _getMountpointId(self):
return self.__mountpoint_id
def _getMountParams(self):
return self._params
def __repr__(self):
return "%s(%s, %s)" % (self.__class__.__name__, repr(self._path),
self._params)
def _openMountableConnection(self, parent):
# Opens a new connection to the database.
db = self._v_db
if db is None:
self._v_close_db = 0
db, newMount = self._getDB()
else:
newMount = 0
jar = getattr(self, '_p_jar', None)
if jar is None:
# Get _p_jar from parent.
self._p_jar = jar = parent._p_jar
conn = db.open(version=jar.getVersion())
# Add an attribute to the connection which
# makes it possible for us to find the primary
# database connection. See ClassFactoryForMount().
conn._mount_parent_jar = jar
mcc = MountedConnectionCloser(self, conn)
jar.onCloseCallback(mcc)
return conn, newMount, mcc
def _getObjectFromConnection(self, conn):
obj = self._getMountRoot(conn.root())
data = aq_base(obj)
# Store the data object in a tuple to hide from acquisition.
self._v_data = (data,)
return data
def _getOrOpenObject(self, parent):
t = self._v_data
if t is None:
self._v_connect_error = None
conn = None
newMount = 0
mcc = None
try:
conn, newMount, mcc = self._openMountableConnection(parent)
data = self._getObjectFromConnection(conn)
except:
# Possibly broken database.
if mcc is not None:
# Note that the next line may be a little rash--
# if, for example, a working database throws an
# exception rather than wait for a new connection,
# this will likely cause the database to be closed
# prematurely. Perhaps DB.py needs a
# countActiveConnections() method.
mcc.setCloseDb()
self._logConnectException()
raise
if newMount:
try: id = data.getId()
except: id = '???' # data has no getId() method. Bad.
p = '/'.join(parent.getPhysicalPath() + (id,))
logger.info('Mounted database %s at %s',
self._getMountParams(), p)
else:
data = t[0]
return data.__of__(parent)
def __of__(self, parent):
# Accesses the database, returning an acquisition
# wrapper around the connected object rather than around self.
try:
return self._getOrOpenObject(parent)
except:
return Acquisition.ImplicitAcquisitionWrapper(
self, parent)
def _test(self, parent):
'''Tests the database connection.
'''
self._getOrOpenObject(parent)
return 1
def _getMountRoot(self, root):
'''Gets the object to be mounted.
Can be overridden to provide different behavior.
'''
try:
app = root['Application']
except:
raise MountedStorageError, (
"No 'Application' object exists in the mountable database.")
try:
return app.unrestrictedTraverse(self._path)
except:
raise MountedStorageError, (
"The path '%s' was not found in the mountable database."
% self._path)
def _logConnectException(self):
'''Records info about the exception that just occurred.
'''
try:
from cStringIO import StringIO
except:
from StringIO import StringIO
import traceback
        exc = sys.exc_info()
        logger.warning('Failed to mount database. %s (%s)' % exc[:2],
                       exc_info=True)
f=StringIO()
traceback.print_tb(exc[2], 100, f)
self._v_connect_error = (exc[0], exc[1], f.getvalue())
exc = None
class MountedConnectionCloser:
'''Closes the connection used by the mounted database
while performing other cleanup.
'''
close_db = 0
def __init__(self, mountpoint, conn):
# conn is the child connection.
self.mp = mountpoint
self.conn = conn
def setCloseDb(self):
self.close_db = 1
def __call__(self):
# The onCloseCallback handler.
# Closes a single connection to the database
# and possibly the database itself.
conn = self.conn
close_db = 0
if conn is not None:
mp = self.mp
# Remove potential circular references.
self.conn = None
self.mp = None
# Detect whether we should close the database.
close_db = self.close_db
t = mp.__dict__.get('_v_data', None)
if t is not None:
del mp.__dict__['_v_data']
data = t[0]
if not close_db and data.__dict__.get(
'_v__object_deleted__', 0):
# This mount point has been deleted.
del data.__dict__['_v__object_deleted__']
close_db = 1
# Close the child connection.
try:
del conn._mount_parent_jar
except:
pass
conn.close()
if close_db:
# Stop using this database. Close it if no other
# MountPoint is using it.
dblock.acquire()
try:
params = mp._getMountParams()
mp._v_db = None
if dbs.has_key(params):
dbInfo = dbs[params]
db, mounts = dbInfo
try: del mounts[mp._getMountpointId()]
except: pass
if len(mounts) < 1:
# No more mount points are using this database.
del dbs[params]
db.close()
logger.info('Closed database: %s', params)
finally:
dblock.release()
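# A hedged sketch, not part of the original module: the one hook a concrete
# mount point must supply is _createDB().  The storage path and the params
# string below are hypothetical.
class ExampleFileMountPoint(MountPoint):
    def _createDB(self):
        # Called once per distinct params value; _getDB() caches the
        # resulting DB object in the module-level "dbs" mapping.
        from ZODB import DB
        from ZODB.FileStorage import FileStorage
        return DB(FileStorage('/var/zodb/mounted.fs'))
# Usage would look like:
#   mp = ExampleFileMountPoint('/', params='file:/var/zodb/mounted.fs')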
##############################################################################
#
# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################
"""ZODB-defined exceptions
$Id$"""
from ZODB.utils import oid_repr, readable_tid_repr
def _fmt_undo(oid, reason):
s = reason and (": %s" % reason) or ""
return "Undo error %s%s" % (oid_repr(oid), s)
class POSError(StandardError):
"""Persistent object system error."""
class POSKeyError(KeyError, POSError):
"""Key not found in database."""
def __str__(self):
return oid_repr(self.args[0])
class TransactionError(POSError):
"""An error occured due to normal transaction processing."""
class TransactionFailedError(POSError):
"""Cannot perform an operation on a transaction that previously failed.
An attempt was made to commit a transaction, or to join a transaction,
but this transaction previously raised an exception during an attempt
to commit it. The transaction must be explicitly aborted, either by
invoking abort() on the transaction, or begin() on its transaction
manager.
"""
class ConflictError(TransactionError):
"""Two transactions tried to modify the same object at once.
This transaction should be resubmitted.
Instance attributes:
oid : string
the OID (8-byte packed string) of the object in conflict
class_name : string
the fully-qualified name of that object's class
message : string
a human-readable explanation of the error
serials : (string, string)
a pair of 8-byte packed strings; these are the serial numbers
        related to the conflict. The first is the revision of the object
        that is in conflict (the currently committed serial). The second is
the revision the current transaction read when it started.
data : string
The database record that failed to commit, used to put the
class name in the error message.
The caller should pass either object or oid as a keyword argument,
but not both of them. If object is passed, it should be a
persistent object with an _p_oid attribute.
"""
def __init__(self, message=None, object=None, oid=None, serials=None,
data=None):
if message is None:
self.message = "database conflict error"
else:
self.message = message
if object is None:
self.oid = None
self.class_name = None
else:
self.oid = object._p_oid
klass = object.__class__
self.class_name = klass.__module__ + "." + klass.__name__
if oid is not None:
assert self.oid is None
self.oid = oid
if data is not None:
# avoid circular import chain
from ZODB.utils import get_pickle_metadata
self.class_name = "%s.%s" % get_pickle_metadata(data)
## else:
## if message != "data read conflict error":
## raise RuntimeError
self.serials = serials
def __str__(self):
extras = []
if self.oid:
extras.append("oid %s" % oid_repr(self.oid))
if self.class_name:
extras.append("class %s" % self.class_name)
if self.serials:
current, old = self.serials
extras.append("serial this txn started with %s" %
readable_tid_repr(old))
extras.append("serial currently committed %s" %
readable_tid_repr(current))
if extras:
return "%s (%s)" % (self.message, ", ".join(extras))
else:
return self.message
def get_oid(self):
return self.oid
def get_class_name(self):
return self.class_name
def get_old_serial(self):
return self.serials[1]
def get_new_serial(self):
return self.serials[0]
def get_serials(self):
return self.serials
class ReadConflictError(ConflictError):
"""Conflict detected when object was loaded.
An attempt was made to read an object that has changed in another
transaction (eg. another thread or process).
"""
def __init__(self, message=None, object=None, serials=None):
if message is None:
message = "database read conflict error"
ConflictError.__init__(self, message=message, object=object,
serials=serials)
class BTreesConflictError(ConflictError):
"""A special subclass for BTrees conflict errors."""
msgs = [# 0; i2 or i3 bucket split; positions are all -1
'Conflicting bucket split',
# 1; keys the same, but i2 and i3 values differ, and both values
# differ from i1's value
'Conflicting changes',
# 2; i1's value changed in i2, but key+value deleted in i3
'Conflicting delete and change',
# 3; i1's value changed in i3, but key+value deleted in i2
'Conflicting delete and change',
# 4; i1 and i2 both added the same key, or both deleted the
# same key
'Conflicting inserts or deletes',
# 5; i2 and i3 both deleted the same key
'Conflicting deletes',
# 6; i2 and i3 both added the same key
'Conflicting inserts',
# 7; i2 and i3 both deleted the same key, or i2 changed the value
# associated with a key and i3 deleted that key
'Conflicting deletes, or delete and change',
# 8; i2 and i3 both deleted the same key, or i3 changed the value
# associated with a key and i2 deleted that key
'Conflicting deletes, or delete and change',
# 9; i2 and i3 both deleted the same key
'Conflicting deletes',
# 10; i2 and i3 deleted all the keys, and didn't insert any,
# leaving an empty bucket; conflict resolution doesn't have
# enough info to unlink an empty bucket from its containing
# BTree correctly
'Empty bucket from deleting all keys',
# 11; conflicting changes in an internal BTree node
'Conflicting changes in an internal BTree node',
]
def __init__(self, p1, p2, p3, reason):
self.p1 = p1
self.p2 = p2
self.p3 = p3
self.reason = reason
def __repr__(self):
return "BTreesConflictError(%d, %d, %d, %d)" % (self.p1,
self.p2,
self.p3,
self.reason)
def __str__(self):
return "BTrees conflict error at %d/%d/%d: %s" % (
self.p1, self.p2, self.p3, self.msgs[self.reason])
class DanglingReferenceError(TransactionError):
"""An object has a persistent reference to a missing object.
If an object is stored and it has a reference to another object
that does not exist (for example, it was deleted by pack), this
    exception may be raised. Whether a storage supports this feature is
    a quality-of-implementation issue.
Instance attributes:
referer: oid of the object being written
missing: referenced oid that does not have a corresponding object
"""
def __init__(self, Aoid, Boid):
self.referer = Aoid
self.missing = Boid
def __str__(self):
return "from %s to %s" % (oid_repr(self.referer),
oid_repr(self.missing))
class VersionError(POSError):
"""An error in handling versions occurred."""
class VersionCommitError(VersionError):
"""An invalid combination of versions was used in a version commit."""
class VersionLockError(VersionError, TransactionError):
"""Modification to an object modified in an unsaved version.
An attempt was made to modify an object that has been modified in an
unsaved version.
"""
class UndoError(POSError):
"""An attempt was made to undo a non-undoable transaction."""
def __init__(self, reason, oid=None):
self._reason = reason
self._oid = oid
def __str__(self):
return _fmt_undo(self._oid, self._reason)
class MultipleUndoErrors(UndoError):
"""Several undo errors occured during a single transaction."""
def __init__(self, errs):
# provide a reason and oid for clients that only look at that
UndoError.__init__(self, *errs[0])
self._errs = errs
def __str__(self):
return "\n".join([_fmt_undo(*pair) for pair in self._errs])
class StorageError(POSError):
"""Base class for storage based exceptions."""
class StorageTransactionError(StorageError):
"""An operation was invoked for an invalid transaction or state."""
class StorageSystemError(StorageError):
"""Panic! Internal storage error!"""
class MountedStorageError(StorageError):
"""Unable to access mounted storage."""
class ReadOnlyError(StorageError):
"""Unable to modify objects in a read-only storage."""
class TransactionTooLargeError(StorageTransactionError):
"""The transaction exhausted some finite storage resource."""
class ExportError(POSError):
"""An export file doesn't have the right format."""
class Unsupported(POSError):
"""A feature was used that is not supported by the storage."""
class InvalidObjectReference(POSError):
"""An object contains an invalid reference to another object.
An invalid reference may be one of:
o A reference to a wrapped persistent object.
o A reference to an object in a different database connection.
XXX The exception ought to have a member that is the invalid object.
"""
class ConnectionStateError(POSError):
"""A Connection isn't in the required state for an operation.
o An operation such as a load is attempted on a closed connection.
o An attempt to close a connection is made while the connection is
still joined to a transaction (for example, a transaction is in
progress, with uncommitted modifications in the connection).
"""
# Extension information for zpkg.
<extension winlock>
source winlock.c
</extension>
##############################################################################
#
# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################
from ZODB import POSException
from ZODB.utils import p64, u64, z64
import tempfile
class TmpStore:
"""A storage to support subtransactions."""
_bver = ''
def __init__(self, base_version, storage):
self._transaction = None
self._storage = storage
if base_version:
self._bver = base_version
self._file = tempfile.TemporaryFile()
# _pos: current file position
# _tpos: file position at last commit point
self._pos = self._tpos = 0L
# _index: map oid to pos of last committed version
self._index = {}
# _tindex: map oid to pos for new updates
self._tindex = {}
self._creating = []
def close(self):
self._file.close()
def getName(self):
return self._storage.getName()
def getSize(self):
return self._pos
def load(self, oid, version):
pos = self._index.get(oid)
if pos is None:
return self._storage.load(oid, self._bver)
self._file.seek(pos)
h = self._file.read(8)
oidlen = u64(h)
read_oid = self._file.read(oidlen)
if read_oid != oid:
raise POSException.StorageSystemError('Bad temporary storage')
h = self._file.read(16)
size = u64(h[8:])
serial = h[:8]
return self._file.read(size), serial
# XXX clarify difference between self._storage & self._db._storage
def modifiedInVersion(self, oid):
if self._index.has_key(oid):
return self._bver
return self._storage.modifiedInVersion(oid)
def new_oid(self):
return self._storage.new_oid()
def registerDB(self, db, limit):
pass
def store(self, oid, serial, data, version, transaction):
if transaction is not self._transaction:
raise POSException.StorageTransactionError(self, transaction)
self._file.seek(self._pos)
l = len(data)
if serial is None:
serial = z64
header = p64(len(oid)) + oid + serial + p64(l)
self._file.write(header)
self._file.write(data)
self._tindex[oid] = self._pos
self._pos += l + len(header)
return serial
def tpc_abort(self, transaction):
if transaction is not self._transaction:
return
self._tindex.clear()
self._transaction = None
self._pos = self._tpos
def tpc_begin(self, transaction):
if self._transaction is transaction:
return
self._transaction = transaction
self._tindex.clear() # Just to be sure!
self._pos = self._tpos
def tpc_vote(self, transaction):
pass
def tpc_finish(self, transaction, f=None):
if transaction is not self._transaction:
return
if f is not None:
f()
self._index.update(self._tindex)
self._tindex.clear()
self._tpos = self._pos
def undoLog(self, first, last, filter=None):
return ()
def versionEmpty(self, version):
# XXX what is this supposed to do?
if version == self._bver:
return len(self._index)
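if __name__ == '__main__':
    # Hedged demonstration, not part of the original module: TmpStore is
    # normally driven by Connection during subtransaction commit, but its
    # two-phase protocol can be exercised directly.
    from ZODB.MappingStorage import MappingStorage
    tmp = TmpStore('', MappingStorage())
    txn = object()  # any token will do; TmpStore compares by identity
    tmp.tpc_begin(txn)
    oid = '\0' * 8
    tmp.store(oid, None, 'fake pickle', '', txn)
    tmp.tpc_vote(txn)
    tmp.tpc_finish(txn)
    print tmp.load(oid, '')  # -> ('fake pickle', z64)
    tmp.close()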
##############################################################################
#
# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################
"""Provide backward compatibility with storages that only have undoLog()."""
class UndoLogCompatible:
def undoInfo(self, first=0, last=-20, specification=None):
if specification:
def filter(desc, spec=specification.items()):
get=desc.get
for k, v in spec:
if get(k, None) != v:
return 0
return 1
else: filter=None
return self.undoLog(first, last, filter)
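if __name__ == '__main__':
    # Hedged demonstration, not part of the original module: the filter
    # built from `specification` keeps only those undo descriptions that
    # match every key/value pair.  FakeStorage is a made-up stand-in.
    class FakeStorage(UndoLogCompatible):
        def undoLog(self, first, last, filter=None):
            entries = [{'user_name': 'alice', 'id': 't1'},
                       {'user_name': 'bob', 'id': 't2'}]
            return [e for e in entries if filter is None or filter(e)]
    print FakeStorage().undoInfo(specification={'user_name': 'bob'})
    # -> only the entry whose user_name is 'bob'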
##############################################################################
#
# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################
"""Implement an bobo_application object that is BoboPOS3 aware
This module provides a wrapper that causes a database connection to be created
and used when bobo publishes a bobo_application object.
"""
import transaction
connection_open_hooks = []
class ZApplicationWrapper:
def __init__(self, db, name, klass= None, klass_args= (),
version_cookie_name=None):
self._stuff = db, name, version_cookie_name
if klass is not None:
conn=db.open()
root=conn.root()
if not root.has_key(name):
root[name]=klass()
transaction.commit()
conn.close()
self._klass=klass
# This hack is to overcome a bug in Bobo!
def __getattr__(self, name):
return getattr(self._klass, name)
def __bobo_traverse__(self, REQUEST=None, name=None):
db, aname, version_support = self._stuff
if version_support is not None and REQUEST is not None:
version=REQUEST.get(version_support,'')
else: version=''
conn=db.open(version)
if connection_open_hooks:
for hook in connection_open_hooks:
hook(conn)
# arrange for the connection to be closed when the request goes away
cleanup = Cleanup(conn)
REQUEST._hold(cleanup)
conn.setDebugInfo(REQUEST.environ, REQUEST.other)
v=conn.root()[aname]
if name is not None:
if hasattr(v, '__bobo_traverse__'):
return v.__bobo_traverse__(REQUEST, name)
if hasattr(v,name): return getattr(v,name)
return v[name]
return v
def __call__(self, connection=None):
db, aname, version_support = self._stuff
if connection is None:
connection=db.open()
        elif isinstance(connection, basestring):
connection=db.open(connection)
return connection.root()[aname]
class Cleanup:
def __init__(self, jar):
self._jar = jar
def __del__(self):
self._jar.close()
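if __name__ == '__main__':
    # Hedged demonstration, not part of the original module: when a klass
    # is given, the wrapper creates the application object on first use,
    # and calling the wrapper returns it from a fresh connection.
    from ZODB import DB
    from ZODB.MappingStorage import MappingStorage
    from persistent.mapping import PersistentMapping
    db = DB(MappingStorage())
    app = ZApplicationWrapper(db, 'Application', klass=PersistentMapping)
    print app()  # -> the PersistentMapping stored under 'Application'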
##############################################################################
#
# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################
# The next line must use double quotes, so release.py recognizes it.
__version__ = "3.3.1a1"
import sys
import __builtin__
from persistent import TimeStamp
from DB import DB
from transaction import get as get_transaction
# Backward compat for old imports. I don't think TimeStamp should
# really be in persistent anyway.
sys.modules['ZODB.TimeStamp'] = sys.modules['persistent.TimeStamp']
# TODO Issue deprecation warning if this variant is used?
__builtin__.get_transaction = get_transaction
del __builtin__
##############################################################################
#
# Copyright (c) 2004 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Broken object support
$Id$
"""
import sys
import persistent
broken_cache = {}
class Broken(object):
"""Broken object base class
Broken objects are placeholders for objects that can no longer be
created because their class has gone away.
Broken objects don't really do much of anything, except hold their
state. The Broken class is used as a base class for creating
    classes in lieu of missing classes::
>>> Atall = type('Atall', (Broken, ), {'__module__': 'not.there'})
The only thing the class can be used for is to create new objects::
>>> Atall()
<broken not.there.Atall instance>
>>> Atall().__Broken_newargs__
()
>>> Atall().__Broken_initargs__
()
>>> Atall(1, 2).__Broken_newargs__
(1, 2)
>>> Atall(1, 2).__Broken_initargs__
(1, 2)
>>> a = Atall.__new__(Atall, 1, 2)
>>> a
<broken not.there.Atall instance>
>>> a.__Broken_newargs__
(1, 2)
>>> a.__Broken_initargs__
You can't modify broken objects::
>>> a.x = 1
Traceback (most recent call last):
...
BrokenModified: Can't change broken objects
But you can set their state::
>>> a.__setstate__({'x': 1, })
You can pickle broken objects::
>>> r = a.__reduce__()
>>> len(r)
3
>>> r[0] is rebuild
True
>>> r[1]
('not.there', 'Atall', 1, 2)
>>> r[2]
{'x': 1}
>>> import cPickle
>>> a2 = cPickle.loads(cPickle.dumps(a, 1))
>>> a2
<broken not.there.Atall instance>
>>> a2.__Broken_newargs__
(1, 2)
>>> a2.__Broken_initargs__
>>> a2.__Broken_state__
{'x': 1}
Cleanup::
>>> broken_cache.clear()
"""
__Broken_state__ = __Broken_initargs__ = None
__name__ = 'bob XXX'
def __new__(class_, *args):
result = object.__new__(class_)
result.__dict__['__Broken_newargs__'] = args
return result
def __init__(self, *args):
self.__dict__['__Broken_initargs__'] = args
def __reduce__(self):
"""We pickle broken objects in hope of being able to fix them later
"""
return (rebuild,
((self.__class__.__module__, self.__class__.__name__)
+ self.__Broken_newargs__),
self.__Broken_state__,
)
def __setstate__(self, state):
self.__dict__['__Broken_state__'] = state
def __repr__(self):
return "<broken %s.%s instance>" % (
self.__class__.__module__, self.__class__.__name__)
def __setattr__(self, name, value):
raise BrokenModified("Can't change broken objects")
def find_global(modulename, globalname,
# These are *not* optimizations. Callers can override these.
Broken=Broken, type=type,
):
"""Find a global object, returning a broken class if it can't be found.
    This function looks up global variables in modules::
>>> import sys
>>> find_global('sys', 'path') is sys.path
True
If an object can't be found, a broken class is returned::
>>> broken = find_global('ZODB.not.there', 'atall')
>>> issubclass(broken, Broken)
True
>>> broken.__module__
'ZODB.not.there'
>>> broken.__name__
'atall'
Broken classes are cached::
>>> find_global('ZODB.not.there', 'atall') is broken
True
If we "repair" a missing global::
>>> class ZODBnotthere:
... atall = []
>>> sys.modules['ZODB.not'] = ZODBnotthere
>>> sys.modules['ZODB.not.there'] = ZODBnotthere
we can then get the repaired value::
>>> find_global('ZODB.not.there', 'atall') is ZODBnotthere.atall
True
    Of course, if we break it again::
>>> del sys.modules['ZODB.not']
>>> del sys.modules['ZODB.not.there']
we get the broken value::
>>> find_global('ZODB.not.there', 'atall') is broken
True
Cleanup::
>>> broken_cache.clear()
"""
try:
__import__(modulename)
except ImportError:
pass
else:
module = sys.modules[modulename]
try:
return getattr(module, globalname)
except AttributeError:
pass
try:
return broken_cache[(modulename, globalname)]
except KeyError:
pass
class_ = type(globalname, (Broken, ), {'__module__': modulename})
broken_cache[(modulename, globalname)] = class_
return class_
def rebuild(modulename, globalname, *args):
"""Recreate a broken object, possibly recreating the missing class
    This function unpickles broken objects::
>>> broken = rebuild('ZODB.notthere', 'atall', 1, 2)
>>> broken
<broken ZODB.notthere.atall instance>
>>> broken.__Broken_newargs__
(1, 2)
If we "repair" the brokenness::
>>> class notthere: # fake notthere module
... class atall(object):
... def __new__(self, *args):
... ob = object.__new__(self)
... ob.args = args
... return ob
... def __repr__(self):
... return 'atall %s %s' % self.args
>>> sys.modules['ZODB.notthere'] = notthere
>>> rebuild('ZODB.notthere', 'atall', 1, 2)
atall 1 2
>>> del sys.modules['ZODB.notthere']
Cleanup::
>>> broken_cache.clear()
"""
class_ = find_global(modulename, globalname)
return class_.__new__(class_, *args)
class BrokenModified(TypeError):
"""Attempt to modify a broken object
"""
class PersistentBroken(Broken, persistent.Persistent):
r"""Persistent broken objects
Persistent broken objects are used for broken objects that are
also persistent. In addition to having to track the original
object data, they need to handle persistent meta data.
Persistent broken classes are created from existing broken classes
    using the persistentBroken function::
>>> Atall = type('Atall', (Broken, ), {'__module__': 'not.there'})
>>> PAtall = persistentBroken(Atall)
(Note that we always get the *same* persistent broken class
for a given broken class::
>>> persistentBroken(Atall) is PAtall
True
)
Persistent broken classes work a lot like broken classes::
>>> a = PAtall.__new__(PAtall, 1, 2)
>>> a
<persistent broken not.there.Atall instance None>
>>> a.__Broken_newargs__
(1, 2)
>>> a.__Broken_initargs__
>>> a.x = 1
Traceback (most recent call last):
...
BrokenModified: Can't change broken objects
Unlike regular broken objects, persistent broken objects keep
track of persistence meta data:
>>> a._p_oid = '\0\0\0\0****'
>>> a
<persistent broken not.there.Atall instance '\x00\x00\x00\x00****'>
and persistent broken objects aren't directly picklable:
>>> a.__reduce__()
Traceback (most recent call last):
...
BrokenModified: """ \
r"""<persistent broken not.there.Atall instance '\x00\x00\x00\x00****'>
but you can get their state:
>>> a.__setstate__({'y': 2})
>>> a.__getstate__()
{'y': 2}
Cleanup::
>>> broken_cache.clear()
"""
def __new__(class_, *args):
result = persistent.Persistent.__new__(class_)
result.__dict__['__Broken_newargs__'] = args
return result
def __reduce__(self, *args):
raise BrokenModified(self)
def __getstate__(self):
return self.__Broken_state__
def __setattr__(self, name, value):
if name.startswith('_p_'):
persistent.Persistent.__setattr__(self, name, value)
else:
raise BrokenModified("Can't change broken objects")
def __repr__(self):
return "<persistent broken %s.%s instance %r>" % (
self.__class__.__module__, self.__class__.__name__,
self._p_oid)
def __getnewargs__(self):
return self.__Broken_newargs__
def persistentBroken(class_):
try:
return class_.__dict__['__Broken_Persistent__']
except KeyError:
class_.__Broken_Persistent__ = (
type(class_.__name__,
(PersistentBroken, class_),
{'__module__': class_.__module__},
)
)
return class_.__dict__['__Broken_Persistent__']
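if __name__ == '__main__':
    # Hedged demonstration, not part of the original module: plugging
    # find_global into a cPickle Unpickler (whose find_global attribute is
    # assignable in Python 2) makes unpickling tolerate missing classes.
    import cPickle
    from cStringIO import StringIO
    # A protocol-0 pickle of the global not.there.atall, which does not
    # exist anywhere on the import path.
    p = "cnot.there\natall\n."
    unpickler = cPickle.Unpickler(StringIO(p))
    unpickler.find_global = find_global
    cls = unpickler.load()
    print issubclass(cls, Broken)  # -> True
    broken_cache.clear()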
<component prefix="ZODB.config">
<!-- XXX needs descriptions for everything -->
<abstracttype name="ZODB.storage"/>
<abstracttype name="ZODB.database"/>
<sectiontype name="filestorage" datatype=".FileStorage"
implements="ZODB.storage">
<key name="path" required="yes">
<description>
Path name to the main storage file. The names for
supplemental files, including index and lock files, will be
computed from this.
</description>
</key>
<key name="create" datatype="boolean" default="false">
<description>
Flag that indicates whether the storage should be truncated if
it already exists.
</description>
</key>
<key name="read-only" datatype="boolean" default="false">
<description>
If true, only reads may be executed against the storage. Note
that the "pack" operation is not considered a write operation
and is still allowed on a read-only filestorage.
</description>
</key>
<key name="quota" datatype="byte-size">
<description>
Maximum allowed size of the storage file. Operations which
would cause the size of the storage to exceed the quota will
result in a ZODB.FileStorage.FileStorageQuotaError being
raised.
</description>
</key>
</sectiontype>
<sectiontype name="mappingstorage" datatype=".MappingStorage"
implements="ZODB.storage">
<key name="name" default="Mapping Storage"/>
</sectiontype>
<!-- The BDB storages probably need to be revised somewhat still.
The extension relationship seems a little odd.
-->
<sectiontype name="fullstorage" datatype=".BDBFullStorage"
implements="ZODB.storage">
<key name="envdir" required="yes" />
<key name="interval" datatype="time-interval" default="2m" />
<key name="kbyte" datatype="integer" default="0" />
<key name="min" datatype="integer" default="0" />
<key name="logdir" />
<key name="cachesize" datatype="byte-size" default="128MB" />
<key name="frequency" datatype="time-interval" default="0" />
<key name="packtime" datatype="time-interval" default="4h" />
<key name="gcpack" datatype="integer" default="0" />
<key name="read-only" datatype="boolean" default="off"/>
</sectiontype>
<sectiontype name="minimalstorage" datatype=".BDBMinimalStorage"
implements="ZODB.storage" extends="fullstorage"/>
<sectiontype name="zeoclient" datatype=".ZEOClient"
implements="ZODB.storage">
<multikey name="server" datatype="socket-address" required="yes"/>
<key name="storage" default="1">
<description>
The name of the storage that the client wants to use. If the
ZEO server serves more than one storage, the client selects
the storage it wants to use by name. The default name is '1',
which is also the default name for the ZEO server.
</description>
</key>
<key name="cache-size" datatype="byte-size" default="20MB">
<description>
The maximum size of the client cache, in bytes, KB or MB.
</description>
</key>
<key name="name" default="">
<description>
The storage name. If unspecified, the address of the server
will be used as the name.
</description>
</key>
<key name="client">
<description>
Enables persistent cache files. The string passed here is
used to construct the cache filenames. If it is not
specified, the client creates a temporary cache that will
only be used by the current object.
</description>
</key>
<key name="var">
<description>
The directory where persistent cache files are stored. By
default cache files, if they are persistent, are stored in
the current directory.
</description>
</key>
<key name="min-disconnect-poll" datatype="integer" default="5">
<description>
        The minimum delay, in seconds, between attempts to connect to
        the server. Defaults to 5 seconds.
</description>
</key>
<key name="max-disconnect-poll" datatype="integer" default="300">
<description>
        The maximum delay, in seconds, between attempts to connect to
        the server. Defaults to 300 seconds.
</description>
</key>
<key name="wait" datatype="boolean" default="on">
<description>
A boolean indicating whether the constructor should wait
for the client to connect to the server and verify the cache
before returning. The default is true.
</description>
</key>
<key name="read-only" datatype="boolean" default="off">
<description>
A flag indicating whether this should be a read-only storage,
defaulting to false (i.e. writing is allowed by default).
</description>
</key>
<key name="read-only-fallback" datatype="boolean" default="off">
<description>
A flag indicating whether a read-only remote storage should be
acceptable as a fallback when no writable storages are
available. Defaults to false. At most one of read_only and
read_only_fallback should be true.
</description>
</key>
<key name="realm" required="no">
<description>
The authentication realm of the server. Some authentication
        schemes use a realm to identify the logical set of usernames
that are accepted by this server.
</description>
</key>
</sectiontype>
<sectiontype name="demostorage" datatype=".DemoStorage"
implements="ZODB.storage">
<key name="name" default="Demo Storage"/>
<section type="ZODB.storage" name="*" attribute="base"/>
<key name="quota" datatype="integer"/>
</sectiontype>
<sectiontype name="zodb" datatype=".ZODBDatabase"
implements="ZODB.database">
<section type="ZODB.storage" name="*" attribute="storage"/>
<key name="cache-size" datatype="integer" default="5000"/>
<key name="pool-size" datatype="integer" default="7"/>
<key name="version-pool-size" datatype="integer" default="3"/>
<key name="version-cache-size" datatype="integer" default="100"/>
</sectiontype>
</component>
##############################################################################
#
# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################
"""Open database and storage from a configuration.
$Id$"""
import os
from cStringIO import StringIO
import ZConfig
import ZODB
db_schema_path = os.path.join(ZODB.__path__[0], "config.xml")
_db_schema = None
s_schema_path = os.path.join(ZODB.__path__[0], "storage.xml")
_s_schema = None
def getDbSchema():
global _db_schema
if _db_schema is None:
_db_schema = ZConfig.loadSchema(db_schema_path)
return _db_schema
def getStorageSchema():
global _s_schema
if _s_schema is None:
_s_schema = ZConfig.loadSchema(s_schema_path)
return _s_schema
def databaseFromString(s):
return databaseFromFile(StringIO(s))
def databaseFromFile(f):
config, handle = ZConfig.loadConfigFile(getDbSchema(), f)
return databaseFromConfig(config.database)
def databaseFromURL(url):
config, handler = ZConfig.loadConfig(getDbSchema(), url)
return databaseFromConfig(config.database)
def databaseFromConfig(section):
return section.open()
def storageFromString(s):
return storageFromFile(StringIO(s))
def storageFromFile(f):
config, handle = ZConfig.loadConfigFile(getStorageSchema(), f)
return storageFromConfig(config.storage)
def storageFromURL(url):
config, handler = ZConfig.loadConfig(getStorageSchema(), url)
return storageFromConfig(config.storage)
def storageFromConfig(section):
return section.open()
class BaseConfig:
"""Object representing a configured storage or database.
Methods:
open() -- open and return the configured object
Attributes:
name -- name of the storage
"""
def __init__(self, config):
self.config = config
self.name = config.getSectionName()
def open(self):
"""Open and return the storage object."""
raise NotImplementedError
class ZODBDatabase(BaseConfig):
def open(self):
section = self.config
storage = section.storage.open()
try:
return ZODB.DB(storage,
pool_size=section.pool_size,
cache_size=section.cache_size,
version_pool_size=section.version_pool_size,
version_cache_size=section.version_cache_size)
except:
storage.close()
raise
class MappingStorage(BaseConfig):
def open(self):
from ZODB.MappingStorage import MappingStorage
return MappingStorage(self.config.name)
class DemoStorage(BaseConfig):
def open(self):
from ZODB.DemoStorage import DemoStorage
if self.config.base:
base = self.config.base.open()
else:
base = None
return DemoStorage(self.config.name,
base=base,
quota=self.config.quota)
class FileStorage(BaseConfig):
def open(self):
from ZODB.FileStorage import FileStorage
return FileStorage(self.config.path,
create=self.config.create,
read_only=self.config.read_only,
quota=self.config.quota)
class ZEOClient(BaseConfig):
def open(self):
from ZEO.ClientStorage import ClientStorage
# config.server is a multikey of socket-address values
# where the value is a socket family, address tuple.
L = [server.address for server in self.config.server]
return ClientStorage(
L,
storage=self.config.storage,
cache_size=self.config.cache_size,
name=self.config.name,
client=self.config.client,
var=self.config.var,
min_disconnect_poll=self.config.min_disconnect_poll,
max_disconnect_poll=self.config.max_disconnect_poll,
wait=self.config.wait,
read_only=self.config.read_only,
read_only_fallback=self.config.read_only_fallback)
class BDBStorage(BaseConfig):
def open(self):
from BDBStorage.BerkeleyBase import BerkeleyConfig
storageclass = self.get_storageclass()
bconf = BerkeleyConfig()
for name in dir(BerkeleyConfig):
if name.startswith('_'):
continue
setattr(bconf, name, getattr(self.config, name))
return storageclass(self.config.envdir, config=bconf)
class BDBMinimalStorage(BDBStorage):
def get_storageclass(self):
import BDBStorage.BDBMinimalStorage
return BDBStorage.BDBMinimalStorage.BDBMinimalStorage
class BDBFullStorage(BDBStorage):
def get_storageclass(self):
import BDBStorage.BDBFullStorage
return BDBStorage.BDBFullStorage.BDBFullStorage
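if __name__ == '__main__':
    # Hedged demonstration, not part of the original module: open a
    # database from a configuration string, using the section types
    # declared in the component.xml shown earlier in this document.
    sample = """
    <zodb>
      <mappingstorage/>
    </zodb>
    """
    db = databaseFromString(sample)
    print db.getName()  # -> 'Mapping Storage'
    db.close()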
<schema prefix="ZODB.config">
<import package="ZODB"/>
<section type="ZODB.database" name="*" attribute="database"/>
</schema>
##############################################################################
#
# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################
import persistent.mapping
class fixer:
def __of__(self, parent):
def __setstate__(state, self=parent):
self._container=state
del self.__setstate__
return __setstate__
fixer=fixer()
class hack: pass
hack=hack()
def __basicnew__():
r=persistent.mapping.PersistentMapping()
r.__setstate__=fixer
return r
hack.__basicnew__=__basicnew__
##############################################################################
#
# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################
"""Very Simple dbm-based ZODB storage
This storage provides for the use of dbm files as storages that
don't support versions or undo. This may be useful when implementing
objects like hit counters that don't need or want to participate
in undo or versions.
"""
from ZODB.utils import z64
from MappingStorage import MappingStorage
from BaseStorage import BaseStorage
import anydbm, os
class anydbmStorage(MappingStorage):
def __init__(self, filename, flag='r', mode=0666):
BaseStorage.__init__(self, filename)
self._index=anydbm.open(filename, flag, mode)
self._tindex=[]
keys=self._index.keys()
if keys: self._oid=max(keys)
def getSize(self):
# This is a little iffy, since we aren't entirely sure what the file is
self._lock_acquire()
try:
try:
return (os.stat(self.__name__+'.data')[6] +
os.stat(self.__name__+'.dir')[6]
)
except:
try: return os.stat(self.__name__)[6]
except: return 0
finally: self._lock_release()
class gdbmStorage(anydbmStorage):
def __init__(self, filename, flag='r', mode=0666):
BaseStorage.__init__(self, filename)
import gdbm
self._index=index=gdbm.open(filename, flag[:1]+'f', mode)
self._tindex=[]
m=z64
oid=index.firstkey()
while oid != None:
m=max(m, oid)
oid=index.nextkey(oid)
self._oid=m
def getSize(self):
self._lock_acquire()
try: return os.stat(self.__name__)[6]
finally: self._lock_release()
def pack(self, t, referencesf):
self._lock_acquire()
try:
# Build an index of *only* those objects reachable
# from the root.
index=self._index
rootl=[z64]
pop=rootl.pop
pindex={}
referenced=pindex.has_key
while rootl:
oid=pop()
if referenced(oid): continue
# Scan non-version pickle for references
r=index[oid]
pindex[oid]=r
p=r[8:]
referencesf(p, rootl)
# Now delete any unreferenced entries:
deleted=[]
oid=index.firstkey()
while oid != None:
if not referenced(oid): deleted.append(oid)
oid=index.nextkey(oid)
pindex=referenced=None
for oid in deleted: del index[oid]
index.sync()
index.reorganize()
finally: self._lock_release()
def _finish(self, tid, user, desc, ext):
index=self._index
for oid, p in self._tindex: index[oid]=p
index.sync()
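if __name__ == '__main__':
    # Hedged demonstration, not part of the original module: the dbm file
    # path is made up here, and flag 'c' creates the file if it is missing.
    import tempfile
    path = os.path.join(tempfile.mkdtemp(), 'demo-storage')
    storage = anydbmStorage(path, flag='c')
    print storage.getName()      # -> the dbm file path
    print storage._index.keys()  # -> [] for a fresh file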
##############################################################################
#
# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Implement an OID to File-position (long integer) mapping."""
# To save space, we do two things:
#
# 1. We split the keys (OIDS) into 6-byte prefixes and 2-byte suffixes.
# We use the prefixes as keys in a mapping from prefix to mappings
# of suffix to data:
#
# data is {prefix -> {suffix -> data}}
#
# 2. We limit the data size to 48 bits. This should allow databases
# as large as 256 terabytes.
#
# Most of the space is consumed by items in the mappings from 2-byte
# suffix to 6-byte data. This should reduce the overall memory usage to
# 8-16 bytes per OID.
#
# Since the mapping from suffix to data contains at most 256 entries,
# we use a BTree bucket instead of a full BTree to store the results.
#
# We use p64 to convert integers to 8-byte strings and lop off the two
# high-order bytes when saving. On loading data, we add the leading
# bytes back before using u64 to convert the data back to (long)
# integers.
import struct
from BTrees._fsBTree import fsBucket
# convert between numbers and six-byte strings
def num2str(n):
return struct.pack(">Q", n)[2:]
def str2num(s):
return struct.unpack(">Q", "\000\000" + s)[0]
class fsIndex:
def __init__(self):
self._data = {}
def __getitem__(self, key):
return str2num(self._data[key[:6]][key[6:]])
def get(self, key, default=None):
tree = self._data.get(key[:6], default)
if tree is default:
return default
v = tree.get(key[6:], default)
if v is default:
return default
return str2num(v)
def __setitem__(self, key, value):
value = num2str(value)
treekey = key[:6]
tree = self._data.get(treekey)
if tree is None:
tree = fsBucket()
self._data[treekey] = tree
tree[key[6:]] = value
def __len__(self):
r = 0
for tree in self._data.values():
r += len(tree)
return r
def update(self, mapping):
for k, v in mapping.items():
self[k] = v
def has_key(self, key):
v=self.get(key, self)
return v is not self
def __contains__(self, key):
tree = self._data.get(key[:6])
if tree is None:
return False
v = tree.get(key[6:], None)
if v is None:
return False
return True
def clear(self):
self._data.clear()
def __iter__(self):
for prefix, tree in self._data.items():
for suffix in tree:
yield prefix + suffix
def keys(self):
r = []
for prefix, tree in self._data.items():
for suffix in tree.keys():
r.append(prefix + suffix)
return r
def items(self):
r = []
for prefix, tree in self._data.items():
for suffix, v in tree.items():
r.append(((prefix + suffix), str2num(v)))
return r
def values(self):
r = []
for prefix, tree in self._data.items():
for v in tree.values():
r.append(str2num(v))
return r
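if __name__ == '__main__':
    # Hedged demonstration, not part of the original module: keys are
    # 8-byte packed OIDs and values are file positions below 2**48.
    from ZODB.utils import p64
    index = fsIndex()
    index[p64(1)] = 1234
    index[p64(2 ** 20)] = 5678
    print index[p64(1)], len(index)  # -> 1234 2
    print p64(2 ** 20) in index      # -> True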
##############################################################################
#
# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################
"""Simple script for repairing damaged FileStorage files.
Usage: %s [-f] [-v level] [-p] [-P seconds] input output
Recover data from a FileStorage data file, skipping over damaged data. Any
damaged data will be lost. This could lead to useless output if critical
data is lost.
Options:
-f
Overwrite output file even if it exists.
-v level
Set the verbosity level:
0 -- show progress indicator (default)
1 -- show transaction times and sizes
2 -- show transaction times and sizes, and show object (record)
ids, versions, and sizes
-p
Copy partial transactions. If a data record in the middle of a
transaction is bad, the data up to the bad data are packed. The
output record is marked as packed. If this option is not used,
transactions with any bad data are skipped.
-P t
Pack data to t seconds in the past. Note that if the "-p" option is
used, then t should be 0.
Important: The ZODB package must be importable. You may need to adjust
PYTHONPATH accordingly.
"""
# Algorithm:
#
# position to start of input
# while 1:
# if end of file:
# break
# try:
# copy_transaction
# except:
# scan for transaction
# continue
import sys
import os
import getopt
import time
from struct import unpack
from cPickle import loads
try:
import ZODB
except ImportError:
if os.path.exists('ZODB'):
sys.path.append('.')
elif os.path.exists('FileStorage.py'):
sys.path.append('..')
import ZODB
import ZODB.FileStorage
from ZODB.utils import t32, u64
from ZODB.FileStorage import RecordIterator
from persistent.TimeStamp import TimeStamp
def die(mess='', show_docstring=False):
if mess:
print >> sys.stderr, mess + '\n'
if show_docstring:
print >> sys.stderr, __doc__ % sys.argv[0]
sys.exit(1)
class ErrorFound(Exception):
pass
def error(mess, *args):
raise ErrorFound(mess % args)
def read_txn_header(f, pos, file_size, outp, ltid):
# Read the transaction record
f.seek(pos)
h = f.read(23)
if len(h) < 23:
raise EOFError
tid, stl, status, ul, dl, el = unpack(">8s8scHHH",h)
if el < 0: el=t32-el
tl = u64(stl)
if pos + (tl + 8) > file_size:
error("bad transaction length at %s", pos)
if tl < (23 + ul + dl + el):
error("invalid transaction length, %s, at %s", tl, pos)
if ltid and tid < ltid:
error("time-stamp reducation %s < %s, at %s", u64(tid), u64(ltid), pos)
if status == "c":
truncate(f, pos, file_size, outp)
raise EOFError
if status not in " up":
error("invalid status, %r, at %s", status, pos)
tpos = pos
tend = tpos + tl
if status == "u":
# Undone transaction, skip it
f.seek(tend)
h = f.read(8)
if h != stl:
error("inconsistent transaction length at %s", pos)
pos = tend + 8
return pos, None, tid
pos = tpos+(23+ul+dl+el)
user = f.read(ul)
description = f.read(dl)
if el:
try: e=loads(f.read(el))
except: e={}
else: e={}
result = RecordIterator(tid, status, user, description, e, pos, tend,
f, tpos)
pos = tend
# Read the (intentionally redundant) transaction length
f.seek(pos)
h = f.read(8)
if h != stl:
error("redundant transaction length check failed at %s", pos)
pos += 8
return pos, result, tid
def truncate(f, pos, file_size, outp):
"""Copy data from pos to end of f to a .trNNN file."""
i = 0
    while 1:
        trname = outp + ".tr%d" % i
        if os.path.exists(trname):
            i += 1
        else:
            # Found an unused .trNNN name; stop searching.
            break
    tr = open(trname, "wb")
    copy(f, tr, file_size - pos)
    f.seek(pos)
    tr.close()
def copy(src, dst, n):
while n:
buf = src.read(8096)
if not buf:
break
if len(buf) > n:
buf = buf[:n]
dst.write(buf)
n -= len(buf)
def scan(f, pos):
"""Return a potential transaction location following pos in f.
This routine scans forward from pos looking for the last data
record in a transaction. A period '.' always occurs at the end of
a pickle, and an 8-byte transaction length follows the last
pickle. If a period is followed by a plausible 8-byte transaction
length, assume that we have found the end of a transaction.
The caller should try to verify that the returned location is
actually a transaction header.
"""
while 1:
f.seek(pos)
data = f.read(8096)
if not data:
return 0
s = 0
while 1:
l = data.find(".", s)
if l < 0:
pos += len(data)
break
# If we are less than 8 bytes from the end of the
# string, we need to read more data.
s = l + 1
if s > len(data) - 8:
pos += l
break
tl = u64(data[s:s+8])
if tl < pos:
return pos + s + 8
def iprogress(i):
if i % 2:
print ".",
else:
print (i/2) % 10,
sys.stdout.flush()
def progress(p):
for i in range(p):
iprogress(i)
def main():
try:
opts, args = getopt.getopt(sys.argv[1:], "fv:pP:")
except getopt.error, msg:
die(str(msg), show_docstring=True)
if len(args) != 2:
die("two positional arguments required", show_docstring=True)
inp, outp = args
force = partial = False
verbose = 0
pack = None
for opt, v in opts:
if opt == "-v":
verbose = int(v)
elif opt == "-p":
partial = True
elif opt == "-f":
force = True
elif opt == "-P":
pack = time.time() - float(v)
recover(inp, outp, verbose, partial, force, pack)
def recover(inp, outp, verbose=0, partial=False, force=False, pack=None):
print "Recovering", inp, "into", outp
if os.path.exists(outp) and not force:
die("%s exists" % outp)
f = open(inp, "rb")
if f.read(4) != ZODB.FileStorage.packed_version:
die("input is not a file storage")
f.seek(0,2)
file_size = f.tell()
ofs = ZODB.FileStorage.FileStorage(outp, create=1)
_ts = None
ok = 1
prog1 = 0
undone = 0
pos = 4L
ltid = None
while pos:
try:
npos, txn, tid = read_txn_header(f, pos, file_size, outp, ltid)
except EOFError:
break
except (KeyboardInterrupt, SystemExit):
raise
except Exception, err:
print "error reading txn header:", err
if not verbose:
progress(prog1)
pos = scan(f, pos)
if verbose > 1:
print "looking for valid txn header at", pos
continue
ltid = tid
if txn is None:
undone = undone + npos - pos
pos = npos
continue
else:
pos = npos
tid = txn.tid
if _ts is None:
_ts = TimeStamp(tid)
else:
t = TimeStamp(tid)
if t <= _ts:
if ok:
print ("Time stamps out of order %s, %s" % (_ts, t))
ok = 0
_ts = t.laterThan(_ts)
tid = `_ts`
else:
_ts = t
if not ok:
print ("Time stamps back in order %s" % (t))
ok = 1
ofs.tpc_begin(txn, tid, txn.status)
if verbose:
print "begin", pos, _ts,
if verbose > 1:
print
sys.stdout.flush()
nrec = 0
try:
for r in txn:
if verbose > 1:
if r.data is None:
l = "bp"
else:
l = len(r.data)
print "%7d %s %s" % (u64(r.oid), l, r.version)
ofs.restore(r.oid, r.tid, r.data, r.version, r.data_txn,
txn)
nrec += 1
except (KeyboardInterrupt, SystemExit):
raise
except Exception, err:
if partial and nrec:
ofs._status = "p"
ofs.tpc_vote(txn)
ofs.tpc_finish(txn)
if verbose:
print "partial"
else:
ofs.tpc_abort(txn)
print "error copying transaction:", err
if not verbose:
progress(prog1)
pos = scan(f, pos)
if verbose > 1:
print "looking for valid txn header at", pos
else:
ofs.tpc_vote(txn)
ofs.tpc_finish(txn)
if verbose:
print "finish"
sys.stdout.flush()
if not verbose:
            prog = pos * 20L / file_size
while prog > prog1:
prog1 = prog1 + 1
iprogress(prog1)
bad = file_size - undone - ofs._pos
print "\n%s bytes removed during recovery" % bad
if undone:
print "%s bytes of undone transaction data were skipped" % undone
if pack is not None:
print "Packing ..."
from ZODB.serialize import referencesf
ofs.pack(pack, referencesf)
ofs.close()
if __name__ == "__main__":
main()
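# Illustrative usage sketch (hypothetical file names; the options are the
# ones parsed by main() above):
#
#   python fsrecover.py -v 1 Data.fs Recovered.fs
#
# copies recoverable transactions from Data.fs into Recovered.fs, printing
# a line per transaction, while
#
#   python fsrecover.py -P 86400 Data.fs Recovered.fs
#
# additionally packs the recovered storage to a time one day in the past.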
##############################################################################
#
# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################
"""Tools for using FileStorage data files.
XXX This module needs tests.
XXX This file needs to be kept in sync with FileStorage.py.
"""
import cPickle
import struct
from ZODB.FileStorage.format import TRANS_HDR, DATA_HDR, TRANS_HDR_LEN, \
DATA_HDR_LEN, DATA_VERSION_HDR_LEN
from ZODB.utils import u64
from persistent.TimeStamp import TimeStamp
class TxnHeader:
"""Object representing a transaction record header.
Attribute Position Value
--------- -------- -----
tid 0- 8 transaction id
length 8-16 length of entire transaction record - 8
status 16-17 status of transaction (' ', 'u', 'p'?)
user_len 17-19 length of user field (pack code H)
descr_len 19-21 length of description field (pack code H)
ext_len 21-23 length of extensions (pack code H)
"""
def __init__(self, file, pos):
self._file = file
self._pos = pos
self._read_header()
def _read_header(self):
self._file.seek(self._pos)
self._hdr = self._file.read(TRANS_HDR_LEN)
(self.tid, self.length, self.status, self.user_len, self.descr_len,
self.ext_len) = struct.unpack(TRANS_HDR, self._hdr)
def read_meta(self):
"""Load user, descr, and ext attributes."""
self.user = ""
self.descr = ""
self.ext = {}
if not (self.user_len or self.descr_len or self.ext_len):
return
self._file.seek(self._pos + TRANS_HDR_LEN)
if self.user_len:
self.user = self._file.read(self.user_len)
if self.descr_len:
self.descr = self._file.read(self.descr_len)
if self.ext_len:
self._ext = self._file.read(self.ext_len)
self.ext = cPickle.loads(self._ext)
def get_data_offset(self):
return (self._pos + TRANS_HDR_LEN + self.user_len + self.descr_len
+ self.ext_len)
def get_timestamp(self):
return TimeStamp(self.tid)
def get_raw_data(self):
data_off = self.get_data_offset()
data_len = self.length - (data_off - self._pos)
self._file.seek(data_off)
return self._file.read(data_len)
def next_txn(self):
off = self._pos + self.length + 8
self._file.seek(off)
s = self._file.read(8)
if not s:
return None
return TxnHeader(self._file, off)
def prev_txn(self):
if self._pos == 4:
return None
self._file.seek(self._pos - 8)
tlen = u64(self._file.read(8))
return TxnHeader(self._file, self._pos - (tlen + 8))
class DataHeader:
"""Object representing a data record header.
Attribute Position Value
--------- -------- -----
oid 0- 8 object id
    serial           8-16   object serial number
prev_rec_pos 16-24 position of previous data record for object
txn_pos 24-32 position of txn header
version_len 32-34 length of version
data_len 34-42 length of data
nonversion_pos 42-50* position of nonversion data record
prev_version_pos 50-58* pos of previous version data record
* these attributes are only present if version_len != 0.
"""
def __init__(self, file, pos):
self._file = file
self._pos = pos
self._read_header()
def _read_header(self):
self._file.seek(self._pos)
self._hdr = self._file.read(DATA_VERSION_HDR_LEN)
# always read the longer header, just in case
(self.oid, self.serial, prev_rec_pos, txn_pos, self.version_len,
data_len) = struct.unpack(DATA_HDR, self._hdr[:DATA_HDR_LEN])
self.prev_rec_pos = u64(prev_rec_pos)
self.txn_pos = u64(txn_pos)
self.data_len = u64(data_len)
if self.version_len:
s = self._hdr[DATA_HDR_LEN:]
self.nonversion_pos = u64(s[:8])
self.prev_version_pos = u64(s[8:])
else:
self.nonversion_pos = None
self.prev_version_pos = None
def next_offset(self):
"""Return offset of next record."""
off = self._pos + self.data_len
if self.version_len:
off += self.version_len + DATA_VERSION_HDR_LEN
else:
off += DATA_HDR_LEN
if self.data_len == 0:
off += 8 # backpointer
return off
def prev_txn(f):
"""Return transaction located before current file position."""
f.seek(-8, 1)
tlen = u64(f.read(8)) + 8
return TxnHeader(f, f.tell() - tlen)
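# Illustrative sketch (hypothetical path; not part of this module): walk a
# FileStorage data file with TxnHeader. The first transaction header starts
# at offset 4, immediately after the 4-byte magic string.
#
#   f = open("Data.fs", "rb")
#   txn = TxnHeader(f, 4L)
#   while txn is not None:
#       txn.read_meta()
#       print txn.get_timestamp(), repr(txn.user), repr(txn.descr)
#       txn = txn.next_txn()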
##############################################################################
#
# Copyright (c) 2004 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Interfaces for ZODB.
$Id$
"""
try:
from zope.interface import Interface, Attribute, implements
from zope.interface.verify import verifyObject
except ImportError:
class Interface:
pass
class Attribute:
def __init__(self, __name__, __doc__):
self.__name__ = __name__
self.__doc__ = __doc__
def implements(*args):
pass
def verifyObject(*args):
pass
class IDataManager(Interface):
"""Objects that manage transactional storage.
    These objects may manage data for other objects, or they may manage
non-object storages, such as relational databases.
"""
def abort_sub(transaction):
"""Discard all subtransaction data.
See subtransaction.txt
This is called when top-level transactions are aborted.
No further subtransactions can be started once abort_sub()
has been called; this is only used when the transaction is
being aborted.
abort_sub also implies the abort of a 2-phase commit.
This should never fail.
"""
def commit_sub(transaction):
"""Commit all changes made in subtransactions and begin 2-phase commit
Data are saved *as if* they are part of the current transaction.
That is, they will not be persistent unless the current transaction
is committed.
This is called when the current top-level transaction is committed.
No further subtransactions can be started once commit_sub()
has been called; this is only used when the transaction is
being committed.
        This call also implies the beginning of the 2-phase commit.
"""
# Two-phase commit protocol. These methods are called by the
# ITransaction object associated with the transaction being
# committed.
def tpc_begin(transaction, subtransaction=False):
"""Begin commit of a transaction, starting the two-phase commit.
transaction is the ITransaction instance associated with the
transaction being committed.
subtransaction is a Boolean flag indicating whether the
two-phase commit is being invoked for a subtransaction.
Important note: Subtransactions are modelled in the sense that
when you commit a subtransaction, subsequent commits should be
for subtransactions as well. That is, there must be a
commit_sub() call between a tpc_begin() call with the
subtransaction flag set to true and a tpc_begin() with the
flag set to false.
"""
def tpc_abort(transaction):
"""Abort a transaction.
This is always called after a tpc_begin call.
transaction is the ITransaction instance associated with the
transaction being committed.
This should never fail.
"""
def tpc_finish(transaction):
"""Indicate confirmation that the transaction is done.
transaction is the ITransaction instance associated with the
transaction being committed.
This should never fail. If this raises an exception, the
database is not expected to maintain consistency; it's a
serious error.
"""
def tpc_vote(transaction):
"""Verify that a data manager can commit the transaction
This is the last chance for a data manager to vote 'no'. A
data manager votes 'no' by raising an exception.
transaction is the ITransaction instance associated with the
transaction being committed.
"""
def commit(object, transaction):
"""CCCommit changes to an object
Save the object as part of the data to be made persistent if
the transaction commits.
"""
def abort(object, transaction):
"""Abort changes to an object
Only changes made since the last transaction or
sub-transaction boundary are discarded.
This method may be called either:
o Outside of two-phase commit, or
o In the first phase of two-phase commit
"""
def sortKey():
"""
Return a key to use for ordering registered DataManagers
ZODB uses a global sort order to prevent deadlock when it commits
transactions involving multiple resource managers. The resource
manager must define a sortKey() method that provides a global ordering
for resource managers.
"""
class ITransaction(Interface):
"""Object representing a running transaction.
Objects with this interface may represent different transactions
during their lifetime (.begin() can be called to start a new
transaction using the same instance).
"""
user = Attribute(
"user",
"The name of the user on whose behalf the transaction is being\n"
"performed. The format of the user name is defined by the\n"
"application.")
# XXX required to be a string?
description = Attribute(
"description",
"Textual description of the transaction.")
def begin(info=None, subtransaction=None):
"""Begin a new transaction.
If the transaction is in progress, it is aborted and a new
transaction is started using the same transaction object.
"""
def commit(subtransaction=None):
"""Finalize the transaction.
This executes the two-phase commit algorithm for all
IDataManager objects associated with the transaction.
"""
def abort(subtransaction=0, freeme=1):
"""Abort the transaction.
This is called from the application. This can only be called
before the two-phase commit protocol has been started.
"""
def join(datamanager):
"""Add a datamanager to the transaction.
The datamanager must implement the
transactions.interfaces.IDataManager interface, and be
adaptable to ZODB.interfaces.IDataManager.
"""
def register(object):
"""Register the given object for transaction control."""
def note(text):
"""Add text to the transaction description.
If a description has already been set, text is added to the
end of the description following two newline characters.
Surrounding whitespace is stripped from text.
"""
# XXX does impl do the right thing with ''? Not clear what
# the "right thing" is.
def setUser(user_name, path="/"):
"""Set the user name.
path should be provided if needed to further qualify the
identified user.
"""
def setExtendedInfo(name, value):
"""Add extension data to the transaction.
name is the name of the extension property to set; value must
be a picklable value.
Storage implementations may limit the amount of extension data
which can be stored.
"""
# XXX is this this allowed to cause an exception here, during
# the two-phase commit, or can it toss data silently?
class IConnection(Interface):
"""ZODB connection.
XXX: This interface is incomplete.
"""
def add(ob):
"""Add a new object 'obj' to the database and assign it an oid.
A persistent object is normally added to the database and
assigned an oid when it becomes reachable to an object already in
the database. In some cases, it is useful to create a new
object and use its oid (_p_oid) in a single transaction.
This method assigns a new oid regardless of whether the object
is reachable.
The object is added when the transaction commits. The object
must implement the IPersistent interface and must not
already be associated with a Connection.
"""
##############################################################################
#
# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################
import os
import errno
import logging
logger = logging.getLogger("ZODB.lock_file")
try:
import fcntl
except ImportError:
try:
from winlock import LockFile as _LockFile
from winlock import UnlockFile as _UnlockFile
except ImportError:
        def lock_file(file):
            logger.info('No file-locking support on this platform')
    else:
        # Windows
        def lock_file(file):
            # Lock just the first byte
            _LockFile(file.fileno(), 0, 0, 1, 0)

        def unlock_file(file):
            _UnlockFile(file.fileno(), 0, 0, 1, 0)
else:
# Unix
_flags = fcntl.LOCK_EX | fcntl.LOCK_NB
def lock_file(file):
fcntl.flock(file.fileno(), _flags)
def unlock_file(file):
# File is automatically unlocked on close
pass
# This is a better interface to use than the module-level lock_file()
# function above. Creating the instance acquires the lock, and the file
# remains open. Calling close() unlocks and closes the lock file, then
# removes it.
class LockFile:
def __init__(self, path):
self._path = path
try:
self._fp = open(path, 'r+')
except IOError, e:
if e.errno <> errno.ENOENT: raise
self._fp = open(path, 'w+')
# Acquire the lock and piss on the hydrant
try:
lock_file(self._fp)
except:
logger.exception("Error locking file %s", path)
raise
print >> self._fp, os.getpid()
self._fp.flush()
def close(self):
if self._fp is not None:
unlock_file(self._fp)
self._fp.close()
os.unlink(self._path)
self._fp = None
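# Illustrative sketch (hypothetical path): guard a resource with LockFile.
# Constructing the instance acquires the lock and records our pid in the
# file; close() unlocks it, closes it, and removes the file.
#
#   lock = LockFile('Data.fs.lock')
#   try:
#       pass  # work with the protected resource
#   finally:
#       lock.close()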
##############################################################################
#
# Copyright (c) 2004 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################
"""Supplies custom logging levels BLATHER and TRACE.
$Revision: 1.1 $
"""
import logging
__all__ = ["BLATHER", "TRACE"]
# In the days of zLOG, there were 7 standard log levels, and ZODB/ZEO used
# all of them. Here's how they map to the logging package's 5 standard
# levels:
#
# zLOG logging
# ------------- ---------------
# PANIC (300) FATAL, CRITICAL (50)
# ERROR (200) ERROR (40)
# WARNING, PROBLEM (100) WARN (30)
# INFO (0) INFO (20)
# BLATHER (-100) none -- defined here as BLATHER (15)
# DEBUG (-200) DEBUG (10)
# TRACE (-300) none -- defined here as TRACE (5)
#
# TRACE is used by ZEO for extremely verbose trace output, enabled only
# when chasing bottom-level communications bugs. It really should be at
# a lower level than DEBUG.
#
# BLATHER is a harder call, and various instances could probably be folded
# into INFO or DEBUG without real harm.
BLATHER = 15
TRACE = 5
logging.addLevelName("BLATHER", BLATHER)
logging.addLevelName("TRACE", TRACE)
##############################################################################
#
# Copyright (c) 2003 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Support for ZODB object serialization.
ZODB serializes objects using a custom format based on Python pickles.
When an object is unserialized, it can be loaded as either a ghost or
a real object. A ghost is a persistent object of the appropriate type
but without any state. The first time a ghost is accessed, the
persistence machinery traps access and loads the actual state. A
ghost allows many persistent objects to be loaded while minimizing the
memory consumption of referenced but otherwise unused objects.
Pickle format
-------------
ZODB stores serialized objects using a custom format based on pickle.
Each serialized object has two parts: the class description and the
object state. The class description must provide enough information
to call the class's ``__new__`` and create an empty object. Once the
object exists as a ghost, its state is passed to ``__setstate__``.
The class description can be in a variety of formats, in part to
provide backwards compatibility with earlier versions of Zope. The
two current formats for class description are:
1. type(obj)
2. type(obj), obj.__getnewargs__()
The second of these options is used if the object has a __getnewargs__()
method. It is intended to support objects like persistent classes that have
custom C layouts that are determined by arguments to __new__().
The type object is usually stored using the standard pickle mechanism, which
involves the pickle GLOBAL opcode (giving the type's module and name as
strings). The type may itself be a persistent object, in which case a
persistent reference (see below) is used.
It's unclear what "usually" means in the last paragraph. There are two
useful places to concentrate confusion about exactly which formats exist:
- BaseObjectReader.getClassName() below returns a dotted "module.class"
string, via actually loading a pickle. This requires that the
implementation of application objects be available.
- ZODB/utils.py's get_pickle_metadata() tries to return the module and
class names (as strings) without importing any application modules or
classes, via analyzing the pickle.
Earlier versions of Zope supported several other kinds of class
descriptions. The current serialization code reads these descriptions, but
does not write them. The four earlier formats are:
3. (module name, class name), None
4. (module name, class name), __getinitargs__()
5. class, None
6. class, __getinitargs__()
Formats 4 and 6 are used only if the class defines a __getinitargs__()
method. Formats 5 and 6 are used if the class does not have a __module__
attribute (I'm not sure when this applies, but I think it occurs for some
but not all ZClasses).
Persistent references
---------------------
A persistent reference is a pair containing an oid and class metadata.
When one persistent object pickle refers to another persistent object,
the database uses a persistent reference. The format allows a
significant optimization, because ghosts can be created directly from
persistent references. If the reference was just an oid, a database
access would be required to determine the class of the ghost.
Because the persistent reference includes the class, it is not
possible to change the class of a persistent object. If a transaction
changed the class of an object, a new record with new class metadata
would be written but all the old references would still include the
old class.
"""
import cPickle
import cStringIO
import logging
from persistent import Persistent
from persistent.wref import WeakRefMarker, WeakRef
from ZODB import broken
from ZODB.broken import Broken
from ZODB.POSException import InvalidObjectReference
# Might want to update or redo coptimizations to reflect weakrefs:
# from ZODB.coptimizations import new_persistent_id
def myhasattr(obj, name, _marker=object()):
"""Make sure we don't mask exceptions like hasattr().
We don't want exceptions other than AttributeError to be masked,
since that too often masks other programming errors.
Three-argument getattr() doesn't mask those, so we use that to
implement our own hasattr() replacement.
"""
return getattr(obj, name, _marker) is not _marker
class BaseObjectWriter:
"""Serializes objects for storage in the database.
The ObjectWriter creates object pickles in the ZODB format. It
also detects new persistent objects reachable from the current
object.
"""
def __init__(self, jar=None):
self._file = cStringIO.StringIO()
self._p = cPickle.Pickler(self._file, 1)
self._stack = []
self._p.persistent_id = self.persistent_id
if jar is not None:
assert myhasattr(jar, "new_oid")
self._jar = jar
def persistent_id(self, obj):
"""Return the persistent id for obj.
>>> from ZODB.tests.util import P
>>> class DummyJar:
... def new_oid(self):
... return 42
>>> jar = DummyJar()
>>> writer = BaseObjectWriter(jar)
Normally, object references include the oid and a cached
reference to the class. Having the class available allows
        fast creation of the ghost without an additional
        database lookup.
>>> bob = P('bob')
>>> oid, cls = writer.persistent_id(bob)
>>> oid
42
>>> cls is P
True
If a persistent object does not already have an oid and jar,
these will be assigned by persistent_id():
>>> bob._p_oid
42
>>> bob._p_jar is jar
True
If the object already has a persistent id, the id is not changed:
>>> bob._p_oid = 24
>>> oid, cls = writer.persistent_id(bob)
>>> oid
24
>>> cls is P
True
If the jar doesn't match that of the writer, an error is raised:
>>> bob._p_jar = DummyJar()
>>> writer.persistent_id(bob)
Traceback (most recent call last):
...
InvalidObjectReference: Attempt to store an object from a """ \
"""foreign database connection
Constructor arguments used by __new__(), as returned by
__getnewargs__(), can affect memory allocation, but may also
change over the life of the object. This makes it useless to
cache even the object's class.
>>> class PNewArgs(P):
... def __getnewargs__(self):
... return ()
>>> sam = PNewArgs('sam')
>>> writer.persistent_id(sam)
42
>>> sam._p_oid
42
>>> sam._p_jar is jar
True
Check that simple objects don't get accused of persistence:
>>> writer.persistent_id(42)
>>> writer.persistent_id(object())
Check that a classic class doesn't get identified improperly:
>>> class ClassicClara:
... pass
>>> clara = ClassicClara()
>>> writer.persistent_id(clara)
"""
# Most objects are not persistent. The following cheap test
# identifies most of them. For these, we return None,
# signalling that the object should be pickled normally.
if not isinstance(obj, (Persistent, type, WeakRef)):
# Not persistent, pickle normally
return None
# Any persistent object must have an oid:
try:
oid = obj._p_oid
except AttributeError:
# Not persistent, pickle normally
return None
if not (oid is None or isinstance(oid, str)):
# Deserves a closer look:
# Make sure it's not a descr
if hasattr(oid, '__get__'):
                # The oid is a descriptor. That means obj is a non-persistent
# class whose instances are persistent, so ...
# Not persistent, pickle normally
return None
if oid is WeakRefMarker:
# we have a weakref, see weakref.py
oid = obj.oid
if oid is None:
obj = obj() # get the referenced object
oid = obj._p_oid
if oid is None:
# Here we are causing the object to be saved in
# the database. One could argue that we shouldn't
# do this, because a weakref should not cause an object
# to be added. We'll be optimistic, though, and
# assume that the object will be added eventually.
oid = self._jar.new_oid()
obj._p_jar = self._jar
obj._p_oid = oid
self._stack.append(obj)
return [oid]
# Since we have an oid, we have either a persistent instance
# (an instance of Persistent), or a persistent class.
# NOTE! Persistent classes don't (and can't) subclass persistent.
if oid is None:
oid = obj._p_oid = self._jar.new_oid()
obj._p_jar = self._jar
self._stack.append(obj)
elif obj._p_jar is not self._jar:
raise InvalidObjectReference(
"Attempt to store an object from a foreign "
"database connection"
)
klass = type(obj)
if hasattr(klass, '__getnewargs__'):
# We don't want to save newargs in object refs.
# It's possible that __getnewargs__ is degenerate and
# returns (), but we don't want to have to deghostify
# the object to find out.
return oid
return oid, klass
def serialize(self, obj):
# We don't use __class__ here, because obj could be a persistent proxy.
# We don't want to be fooled by proxies.
klass = type(obj)
newargs = getattr(obj, "__getnewargs__", None)
if newargs is None:
meta = klass
else:
meta = klass, newargs()
return self._dump(meta, obj.__getstate__())
def _dump(self, classmeta, state):
# To reuse the existing cStringIO object, we must reset
# the file position to 0 and truncate the file after the
# new pickle is written.
self._file.seek(0)
self._p.clear_memo()
self._p.dump(classmeta)
self._p.dump(state)
self._file.truncate()
return self._file.getvalue()
class ObjectWriter(BaseObjectWriter):
def __init__(self, obj):
BaseObjectWriter.__init__(self, obj._p_jar)
self._stack.append(obj)
def __iter__(self):
return NewObjectIterator(self._stack)
class NewObjectIterator:
# The pickler is used as a forward iterator when the connection
# is looking for new objects to pickle.
def __init__(self, stack):
self._stack = stack
def __iter__(self):
return self
def next(self):
if self._stack:
elt = self._stack.pop()
return elt
else:
raise StopIteration
class BaseObjectReader:
def _persistent_load(self, oid):
# subclasses must define _persistent_load().
raise NotImplementedError
def _get_class(self, module, name):
# subclasses must define _get_class()
raise NotImplementedError
def _get_unpickler(self, pickle):
file = cStringIO.StringIO(pickle)
unpickler = cPickle.Unpickler(file)
unpickler.persistent_load = self._persistent_load
return unpickler
def _new_object(self, klass, args):
if not args and not myhasattr(klass, "__getnewargs__"):
obj = klass.__new__(klass)
else:
obj = klass(*args)
if not isinstance(klass, type):
obj.__dict__.clear()
return obj
def getClassName(self, pickle):
unpickler = self._get_unpickler(pickle)
klass = unpickler.load()
if isinstance(klass, tuple):
klass, args = klass
if isinstance(klass, tuple):
# old style reference
return "%s.%s" % klass
return "%s.%s" % (klass.__module__, klass.__name__)
def getGhost(self, pickle):
unpickler = self._get_unpickler(pickle)
klass = unpickler.load()
if isinstance(klass, tuple):
# Here we have a separate class and args.
            # This could be an old record, in which case the class is
            # given as a named (module, name) reference.
klass, args = klass
if isinstance(klass, tuple):
# Old module_name, class_name tuple
klass = self._get_class(*klass)
if args is None:
args = ()
else:
# Definitely new style direct class reference
args = ()
if issubclass(klass, Broken):
# We got a broken class. We might need to make it
# PersistentBroken
if not issubclass(klass, broken.PersistentBroken):
klass = broken.persistentBroken(klass)
return klass.__new__(klass, *args)
def getState(self, pickle):
unpickler = self._get_unpickler(pickle)
try:
unpickler.load() # skip the class metadata
return unpickler.load()
except EOFError, msg:
log = logging.getLogger("ZODB.serialize")
log.exception("Unpickling error: %r", pickle)
raise
def setGhostState(self, obj, pickle):
state = self.getState(pickle)
obj.__setstate__(state)
class ExternalReference(object):
pass
class SimpleObjectReader(BaseObjectReader):
"""Can be used to inspect a single object pickle.
It returns an ExternalReference() object for other persistent
objects. It can't instantiate the object.
"""
ext_ref = ExternalReference()
def _persistent_load(self, oid):
return self.ext_ref
def _get_class(self, module, name):
return None
class ConnectionObjectReader(BaseObjectReader):
def __init__(self, conn, cache, factory):
self._conn = conn
self._cache = cache
self._factory = factory
def _get_class(self, module, name):
return self._factory(self._conn, module, name)
def _get_unpickler(self, pickle):
unpickler = BaseObjectReader._get_unpickler(self, pickle)
factory = self._factory
conn = self._conn
def find_global(modulename, name):
return factory(conn, modulename, name)
unpickler.find_global = find_global
return unpickler
def _persistent_load(self, oid):
if isinstance(oid, tuple):
# Quick instance reference. We know all we need to know
# to create the instance w/o hitting the db, so go for it!
oid, klass = oid
obj = self._cache.get(oid, None) # XXX it's not a dict
if obj is not None:
return obj
if isinstance(klass, tuple):
klass = self._get_class(*klass)
if issubclass(klass, Broken):
# We got a broken class. We might need to make it
# PersistentBroken
if not issubclass(klass, broken.PersistentBroken):
klass = broken.persistentBroken(klass)
try:
obj = klass.__new__(klass)
except TypeError:
# Couldn't create the instance. Maybe there's more
# current data in the object's actual record!
return self._conn.get(oid)
# XXX should be done by connection
obj._p_oid = oid
obj._p_jar = self._conn
# When an object is created, it is put in the UPTODATE
# state. We must explicitly deactivate it to turn it into
# a ghost.
obj._p_changed = None
self._cache[oid] = obj
return obj
elif isinstance(oid, list):
# see weakref.py
[oid] = oid
obj = WeakRef.__new__(WeakRef)
obj.oid = oid
obj.dm = self._conn
return obj
obj = self._cache.get(oid, None)
if obj is not None:
return obj
return self._conn.get(oid)
def referencesf(p, rootl=None):
if rootl is None:
rootl = []
u = cPickle.Unpickler(cStringIO.StringIO(p))
l = len(rootl)
u.persistent_load = rootl
u.noload()
try:
u.noload()
except:
        # Hm. We failed to do the second load. Maybe there wasn't a
        # second pickle. Let's check:
f = cStringIO.StringIO(p)
u = cPickle.Unpickler(f)
u.persistent_load = []
u.noload()
if len(p) > f.tell():
raise ValueError, 'Error unpickling, %s' % p
# References may be:
#
# - A tuple, in which case they are an oid and class.
# In this case, just extract the first element, which is
# the oid
#
# - A list, which is a weak reference. We skip those.
#
# - Anything else must be an oid. This means that an oid
# may not be a list or a tuple. This is a bit lame.
# We could avoid this lamosity by allowing single-element
# tuples, so that we wrap oids that are lists or tuples in
# tuples.
#
# - oids may *not* be False. I'm not sure why.
out = []
for v in rootl:
assert v # Let's see if we ever get empty ones
if type(v) is list:
            # skip weakrefs
continue
if type(v) is tuple:
v = v[0]
out.append(v)
rootl[:] = out
return rootl
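# Illustrative sketch (hypothetical storage and oid): collecting the oids
# referenced by a stored data record.
#
#   data, serial = storage.load(oid, '')
#   refs = referencesf(data)    # list of referenced oids; weakrefs skipped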
<schema>
<import package="ZODB"/>
<section type="ZODB.storage" name="*" attribute="storage"/>
</schema>
=========================
Subtransactions in ZODB 3
=========================
ZODB 3 provides limited support for subtransactions. Subtransactions
are nested to *one* level. There are top-level transactions and
subtransactions. When a transaction is committed, a flag is passed
indicating whether it is a subtransaction or a top-level transaction.
Consider the following example commit calls:
- commit()
A regular top-level transaction is committed.
- commit(1)
A subtransaction is committed. There is now one subtransaction of
the current top-level transaction.
- commit(1)
A subtransaction is committed. There are now two subtransactions of
the current top-level transaction.
- abort(1)
A subtransaction is aborted. There are still two subtransactions of
the current top-level transaction; work done since the last
commit(1) call is discarded.
- commit()
We now commit a top-level transaction. The work done in the previous
two subtransactions *plus* work done since the last abort(1) call
is saved.
- commit(1)
A subtransaction is committed. There is now one subtransaction of
the current top-level transaction.
- commit(1)
A subtransaction is committed. There are now two subtransactions of
the current top-level transaction.
- abort()
We now abort a top-level transaction. We discard the work done in
the previous two subtransactions *plus* work done since the last
commit(1) call.
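A minimal sketch of the first sequence above, assuming the classic
``get_transaction()`` API and a hypothetical persistent object ``obj``::

  txn = get_transaction()
  obj.value = 1
  txn.commit(1)   # first subtransaction
  obj.value = 2
  txn.commit(1)   # second subtransaction
  obj.value = 3
  txn.abort(1)    # discards only the change made since the last commit(1)
  txn.commit()    # saves the work of both subtransactions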
##############################################################################
#
# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Run the basic tests for a storage as described in the official storage API
The most complete and most out-of-date description of the interface is:
http://www.zope.org/Documentation/Developer/Models/ZODB/ZODB_Architecture_Storage_Interface_Info.html
All storages should be able to pass these tests.
"""
from ZODB import POSException
from ZODB.tests.MinPO import MinPO
from ZODB.tests.StorageTestBase \
import zodb_unpickle, zodb_pickle, handle_serials
import transaction
ZERO = '\0'*8
class BasicStorage:
def checkBasics(self):
t = transaction.Transaction()
self._storage.tpc_begin(t)
# This should simply return
self._storage.tpc_begin(t)
# Aborting is easy
self._storage.tpc_abort(t)
# Test a few expected exceptions when we're doing operations giving a
# different Transaction object than the one we've begun on.
self._storage.tpc_begin(t)
self.assertRaises(
POSException.StorageTransactionError,
self._storage.store,
0, 0, 0, 0, transaction.Transaction())
try:
self._storage.abortVersion('dummy', transaction.Transaction())
except (POSException.StorageTransactionError,
POSException.VersionCommitError):
pass # test passed ;)
else:
assert 0, "Should have failed, invalid transaction."
try:
self._storage.commitVersion('dummy', 'dummer',
transaction.Transaction())
except (POSException.StorageTransactionError,
POSException.VersionCommitError):
pass # test passed ;)
else:
assert 0, "Should have failed, invalid transaction."
self.assertRaises(
POSException.StorageTransactionError,
self._storage.store,
0, 1, 2, 3, transaction.Transaction())
self._storage.tpc_abort(t)
def checkSerialIsNoneForInitialRevision(self):
eq = self.assertEqual
oid = self._storage.new_oid()
txn = transaction.Transaction()
self._storage.tpc_begin(txn)
# Use None for serial. Don't use _dostore() here because that coerces
# serial=None to serial=ZERO.
r1 = self._storage.store(oid, None, zodb_pickle(MinPO(11)),
'', txn)
r2 = self._storage.tpc_vote(txn)
self._storage.tpc_finish(txn)
newrevid = handle_serials(oid, r1, r2)
data, revid = self._storage.load(oid, '')
value = zodb_unpickle(data)
eq(value, MinPO(11))
eq(revid, newrevid)
def checkNonVersionStore(self):
revid = ZERO
newrevid = self._dostore(revid=None)
# Finish the transaction.
self.assertNotEqual(newrevid, revid)
def checkNonVersionStoreAndLoad(self):
eq = self.assertEqual
oid = self._storage.new_oid()
self._dostore(oid=oid, data=MinPO(7))
data, revid = self._storage.load(oid, '')
value = zodb_unpickle(data)
eq(value, MinPO(7))
# Now do a bunch of updates to an object
for i in range(13, 22):
revid = self._dostore(oid, revid=revid, data=MinPO(i))
# Now get the latest revision of the object
data, revid = self._storage.load(oid, '')
eq(zodb_unpickle(data), MinPO(21))
def checkNonVersionModifiedInVersion(self):
oid = self._storage.new_oid()
self._dostore(oid=oid)
self.assertEqual(self._storage.modifiedInVersion(oid), '')
def checkConflicts(self):
oid = self._storage.new_oid()
revid1 = self._dostore(oid, data=MinPO(11))
self._dostore(oid, revid=revid1, data=MinPO(12))
self.assertRaises(POSException.ConflictError,
self._dostore,
oid, revid=revid1, data=MinPO(13))
def checkWriteAfterAbort(self):
oid = self._storage.new_oid()
t = transaction.Transaction()
self._storage.tpc_begin(t)
self._storage.store(oid, ZERO, zodb_pickle(MinPO(5)), '', t)
# Now abort this transaction
self._storage.tpc_abort(t)
# Now start all over again
oid = self._storage.new_oid()
self._dostore(oid=oid, data=MinPO(6))
def checkAbortAfterVote(self):
oid1 = self._storage.new_oid()
revid1 = self._dostore(oid=oid1, data=MinPO(-2))
oid = self._storage.new_oid()
t = transaction.Transaction()
self._storage.tpc_begin(t)
self._storage.store(oid, ZERO, zodb_pickle(MinPO(5)), '', t)
# Now abort this transaction
self._storage.tpc_vote(t)
self._storage.tpc_abort(t)
# Now start all over again
oid = self._storage.new_oid()
revid = self._dostore(oid=oid, data=MinPO(6))
for oid, revid in [(oid1, revid1), (oid, revid)]:
data, _revid = self._storage.load(oid, '')
self.assertEqual(revid, _revid)
def checkStoreTwoObjects(self):
noteq = self.assertNotEqual
p31, p32, p51, p52 = map(MinPO, (31, 32, 51, 52))
oid1 = self._storage.new_oid()
oid2 = self._storage.new_oid()
noteq(oid1, oid2)
revid1 = self._dostore(oid1, data=p31)
revid2 = self._dostore(oid2, data=p51)
noteq(revid1, revid2)
revid3 = self._dostore(oid1, revid=revid1, data=p32)
revid4 = self._dostore(oid2, revid=revid2, data=p52)
noteq(revid3, revid4)
def checkGetSerial(self):
if not hasattr(self._storage, 'getSerial'):
return
eq = self.assertEqual
p41, p42 = map(MinPO, (41, 42))
oid = self._storage.new_oid()
self.assertRaises(KeyError, self._storage.getSerial, oid)
# Now store a revision
revid1 = self._dostore(oid, data=p41)
eq(revid1, self._storage.getSerial(oid))
# And another one
revid2 = self._dostore(oid, revid=revid1, data=p42)
eq(revid2, self._storage.getSerial(oid))
def checkTwoArgBegin(self):
# XXX how standard is three-argument tpc_begin()?
t = transaction.Transaction()
tid = '\0\0\0\0\0psu'
self._storage.tpc_begin(t, tid)
oid = self._storage.new_oid()
data = zodb_pickle(MinPO(8))
self._storage.store(oid, None, data, '', t)
self._storage.tpc_vote(t)
self._storage.tpc_finish(t)
def checkLen(self):
# len(storage) reports the number of objects.
# check it is zero when empty
self.assertEqual(len(self._storage),0)
        # check it is correct when the storage contains two objects.
# len may also be zero, for storages that do not keep track
# of this number
self._dostore(data=MinPO(22))
self._dostore(data=MinPO(23))
self.assert_(len(self._storage) in [0,2])
def checkGetSize(self):
self._dostore(data=MinPO(25))
size = self._storage.getSize()
# The storage API doesn't make any claims about what size
# means except that it ought to be printable.
str(size)
def checkNote(self):
oid = self._storage.new_oid()
t = transaction.Transaction()
self._storage.tpc_begin(t)
t.note('this is a test')
self._storage.store(oid, ZERO, zodb_pickle(MinPO(5)), '', t)
self._storage.tpc_vote(t)
self._storage.tpc_finish(t)
def checkGetExtensionMethods(self):
m = self._storage.getExtensionMethods()
self.assertEqual(type(m),type({}))
for k,v in m.items():
self.assertEqual(v,None)
self.assert_(callable(getattr(self._storage,k)))
##############################################################################
#
# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Tests for application-level conflict resolution."""
from ZODB.POSException import ConflictError, UndoError
from persistent import Persistent
from transaction import Transaction
from ZODB.tests.StorageTestBase import zodb_unpickle, zodb_pickle
class PCounter(Persistent):
_value = 0
def __repr__(self):
return "<PCounter %d>" % self._value
def inc(self):
self._value = self._value + 1
def _p_resolveConflict(self, oldState, savedState, newState):
savedDiff = savedState['_value'] - oldState['_value']
newDiff = newState['_value'] - oldState['_value']
oldState['_value'] = oldState['_value'] + savedDiff + newDiff
return oldState
# XXX What if _p_resolveConflict _thinks_ it resolved the
# conflict, but did something wrong?
class PCounter2(PCounter):
def _p_resolveConflict(self, oldState, savedState, newState):
raise ConflictError
class PCounter3(PCounter):
def _p_resolveConflict(self, oldState, savedState, newState):
raise AttributeError, "no attribute (testing conflict resolution)"
class PCounter4(PCounter):
def _p_resolveConflict(self, oldState, savedState):
raise RuntimeError, "Can't get here; not enough args"
class ConflictResolvingStorage:
def checkResolve(self):
obj = PCounter()
obj.inc()
oid = self._storage.new_oid()
revid1 = self._dostoreNP(oid, data=zodb_pickle(obj))
obj.inc()
obj.inc()
# The effect of committing two transactions with the same
# pickle is to commit two different transactions relative to
# revid1 that add two to _value.
revid2 = self._dostoreNP(oid, revid=revid1, data=zodb_pickle(obj))
revid3 = self._dostoreNP(oid, revid=revid1, data=zodb_pickle(obj))
data, serialno = self._storage.load(oid, '')
inst = zodb_unpickle(data)
self.assertEqual(inst._value, 5)
def checkUnresolvable(self):
obj = PCounter2()
obj.inc()
oid = self._storage.new_oid()
revid1 = self._dostoreNP(oid, data=zodb_pickle(obj))
obj.inc()
obj.inc()
# The effect of committing two transactions with the same
# pickle is to commit two different transactions relative to
# revid1 that add two to _value.
revid2 = self._dostoreNP(oid, revid=revid1, data=zodb_pickle(obj))
try:
self._dostoreNP(oid, revid=revid1, data=zodb_pickle(obj))
except ConflictError, err:
self.assert_("PCounter2" in str(err))
else:
self.fail("Expected ConflictError")
def checkZClassesArentResolved(self):
from ZODB.ConflictResolution import find_global, BadClassName
dummy_class_tuple = ('*foobar', ())
self.assertRaises(BadClassName, find_global, '*foobar', ())
def checkBuggyResolve1(self):
obj = PCounter3()
obj.inc()
oid = self._storage.new_oid()
revid1 = self._dostoreNP(oid, data=zodb_pickle(obj))
obj.inc()
obj.inc()
# The effect of committing two transactions with the same
# pickle is to commit two different transactions relative to
# revid1 that add two to _value.
revid2 = self._dostoreNP(oid, revid=revid1, data=zodb_pickle(obj))
self.assertRaises(ConflictError,
self._dostoreNP,
oid, revid=revid1, data=zodb_pickle(obj))
def checkBuggyResolve2(self):
obj = PCounter4()
obj.inc()
oid = self._storage.new_oid()
revid1 = self._dostoreNP(oid, data=zodb_pickle(obj))
obj.inc()
obj.inc()
# The effect of committing two transactions with the same
# pickle is to commit two different transactions relative to
# revid1 that add two to _value.
revid2 = self._dostoreNP(oid, revid=revid1, data=zodb_pickle(obj))
self.assertRaises(ConflictError,
self._dostoreNP,
oid, revid=revid1, data=zodb_pickle(obj))
class ConflictResolvingTransUndoStorage:
def checkUndoConflictResolution(self):
# This test is based on checkNotUndoable in the
# TransactionalUndoStorage test suite. Except here, conflict
# resolution should allow us to undo the transaction anyway.
obj = PCounter()
obj.inc()
oid = self._storage.new_oid()
revid_a = self._dostore(oid, data=obj)
obj.inc()
revid_b = self._dostore(oid, revid=revid_a, data=obj)
obj.inc()
revid_c = self._dostore(oid, revid=revid_b, data=obj)
# Start the undo
info = self._storage.undoInfo()
tid = info[1]['id']
t = Transaction()
self._storage.tpc_begin(t)
self._storage.undo(tid, t)
self._storage.tpc_finish(t)
def checkUndoUnresolvable(self):
# This test is based on checkNotUndoable in the
# TransactionalUndoStorage test suite. Except here, conflict
# resolution should allow us to undo the transaction anyway.
obj = PCounter2()
obj.inc()
oid = self._storage.new_oid()
revid_a = self._dostore(oid, data=obj)
obj.inc()
revid_b = self._dostore(oid, revid=revid_a, data=obj)
obj.inc()
revid_c = self._dostore(oid, revid=revid_b, data=obj)
# Start the undo
info = self._storage.undoInfo()
tid = info[1]['id']
t = Transaction()
self._storage.tpc_begin(t)
self.assertRaises(UndoError, self._storage.undo,
tid, t)
self._storage.tpc_abort(t)
##############################################################################
#
# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Do some minimal tests of data corruption"""
import os
import random
import stat
import tempfile
import ZODB, ZODB.FileStorage
from StorageTestBase import StorageTestBase
class FileStorageCorruptTests(StorageTestBase):
def setUp(self):
self.path = tempfile.mktemp()
self._storage = ZODB.FileStorage.FileStorage(self.path, create=1)
def tearDown(self):
self._storage.close()
self._storage.cleanup()
def _do_stores(self):
oids = []
for i in range(5):
oid = self._storage.new_oid()
revid = self._dostore(oid)
oids.append((oid, revid))
return oids
def _check_stores(self, oids):
for oid, revid in oids:
data, s_revid = self._storage.load(oid, '')
self.assertEqual(s_revid, revid)
def checkTruncatedIndex(self):
oids = self._do_stores()
self._close()
        # truncate the index file
path = self.path + '.index'
self.failUnless(os.path.exists(path))
f = open(path, 'r+')
f.seek(0, 2)
size = f.tell()
f.seek(size / 2)
f.truncate()
f.close()
self._storage = ZODB.FileStorage.FileStorage(self.path)
self._check_stores(oids)
def checkCorruptedIndex(self):
oids = self._do_stores()
self._close()
        # corrupt the index file
path = self.path + '.index'
self.failUnless(os.path.exists(path))
size = os.stat(path)[stat.ST_SIZE]
f = open(path, 'r+')
while f.tell() < size:
f.seek(random.randrange(1, size / 10), 1)
f.write('\000')
f.close()
self._storage = ZODB.FileStorage.FileStorage(self.path)
self._check_stores(oids)
##############################################################################
#
# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Run the history() related tests for a storage.
Any storage that supports the history() method should be able to pass
all these tests.
"""
from ZODB.tests.MinPO import MinPO
from transaction import Transaction
class HistoryStorage:
def checkSimpleHistory(self):
eq = self.assertEqual
# Store a couple of non-version revisions of the object
oid = self._storage.new_oid()
self.assertRaises(KeyError,self._storage.history,oid)
revid1 = self._dostore(oid, data=MinPO(11))
revid2 = self._dostore(oid, revid=revid1, data=MinPO(12))
revid3 = self._dostore(oid, revid=revid2, data=MinPO(13))
# Now get various snapshots of the object's history
h = self._storage.history(oid, size=1)
eq(len(h), 1)
d = h[0]
eq(d['tid'], revid3)
eq(d['version'], '')
# Try to get 2 historical revisions
h = self._storage.history(oid, size=2)
eq(len(h), 2)
d = h[0]
eq(d['tid'], revid3)
eq(d['version'], '')
d = h[1]
eq(d['tid'], revid2)
eq(d['version'], '')
# Try to get all 3 historical revisions
h = self._storage.history(oid, size=3)
eq(len(h), 3)
d = h[0]
eq(d['tid'], revid3)
eq(d['version'], '')
d = h[1]
eq(d['tid'], revid2)
eq(d['version'], '')
d = h[2]
eq(d['tid'], revid1)
eq(d['version'], '')
# There should be no more than 3 revisions
h = self._storage.history(oid, size=4)
eq(len(h), 3)
d = h[0]
eq(d['tid'], revid3)
eq(d['version'], '')
d = h[1]
eq(d['tid'], revid2)
eq(d['version'], '')
d = h[2]
eq(d['tid'], revid1)
eq(d['version'], '')
def checkVersionHistory(self):
if not self._storage.supportsVersions():
return
eq = self.assertEqual
# Store a couple of non-version revisions
oid = self._storage.new_oid()
revid1 = self._dostore(oid, data=MinPO(11))
revid2 = self._dostore(oid, revid=revid1, data=MinPO(12))
revid3 = self._dostore(oid, revid=revid2, data=MinPO(13))
# Now store some new revisions in a version
version = 'test-version'
revid4 = self._dostore(oid, revid=revid3, data=MinPO(14),
version=version)
revid5 = self._dostore(oid, revid=revid4, data=MinPO(15),
version=version)
revid6 = self._dostore(oid, revid=revid5, data=MinPO(16),
version=version)
# Now, try to get the six historical revisions (first three are in
# 'test-version', followed by the non-version revisions).
h = self._storage.history(oid, version, 100)
eq(len(h), 6)
d = h[0]
eq(d['tid'], revid6)
eq(d['version'], version)
d = h[1]
eq(d['tid'], revid5)
eq(d['version'], version)
d = h[2]
eq(d['tid'], revid4)
eq(d['version'], version)
d = h[3]
eq(d['tid'], revid3)
eq(d['version'], '')
d = h[4]
eq(d['tid'], revid2)
eq(d['version'], '')
d = h[5]
eq(d['tid'], revid1)
eq(d['version'], '')
def checkHistoryAfterVersionCommit(self):
if not self._storage.supportsVersions():
return
eq = self.assertEqual
# Store a couple of non-version revisions
oid = self._storage.new_oid()
revid1 = self._dostore(oid, data=MinPO(11))
revid2 = self._dostore(oid, revid=revid1, data=MinPO(12))
revid3 = self._dostore(oid, revid=revid2, data=MinPO(13))
# Now store some new revisions in a version
version = 'test-version'
revid4 = self._dostore(oid, revid=revid3, data=MinPO(14),
version=version)
revid5 = self._dostore(oid, revid=revid4, data=MinPO(15),
version=version)
revid6 = self._dostore(oid, revid=revid5, data=MinPO(16),
version=version)
# Now commit the version
t = Transaction()
self._storage.tpc_begin(t)
self._storage.commitVersion(version, '', t)
self._storage.tpc_vote(t)
self._storage.tpc_finish(t)
# After consultation with Jim, we agreed that the semantics of
# revision id's after a version commit is that the committed object
# gets a new serial number (a.k.a. revision id). Note that
# FileStorage is broken here; the serial number in the post-commit
# non-version revision will be the same as the serial number of the
# previous in-version revision.
#
# BAW: Using load() is the only way to get the serial number of the
# current revision of the object. But at least this works for both
# broken and working storages.
ign, revid7 = self._storage.load(oid, '')
# Now, try to get the six historical revisions (first three are in
# 'test-version', followed by the non-version revisions).
h = self._storage.history(oid, version, 100)
eq(len(h), 7)
d = h[0]
eq(d['tid'], revid7)
eq(d['version'], '')
d = h[1]
eq(d['tid'], revid6)
eq(d['version'], version)
d = h[2]
eq(d['tid'], revid5)
eq(d['version'], version)
d = h[3]
eq(d['tid'], revid4)
eq(d['version'], version)
d = h[4]
eq(d['tid'], revid3)
eq(d['version'], '')
d = h[5]
eq(d['tid'], revid2)
eq(d['version'], '')
d = h[6]
eq(d['tid'], revid1)
eq(d['version'], '')
def checkHistoryAfterVersionAbort(self):
if not self._storage.supportsVersions():
return
eq = self.assertEqual
# Store a couple of non-version revisions
oid = self._storage.new_oid()
revid1 = self._dostore(oid, data=MinPO(11))
revid2 = self._dostore(oid, revid=revid1, data=MinPO(12))
revid3 = self._dostore(oid, revid=revid2, data=MinPO(13))
# Now store some new revisions in a version
version = 'test-version'
revid4 = self._dostore(oid, revid=revid3, data=MinPO(14),
version=version)
revid5 = self._dostore(oid, revid=revid4, data=MinPO(15),
version=version)
revid6 = self._dostore(oid, revid=revid5, data=MinPO(16),
version=version)
# Now commit the version
t = Transaction()
self._storage.tpc_begin(t)
self._storage.abortVersion(version, t)
self._storage.tpc_vote(t)
self._storage.tpc_finish(t)
# After consultation with Jim, we agreed that the semantics of
# revision id's after a version commit is that the committed object
# gets a new serial number (a.k.a. revision id). Note that
# FileStorage is broken here; the serial number in the post-commit
# non-version revision will be the same as the serial number of the
# previous in-version revision.
#
# BAW: Using load() is the only way to get the serial number of the
# current revision of the object. But at least this works for both
# broken and working storages.
ign, revid7 = self._storage.load(oid, '')
# Now, try to get the six historical revisions (first three are in
# 'test-version', followed by the non-version revisions).
h = self._storage.history(oid, version, 100)
eq(len(h), 7)
d = h[0]
eq(d['tid'], revid7)
eq(d['version'], '')
d = h[1]
eq(d['tid'], revid6)
eq(d['version'], version)
d = h[2]
eq(d['tid'], revid5)
eq(d['version'], version)
d = h[3]
eq(d['tid'], revid4)
eq(d['version'], version)
d = h[4]
eq(d['tid'], revid3)
eq(d['version'], '')
d = h[5]
eq(d['tid'], revid2)
eq(d['version'], '')
d = h[6]
eq(d['tid'], revid1)
eq(d['version'], '')
##############################################################################
#
# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Run tests against the iterator() interface for storages.
Any storage that supports the iterator() method should be able to pass
all these tests.
"""
from ZODB.tests.MinPO import MinPO
from ZODB.tests.StorageTestBase import zodb_pickle, zodb_unpickle
from ZODB.utils import U64, p64
from transaction import Transaction
class IteratorCompare:
def iter_verify(self, txniter, revids, val0):
eq = self.assertEqual
oid = self._oid
val = val0
for reciter, revid in zip(txniter, revids + [None]):
eq(reciter.tid, revid)
for rec in reciter:
eq(rec.oid, oid)
eq(rec.tid, revid)
eq(rec.version, '')
eq(zodb_unpickle(rec.data), MinPO(val))
val = val + 1
eq(val, val0 + len(revids))
txniter.close()
class IteratorStorage(IteratorCompare):
def checkSimpleIteration(self):
# Store a bunch of revisions of a single object
self._oid = oid = self._storage.new_oid()
revid1 = self._dostore(oid, data=MinPO(11))
revid2 = self._dostore(oid, revid=revid1, data=MinPO(12))
revid3 = self._dostore(oid, revid=revid2, data=MinPO(13))
# Now iterate over all the transactions and compare carefully
txniter = self._storage.iterator()
self.iter_verify(txniter, [revid1, revid2, revid3], 11)
def checkClose(self):
self._oid = oid = self._storage.new_oid()
revid1 = self._dostore(oid, data=MinPO(11))
txniter = self._storage.iterator()
txniter.close()
self.assertRaises(IOError, txniter.__getitem__, 0)
def checkVersionIterator(self):
if not self._storage.supportsVersions():
return
self._dostore()
self._dostore(version='abort')
self._dostore()
self._dostore(version='abort')
t = Transaction()
self._storage.tpc_begin(t)
self._storage.abortVersion('abort', t)
self._storage.tpc_vote(t)
self._storage.tpc_finish(t)
self._dostore(version='commit')
self._dostore()
self._dostore(version='commit')
t = Transaction()
self._storage.tpc_begin(t)
self._storage.commitVersion('commit', '', t)
self._storage.tpc_vote(t)
self._storage.tpc_finish(t)
txniter = self._storage.iterator()
for trans in txniter:
for data in trans:
pass
def checkUndoZombieNonVersion(self):
if not hasattr(self._storage, 'supportsTransactionalUndo'):
return
if not self._storage.supportsTransactionalUndo():
return
oid = self._storage.new_oid()
revid = self._dostore(oid, data=MinPO(94))
# Get the undo information
info = self._storage.undoInfo()
tid = info[0]['id']
# Undo the creation of the object, rendering it a zombie
t = Transaction()
self._storage.tpc_begin(t)
oids = self._storage.undo(tid, t)
self._storage.tpc_vote(t)
self._storage.tpc_finish(t)
        # Now attempt to iterate over the storage
iter = self._storage.iterator()
for txn in iter:
for rec in txn:
pass
# The last transaction performed an undo of the transaction that
# created object oid. (As Barry points out, the object is now in the
# George Bailey state.) Assert that the final data record contains
# None in the data attribute.
self.assertEqual(rec.oid, oid)
self.assertEqual(rec.data, None)
def checkTransactionExtensionFromIterator(self):
oid = self._storage.new_oid()
revid = self._dostore(oid, data=MinPO(1))
iter = self._storage.iterator()
count = 0
for txn in iter:
self.assertEqual(txn._extension, {})
            count += 1
self.assertEqual(count, 1)
def checkIterationIntraTransaction(self):
# XXX try this test with logging enabled. If you see something like
#
# ZODB FS FS21 warn: FileStorageTests.fs truncated, possibly due to
# damaged records at 4
#
# Then the code in FileIterator.next() hasn't yet been fixed.
oid = self._storage.new_oid()
t = Transaction()
data = zodb_pickle(MinPO(0))
try:
self._storage.tpc_begin(t)
self._storage.store(oid, '\0'*8, data, '', t)
self._storage.tpc_vote(t)
# Don't do tpc_finish yet
it = self._storage.iterator()
for x in it:
pass
finally:
self._storage.tpc_finish(t)
def checkLoadEx(self):
oid = self._storage.new_oid()
self._dostore(oid, data=42)
data, tid, ver = self._storage.loadEx(oid, "")
self.assertEqual(zodb_unpickle(data), MinPO(42))
match = False
for txn in self._storage.iterator():
for rec in txn:
if rec.oid == oid and rec.tid == tid:
self.assertEqual(txn.tid, tid)
match = True
if not match:
self.fail("Could not find transaction with matching id")
class ExtendedIteratorStorage(IteratorCompare):
def checkExtendedIteration(self):
# Store a bunch of revisions of a single object
self._oid = oid = self._storage.new_oid()
revid1 = self._dostore(oid, data=MinPO(11))
revid2 = self._dostore(oid, revid=revid1, data=MinPO(12))
revid3 = self._dostore(oid, revid=revid2, data=MinPO(13))
revid4 = self._dostore(oid, revid=revid3, data=MinPO(14))
# Note that the end points are included
# Iterate over all of the transactions with explicit start/stop
txniter = self._storage.iterator(revid1, revid4)
self.iter_verify(txniter, [revid1, revid2, revid3, revid4], 11)
# Iterate over some of the transactions with explicit start
txniter = self._storage.iterator(revid3)
self.iter_verify(txniter, [revid3, revid4], 13)
# Iterate over some of the transactions with explicit stop
txniter = self._storage.iterator(None, revid2)
self.iter_verify(txniter, [revid1, revid2], 11)
# Iterate over some of the transactions with explicit start+stop
txniter = self._storage.iterator(revid2, revid3)
self.iter_verify(txniter, [revid2, revid3], 12)
# Specify an upper bound somewhere in between values
revid3a = p64((U64(revid3) + U64(revid4)) / 2)
txniter = self._storage.iterator(revid2, revid3a)
self.iter_verify(txniter, [revid2, revid3], 12)
        # Specify a lower bound somewhere in between values.
        # revid2 == revid1+1 is very likely on Windows.  Adding 1 before
        # dividing ensures that "the midpoint" we compute is strictly larger
        # than revid1 (a standalone sketch of this arithmetic follows this
        # class).
revid1a = p64((U64(revid1) + 1 + U64(revid2)) / 2)
assert revid1 < revid1a
txniter = self._storage.iterator(revid1a, revid3a)
self.iter_verify(txniter, [revid2, revid3], 12)
# Specify an empty range
txniter = self._storage.iterator(revid3, revid2)
self.iter_verify(txniter, [], 13)
# Specify a singleton range
txniter = self._storage.iterator(revid3, revid3)
self.iter_verify(txniter, [revid3], 13)
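# A standalone sketch (illustration only, not used by the tests) of the
# midpoint arithmetic in checkExtendedIteration() above: U64() turns an
# 8-byte tid into an integer, p64() turns it back.  Assumes the two tids
# differ by at least 2.
def _tid_strictly_between(tid_lo, tid_hi):
    # Adding 1 before dividing guarantees the result is strictly larger than
    # tid_lo; integer division keeps it strictly smaller than tid_hi.
    return p64((U64(tid_lo) + 1 + U64(tid_hi)) / 2)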
class IteratorDeepCompare:
def compare(self, storage1, storage2):
eq = self.assertEqual
iter1 = storage1.iterator()
iter2 = storage2.iterator()
for txn1, txn2 in zip(iter1, iter2):
eq(txn1.tid, txn2.tid)
eq(txn1.status, txn2.status)
eq(txn1.user, txn2.user)
eq(txn1.description, txn2.description)
eq(txn1._extension, txn2._extension)
for rec1, rec2 in zip(txn1, txn2):
eq(rec1.oid, rec2.oid)
eq(rec1.tid, rec2.tid)
eq(rec1.version, rec2.version)
eq(rec1.data, rec2.data)
            # Make sure there are no more records left in txn1 and txn2,
            # meaning they were the same length.
self.assertRaises(IndexError, txn1.next)
self.assertRaises(IndexError, txn2.next)
        # Make sure there are no more transactions left in iter1 and iter2,
        # meaning they were the same length.
self.assertRaises(IndexError, iter1.next)
self.assertRaises(IndexError, iter2.next)
iter1.close()
iter2.close()
##############################################################################
#
# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
class LocalStorage:
"""A single test that only make sense for local storages.
A local storage is one that doens't use ZEO. The __len__()
implementation for ZEO is inexact.
"""
def checkLen(self):
eq = self.assertEqual
# The length of the database ought to grow by one each time
eq(len(self._storage), 0)
self._dostore()
eq(len(self._storage), 1)
self._dostore()
eq(len(self._storage), 2)
import random
import sys
import threading
import time
from persistent.mapping import PersistentMapping
import transaction
import ZODB
from ZODB.tests.StorageTestBase \
import zodb_pickle, zodb_unpickle, handle_serials
from ZODB.tests.MinPO import MinPO
from ZODB.POSException import ConflictError
SHORT_DELAY = 0.01
def sort(l):
"Sort a list in place and return it."
l.sort()
return l
class TestThread(threading.Thread):
"""Base class for defining threads that run from unittest.
If the thread exits with an uncaught exception, catch it and
re-raise it when the thread is joined. The re-raise will cause
the test to fail.
The subclass should define a runtest() method instead of a run()
method.
"""
def __init__(self):
threading.Thread.__init__(self)
self._exc_info = None
def run(self):
try:
self.runtest()
except:
self._exc_info = sys.exc_info()
def join(self, timeout=None):
threading.Thread.join(self, timeout)
if self._exc_info:
raise self._exc_info[0], self._exc_info[1], self._exc_info[2]
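# A minimal, hypothetical sketch (not used by the tests) of the pattern the
# TestThread docstring describes: an exception raised inside runtest() is
# re-raised out of join() in the joining thread, where unittest can see it.
def _demo_testthread_reraise():
    class _Failing(TestThread):
        def runtest(self):
            raise RuntimeError('boom')
    t = _Failing()
    t.start()
    try:
        t.join()
    except RuntimeError:
        return True  # the worker's exception surfaced here, as intended
    return False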
class ZODBClientThread(TestThread):
__super_init = TestThread.__init__
def __init__(self, db, test, commits=10, delay=SHORT_DELAY):
self.__super_init()
self.setDaemon(1)
self.db = db
self.test = test
self.commits = commits
self.delay = delay
def runtest(self):
conn = self.db.open()
conn.sync()
root = conn.root()
d = self.get_thread_dict(root)
if d is None:
self.test.fail()
else:
for i in range(self.commits):
self.commit(d, i)
self.test.assertEqual(sort(d.keys()), range(self.commits))
def commit(self, d, num):
d[num] = time.time()
time.sleep(self.delay)
transaction.commit()
time.sleep(self.delay)
def get_thread_dict(self, root):
name = self.getName()
# arbitrarily limit to 10 re-tries
for i in range(10):
try:
m = PersistentMapping()
root[name] = m
transaction.commit()
break
except ConflictError, err:
transaction.abort()
root._p_jar.sync()
for i in range(10):
try:
return root.get(name)
except ConflictError:
transaction.abort()
class StorageClientThread(TestThread):
__super_init = TestThread.__init__
def __init__(self, storage, test, commits=10, delay=SHORT_DELAY):
self.__super_init()
self.storage = storage
self.test = test
self.commits = commits
self.delay = delay
self.oids = {}
def runtest(self):
for i in range(self.commits):
self.dostore(i)
self.check()
def check(self):
for oid, revid in self.oids.items():
data, serial = self.storage.load(oid, '')
self.test.assertEqual(serial, revid)
obj = zodb_unpickle(data)
self.test.assertEqual(obj.value[0], self.getName())
def pause(self):
time.sleep(self.delay)
def oid(self):
oid = self.storage.new_oid()
self.oids[oid] = None
return oid
def dostore(self, i):
data = zodb_pickle(MinPO((self.getName(), i)))
t = transaction.Transaction()
oid = self.oid()
self.pause()
self.storage.tpc_begin(t)
self.pause()
# Always create a new object, signified by None for revid
r1 = self.storage.store(oid, None, data, '', t)
self.pause()
r2 = self.storage.tpc_vote(t)
self.pause()
self.storage.tpc_finish(t)
self.pause()
revid = handle_serials(oid, r1, r2)
self.oids[oid] = revid
class ExtStorageClientThread(StorageClientThread):
def runtest(self):
# pick some other storage ops to execute, depending in part
# on the features provided by the storage.
names = ["do_load", "do_modifiedInVersion"]
if self.storage.supportsUndo():
names += ["do_loadSerial", "do_undoLog", "do_iterator"]
ops = [getattr(self, meth) for meth in names]
        assert ops, "Didn't find any storage ops in %s" % self.storage
# do a store to guarantee there's at least one oid in self.oids
self.dostore(0)
for i in range(self.commits - 1):
meth = random.choice(ops)
meth()
self.dostore(i)
self.check()
def pick_oid(self):
return random.choice(self.oids.keys())
def do_load(self):
oid = self.pick_oid()
self.storage.load(oid, '')
def do_loadSerial(self):
oid = self.pick_oid()
self.storage.loadSerial(oid, self.oids[oid])
def do_modifiedInVersion(self):
oid = self.pick_oid()
self.storage.modifiedInVersion(oid)
def do_undoLog(self):
self.storage.undoLog(0, -20)
def do_iterator(self):
try:
iter = self.storage.iterator()
except AttributeError:
# XXX It's hard to detect that a ZEO ClientStorage
# doesn't have this method, but does have all the others.
return
for obj in iter:
pass
class MTStorage:
"Test a storage with multiple client threads executing concurrently."
def _checkNThreads(self, n, constructor, *args):
threads = [constructor(*args) for i in range(n)]
for t in threads:
t.start()
for t in threads:
t.join(60)
for t in threads:
self.failIf(t.isAlive(), "thread failed to finish in 60 seconds")
def check2ZODBThreads(self):
db = ZODB.DB(self._storage)
self._checkNThreads(2, ZODBClientThread, db, self)
db.close()
def check7ZODBThreads(self):
db = ZODB.DB(self._storage)
self._checkNThreads(7, ZODBClientThread, db, self)
db.close()
def check2StorageThreads(self):
self._checkNThreads(2, StorageClientThread, self._storage, self)
def check7StorageThreads(self):
self._checkNThreads(7, StorageClientThread, self._storage, self)
def check4ExtStorageThread(self):
self._checkNThreads(4, ExtStorageClientThread, self._storage, self)
##############################################################################
#
# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""A minimal persistent object to use for tests"""
from persistent import Persistent
class MinPO(Persistent):
def __init__(self, value=None):
self.value = value
def __cmp__(self, aMinPO):
return cmp(self.value, aMinPO.value)
def __repr__(self):
return "MinPO(%s)" % self.value
##############################################################################
#
# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Run some tests relevant for storages that support pack()."""
try:
import cPickle
pickle = cPickle
except ImportError:
import pickle
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
import time
from persistent import Persistent
from persistent.mapping import PersistentMapping
import transaction
from ZODB import DB
from ZODB.serialize import referencesf
from ZODB.tests.MinPO import MinPO
from ZODB.tests.StorageTestBase import snooze
from ZODB.POSException import ConflictError, StorageError
from ZODB.tests.MTStorage import TestThread
ZERO = '\0'*8
# This class is for the root object. It must not contain a getoid() method
# (really, attribute). The persistent pickling machinery -- in the dumps()
# function below -- will pickle Root objects as normal, but any attributes
# which reference persistent Object instances will get pickled as persistent
# ids, not as the object's state.  This makes the referencesf stuff work,
# because referencesf sniffs the pickle for persistent ids (so we have to
# get those persistent ids into the root object's pickle).
class Root:
pass
# This is the persistent Object class. Because it has a getoid() method, the
# persistent pickling machinery -- in the dumps() function below -- will
# pickle the oid string instead of the object's actual state. Yee haw, this
# stuff is deep. ;)
class Object:
def __init__(self, oid):
self._oid = oid
def getoid(self):
return self._oid
class C(Persistent):
pass
# Here's where all the magic occurs. Sadly, the pickle module is a bit
# underdocumented, but here's what happens: by setting the persistent_id
# attribute to getpersid() on the pickler, that function gets called for every
# object being pickled. By returning None when the object has no getoid
# attribute, it signals pickle to serialize the object as normal. That's how
# the Root instance gets pickled correctly. But, if the object has a getoid
# attribute, then by returning that method's value, we tell pickle to
# serialize the persistent id of the object instead of the object's state.
# That sets the pickle up for proper sniffing by the referencesf machinery.
# Fun, huh?
def dumps(obj):
def getpersid(obj):
if hasattr(obj, 'getoid'):
return obj.getoid()
return None
s = StringIO()
p = pickle.Pickler(s)
p.persistent_id = getpersid
p.dump(obj)
return s.getvalue()
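# A small, hypothetical demonstration of the asymmetry described above: a
# Root pickles normally, while any Object attribute inside it is reduced to
# its persistent id (the oid value below is made up).
def _demo_persistent_id_pickling():
    oid = 'demo-oid-1'
    root = Root()
    root.obj = Object(oid)
    root_pickle = dumps(root)
    # With protocol 0 the persistent id is written verbatim into the stream,
    # so the oid shows up in the Root's pickle in place of the Object state.
    assert oid in root_pickle
    return root_pickle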
class PackableStorageBase:
# We keep a cache of object ids to instances so that the unpickler can
# easily return any persistent object.
_cache = {}
def _newobj(self):
# This is a convenience method to create a new persistent Object
# instance. It asks the storage for a new object id, creates the
# instance with the given oid, populates the cache and returns the
# object.
oid = self._storage.new_oid()
obj = Object(oid)
self._cache[obj.getoid()] = obj
return obj
def _makeloader(self):
# This is the other side of the persistent pickling magic. We need a
# custom unpickler to mirror our custom pickler above. By setting the
# persistent_load function of the unpickler to self._cache.get(),
# whenever a persistent id is unpickled, it will actually return the
        # Object instance out of the cache.  We return a closure with the
        # cache's get() bound as a default argument because it makes the
        # code in the tests more succinct.
#
# BUT! Be careful in your use of loads() vs. pickle.loads(). loads()
# should only be used on the Root object's pickle since it's the only
# special one. All the Object instances should use pickle.loads().
def loads(str, persfunc=self._cache.get):
fp = StringIO(str)
u = pickle.Unpickler(fp)
u.persistent_load = persfunc
return u.load()
return loads
def _initroot(self):
try:
self._storage.load(ZERO, '')
except KeyError:
from transaction import Transaction
file = StringIO()
            p = pickle.Pickler(file, 1)
            p.dump((PersistentMapping, None))
            p.dump({'_container': {}})
            t = Transaction()
            t.description = 'initial database creation'
self._storage.tpc_begin(t)
self._storage.store(ZERO, None, file.getvalue(), '', t)
self._storage.tpc_vote(t)
self._storage.tpc_finish(t)
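    def _roundtrip_example(self):
        # A minimal, hypothetical sketch (not a test) of the loads()/dumps()
        # pairing described in _makeloader(): only the Root pickle goes
        # through loads(), and the persistent id inside it is resolved
        # straight out of self._cache.
        obj = self._newobj()
        root = Root()
        root.obj = obj
        loads = self._makeloader()
        copy = loads(dumps(root))
        assert copy.obj is obj  # persistent id resolved from the cache
        return copy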
class PackableStorage(PackableStorageBase):
def checkPackEmptyStorage(self):
self._storage.pack(time.time(), referencesf)
def checkPackTomorrow(self):
self._initroot()
self._storage.pack(time.time() + 10000, referencesf)
def checkPackYesterday(self):
self._initroot()
self._storage.pack(time.time() - 10000, referencesf)
def _PackWhileWriting(self, pack_now):
# A storage should allow some reading and writing during
# a pack. This test attempts to exercise locking code
# in the storage to test that it is safe. It generates
# a lot of revisions, so that pack takes a long time.
db = DB(self._storage)
conn = db.open()
root = conn.root()
for i in range(10):
root[i] = MinPO(i)
transaction.commit()
snooze()
packt = time.time()
choices = range(10)
for dummy in choices:
for i in choices:
root[i].value = MinPO(i)
transaction.commit()
# How many client threads should we run, and how long should we
# wait for them to finish? Hard to say. Running 4 threads and
# waiting 30 seconds too often left a thread still alive on Tim's
# Win98SE box, during ZEO flavors of this test. Those tend to
# run one thread at a time to completion, and take about 10 seconds
# per thread. There doesn't appear to be a compelling reason to
# run that many threads. Running 3 threads and waiting up to a
# minute seems to work well in practice. The ZEO tests normally
# finish faster than that, and the non-ZEO tests very much faster
# than that.
NUM_LOOP_TRIP = 50
timer = ElapsedTimer(time.time())
threads = [ClientThread(db, choices, NUM_LOOP_TRIP, timer, i)
for i in range(3)]
for t in threads:
t.start()
if pack_now:
db.pack(time.time())
else:
db.pack(packt)
for t in threads:
t.join(60)
liveness = [t.isAlive() for t in threads]
if True in liveness:
# They should have finished by now.
print 'Liveness:', liveness
# Combine the outcomes, and sort by start time.
outcomes = []
for t in threads:
outcomes.extend(t.outcomes)
# each outcome list has as many of these as a loop trip got thru:
# thread_id
# elapsed millis at loop top
# elapsed millis at attempt to assign to self.root[index]
# index into self.root getting replaced
# elapsed millis when outcome known
# 'OK' or 'Conflict'
# True if we got beyond this line, False if it raised an
# exception (one possible Conflict cause):
# self.root[index].value = MinPO(j)
def cmp_by_time(a, b):
return cmp((a[1], a[0]), (b[1], b[0]))
outcomes.sort(cmp_by_time)
counts = [0] * 4
for outcome in outcomes:
n = len(outcome)
assert n >= 2
tid = outcome[0]
print 'tid:%d top:%5d' % (tid, outcome[1]),
if n > 2:
print 'commit:%5d' % outcome[2],
if n > 3:
print 'index:%2d' % outcome[3],
if n > 4:
print 'known:%5d' % outcome[4],
if n > 5:
print '%8s' % outcome[5],
if n > 6:
print 'assigned:%5s' % outcome[6],
counts[tid] += 1
if counts[tid] == NUM_LOOP_TRIP:
print 'thread %d done' % tid,
print
self.fail('a thread is still alive')
# Iterate over the storage to make sure it's sane, but not every
# storage supports iterators.
if not hasattr(self._storage, "iterator"):
return
it = self._storage.iterator()
for txn in it:
for data in txn:
pass
it.close()
def checkPackWhileWriting(self):
self._PackWhileWriting(pack_now=False)
def checkPackNowWhileWriting(self):
self._PackWhileWriting(pack_now=True)
def checkPackLotsWhileWriting(self):
# This is like the other pack-while-writing tests, except it packs
# repeatedly until the client thread is done. At the time it was
# introduced, it reliably provoked
# CorruptedError: ... transaction with checkpoint flag set
# in the ZEO flavor of the FileStorage tests.
db = DB(self._storage)
conn = db.open()
root = conn.root()
choices = range(10)
for i in choices:
root[i] = MinPO(i)
transaction.commit()
snooze()
packt = time.time()
for dummy in choices:
for i in choices:
root[i].value = MinPO(i)
transaction.commit()
NUM_LOOP_TRIP = 100
timer = ElapsedTimer(time.time())
thread = ClientThread(db, choices, NUM_LOOP_TRIP, timer, 0)
thread.start()
while thread.isAlive():
db.pack(packt)
snooze()
packt = time.time()
thread.join()
# Iterate over the storage to make sure it's sane.
if not hasattr(self._storage, "iterator"):
return
it = self._storage.iterator()
for txn in it:
for data in txn:
pass
it.close()
class PackableUndoStorage(PackableStorageBase):
def checkPackAllRevisions(self):
self._initroot()
eq = self.assertEqual
raises = self.assertRaises
# Create a `persistent' object
obj = self._newobj()
oid = obj.getoid()
obj.value = 1
# Commit three different revisions
revid1 = self._dostoreNP(oid, data=pickle.dumps(obj))
obj.value = 2
revid2 = self._dostoreNP(oid, revid=revid1, data=pickle.dumps(obj))
obj.value = 3
revid3 = self._dostoreNP(oid, revid=revid2, data=pickle.dumps(obj))
# Now make sure all three revisions can be extracted
data = self._storage.loadSerial(oid, revid1)
pobj = pickle.loads(data)
eq(pobj.getoid(), oid)
eq(pobj.value, 1)
data = self._storage.loadSerial(oid, revid2)
pobj = pickle.loads(data)
eq(pobj.getoid(), oid)
eq(pobj.value, 2)
data = self._storage.loadSerial(oid, revid3)
pobj = pickle.loads(data)
eq(pobj.getoid(), oid)
eq(pobj.value, 3)
        # Now pack all transactions; busy-wait until the clock advances so
        # that the pack time is strictly greater than the last commit time.
now = packtime = time.time()
while packtime <= now:
packtime = time.time()
self._storage.pack(packtime, referencesf)
# All revisions of the object should be gone, since there is no
# reference from the root object to this object.
raises(KeyError, self._storage.loadSerial, oid, revid1)
raises(KeyError, self._storage.loadSerial, oid, revid2)
raises(KeyError, self._storage.loadSerial, oid, revid3)
def checkPackJustOldRevisions(self):
eq = self.assertEqual
raises = self.assertRaises
loads = self._makeloader()
# Create a root object. This can't be an instance of Object,
# otherwise the pickling machinery will serialize it as a persistent
# id and not as an object that contains references (persistent ids) to
# other objects.
root = Root()
# Create a persistent object, with some initial state
obj = self._newobj()
oid = obj.getoid()
# Link the root object to the persistent object, in order to keep the
# persistent object alive. Store the root object.
root.obj = obj
root.value = 0
revid0 = self._dostoreNP(ZERO, data=dumps(root))
# Make sure the root can be retrieved
data, revid = self._storage.load(ZERO, '')
eq(revid, revid0)
eq(loads(data).value, 0)
# Commit three different revisions of the other object
obj.value = 1
revid1 = self._dostoreNP(oid, data=pickle.dumps(obj))
obj.value = 2
revid2 = self._dostoreNP(oid, revid=revid1, data=pickle.dumps(obj))
obj.value = 3
revid3 = self._dostoreNP(oid, revid=revid2, data=pickle.dumps(obj))
# Now make sure all three revisions can be extracted
data = self._storage.loadSerial(oid, revid1)
pobj = pickle.loads(data)
eq(pobj.getoid(), oid)
eq(pobj.value, 1)
data = self._storage.loadSerial(oid, revid2)
pobj = pickle.loads(data)
eq(pobj.getoid(), oid)
eq(pobj.value, 2)
data = self._storage.loadSerial(oid, revid3)
pobj = pickle.loads(data)
eq(pobj.getoid(), oid)
eq(pobj.value, 3)
# Now pack just revisions 1 and 2. The object's current revision
# should stay alive because it's pointed to by the root.
now = packtime = time.time()
while packtime <= now:
packtime = time.time()
self._storage.pack(packtime, referencesf)
# Make sure the revisions are gone, but that object zero and revision
# 3 are still there and correct
data, revid = self._storage.load(ZERO, '')
eq(revid, revid0)
eq(loads(data).value, 0)
raises(KeyError, self._storage.loadSerial, oid, revid1)
raises(KeyError, self._storage.loadSerial, oid, revid2)
data = self._storage.loadSerial(oid, revid3)
pobj = pickle.loads(data)
eq(pobj.getoid(), oid)
eq(pobj.value, 3)
data, revid = self._storage.load(oid, '')
eq(revid, revid3)
pobj = pickle.loads(data)
eq(pobj.getoid(), oid)
eq(pobj.value, 3)
def checkPackOnlyOneObject(self):
eq = self.assertEqual
raises = self.assertRaises
loads = self._makeloader()
# Create a root object. This can't be an instance of Object,
# otherwise the pickling machinery will serialize it as a persistent
# id and not as an object that contains references (persistent ids) to
# other objects.
root = Root()
# Create a persistent object, with some initial state
obj1 = self._newobj()
oid1 = obj1.getoid()
# Create another persistent object, with some initial state. Make
        # sure its oid is greater than the first object's oid.
obj2 = self._newobj()
oid2 = obj2.getoid()
self.failUnless(oid2 > oid1)
# Link the root object to the persistent objects, in order to keep
# them alive. Store the root object.
root.obj1 = obj1
root.obj2 = obj2
root.value = 0
revid0 = self._dostoreNP(ZERO, data=dumps(root))
# Make sure the root can be retrieved
data, revid = self._storage.load(ZERO, '')
eq(revid, revid0)
eq(loads(data).value, 0)
# Commit three different revisions of the first object
obj1.value = 1
revid1 = self._dostoreNP(oid1, data=pickle.dumps(obj1))
obj1.value = 2
revid2 = self._dostoreNP(oid1, revid=revid1, data=pickle.dumps(obj1))
obj1.value = 3
revid3 = self._dostoreNP(oid1, revid=revid2, data=pickle.dumps(obj1))
# Now make sure all three revisions can be extracted
data = self._storage.loadSerial(oid1, revid1)
pobj = pickle.loads(data)
eq(pobj.getoid(), oid1)
eq(pobj.value, 1)
data = self._storage.loadSerial(oid1, revid2)
pobj = pickle.loads(data)
eq(pobj.getoid(), oid1)
eq(pobj.value, 2)
data = self._storage.loadSerial(oid1, revid3)
pobj = pickle.loads(data)
eq(pobj.getoid(), oid1)
eq(pobj.value, 3)
# Now commit a revision of the second object
obj2.value = 11
revid4 = self._dostoreNP(oid2, data=pickle.dumps(obj2))
# And make sure the revision can be extracted
data = self._storage.loadSerial(oid2, revid4)
pobj = pickle.loads(data)
eq(pobj.getoid(), oid2)
eq(pobj.value, 11)
# Now pack just revisions 1 and 2 of object1. Object1's current
# revision should stay alive because it's pointed to by the root, as
# should Object2's current revision.
now = packtime = time.time()
while packtime <= now:
packtime = time.time()
self._storage.pack(packtime, referencesf)
# Make sure the revisions are gone, but that object zero, object2, and
# revision 3 of object1 are still there and correct.
data, revid = self._storage.load(ZERO, '')
eq(revid, revid0)
eq(loads(data).value, 0)
raises(KeyError, self._storage.loadSerial, oid1, revid1)
raises(KeyError, self._storage.loadSerial, oid1, revid2)
data = self._storage.loadSerial(oid1, revid3)
pobj = pickle.loads(data)
eq(pobj.getoid(), oid1)
eq(pobj.value, 3)
data, revid = self._storage.load(oid1, '')
eq(revid, revid3)
pobj = pickle.loads(data)
eq(pobj.getoid(), oid1)
eq(pobj.value, 3)
data, revid = self._storage.load(oid2, '')
eq(revid, revid4)
eq(loads(data).value, 11)
data = self._storage.loadSerial(oid2, revid4)
pobj = pickle.loads(data)
eq(pobj.getoid(), oid2)
eq(pobj.value, 11)
def checkPackUnlinkedFromRoot(self):
eq = self.assertEqual
db = DB(self._storage)
conn = db.open()
root = conn.root()
txn = transaction.get()
txn.note('root')
txn.commit()
now = packtime = time.time()
while packtime <= now:
packtime = time.time()
obj = C()
obj.value = 7
root['obj'] = obj
txn = transaction.get()
txn.note('root -> o1')
txn.commit()
del root['obj']
txn = transaction.get()
txn.note('root -x-> o1')
txn.commit()
self._storage.pack(packtime, referencesf)
log = self._storage.undoLog()
tid = log[0]['id']
db.undo(tid)
txn = transaction.get()
txn.note('undo root -x-> o1')
txn.commit()
conn.sync()
eq(root['obj'].value, 7)
def checkRedundantPack(self):
# It is an error to perform a pack with a packtime earlier
# than a previous packtime. The storage can't do a full
# traversal as of the packtime, because the previous pack may
# have removed revisions necessary for a full traversal.
# It should be simple to test that a storage error is raised,
# but this test case goes to the trouble of constructing a
# scenario that would lose data if the earlier packtime was
# honored.
self._initroot()
db = DB(self._storage)
conn = db.open()
root = conn.root()
root["d"] = d = PersistentMapping()
transaction.commit()
snooze()
obj = d["obj"] = C()
obj.value = 1
transaction.commit()
snooze()
packt1 = time.time()
lost_oid = obj._p_oid
obj = d["anotherobj"] = C()
obj.value = 2
transaction.commit()
snooze()
packt2 = time.time()
db.pack(packt2)
# BDBStorage allows the second pack, but doesn't lose data.
try:
db.pack(packt1)
except StorageError:
pass
# This object would be removed by the second pack, even though
# it is reachable.
self._storage.load(lost_oid, "")
def checkPackUndoLog(self):
self._initroot()
# Create a `persistent' object
obj = self._newobj()
oid = obj.getoid()
obj.value = 1
# Commit two different revisions
revid1 = self._dostoreNP(oid, data=pickle.dumps(obj))
obj.value = 2
snooze()
packtime = time.time()
snooze()
self._dostoreNP(oid, revid=revid1, data=pickle.dumps(obj))
# Now pack the first transaction
self.assertEqual(3, len(self._storage.undoLog()))
self._storage.pack(packtime, referencesf)
        # The undo log contains only the most recent transaction
        self.assertEqual(1, len(self._storage.undoLog()))
def dont_checkPackUndoLogUndoable(self):
# A disabled test. I wanted to test that the content of the
# undo log was consistent, but every storage appears to
# include something slightly different. If the result of this
# method is only used to fill a GUI then this difference
        # doesn't matter.  Perhaps re-enable this test once we agree
# what should be asserted.
self._initroot()
        # Create two `persistent' objects
obj1 = self._newobj()
oid1 = obj1.getoid()
obj1.value = 1
obj2 = self._newobj()
oid2 = obj2.getoid()
obj2.value = 2
# Commit the first revision of each of them
revid11 = self._dostoreNP(oid1, data=pickle.dumps(obj1),
description="1-1")
revid22 = self._dostoreNP(oid2, data=pickle.dumps(obj2),
description="2-2")
        # Remember the time; everything above here will be packed away.
snooze()
packtime = time.time()
snooze()
# Commit two revisions of the first object
obj1.value = 3
revid13 = self._dostoreNP(oid1, revid=revid11,
data=pickle.dumps(obj1), description="1-3")
obj1.value = 4
self._dostoreNP(oid1, revid=revid13,
data=pickle.dumps(obj1), description="1-4")
# Commit one revision of the second object
obj2.value = 5
self._dostoreNP(oid2, revid=revid22,
data=pickle.dumps(obj2), description="2-5")
# Now pack
        self.assertEqual(6, len(self._storage.undoLog()))
print '\ninitial undoLog was'
for r in self._storage.undoLog(): print r
self._storage.pack(packtime, referencesf)
        # The undo log now contains only the two undoable transactions.
print '\nafter packing undoLog was'
for r in self._storage.undoLog(): print r
# what can we assert about that?
# A number of these threads are kicked off by _PackWhileWriting(). Their
# purpose is to abuse the database passed to the constructor with lots of
# random write activity while the main thread is packing it.
class ClientThread(TestThread):
def __init__(self, db, choices, loop_trip, timer, thread_id):
TestThread.__init__(self)
self.root = db.open().root()
self.choices = choices
self.loop_trip = loop_trip
self.millis = timer.elapsed_millis
self.thread_id = thread_id
# list of lists; each list has as many of these as a loop trip
# got thru:
# thread_id
# elapsed millis at loop top
# elapsed millis at attempt
# index into self.root getting replaced
# elapsed millis when outcome known
# 'OK' or 'Conflict'
# True if we got beyond this line, False if it raised an exception:
# self.root[index].value = MinPO(j)
self.outcomes = []
def runtest(self):
from random import choice
for j in range(self.loop_trip):
assign_worked = False
alist = [self.thread_id, self.millis()]
self.outcomes.append(alist)
try:
index = choice(self.choices)
alist.extend([self.millis(), index])
self.root[index].value = MinPO(j)
assign_worked = True
transaction.commit()
alist.append(self.millis())
alist.append('OK')
except ConflictError:
alist.append(self.millis())
alist.append('Conflict')
transaction.abort()
alist.append(assign_worked)
class ElapsedTimer:
def __init__(self, start_time):
self.start_time = start_time
def elapsed_millis(self):
return int((time.time() - self.start_time) * 1000)
##############################################################################
#
# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Test that a storage's values persist across open and close."""
class PersistentStorage:
def checkUpdatesPersist(self):
oids = []
def new_oid_wrapper(l=oids, new_oid=self._storage.new_oid):
oid = new_oid()
l.append(oid)
return oid
self._storage.new_oid = new_oid_wrapper
self._dostore()
oid = self._storage.new_oid()
revid = self._dostore(oid)
if self._storage.supportsVersions():
self._dostore(oid, revid, data=8, version='b')
oid = self._storage.new_oid()
revid = self._dostore(oid, data=1)
revid = self._dostore(oid, revid, data=2)
self._dostore(oid, revid, data=3)
# keep copies of all the objects
objects = []
for oid in oids:
p, s = self._storage.load(oid, '')
objects.append((oid, '', p, s))
ver = self._storage.modifiedInVersion(oid)
if ver:
p, s = self._storage.load(oid, ver)
objects.append((oid, ver, p, s))
self._storage.close()
self.open()
        # make sure all the objects come back with the same pickle and serial
for oid, ver, p, s in objects:
_p, _s = self._storage.load(oid, ver)
self.assertEquals(p, _p)
self.assertEquals(s, _s)
##############################################################################
#
# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
from ZODB.POSException import ReadOnlyError, Unsupported
import transaction
class ReadOnlyStorage:
def _create_data(self):
# test a read-only storage that already has some data
self.oids = {}
for i in range(10):
oid = self._storage.new_oid()
revid = self._dostore(oid)
self.oids[oid] = revid
def _make_readonly(self):
self._storage.close()
self.open(read_only=True)
self.assert_(self._storage.isReadOnly())
def checkReadMethods(self):
self._create_data()
self._make_readonly()
# XXX not going to bother checking all read methods
for oid in self.oids.keys():
data, revid = self._storage.load(oid, '')
self.assertEqual(revid, self.oids[oid])
self.assert_(not self._storage.modifiedInVersion(oid))
# Storages without revisions may not have loadSerial().
try:
_data = self._storage.loadSerial(oid, revid)
self.assertEqual(data, _data)
except Unsupported:
pass
def checkWriteMethods(self):
self._make_readonly()
self.assertRaises(ReadOnlyError, self._storage.new_oid)
t = transaction.Transaction()
self.assertRaises(ReadOnlyError, self._storage.tpc_begin, t)
if self._storage.supportsVersions():
self.assertRaises(ReadOnlyError, self._storage.abortVersion,
'', t)
self.assertRaises(ReadOnlyError, self._storage.commitVersion,
'', '', t)
self.assertRaises(ReadOnlyError, self._storage.store,
'\000' * 8, None, '', '', t)
if self._storage.supportsTransactionalUndo():
self.assertRaises(ReadOnlyError, self._storage.undo,
'\000' * 8, t)
##############################################################################
#
# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""More recovery and iterator tests."""
import transaction
from transaction import Transaction
from ZODB.tests.IteratorStorage import IteratorDeepCompare
from ZODB.tests.StorageTestBase import MinPO, zodb_unpickle, snooze
from ZODB import DB
from ZODB.serialize import referencesf
import time
class RecoveryStorage(IteratorDeepCompare):
# Requires a setUp() that creates a self._dst destination storage
def checkSimpleRecovery(self):
oid = self._storage.new_oid()
revid = self._dostore(oid, data=11)
revid = self._dostore(oid, revid=revid, data=12)
revid = self._dostore(oid, revid=revid, data=13)
self._dst.copyTransactionsFrom(self._storage)
self.compare(self._storage, self._dst)
def checkRecoveryAcrossVersions(self):
oid = self._storage.new_oid()
revid = self._dostore(oid, data=21)
revid = self._dostore(oid, revid=revid, data=22)
revid = self._dostore(oid, revid=revid, data=23, version='one')
revid = self._dostore(oid, revid=revid, data=34, version='one')
# Now commit the version
t = Transaction()
self._storage.tpc_begin(t)
self._storage.commitVersion('one', '', t)
self._storage.tpc_vote(t)
self._storage.tpc_finish(t)
self._dst.copyTransactionsFrom(self._storage)
self.compare(self._storage, self._dst)
def checkRecoverAbortVersion(self):
oid = self._storage.new_oid()
revid = self._dostore(oid, data=21, version="one")
revid = self._dostore(oid, revid=revid, data=23, version='one')
revid = self._dostore(oid, revid=revid, data=34, version='one')
# Now abort the version and the creation
t = Transaction()
self._storage.tpc_begin(t)
tid, oids = self._storage.abortVersion('one', t)
self._storage.tpc_vote(t)
self._storage.tpc_finish(t)
self.assertEqual(oids, [oid])
self._dst.copyTransactionsFrom(self._storage)
self.compare(self._storage, self._dst)
        # Also make sure the last transaction has a data record
# with None for its data attribute, because we've undone the
# object.
for s in self._storage, self._dst:
iter = s.iterator()
for trans in iter:
pass # iterate until we get the last one
data = trans[0]
self.assertRaises(IndexError, lambda i, t=trans: t[i], 1)
self.assertEqual(data.oid, oid)
self.assertEqual(data.data, None)
def checkRecoverUndoInVersion(self):
oid = self._storage.new_oid()
version = "aVersion"
revid_a = self._dostore(oid, data=MinPO(91))
revid_b = self._dostore(oid, revid=revid_a, version=version,
data=MinPO(92))
revid_c = self._dostore(oid, revid=revid_b, version=version,
data=MinPO(93))
self._undo(self._storage.undoInfo()[0]['id'], [oid])
self._commitVersion(version, '')
self._undo(self._storage.undoInfo()[0]['id'], [oid])
# now copy the records to a new storage
self._dst.copyTransactionsFrom(self._storage)
self.compare(self._storage, self._dst)
# The last two transactions were applied directly rather than
        # copied.  So we can't use compare() to verify that the new
# transactions are applied correctly. (The new transactions
# will have different timestamps for each storage.)
self._abortVersion(version)
self.assert_(self._storage.versionEmpty(version))
self._undo(self._storage.undoInfo()[0]['id'], [oid])
self.assert_(not self._storage.versionEmpty(version))
# check the data is what we expect it to be
data, revid = self._storage.load(oid, version)
self.assertEqual(zodb_unpickle(data), MinPO(92))
data, revid = self._storage.load(oid, '')
self.assertEqual(zodb_unpickle(data), MinPO(91))
# and swap the storages
tmp = self._storage
self._storage = self._dst
self._abortVersion(version)
self.assert_(self._storage.versionEmpty(version))
self._undo(self._storage.undoInfo()[0]['id'], [oid])
self.assert_(not self._storage.versionEmpty(version))
# check the data is what we expect it to be
data, revid = self._storage.load(oid, version)
self.assertEqual(zodb_unpickle(data), MinPO(92))
data, revid = self._storage.load(oid, '')
self.assertEqual(zodb_unpickle(data), MinPO(91))
# swap them back
self._storage = tmp
# Now remove _dst and copy all the transactions a second time.
# This time we will be able to confirm via compare().
self._dst.close()
self._dst.cleanup()
self._dst = self.new_dest()
self._dst.copyTransactionsFrom(self._storage)
self.compare(self._storage, self._dst)
def checkRestoreAcrossPack(self):
db = DB(self._storage)
c = db.open()
r = c.root()
obj = r["obj1"] = MinPO(1)
transaction.commit()
obj = r["obj2"] = MinPO(1)
transaction.commit()
self._dst.copyTransactionsFrom(self._storage)
self._dst.pack(time.time(), referencesf)
self._undo(self._storage.undoInfo()[0]['id'])
        # Copy the final transaction manually.  Even though there
        # was a pack, the restore() ought to succeed.
it = self._storage.iterator()
final = list(it)[-1]
self._dst.tpc_begin(final, final.tid, final.status)
for r in final:
self._dst.restore(r.oid, r.tid, r.data, r.version, r.data_txn,
final)
it.close()
self._dst.tpc_vote(final)
self._dst.tpc_finish(final)
def checkPackWithGCOnDestinationAfterRestore(self):
raises = self.assertRaises
db = DB(self._storage)
conn = db.open()
root = conn.root()
root.obj = obj1 = MinPO(1)
txn = transaction.get()
txn.note('root -> obj')
txn.commit()
root.obj.obj = obj2 = MinPO(2)
txn = transaction.get()
txn.note('root -> obj -> obj')
txn.commit()
del root.obj
txn = transaction.get()
txn.note('root -X->')
txn.commit()
# Now copy the transactions to the destination
self._dst.copyTransactionsFrom(self._storage)
# Now pack the destination.
snooze()
self._dst.pack(time.time(), referencesf)
# And check to see that the root object exists, but not the other
# objects.
data, serial = self._dst.load(root._p_oid, '')
raises(KeyError, self._dst.load, obj1._p_oid, '')
raises(KeyError, self._dst.load, obj2._p_oid, '')
def checkRestoreWithMultipleObjectsInUndoRedo(self):
from ZODB.FileStorage import FileStorage
# Undo creates backpointers in (at least) FileStorage. ZODB 3.2.1
# FileStorage._data_find() had an off-by-8 error, neglecting to
# account for the size of the backpointer when searching a
# transaction with multiple data records. The results were
# unpredictable. For example, it could raise a Python exception
# due to passing a negative offset to file.seek(), or could
        # claim that a transaction didn't have data for an oid even
        # though it actually did.
#
# The former failure mode was seen in real life, in a ZRS secondary
# doing recovery. On my box today, the second failure mode is
# what happens in this test (with an unpatched _data_find, of
# course). Note that the error can only "bite" if more than one
# data record is in a transaction, and the oid we're looking for
# follows at least one data record with a backpointer.
#
# Unfortunately, _data_find() is a low-level implementation detail,
# and this test does some horrid white-box abuse to test it.
is_filestorage = isinstance(self._storage, FileStorage)
db = DB(self._storage)
c = db.open()
r = c.root()
# Create some objects.
r["obj1"] = MinPO(1)
r["obj2"] = MinPO(1)
transaction.commit()
# Add x attributes to them.
r["obj1"].x = 'x1'
r["obj2"].x = 'x2'
transaction.commit()
r = db.open().root()
self.assertEquals(r["obj1"].x, 'x1')
self.assertEquals(r["obj2"].x, 'x2')
# Dirty tricks.
if is_filestorage:
obj1_oid = r["obj1"]._p_oid
obj2_oid = r["obj2"]._p_oid
# This will be the offset of the next transaction, which
# will contain two backpointers.
pos = self._storage.getSize()
# Undo the attribute creation.
info = self._storage.undoInfo()
tid = info[0]['id']
t = Transaction()
self._storage.tpc_begin(t)
oids = self._storage.undo(tid, t)
self._storage.tpc_vote(t)
self._storage.tpc_finish(t)
r = db.open().root()
self.assertRaises(AttributeError, getattr, r["obj1"], 'x')
self.assertRaises(AttributeError, getattr, r["obj2"], 'x')
if is_filestorage:
# _data_find should find data records for both objects in that
# transaction. Without the patch, the second assert failed
# (it claimed it couldn't find a data record for obj2) on my
# box, but other failure modes were possible.
self.assert_(self._storage._data_find(pos, obj1_oid, '') > 0)
self.assert_(self._storage._data_find(pos, obj2_oid, '') > 0)
# The offset of the next ("redo") transaction.
pos = self._storage.getSize()
# Undo the undo (restore the attributes).
info = self._storage.undoInfo()
tid = info[0]['id']
t = Transaction()
self._storage.tpc_begin(t)
oids = self._storage.undo(tid, t)
self._storage.tpc_vote(t)
self._storage.tpc_finish(t)
r = db.open().root()
self.assertEquals(r["obj1"].x, 'x1')
self.assertEquals(r["obj2"].x, 'x2')
if is_filestorage:
# Again _data_find should find both objects in this txn, and
# again the second assert failed on my box.
self.assert_(self._storage._data_find(pos, obj1_oid, '') > 0)
self.assert_(self._storage._data_find(pos, obj2_oid, '') > 0)
# Indirectly provoke .restore(). .restore in turn indirectly
# provokes _data_find too, but not usefully for the purposes of
# the specific bug this test aims at: copyTransactionsFrom() uses
# storage iterators that chase backpointers themselves, and
# return the data they point at instead. The result is that
# _data_find didn't actually see anything dangerous in this
# part of the test.
self._dst.copyTransactionsFrom(self._storage)
self.compare(self._storage, self._dst)
##############################################################################
#
# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Check loadSerial() on storages that support historical revisions."""
from ZODB.tests.MinPO import MinPO
from ZODB.tests.StorageTestBase import \
zodb_unpickle, zodb_pickle, snooze, handle_serials
from ZODB.utils import p64, u64
import transaction
ZERO = '\0'*8
class RevisionStorage:
def checkLoadSerial(self):
oid = self._storage.new_oid()
revid = ZERO
revisions = {}
for i in range(31, 38):
revid = self._dostore(oid, revid=revid, data=MinPO(i))
revisions[revid] = MinPO(i)
# Now make sure all the revisions have the correct value
for revid, value in revisions.items():
data = self._storage.loadSerial(oid, revid)
self.assertEqual(zodb_unpickle(data), value)
def checkLoadBefore(self):
# Store 10 revisions of one object and then make sure that we
# can get all the non-current revisions back.
oid = self._storage.new_oid()
revs = []
revid = None
for i in range(10):
# We need to ensure that successive timestamps are at least
# two apart, so that a timestamp exists that's unambiguously
# between successive timestamps. Each call to snooze()
# guarantees that the next timestamp will be at least one
# larger (and probably much more than that) than the previous
# one.
snooze()
snooze()
revid = self._dostore(oid, revid, data=MinPO(i))
revs.append(self._storage.loadEx(oid, ""))
prev = u64(revs[0][1])
for i in range(1, 10):
tid = revs[i][1]
cur = u64(tid)
middle = prev + (cur - prev) // 2
assert prev < middle < cur # else the snooze() trick failed
prev = cur
t = self._storage.loadBefore(oid, p64(middle))
self.assert_(t is not None)
data, start, end = t
self.assertEqual(revs[i-1][0], data)
self.assertEqual(tid, end)
def checkLoadBeforeEdges(self):
        # Check the edge cases for a non-current load.
oid = self._storage.new_oid()
self.assertRaises(KeyError, self._storage.loadBefore,
oid, p64(0))
revid1 = self._dostore(oid, data=MinPO(1))
self.assertEqual(self._storage.loadBefore(oid, p64(0)), None)
self.assertEqual(self._storage.loadBefore(oid, revid1), None)
cur = p64(u64(revid1) + 1)
data, start, end = self._storage.loadBefore(oid, cur)
self.assertEqual(zodb_unpickle(data), MinPO(1))
self.assertEqual(start, revid1)
self.assertEqual(end, None)
revid2 = self._dostore(oid, revid=revid1, data=MinPO(2))
data, start, end = self._storage.loadBefore(oid, cur)
self.assertEqual(zodb_unpickle(data), MinPO(1))
self.assertEqual(start, revid1)
self.assertEqual(end, revid2)
def checkLoadBeforeOld(self):
# Look for a very old revision. With the BaseStorage implementation
        # this should require multiple history() calls.
oid = self._storage.new_oid()
revs = []
revid = None
for i in range(50):
revid = self._dostore(oid, revid, data=MinPO(i))
revs.append(revid)
data, start, end = self._storage.loadBefore(oid, revs[12])
self.assertEqual(zodb_unpickle(data), MinPO(11))
self.assertEqual(start, revs[11])
self.assertEqual(end, revs[12])
# XXX Is it okay to assume everyone testing against RevisionStorage
# implements undo?
def checkLoadBeforeUndo(self):
# Do several transactions then undo them.
oid = self._storage.new_oid()
revid = None
for i in range(5):
revid = self._dostore(oid, revid, data=MinPO(i))
revs = []
for i in range(4):
info = self._storage.undoInfo()
tid = info[0]["id"]
# Always undo the most recent txn, so the value will
# alternate between 3 and 4.
self._undo(tid, [oid], note="undo %d" % i)
revs.append(self._storage.loadEx(oid, ""))
prev_tid = None
for i, (data, tid, ver) in enumerate(revs):
t = self._storage.loadBefore(oid, p64(u64(tid) + 1))
self.assertEqual(data, t[0])
self.assertEqual(tid, t[1])
if prev_tid:
self.assert_(prev_tid < t[1])
prev_tid = t[1]
if i < 3:
self.assertEqual(revs[i+1][1], t[2])
else:
self.assertEqual(None, t[2])
def checkLoadBeforeConsecutiveTids(self):
eq = self.assertEqual
oid = self._storage.new_oid()
def helper(tid, revid, x):
data = zodb_pickle(MinPO(x))
t = transaction.Transaction()
try:
self._storage.tpc_begin(t, p64(tid))
r1 = self._storage.store(oid, revid, data, '', t)
# Finish the transaction
r2 = self._storage.tpc_vote(t)
newrevid = handle_serials(oid, r1, r2)
self._storage.tpc_finish(t)
except:
self._storage.tpc_abort(t)
raise
return newrevid
revid1 = helper(1, None, 1)
revid2 = helper(2, revid1, 2)
revid3 = helper(3, revid2, 3)
data, start_tid, end_tid = self._storage.loadBefore(oid, p64(2))
eq(zodb_unpickle(data), MinPO(1))
eq(u64(start_tid), 1)
eq(u64(end_tid), 2)
def checkLoadBeforeCreation(self):
eq = self.assertEqual
oid1 = self._storage.new_oid()
oid2 = self._storage.new_oid()
revid1 = self._dostore(oid1)
revid2 = self._dostore(oid2)
results = self._storage.loadBefore(oid2, revid2)
eq(results, None)
# XXX There are other edge cases to handle, including pack.
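# A small helper sketch (illustration only) built on the loadBefore()
# contract exercised above: loadBefore(oid, tid) returns (data, start_tid,
# end_tid) for the revision current just before tid, with end_tid None if
# that revision is still current.
def _load_at(storage, oid, tid):
    # Load the revision of oid that was current as of transaction id tid
    # (inclusive), using only loadBefore().
    t = storage.loadBefore(oid, p64(u64(tid) + 1))
    if t is None:
        raise KeyError(oid)
    data, start_tid, end_tid = t
    return data, start_tid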
##############################################################################
#
# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Provide a mixin base class for storage tests.
The StorageTestBase class provides basic setUp() and tearDown()
semantics (which you can override), and it also provides a helper
method _dostore() which performs a complete store transaction for a
single object revision.
"""
import sys
import time
import types
import unittest
from cPickle import Pickler, Unpickler
from cStringIO import StringIO
import transaction
from ZODB.utils import u64
from ZODB.tests.MinPO import MinPO
ZERO = '\0'*8
def snooze():
    # On Windows, it's possible that two successive time.time() calls return
    # the same value.  Tim guarantees that time never runs backwards.  You
    # usually want to call this before you pack a storage, or whenever you
    # must make other guarantees about increasing timestamps.
now = time.time()
while now == time.time():
time.sleep(0.1)
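# A hypothetical convenience built on snooze(): grab a timestamp guaranteed
# to be strictly greater than everything committed so far -- the usual
# prelude to a pack in the tests that use this module.
def _fresh_packtime():
    snooze()
    return time.time()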
def _persistent_id(obj):
oid = getattr(obj, "_p_oid", None)
if getattr(oid, "__get__", None) is not None:
return None
else:
return oid
def zodb_pickle(obj):
"""Create a pickle in the format expected by ZODB."""
f = StringIO()
p = Pickler(f, 1)
p.persistent_id = _persistent_id
klass = obj.__class__
assert not hasattr(obj, '__getinitargs__'), "not ready for constructors"
args = None
mod = getattr(klass, '__module__', None)
if mod is not None:
klass = mod, klass.__name__
state = obj.__getstate__()
p.dump((klass, args))
p.dump(state)
return f.getvalue(1)
def persistent_load(pid):
# helper for zodb_unpickle
return "ref to %s.%s oid=%s" % (pid[1][0], pid[1][1], u64(pid[0]))
def zodb_unpickle(data):
"""Unpickle an object stored using the format expected by ZODB."""
f = StringIO(data)
u = Unpickler(f)
u.persistent_load = persistent_load
klass_info = u.load()
if isinstance(klass_info, types.TupleType):
if isinstance(klass_info[0], type):
# XXX what is the second part of klass_info?
klass, xxx = klass_info
assert not xxx
else:
if isinstance(klass_info[0], tuple):
modname, klassname = klass_info[0]
else:
modname, klassname = klass_info
if modname == "__main__":
ns = globals()
else:
mod = import_helper(modname)
ns = mod.__dict__
try:
klass = ns[klassname]
except KeyError:
                print >> sys.stderr, "can't find %s in %r" % (klassname, ns)
                raise
inst = klass()
else:
raise ValueError, "expected class info: %s" % repr(klass_info)
state = u.load()
inst.__setstate__(state)
return inst
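# A minimal round-trip sketch of the two helpers above: zodb_pickle() writes
# the (class, args) header followed by the state, and zodb_unpickle()
# reconstructs an instance from them.
def _demo_zodb_roundtrip():
    obj = MinPO(7)
    copy = zodb_unpickle(zodb_pickle(obj))
    assert copy == obj  # MinPO compares by value
    return copy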
def handle_all_serials(oid, *args):
"""Return dict of oid to serialno from store() and tpc_vote().
Raises an exception if one of the calls raised an exception.
The storage interface got complicated when ZEO was introduced.
Any individual store() call can return None or a sequence of
2-tuples where the 2-tuple is either oid, serialno or an
exception to be raised by the client.
The original interface just returned the serialno for the
object.
"""
d = {}
for arg in args:
if isinstance(arg, types.StringType):
d[oid] = arg
elif arg is None:
pass
else:
for oid, serial in arg:
if not isinstance(serial, types.StringType):
raise serial # error from ZEO server
d[oid] = serial
return d
def handle_serials(oid, *args):
"""Return the serialno for oid based on multiple return values.
    A thin wrapper around handle_all_serials() for the single-oid case.
"""
return handle_all_serials(oid, *args)[oid]
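# Hypothetical values illustrating the two return shapes handle_serials()
# copes with: a plain serial string (the original interface) and a sequence
# of (oid, serial) pairs (the ZEO-style interface).
def _demo_handle_serials():
    oid = '\0' * 8
    serial = '\0' * 7 + '\x01'
    assert handle_serials(oid, serial, None) == serial
    assert handle_serials(oid, None, [(oid, serial)]) == serial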
def import_helper(name):
__import__(name)
return sys.modules[name]
class StorageTestBase(unittest.TestCase):
# XXX It would be simpler if concrete tests didn't need to extend
# setUp() and tearDown().
def setUp(self):
# You need to override this with a setUp that creates self._storage
self._storage = None
def _close(self):
# You should override this if closing your storage requires additional
# shutdown operations.
if self._storage is not None:
self._storage.close()
def tearDown(self):
self._close()
def _dostore(self, oid=None, revid=None, data=None, version=None,
already_pickled=0, user=None, description=None):
"""Do a complete storage transaction. The defaults are:
- oid=None, ask the storage for a new oid
- revid=None, use a revid of ZERO
- data=None, pickle up some arbitrary data (the integer 7)
- version=None, use the empty string version
Returns the object's new revision id.
"""
if oid is None:
oid = self._storage.new_oid()
if revid is None:
revid = ZERO
if data is None:
data = MinPO(7)
if type(data) == types.IntType:
data = MinPO(data)
if not already_pickled:
data = zodb_pickle(data)
if version is None:
version = ''
# Begin the transaction
t = transaction.Transaction()
if user is not None:
t.user = user
if description is not None:
t.description = description
try:
self._storage.tpc_begin(t)
# Store an object
r1 = self._storage.store(oid, revid, data, version, t)
# Finish the transaction
r2 = self._storage.tpc_vote(t)
revid = handle_serials(oid, r1, r2)
self._storage.tpc_finish(t)
except:
self._storage.tpc_abort(t)
raise
return revid
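# Illustrative use of _dostore() in a test (hypothetical values):
#     oid = self._storage.new_oid()
#     revid = self._dostore(oid, data=MinPO(7))               # create
#     revid = self._dostore(oid, revid=revid, data=MinPO(8))  # update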
def _dostoreNP(self, oid=None, revid=None, data=None, version=None,
user=None, description=None):
return self._dostore(oid, revid, data, version, 1, user, description)
# The following methods depend on optional storage features.
def _undo(self, tid, expected_oids=None, note=None):
# Undo a tid that affects a single object (oid).
# XXX This is very specialized
t = transaction.Transaction()
t.note(note or "undo")
self._storage.tpc_begin(t)
tid, oids = self._storage.undo(tid, t)
self._storage.tpc_vote(t)
self._storage.tpc_finish(t)
if expected_oids is not None:
self.assertEqual(len(oids), len(expected_oids), repr(oids))
for oid in expected_oids:
self.assert_(oid in oids)
return self._storage.lastTransaction()
def _commitVersion(self, src, dst):
t = transaction.Transaction()
t.note("commit %r to %r" % (src, dst))
self._storage.tpc_begin(t)
tid, oids = self._storage.commitVersion(src, dst, t)
self._storage.tpc_vote(t)
self._storage.tpc_finish(t)
return oids
def _abortVersion(self, ver):
t = transaction.Transaction()
t.note("abort %r" % ver)
self._storage.tpc_begin(t)
tid, oids = self._storage.abortVersion(ver, t)
self._storage.tpc_vote(t)
self._storage.tpc_finish(t)
return oids
##############################################################################
#
# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Test the storage's implemenetation of the storage synchronization spec.
The Synchronization spec
http://www.zope.org/Documentation/Developer/Models/ZODB/
ZODB_Architecture_Storage_Interface_State_Synchronization_Diag.html
It specifies two states committing and non-committing. A storage
starts in the non-committing state. tpc_begin() transfers to the
committting state; tpc_abort() and tpc_finish() transfer back to
non-committing.
Several other methods are only allowed in one state or another. Many
methods allowed only in the committing state require that they apply
to the currently committing transaction.
The spec is silent on a variety of methods that don't appear to modify
the state, e.g. load(), undoLog(), pack(). It's unclear whether there
is a separate set of synchronization rules that apply to these methods
or if the synchronization is implementation dependent, i.e. only what
is need to guarantee a corrected implementation.
The synchronization spec is also silent on whether there is any
contract implied with the caller. If the storage can assume that a
single client is single-threaded and that it will not call, e.g., store()
until after it calls tpc_begin(), the implementation can be
substantially simplified.
New and/or unspecified methods:
tpc_vote(): handled like tpc_abort
undo(): how's that handled?
Methods that have nothing to do with committing/non-committing:
load(), loadSerial(), getName(), getSize(), __len__(), history(),
undoLog(), modifiedInVersion(), versionEmpty(), versions(), pack().
Specific questions:
The spec & docs say that undo() takes three arguments, the second
being a transaction. If the specified arg isn't the current
transaction, the undo() should raise StorageTransactionError. This
isn't implemented anywhere. It looks like undo can be called at
anytime.
FileStorage does not allow undo() during a pack. How should this be
tested? Is it a general restriction?
"""
from transaction import Transaction
from ZODB.POSException import StorageTransactionError
VERSION = "testversion"
OID = "\000" * 8
SERIALNO = "\000" * 8
TID = "\000" * 8
class SynchronizedStorage:
## def verifyCommitting(self, callable, *args):
## self.assertRaises(StorageTransactionError, callable *args)
def verifyNotCommitting(self, callable, *args):
self.assertRaises(StorageTransactionError, callable, *args)
def verifyWrongTrans(self, callable, *args):
t = Transaction()
self._storage.tpc_begin(t)
self.assertRaises(StorageTransactionError, callable, *args)
self._storage.tpc_abort(t)
def checkAbortVersionNotCommitting(self):
self.verifyNotCommitting(self._storage.abortVersion,
VERSION, Transaction())
def checkAbortVersionWrongTrans(self):
self.verifyWrongTrans(self._storage.abortVersion,
VERSION, Transaction())
def checkCommitVersionNotCommitting(self):
self.verifyNotCommitting(self._storage.commitVersion,
VERSION, "", Transaction())
def checkCommitVersionWrongTrans(self):
self.verifyWrongTrans(self._storage.commitVersion,
VERSION, "", Transaction())
def checkStoreNotCommitting(self):
self.verifyNotCommitting(self._storage.store,
OID, SERIALNO, "", "", Transaction())
def checkStoreWrongTrans(self):
self.verifyWrongTrans(self._storage.store,
OID, SERIALNO, "", "", Transaction())
## def checkNewOidNotCommitting(self):
## self.verifyNotCommitting(self._storage.new_oid)
## def checkNewOidWrongTrans(self):
## self.verifyWrongTrans(self._storage.new_oid)
def checkAbortNotCommitting(self):
self._storage.tpc_abort(Transaction())
def checkAbortWrongTrans(self):
t = Transaction()
self._storage.tpc_begin(t)
self._storage.tpc_abort(Transaction())
self._storage.tpc_abort(t)
def checkFinishNotCommitting(self):
t = Transaction()
self._storage.tpc_finish(t)
self._storage.tpc_abort(t)
def checkFinishWrongTrans(self):
t = Transaction()
self._storage.tpc_begin(t)
self._storage.tpc_finish(Transaction())
self._storage.tpc_abort(t)
def checkBeginCommitting(self):
t = Transaction()
self._storage.tpc_begin(t)
self._storage.tpc_begin(t)
self._storage.tpc_abort(t)
# XXX how to check undo?
##############################################################################
#
# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Check undo().
Any storage that supports undo() must pass these tests.
"""
import time
import types
from persistent import Persistent
import transaction
from transaction import Transaction
from ZODB import POSException
from ZODB.serialize import referencesf
from ZODB.utils import p64
from ZODB import DB
from ZODB.tests.MinPO import MinPO
from ZODB.tests.StorageTestBase import zodb_pickle, zodb_unpickle
ZERO = '\0'*8
class C(Persistent):
pass
def snooze():
# On Windows, it's possible that two successive time.time() calls return
# the same value. Tim guarantees that time never runs backwards. You
# usually want to call this before you pack a storage, or when you must
# make other guarantees about increasing timestamps.
now = time.time()
while now == time.time():
time.sleep(0.1)
def listeq(L1, L2):
"""Return True if L1.sort() == L2.sort()"""
c1 = L1[:]
c2 = L2[:]
c1.sort()
c2.sort()
return c1 == c2
class TransactionalUndoStorage:
def _transaction_begin(self):
self.__serials = {}
def _transaction_store(self, oid, rev, data, vers, trans):
r = self._storage.store(oid, rev, data, vers, trans)
if r:
if type(r) == types.StringType:
self.__serials[oid] = r
else:
for oid, serial in r:
self.__serials[oid] = serial
def _transaction_vote(self, trans):
r = self._storage.tpc_vote(trans)
if r:
for oid, serial in r:
self.__serials[oid] = serial
def _transaction_newserial(self, oid):
return self.__serials[oid]
def _multi_obj_transaction(self, objs):
newrevs = {}
t = Transaction()
self._storage.tpc_begin(t)
self._transaction_begin()
for oid, rev, data in objs:
self._transaction_store(oid, rev, data, '', t)
newrevs[oid] = None
self._transaction_vote(t)
self._storage.tpc_finish(t)
for oid in newrevs.keys():
newrevs[oid] = self._transaction_newserial(oid)
return newrevs
def _iterate(self):
"""Iterate over the storage in its final state."""
# This is testing that the iterator() code works correctly.
# The hasattr() guards against ZEO, which doesn't support iterator.
if not hasattr(self._storage, "iterator"):
return
iter = self._storage.iterator()
for txn in iter:
for rec in txn:
pass
def undo(self, tid, note):
t = Transaction()
t.note(note)
self._storage.tpc_begin(t)
oids = self._storage.undo(tid, t)
self._storage.tpc_vote(t)
self._storage.tpc_finish(t)
return oids
def checkSimpleTransactionalUndo(self):
eq = self.assertEqual
oid = self._storage.new_oid()
revid = self._dostore(oid, data=MinPO(23))
revid = self._dostore(oid, revid=revid, data=MinPO(24))
revid = self._dostore(oid, revid=revid, data=MinPO(25))
info = self._storage.undoInfo()
# Now start an undo transaction
self._undo(info[0]["id"], [oid], note="undo1")
data, revid = self._storage.load(oid, '')
eq(zodb_unpickle(data), MinPO(24))
# Do another one
info = self._storage.undoInfo()
self._undo(info[2]["id"], [oid], note="undo2")
data, revid = self._storage.load(oid, '')
eq(zodb_unpickle(data), MinPO(23))
# Try to undo the first record
info = self._storage.undoInfo()
self._undo(info[4]["id"], [oid], note="undo3")
# This should fail since we've undone the object's creation
self.assertRaises(KeyError,
self._storage.load, oid, '')
# And now let's try to redo the object's creation
info = self._storage.undoInfo()
self._undo(info[0]["id"], [oid])
data, revid = self._storage.load(oid, '')
eq(zodb_unpickle(data), MinPO(23))
self._iterate()
def checkCreationUndoneGetSerial(self):
# create an object
oid = self._storage.new_oid()
self._dostore(oid, data=MinPO(23))
# undo its creation
info = self._storage.undoInfo()
tid = info[0]['id']
t = Transaction()
t.note('undo1')
self._storage.tpc_begin(t)
self._storage.undo(tid, t)
self._storage.tpc_vote(t)
self._storage.tpc_finish(t)
# Check that calling getSerial on an uncreated object raises a KeyError
# The current version of FileStorage fails this test
self.assertRaises(KeyError, self._storage.getSerial, oid)
def checkUndoCreationBranch1(self):
eq = self.assertEqual
oid = self._storage.new_oid()
revid = self._dostore(oid, data=MinPO(11))
revid = self._dostore(oid, revid=revid, data=MinPO(12))
# Undo the last transaction
info = self._storage.undoInfo()
self._undo(info[0]['id'], [oid])
data, revid = self._storage.load(oid, '')
eq(zodb_unpickle(data), MinPO(11))
# Now from here, we can either redo the last undo, or undo the object
# creation. Let's undo the object creation.
info = self._storage.undoInfo()
self._undo(info[2]['id'], [oid])
self.assertRaises(KeyError, self._storage.load, oid, '')
self._iterate()
def checkUndoCreationBranch2(self):
eq = self.assertEqual
oid = self._storage.new_oid()
revid = self._dostore(oid, data=MinPO(11))
revid = self._dostore(oid, revid=revid, data=MinPO(12))
# Undo the last transaction
info = self._storage.undoInfo()
self._undo(info[0]['id'], [oid])
data, revid = self._storage.load(oid, '')
eq(zodb_unpickle(data), MinPO(11))
# Now from here, we can either redo the last undo, or undo the object
# creation. Let's redo the last undo
info = self._storage.undoInfo()
self._undo(info[0]['id'], [oid])
data, revid = self._storage.load(oid, '')
eq(zodb_unpickle(data), MinPO(12))
self._iterate()
def checkTwoObjectUndo(self):
eq = self.assertEqual
# Convenience
p31, p32, p51, p52 = map(zodb_pickle,
map(MinPO, (31, 32, 51, 52)))
oid1 = self._storage.new_oid()
oid2 = self._storage.new_oid()
revid1 = revid2 = ZERO
# Store two objects in the same transaction
t = Transaction()
self._storage.tpc_begin(t)
self._transaction_begin()
self._transaction_store(oid1, revid1, p31, '', t)
self._transaction_store(oid2, revid2, p51, '', t)
# Finish the transaction
self._transaction_vote(t)
revid1 = self._transaction_newserial(oid1)
revid2 = self._transaction_newserial(oid2)
self._storage.tpc_finish(t)
eq(revid1, revid2)
# Update those same two objects
t = Transaction()
self._storage.tpc_begin(t)
self._transaction_begin()
self._transaction_store(oid1, revid1, p32, '', t)
self._transaction_store(oid2, revid2, p52, '', t)
# Finish the transaction
self._transaction_vote(t)
revid1 = self._transaction_newserial(oid1)
revid2 = self._transaction_newserial(oid2)
self._storage.tpc_finish(t)
eq(revid1, revid2)
# Make sure the objects have the current value
data, revid1 = self._storage.load(oid1, '')
eq(zodb_unpickle(data), MinPO(32))
data, revid2 = self._storage.load(oid2, '')
eq(zodb_unpickle(data), MinPO(52))
# Now attempt to undo the transaction containing two objects
info = self._storage.undoInfo()
self._undo(info[0]['id'], [oid1, oid2])
data, revid1 = self._storage.load(oid1, '')
eq(zodb_unpickle(data), MinPO(31))
data, revid2 = self._storage.load(oid2, '')
eq(zodb_unpickle(data), MinPO(51))
self._iterate()
def checkTwoObjectUndoAtOnce(self):
# Convenience
eq = self.assertEqual
unless = self.failUnless
p30, p31, p32, p50, p51, p52 = map(zodb_pickle,
map(MinPO,
(30, 31, 32, 50, 51, 52)))
oid1 = self._storage.new_oid()
oid2 = self._storage.new_oid()
revid1 = revid2 = ZERO
# Store two objects in the same transaction
d = self._multi_obj_transaction([(oid1, revid1, p30),
(oid2, revid2, p50),
])
eq(d[oid1], d[oid2])
# Update those same two objects
d = self._multi_obj_transaction([(oid1, d[oid1], p31),
(oid2, d[oid2], p51),
])
eq(d[oid1], d[oid2])
# Update those same two objects
d = self._multi_obj_transaction([(oid1, d[oid1], p32),
(oid2, d[oid2], p52),
])
eq(d[oid1], d[oid2])
revid1 = self._transaction_newserial(oid1)
revid2 = self._transaction_newserial(oid2)
eq(revid1, revid2)
# Make sure the objects have the current value
data, revid1 = self._storage.load(oid1, '')
eq(zodb_unpickle(data), MinPO(32))
data, revid2 = self._storage.load(oid2, '')
eq(zodb_unpickle(data), MinPO(52))
# Now attempt to undo the transaction containing two objects
info = self._storage.undoInfo()
tid = info[0]['id']
tid1 = info[1]['id']
t = Transaction()
self._storage.tpc_begin(t)
tid, oids = self._storage.undo(tid, t)
tid, oids1 = self._storage.undo(tid1, t)
self._storage.tpc_vote(t)
self._storage.tpc_finish(t)
# We get the finalization stuff called an extra time:
eq(len(oids), 2)
eq(len(oids1), 2)
unless(oid1 in oids)
unless(oid2 in oids)
data, revid1 = self._storage.load(oid1, '')
eq(zodb_unpickle(data), MinPO(30))
data, revid2 = self._storage.load(oid2, '')
eq(zodb_unpickle(data), MinPO(50))
# Now try to undo the undo we just did, whew
info = self._storage.undoInfo()
self._undo(info[0]['id'], [oid1, oid2])
data, revid1 = self._storage.load(oid1, '')
eq(zodb_unpickle(data), MinPO(32))
data, revid2 = self._storage.load(oid2, '')
eq(zodb_unpickle(data), MinPO(52))
self._iterate()
def checkTwoObjectUndoAgain(self):
eq = self.assertEqual
p31, p32, p33, p51, p52, p53 = map(
zodb_pickle,
map(MinPO, (31, 32, 33, 51, 52, 53)))
# Like the above, but the first revision of the objects are stored in
# different transactions.
oid1 = self._storage.new_oid()
oid2 = self._storage.new_oid()
revid1 = self._dostore(oid1, data=p31, already_pickled=1)
revid2 = self._dostore(oid2, data=p51, already_pickled=1)
# Update those same two objects
t = Transaction()
self._storage.tpc_begin(t)
self._transaction_begin()
self._transaction_store(oid1, revid1, p32, '', t)
self._transaction_store(oid2, revid2, p52, '', t)
# Finish the transaction
self._transaction_vote(t)
self._storage.tpc_finish(t)
revid1 = self._transaction_newserial(oid1)
revid2 = self._transaction_newserial(oid2)
eq(revid1, revid2)
# Now attempt to undo the transaction containing two objects
info = self._storage.undoInfo()
self._undo(info[0]["id"], [oid1, oid2])
data, revid1 = self._storage.load(oid1, '')
eq(zodb_unpickle(data), MinPO(31))
data, revid2 = self._storage.load(oid2, '')
eq(zodb_unpickle(data), MinPO(51))
# Like the above, but this time, the second transaction contains only
# one object.
t = Transaction()
self._storage.tpc_begin(t)
self._transaction_begin()
self._transaction_store(oid1, revid1, p33, '', t)
self._transaction_store(oid2, revid2, p53, '', t)
# Finish the transaction
self._transaction_vote(t)
self._storage.tpc_finish(t)
revid1 = self._transaction_newserial(oid1)
revid2 = self._transaction_newserial(oid2)
eq(revid1, revid2)
# Update in different transactions
revid1 = self._dostore(oid1, revid=revid1, data=MinPO(34))
revid2 = self._dostore(oid2, revid=revid2, data=MinPO(54))
# Now attempt to undo the transaction containing two objects
info = self._storage.undoInfo()
tid = info[1]['id']
t = Transaction()
self._storage.tpc_begin(t)
tid, oids = self._storage.undo(tid, t)
self._storage.tpc_vote(t)
self._storage.tpc_finish(t)
eq(len(oids), 1)
self.failUnless(oid1 in oids)
self.failUnless(not oid2 in oids)
data, revid1 = self._storage.load(oid1, '')
eq(zodb_unpickle(data), MinPO(33))
data, revid2 = self._storage.load(oid2, '')
eq(zodb_unpickle(data), MinPO(54))
self._iterate()
def checkNotUndoable(self):
eq = self.assertEqual
# Set things up so we've got a transaction that can't be undone
oid = self._storage.new_oid()
revid_a = self._dostore(oid, data=MinPO(51))
revid_b = self._dostore(oid, revid=revid_a, data=MinPO(52))
revid_c = self._dostore(oid, revid=revid_b, data=MinPO(53))
# Start the undo
info = self._storage.undoInfo()
tid = info[1]['id']
t = Transaction()
self._storage.tpc_begin(t)
self.assertRaises(POSException.UndoError,
self._storage.undo,
tid, t)
self._storage.tpc_abort(t)
# Now have more fun: object1 and object2 are in the same transaction,
# which we'll try to undo, but one of them has since been modified in a
# different transaction, so the undo should fail.
oid1 = oid
revid1 = revid_c
oid2 = self._storage.new_oid()
revid2 = ZERO
p81, p82, p91, p92 = map(zodb_pickle,
map(MinPO, (81, 82, 91, 92)))
t = Transaction()
self._storage.tpc_begin(t)
self._transaction_begin()
self._transaction_store(oid1, revid1, p81, '', t)
self._transaction_store(oid2, revid2, p91, '', t)
self._transaction_vote(t)
self._storage.tpc_finish(t)
revid1 = self._transaction_newserial(oid1)
revid2 = self._transaction_newserial(oid2)
eq(revid1, revid2)
# Make sure the objects have the expected values
data, revid_11 = self._storage.load(oid1, '')
eq(zodb_unpickle(data), MinPO(81))
data, revid_22 = self._storage.load(oid2, '')
eq(zodb_unpickle(data), MinPO(91))
eq(revid_11, revid1)
eq(revid_22, revid2)
# Now modify oid2
revid2 = self._dostore(oid2, revid=revid2, data=MinPO(92))
self.assertNotEqual(revid1, revid2)
self.assertNotEqual(revid2, revid_22)
info = self._storage.undoInfo()
tid = info[1]['id']
t = Transaction()
self._storage.tpc_begin(t)
self.assertRaises(POSException.UndoError,
self._storage.undo,
tid, t)
self._storage.tpc_abort(t)
self._iterate()
def checkTransactionalUndoAfterPack(self):
eq = self.assertEqual
# Add a few object revisions
oid = self._storage.new_oid()
revid1 = self._dostore(oid, data=MinPO(51))
packtime = time.time()
snooze() # time.time() now distinct from packtime
revid2 = self._dostore(oid, revid=revid1, data=MinPO(52))
self._dostore(oid, revid=revid2, data=MinPO(53))
# Now get the undo log
info = self._storage.undoInfo()
eq(len(info), 3)
tid = info[0]['id']
# Now pack just the initial revision of the object. We need the
# second revision otherwise we won't be able to undo the third
# revision!
self._storage.pack(packtime, referencesf)
# Make some basic assertions about the undo information now
info2 = self._storage.undoInfo()
eq(len(info2), 2)
# And now attempt to undo the last transaction
t = Transaction()
self._storage.tpc_begin(t)
tid, oids = self._storage.undo(tid, t)
self._storage.tpc_vote(t)
self._storage.tpc_finish(t)
eq(len(oids), 1)
eq(oids[0], oid)
data, revid = self._storage.load(oid, '')
# The object must now be at the second state
eq(zodb_unpickle(data), MinPO(52))
self._iterate()
def checkTransactionalUndoAfterPackWithObjectUnlinkFromRoot(self):
eq = self.assertEqual
db = DB(self._storage)
conn = db.open()
root = conn.root()
o1 = C()
o2 = C()
root['obj'] = o1
o1.obj = o2
txn = transaction.get()
txn.note('o1 -> o2')
txn.commit()
now = packtime = time.time()
while packtime <= now:
packtime = time.time()
o3 = C()
o2.obj = o3
txn = transaction.get()
txn.note('o1 -> o2 -> o3')
txn.commit()
o1.obj = o3
txn = transaction.get()
txn.note('o1 -> o3')
txn.commit()
log = self._storage.undoLog()
eq(len(log), 4)
for entry in zip(log, ('o1 -> o3', 'o1 -> o2 -> o3',
'o1 -> o2', 'initial database creation')):
eq(entry[0]['description'], entry[1])
self._storage.pack(packtime, referencesf)
log = self._storage.undoLog()
for entry in zip(log, ('o1 -> o3', 'o1 -> o2 -> o3')):
eq(entry[0]['description'], entry[1])
tid = log[0]['id']
db.undo(tid)
txn = transaction.get()
txn.note('undo')
txn.commit()
# undo does a txn-undo, but doesn't invalidate
conn.sync()
log = self._storage.undoLog()
for entry in zip(log, ('undo', 'o1 -> o3', 'o1 -> o2 -> o3')):
eq(entry[0]['description'], entry[1])
eq(o1.obj, o2)
eq(o1.obj.obj, o3)
self._iterate()
def checkPackAfterUndoDeletion(self):
db = DB(self._storage)
cn = db.open()
root = cn.root()
pack_times = []
def set_pack_time():
pack_times.append(time.time())
snooze()
root["key0"] = MinPO(0)
root["key1"] = MinPO(1)
root["key2"] = MinPO(2)
txn = transaction.get()
txn.note("create 3 keys")
txn.commit()
set_pack_time()
del root["key1"]
txn = transaction.get()
txn.note("delete 1 key")
txn.commit()
set_pack_time()
root._p_deactivate()
cn.sync()
self.assert_(listeq(root.keys(), ["key0", "key2"]))
L = db.undoInfo()
db.undo(L[0]["id"])
txn = transaction.get()
txn.note("undo deletion")
txn.commit()
set_pack_time()
root._p_deactivate()
cn.sync()
self.assert_(listeq(root.keys(), ["key0", "key1", "key2"]))
for t in pack_times:
self._storage.pack(t, referencesf)
root._p_deactivate()
cn.sync()
self.assert_(listeq(root.keys(), ["key0", "key1", "key2"]))
for i in range(3):
obj = root["key%d" % i]
self.assertEqual(obj.value, i)
root.items()
self._inter_pack_pause()
def checkPackAfterUndoManyTimes(self):
db = DB(self._storage)
cn = db.open()
rt = cn.root()
rt["test"] = MinPO(1)
transaction.commit()
rt["test2"] = MinPO(2)
transaction.commit()
rt["test"] = MinPO(3)
txn = transaction.get()
txn.note("root of undo")
txn.commit()
packtimes = []
for i in range(10):
L = db.undoInfo()
db.undo(L[0]["id"])
txn = transaction.get()
txn.note("undo %d" % i)
txn.commit()
rt._p_deactivate()
cn.sync()
self.assertEqual(rt["test"].value, i % 2 and 3 or 1)
self.assertEqual(rt["test2"].value, 2)
packtimes.append(time.time())
snooze()
for t in packtimes:
self._storage.pack(t, referencesf)
cn.sync()
# XXX Is _cache supposed to have a clear() method, or not?
# cn._cache.clear()
# The last undo set the value to 3 and pack should
# never change that.
self.assertEqual(rt["test"].value, 3)
self.assertEqual(rt["test2"].value, 2)
self._inter_pack_pause()
def _inter_pack_pause(self):
# DirectoryStorage needs a pause between packs;
# most other storages don't.
pass
def checkTransactionalUndoIterator(self):
# check that data_txn set in iterator makes sense
if not hasattr(self._storage, "iterator"):
return
s = self._storage
BATCHES = 4
OBJECTS = 4
orig = []
for i in range(BATCHES):
t = Transaction()
tid = p64(i + 1)
s.tpc_begin(t, tid)
for j in range(OBJECTS):
oid = s.new_oid()
obj = MinPO(i * OBJECTS + j)
revid = s.store(oid, None, zodb_pickle(obj), '', t)
orig.append((tid, oid, revid))
s.tpc_vote(t)
s.tpc_finish(t)
i = 0
for tid, oid, revid in orig:
self._dostore(oid, revid=revid, data=MinPO(revid),
description="update %s" % i)
# Undo the OBJECTS transactions that modified objects created
# in the ith original transaction.
def undo(i):
info = s.undoInfo()
t = Transaction()
s.tpc_begin(t)
base = i * OBJECTS + i
for j in range(OBJECTS):
tid = info[base + j]['id']
s.undo(tid, t)
s.tpc_vote(t)
s.tpc_finish(t)
for i in range(BATCHES):
undo(i)
# There are now (2 + OBJECTS) * BATCHES transactions:
# BATCHES original transactions, followed by
# OBJECTS * BATCHES modifications, followed by
# BATCHES undos
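# (e.g. with BATCHES = OBJECTS = 4: 4 + 16 + 4 = 24 = (2 + 4) * 4)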
iter = s.iterator()
offset = 0
eq = self.assertEqual
for i in range(BATCHES):
txn = iter[offset]
offset += 1
tid = p64(i + 1)
eq(txn.tid, tid)
L1 = [(rec.oid, rec.tid, rec.data_txn) for rec in txn]
L2 = [(oid, revid, None) for _tid, oid, revid in orig
if _tid == tid]
eq(L1, L2)
for i in range(BATCHES * OBJECTS):
txn = iter[offset]
offset += 1
eq(len([rec for rec in txn if rec.data_txn is None]), 1)
for i in range(BATCHES):
txn = iter[offset]
offset += 1
# The undos are performed in reverse order.
otid = p64(BATCHES - i)
L1 = [(rec.oid, rec.data_txn) for rec in txn]
L2 = [(oid, otid) for _tid, oid, revid in orig
if _tid == otid]
L1.sort()
L2.sort()
eq(L1, L2)
self.assertRaises(IndexError, iter.__getitem__, offset)
def checkUndoLogMetadata(self):
# test that the metadata is correct in the undo log
t = transaction.get()
t.note('t1')
t.setExtendedInfo('k2', 'this is transaction metadata')
t.setUser('u3', path='p3')
db = DB(self._storage)
conn = db.open()
root = conn.root()
o1 = C()
root['obj'] = o1
txn = transaction.get()
txn.commit()
l = self._storage.undoLog()
self.assertEqual(len(l), 2)
d = l[0]
self.assertEqual(d['description'], 't1')
self.assertEqual(d['k2'], 'this is transaction metadata')
self.assertEqual(d['user_name'], 'p3 u3')
##############################################################################
#
# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
# Check interactions between undo() and versions. Any storage that
# supports both undo() and versions must pass these tests.
import time
import transaction
from ZODB.serialize import referencesf
from ZODB.tests.MinPO import MinPO
from ZODB.tests.StorageTestBase import zodb_unpickle
class TransactionalUndoVersionStorage:
def _x_dostore(self, *args, **kwargs):
# ugh: backwards compatibility for ZEO 1.0, which runs these
# tests but has a _dostore() method that does not support the
# description kwarg.
try:
return self._dostore(*args, **kwargs)
except TypeError:
# assume that the type error means we've got a _dostore()
# without the description kwarg
try:
del kwargs['description']
except KeyError:
pass # not expected
return self._dostore(*args, **kwargs)
def checkUndoInVersion(self):
eq = self.assertEqual
unless = self.failUnless
def check_objects(nonversiondata, versiondata):
data, revid = self._storage.load(oid, version)
self.assertEqual(zodb_unpickle(data), MinPO(versiondata))
data, revid = self._storage.load(oid, '')
self.assertEqual(zodb_unpickle(data), MinPO(nonversiondata))
oid = self._storage.new_oid()
version = 'one'
revid_a = self._dostore(oid, data=MinPO(91))
revid_b = self._dostore(oid, revid=revid_a, data=MinPO(92),
version=version)
revid_c = self._dostore(oid, revid=revid_b, data=MinPO(93),
version=version)
info = self._storage.undoInfo()
self._undo(info[0]['id'], [oid])
data, revid = self._storage.load(oid, '')
## eq(revid, revid_a)
eq(zodb_unpickle(data), MinPO(91))
data, revid = self._storage.load(oid, version)
unless(revid > revid_b and revid > revid_c)
eq(zodb_unpickle(data), MinPO(92))
# Now commit the version...
oids = self._commitVersion(version, "")
eq(len(oids), 1)
eq(oids[0], oid)
check_objects(92, 92)
# ...and undo the commit
info = self._storage.undoInfo()
self._undo(info[0]['id'], [oid])
check_objects(91, 92)
oids = self._abortVersion(version)
assert len(oids) == 1
assert oids[0] == oid
check_objects(91, 91)
# Now undo the abort
info = self._storage.undoInfo()
self._undo(info[0]['id'], [oid])
check_objects(91, 92)
def checkUndoCommitVersion(self):
def load_value(oid, version=''):
data, revid = self._storage.load(oid, version)
return zodb_unpickle(data).value
# create a bunch of packable transactions
oid = self._storage.new_oid()
revid = '\000' * 8
for i in range(4):
revid = self._x_dostore(oid, revid, description='packable%d' % i)
pt = time.time()
time.sleep(1)
oid1 = self._storage.new_oid()
version = 'version'
revid1 = self._x_dostore(oid1, data=MinPO(0), description='create1')
revid2 = self._x_dostore(oid1, data=MinPO(1), revid=revid1,
version=version, description='version1')
self._x_dostore(oid1, data=MinPO(2), revid=revid2,
version=version, description='version2')
self._x_dostore(description='create2')
t = transaction.Transaction()
t.description = 'commit version'
self._storage.tpc_begin(t)
self._storage.commitVersion(version, '', t)
self._storage.tpc_vote(t)
self._storage.tpc_finish(t)
info = self._storage.undoInfo()
t_id = info[0]['id']
self.assertEqual(load_value(oid1), 2)
self.assertEqual(load_value(oid1, version), 2)
self._storage.pack(pt, referencesf)
self._undo(t_id, note="undo commit version")
self.assertEqual(load_value(oid1), 0)
self.assertEqual(load_value(oid1, version), 2)
data, tid, ver = self._storage.loadEx(oid1, "")
# After undoing the version commit, the non-version data
# once again becomes the non-version data from 'create1'.
self.assertEqual(tid, self._storage.lastTransaction())
self.assertEqual(ver, "")
# The current version data comes from an undo record, which
# means that it gets data via the backpointer but tid from the
# current txn.
data, tid, ver = self._storage.loadEx(oid1, version)
self.assertEqual(ver, version)
self.assertEqual(tid, self._storage.lastTransaction())
def checkUndoAbortVersion(self):
def load_value(oid, version=''):
data, revid = self._storage.load(oid, version)
return zodb_unpickle(data).value
# create a bunch of packable transactions
oid = self._storage.new_oid()
revid = '\000' * 8
for i in range(3):
revid = self._x_dostore(oid, revid, description='packable%d' % i)
pt = time.time()
time.sleep(1)
oid1 = self._storage.new_oid()
version = 'version'
revid1 = self._x_dostore(oid1, data=MinPO(0), description='create1')
revid2 = self._x_dostore(oid1, data=MinPO(1), revid=revid1,
version=version, description='version1')
self._x_dostore(oid1, data=MinPO(2), revid=revid2,
version=version, description='version2')
self._x_dostore(description='create2')
self._abortVersion(version)
info = self._storage.undoInfo()
t_id = info[0]['id']
self.assertEqual(load_value(oid1), 0)
# after abort, we should see non-version data
self.assertEqual(load_value(oid1, version), 0)
self._undo(t_id, note="undo abort version")
self.assertEqual(load_value(oid1), 0)
# the undo will re-create the version
self.assertEqual(load_value(oid1, version), 2)
info = self._storage.undoInfo()
t_id = info[0]['id']
self._storage.pack(pt, referencesf)
self._undo(t_id, note="undo undo")
# undo of the undo will put us back where we started
self.assertEqual(load_value(oid1), 0)
# after abort, we should see non-version data
self.assertEqual(load_value(oid1, version), 0)
##############################################################################
#
# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Run the version related tests for a storage.
Any storage that supports versions should be able to pass all these tests.
"""
import time
import transaction
from transaction import Transaction
from ZODB import POSException
from ZODB.serialize import referencesf
from ZODB.tests.MinPO import MinPO
from ZODB.tests.StorageTestBase import zodb_unpickle, snooze
from ZODB import DB
class VersionStorage:
def checkCommitVersionSerialno(self):
oid = self._storage.new_oid()
revid1 = self._dostore(oid, data=MinPO(12))
revid2 = self._dostore(oid, revid=revid1, data=MinPO(13),
version="version")
oids = self._commitVersion("version", "")
self.assertEqual([oid], oids)
data, revid3 = self._storage.load(oid, "")
# use repr() to avoid getting binary data in a traceback on error
self.assertNotEqual(repr(revid1), repr(revid3))
self.assertNotEqual(repr(revid2), repr(revid3))
def checkAbortVersionSerialno(self):
oid = self._storage.new_oid()
revid1 = self._dostore(oid, data=MinPO(12))
revid2 = self._dostore(oid, revid=revid1, data=MinPO(13),
version="version")
data, tid, ver = self._storage.loadEx(oid, "version")
self.assertEqual(revid2, tid)
self.assertEqual(zodb_unpickle(data), MinPO(13))
oids = self._abortVersion("version")
self.assertEqual([oid], oids)
data, revid3 = self._storage.load(oid, "")
# use repr() to avoid getting binary data in a traceback on error
self.assertNotEqual(revid1, revid3)
self.assertNotEqual(revid2, revid3)
data, tid, ver = self._storage.loadEx(oid, "")
self.assertEqual(revid3, tid)
self.assertEqual(zodb_unpickle(data), MinPO(12))
self.assertEqual(tid, self._storage.lastTransaction())
def checkVersionedStoreAndLoad(self):
eq = self.assertEqual
# Store a couple of non-version revisions of the object
oid = self._storage.new_oid()
revid = self._dostore(oid, data=MinPO(11))
revid1 = self._dostore(oid, revid=revid, data=MinPO(12))
# And now store some new revisions in a version
version = 'test-version'
revid = self._dostore(oid, revid=revid1, data=MinPO(13),
version=version)
revid = self._dostore(oid, revid=revid, data=MinPO(14),
version=version)
revid2 = self._dostore(oid, revid=revid, data=MinPO(15),
version=version)
# Now read back the object in both the non-version and version and
# make sure the values jive.
data, revid = self._storage.load(oid, '')
eq(zodb_unpickle(data), MinPO(12))
data, vrevid = self._storage.load(oid, version)
eq(zodb_unpickle(data), MinPO(15))
if hasattr(self._storage, 'getSerial'):
s = self._storage.getSerial(oid)
eq(s, max(revid, vrevid))
data, tid, ver = self._storage.loadEx(oid, version)
eq(zodb_unpickle(data), MinPO(15))
eq(tid, revid2)
data, tid, ver = self._storage.loadEx(oid, "other version")
eq(zodb_unpickle(data), MinPO(12))
eq(tid, revid2)
# loadSerial returns non-version data
try:
data = self._storage.loadSerial(oid, revid)
eq(zodb_unpickle(data), MinPO(12))
data = self._storage.loadSerial(oid, revid2)
eq(zodb_unpickle(data), MinPO(12))
except POSException.Unsupported:
pass
def checkVersionedLoadErrors(self):
oid = self._storage.new_oid()
version = 'test-version'
revid = self._dostore(oid, data=MinPO(11))
revid = self._dostore(oid, revid=revid, data=MinPO(12),
version=version)
# Try to load a bogus oid
self.assertRaises(KeyError,
self._storage.load,
self._storage.new_oid(), '')
data, revid = self._storage.load(oid, 'bogus')
self.assertEqual(zodb_unpickle(data), MinPO(11))
def checkVersionLock(self):
oid = self._storage.new_oid()
revid = self._dostore(oid, data=MinPO(11))
version = 'test-version'
revid = self._dostore(oid, revid=revid, data=MinPO(12),
version=version)
self.assertRaises(POSException.VersionLockError,
self._dostore,
oid, revid=revid, data=MinPO(14),
version='another-version')
def checkVersionEmpty(self):
# Before we store anything, these versions ought to be empty
version = 'test-version'
self.failUnless(self._storage.versionEmpty(version))
# Now store some objects
oid = self._storage.new_oid()
revid = self._dostore(oid, data=MinPO(11))
revid = self._dostore(oid, revid=revid, data=MinPO(12))
revid = self._dostore(oid, revid=revid, data=MinPO(13),
version=version)
revid = self._dostore(oid, revid=revid, data=MinPO(14),
version=version)
# The blank version should not be empty
# Neither should 'test-version'
self.failUnless(not self._storage.versionEmpty(version))
# But this non-existent version should be empty
self.failUnless(self._storage.versionEmpty('bogus'))
def checkVersions(self):
unless = self.failUnless
# Store some objects in the non-version
oid1 = self._storage.new_oid()
oid2 = self._storage.new_oid()
oid3 = self._storage.new_oid()
revid1 = self._dostore(oid1, data=MinPO(11))
revid2 = self._dostore(oid2, data=MinPO(12))
revid3 = self._dostore(oid3, data=MinPO(13))
# Now create some new versions
revid1 = self._dostore(oid1, revid=revid1, data=MinPO(14),
version='one')
revid2 = self._dostore(oid2, revid=revid2, data=MinPO(15),
version='two')
revid3 = self._dostore(oid3, revid=revid3, data=MinPO(16),
version='three')
# Ask for the versions
versions = self._storage.versions()
unless('one' in versions)
unless('two' in versions)
unless('three' in versions)
# Now flex the `max' argument
versions = self._storage.versions(1)
self.assertEqual(len(versions), 1)
unless('one' in versions or 'two' in versions or 'three' in versions)
def _setup_version(self, version='test-version'):
# Store some revisions in the non-version
oid = self._storage.new_oid()
revid = self._dostore(oid, data=MinPO(49))
revid = self._dostore(oid, revid=revid, data=MinPO(50))
revid = self._dostore(oid, revid=revid, data=MinPO(51))
# Now do some stores in a version
revid = self._dostore(oid, revid=revid, data=MinPO(52),
version=version)
revid = self._dostore(oid, revid=revid, data=MinPO(53),
version=version)
revid = self._dostore(oid, revid=revid, data=MinPO(54),
version=version)
return oid, version
def checkAbortVersion(self):
eq = self.assertEqual
oid, version = self._setup_version()
# XXX Not sure I can write a test for getSerial() in the
# presence of aborted versions, because FileStorage and
# Berkeley storage give a different answer. I think Berkeley
# is right and FS is wrong.
oids = self._abortVersion(version)
eq(len(oids), 1)
eq(oids[0], oid)
data, revid = self._storage.load(oid, '')
eq(zodb_unpickle(data), MinPO(51))
def checkAbortVersionNonCurrent(self):
# Make sure the non-current serial number is correctly set
# after a version is aborted.
oid, version = self._setup_version()
self._abortVersion(version)
data, tid, ver = self._storage.loadEx(oid, "")
# write a new revision of oid so that the aborted-version txn
# is not current
self._dostore(oid, revid=tid, data=MinPO(17))
ltid = self._storage.lastTransaction()
ncdata, ncstart, end = self._storage.loadBefore(oid, ltid)
self.assertEqual(data, ncdata)
self.assertEqual(tid, ncstart)
def checkAbortVersionErrors(self):
eq = self.assertEqual
oid, version = self._setup_version()
# Now abort a bogus version
t = Transaction()
self._storage.tpc_begin(t)
# And try to abort the empty version
if (hasattr(self._storage, 'supportsTransactionalUndo')
and self._storage.supportsTransactionalUndo()):
# XXX FileStorage used to be broken on this one
self.assertRaises(POSException.VersionError,
self._storage.abortVersion,
'', t)
# But now we really try to abort the version
tid, oids = self._storage.abortVersion(version, t)
self._storage.tpc_vote(t)
self._storage.tpc_finish(t)
eq(len(oids), 1)
eq(oids[0], oid)
data, revid = self._storage.load(oid, '')
eq(zodb_unpickle(data), MinPO(51))
def checkCommitVersionErrors(self):
if not (hasattr(self._storage, 'supportsTransactionalUndo')
and self._storage.supportsTransactionalUndo()):
# XXX FileStorage used to be broken on this one
return
eq = self.assertEqual
oid1, version1 = self._setup_version('one')
data, revid1 = self._storage.load(oid1, version1)
eq(zodb_unpickle(data), MinPO(54))
t = Transaction()
self._storage.tpc_begin(t)
try:
self.assertRaises(POSException.VersionCommitError,
self._storage.commitVersion,
'one', 'one', t)
finally:
self._storage.tpc_abort(t)
def checkNewSerialOnCommitVersionToVersion(self):
oid, version = self._setup_version()
data, vtid = self._storage.load(oid, version)
data, ntid = self._storage.load(oid, '')
version2 = 'test version 2'
self._commitVersion(version, version2)
data, tid = self._storage.load(oid, version2)
self.failUnless(tid != vtid and tid != ntid,
"New tid, %r, should be different from the old "
"version, %r, and non-version, %r, tids."
% (tid, vtid, ntid))
def checkModifyAfterAbortVersion(self):
eq = self.assertEqual
oid, version = self._setup_version()
self._abortVersion(version)
data, revid = self._storage.load(oid, '')
# And modify it a few times
revid = self._dostore(oid, revid=revid, data=MinPO(52))
revid = self._dostore(oid, revid=revid, data=MinPO(53))
revid = self._dostore(oid, revid=revid, data=MinPO(54))
data, newrevid = self._storage.load(oid, '')
eq(newrevid, revid)
eq(zodb_unpickle(data), MinPO(54))
def checkCommitToNonVersion(self):
eq = self.assertEqual
oid, version = self._setup_version()
data, revid = self._storage.load(oid, version)
eq(zodb_unpickle(data), MinPO(54))
data, revid = self._storage.load(oid, '')
eq(zodb_unpickle(data), MinPO(51))
self._commitVersion(version, '')
data, revid = self._storage.load(oid, '')
eq(zodb_unpickle(data), MinPO(54))
def checkCommitToOtherVersion(self):
eq = self.assertEqual
oid1, version1 = self._setup_version('one')
data, revid1 = self._storage.load(oid1, version1)
eq(zodb_unpickle(data), MinPO(54))
oid2, version2 = self._setup_version('two')
data, revid2 = self._storage.load(oid2, version2)
eq(zodb_unpickle(data), MinPO(54))
# make sure we see the non-version data when appropriate
data, revid2 = self._storage.load(oid1, version2)
eq(zodb_unpickle(data), MinPO(51))
data, revid2 = self._storage.load(oid2, version1)
eq(zodb_unpickle(data), MinPO(51))
data, revid2 = self._storage.load(oid1, '')
eq(zodb_unpickle(data), MinPO(51))
# Okay, now let's commit object1 to version2
oids = self._commitVersion(version1, version2)
eq(len(oids), 1)
eq(oids[0], oid1)
data, revid = self._storage.load(oid1, version2)
eq(zodb_unpickle(data), MinPO(54))
data, revid = self._storage.load(oid2, version2)
eq(zodb_unpickle(data), MinPO(54))
# an object can only exist in one version, so a load from
# version1 should now give the non-version data
data, revid2 = self._storage.load(oid1, version1)
eq(zodb_unpickle(data), MinPO(51))
# as should a version that has never been used
data, revid2 = self._storage.load(oid1, 'bela lugosi')
eq(zodb_unpickle(data), MinPO(51))
def checkAbortOneVersionCommitTheOther(self):
eq = self.assertEqual
oid1, version1 = self._setup_version('one')
data, revid1 = self._storage.load(oid1, version1)
eq(zodb_unpickle(data), MinPO(54))
oid2, version2 = self._setup_version('two')
data, revid2 = self._storage.load(oid2, version2)
eq(zodb_unpickle(data), MinPO(54))
# Let's make sure we can't get object1 in version2
data, revid2 = self._storage.load(oid1, version2)
eq(zodb_unpickle(data), MinPO(51))
oids = self._abortVersion(version1)
eq(len(oids), 1)
eq(oids[0], oid1)
data, revid = self._storage.load(oid1, '')
eq(zodb_unpickle(data), MinPO(51))
data, revid = self._storage.load(oid1, '')
eq(zodb_unpickle(data), MinPO(51))
data, revid = self._storage.load(oid1, '')
eq(zodb_unpickle(data), MinPO(51))
data, revid = self._storage.load(oid2, '')
eq(zodb_unpickle(data), MinPO(51))
data, revid = self._storage.load(oid2, version2)
eq(zodb_unpickle(data), MinPO(54))
# Okay, now let's commit version2 back to the trunk
oids = self._commitVersion(version2, '')
eq(len(oids), 1)
eq(oids[0], oid2)
data, revid = self._storage.load(oid1, '')
eq(zodb_unpickle(data), MinPO(51))
# But the trunk should be up to date now
data, revid = self._storage.load(oid2, '')
eq(zodb_unpickle(data), MinPO(54))
data, revid = self._storage.load(oid2, version2)
eq(zodb_unpickle(data), MinPO(54))
oid = self._storage.new_oid()
revid = self._dostore(oid, revid=revid, data=MinPO(54), version='one')
self.assertRaises(KeyError,
self._storage.load, oid, '')
self.assertRaises(KeyError,
self._storage.load, oid, 'two')
def checkCreateObjectInVersionWithAbort(self):
oid = self._storage.new_oid()
revid = self._dostore(oid, data=21, version="one")
revid = self._dostore(oid, revid=revid, data=23, version='one')
revid = self._dostore(oid, revid=revid, data=34, version='one')
# Now abort the version and the creation
t = Transaction()
self._storage.tpc_begin(t)
tid, oids = self._storage.abortVersion('one', t)
self._storage.tpc_vote(t)
self._storage.tpc_finish(t)
self.assertEqual(oids, [oid])
def checkPackVersions(self):
db = DB(self._storage)
cn = db.open(version="testversion")
root = cn.root()
obj = root["obj"] = MinPO("obj")
root["obj2"] = MinPO("obj2")
txn = transaction.get()
txn.note("create 2 objs in version")
txn.commit()
obj.value = "77"
txn = transaction.get()
txn.note("modify obj in version")
txn.commit()
# undo the modification to generate a mix of backpointers
# and versions for pack to chase
info = db.undoInfo()
db.undo(info[0]["id"])
txn = transaction.get()
txn.note("undo modification")
txn.commit()
snooze()
self._storage.pack(time.time(), referencesf)
db.commitVersion("testversion")
txn = transaction.get()
txn.note("commit version")
txn.commit()
cn = db.open()
root = cn.root()
root["obj"] = "no version"
txn = transaction.get()
txn.note("modify obj")
txn.commit()
self._storage.pack(time.time(), referencesf)
def checkPackVersionsInPast(self):
db = DB(self._storage)
cn = db.open(version="testversion")
root = cn.root()
obj = root["obj"] = MinPO("obj")
root["obj2"] = MinPO("obj2")
txn = transaction.get()
txn.note("create 2 objs in version")
txn.commit()
obj.value = "77"
txn = transaction.get()
txn.note("modify obj in version")
txn.commit()
t0 = time.time()
snooze()
# undo the modification to generate a mix of backpointers
# and versions for pack to chase
info = db.undoInfo()
db.undo(info[0]["id"])
txn = transaction.get()
txn.note("undo modification")
txn.commit()
self._storage.pack(t0, referencesf)
db.commitVersion("testversion")
txn = transaction.get()
txn.note("commit version")
txn.commit()
cn = db.open()
root = cn.root()
root["obj"] = "no version"
txn = transaction.get()
txn.note("modify obj")
txn.commit()
self._storage.pack(time.time(), referencesf)
def checkPackVersionReachable(self):
db = DB(self._storage)
cn = db.open()
root = cn.root()
names = "a", "b", "c"
for name in names:
root[name] = MinPO(name)
transaction.commit()
for name in names:
cn2 = db.open(version=name)
rt2 = cn2.root()
obj = rt2[name]
obj.value = MinPO("version")
transaction.commit()
cn2.close()
root["d"] = MinPO("d")
transaction.commit()
snooze()
self._storage.pack(time.time(), referencesf)
cn.sync()
# make sure all the non-version data is there
for name, obj in root.items():
self.assertEqual(name, obj.value)
# make sure all the version-data is there,
# and create a new revision in the version
for name in names:
cn2 = db.open(version=name)
rt2 = cn2.root()
obj = rt2[name].value
self.assertEqual(obj.value, "version")
obj.value = "still version"
transaction.commit()
cn2.close()
db.abortVersion("b")
txn = transaction.get()
txn.note("abort version b")
txn.commit()
t = time.time()
snooze()
L = db.undoInfo()
db.undo(L[0]["id"])
txn = transaction.get()
txn.note("undo abort")
txn.commit()
self._storage.pack(t, referencesf)
cn2 = db.open(version="b")
rt2 = cn2.root()
self.assertEqual(rt2["b"].value.value, "still version")
def checkLoadBeforeVersion(self):
eq = self.assertEqual
oid = self._storage.new_oid()
revid1 = self._dostore(oid, data=1)
revid2 = self._dostore(oid, data=2, revid=revid1, version="kobe")
revid3 = self._dostore(oid, data=3, revid=revid2, version="kobe")
data, start_tid, end_tid = self._storage.loadBefore(oid, revid3)
eq(zodb_unpickle(data), MinPO(1))
eq(start_tid, revid1)
eq(end_tid, None)
# Having this makes debugging better.
##############################################################################
#
# Copyright (c) 2002 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################
"""Functional test to produce a dangling reference."""
import time
import transaction
from ZODB.FileStorage import FileStorage
from ZODB import DB
from persistent import Persistent
class P(Persistent):
pass
def create_dangling_ref(db):
rt = db.open().root()
rt[1] = o1 = P()
transaction.get().note("create o1")
transaction.commit()
rt[2] = o2 = P()
transaction.get().note("create o2")
transaction.commit()
c = o1.child = P()
transaction.get().note("set child on o1")
transaction.commit()
o1.child = P()
transaction.get().note("replace child on o1")
transaction.commit()
time.sleep(2)
# The pack should remove the reference to c, because it is no
# longer referenced from o1. But the object still exists and has
# an oid, so a new commit of it won't create a new object.
db.pack()
print repr(c._p_oid)
o2.child = c
transaction.get().note("set child on o2")
transaction.commit()
def main():
fs = FileStorage("dangle.fs")
db = DB(fs)
create_dangling_ref(db)
db.close()
if __name__ == "__main__":
main()
##############################################################################
#
# Copyright (c) 2004 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Support for testing logging code
If you want to test that your code generates proper log output, you
can create and install a handler that collects output:
>>> handler = InstalledHandler('foo.bar')
The handler is installed into loggers for all of the names passed. In
addition, the logger level is set to 1, which means log
everything. If you want to log less than everything, you can provide a
level keyword argument. The level setting affects only the named
loggers.
Then, any log output is collected in the handler:
>>> logging.getLogger('foo.bar').exception('eek')
>>> logging.getLogger('foo.bar').info('blah blah')
>>> for record in handler.records:
... print record.name, record.levelname
... print ' ', record.getMessage()
foo.bar ERROR
eek
foo.bar INFO
blah blah
A similar effect can be gotten by just printing the handler:
>>> print handler
foo.bar ERROR
eek
foo.bar INFO
blah blah
After checking the log output, you need to uninstall the handler:
>>> handler.uninstall()
At which point, the handler won't get any more log output.
Let's clear the handler:
>>> handler.clear()
>>> handler.records
[]
And then log something:
>>> logging.getLogger('foo.bar').info('blah')
and, sure enough, we still have no output:
>>> handler.records
[]
$Id$
"""
import logging
class Handler(logging.Handler):
def __init__(self, *names, **kw):
logging.Handler.__init__(self)
self.names = names
self.records = []
self.setLoggerLevel(**kw)
def setLoggerLevel(self, level=1):
self.level = level
self.oldlevels = {}
def emit(self, record):
self.records.append(record)
def clear(self):
del self.records[:]
def install(self):
for name in self.names:
logger = logging.getLogger(name)
self.oldlevels[name] = logger.level
logger.setLevel(self.level)
logger.addHandler(self)
def uninstall(self):
for name in self.names:
logger = logging.getLogger(name)
logger.setLevel(self.oldlevels[name])
logger.removeHandler(self)
def __str__(self):
return '\n'.join(
[("%s %s\n %s" %
(record.name, record.levelname,
'\n'.join([line
for line in record.getMessage().split('\n')
if line.strip()])
)
)
for record in self.records]
)
class InstalledHandler(Handler):
def __init__(self, *names):
Handler.__init__(self, *names)
self.install()
##############################################################################
#
# Copyright (c) 2004 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Sample objects for use in tests
$Id$
"""
class DataManager(object):
"""Sample data manager
This class provides a trivial data-manager implementation and doc
strings to illustrate the protocol and to provide a tool for
writing tests.
Our sample data manager has state that is updated through an inc
method and through transaction operations.
When we create a sample data manager:
>>> dm = DataManager()
It has two bits of state, state:
>>> dm.state
0
and delta:
>>> dm.delta
0
Both are initialized to 0. state is meant to model
committed state, while delta represents tentative changes within a
transaction. We change the state by calling inc:
>>> dm.inc()
which updates delta:
>>> dm.delta
1
but state isn't changed until we commit the transaction:
>>> dm.state
0
To commit the changes, we use 2-phase commit. We execute the first
stage by calling prepare. We need to pass a transaction. Our
sample data managers don't really use the transactions for much,
so we'll be lazy and use strings for transactions:
>>> t1 = '1'
>>> dm.prepare(t1)
The sample data manager updates the state when we call prepare:
>>> dm.state
1
>>> dm.delta
1
This is mainly so we can detect some effect of calling the methods.
Now if we call commit:
>>> dm.commit(t1)
Our changes are"permanent". The state reflects the changes and the
delta has been reset to 0.
>>> dm.state
1
>>> dm.delta
0
"""
def __init__(self):
self.state = 0
self.sp = 0
self.transaction = None
self.delta = 0
self.prepared = False
def inc(self, n=1):
self.delta += n
def prepare(self, transaction):
"""Prepare to commit data
>>> dm = DataManager()
>>> dm.inc()
>>> t1 = '1'
>>> dm.prepare(t1)
>>> dm.commit(t1)
>>> dm.state
1
>>> dm.inc()
>>> t2 = '2'
>>> dm.prepare(t2)
>>> dm.abort(t2)
>>> dm.state
1
It is an error to call prepare more than once without an intervening
commit or abort:
>>> dm.prepare(t1)
>>> dm.prepare(t1)
Traceback (most recent call last):
...
TypeError: Already prepared
>>> dm.prepare(t2)
Traceback (most recent call last):
...
TypeError: Already prepared
>>> dm.abort(t1)
If there was a preceding savepoint, the transaction must match:
>>> rollback = dm.savepoint(t1)
>>> dm.prepare(t2)
Traceback (most recent call last):
...
TypeError: ('Transaction mismatch', '2', '1')
>>> dm.prepare(t1)
"""
if self.prepared:
raise TypeError('Already prepared')
self._checkTransaction(transaction)
self.prepared = True
self.transaction = transaction
self.state += self.delta
def _checkTransaction(self, transaction):
if (transaction is not self.transaction
and self.transaction is not None):
raise TypeError("Transaction missmatch",
transaction, self.transaction)
def abort(self, transaction):
"""Abort a transaction
The abort method can be called before two-phase commit to
throw away work done in the transaction:
>>> dm = DataManager()
>>> dm.inc()
>>> dm.state, dm.delta
(0, 1)
>>> t1 = '1'
>>> dm.abort(t1)
>>> dm.state, dm.delta
(0, 0)
The abort method also throws away work done in savepoints:
>>> dm.inc()
>>> r = dm.savepoint(t1)
>>> dm.inc()
>>> r = dm.savepoint(t1)
>>> dm.state, dm.delta
(0, 2)
>>> dm.abort(t1)
>>> dm.state, dm.delta
(0, 0)
If savepoints are used, abort must be passed the same
transaction:
>>> dm.inc()
>>> r = dm.savepoint(t1)
>>> t2 = '2'
>>> dm.abort(t2)
Traceback (most recent call last):
...
TypeError: ('Transaction mismatch', '2', '1')
>>> dm.abort(t1)
The abort method is also used to abort a two-phase commit:
>>> dm.inc()
>>> dm.state, dm.delta
(0, 1)
>>> dm.prepare(t1)
>>> dm.state, dm.delta
(1, 1)
>>> dm.abort(t1)
>>> dm.state, dm.delta
(0, 0)
Of course, the transactions passed to prepare and abort must
match:
>>> dm.prepare(t1)
>>> dm.abort(t2)
Traceback (most recent call last):
...
TypeError: ('Transaction mismatch', '2', '1')
>>> dm.abort(t1)
"""
self._checkTransaction(transaction)
if self.transaction is not None:
self.transaction = None
if self.prepared:
self.state -= self.delta
self.prepared = False
self.delta = 0
def commit(self, transaction):
"""Complete two-phase commit
>>> dm = DataManager()
>>> dm.state
0
>>> dm.inc()
We start two-phase commit by calling prepare:
>>> t1 = '1'
>>> dm.prepare(t1)
We complete it by calling commit:
>>> dm.commit(t1)
>>> dm.state
1
It is an error to call commit without calling prepare first:
>>> dm.inc()
>>> t2 = '2'
>>> dm.commit(t2)
Traceback (most recent call last):
...
TypeError: Not prepared to commit
>>> dm.prepare(t2)
>>> dm.commit(t2)
Of course, the transactions given to prepare and commit must
be the same:
>>> dm.inc()
>>> t3 = '3'
>>> dm.prepare(t3)
>>> dm.commit(t2)
Traceback (most recent call last):
...
TypeError: ('Transaction mismatch', '2', '3')
"""
if not self.prepared:
raise TypeError('Not prepared to commit')
self._checkTransaction(transaction)
self.delta = 0
self.transaction = None
self.prepared = False
def savepoint(self, transaction):
"""Provide the ability to rollback transaction state
Savepoints provide a way to:
- Save partial transaction work. For some data managers, this
could allow resources to be used more efficiently.
- Provide the ability to revert state to a point in a
transaction without aborting the entire transaction. In
other words, savepoints support partial aborts.
Savepoints don't use two-phase commit. If there are errors in
setting or rolling back to savepoints, the application should
abort the containing transaction. This is *not* the
responsibility of the data manager.
Savepoints are always associated with a transaction. Any work
done in a savepoint's transaction is tentative until the
transaction is committed using two-phase commit.
>>> dm = DataManager()
>>> dm.inc()
>>> t1 = '1'
>>> r = dm.savepoint(t1)
>>> dm.state, dm.delta
(0, 1)
>>> dm.inc()
>>> dm.state, dm.delta
(0, 2)
>>> r.rollback()
>>> dm.state, dm.delta
(0, 1)
>>> dm.prepare(t1)
>>> dm.commit(t1)
>>> dm.state, dm.delta
(1, 0)
Savepoints must have the same transaction:
>>> r1 = dm.savepoint(t1)
>>> dm.state, dm.delta
(1, 0)
>>> dm.inc()
>>> dm.state, dm.delta
(1, 1)
>>> t2 = '2'
>>> r2 = dm.savepoint(t2)
Traceback (most recent call last):
...
TypeError: ('Transaction mismatch', '2', '1')
>>> r2 = dm.savepoint(t1)
>>> dm.inc()
>>> dm.state, dm.delta
(1, 2)
If we rollback to an earlier savepoint, we discard all work
done later:
>>> r1.rollback()
>>> dm.state, dm.delta
(1, 0)
and we can no longer rollback to the later savepoint:
>>> r2.rollback()
Traceback (most recent call last):
...
TypeError: ('Attempt to roll back to invalid save point', 3, 2)
We can roll back to a savepoint as often as we like:
>>> r1.rollback()
>>> r1.rollback()
>>> r1.rollback()
>>> dm.state, dm.delta
(1, 0)
>>> dm.inc()
>>> dm.inc()
>>> dm.inc()
>>> dm.state, dm.delta
(1, 3)
>>> r1.rollback()
>>> dm.state, dm.delta
(1, 0)
But we can't rollback to a savepoint after it has been
committed:
>>> dm.prepare(t1)
>>> dm.commit(t1)
>>> r1.rollback()
Traceback (most recent call last):
...
TypeError: Attempt to rollback stale rollback
"""
if self.prepared:
raise TypeError("Can't get savepoint during two-phase commit")
self._checkTransaction(transaction)
self.transaction = transaction
self.sp += 1
return Rollback(self)
class Rollback(object):
def __init__(self, dm):
self.dm = dm
self.sp = dm.sp
self.delta = dm.delta
self.transaction = dm.transaction
def rollback(self):
if self.transaction is not self.dm.transaction:
raise TypeError("Attempt to rollback stale rollback")
if self.dm.sp < self.sp:
raise TypeError("Attempt to roll back to invalid save point",
self.sp, self.dm.sp)
self.dm.sp = self.sp
self.dm.delta = self.delta
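def example_savepoint_cycle():
"""A compact end-to-end illustration (added for illustration only; it
exercises the same semantics as the doctests above): a savepoint, a
rollback, and then a full two-phase commit.
>>> dm = DataManager()
>>> dm.inc()
>>> t = '1'
>>> sp = dm.savepoint(t)
>>> dm.inc(2)
>>> dm.delta
3
>>> sp.rollback()
>>> dm.delta
1
>>> dm.prepare(t)
>>> dm.commit(t)
>>> dm.state, dm.delta
(1, 0)
"""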
def test_suite():
from doctest import DocTestSuite
return DocTestSuite()
if __name__ == '__main__':
import unittest
unittest.main(defaultTest='test_suite')
##############################################################################
#
# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################
usage="""Test speed of a ZODB storage
Options:
-d file The data file to use as input.
The default is this script.
-n n The number of repetitions
-s module A module that defines a 'Storage'
attribute, which is an open storage.
If not specified, a FileStorage will be
used.
-z Test compressing data
-D Run in debug mode
-L Test loads as well as stores by minimizing
the cache after each run
-M Output means only
"""
import sys, os, getopt, string, time
sys.path.insert(0, os.getcwd())
import ZODB, ZODB.FileStorage
import persistent
import transaction
class P(persistent.Persistent): pass
def main(args):
opts, args = getopt.getopt(args, 'zd:n:Ds:LM')
z=s=None
data=sys.argv[0]
nrep=5
minimize=0
detailed=1
for o, v in opts:
if o=='-n': nrep=string.atoi(v)
elif o=='-d': data=v
elif o=='-s': s=v
elif o=='-z':
global zlib
import zlib
z=compress
elif o=='-L':
minimize=1
elif o=='-M':
detailed=0
elif o=='-D':
os.environ['STUPID_LOG_FILE']=''
os.environ['STUPID_LOG_SEVERITY']='-999'
if s:
s=__import__(s, globals(), globals(), ('__doc__',))
s=s.Storage
else:
s=ZODB.FileStorage.FileStorage('zeo_speed.fs', create=1)
data=open(data).read()
db=ZODB.DB(s,
# disable cache deactivation
cache_size=4000,
cache_deactivate_after=6000,)
results={1:0, 10:0, 100:0, 1000:0}
for j in range(nrep):
for r in 1, 10, 100, 1000:
t=time.time()
jar=db.open()
transaction.begin()
rt=jar.root()
key='s%s' % r
if rt.has_key(key): p=rt[key]
else: rt[key]=p=P()
for i in range(r):
if z is not None: d=z(data)
else: d=data
v=getattr(p, str(i), P())
v.d=d
setattr(p,str(i),v)
transaction.commit()
jar.close()
t=time.time()-t
if detailed:
sys.stderr.write("%s\t%s\t%.4f\n" % (j, r, t))
sys.stdout.flush()
results[r]=results[r]+t
rt=d=p=v=None # release all references
if minimize:
time.sleep(3)
jar.cacheMinimize(3)
if detailed: print '-'*24
for r in 1, 10, 100, 1000:
t=results[r]/nrep
sys.stderr.write("mean:\t%s\t%.4f\t%.4f (s/o)\n" % (r, t, t/r))
db.close()
def compress(s):
c=zlib.compressobj()
o=c.compress(s)
return o+c.flush()
if __name__=='__main__': main(sys.argv[1:])
##############################################################################
#
# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Tests of the default activity monitor.
See ZODB/ActivityMonitor.py
$Id$
"""
import unittest
import time
from ZODB.ActivityMonitor import ActivityMonitor
class FakeConnection:
loads = 0
stores = 0
def _transferred(self, loads, stores):
self.loads = self.loads + loads
self.stores = self.stores + stores
def getTransferCounts(self, clear=0):
res = self.loads, self.stores
if clear:
self.loads = self.stores = 0
return res
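# For reference, the clearing semantics FakeConnection mimics:
#
#   c = FakeConnection()
#   c._transferred(1, 2)
#   c.getTransferCounts(clear=1)   # -> (1, 2), and resets both counters
#   c.getTransferCounts()          # -> (0, 0)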
class Tests(unittest.TestCase):
def testAddLogEntries(self):
am = ActivityMonitor(history_length=3600)
self.assertEqual(len(am.log), 0)
c = FakeConnection()
c._transferred(1, 2)
am.closedConnection(c)
c._transferred(3, 7)
am.closedConnection(c)
self.assertEqual(len(am.log), 2)
def testTrim(self):
am = ActivityMonitor(history_length=0.1)
c = FakeConnection()
c._transferred(1, 2)
am.closedConnection(c)
time.sleep(0.2)
c._transferred(3, 7)
am.closedConnection(c)
self.assert_(len(am.log) <= 1)
def testSetHistoryLength(self):
am = ActivityMonitor(history_length=3600)
c = FakeConnection()
c._transferred(1, 2)
am.closedConnection(c)
time.sleep(0.2)
c._transferred(3, 7)
am.closedConnection(c)
self.assertEqual(len(am.log), 2)
am.setHistoryLength(0.1)
self.assertEqual(am.getHistoryLength(), 0.1)
self.assert_(len(am.log) <= 1)
def testActivityAnalysis(self):
am = ActivityMonitor(history_length=3600)
c = FakeConnection()
c._transferred(1, 2)
am.closedConnection(c)
c._transferred(3, 7)
am.closedConnection(c)
res = am.getActivityAnalysis(start=0, end=0, divisions=10)
lastend = 0
for n in range(9):
div = res[n]
self.assertEqual(div['stores'], 0)
self.assertEqual(div['loads'], 0)
self.assert_(div['start'] > 0)
self.assert_(div['start'] >= lastend)
self.assert_(div['start'] < div['end'])
lastend = div['end']
div = res[9]
self.assertEqual(div['stores'], 9)
self.assertEqual(div['loads'], 4)
self.assert_(div['start'] > 0)
self.assert_(div['start'] >= lastend)
self.assert_(div['start'] < div['end'])
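# Note on the expected numbers above: both connections close "now", which
# falls in the last division of the analysis window, so divisions 0-8 stay
# empty and division 9 accumulates loads 1+3=4 and stores 2+7=9.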
def test_suite():
return unittest.makeSuite(Tests)
if __name__=='__main__':
unittest.main(defaultTest='test_suite')
##############################################################################
#
# Copyright (c) 2004 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Test broken-object suppport
$Id$
"""
import sys
import unittest
import persistent
import transaction
from doctest import DocTestSuite
from ZODB.tests.util import DB
def test_integration():
"""Test the integration of broken object support with the databse:
>>> db = DB()
We'll create a fake module with a class:
>>> class NotThere:
... Atall = type('Atall', (persistent.Persistent, ),
... {'__module__': 'ZODB.not.there'})
And stuff this into sys.modules to simulate a regular module:
>>> sys.modules['ZODB.not.there'] = NotThere
>>> sys.modules['ZODB.not'] = NotThere
Now, we'll create and save an instance, and make sure we can
load it in another connection:
>>> a = NotThere.Atall()
>>> a.x = 1
>>> conn1 = db.open()
>>> conn1.root()['a'] = a
>>> transaction.commit()
>>> conn2 = db.open()
>>> a2 = conn2.root()['a']
>>> a2.__class__ is a.__class__
True
>>> a2.x
1
Now, we'll uninstall the module, simulating having the module
go away:
>>> del sys.modules['ZODB.not.there']
and we'll try to load the object in another connection:
>>> conn3 = db.open()
>>> a3 = conn3.root()['a']
>>> a3
<persistent broken ZODB.not.there.Atall instance """ \
r"""'\x00\x00\x00\x00\x00\x00\x00\x01'>
>>> a3.__Broken_state__
{'x': 1}
Let's clean up:
>>> db.close()
>>> del sys.modules['ZODB.not']
Cleanup:
>>> import ZODB.broken
>>> ZODB.broken.broken_cache.clear()
"""
def test_suite():
return unittest.TestSuite((
DocTestSuite('ZODB.broken'),
DocTestSuite(),
))
if __name__ == '__main__': unittest.main()
##############################################################################
#
# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""A few simple tests of the public cache API.
Each DB Connection has a separate PickleCache. The Cache serves two
purposes. It acts like a memo for unpickling. It also keeps recent
objects in memory under the assumption that they may be used again.
"""
import gc
import time
import unittest
import threading
from persistent.cPickleCache import PickleCache
from persistent.mapping import PersistentMapping
import transaction
import ZODB
import ZODB.MappingStorage
from ZODB.tests.MinPO import MinPO
from ZODB.utils import p64
from persistent import Persistent
class CacheTestBase(unittest.TestCase):
def setUp(self):
store = ZODB.MappingStorage.MappingStorage()
self.db = ZODB.DB(store,
cache_size = self.CACHE_SIZE)
self.conns = []
def tearDown(self):
for conn in self.conns:
conn.close()
self.db.close()
CACHE_SIZE = 20
def noodle_new_connection(self):
"""Do some reads and writes on a new connection."""
c = self.db.open()
self.conns.append(c)
self.noodle_connection(c)
def noodle_connection(self, c):
r = c.root()
i = len(self.conns)
d = r.get(i)
if d is None:
d = r[i] = PersistentMapping()
transaction.commit()
for i in range(15):
o = d.get(i)
if o is None:
o = d[i] = MinPO(i)
o.value += 1
transaction.commit()
# CantGetRidOfMe is used by checkMinimizeTerminates.
make_trouble = True
class CantGetRidOfMe(MinPO):
def __init__(self, value):
MinPO.__init__(self, value)
self.an_attribute = 42
def __del__(self):
# Referencing an attribute of self causes self to be
# loaded into the cache again, which also resurrects
# self.
if make_trouble:
self.an_attribute
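# The resurrection hazard, step by step: ghostifying the object clears its
# state; if that also drops the last strong Python reference, __del__ runs;
# __del__ touches self.an_attribute, which reloads the object and
# re-registers it with the cache, undoing the eviction that started it all.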
class DBMethods(CacheTestBase):
__super_setUp = CacheTestBase.setUp
def setUp(self):
self.__super_setUp()
for i in range(4):
self.noodle_new_connection()
def checkCacheDetail(self):
for name, count in self.db.cacheDetail():
self.assert_(isinstance(name, str))
self.assert_(isinstance(count, int))
def checkCacheExtremeDetail(self):
expected = ['conn_no', 'id', 'oid', 'rc', 'klass', 'state']
for dict in self.db.cacheExtremeDetail():
for k, v in dict.items():
self.assert_(k in expected)
# XXX not really sure how to do a black box test of the cache.
# should the full sweep and minimize calls always remove things?
def checkFullSweep(self):
old_size = self.db.cacheSize()
self.db.cacheFullSweep()
new_size = self.db.cacheSize()
self.assert_(new_size < old_size, "expected %s < %s" % (new_size, old_size))
def checkMinimize(self):
old_size = self.db.cacheSize()
self.db.cacheMinimize()
new_size = self.db.cacheSize()
self.assert_(new_size < old_size, "expected %s < %s" % (new_size, old_size))
def checkMinimizeTerminates(self):
# This is tricky. cPickleCache had a case where it could get into
# an infinite loop, but we don't want the test suite to hang
# if this bug reappears. So this test spawns a thread to run the
# dangerous operation, and the main thread complains if the worker
# thread hasn't finished in 30 seconds (arbitrary, but way more
# than enough). In that case, the worker thread will continue
# running forever (until killed externally), but at least the
# test suite will move on.
#
# The bug was triggered by having a persistent object whose __del__
# method references an attribute of the object. An attempt to
# ghostify such an object will clear the attribute, and if the
# cache also releases the last Python reference to the object then
# (due to ghostifying it), the __del__ method gets invoked.
# Referencing the attribute loads the object again, and also
# puts it back into the cPickleCache. If the cache implementation
# isn't looking out for this, it can get into an infinite loop
# then, endlessly trying to ghostify an object that in turn keeps
# unghostifying itself again.
class Worker(threading.Thread):
def __init__(self, testcase):
threading.Thread.__init__(self)
self.testcase = testcase
def run(self):
global make_trouble
# Make CantGetRidOfMe.__del__ dangerous.
make_trouble = True
conn = self.testcase.conns[0]
r = conn.root()
d = r[1]
for i in range(len(d)):
d[i] = CantGetRidOfMe(i)
transaction.commit()
self.testcase.db.cacheMinimize()
# Defang the nasty objects. Else, because they're
# immortal now, they hang around and create trouble
# for subsequent tests.
make_trouble = False
self.testcase.db.cacheMinimize()
w = Worker(self)
w.start()
w.join(30)
if w.isAlive():
self.fail("cacheMinimize still running after 30 seconds -- "
"almost certainly in an infinite loop")
# XXX don't have an explicit test for incrgc, because the
# connection and database call it internally
# XXX same for the get and invalidate methods
def checkLRUitems(self):
# get a cache
c = self.conns[0]._cache
c.lru_items()
def checkClassItems(self):
c = self.conns[0]._cache
c.klass_items()
class LRUCacheTests(CacheTestBase):
def checkLRU(self):
# verify the LRU behavior of the cache
dataset_size = 5
CACHE_SIZE = dataset_size*2+1
# a cache big enough to hold the objects added in two
# transactions, plus the root object
self.db.setCacheSize(CACHE_SIZE)
c = self.db.open()
r = c.root()
l = {}
# the root is the only thing in the cache, because all the
# other objects are new
self.assertEqual(len(c._cache), 1)
# run several transactions
for t in range(5):
for i in range(dataset_size):
l[(t,i)] = r[i] = MinPO(i)
transaction.commit()
# commit() will register the objects, placing them in the
# cache. at the end of commit, the cache will be reduced
# down to CACHE_SIZE items
if len(l)>CACHE_SIZE:
self.assertEqual(c._cache.ringlen(), CACHE_SIZE)
for i in range(dataset_size):
# Check objects added in the first two transactions.
# They must all be ghostified.
self.assertEqual(l[(0,i)]._p_changed, None)
self.assertEqual(l[(1,i)]._p_changed, None)
# Check objects added in the last two transactions.
# They must all still exist in memory, but have
# had their changes flushed
self.assertEqual(l[(3,i)]._p_changed, 0)
self.assertEqual(l[(4,i)]._p_changed, 0)
# Of the objects added in the middle transaction, most
# will have been ghostified. There is one cache slot
# that may be occupied by either one of those objects or
# the root, depending on precise order of access. We do
# not bother to check this
def checkSize(self):
self.assertEqual(self.db.cacheSize(), 0)
self.assertEqual(self.db.cacheDetailSize(), [])
CACHE_SIZE = 10
self.db.setCacheSize(CACHE_SIZE)
CONNS = 3
for i in range(CONNS):
self.noodle_new_connection()
self.assertEquals(self.db.cacheSize(), CACHE_SIZE * CONNS)
details = self.db.cacheDetailSize()
self.assertEquals(len(details), CONNS)
for d in details:
self.assertEquals(d['ngsize'], CACHE_SIZE)
# The assertion below is nonsensical.
# The (poorly named) cache size is a target for non-ghosts.
# The cache *usually* contains non-ghosts, so that the
# size normally exceeds the target size.
#self.assertEquals(d['size'], CACHE_SIZE)
def checkDetail(self):
CACHE_SIZE = 10
self.db.setCacheSize(CACHE_SIZE)
CONNS = 3
for i in range(CONNS):
self.noodle_new_connection()
gc.collect()
# XXX The above gc.collect call is necessary to make this test
# pass.
#
# This test then only works because the order of computations
# and object accesses in the "noodle" calls is such that the
# persistent mapping containing the MinPO objects is
# deactivated before the MinPO objects.
#
# - Without the gc call, the cache will contain ghost MinPOs
# and the check of the MinPO count below will fail. That's
# because the counts returned by cacheDetail include ghosts.
#
# - If the mapping object containing the MinPOs isn't
# deactivated, there will be one fewer non-ghost MinPO and
# the test will fail anyway.
#
# This test really needs to be thought through and documented
# better.
for klass, count in self.db.cacheDetail():
if klass.endswith('MinPO'):
self.assertEqual(count, CONNS * CACHE_SIZE)
if klass.endswith('PersistentMapping'):
# one root per connection
self.assertEqual(count, CONNS)
for details in self.db.cacheExtremeDetail():
# one 'details' dict per object
if details['klass'].endswith('PersistentMapping'):
self.assertEqual(details['state'], None)
else:
self.assert_(details['klass'].endswith('MinPO'))
self.assertEqual(details['state'], 0)
# The cache should never hold an unreferenced ghost.
if details['state'] is None: # i.e., it's a ghost
self.assert_(details['rc'] > 0)
class StubDataManager:
def setklassstate(self, object):
pass
class StubObject(Persistent):
pass
class CacheErrors(unittest.TestCase):
def setUp(self):
self.jar = StubDataManager()
self.cache = PickleCache(self.jar)
def checkGetBogusKey(self):
self.assertEqual(self.cache.get(p64(0)), None)
try:
self.cache[12]
except KeyError:
pass
else:
self.fail("expected KeyError")
try:
self.cache[12] = 12
except TypeError:
pass
else:
self.fail("expected TyepError")
try:
del self.cache[12]
except TypeError:
pass
else:
self.fail("expected TypeError")
def checkBogusObject(self):
def add(key, obj):
self.cache[key] = obj
key = p64(2)
# value isn't persistent
self.assertRaises(TypeError, add, key, 12)
o = StubObject()
# o._p_oid == None
self.assertRaises(TypeError, add, key, o)
o._p_oid = p64(3)
self.assertRaises(ValueError, add, key, o)
o._p_oid = key
# o._p_jar == None
self.assertRaises(Exception, add, key, o)
o._p_jar = self.jar
self.cache[key] = o
# make sure it can be added multiple times
self.cache[key] = o
# same object, different keys
self.assertRaises(ValueError, add, p64(0), o)
def checkTwoCaches(self):
jar2 = StubDataManager()
cache2 = PickleCache(jar2)
o = StubObject()
key = o._p_oid = p64(1)
o._p_jar = jar2
cache2[key] = o
try:
self.cache[key] = o
except ValueError:
pass
else:
self.fail("expected ValueError because object already in cache")
def checkReadOnlyAttrsWhenCached(self):
o = StubObject()
key = o._p_oid = p64(1)
o._p_jar = self.jar
self.cache[key] = o
try:
o._p_oid = p64(2)
except ValueError:
pass
else:
self.fail("expect that you can't change oid of cached object")
try:
del o._p_jar
except ValueError:
pass
else:
self.fail("expect that you can't delete jar of cached object")
def test_suite():
s = unittest.makeSuite(DBMethods, 'check')
s.addTest(unittest.makeSuite(LRUCacheTests, 'check'))
s.addTest(unittest.makeSuite(CacheErrors, 'check'))
return s
##############################################################################
#
# Copyright (c) 2003 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
import tempfile
import unittest
import transaction
import ZODB.config
from ZODB.POSException import ReadOnlyError
class ConfigTestBase(unittest.TestCase):
def _opendb(self, s):
return ZODB.config.databaseFromString(s)
def tearDown(self):
if getattr(self, "storage", None) is not None:
self.storage.cleanup()
def _test(self, s):
db = self._opendb(s)
self.storage = db._storage
# Do something with the database to make sure it works
cn = db.open()
rt = cn.root()
rt["test"] = 1
transaction.commit()
db.close()
class ZODBConfigTest(ConfigTestBase):
def test_map_config1(self):
self._test(
"""
<zodb>
<mappingstorage/>
</zodb>
""")
def test_map_config2(self):
self._test(
"""
<zodb>
<mappingstorage/>
cache-size 1000
</zodb>
""")
def test_file_config1(self):
path = tempfile.mktemp()
self._test(
"""
<zodb>
<filestorage>
path %s
</filestorage>
</zodb>
""" % path)
def test_file_config2(self):
path = tempfile.mktemp()
cfg = """
<zodb>
<filestorage>
path %s
create false
read-only true
</filestorage>
</zodb>
""" % path
self.assertRaises(ReadOnlyError, self._test, cfg)
def test_demo_config(self):
cfg = """
<zodb unused-name>
<demostorage>
name foo
<mappingstorage/>
</demostorage>
</zodb>
"""
self._test(cfg)
class ZEOConfigTest(ConfigTestBase):
def test_zeo_config(self):
# We're looking for a port that doesn't exist so a
# connection attempt will fail. Instead of elaborate
# logic to loop over a port calculation, we'll just pick a
# simple "random", likely to not-exist port number and add
# an elaborate comment explaining this instead. Go ahead,
# grep for 9.
from ZEO.ClientStorage import ClientDisconnected
cfg = """
<zodb>
<zeoclient>
server localhost:56897
wait false
</zeoclient>
</zodb>
"""
self.assertRaises(ClientDisconnected, self._test, cfg)
def test_suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(ZODBConfigTest))
suite.addTest(unittest.makeSuite(ZEOConfigTest))
return suite
if __name__ == '__main__':
unittest.main(defaultTest='test_suite')
##############################################################################
#
# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Unit tests for the Connection class."""
import doctest
import unittest
import warnings
from persistent import Persistent
import transaction
from ZODB.config import databaseFromString
from ZODB.utils import p64, u64
from ZODB.tests.warnhook import WarningsHook
from ZODB.interfaces import verifyObject
class ConnectionDotAdd(unittest.TestCase):
def setUp(self):
from ZODB.Connection import Connection
self.datamgr = Connection()
self.db = StubDatabase()
self.datamgr._setDB(self.db)
self.transaction = StubTransaction()
def tearDown(self):
transaction.abort()
def check_add(self):
from ZODB.POSException import InvalidObjectReference
obj = StubObject()
self.assert_(obj._p_oid is None)
self.assert_(obj._p_jar is None)
self.datamgr.add(obj)
self.assert_(obj._p_oid is not None)
self.assert_(obj._p_jar is self.datamgr)
self.assert_(self.datamgr.get(obj._p_oid) is obj)
# Only first-class persistent objects may be added.
self.assertRaises(TypeError, self.datamgr.add, object())
# Adding to the same connection does not fail. Object keeps the
# same oid.
oid = obj._p_oid
self.datamgr.add(obj)
self.assertEqual(obj._p_oid, oid)
# Cannot add an object from a different connection.
obj2 = StubObject()
obj2._p_jar = object()
self.assertRaises(InvalidObjectReference, self.datamgr.add, obj2)
def checkResetOnAbort(self):
# Check that _p_oid and _p_jar are reset when a transaction is
# aborted.
obj = StubObject()
self.datamgr.add(obj)
oid = obj._p_oid
self.datamgr.abort(self.transaction)
self.assert_(obj._p_oid is None)
self.assert_(obj._p_jar is None)
self.assertRaises(KeyError, self.datamgr.get, oid)
def checkResetOnTpcAbort(self):
obj = StubObject()
self.datamgr.add(obj)
oid = obj._p_oid
# Simulate an error while committing some other object.
self.datamgr.tpc_begin(self.transaction)
# Let's pretend something bad happens here.
# Call tpc_abort, clearing everything.
self.datamgr.tpc_abort(self.transaction)
self.assert_(obj._p_oid is None)
self.assert_(obj._p_jar is None)
self.assertRaises(KeyError, self.datamgr.get, oid)
def checkTpcAbortAfterCommit(self):
obj = StubObject()
self.datamgr.add(obj)
oid = obj._p_oid
self.datamgr.tpc_begin(self.transaction)
self.datamgr.commit(self.transaction)
# Let's pretend something bad happened here.
self.datamgr.tpc_abort(self.transaction)
self.assert_(obj._p_oid is None)
self.assert_(obj._p_jar is None)
self.assertRaises(KeyError, self.datamgr.get, oid)
self.assertEquals(self.db._storage._stored, [oid])
def checkCommit(self):
obj = StubObject()
self.datamgr.add(obj)
oid = obj._p_oid
self.datamgr.tpc_begin(self.transaction)
self.datamgr.commit(self.transaction)
self.datamgr.tpc_finish(self.transaction)
self.assert_(obj._p_oid is oid)
self.assert_(obj._p_jar is self.datamgr)
# This next assert_ is covered by an assert in tpc_finish.
##self.assert_(not self.datamgr._added)
self.assertEquals(self.db._storage._stored, [oid])
self.assertEquals(self.db._storage._finished, [oid])
def checkModifyOnGetstate(self):
member = StubObject()
subobj = StubObject()
subobj.member = member
obj = ModifyOnGetStateObject(subobj)
self.datamgr.add(obj)
self.datamgr.tpc_begin(self.transaction)
self.datamgr.commit(self.transaction)
self.datamgr.tpc_finish(self.transaction)
storage = self.db._storage
self.assert_(obj._p_oid in storage._stored, "object was not stored")
self.assert_(subobj._p_oid in storage._stored,
"subobject was not stored")
self.assert_(member._p_oid in storage._stored, "member was not stored")
self.assert_(self.datamgr._added_during_commit is None)
def checkUnusedAddWorks(self):
# When an object is added, but not committed, it shouldn't be stored,
# but it also shouldn't be an error.
obj = StubObject()
self.datamgr.add(obj)
self.datamgr.tpc_begin(self.transaction)
self.datamgr.tpc_finish(self.transaction)
self.assert_(obj._p_oid not in self.datamgr._storage._stored)
class UserMethodTests(unittest.TestCase):
# XXX add isn't tested here, because there are a bunch of traditional
# unit tests for it.
# XXX the version tests would require a storage that supports versions
# which is a bit more work.
def test_root(self):
r"""doctest of root() method
The root() method is simple, and the tests are pretty minimal.
Ensure that a new database has a root and that it is a
PersistentMapping.
>>> db = databaseFromString("<zodb>\n<mappingstorage/>\n</zodb>")
>>> cn = db.open()
>>> root = cn.root()
>>> type(root).__name__
'PersistentMapping'
>>> root._p_oid
'\x00\x00\x00\x00\x00\x00\x00\x00'
>>> root._p_jar is cn
True
>>> db.close()
"""
def test_get(self):
r"""doctest of get() method
The get() method returns the persistent object corresponding to
an oid.
>>> db = databaseFromString("<zodb>\n<mappingstorage/>\n</zodb>")
>>> cn = db.open()
>>> obj = cn.get(p64(0))
>>> obj._p_oid
'\x00\x00\x00\x00\x00\x00\x00\x00'
The object is a ghost.
>>> obj._p_state
-1
And multiple calls with the same oid, return the same object.
>>> obj2 = cn.get(p64(0))
>>> obj is obj2
True
If all references to the object are released, then a new
object will be returned. The cache doesn't keep unreferenced
ghosts alive. (The next object returned may still have the
same id, because Python may re-use the same memory.)
>>> del obj, obj2
>>> cn._cache.get(p64(0), None)
If the object is unghosted, then it will stay in the cache
after the last reference is released. (This is true only if
there is room in the cache and the object is recently used.)
>>> obj = cn.get(p64(0))
>>> obj._p_activate()
>>> y = id(obj)
>>> del obj
>>> obj = cn.get(p64(0))
>>> id(obj) == y
True
>>> obj._p_state
0
A request for an object that doesn't exist will raise a KeyError.
>>> cn.get(p64(1))
Traceback (most recent call last):
...
KeyError: '\x00\x00\x00\x00\x00\x00\x00\x01'
"""
def test_close(self):
r"""doctest of close() method
This is a minimal test, because most of the interesting
effects on closing a connection involve its interaction with the
database and the transaction.
>>> db = databaseFromString("<zodb>\n<mappingstorage/>\n</zodb>")
>>> cn = db.open()
It's safe to close a connection multiple times.
>>> cn.close()
>>> cn.close()
>>> cn.close()
It's not possible to load or store objects once the connection is closed.
>>> cn.get(p64(0))
Traceback (most recent call last):
...
ConnectionStateError: The database connection is closed
>>> p = Persistent()
>>> cn.add(p)
Traceback (most recent call last):
...
ConnectionStateError: The database connection is closed
"""
def test_close_with_pending_changes(self):
r"""doctest to ensure close() w/ pending changes complains
>>> import transaction
Just opening and closing is fine.
>>> db = databaseFromString("<zodb>\n<mappingstorage/>\n</zodb>")
>>> cn = db.open()
>>> cn.close()
Opening, making a change, committing, and closing is fine.
>>> cn = db.open()
>>> cn.root()['a'] = 1
>>> transaction.commit()
>>> cn.close()
Opening, making a change, and aborting is fine.
>>> cn = db.open()
>>> cn.root()['a'] = 1
>>> transaction.abort()
>>> cn.close()
But trying to close with a change pending complains.
>>> cn = db.open()
>>> cn.root()['a'] = 10
>>> cn.close()
Traceback (most recent call last):
...
ConnectionStateError: Cannot close a connection joined to a transaction
This leaves the connection as it was, so we can still commit
the change.
>>> transaction.commit()
>>> cn2 = db.open()
>>> cn2.root()['a']
10
>>> cn.close(); cn2.close()
Bug: We weren't catching the case where the only changes pending
were in a subtransaction.
>>> cn = db.open()
>>> cn.root()['a'] = 100
>>> transaction.commit(True)
>>> cn.close() # this was succeeding
Traceback (most recent call last):
...
ConnectionStateError: Cannot close a connection with a pending subtransaction
Again this leaves the connection as it was.
>>> transaction.commit()
>>> cn2 = db.open()
>>> cn2.root()['a']
100
>>> cn.close(); cn2.close()
Make sure we can still close a connection after aborting a pending
subtransaction.
>>> cn = db.open()
>>> cn.root()['a'] = 1000
>>> transaction.commit(True)
>>> cn.root()['a']
1000
>>> transaction.abort()
>>> cn.root()['a']
100
>>> cn.close()
>>> db.close()
"""
def test_onCloseCallbacks(self):
r"""doctest of onCloseCallback() method
>>> db = databaseFromString("<zodb>\n<mappingstorage/>\n</zodb>")
>>> cn = db.open()
Every function registered is called, even if it raises an
exception. They are only called once.
>>> L = []
>>> def f():
... L.append("f")
>>> def g():
... L.append("g")
... return 1 / 0
>>> cn.onCloseCallback(g)
>>> cn.onCloseCallback(f)
>>> cn.close()
>>> L
['g', 'f']
>>> del L[:]
>>> cn.close()
>>> L
[]
The implementation keeps a list of callbacks that is reset
to a class variable (which is bound to None) after the connection
is closed.
>>> cn._Connection__onCloseCallbacks
"""
def test_db(self):
r"""doctest of db() method
>>> db = databaseFromString("<zodb>\n<mappingstorage/>\n</zodb>")
>>> cn = db.open()
>>> cn.db() is db
True
>>> cn.close()
>>> cn.db()
"""
def test_isReadOnly(self):
r"""doctest of isReadOnly() method
>>> db = databaseFromString("<zodb>\n<mappingstorage/>\n</zodb>")
>>> cn = db.open()
>>> cn.isReadOnly()
False
>>> cn.close()
>>> cn.isReadOnly()
Traceback (most recent call last):
...
ConnectionStateError: The database connection is closed
An expedient way to create a read-only storage:
>>> db._storage._is_read_only = True
>>> cn = db.open()
>>> cn.isReadOnly()
True
"""
def test_cache(self):
r"""doctest of cacheMinimize() and cacheFullSweep() methods.
These tests are fairly minimal, just verifying that the
methods can be called and have some effect. We need other
tests that verify the cache works as intended.
>>> db = databaseFromString("<zodb>\n<mappingstorage/>\n</zodb>")
>>> cn = db.open()
>>> r = cn.root()
>>> cn.cacheMinimize()
>>> r._p_state
-1
The next couple of tests are involved because they have to
cater to backwards compatibility issues. The cacheMinimize()
method used to take an argument, but now ignores it.
cacheFullSweep() used to do something different than
cacheMinimize(), but it doesn't anymore. We want to verify
that these methods do something, but all cause deprecation
warnings. To do that, we need a warnings hook.
>>> hook = WarningsHook()
>>> hook.install()
>>> r._p_activate()
>>> cn.cacheMinimize(12)
>>> r._p_state
-1
>>> len(hook.warnings)
1
>>> message, category, filename, lineno = hook.warnings[0]
>>> message
'The dt argument to cacheMinimize is ignored.'
>>> category.__name__
'DeprecationWarning'
>>> hook.clear()
cacheFullSweep() is a doozy. It generates two deprecation
warnings, one from the Connection and one from the
cPickleCache. Maybe we should drop the cPickleCache warning,
but it's there for now. When passed an argument, it acts like
cacheGC(). When it isn't passed an argument it acts like
cacheMinimize().
>>> r._p_activate()
>>> cn.cacheFullSweep(12)
>>> r._p_state
0
>>> len(hook.warnings)
2
>>> message, category, filename, lineno = hook.warnings[0]
>>> message
'cacheFullSweep is deprecated. Use cacheMinimize instead.'
>>> category.__name__
'DeprecationWarning'
>>> message, category, filename, lineno = hook.warnings[1]
>>> message
'No argument expected'
>>> category.__name__
'DeprecationWarning'
We have to uninstall the hook so that other warnings don't get lost.
>>> hook.uninstall()
"""
class InvalidationTests(unittest.TestCase):
# It's harder to write serious tests, because some of the critical
# correctness issues relate to concurrency. We'll have to depend
# on the various concurrent updates and NZODBThreads tests to
# handle these.
def test_invalidate(self):
r"""
This test initializes the database with several persistent
objects, then manually delivers invalidations and verifies that
they have the expected effect.
>>> db = databaseFromString("<zodb>\n<mappingstorage/>\n</zodb>")
>>> cn = db.open()
>>> p1 = Persistent()
>>> p2 = Persistent()
>>> p3 = Persistent()
>>> r = cn.root()
>>> r.update(dict(p1=p1, p2=p2, p3=p3))
>>> transaction.commit()
Transaction ids are 8-byte strings, just like oids; p64() will
create one from an int.
>>> cn.invalidate(p64(1), {p1._p_oid: 1})
>>> cn._txn_time
'\x00\x00\x00\x00\x00\x00\x00\x01'
>>> p1._p_oid in cn._invalidated
True
>>> p2._p_oid in cn._invalidated
False
>>> cn.invalidate(p64(10), {p2._p_oid: 1, p64(76): 1})
>>> cn._txn_time
'\x00\x00\x00\x00\x00\x00\x00\x01'
>>> p1._p_oid in cn._invalidated
True
>>> p2._p_oid in cn._invalidated
True
Calling invalidate() doesn't affect the object state until
a transaction boundary.
>>> p1._p_state
0
>>> p2._p_state
0
>>> p3._p_state
0
The sync() method will abort the current transaction and
process any pending invalidations.
>>> cn.sync()
>>> p1._p_state
-1
>>> p2._p_state
-1
>>> p3._p_state
0
>>> cn._invalidated
{}
"""
# ---- stubs
class StubObject(Persistent):
pass
class StubTransaction:
pass
class ErrorOnGetstateException(Exception):
pass
class ErrorOnGetstateObject(Persistent):
def __getstate__(self):
raise ErrorOnGetstateException
class ModifyOnGetStateObject(Persistent):
def __init__(self, p):
self._v_p = p
def __getstate__(self):
self._p_jar.add(self._v_p)
self.p = self._v_p
return Persistent.__getstate__(self)
class StubStorage:
"""Very simple in-memory storage that does *just* enough to support tests.
Only one concurrent transaction is supported.
Voting is not supported.
Versions are not supported.
Inspect self._stored and self._finished to see how the storage has been
used during a unit test. Whenever an object is stored in the store()
method, its oid is appended to self._stored. When a transaction is
finished, the oids that have been stored during the transaction are
appended to self._finished.
"""
# internal
_oid = 1
_transaction = None
def __init__(self):
# internal
self._stored = []
self._finished = []
self._data = {}
self._transdata = {}
self._transstored = []
def new_oid(self):
oid = str(self._oid)
self._oid += 1
return oid
def sortKey(self):
return 'StubStorage sortKey'
def tpc_begin(self, transaction):
if transaction is None:
raise TypeError('transaction may not be None')
elif self._transaction is None:
self._transaction = transaction
elif self._transaction != transaction:
raise RuntimeError(
'StubStorage uses only one transaction at a time')
def tpc_abort(self, transaction):
if transaction is None:
raise TypeError('transaction may not be None')
elif self._transaction != transaction:
raise RuntimeError(
'StubStorage uses only one transaction at a time')
del self._transaction
self._transdata.clear()
def tpc_finish(self, transaction, callback):
if transaction is None:
raise TypeError('transaction may not be None')
elif self._transaction != transaction:
raise RuntimeError(
'StubStorage uses only one transaction at a time')
self._finished.extend(self._transstored)
self._data.update(self._transdata)
callback(transaction)
del self._transaction
self._transdata.clear()
self._transstored = []
def load(self, oid, version):
if version != '':
raise TypeError('StubStorage does not support versions.')
return self._data[oid]
def store(self, oid, serial, p, version, transaction):
if version != '':
raise TypeError('StubStorage does not support versions.')
if transaction is None:
raise TypeError('transaction may not be None')
elif self._transaction != transaction:
raise RuntimeError(
'StubStorage uses only one transaction at a time')
self._stored.append(oid)
self._transstored.append(oid)
self._transdata[oid] = (p, serial)
# Explicitly returning None, as we're not pretending to be a ZEO
# storage
return None
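# A minimal sketch of one StubStorage transaction cycle (the same calls a
# Connection issues while committing):
#
#   s = StubStorage()
#   t = StubTransaction()
#   s.tpc_begin(t)
#   oid = s.new_oid()
#   s.store(oid, None, 'a pickle', '', t)
#   s.tpc_finish(t, lambda txn: None)
#   assert s._stored == [oid] and s._finished == [oid]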
class TestConnectionInterface(unittest.TestCase):
def test(self):
from ZODB.interfaces import IConnection
db = databaseFromString("<zodb>\n<mappingstorage/>\n</zodb>")
cn = db.open()
verifyObject(IConnection, cn)
class StubDatabase:
def __init__(self):
self._storage = StubStorage()
classFactory = None
def invalidate(self, transaction, dict_with_oid_keys, connection):
pass
def test_suite():
s = unittest.makeSuite(ConnectionDotAdd, 'check')
s.addTest(doctest.DocTestSuite())
s.addTest(unittest.makeSuite(TestConnectionInterface))
return s
##############################################################################
#
# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
import os
import time
import unittest
import warnings
import transaction
import ZODB
import ZODB.FileStorage
from ZODB.tests.MinPO import MinPO
class DBTests(unittest.TestCase):
def setUp(self):
self.__path = os.path.abspath('test.fs')
store = ZODB.FileStorage.FileStorage(self.__path)
self.db = ZODB.DB(store)
def tearDown(self):
self.db.close()
for s in ('', '.index', '.lock', '.tmp'):
if os.path.exists(self.__path+s):
os.remove(self.__path+s)
def dowork(self, version=''):
c = self.db.open(version)
r = c.root()
o = r[time.time()] = MinPO(0)
transaction.commit()
for i in range(25):
o.value = MinPO(i)
transaction.commit()
o = o.value
c.close()
# make sure the basic methods are callable
def testSets(self):
# test set methods that have non-trivial implementations
warnings.filterwarnings("error", category=DeprecationWarning)
self.assertRaises(DeprecationWarning,
self.db.setCacheDeactivateAfter, 12)
self.assertRaises(DeprecationWarning,
self.db.setVersionCacheDeactivateAfter, 12)
# XXX There is no API call for removing the warning we just
# added, but filters appears to be a public variable.
del warnings.filters[0]
self.db.setCacheSize(15)
self.db.setVersionCacheSize(15)
def test_removeVersionPool(self):
# Test that we can remove a version pool
# This is white box because we check some internal data structures
self.dowork()
self.dowork('v2')
c1 = self.db.open('v1')
c1.close() # return to pool
c12 = self.db.open('v1')
c12.close() # return to pool
self.assert_(c1 is c12) # should be same
pools, pooll = self.db._pools
self.assertEqual(len(pools), 3)
self.assertEqual(len(pooll), 3)
self.db.removeVersionPool('v1')
self.assertEqual(len(pools), 2)
self.assertEqual(len(pooll), 2)
c12 = self.db.open('v1')
c12.close() # return to pool
self.assert_(c1 is not c12) # should be different
self.assertEqual(len(pools), 3)
self.assertEqual(len(pooll), 3)
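# The counts above break down as: one pool for the unversioned connection
# ('') plus one each for 'v1' and 'v2' gives 3; removeVersionPool('v1')
# drops it to 2, and reopening 'v1' builds a fresh pool, which is why the
# new connection is a different object.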
def _test_for_leak(self):
self.dowork()
self.dowork('v2')
while 1:
c1 = self.db.open('v1')
self.db.removeVersionPool('v1')
c1.close() # return to pool
def test_removeVersionPool_while_connection_open(self):
# Test that we can remove a version pool
# This is white box because we check some internal data structures
self.dowork()
self.dowork('v2')
c1 = self.db.open('v1')
c1.close() # return to pool
c12 = self.db.open('v1')
self.assert_(c1 is c12) # should be same
pools, pooll = self.db._pools
self.assertEqual(len(pools), 3)
self.assertEqual(len(pooll), 3)
self.db.removeVersionPool('v1')
self.assertEqual(len(pools), 2)
self.assertEqual(len(pooll), 2)
c12.close() # should leave pools alone
self.assertEqual(len(pools), 2)
self.assertEqual(len(pooll), 2)
c12 = self.db.open('v1')
c12.close() # return to pool
self.assert_(c1 is not c12) # should be different
self.assertEqual(len(pools), 3)
self.assertEqual(len(pooll), 3)
def test_suite():
return unittest.makeSuite(DBTests)
##############################################################################
#
# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
import ZODB.DemoStorage
import unittest
from ZODB.tests import StorageTestBase, BasicStorage, \
VersionStorage, Synchronization
class DemoStorageTests(StorageTestBase.StorageTestBase,
BasicStorage.BasicStorage,
VersionStorage.VersionStorage,
Synchronization.SynchronizedStorage,
):
def setUp(self):
self._storage = ZODB.DemoStorage.DemoStorage()
def tearDown(self):
self._storage.close()
def checkOversizeNote(self):
# This base class test checks for the common case where a storage
# doesn't support huge transaction metadata. This storage doesn't
# have this limit, so we inhibit this test here.
pass
def checkAbortVersionNonCurrent(self):
# XXX Need to implement a real loadBefore for DemoStorage?
pass
def checkLoadBeforeVersion(self):
# XXX Need to implement a real loadBefore for DemoStorage?
pass
# the next three pack tests depend on undo
def checkPackVersionReachable(self):
pass
def checkPackVersions(self):
pass
def checkPackVersionsInPast(self):
pass
def test_suite():
suite = unittest.makeSuite(DemoStorageTests, 'check')
return suite
if __name__ == "__main__":
loader = unittest.TestLoader()
loader.testMethodPrefix = "check"
unittest.main(testLoader=loader)
##############################################################################
#
# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
import os, unittest
import transaction
import ZODB.FileStorage
from ZODB import POSException
from ZODB.tests import StorageTestBase, BasicStorage, \
TransactionalUndoStorage, VersionStorage, \
TransactionalUndoVersionStorage, PackableStorage, \
Synchronization, ConflictResolution, HistoryStorage, \
IteratorStorage, Corruption, RevisionStorage, PersistentStorage, \
MTStorage, ReadOnlyStorage, RecoveryStorage
from ZODB.tests.StorageTestBase import MinPO, zodb_pickle
class BaseFileStorageTests(StorageTestBase.StorageTestBase):
def open(self, **kwargs):
self._storage = ZODB.FileStorage.FileStorage('FileStorageTests.fs',
**kwargs)
def setUp(self):
self.open(create=1)
def tearDown(self):
self._storage.close()
self._storage.cleanup()
class FileStorageTests(
BaseFileStorageTests,
BasicStorage.BasicStorage,
TransactionalUndoStorage.TransactionalUndoStorage,
RevisionStorage.RevisionStorage,
VersionStorage.VersionStorage,
TransactionalUndoVersionStorage.TransactionalUndoVersionStorage,
PackableStorage.PackableStorage,
PackableStorage.PackableUndoStorage,
Synchronization.SynchronizedStorage,
ConflictResolution.ConflictResolvingStorage,
ConflictResolution.ConflictResolvingTransUndoStorage,
HistoryStorage.HistoryStorage,
IteratorStorage.IteratorStorage,
IteratorStorage.ExtendedIteratorStorage,
PersistentStorage.PersistentStorage,
MTStorage.MTStorage,
ReadOnlyStorage.ReadOnlyStorage
):
def checkLongMetadata(self):
s = "X" * 75000
try:
self._dostore(user=s)
except POSException.StorageError:
pass
else:
self.fail("expect long user field to raise error")
try:
self._dostore(description=s)
except POSException.StorageError:
pass
else:
self.fail("expect long user field to raise error")
def check_use_fsIndex(self):
from ZODB.fsIndex import fsIndex
self.assertEqual(self._storage._index.__class__, fsIndex)
# XXX We could really use some tests for sanity checking
def check_conversion_to_fsIndex_not_if_readonly(self):
self.tearDown()
class OldFileStorage(ZODB.FileStorage.FileStorage):
def _newIndexes(self):
return {}, {}, {}, {}, {}, {}, {}
from ZODB.fsIndex import fsIndex
# Hack FileStorage to create dictionary indexes
self._storage = OldFileStorage('FileStorageTests.fs')
self.assertEqual(type(self._storage._index), type({}))
for i in range(10):
self._dostore()
# Should save the index
self._storage.close()
self._storage = ZODB.FileStorage.FileStorage(
'FileStorageTests.fs', read_only=1)
self.assertEqual(type(self._storage._index), type({}))
def check_conversion_to_fsIndex(self):
self.tearDown()
class OldFileStorage(ZODB.FileStorage.FileStorage):
def _newIndexes(self):
return {}, {}, {}, {}, {}, {}, {}
from ZODB.fsIndex import fsIndex
# Hack FileStorage to create dictionary indexes
self._storage = OldFileStorage('FileStorageTests.fs')
self.assertEqual(type(self._storage._index), type({}))
for i in range(10):
self._dostore()
oldindex = self._storage._index.copy()
# Should save the index
self._storage.close()
self._storage = ZODB.FileStorage.FileStorage('FileStorageTests.fs')
self.assertEqual(self._storage._index.__class__, fsIndex)
self.failUnless(self._storage._used_index)
index = {}
for k, v in self._storage._index.items():
index[k] = v
self.assertEqual(index, oldindex)
def check_save_after_load_with_no_index(self):
for i in range(10):
self._dostore()
self._storage.close()
os.remove('FileStorageTests.fs.index')
self.open()
self.assertEqual(self._storage._saved, 1)
# This would make the unit tests too slow
# check_save_after_load_that_worked_hard(self)
def check_periodic_save_index(self):
# Check the basic algorithm
oldsaved = self._storage._saved
self._storage._records_before_save = 10
for i in range(4):
self._dostore()
self.assertEqual(self._storage._saved, oldsaved)
self._dostore()
self.assertEqual(self._storage._saved, oldsaved+1)
# Now make sure the parameter changes as we get bigger
for i in range(20):
self._dostore()
self.failUnless(self._storage._records_before_save > 20)
def checkCorruptionInPack(self):
# This sets up a corrupt .fs file, with a redundant transaction
# length mismatch. The implementation of pack in many releases of
# ZODB blew up if the .fs file had such damage: it detected the
# damage, but the code to raise CorruptedError referenced an undefined
# global.
import time
from ZODB.DB import DB
from ZODB.utils import U64, p64
from ZODB.FileStorage.format import CorruptedError
db = DB(self._storage)
conn = db.open()
conn.root()['xyz'] = 1
transaction.commit()
# Ensure it's all on disk.
db.close()
self._storage.close()
# Reopen before damaging.
self.open()
# Open .fs directly, and damage content.
f = open('FileStorageTests.fs', 'r+b')
f.seek(0, 2)
pos2 = f.tell() - 8
f.seek(pos2)
tlen2 = U64(f.read(8)) # length-8 of the last transaction
pos1 = pos2 - tlen2 + 8 # skip over the tid at the start
f.seek(pos1)
tlen1 = U64(f.read(8)) # should be redundant length-8
self.assertEqual(tlen1, tlen2) # verify that it is redundant
# Now damage the second copy.
f.seek(pos2)
f.write(p64(tlen2 - 1))
f.close()
# Try to pack. This used to yield
# NameError: global name 's' is not defined
try:
self._storage.pack(time.time(), None)
except CorruptedError, detail:
self.assert_("redundant transaction length does not match "
"initial transaction length" in str(detail))
else:
self.fail("expected CorruptedError")
class FileStorageRecoveryTest(
StorageTestBase.StorageTestBase,
RecoveryStorage.RecoveryStorage,
):
def setUp(self):
self._storage = ZODB.FileStorage.FileStorage("Source.fs", create=True)
self._dst = ZODB.FileStorage.FileStorage("Dest.fs", create=True)
def tearDown(self):
self._storage.close()
self._dst.close()
self._storage.cleanup()
self._dst.cleanup()
def new_dest(self):
return ZODB.FileStorage.FileStorage('Dest.fs')
class SlowFileStorageTest(BaseFileStorageTests):
level = 2
def check10Kstores(self):
# The _get_cached_serial() method has a special case
# every 8000 calls. Make sure it gets minimal coverage.
oids = [[self._storage.new_oid(), None] for i in range(100)]
for i in range(100):
t = transaction.Transaction()
self._storage.tpc_begin(t)
for j in range(100):
o = MinPO(j)
oid, revid = oids[j]
serial = self._storage.store(oid, revid, zodb_pickle(o), "", t)
oids[j][1] = serial
self._storage.tpc_vote(t)
self._storage.tpc_finish(t)
# Raise an exception if the tids in FileStorage fs aren't
# strictly increasing.
def checkIncreasingTids(fs):
lasttid = '\0' * 8
for txn in fs.iterator():
if lasttid >= txn.tid:
raise ValueError("tids out of order %r >= %r" % (lasttid, tid))
lasttid = txn.tid
# Return a TimeStamp object 'minutes' minutes in the future.
def timestamp(minutes):
import time
from persistent.TimeStamp import TimeStamp
t = time.time() + 60 * minutes
return TimeStamp(*time.gmtime(t)[:5] + (t % 60,))
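# timestamp(15), for example, yields a TimeStamp roughly 15 minutes ahead
# of the current time; the doctest below plants such values in st._ts to
# simulate a storage whose last transaction came from the future.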
def testTimeTravelOnOpen():
"""
>>> from ZODB.FileStorage import FileStorage
>>> from ZODB.DB import DB
>>> import transaction
>>> from ZODB.tests.loggingsupport import InstalledHandler
Arrange to capture log messages -- they're an important part of
this test!
>>> handler = InstalledHandler('ZODB.FileStorage')
Create a new file storage.
>>> st = FileStorage('temp.fs', create=True)
>>> db = DB(st)
>>> db.close()
First check the normal case: transactions are recorded with
increasing tids, and time doesn't run backwards.
>>> st = FileStorage('temp.fs')
>>> db = DB(st)
>>> conn = db.open()
>>> conn.root()['xyz'] = 1
>>> transaction.get().commit()
>>> checkIncreasingTids(st)
>>> db.close()
>>> st.cleanup() # remove .fs, .index, etc files
>>> handler.records # i.e., no log messages
[]
Now force the database to have transaction records with tids from
the future.
>>> st = FileStorage('temp.fs', create=True)
>>> st._ts = timestamp(15) # 15 minutes in the future
>>> db = DB(st)
>>> db.close()
>>> st = FileStorage('temp.fs') # this should log a warning
>>> db = DB(st)
>>> conn = db.open()
>>> conn.root()['xyz'] = 1
>>> transaction.get().commit()
>>> checkIncreasingTids(st)
>>> db.close()
>>> st.cleanup()
>>> [record.levelname for record in handler.records]
['WARNING']
>>> handler.clear()
And one more time, with transaction records far in the future.
We expect to log a critical error then, as a time so far in the
future probably indicates a real problem with the system. Shorter
spans may be due to clock drift.
>>> st = FileStorage('temp.fs', create=True)
>>> st._ts = timestamp(60) # an hour in the future
>>> db = DB(st)
>>> db.close()
>>> st = FileStorage('temp.fs') # this should log a critical error
>>> db = DB(st)
>>> conn = db.open()
>>> conn.root()['xyz'] = 1
>>> transaction.get().commit()
>>> checkIncreasingTids(st)
>>> db.close()
>>> st.cleanup()
>>> [record.levelname for record in handler.records]
['CRITICAL']
>>> handler.clear()
>>> handler.uninstall()
"""
def test_suite():
import doctest
suite = unittest.TestSuite()
for klass in [FileStorageTests, Corruption.FileStorageCorruptTests,
FileStorageRecoveryTest, SlowFileStorageTest]:
suite.addTest(unittest.makeSuite(klass, "check"))
suite.addTest(doctest.DocTestSuite())
return suite
if __name__=='__main__':
unittest.main()
##############################################################################
#
# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
import ZODB.MappingStorage
import unittest
from ZODB.tests import StorageTestBase
from ZODB.tests \
import BasicStorage, MTStorage, Synchronization, PackableStorage
class MappingStorageTests(StorageTestBase.StorageTestBase,
BasicStorage.BasicStorage,
MTStorage.MTStorage,
PackableStorage.PackableStorage,
Synchronization.SynchronizedStorage,
):
def setUp(self):
self._storage = ZODB.MappingStorage.MappingStorage()
def tearDown(self):
self._storage.close()
def checkOversizeNote(self):
        # This base class test checks for the common case where a storage
        # doesn't support huge transaction metadata. This storage doesn't
        # have that limit, so we inhibit the test here.
pass
def test_suite():
suite = unittest.makeSuite(MappingStorageTests, 'check')
return suite
if __name__ == "__main__":
loader = unittest.TestLoader()
loader.testMethodPrefix = "check"
unittest.main(testLoader=loader)
##############################################################################
#
# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Test the list interface to PersistentList
"""
import unittest
from persistent.list import PersistentList
l0 = []
l1 = [0]
l2 = [0, 1]
class TestPList(unittest.TestCase):
def checkTheWorld(self):
# Test constructors
u = PersistentList()
u0 = PersistentList(l0)
u1 = PersistentList(l1)
u2 = PersistentList(l2)
uu = PersistentList(u)
uu0 = PersistentList(u0)
uu1 = PersistentList(u1)
uu2 = PersistentList(u2)
v = PersistentList(tuple(u))
class OtherList:
def __init__(self, initlist):
self.__data = initlist
def __len__(self):
return len(self.__data)
def __getitem__(self, i):
return self.__data[i]
v0 = PersistentList(OtherList(u0))
vv = PersistentList("this is also a sequence")
# Test __repr__
eq = self.assertEqual
eq(str(u0), str(l0), "str(u0) == str(l0)")
eq(repr(u1), repr(l1), "repr(u1) == repr(l1)")
eq(`u2`, `l2`, "`u2` == `l2`")
# Test __cmp__ and __len__
def mycmp(a, b):
r = cmp(a, b)
if r < 0: return -1
if r > 0: return 1
return r
all = [l0, l1, l2, u, u0, u1, u2, uu, uu0, uu1, uu2]
for a in all:
for b in all:
eq(mycmp(a, b), mycmp(len(a), len(b)),
"mycmp(a, b) == mycmp(len(a), len(b))")
# Test __getitem__
for i in range(len(u2)):
eq(u2[i], i, "u2[i] == i")
# Test __setitem__
uu2[0] = 0
uu2[1] = 100
try:
uu2[2] = 200
except IndexError:
pass
else:
raise TestFailed("uu2[2] shouldn't be assignable")
# Test __delitem__
del uu2[1]
del uu2[0]
try:
del uu2[0]
except IndexError:
pass
else:
raise TestFailed("uu2[0] shouldn't be deletable")
# Test __getslice__
for i in range(-3, 4):
eq(u2[:i], l2[:i], "u2[:i] == l2[:i]")
eq(u2[i:], l2[i:], "u2[i:] == l2[i:]")
for j in range(-3, 4):
eq(u2[i:j], l2[i:j], "u2[i:j] == l2[i:j]")
# Test __setslice__
for i in range(-3, 4):
u2[:i] = l2[:i]
eq(u2, l2, "u2 == l2")
u2[i:] = l2[i:]
eq(u2, l2, "u2 == l2")
for j in range(-3, 4):
u2[i:j] = l2[i:j]
eq(u2, l2, "u2 == l2")
uu2 = u2[:]
uu2[:0] = [-2, -1]
eq(uu2, [-2, -1, 0, 1], "uu2 == [-2, -1, 0, 1]")
uu2[0:] = []
eq(uu2, [], "uu2 == []")
# Test __contains__
for i in u2:
self.failUnless(i in u2, "i in u2")
for i in min(u2)-1, max(u2)+1:
self.failUnless(i not in u2, "i not in u2")
# Test __delslice__
uu2 = u2[:]
del uu2[1:2]
del uu2[0:1]
eq(uu2, [], "uu2 == []")
uu2 = u2[:]
del uu2[1:]
del uu2[:1]
eq(uu2, [], "uu2 == []")
# Test __add__, __radd__, __mul__ and __rmul__
#self.failUnless(u1 + [] == [] + u1 == u1, "u1 + [] == [] + u1 == u1")
self.failUnless(u1 + [1] == u2, "u1 + [1] == u2")
#self.failUnless([-1] + u1 == [-1, 0], "[-1] + u1 == [-1, 0]")
self.failUnless(u2 == u2*1 == 1*u2, "u2 == u2*1 == 1*u2")
self.failUnless(u2+u2 == u2*2 == 2*u2, "u2+u2 == u2*2 == 2*u2")
self.failUnless(u2+u2+u2 == u2*3 == 3*u2, "u2+u2+u2 == u2*3 == 3*u2")
# Test append
u = u1[:]
u.append(1)
eq(u, u2, "u == u2")
# Test insert
u = u2[:]
u.insert(0, -1)
eq(u, [-1, 0, 1], "u == [-1, 0, 1]")
# Test pop
u = PersistentList([0, -1, 1])
u.pop()
eq(u, [0, -1], "u == [0, -1]")
u.pop(0)
eq(u, [-1], "u == [-1]")
# Test remove
u = u2[:]
u.remove(1)
eq(u, u1, "u == u1")
# Test count
u = u2*3
eq(u.count(0), 3, "u.count(0) == 3")
eq(u.count(1), 3, "u.count(1) == 3")
eq(u.count(2), 0, "u.count(2) == 0")
# Test index
eq(u2.index(0), 0, "u2.index(0) == 0")
eq(u2.index(1), 1, "u2.index(1) == 1")
try:
u2.index(2)
except ValueError:
pass
else:
raise TestFailed("expected ValueError")
# Test reverse
u = u2[:]
u.reverse()
eq(u, [1, 0], "u == [1, 0]")
u.reverse()
eq(u, u2, "u == u2")
# Test sort
u = PersistentList([1, 0])
u.sort()
eq(u, u2, "u == u2")
# Test extend
u = u1[:]
u.extend(u2)
eq(u, u1 + u2, "u == u1 + u2")
def test_suite():
return unittest.makeSuite(TestPList, 'check')
if __name__ == "__main__":
loader = unittest.TestLoader()
loader.testMethodPrefix = "check"
unittest.main(testLoader=loader)
##############################################################################
#
# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Verify that PersistentMapping works with old versions of Zope.
The comments in PersistentMapping.py address the issue in some detail.
The pickled form of a PersistentMapping must use _container to store
the actual mapping, because old versions of Zope used this attribute.
If the new code doesn't generate pickles that are consistent with the
old code, developers will have a hard time testing the new code.
"""
import unittest
import transaction
from transaction import Transaction
import ZODB
from ZODB.MappingStorage import MappingStorage
import cPickle
import cStringIO
import sys
# This pickle contains a persistent mapping pickle created from the
# old code.
pickle = ('((U\x0bPersistenceq\x01U\x11PersistentMappingtq\x02Nt.}q\x03U\n'
'_containerq\x04}q\x05U\x07versionq\x06U\x03oldq\x07ss.\n')
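# The constant is really two pickles back to back, mirroring the ZODB
# record format: class info ((Persistence, PersistentMapping), None),
# then the state {'_container': {'version': 'old'}}.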
class PMTests(unittest.TestCase):
def checkOldStyleRoot(self):
# insert the pickle in place of the root
s = MappingStorage()
t = Transaction()
s.tpc_begin(t)
s.store('\000' * 8, None, pickle, '', t)
s.tpc_vote(t)
s.tpc_finish(t)
db = ZODB.DB(s)
# If the root can be loaded successfully, we should be okay.
r = db.open().root()
# But make sure it looks like a new mapping
self.assert_(hasattr(r, 'data'))
self.assert_(not hasattr(r, '_container'))
def checkNewPicklesAreSafe(self):
s = MappingStorage()
db = ZODB.DB(s)
r = db.open().root()
r[1] = 1
r[2] = 2
r[3] = r
transaction.commit()
# MappingStorage stores serialno + pickle in its _index.
root_pickle = s._index['\000' * 8][8:]
f = cStringIO.StringIO(root_pickle)
u = cPickle.Unpickler(f)
klass_info = u.load()
klass = find_global(*klass_info[0])
inst = klass.__new__(klass)
state = u.load()
inst.__setstate__(state)
self.assert_(hasattr(inst, '_container'))
self.assert_(not hasattr(inst, 'data'))
def find_global(modulename, classname):
"""Helper for this test suite to get special PersistentMapping"""
if classname == "PersistentMapping":
class PersistentMapping(object):
def __setstate__(self, state):
self.__dict__.update(state)
return PersistentMapping
else:
__import__(modulename)
mod = sys.modules[modulename]
return getattr(mod, classname)
def test_suite():
    return unittest.makeSuite(PMTests, 'check')
if __name__ == "__main__":
unittest.main()
##############################################################################
#
# Copyright (c) 2003 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Tests of the file storage recovery script."""
import base64
import os
import random
import sys
import tempfile
import unittest
import StringIO
import ZODB
from ZODB.FileStorage import FileStorage
from ZODB.fsrecover import recover
from persistent.mapping import PersistentMapping
import transaction
class RecoverTest(unittest.TestCase):
level = 2
path = None
def setUp(self):
self.path = tempfile.mktemp(suffix=".fs")
self.storage = FileStorage(self.path)
self.populate()
self.dest = tempfile.mktemp(suffix=".fs")
self.recovered = None
def tearDown(self):
self.storage.close()
if self.recovered is not None:
self.recovered.close()
self.storage.cleanup()
temp = FileStorage(self.dest)
temp.close()
temp.cleanup()
def populate(self):
db = ZODB.DB(self.storage)
cn = db.open()
rt = cn.root()
# Create a bunch of objects; the Data.fs is about 100KB.
for i in range(50):
d = rt[i] = PersistentMapping()
transaction.commit()
for j in range(50):
d[j] = "a" * j
transaction.commit()
def damage(self, num, size):
self.storage.close()
# Drop size null bytes into num random spots.
for i in range(num):
offset = random.randint(0, self.storage._pos - size)
f = open(self.path, "a+b")
f.seek(offset)
f.write("\0" * size)
f.close()
ITERATIONS = 5
# Run recovery, from self.path to self.dest. Return whatever
# recovery printed to stdout, as a string.
def recover(self):
orig_stdout = sys.stdout
faux_stdout = StringIO.StringIO()
try:
sys.stdout = faux_stdout
try:
recover(self.path, self.dest,
verbose=0, partial=True, force=False, pack=1)
except SystemExit:
raise RuntimeError, "recover tried to exit"
finally:
sys.stdout = orig_stdout
return faux_stdout.getvalue()
# Caution: because recovery is robust against many kinds of damage,
# it's almost impossible for a call to self.recover() to raise an
# exception. As a result, these tests may pass even if fsrecover.py
# is broken badly. testNoDamage() tries to ensure that at least
# recovery doesn't produce any error msgs if the input .fs is in
# fact not damaged.
def testNoDamage(self):
output = self.recover()
self.assert_('error' not in output, output)
self.assert_('\n0 bytes removed during recovery' in output, output)
# Verify that the recovered database is identical to the original.
before = file(self.path, 'rb')
before_guts = before.read()
before.close()
after = file(self.dest, 'rb')
after_guts = after.read()
after.close()
self.assertEqual(before_guts, after_guts,
"recovery changed a non-damaged .fs file")
def testOneBlock(self):
for i in range(self.ITERATIONS):
self.damage(1, 1024)
output = self.recover()
self.assert_('error' in output, output)
self.recovered = FileStorage(self.dest)
self.recovered.close()
os.remove(self.path)
os.rename(self.dest, self.path)
def testFourBlocks(self):
for i in range(self.ITERATIONS):
self.damage(4, 512)
output = self.recover()
self.assert_('error' in output, output)
self.recovered = FileStorage(self.dest)
self.recovered.close()
os.remove(self.path)
os.rename(self.dest, self.path)
def testBigBlock(self):
for i in range(self.ITERATIONS):
self.damage(1, 32 * 1024)
output = self.recover()
self.assert_('error' in output, output)
self.recovered = FileStorage(self.dest)
self.recovered.close()
os.remove(self.path)
os.rename(self.dest, self.path)
def testBadTransaction(self):
# Find transaction headers and blast them.
L = self.storage.undoLog()
r = L[3]
tid = base64.decodestring(r["id"] + "\n")
pos1 = self.storage._txn_find(tid, 0)
r = L[8]
tid = base64.decodestring(r["id"] + "\n")
pos2 = self.storage._txn_find(tid, 0)
self.storage.close()
# Overwrite the entire header.
f = open(self.path, "a+b")
f.seek(pos1 - 50)
f.write("\0" * 100)
f.close()
output = self.recover()
self.assert_('error' in output, output)
self.recovered = FileStorage(self.dest)
self.recovered.close()
os.remove(self.path)
os.rename(self.dest, self.path)
# Overwrite part of the header.
f = open(self.path, "a+b")
f.seek(pos2 + 10)
f.write("\0" * 100)
f.close()
output = self.recover()
self.assert_('error' in output, output)
self.recovered = FileStorage(self.dest)
self.recovered.close()
def test_suite():
return unittest.makeSuite(RecoverTest)
##############################################################################
#
# Copyright (c) 2004 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Tests of the serializer."""
import cPickle
import cStringIO as StringIO
import sys
import unittest
from ZODB import serialize
class ClassWithNewargs(int):
def __new__(cls, value):
return int.__new__(cls, value)
def __getnewargs__(self):
return int(self),
class ClassWithoutNewargs(object):
def __init__(self, value):
self.value = value
def make_pickle(ob):
sio = StringIO.StringIO()
p = cPickle.Pickler(sio, 1)
p.dump(ob)
return sio.getvalue()
class SerializerTestCase(unittest.TestCase):
# old format: (module, name), None
old_style_without_newargs = make_pickle(
((__name__, "ClassWithoutNewargs"), None))
# old format: (module, name), argtuple
old_style_with_newargs = make_pickle(
((__name__, "ClassWithNewargs"), (1,)))
# new format: klass
new_style_without_newargs = make_pickle(
ClassWithoutNewargs)
# new format: klass, argtuple
new_style_with_newargs = make_pickle(
(ClassWithNewargs, (1,)))
def test_getClassName(self):
r = serialize.BaseObjectReader()
eq = self.assertEqual
eq(r.getClassName(self.old_style_with_newargs),
__name__ + ".ClassWithNewargs")
eq(r.getClassName(self.new_style_with_newargs),
__name__ + ".ClassWithNewargs")
eq(r.getClassName(self.old_style_without_newargs),
__name__ + ".ClassWithoutNewargs")
eq(r.getClassName(self.new_style_without_newargs),
__name__ + ".ClassWithoutNewargs")
def test_getGhost(self):
# Use a TestObjectReader since we need _get_class() to be
# implemented; otherwise this is just a BaseObjectReader.
class TestObjectReader(serialize.BaseObjectReader):
# A production object reader would optimize this, but we
# don't need to in a test
def _get_class(self, module, name):
__import__(module)
return getattr(sys.modules[module], name)
r = TestObjectReader()
g = r.getGhost(self.old_style_with_newargs)
self.assert_(isinstance(g, ClassWithNewargs))
self.assertEqual(g, 1)
g = r.getGhost(self.old_style_without_newargs)
self.assert_(isinstance(g, ClassWithoutNewargs))
g = r.getGhost(self.new_style_with_newargs)
self.assert_(isinstance(g, ClassWithNewargs))
g = r.getGhost(self.new_style_without_newargs)
self.assert_(isinstance(g, ClassWithoutNewargs))
def test_myhasattr(self):
class OldStyle:
bar = "bar"
def __getattr__(self, name):
if name == "error":
raise ValueError("whee!")
else:
raise AttributeError, name
class NewStyle(object):
bar = "bar"
def _raise(self):
raise ValueError("whee!")
error = property(_raise)
self.assertRaises(ValueError,
serialize.myhasattr, OldStyle(), "error")
self.assertRaises(ValueError,
serialize.myhasattr, NewStyle(), "error")
self.assert_(serialize.myhasattr(OldStyle(), "bar"))
self.assert_(serialize.myhasattr(NewStyle(), "bar"))
self.assert_(not serialize.myhasattr(OldStyle(), "rat"))
self.assert_(not serialize.myhasattr(NewStyle(), "rat"))
def test_suite():
import doctest
suite = unittest.makeSuite(SerializerTestCase)
suite.addTest(doctest.DocTestSuite("ZODB.serialize"))
return suite
##############################################################################
#
# Copyright (c) 2004 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
r"""
ZODB subtransaction tests
=========================
Subtransactions are provided by a generic transaction interface, but
only supported by ZODB. These tests verify that some of the important
cases work as expected.
>>> import transaction
>>> from ZODB import DB
>>> from ZODB.tests.test_storage import MinimalMemoryStorage
>>> from ZODB.tests.MinPO import MinPO
First create a few objects in the database root with a normal commit.
We're going to make a series of modifications to these objects.
>>> db = DB(MinimalMemoryStorage())
>>> cn = db.open()
>>> rt = cn.root()
>>> def init():
... global a, b, c
... a = rt["a"] = MinPO("a0")
... b = rt["b"] = MinPO("b0")
... c = rt["c"] = MinPO("c0")
... transaction.commit()
>>> init()
We'll also open a second database connection and use it to verify that
the intermediate results of subtransactions are not visible to other
connections.
>>> cn2 = db.open(synch=False)
>>> rt2 = cn2.root()
>>> shadow_a = rt2["a"]
>>> shadow_b = rt2["b"]
>>> shadow_c = rt2["c"]
Subtransaction commit
---------------------
We'll make a series of modifications in subtransactions.
>>> a.value = "a1"
>>> b.value = "b1"
>>> transaction.commit(1)
>>> a.value, b.value
('a1', 'b1')
>>> shadow_a.value, shadow_b.value
('a0', 'b0')
>>> a.value = "a2"
>>> c.value = "c1"
>>> transaction.commit(1)
>>> a.value, c.value
('a2', 'c1')
>>> shadow_a.value, shadow_c.value
('a0', 'c0')
>>> a.value = "a3"
>>> transaction.commit(1)
>>> a.value
'a3'
>>> shadow_a.value
'a0'
>>> transaction.commit()
>>> a.value, b.value, c.value
('a3', 'b1', 'c1')
Subtransaction with nested abort
--------------------------------
>>> init()
>>> a.value = "a1"
>>> transaction.commit(1)
>>> b.value = "b1"
>>> transaction.commit(1)
A sub-transaction abort will undo current changes, reverting to the
database state as of the last sub-transaction commit. There is
(apparently) no way to abort an already-committed subtransaction.
>>> c.value = "c1"
>>> transaction.abort(1)
Multiple aborts have no extra effect.
>>> transaction.abort(1)
>>> a.value, b.value, c.value
('a1', 'b1', 'c0')
>>> transaction.commit()
>>> a.value, b.value, c.value
('a1', 'b1', 'c0')
Subtransaction with top-level abort
-----------------------------------
>>> init()
>>> a.value = "a1"
>>> transaction.commit(1)
>>> b.value = "b1"
>>> transaction.commit(1)
A sub-transaction abort will undo current changes, reverting to the
database state as of the last sub-transaction commit. There is
(apparently) no way to abort an already-committed subtransaction.
>>> c.value = "c1"
>>> transaction.abort(1)
>>> transaction.abort()
>>> a.value, b.value, c.value
('a0', 'b0', 'c0')
"""
import doctest
def test_suite():
return doctest.DocTestSuite()
##############################################################################
#
# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Test the TimeStamp utility type"""
import time
import unittest
from persistent.TimeStamp import TimeStamp
EPSILON = 0.000001
class TimeStampTests(unittest.TestCase):
def checkYMDTimeStamp(self):
self._check_ymd(2001, 6, 3)
def _check_ymd(self, yr, mo, dy):
ts = TimeStamp(yr, mo, dy)
self.assertEqual(ts.year(), yr)
self.assertEqual(ts.month(), mo)
self.assertEqual(ts.day(), dy)
self.assertEquals(ts.hour(), 0)
self.assertEquals(ts.minute(), 0)
self.assertEquals(ts.second(), 0)
t = time.gmtime(ts.timeTime())
self.assertEquals(yr, t[0])
self.assertEquals(mo, t[1])
self.assertEquals(dy, t[2])
def checkFullTimeStamp(self):
native_ts = int(time.time()) # fractional seconds get in the way
t = time.gmtime(native_ts) # the corresponding GMT struct tm
ts = TimeStamp(*t[:6])
# Seconds are stored internally via (conceptually) multiplying by
# 2**32 then dividing by 60, ending up with a 32-bit integer.
# While this gives a lot of room for cramming many distinct
# TimeStamps into a second, it's not good at roundtrip accuracy.
# For example, 1 second is stored as int(2**32/60) == 71582788.
# Converting back gives 71582788*60.0/2**32 == 0.9999999962747097.
# In general, we can lose up to 0.999... to truncation during
# storing, creating an absolute error up to about 1*60.0/2**32 ==
# 0.000000014 on the seconds value we get back. This is so even
# when we have an exact integral second value going in (as we
# do in this test), so we can't expect equality in any comparison
# involving seconds. Minutes (etc) are stored exactly, so we
# can expect equality for those.
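        # (Worked example: an input of 5 seconds is stored as
        # int(5 * 2**32 / 60) == 357913941, and 357913941 * 60.0 / 2**32
        # comes back as about 4.9999999953.)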
self.assert_(abs(ts.timeTime() - native_ts) < EPSILON)
self.assertEqual(ts.year(), t[0])
self.assertEqual(ts.month(), t[1])
self.assertEqual(ts.day(), t[2])
self.assertEquals(ts.hour(), t[3])
self.assertEquals(ts.minute(), t[4])
self.assert_(abs(ts.second() - t[5]) < EPSILON)
def checkRawTimestamp(self):
t = time.gmtime()
ts1 = TimeStamp(*t[:6])
ts2 = TimeStamp(`ts1`)
self.assertEquals(ts1, ts2)
self.assertEquals(ts1.timeTime(), ts2.timeTime())
self.assertEqual(ts1.year(), ts2.year())
self.assertEqual(ts1.month(), ts2.month())
self.assertEqual(ts1.day(), ts2.day())
self.assertEquals(ts1.hour(), ts2.hour())
self.assertEquals(ts1.minute(), ts2.minute())
self.assert_(abs(ts1.second() - ts2.second()) < EPSILON)
def checkDictKey(self):
t = time.gmtime()
ts1 = TimeStamp(*t[:6])
ts2 = TimeStamp(2000, *t[1:6])
d = {}
d[ts1] = 1
d[ts2] = 2
self.assertEquals(len(d), 2)
def checkCompare(self):
ts1 = TimeStamp(1972, 6, 27)
ts2 = TimeStamp(1971, 12, 12)
self.assert_(ts1 > ts2)
self.assert_(ts2 <= ts1)
def checkLaterThan(self):
t = time.gmtime()
ts = TimeStamp(*t[:6])
ts2 = ts.laterThan(ts)
self.assert_(ts2 > ts)
# XXX should test for bogus inputs to TimeStamp constructor
def checkTimeStamp(self):
# Alternate test suite
t = TimeStamp(2002, 1, 23, 10, 48, 5) # GMT
self.assertEquals(str(t), '2002-01-23 10:48:05.000000')
self.assertEquals(repr(t), '\x03B9H\x15UUU')
self.assertEquals(TimeStamp('\x03B9H\x15UUU'), t)
self.assertEquals(t.year(), 2002)
self.assertEquals(t.month(), 1)
self.assertEquals(t.day(), 23)
self.assertEquals(t.hour(), 10)
self.assertEquals(t.minute(), 48)
self.assertEquals(round(t.second()), 5)
self.assertEquals(t.timeTime(), 1011782885)
t1 = TimeStamp(2002, 1, 23, 10, 48, 10)
self.assertEquals(str(t1), '2002-01-23 10:48:10.000000')
self.assert_(t == t)
self.assert_(t != t1)
self.assert_(t < t1)
self.assert_(t <= t1)
self.assert_(t1 >= t)
self.assert_(t1 > t)
self.failIf(t == t1)
self.failIf(t != t)
self.failIf(t > t1)
self.failIf(t >= t1)
self.failIf(t1 < t)
self.failIf(t1 <= t)
self.assertEquals(cmp(t, t), 0)
self.assertEquals(cmp(t, t1), -1)
self.assertEquals(cmp(t1, t), 1)
self.assertEquals(t1.laterThan(t), t1)
self.assert_(t.laterThan(t1) > t1)
self.assertEquals(TimeStamp(2002,1,23), TimeStamp(2002,1,23,0,0,0))
def test_suite():
return unittest.makeSuite(TimeStampTests, 'check')
##############################################################################
#
# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Test the routines to convert between long and 64-bit strings"""
import random
import unittest
from persistent import Persistent
NUM = 100
from ZODB.utils import U64, p64, u64
class TestUtils(unittest.TestCase):
small = [random.randrange(1, 1L<<32, int=long)
for i in range(NUM)]
large = [random.randrange(1L<<32, 1L<<64, int=long)
for i in range(NUM)]
all = small + large
def checkLongToStringToLong(self):
for num in self.all:
s = p64(num)
n = U64(s)
self.assertEquals(num, n, "U64() failed")
n2 = u64(s)
self.assertEquals(num, n2, "u64() failed")
def checkKnownConstants(self):
self.assertEquals("\000\000\000\000\000\000\000\001", p64(1))
self.assertEquals("\000\000\000\001\000\000\000\000", p64(1L<<32))
self.assertEquals(u64("\000\000\000\000\000\000\000\001"), 1)
self.assertEquals(U64("\000\000\000\000\000\000\000\001"), 1)
self.assertEquals(u64("\000\000\000\001\000\000\000\000"), 1L<<32)
self.assertEquals(U64("\000\000\000\001\000\000\000\000"), 1L<<32)
def checkPersistentIdHandlesDescriptor(self):
from ZODB.serialize import BaseObjectWriter
class P(Persistent):
pass
writer = BaseObjectWriter(None)
self.assertEqual(writer.persistent_id(P), None)
# It's hard to know where to put this test. We're checking that the
# ConflictError constructor uses utils.py's get_pickle_metadata() to
# deduce the class path from a pickle, instead of actually loading
# the pickle (and so also trying to import application module and
# class objects, which isn't a good idea on a ZEO server when avoidable).
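    # A sketch of the helper relied on (assuming the ZODB.utils API of
    # this vintage):
    #   from ZODB.utils import get_pickle_metadata
    #   get_pickle_metadata(data) -> ('ZODB.tests.MinPO', 'MinPO')
    # with no application import happening along the way.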
def checkConflictErrorDoesntImport(self):
from ZODB.serialize import BaseObjectWriter
from ZODB.POSException import ConflictError
from ZODB.tests.MinPO import MinPO
import cPickle as pickle
obj = MinPO()
data = BaseObjectWriter().serialize(obj)
# The pickle contains a GLOBAL ('c') opcode resolving to MinPO's
# module and class.
self.assert_('cZODB.tests.MinPO\nMinPO\n' in data)
# Fiddle the pickle so it points to something "impossible" instead.
data = data.replace('cZODB.tests.MinPO\nMinPO\n',
'cpath.that.does.not.exist\nlikewise.the.class\n')
# Pickle can't resolve that GLOBAL opcode -- gets ImportError.
self.assertRaises(ImportError, pickle.loads, data)
# Verify that building ConflictError doesn't get ImportError.
try:
raise ConflictError(object=obj, data=data)
except ConflictError, detail:
# And verify that the msg names the impossible path.
self.assert_('path.that.does.not.exist.likewise.the.class' in
str(detail))
else:
self.fail("expected ConflictError, but no exception raised")
def test_suite():
return unittest.makeSuite(TestUtils, 'check')
if __name__ == "__main__":
loader = unittest.TestLoader()
loader.testMethodPrefix = "check"
unittest.main(testLoader=loader)
##############################################################################
#
# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
import unittest
import ZODB
import ZODB.FileStorage
from ZODB.POSException import ReadConflictError, ConflictError
from ZODB.POSException import TransactionFailedError
from ZODB.tests.warnhook import WarningsHook
from persistent import Persistent
from persistent.mapping import PersistentMapping
import transaction
class P(Persistent):
pass
class Independent(Persistent):
def _p_independent(self):
return 1
class DecoyIndependent(Persistent):
def _p_independent(self):
return 0
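# _p_independent() is the hook the connection consults on a read
# conflict: a true result means the object's state is independent of
# other objects' states, so the conflicting read is allowed to succeed.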
class ZODBTests(unittest.TestCase):
def setUp(self):
self._storage = ZODB.FileStorage.FileStorage(
'ZODBTests.fs', create=1)
self._db = ZODB.DB(self._storage)
def populate(self):
transaction.begin()
conn = self._db.open()
root = conn.root()
root['test'] = pm = PersistentMapping()
for n in range(100):
pm[n] = PersistentMapping({0: 100 - n})
transaction.get().note('created test data')
transaction.commit()
conn.close()
def tearDown(self):
self._db.close()
self._storage.cleanup()
def checkExportImport(self, abort_it=False):
self.populate()
conn = self._db.open()
try:
self.duplicate(conn, abort_it)
finally:
conn.close()
conn = self._db.open()
try:
self.verify(conn, abort_it)
finally:
conn.close()
def duplicate(self, conn, abort_it):
transaction.begin()
transaction.get().note('duplication')
root = conn.root()
ob = root['test']
assert len(ob) > 10, 'Insufficient test data'
try:
import tempfile
f = tempfile.TemporaryFile()
ob._p_jar.exportFile(ob._p_oid, f)
assert f.tell() > 0, 'Did not export correctly'
f.seek(0)
new_ob = ob._p_jar.importFile(f)
self.assertEqual(new_ob, ob)
root['dup'] = new_ob
f.close()
if abort_it:
transaction.abort()
else:
transaction.commit()
except:
transaction.abort()
raise
def verify(self, conn, abort_it):
transaction.begin()
root = conn.root()
ob = root['test']
try:
ob2 = root['dup']
except KeyError:
if abort_it:
# Passed the test.
return
else:
raise
else:
self.failUnless(not abort_it, 'Did not abort duplication')
l1 = list(ob.items())
l1.sort()
l2 = list(ob2.items())
l2.sort()
l1 = map(lambda (k, v): (k, v[0]), l1)
l2 = map(lambda (k, v): (k, v[0]), l2)
self.assertEqual(l1, l2)
self.assert_(ob._p_oid != ob2._p_oid)
self.assertEqual(ob._p_jar, ob2._p_jar)
oids = {}
for v in ob.values():
oids[v._p_oid] = 1
for v in ob2.values():
assert not oids.has_key(v._p_oid), (
'Did not fully separate duplicate from original')
transaction.commit()
def checkExportImportAborted(self):
self.checkExportImport(abort_it=True)
def checkVersionOnly(self):
# Make sure the changes to make empty transactions a no-op
# still allow things like abortVersion(). This should work
# because abortVersion() calls tpc_begin() itself.
conn = self._db.open("version")
try:
r = conn.root()
r[1] = 1
transaction.commit()
finally:
conn.close()
self._db.abortVersion("version")
transaction.commit()
def checkResetCache(self):
# The cache size after a reset should be 0. Note that
# _resetCache is not a public API, but the resetCaches()
# function is, and resetCaches() causes _resetCache() to be
# called.
self.populate()
conn = self._db.open()
conn.root()
self.assert_(len(conn._cache) > 0) # Precondition
conn._resetCache()
self.assertEqual(len(conn._cache), 0)
def checkResetCachesAPI(self):
# Checks the resetCaches() API.
# (resetCaches used to be called updateCodeTimestamp.)
self.populate()
conn = self._db.open()
conn.root()
self.assert_(len(conn._cache) > 0) # Precondition
ZODB.Connection.resetCaches()
conn.close()
self.assert_(len(conn._cache) > 0) # Still not flushed
conn._setDB(self._db) # simulate the connection being reopened
self.assertEqual(len(conn._cache), 0)
def checkExplicitTransactionManager(self):
# Test of transactions that apply to only the connection,
# not the thread.
tm1 = transaction.TransactionManager()
conn1 = self._db.open(txn_mgr=tm1)
tm2 = transaction.TransactionManager()
conn2 = self._db.open(txn_mgr=tm2)
try:
r1 = conn1.root()
r2 = conn2.root()
if r1.has_key('item'):
del r1['item']
tm1.get().commit()
r1.get('item')
r2.get('item')
r1['item'] = 1
tm1.get().commit()
self.assertEqual(r1['item'], 1)
# r2 has not seen a transaction boundary,
# so it should be unchanged.
self.assertEqual(r2.get('item'), None)
conn2.sync()
# Now r2 is updated.
self.assertEqual(r2['item'], 1)
# Now, for good measure, send an update in the other direction.
r2['item'] = 2
tm2.get().commit()
self.assertEqual(r1['item'], 1)
self.assertEqual(r2['item'], 2)
conn1.sync()
conn2.sync()
self.assertEqual(r1['item'], 2)
self.assertEqual(r2['item'], 2)
finally:
conn1.close()
conn2.close()
def checkLocalTransactions(self):
# Test of transactions that apply to only the connection,
# not the thread.
conn1 = self._db.open()
conn2 = self._db.open()
hook = WarningsHook()
hook.install()
try:
conn1.setLocalTransaction()
conn2.setLocalTransaction()
r1 = conn1.root()
r2 = conn2.root()
if r1.has_key('item'):
del r1['item']
conn1.getTransaction().commit()
r1.get('item')
r2.get('item')
r1['item'] = 1
conn1.getTransaction().commit()
self.assertEqual(r1['item'], 1)
# r2 has not seen a transaction boundary,
# so it should be unchanged.
self.assertEqual(r2.get('item'), None)
conn2.sync()
# Now r2 is updated.
self.assertEqual(r2['item'], 1)
# Now, for good measure, send an update in the other direction.
r2['item'] = 2
conn2.getTransaction().commit()
self.assertEqual(r1['item'], 1)
self.assertEqual(r2['item'], 2)
conn1.sync()
conn2.sync()
self.assertEqual(r1['item'], 2)
self.assertEqual(r2['item'], 2)
for msg, obj, filename, lineno in hook.warnings:
self.assert_(
msg.startswith("setLocalTransaction() is deprecated.") or
msg.startswith("getTransaction() is deprecated."))
finally:
conn1.close()
conn2.close()
hook.uninstall()
def checkReadConflict(self):
self.obj = P()
self.readConflict()
def readConflict(self, shouldFail=True):
# Two transactions run concurrently. Each reads some object,
# then one commits and the other tries to read an object
# modified by the first. This read should fail with a conflict
# error because the object state read is not necessarily
# consistent with the objects read earlier in the transaction.
tm1 = transaction.TransactionManager()
conn = self._db.open(mvcc=False, txn_mgr=tm1)
r1 = conn.root()
r1["p"] = self.obj
self.obj.child1 = P()
tm1.get().commit()
# start a new transaction with a new connection
tm2 = transaction.TransactionManager()
cn2 = self._db.open(mvcc=False, txn_mgr=tm2)
# start a new transaction with the other connection
r2 = cn2.root()
self.assertEqual(r1._p_serial, r2._p_serial)
self.obj.child2 = P()
tm1.get().commit()
# resume the transaction using cn2
obj = r2["p"]
# An attempt to access obj should fail, because r2 was read
        # earlier in the transaction and obj was modified by the other
# transaction.
if shouldFail:
self.assertRaises(ReadConflictError, lambda: obj.child1)
# And since ReadConflictError was raised, attempting to commit
# the transaction should re-raise it. checkNotIndependent()
# failed this part of the test for a long time.
self.assertRaises(ReadConflictError, tm2.get().commit)
# And since that commit failed, trying to commit again should
# fail again.
self.assertRaises(TransactionFailedError, tm2.get().commit)
# And again.
self.assertRaises(TransactionFailedError, tm2.get().commit)
# Etc.
self.assertRaises(TransactionFailedError, tm2.get().commit)
else:
# make sure that accessing the object succeeds
obj.child1
tm2.get().abort()
def checkReadConflictIgnored(self):
# Test that an application that catches a read conflict and
# continues can not commit the transaction later.
root = self._db.open(mvcc=False).root()
root["real_data"] = real_data = PersistentMapping()
root["index"] = index = PersistentMapping()
real_data["a"] = PersistentMapping({"indexed_value": 0})
real_data["b"] = PersistentMapping({"indexed_value": 1})
index[1] = PersistentMapping({"b": 1})
index[0] = PersistentMapping({"a": 1})
transaction.commit()
# load some objects from one connection
tm = transaction.TransactionManager()
cn2 = self._db.open(mvcc=False, txn_mgr=tm)
r2 = cn2.root()
real_data2 = r2["real_data"]
index2 = r2["index"]
real_data["b"]["indexed_value"] = 0
del index[1]["b"]
index[0]["b"] = 1
transaction.commit()
del real_data2["a"]
try:
del index2[0]["a"]
except ReadConflictError:
            # This is the crux of the test. Ignore the error.
pass
else:
self.fail("No conflict occurred")
# real_data2 still ready to commit
self.assert_(real_data2._p_changed)
# index2 values not ready to commit
self.assert_(not index2._p_changed)
self.assert_(not index2[0]._p_changed)
self.assert_(not index2[1]._p_changed)
self.assertRaises(ReadConflictError, tm.get().commit)
self.assertRaises(TransactionFailedError, tm.get().commit)
tm.get().abort()
def checkIndependent(self):
self.obj = Independent()
self.readConflict(shouldFail=False)
def checkNotIndependent(self):
self.obj = DecoyIndependent()
self.readConflict()
def checkReadConflictErrorClearedDuringAbort(self):
# When a transaction is aborted, the "memory" of which
# objects were the cause of a ReadConflictError during
# that transaction should be cleared.
root = self._db.open(mvcc=False).root()
data = PersistentMapping({'d': 1})
root["data"] = data
transaction.commit()
# Provoke a ReadConflictError.
tm2 = transaction.TransactionManager()
cn2 = self._db.open(mvcc=False, txn_mgr=tm2)
r2 = cn2.root()
data2 = r2["data"]
data['d'] = 2
transaction.commit()
try:
data2['d'] = 3
except ReadConflictError:
pass
else:
self.fail("No conflict occurred")
# Explicitly abort cn2's transaction.
tm2.get().abort()
# cn2 should retain no memory of the read conflict after an abort(),
# but 3.2.3 had a bug wherein it did.
data_conflicts = data._p_jar._conflicts
data2_conflicts = data2._p_jar._conflicts
self.failIf(data_conflicts)
self.failIf(data2_conflicts) # this used to fail
# And because of that, we still couldn't commit a change to data2['d']
# in the new transaction.
cn2.sync() # process the invalidation for data2['d']
data2['d'] = 3
tm2.get().commit() # 3.2.3 used to raise ReadConflictError
cn2.close()
def checkTxnBeginImpliesAbort(self):
# begin() should do an abort() first, if needed.
cn = self._db.open()
rt = cn.root()
rt['a'] = 1
transaction.begin() # should abort adding 'a' to the root
rt = cn.root()
self.assertRaises(KeyError, rt.__getitem__, 'a')
# A longstanding bug: this didn't work if changes were only in
# subtransactions.
transaction.begin()
rt = cn.root()
rt['a'] = 2
transaction.commit(1)
transaction.begin()
rt = cn.root()
self.assertRaises(KeyError, rt.__getitem__, 'a')
# One more time, mixing "top level" and subtransaction changes.
transaction.begin()
rt = cn.root()
rt['a'] = 3
transaction.commit(1)
rt['b'] = 4
transaction.begin()
rt = cn.root()
self.assertRaises(KeyError, rt.__getitem__, 'a')
self.assertRaises(KeyError, rt.__getitem__, 'b')
# That used methods of the default transaction *manager*. Alas,
# that's not necessarily the same as using methods of the current
# transaction, and, in fact, when this test was written,
# Transaction.begin() didn't do anything (everything from here
# down failed).
# Oh, bleech. Since Transaction.begin is also deprecated, we have
# to goof around suppressing the deprecation warning.
import warnings
# First verify that Transaction.begin *is* deprecated, by turning
# the warning into an error.
warnings.filterwarnings("error", category=DeprecationWarning)
self.assertRaises(DeprecationWarning, transaction.get().begin)
del warnings.filters[0]
# Now ignore DeprecationWarnings for the duration. Use a
# try/finally block to ensure we reenable DeprecationWarnings
# no matter what.
warnings.filterwarnings("ignore", category=DeprecationWarning)
try:
cn = self._db.open()
rt = cn.root()
rt['a'] = 1
transaction.get().begin() # should abort adding 'a' to the root
rt = cn.root()
self.assertRaises(KeyError, rt.__getitem__, 'a')
# A longstanding bug: this didn't work if changes were only in
# subtransactions.
transaction.get().begin()
rt = cn.root()
rt['a'] = 2
transaction.get().commit(1)
transaction.get().begin()
rt = cn.root()
self.assertRaises(KeyError, rt.__getitem__, 'a')
# One more time, mixing "top level" and subtransaction changes.
transaction.get().begin()
rt = cn.root()
rt['a'] = 3
transaction.get().commit(1)
rt['b'] = 4
transaction.get().begin()
rt = cn.root()
self.assertRaises(KeyError, rt.__getitem__, 'a')
self.assertRaises(KeyError, rt.__getitem__, 'b')
cn.close()
finally:
del warnings.filters[0]
def checkFailingCommitSticks(self):
# See also checkFailingSubtransactionCommitSticks.
cn = self._db.open()
rt = cn.root()
rt['a'] = 1
# Arrange for commit to fail during tpc_vote.
poisoned = PoisonedObject(PoisonedJar(break_tpc_vote=True))
transaction.get().register(poisoned)
self.assertRaises(PoisonedError, transaction.get().commit)
# Trying to commit again fails too.
self.assertRaises(TransactionFailedError, transaction.get().commit)
self.assertRaises(TransactionFailedError, transaction.get().commit)
self.assertRaises(TransactionFailedError, transaction.get().commit)
# The change to rt['a'] is lost.
self.assertRaises(KeyError, rt.__getitem__, 'a')
# Trying to modify an object also fails, because Transaction.join()
# also raises TransactionFailedError.
self.assertRaises(TransactionFailedError, rt.__setitem__, 'b', 2)
# Clean up via abort(), and try again.
transaction.get().abort()
rt['a'] = 1
transaction.get().commit()
self.assertEqual(rt['a'], 1)
# Cleaning up via begin() should also work.
rt['a'] = 2
transaction.get().register(poisoned)
self.assertRaises(PoisonedError, transaction.get().commit)
self.assertRaises(TransactionFailedError, transaction.get().commit)
# The change to rt['a'] is lost.
self.assertEqual(rt['a'], 1)
# Trying to modify an object also fails.
self.assertRaises(TransactionFailedError, rt.__setitem__, 'b', 2)
# Clean up via begin(), and try again.
transaction.begin()
rt['a'] = 2
transaction.get().commit()
self.assertEqual(rt['a'], 2)
cn.close()
def checkFailingSubtransactionCommitSticks(self):
cn = self._db.open()
rt = cn.root()
rt['a'] = 1
transaction.get().commit(True)
self.assertEqual(rt['a'], 1)
rt['b'] = 2
# Subtransactions don't do tpc_vote, so we poison tpc_begin.
poisoned = PoisonedObject(PoisonedJar(break_tpc_begin=True))
transaction.get().register(poisoned)
self.assertRaises(PoisonedError, transaction.get().commit, True)
# Trying to subtxn-commit again fails too.
self.assertRaises(TransactionFailedError, transaction.get().commit, True)
self.assertRaises(TransactionFailedError, transaction.get().commit, True)
# Top-level commit also fails.
self.assertRaises(TransactionFailedError, transaction.get().commit)
# The changes to rt['a'] and rt['b'] are lost.
self.assertRaises(KeyError, rt.__getitem__, 'a')
self.assertRaises(KeyError, rt.__getitem__, 'b')
# Trying to modify an object also fails, because Transaction.join()
# also raises TransactionFailedError.
self.assertRaises(TransactionFailedError, rt.__setitem__, 'b', 2)
# Clean up via abort(), and try again.
transaction.get().abort()
rt['a'] = 1
transaction.get().commit()
self.assertEqual(rt['a'], 1)
# Cleaning up via begin() should also work.
rt['a'] = 2
transaction.get().register(poisoned)
self.assertRaises(PoisonedError, transaction.get().commit, True)
self.assertRaises(TransactionFailedError, transaction.get().commit, True)
# The change to rt['a'] is lost.
self.assertEqual(rt['a'], 1)
# Trying to modify an object also fails.
self.assertRaises(TransactionFailedError, rt.__setitem__, 'b', 2)
# Clean up via begin(), and try again.
transaction.begin()
rt['a'] = 2
transaction.get().commit(True)
self.assertEqual(rt['a'], 2)
transaction.get().commit()
cn2 = self._db.open()
rt = cn.root()
self.assertEqual(rt['a'], 2)
cn.close()
cn2.close()
class PoisonedError(Exception):
pass
# PoisonedJar arranges to raise exceptions from interesting places.
# For whatever reason, subtransaction commits don't call tpc_vote.
class PoisonedJar:
def __init__(self, break_tpc_begin=False, break_tpc_vote=False):
self.break_tpc_begin = break_tpc_begin
self.break_tpc_vote = break_tpc_vote
def sortKey(self):
return str(id(self))
# A way to poison a subtransaction commit.
def tpc_begin(self, *args):
if self.break_tpc_begin:
raise PoisonedError("tpc_begin fails")
# A way to poison a top-level commit.
def tpc_vote(self, *args):
if self.break_tpc_vote:
raise PoisonedError("tpc_vote fails")
    # commit_sub is needed; otherwise this jar is ignored during
    # subtransaction commit.
def commit_sub(*args):
pass
def abort_sub(*args):
pass
def commit(*args):
pass
    def abort(*args):
pass
class PoisonedObject:
def __init__(self, poisonedjar):
self._p_jar = poisonedjar
def test_suite():
return unittest.makeSuite(ZODBTests, 'check')
if __name__ == "__main__":
unittest.main(defaultTest="test_suite")
##############################################################################
#
# Copyright (c) 2004 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Test behavior of Connection plus cPickleCache."""
import doctest
from persistent import Persistent
import transaction
from ZODB.config import databaseFromString
class RecalcitrantObject(Persistent):
"""A Persistent object that will not become a ghost."""
deactivations = 0
def _p_deactivate(self):
self.__class__.deactivations += 1
def init(cls):
cls.deactivations = 0
init = classmethod(init)
class RegularObject(Persistent):
deactivations = 0
invalidations = 0
def _p_deactivate(self):
self.__class__.deactivations += 1
super(RegularObject, self)._p_deactivate()
def _p_invalidate(self):
self.__class__.invalidations += 1
super(RegularObject, self)._p_invalidate()
def init(cls):
cls.deactivations = 0
cls.invalidations = 0
init = classmethod(init)
class CacheTests:
def test_cache(self):
r"""Test basic cache methods.
Let's start with a clean transaction
>>> transaction.abort()
>>> RegularObject.init()
>>> db = databaseFromString("<zodb>\n"
... "cache-size 4\n"
... "<mappingstorage/>\n"
... "</zodb>")
>>> cn = db.open()
>>> r = cn.root()
>>> L = []
>>> for i in range(5):
... o = RegularObject()
... L.append(o)
... r[i] = o
>>> transaction.commit()
After committing a transaction and calling cacheGC(), there
should be cache-size (4) objects in the cache. One of the
RegularObjects was deactivated.
>>> cn._cache.ringlen()
4
>>> RegularObject.deactivations
1
If we explicitly activate the objects again, the ringlen
should go back up to 5.
>>> for o in L:
... o._p_activate()
>>> cn._cache.ringlen()
5
>>> cn.cacheGC()
>>> cn._cache.ringlen()
4
>>> RegularObject.deactivations
2
>>> cn.cacheMinimize()
>>> cn._cache.ringlen()
0
>>> RegularObject.deactivations
6
If we activate all the objects again and mark one as modified,
then the one object should not be deactivated even by a
minimize.
>>> for o in L:
... o._p_activate()
>>> o.attr = 1
>>> cn._cache.ringlen()
5
>>> cn.cacheMinimize()
>>> cn._cache.ringlen()
1
>>> RegularObject.deactivations
10
Clean up
>>> transaction.abort()
"""
def test_cache_gc_recalcitrant(self):
r"""Test that a cacheGC() call will return.
It's possible for a particular object to ignore the
_p_deactivate() call. We want to check several things in this
        case. The cache should call the real _p_deactivate() method,
        not the one provided by Persistent. The cacheGC() call should
        also return once it has looked at each item, regardless of
        whether the item became a ghost.
>>> RecalcitrantObject.init()
>>> db = databaseFromString("<zodb>\n"
... "cache-size 4\n"
... "<mappingstorage/>\n"
... "</zodb>")
>>> cn = db.open()
>>> r = cn.root()
>>> L = []
>>> for i in range(5):
... o = RecalcitrantObject()
... L.append(o)
... r[i] = o
>>> transaction.commit()
>>> [o._p_state for o in L]
[0, 0, 0, 0, 0]
The Connection calls cacheGC() after it commits a transaction.
        Since the cache will now have more objects than its target size,
it will call _p_deactivate() on each RecalcitrantObject.
>>> RecalcitrantObject.deactivations
5
>>> [o._p_state for o in L]
[0, 0, 0, 0, 0]
An explicit call to cacheGC() has the same effect.
>>> cn.cacheGC()
>>> RecalcitrantObject.deactivations
10
>>> [o._p_state for o in L]
[0, 0, 0, 0, 0]
"""
def test_cache_on_abort(self):
r"""Test that the cache handles transaction abort correctly.
>>> RegularObject.init()
>>> db = databaseFromString("<zodb>\n"
... "cache-size 4\n"
... "<mappingstorage/>\n"
... "</zodb>")
>>> cn = db.open()
>>> r = cn.root()
>>> L = []
>>> for i in range(5):
... o = RegularObject()
... L.append(o)
... r[i] = o
>>> transaction.commit()
>>> RegularObject.deactivations
1
Modify three of the objects and verify that they are
deactivated when the transaction aborts.
>>> for i in range(0, 5, 2):
... L[i].attr = i
>>> [L[i]._p_state for i in range(0, 5, 2)]
[1, 1, 1]
>>> cn._cache.ringlen()
5
>>> transaction.abort()
>>> cn._cache.ringlen()
2
>>> RegularObject.deactivations
4
"""
def test_suite():
return doctest.DocTestSuite()
##############################################################################
#
# Copyright (c) 2003 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""XXX short summary goes here.
$Id$
"""
import unittest
from doctest import DocTestSuite
from transaction._transaction import DataManagerAdapter
from ZODB.tests.sampledm import DataManager
def test_normal_commit():
"""
So, we have a data manager:
>>> dm = DataManager()
    and we do some work that modifies uncommitted state:
>>> dm.inc()
>>> dm.state, dm.delta
(0, 1)
Now we'll commit the changes. When the data manager joins a transaction,
the transaction will create an adapter.
>>> dma = DataManagerAdapter(dm)
and register it as a modified object. At commit time, the
transaction will get the "jar" like this:
>>> jar = getattr(dma, '_p_jar', dma)
and, of course, the jar and the adapter will be the same:
>>> jar is dma
True
The transaction will call tpc_begin:
>>> t1 = '1'
>>> jar.tpc_begin(t1)
Then the transaction will call commit on the jar:
>>> jar.commit(t1)
This doesn't actually do anything. :)
>>> dm.state, dm.delta
(0, 1)
The transaction will then call tpc_vote:
>>> jar.tpc_vote(t1)
This prepares the data manager:
>>> dm.state, dm.delta
(1, 1)
>>> dm.prepared
True
Finally, tpc_finish is called:
>>> jar.tpc_finish(t1)
and the data manager finishes the two-phase commit:
>>> dm.state, dm.delta
(1, 0)
>>> dm.prepared
False
"""
def test_abort():
"""
So, we have a data manager:
>>> dm = DataManager()
    and we do some work that modifies uncommitted state:
>>> dm.inc()
>>> dm.state, dm.delta
(0, 1)
When the data manager joins a transaction,
the transaction will create an adapter.
>>> dma = DataManagerAdapter(dm)
and register it as a modified object.
Now we'll abort the transaction. The transaction will get the
"jar" like this:
>>> jar = getattr(dma, '_p_jar', dma)
and, of course, the jar and the adapter will be the same:
>>> jar is dma
True
Then the transaction will call abort on the jar:
>>> t1 = '1'
>>> jar.abort(t1)
Which aborts the changes in the data manager:
>>> dm.state, dm.delta
(0, 0)
"""
def test_tpc_abort_phase1():
"""
So, we have a data manager:
>>> dm = DataManager()
    and we do some work that modifies uncommitted state:
>>> dm.inc()
>>> dm.state, dm.delta
(0, 1)
Now we'll commit the changes. When the data manager joins a transaction,
the transaction will create an adapter.
>>> dma = DataManagerAdapter(dm)
and register it as a modified object. At commit time, the
transaction will get the "jar" like this:
>>> jar = getattr(dma, '_p_jar', dma)
and, of course, the jar and the adapter will be the same:
>>> jar is dma
True
The transaction will call tpc_begin:
>>> t1 = '1'
>>> jar.tpc_begin(t1)
Then the transaction will call commit on the jar:
>>> jar.commit(t1)
This doesn't actually do anything. :)
>>> dm.state, dm.delta
(0, 1)
At this point, the transaction decides to abort. It calls tpc_abort:
>>> jar.tpc_abort(t1)
Which causes the state of the data manager to be restored:
>>> dm.state, dm.delta
(0, 0)
"""
def test_tpc_abort_phase2():
"""
So, we have a data manager:
>>> dm = DataManager()
    and we do some work that modifies uncommitted state:
>>> dm.inc()
>>> dm.state, dm.delta
(0, 1)
Now we'll commit the changes. When the data manager joins a transaction,
the transaction will create an adapter.
>>> dma = DataManagerAdapter(dm)
and register it as a modified object. At commit time, the
transaction will get the "jar" like this:
>>> jar = getattr(dma, '_p_jar', dma)
and, of course, the jar and the adapter will be the same:
>>> jar is dma
True
The transaction will call tpc_begin:
>>> t1 = '1'
>>> jar.tpc_begin(t1)
Then the transaction will call commit on the jar:
>>> jar.commit(t1)
This doesn't actually do anything. :)
>>> dm.state, dm.delta
(0, 1)
The transaction calls vote:
>>> jar.tpc_vote(t1)
This prepares the data manager:
>>> dm.state, dm.delta
(1, 1)
>>> dm.prepared
True
At this point, the transaction decides to abort. It calls tpc_abort:
>>> jar.tpc_abort(t1)
Which causes the state of the data manager to be restored:
>>> dm.state, dm.delta
(0, 0)
>>> dm.prepared
False
"""
def test_commit_w_subtransactions():
"""
So, we have a data manager:
>>> dm = DataManager()
    and we do some work that modifies uncommitted state:
>>> dm.inc()
>>> dm.state, dm.delta
(0, 1)
Now we'll commit the changes in a subtransaction. When the data
manager joins a transaction, the transaction will create an
adapter.
>>> dma = DataManagerAdapter(dm)
and register it as a modified object. At commit time, the
transaction will get the "jar" like this:
>>> jar = getattr(dma, '_p_jar', dma)
and, of course, the jar and the adapter will be the same:
>>> jar is dma
True
The transaction will call tpc_begin:
>>> t1 = '1'
>>> jar.tpc_begin(t1, 1) # 1 -> subtxn
Then the transaction will call commit on the jar:
>>> jar.commit(t1)
This doesn't actually do anything. :)
>>> dm.state, dm.delta
(0, 1)
The transaction will then call tpc_vote:
>>> jar.tpc_vote(t1)
This doesn't do anything either, because zodb4 data managers don't
actually do two-phase commit for subtransactions.
>>> dm.state, dm.delta
(0, 1)
    Finally, we call tpc_finish. This does actually create a savepoint,
but we can't really tell that from outside.
>>> jar.tpc_finish(t1)
>>> dm.state, dm.delta
(0, 1)
We'll do more of the above:
>>> dm.inc()
>>> dm.state, dm.delta
(0, 2)
>>> jar.tpc_begin(t1, 1) # 1 -> subtxn
>>> jar.commit(t1)
>>> jar.tpc_vote(t1)
>>> jar.tpc_finish(t1)
>>> dm.state, dm.delta
(0, 2)
>>> dm.inc()
>>> dm.state, dm.delta
(0, 3)
>>> jar.tpc_begin(t1, 1) # 1 -> subtxn
>>> jar.commit(t1)
>>> jar.tpc_vote(t1)
>>> jar.tpc_finish(t1)
>>> dm.state, dm.delta
(0, 3)
    Note that the above works *because* the same transaction is used
for each subtransaction.
Finally, we'll do a little more work:
>>> dm.inc()
>>> dm.inc()
>>> dm.state, dm.delta
(0, 5)
and then commit the top-level transaction.
The transaction will actually go through the steps for a subtransaction:
>>> jar.tpc_begin(t1, 1) # 1 -> subtxn
>>> jar.commit(t1)
>>> jar.tpc_vote(t1)
>>> jar.tpc_finish(t1)
And then call commit_sub:
>>> jar.commit_sub(t1)
As usual, this doesn't actually do anything. ;)
>>> dm.state, dm.delta
(0, 5)
    The transaction manager doesn't call tpc_begin, because commit_sub
implies the start of two-phase commit. Next, it does call commit:
>>> jar.commit(t1)
which doesn't do anything.
Finally, the transaction calls tpc_vote:
>>> jar.tpc_vote(t1)
which actually does something (because this is the top-level txn):
>>> dm.state, dm.delta
(5, 5)
>>> dm.prepared
True
Finally, tpc_finish is called:
>>> jar.tpc_finish(t1)
and the data manager finishes the two-phase commit:
>>> dm.state, dm.delta
(5, 0)
>>> dm.prepared
False
"""
def test_commit_w_subtransactions_featuring_subtransaction_abort():
"""
So, we have a data manager:
>>> dm = DataManager()
and we do some work that modifies uncommitted state:
>>> dm.inc()
>>> dm.state, dm.delta
(0, 1)
Now we'll commit the changes in a subtransaction. When the data
manager joins a transaction, the transaction will create an
adapter.
>>> dma = DataManagerAdapter(dm)
and register it as a modified object. At commit time, the
transaction will get the "jar" like this:
>>> jar = getattr(dma, '_p_jar', dma)
and, of course, the jar and the adapter will be the same:
>>> jar is dma
True
The transaction will call tpc_begin:
>>> t1 = '1'
>>> jar.tpc_begin(t1, 1) # 1 -> subtxn
Then the transaction will call commit on the jar:
>>> jar.commit(t1)
This doesn't actually do anything. :)
>>> dm.state, dm.delta
(0, 1)
The transaction will then call tpc_vote:
>>> jar.tpc_vote(t1)
This doesn't do anything either, because zodb4 data managers don't
actually do two-phase commit for subtransactions.
>>> dm.state, dm.delta
(0, 1)
Finally, we call tpc_finish. This does actually create a savepoint,
but we can't really tell that from outside.
>>> jar.tpc_finish(t1)
>>> dm.state, dm.delta
(0, 1)
We'll do more of the above:
>>> dm.inc()
>>> dm.state, dm.delta
(0, 2)
>>> jar.tpc_begin(t1, 1) # 1 -> subtxn
>>> jar.commit(t1)
>>> jar.tpc_vote(t1)
>>> jar.tpc_finish(t1)
>>> dm.state, dm.delta
(0, 2)
>>> dm.inc()
>>> dm.state, dm.delta
(0, 3)
But then we'll decide to abort a subtransaction.
The transaction will just call abort as usual:
>>> jar.abort(t1)
This will cause a rollback to the last savepoint:
>>> dm.state, dm.delta
(0, 2)
Then we do more work:
>>> dm.inc()
>>> dm.state, dm.delta
(0, 3)
>>> jar.tpc_begin(t1, 1) # 1 -> subtxn
>>> jar.commit(t1)
>>> jar.tpc_vote(t1)
>>> jar.tpc_finish(t1)
>>> dm.state, dm.delta
(0, 3)
Note that the above works *because* the same transaction is used
for each subtransaction.
Finally, we'll do a little more work:
>>> dm.inc()
>>> dm.inc()
>>> dm.state, dm.delta
(0, 5)
and then commit the top-level transaction.
The transaction will actually go through the steps for a subtransaction:
>>> jar.tpc_begin(t1, 1) # 1 -> subtxn
>>> jar.commit(t1)
>>> jar.tpc_vote(t1)
>>> jar.tpc_finish(t1)
And then call commit_sub:
>>> jar.commit_sub(t1)
As usual, this doesn't actually do anything. ;)
>>> dm.state, dm.delta
(0, 5)
The transaction manager doesn't call tpc_begin, because commit_sub
implies the start of two-phase commit. Next, it does call commit:
>>> jar.commit(t1)
which doesn't do anything.
Finally, the transaction calls tpc_vote:
>>> jar.tpc_vote(t1)
which actually does something (because this is the top-level txn):
>>> dm.state, dm.delta
(5, 5)
>>> dm.prepared
True
Finally, tpc_finish is called:
>>> jar.tpc_finish(t1)
and the data manager finishes the two-phase commit:
>>> dm.state, dm.delta
(5, 0)
>>> dm.prepared
False
"""
def test_abort_w_subtransactions():
"""
So, we have a data manager:
>>> dm = DataManager()
and we do some work that modifies uncommitted state:
>>> dm.inc()
>>> dm.state, dm.delta
(0, 1)
Now we'll commit the changes in a subtransaction. When the data
manager joins a transaction, the transaction will create an
adapter.
>>> dma = DataManagerAdapter(dm)
and register it as a modified object. At commit time, the
transaction will get the "jar" like this:
>>> jar = getattr(dma, '_p_jar', dma)
and, of course, the jar and the adapter will be the same:
>>> jar is dma
True
The transaction will call tpc_begin:
>>> t1 = '1'
>>> jar.tpc_begin(t1, 1) # 1 -> subtxn
Then the transaction will call commit on the jar:
>>> jar.commit(t1)
This doesn't actually do anything. :)
>>> dm.state, dm.delta
(0, 1)
The transaction will then call tpc_vote:
>>> jar.tpc_vote(t1)
This doesn't do anything either, because zodb4 data managers don't
actually do two-phase commit for subtransactions.
>>> dm.state, dm.delta
(0, 1)
Finally, we call tpc_finish. This does actually create a savepoint,
but we can't really tell that from outside.
>>> jar.tpc_finish(t1)
>>> dm.state, dm.delta
(0, 1)
We'll do more of the above:
>>> dm.inc()
>>> dm.state, dm.delta
(0, 2)
>>> jar.tpc_begin(t1, 1) # 1 -> subtxn
>>> jar.commit(t1)
>>> jar.tpc_vote(t1)
>>> jar.tpc_finish(t1)
>>> dm.state, dm.delta
(0, 2)
>>> dm.inc()
>>> dm.state, dm.delta
(0, 3)
>>> jar.tpc_begin(t1, 1) # 1 -> subtxn
>>> jar.commit(t1)
>>> jar.tpc_vote(t1)
>>> jar.tpc_finish(t1)
>>> dm.state, dm.delta
(0, 3)
Note that the above works *because* the same transaction is used
for each subtransaction.
Finally, we'll do a little more work:
>>> dm.inc()
>>> dm.inc()
>>> dm.state, dm.delta
(0, 5)
and then abort the top-level transaction.
The transaction first calls abort on the jar:
>>> jar.abort(t1)
This will have the effect of aborting the subtransaction:
>>> dm.state, dm.delta
(0, 3)
Then the transaction will call abort_sub:
>>> jar.abort_sub(t1)
This will abort all of the subtransactions:
>>> dm.state, dm.delta
(0, 0)
"""
def test_tpc_abort_w_subtransactions_featuring_subtransaction_abort():
"""
So, we have a data manager:
>>> dm = DataManager()
and we do some work that modifies uncommitted state:
>>> dm.inc()
>>> dm.state, dm.delta
(0, 1)
Now we'll commit the changes in a subtransaction. When the data
manager joins a transaction, the transaction will create an
adapter.
>>> dma = DataManagerAdapter(dm)
and register it as a modified object. At commit time, the
transaction will get the "jar" like this:
>>> jar = getattr(dma, '_p_jar', dma)
and, of course, the jar and the adapter will be the same:
>>> jar is dma
True
The transaction will call tpc_begin:
>>> t1 = '1'
>>> jar.tpc_begin(t1, 1) # 1 -> subtxn
Then the transaction will call commit on the jar:
>>> jar.commit(t1)
This doesn't actually do anything. :)
>>> dm.state, dm.delta
(0, 1)
The transaction will then call tpc_vote:
>>> jar.tpc_vote(t1)
This doesn't do anything either, because zodb4 data managers don't
actually do two-phase commit for subtransactions.
>>> dm.state, dm.delta
(0, 1)
Finally, we call tpc_finish. This does actually create a savepoint,
but we can't really tell that from outside.
>>> jar.tpc_finish(t1)
>>> dm.state, dm.delta
(0, 1)
We'll do more of the above:
>>> dm.inc()
>>> dm.state, dm.delta
(0, 2)
>>> jar.tpc_begin(t1, 1) # 1 -> subtxn
>>> jar.commit(t1)
>>> jar.tpc_vote(t1)
>>> jar.tpc_finish(t1)
>>> dm.state, dm.delta
(0, 2)
>>> dm.inc()
>>> dm.state, dm.delta
(0, 3)
But then we'll decide to abort a subtransaction.
The transaction will just call abort as usual:
>>> jar.abort(t1)
This will cause a rollback to the last savepoint:
>>> dm.state, dm.delta
(0, 2)
Then we do more work:
>>> dm.inc()
>>> dm.state, dm.delta
(0, 3)
>>> jar.tpc_begin(t1, 1) # 1 -> subtxn
>>> jar.commit(t1)
>>> jar.tpc_vote(t1)
>>> jar.tpc_finish(t1)
>>> dm.state, dm.delta
(0, 3)
Note that the above works *because* the same transaction is used
for each subtransaction.
Finally, we'll do a little more work:
>>> dm.inc()
>>> dm.inc()
>>> dm.state, dm.delta
(0, 5)
and then commit the top-level transaction.
The transaction will actually go through the steps for a subtransaction:
>>> jar.tpc_begin(t1, 1) # 1 -> subtxn
>>> jar.commit(t1)
>>> jar.tpc_vote(t1)
>>> jar.tpc_finish(t1)
And then call commit_sub:
>>> jar.commit_sub(t1)
As usual, this doesn't actually do anything. ;)
>>> dm.state, dm.delta
(0, 5)
The transaction manager doesn't call tpc_begin, because commit_sub
implies the start of two-phase commit. Next, it does call commit:
>>> jar.commit(t1)
which doesn't do anything.
Finally, the transaction calls tpc_vote:
>>> jar.tpc_vote(t1)
which actually does something (because this is the top-level txn):
>>> dm.state, dm.delta
(5, 5)
>>> dm.prepared
True
Now, at the last minute, the transaction is aborted (possibly due
to a "no vote" from another data manager):
>>> jar.tpc_abort(t1)
And the changes are undone:
>>> dm.state, dm.delta
(0, 0)
>>> dm.prepared
False
"""
def test_suite():
return DocTestSuite()
if __name__ == '__main__':
unittest.main()
##############################################################################
#
# Copyright (c) 2004 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################
"""A storage used for unittests.
The primary purpose of this module is to have a minimal multi-version
storage to use for unit tests. MappingStorage isn't sufficient.
Since even a minimal storage has some complexity, we run standard
storage tests against the test storage.
"""
import bisect
import threading
import unittest
from ZODB.BaseStorage import BaseStorage
from ZODB import POSException
from ZODB.utils import z64
from ZODB.tests import StorageTestBase
from ZODB.tests import BasicStorage, MTStorage, Synchronization, \
    PackableStorage, RevisionStorage
class Transaction(object):
"""Hold data for current transaction for MinimalMemoryStorage."""
def __init__(self, tid):
self.index = {}
self.tid = tid
def store(self, oid, data):
self.index[(oid, self.tid)] = data
    def cur(self):
        # Map each oid stored in this transaction to this transaction's tid.
        return dict.fromkeys([oid for oid, tid in self.index.keys()], self.tid)
class MinimalMemoryStorage(BaseStorage, object):
"""Simple in-memory storage that supports revisions.
This storage is needed to test multi-version concurrency control.
It is similar to MappingStorage, but keeps multiple revisions. It
does not support versions. It doesn't implement operations like
pack(), because they aren't necessary for testing.
"""
def __init__(self):
super(MinimalMemoryStorage, self).__init__("name")
# _index maps oid, tid pairs to data records
self._index = {}
# _cur maps oid to current tid
self._cur = {}
def isCurrent(self, oid, serial):
return serial == self._cur[oid]
def hook(self, oid, tid, version):
# A hook for testing
pass
def __len__(self):
return len(self._index)
def _clear_temp(self):
pass
def loadEx(self, oid, version):
self._lock_acquire()
try:
assert not version
tid = self._cur[oid]
self.hook(oid, tid, version)
return self._index[(oid, tid)], tid, ""
finally:
self._lock_release()
def load(self, oid, version):
return self.loadEx(oid, version)[:2]
def _begin(self, tid, u, d, e):
self._txn = Transaction(tid)
    def store(self, oid, serial, data, v, txn):
        if txn is not self._transaction:
            raise POSException.StorageTransactionError(self, txn)
        assert not v
        if self._cur.get(oid) != serial:
            # The object changed since this transaction last read it.
            # Only brand-new objects (no committed revision yet) are
            # exempt from the conflict check.
            if not (serial is None or self._cur.get(oid) in [None, z64]):
                raise POSException.ConflictError(
                    oid=oid, serials=(self._cur.get(oid), serial), data=data)
        self._txn.store(oid, data)
        return self._tid
def _abort(self):
del self._txn
def _finish(self, tid, u, d, e):
self._lock_acquire()
try:
self._index.update(self._txn.index)
self._cur.update(self._txn.cur())
self._ltid = self._tid
finally:
self._lock_release()
def lastTransaction(self):
return self._ltid
def loadBefore(self, the_oid, the_tid):
# It's okay if loadBefore() is really expensive, because this
# storage is just used for testing.
self._lock_acquire()
try:
tids = [tid for oid, tid in self._index if oid == the_oid]
if not tids:
raise KeyError, the_oid
tids.sort()
i = bisect.bisect_left(tids, the_tid) - 1
if i == -1:
return None
tid = tids[i]
j = i + 1
if j == len(tids):
end_tid = None
else:
end_tid = tids[j]
return self._index[(the_oid, tid)], tid, end_tid
finally:
self._lock_release()
def loadSerial(self, oid, serial):
self._lock_acquire()
try:
return self._index[(oid, serial)]
finally:
self._lock_release()
class MinimalTestSuite(StorageTestBase.StorageTestBase,
BasicStorage.BasicStorage,
MTStorage.MTStorage,
Synchronization.SynchronizedStorage,
RevisionStorage.RevisionStorage,
):
def setUp(self):
self._storage = MinimalMemoryStorage()
# we don't implement undo
def checkLoadBeforeUndo(self):
pass
def test_suite():
return unittest.makeSuite(MinimalTestSuite, "check")
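# A standalone sketch (illustrative only) of the bisect-based
# bracketing that loadBefore() performs: given the sorted tids
# recorded for an oid, find the revision in effect just before
# `the_tid` and the tid, if any, that supersedes it.
def _bracket_revision(tids, the_tid):
    i = bisect.bisect_left(tids, the_tid) - 1
    if i == -1:
        return None                # nothing was written before the_tid
    if i + 1 < len(tids):
        end_tid = tids[i + 1]      # the next revision ends this one
    else:
        end_tid = None             # still the current revision
    return tids[i], end_tid

# _bracket_revision([2, 4, 8], 5) -> (4, 8)
# _bracket_revision([2, 4, 8], 9) -> (8, None)
# _bracket_revision([2, 4, 8], 2) -> None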
##############################################################################
#
# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
import unittest
from ZODB.fsIndex import fsIndex
from ZODB.utils import p64
class Test(unittest.TestCase):
def testInserts(self):
index=fsIndex()
for i in range(200):
index[p64(i*1000)]=(i*1000L+1)
for i in range(0,200):
self.assertEqual((i,index[p64(i*1000)]), (i,(i*1000L+1)))
self.assertEqual(len(index), 200)
key=p64(2000)
self.assertEqual(index.get(key), 2001)
key=p64(2001)
self.assertEqual(index.get(key), None)
self.assertEqual(index.get(key, ''), '')
# self.failUnless(len(index._data) > 1)
def testUpdate(self):
index=fsIndex()
d={}
for i in range(200):
d[p64(i*1000)]=(i*1000L+1)
index.update(d)
for i in range(400,600):
d[p64(i*1000)]=(i*1000L+1)
index.update(d)
for i in range(100, 500):
d[p64(i*1000)]=(i*1000L+2)
index.update(d)
self.assertEqual(index.get(p64(2000)), 2001)
self.assertEqual(index.get(p64(599000)), 599001)
self.assertEqual(index.get(p64(399000)), 399002)
self.assertEqual(len(index), 600)
def test_suite():
loader=unittest.TestLoader()
return loader.loadTestsFromTestCase(Test)
if __name__=='__main__':
unittest.TextTestRunner().run(test_suite())
##############################################################################
#
# Copyright (c) 2004 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
r"""
Multi-version concurrency control tests
=======================================
Multi-version concurrency control (MVCC) exploits storages that store
multiple revisions of an object to avoid read conflicts. Normally
when an object is read from the storage, its most recent revision is
read. Under MVCC, an older revision may be read so that the transaction
sees a consistent view of the database.
ZODB guarantees execution-time consistency: A single transaction will
always see a consistent view of the database while it is executing.
If transaction A is running, has already read an object O1, and a
different transaction B modifies object O2, then transaction A can no
longer read the current revision of O2. It must either read the
version of O2 that is consistent with O1 or raise a ReadConflictError.
When MVCC is in use, A will do the former.
This note includes doctests that explain how MVCC is implemented (and
test that the implementation is correct). The tests use a
MinimalMemoryStorage that implements MVCC support, but not much else.
>>> from ZODB.tests.test_storage import MinimalMemoryStorage
>>> from ZODB import DB
>>> db = DB(MinimalMemoryStorage())
We will use two different connections with the experimental
setLocalTransaction() method to make sure that the connections act
independently, even though they'll be run from a single thread.
>>> import transaction
>>> tm1 = transaction.TransactionManager()
>>> cn1 = db.open(txn_mgr=tm1)
The test will just use some MinPO objects. The next few lines just
set up an initial database state.
>>> from ZODB.tests.MinPO import MinPO
>>> r = cn1.root()
>>> r["a"] = MinPO(1)
>>> r["b"] = MinPO(1)
>>> tm1.get().commit()
Now open a second connection.
>>> tm2 = transaction.TransactionManager()
>>> cn2 = db.open(txn_mgr=tm2)
Connection high-water mark
--------------------------
The ZODB Connection tracks a transaction high-water mark, which
bounds the latest transaction id that can be read by the current
transaction and still present a consistent view of the database.
Transactions with ids up to but not including the high-water mark
are OK to read. When a transaction commits, the database sends
invalidations to all the other connections; the invalidation contains
the transaction id and the oids of modified objects. The Connection
stores the high-water mark in _txn_time, which is set to None until
an invalidation arrives.
>>> cn = db.open()
>>> print cn._txn_time
None
>>> cn.invalidate(100, dict.fromkeys([1, 2]))
>>> cn._txn_time
100
>>> cn.invalidate(200, dict.fromkeys([1, 2]))
>>> cn._txn_time
100
A connection's high-water mark is set to the transaction id taken from
the first invalidation processed by the connection. Transaction ids are
monotonically increasing, so the first one seen during the current
transaction remains the high-water mark for the duration of the
transaction.
XXX We'd like simple abort and commit calls to make txn boundaries,
but that doesn't work unless an object is modified. sync() will abort
a transaction and process invalidations.
>>> cn.sync()
>>> print cn._txn_time # the high-water mark got reset to None
None
Basic functionality
-------------------
The next bit of code includes a simple MVCC test. One transaction
will modify "a." The other transaction will then modify "b" and commit.
>>> r1 = cn1.root()
>>> r1["a"].value = 2
>>> tm1.get().commit()
>>> txn = db.lastTransaction()
The second connection has its high-water mark set now.
>>> cn2._txn_time == txn
True
It is safe to read "b," because it was not modified by the concurrent
transaction.
>>> r2 = cn2.root()
>>> r2["b"]._p_serial < cn2._txn_time
True
>>> r2["b"].value
1
>>> r2["b"].value = 2
It is not safe, however, to read the current revision of "a" because
it was modified at the high-water mark. If we read it, we'll get a
non-current version.
>>> r2["a"].value
1
>>> r2["a"]._p_serial < cn2._txn_time
True
We can confirm that we have a non-current revision by asking the
storage.
>>> db._storage.isCurrent(r2["a"]._p_oid, r2["a"]._p_serial)
False
It's possible to modify "a", but we get a conflict error when we
commit the transaction.
>>> r2["a"].value = 3
>>> tm2.get().commit()
Traceback (most recent call last):
...
ConflictError: database conflict error (oid 0x01, class ZODB.tests.MinPO.MinPO)
>>> tm2.get().abort()
This example will demonstrate that we can commit a transaction if we only
modify current revisions.
>>> print cn2._txn_time
None
>>> r1 = cn1.root()
>>> r1["a"].value = 3
>>> tm1.get().commit()
>>> txn = db.lastTransaction()
>>> cn2._txn_time == txn
True
>>> r2["b"].value = r2["a"].value + 1
>>> r2["b"].value
3
>>> tm2.get().commit()
>>> print cn2._txn_time
None
Object cache
------------
A Connection keeps objects in its cache so that multiple database
references will always point to the same Python object. At
transaction boundaries, objects modified by other transactions are
ghostified so that the next transaction doesn't see stale state. We
need to be sure the non-current objects loaded by MVCC are always
ghosted. It should be trivial, because MVCC is only used when an
invalidation has been received for an object.
First get the database back in an initial state.
>>> cn1.sync()
>>> r1["a"].value = 0
>>> r1["b"].value = 0
>>> tm1.get().commit()
>>> cn2.sync()
>>> r2["a"].value
0
>>> r2["b"].value = 1
>>> tm2.get().commit()
>>> r1["b"].value
0
>>> cn1.sync() # cn2 modified 'b', so cn1 should get a ghost for b
>>> r1["b"]._p_state # -1 means GHOST
-1
Closing the connection, committing a transaction, and aborting a transaction
should all have the same effect on non-current objects in cache.
>>> def testit():
... cn1.sync()
... r1["a"].value = 0
... r1["b"].value = 0
... tm1.get().commit()
... cn2.sync()
... r2["b"].value = 1
... tm2.get().commit()
>>> testit()
>>> r1["b"]._p_state # 0 means UPTODATE, although note it's an older revision
0
>>> r1["b"].value
0
>>> r1["a"].value = 1
>>> tm1.get().commit()
>>> r1["b"]._p_state
-1
When a connection is closed, it is saved by the database. It will be
reused by the next open() call (along with its object cache).
>>> testit()
>>> r1["a"].value = 1
>>> tm1.get().abort()
>>> cn1.close()
>>> cn3 = db.open()
>>> cn1 is cn3
True
>>> r1 = cn1.root()
Although "b" is a ghost in cn1 at this point (because closing a connection
has the same effect on non-current objects in the connection's cache as
committing a transaction), not every object is a ghost. The root was in
the cache and was current, so our first reference to it doesn't return
a ghost.
>>> r1._p_state # UPTODATE
0
>>> r1["b"]._p_state # GHOST
-1
>>> cn1._transaction = None # See the Cleanup section below
Late invalidation
-----------------
The combination of ZEO and MVCC adds more complexity. Since
invalidations are delivered asynchronously by ZEO, it is possible for
an invalidation to arrive just after a request to load the invalidated
object is sent. The connection can't use the just-loaded data,
because the invalidation arrived first. The complexity for MVCC is
that it must check for invalidated objects after it has loaded them,
just in case.
Rather than add all the complexity of ZEO to these tests, the
MinimalMemoryStorage has a hook. We'll write a subclass that will
deliver an invalidation when it loads an object. The hook allows us
to test the Connection code.
>>> class TestStorage(MinimalMemoryStorage):
... def __init__(self):
... self.hooked = {}
... self.count = 0
... super(TestStorage, self).__init__()
... def registerDB(self, db, limit):
... self.db = db
... def hook(self, oid, tid, version):
... if oid in self.hooked:
... self.db.invalidate(tid, {oid:1})
... self.count += 1
We can execute this test with a single connection, because we're
synthesizing the invalidation that is normally generated by the second
connection. We need to create two revisions so that there is a
non-current revision to load.
>>> ts = TestStorage()
>>> db = DB(ts)
>>> cn1 = db.open(txn_mgr=tm1)
>>> r1 = cn1.root()
>>> r1["a"] = MinPO(0)
>>> r1["b"] = MinPO(0)
>>> tm1.get().commit()
>>> r1["b"].value = 1
>>> tm1.get().commit()
>>> cn1.cacheMinimize() # makes everything in cache a ghost
>>> oid = r1["b"]._p_oid
>>> ts.hooked[oid] = 1
Once the oid is hooked, an invalidation will be delivered the next
time it is activated. The code below activates the object, then
confirms that the hook worked and that the old state was retrieved.
>>> oid in cn1._invalidated
False
>>> r1["b"]._p_state
-1
>>> r1["b"]._p_activate()
>>> oid in cn1._invalidated
True
>>> ts.count
1
>>> r1["b"].value
0
No earlier revision available
-----------------------------
We'll reuse the code from the example above, except that there will
only be a single revision of "b." As a result, the attempt to
activate "b" will result in a ReadConflictError.
>>> ts = TestStorage()
>>> db = DB(ts)
>>> cn1 = db.open(txn_mgr=tm1)
>>> r1 = cn1.root()
>>> r1["a"] = MinPO(0)
>>> r1["b"] = MinPO(0)
>>> tm1.get().commit()
>>> cn1.cacheMinimize() # makes everything in cache a ghost
>>> oid = r1["b"]._p_oid
>>> ts.hooked[oid] = 1
Again, once the oid is hooked, an invalidation will be delivered the next
time it is activated. The code below activates the object, but unlike the
section above, there is no older state to retrieve.
>>> oid in cn1._invalidated
False
>>> r1["b"]._p_state
-1
>>> r1["b"]._p_activate()
Traceback (most recent call last):
...
ReadConflictError: database read conflict error (oid 0x02, class ZODB.tests.MinPO.MinPO)
>>> oid in cn1._invalidated
True
>>> ts.count
1
Cleanup
-------
The setLocalTransaction() feature creates cyclic trash involving the
Connection and Transaction. The Transaction has an __del__ method,
which prevents the cycle from being collected. There's no API for
clearing the Connection's local transaction.
>>> cn1._transaction = None
>>> cn2._transaction = None
"""
import doctest
def test_suite():
return doctest.DocTestSuite()
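# A tiny standalone sketch of the high-water-mark rule the doctests
# above exercise.  `txn_time` stands in for Connection._txn_time; a
# revision is safe to use only if it was committed before the mark.
def _safe_to_read(serial, txn_time):
    # _txn_time is None until an invalidation arrives, in which case
    # the current revision is consistent with everything read so far.
    return txn_time is None or serial < txn_time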
##############################################################################
#
# Copyright (c) 2003 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Conventience function for creating test databases
$Id$
"""
import time
import persistent
import transaction
from ZODB.MappingStorage import MappingStorage
from ZODB.DB import DB as _DB
try:
from transaction import get_transaction
except ImportError:
pass # else assume ZODB will install it as a builtin
def DB(name='Test'):
return _DB(MappingStorage(name))
def commit():
transaction.commit()
def pack(db):
db.pack(time.time()+1)
class P(persistent.Persistent):
def __init__(self, name):
self.name = name
def __repr__(self):
return 'P(%s)' % self.name
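# Hedged usage sketch for the helpers above: build a throwaway
# in-memory database, store a P, and commit.
if __name__ == '__main__':
    db = DB()
    conn = db.open()
    conn.root()['p'] = P('example')
    commit()
    print conn.root()['p']          # prints: P(example)
    conn.close()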
##############################################################################
#
# Copyright (c) 2004 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
import warnings
class WarningsHook:
"""Hook to capture warnings generated by Python.
The function warnings.showwarning() is designed to be hooked by
application code, allowing the application to customize the way it
handles warnings.
This hook captures the unformatted warning information and stores
it in a list. A test can inspect this list after the test is over.
Issues:
The warnings module has lots of delicate internal state. If
a warning has been reported once, it won't be reported again. It
may be necessary to extend this class with a mechanism for
modifying the internal state so that we can be guaranteed a
warning will be reported.
If Python is run with a warnings filter, e.g. python -Werror,
then a test that is trying to inspect a particular warning will
fail. Perhaps this class can be extended to install more-specific
filters so that the test can work anyway.
"""
def __init__(self):
self.original = None
self.warnings = []
def install(self):
self.original = warnings.showwarning
warnings.showwarning = self.showwarning
def uninstall(self):
assert self.original is not None
warnings.showwarning = self.original
self.original = None
def showwarning(self, message, category, filename, lineno):
self.warnings.append((str(message), category, filename, lineno))
def clear(self):
self.warnings = []
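# Hedged usage sketch: install the hook, trigger a warning, inspect
# what was captured, then restore the original handler.
if __name__ == '__main__':
    hook = WarningsHook()
    hook.install()
    try:
        warnings.warn("example warning")
        print hook.warnings[0][0]   # prints: example warning
    finally:
        hook.uninstall()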
##############################################################################
#
# Copyright (c) 2003 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################
"""Tools to simplify transactions within applications."""
import transaction

from ZODB.POSException import ReadConflictError, ConflictError
def _commit(note):
t = transaction.get()
if note:
t.note(note)
t.commit()
def transact(f, note=None, retries=5):
"""Returns transactional version of function argument f.
Higher-order function that converts a regular function into
a transactional function. The transactional function will
retry up to retries time before giving up. If note, it will
be added to the transaction metadata when it commits.
The retries occur on ConflictErrors. If some other
TransactionError occurs, the transaction will not be retried.
"""
# XXX deal with ZEO disconnected errors?
def g(*args, **kwargs):
n = retries
while n:
n -= 1
try:
r = f(*args, **kwargs)
except ReadConflictError, msg:
transaction.abort()
if not n:
raise
continue
try:
_commit(note)
except ConflictError, msg:
transaction.abort()
if not n:
raise
continue
return r
raise RuntimeError, "couldn't commit transaction"
return g
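# Hedged usage sketch for transact().  The function and its arguments
# are hypothetical; any callable that modifies persistent state will do.
def _bump(root, key):
    root[key] = root.get(key, 0) + 1

bump = transact(_bump, note="increment a counter", retries=3)
# bump(root, 'hits') runs _bump and commits, retrying up to three
# times if a ConflictError is raised at commit time.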
##############################################################################
#
# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################
import sys
import time
from struct import pack, unpack
from binascii import hexlify
import cPickle as pickle
from cStringIO import StringIO
from persistent.TimeStamp import TimeStamp
z64 = '\0'*8
t32 = 1L << 32
assert sys.hexversion >= 0x02020000
# The distinction between ints and longs is blurred in Python 2.2,
# so u64() and U64() are really the same.
def p64(v):
"""Pack an integer or long into a 8-byte string"""
return pack(">Q", v)
def u64(v):
"""Unpack an 8-byte string into a 64-bit long integer."""
return unpack(">Q", v)[0]
U64 = u64
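# Round-trip example (illustrative only):
#   p64(1)        == '\x00\x00\x00\x00\x00\x00\x00\x01'
#   u64(p64(250)) == 250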
def cp(f1, f2, l):
    """Copy l bytes from file f1 to file f2, in chunks of at most 8KB."""
    read = f1.read
    write = f2.write
    n = 8192
    while l > 0:
        if n > l:
            n = l
        d = read(n)
        if not d:
            break  # premature end of input; stop copying
        write(d)
        l = l - len(d)
def newTimeStamp(old=None,
TimeStamp=TimeStamp,
time=time.time, gmtime=time.gmtime):
t = time()
ts = TimeStamp(gmtime(t)[:5]+(t%60,))
if old is not None:
return ts.laterThan(old)
return ts
def oid_repr(oid):
if isinstance(oid, str) and len(oid) == 8:
# Convert to hex and strip leading zeroes.
as_hex = hexlify(oid).lstrip('0')
# Ensure two characters per input byte.
if len(as_hex) & 1:
as_hex = '0' + as_hex
elif as_hex == '':
as_hex = '00'
return '0x' + as_hex
else:
return repr(oid)
serial_repr = oid_repr
tid_repr = serial_repr
# For example, produce
# '0x03441422948b4399 2002-04-14 20:50:34.815000'
# for 8-byte string tid '\x03D\x14"\x94\x8bC\x99'.
def readable_tid_repr(tid):
result = tid_repr(tid)
if isinstance(tid, str) and len(tid) == 8:
result = "%s %s" % (result, TimeStamp(tid))
return result
# Addresses can "look negative" on some boxes, some of the time. If you
# feed a "negative address" to an %x format, Python 2.3 displays it as
# unsigned, but produces a FutureWarning, because Python 2.4 will display
it as signed. So when you want to produce an address, use positive_id() to
# obtain it.
def positive_id(obj):
"""Return id(obj) as a non-negative integer."""
result = id(obj)
if result < 0:
# This is a puzzle: there's no way to know the natural width of
# addresses on this box (in particular, there's no necessary
# relation to sys.maxint). Try 32 bits first (and on a 32-bit
# box, adding 2**32 gives a positive number with the same hex
# representation as the original result).
result += 1L << 32
if result < 0:
# Undo that, and try 64 bits.
result -= 1L << 32
result += 1L << 64
assert result >= 0 # else addresses are fatter than 64 bits
return result
# Given a ZODB pickle, return pair of strings (module_name, class_name).
# Do this without importing the module or class object.
# See ZODB/serialize.py's module docstring for the only docs that exist about
# ZODB pickle format. If the code here gets smarter, please update those
# docs to be at least as smart. The code here doesn't appear to make sense
# for what serialize.py calls formats 5 and 6.
def get_pickle_metadata(data):
# ZODB's data records contain two pickles. The first is the class
# of the object, the second is the object. We're only trying to
# pick apart the first here, to extract the module and class names.
if data.startswith('(c'): # pickle MARK GLOBAL opcode sequence
global_prefix = 2
elif data.startswith('c'): # pickle GLOBAL opcode
global_prefix = 1
else:
global_prefix = 0
if global_prefix:
# Formats 1 and 2.
# Don't actually unpickle a class, because it will attempt to
# load the class. Just break open the pickle and get the
# module and class from it. The module and class names are given by
# newline-terminated strings following the GLOBAL opcode.
modname, classname, rest = data.split('\n', 2)
modname = modname[global_prefix:] # strip GLOBAL opcode
return modname, classname
# Else there are a bunch of other possible formats.
f = StringIO(data)
u = pickle.Unpickler(f)
try:
class_info = u.load()
except Exception, err:
print "Error", err
return '', ''
if isinstance(class_info, tuple):
if isinstance(class_info[0], tuple):
# Formats 3 and 4.
modname, classname = class_info[0]
else:
# Formats 5 and 6 (probably) end up here.
modname, classname = class_info
else:
# This isn't a known format.
modname = repr(class_info)
classname = ''
return modname, classname
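# Hedged illustration of get_pickle_metadata() on a hand-built format-2
# record (bare GLOBAL opcode); the module and class names here are
# arbitrary examples.
def _pickle_metadata_example():
    data = 'cZODB.tests.MinPO\nMinPO\nq\x01.'
    return get_pickle_metadata(data)    # ('ZODB.tests.MinPO', 'MinPO')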
/*****************************************************************************
Copyright (c) 2001, 2002 Zope Corporation and Contributors.
All Rights Reserved.
This software is subject to the provisions of the Zope Public License,
Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
FOR A PARTICULAR PURPOSE
****************************************************************************/
static char winlock_doc_string[] =
"Lock files on Windows."
"\n"
"$Id$\n";
#include "Python.h"
static PyObject *Error;
#ifdef MS_WIN32
#include <windows.h>
#include <io.h>
/* LOCK_FUNC is the shared type of Win32 LockFile and UnlockFile. */
typedef WINBASEAPI BOOL WINAPI LOCK_FUNC(HANDLE, DWORD, DWORD, DWORD, DWORD);
static PyObject *
common(LOCK_FUNC func, PyObject *args)
{
int fileno;
long h, ofslo, ofshi, lenlo, lenhi;
if (! PyArg_ParseTuple(args, "illll", &fileno,
&ofslo, &ofshi,
&lenlo, &lenhi))
return NULL;
h = _get_osfhandle(fileno);
if (h == -1) {
PyErr_SetString(Error, "_get_osfhandle failed");
return NULL;
}
if (func((HANDLE)h, ofslo, ofshi, lenlo, lenhi)) {
Py_INCREF(Py_None);
return Py_None;
}
PyErr_SetObject(Error, PyInt_FromLong(GetLastError()));
return NULL;
}
static PyObject *
winlock(PyObject *ignored, PyObject *args)
{
return common(LockFile, args);
}
static PyObject *
winunlock(PyObject *ignored, PyObject *args)
{
return common(UnlockFile, args);
}
static struct PyMethodDef methods[] = {
{"LockFile", (PyCFunction)winlock, METH_VARARGS,
"LockFile(fileno, offsetLow, offsetHigh, lengthLow, lengthHigh) -- "
"Lock the file associated with fileno"},
{"UnlockFile", (PyCFunction)winunlock, METH_VARARGS,
"UnlockFile(fileno, offsetLow, offsetHigh, lengthLow, lengthHigh) -- "
"Unlock the file associated with fileno"},
{NULL, NULL} /* sentinel */
};
#else
static struct PyMethodDef methods[] = {
{NULL, NULL} /* sentinel */
};
#endif
/* Initialization function for the module (*must* be called initwinlock) */
#ifndef DL_EXPORT /* declarations for DLL import/export */
#define DL_EXPORT(RTYPE) RTYPE
#endif
DL_EXPORT(void)
initwinlock(void)
{
PyObject *m, *d;
if (!(Error=PyString_FromString("winlock.error")))
return;
/* Create the module and add the functions */
m = Py_InitModule4("winlock", methods, winlock_doc_string,
(PyObject*)NULL, PYTHON_API_VERSION);
d = PyModule_GetDict(m);
PyDict_SetItemString(d, "error", Error);
}
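/* Hedged usage sketch from Python (Windows only; the file name is
 * hypothetical, not part of this module):
 *
 *   import winlock
 *   f = open('Data.fs', 'r+b')
 *   winlock.LockFile(f.fileno(), 0, 0, 0, 1)    # lock 2**32 bytes at offset 0
 *   ...
 *   winlock.UnlockFile(f.fileno(), 0, 0, 0, 1)
 */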