Commit 07f45175 authored by Martijn Pieters

Clean up indentation and trailing whitespace.

parent 813e756a
......@@ -2,21 +2,21 @@
#
# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
# All Rights Reserved.
#
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
#
##############################################################################
"""Python implementation of persistent list.
$Id: PersistentList.py,v 1.2 2002/02/11 23:49:07 gvanrossum Exp $"""
$Id: PersistentList.py,v 1.3 2002/08/14 22:07:09 mj Exp $"""
__version__='$Revision: 1.2 $'[11:-2]
__version__='$Revision: 1.3 $'[11:-2]
import Persistence
from UserList import UserList
......@@ -51,7 +51,7 @@ class PersistentList(UserList, Persistence.Persistent):
def __delslice__(self, i, j):
self.__super_delslice(i, j)
self._p_changed = 1
def __iadd__(self, other):
self.__super_iadd(other)
self._p_changed = 1
......@@ -63,7 +63,7 @@ class PersistentList(UserList, Persistence.Persistent):
def append(self, item):
self.__super_append(item)
self._p_changed = 1
def insert(self, i, item):
self.__super_insert(i, item)
self._p_changed = 1
......@@ -76,11 +76,11 @@ class PersistentList(UserList, Persistence.Persistent):
def remove(self, item):
self.__super_remove(item)
self._p_changed = 1
def reverse(self):
self.__super_reverse()
self._p_changed = 1
def sort(self, *args):
self.__super_sort(*args)
self._p_changed = 1
......
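The pattern in PersistentList above — every mutating method delegates to UserList and then sets _p_changed — is what lets the persistence machinery notice in-place mutations. A minimal usage sketch, assuming an already-open connection `conn` and the get_transaction() builtin installed by Transaction.py later in this diff:

    from ZODB.PersistentList import PersistentList

    pl = PersistentList([1, 2, 3])
    conn.root()['numbers'] = pl      # conn is an assumed, already-open Connection
    pl.append(4)                     # append() sets pl._p_changed = 1
    get_transaction().commit()       # so the mutation is written at commit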
......@@ -2,21 +2,21 @@
#
# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
# All Rights Reserved.
#
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
#
##############################################################################
"""Python implementation of persistent base types
$Id: PersistentMapping.py,v 1.19 2002/02/12 22:33:08 gvanrossum Exp $"""
$Id: PersistentMapping.py,v 1.20 2002/08/14 22:07:09 mj Exp $"""
__version__='$Revision: 1.19 $'[11:-2]
__version__='$Revision: 1.20 $'[11:-2]
import Persistence
from UserDict import UserDict
......@@ -82,7 +82,7 @@ class PersistentMapping(UserDict, Persistence.Persistent):
# different versions of the code. Compatibility works in both
# directions, because an application may want to share a database
# between applications using different versions of the code.
# Effectively, the original rep is part of the "API." To provide
# full compatibility, the getstate and setstate must read and
# write objects using the old rep.
......
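The compatibility note above says the original pickle representation is effectively part of the API, so __getstate__ and __setstate__ have to keep reading and writing it. A hypothetical sketch of that idea (the old attribute name '_container' is an assumption for illustration, not taken from this diff):

    class CompatMapping:
        # Old pickles stored the mapping under '_container'; newer code uses 'data'.
        def __init__(self):
            self.data = {}

        def __getstate__(self):
            # Keep writing the old representation so older code can read it.
            return {'_container': self.data}

        def __setstate__(self, state):
            # Accept either representation when loading.
            self.data = state.get('_container', state.get('data', {}))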
......@@ -2,19 +2,19 @@
#
# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
# All Rights Reserved.
#
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
#
##############################################################################
"""ZODB transfer activity monitoring
$Id: ActivityMonitor.py,v 1.2 2002/06/10 20:20:44 shane Exp $"""
__version__='$Revision: 1.2 $'[11:-2]
$Id: ActivityMonitor.py,v 1.3 2002/08/14 22:07:09 mj Exp $"""
__version__='$Revision: 1.3 $'[11:-2]
import time
......@@ -104,4 +104,3 @@ class ActivityMonitor:
div['loads'] = div['loads'] + total_loads
return res
......@@ -2,20 +2,20 @@
#
# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
# All Rights Reserved.
#
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
#
##############################################################################
"""Handy standard storage machinery
"""
# Do this portably in the face of checking out with -kv
import string
__version__ = string.split('$Revision: 1.19 $')[-2:][0]
__version__ = string.split('$Revision: 1.20 $')[-2:][0]
import ThreadLock, bpthread
import time, UndoLogCompatible
......@@ -30,7 +30,7 @@ class BaseStorage(UndoLogCompatible.UndoLogCompatible):
_is_read_only = 0
def __init__(self, name, base=None):
self.__name__=name
# Allocate locks:
......@@ -64,13 +64,13 @@ class BaseStorage(UndoLogCompatible.UndoLogCompatible):
def getName(self):
return self.__name__
def getSize(self):
return len(self)*300 # WAG!
def history(self, oid, version, length=1):
pass
def modifiedInVersion(self, oid):
return ''
......@@ -97,13 +97,13 @@ class BaseStorage(UndoLogCompatible.UndoLogCompatible):
def isReadOnly(self):
return self._is_read_only
def supportsUndo(self):
return 0
def supportsVersions(self):
return 0
def tpc_abort(self, transaction):
self._lock_acquire()
try:
......@@ -147,7 +147,7 @@ class BaseStorage(UndoLogCompatible.UndoLogCompatible):
self._tstatus=status
self._begin(self._serial, user, desc, ext)
finally: self._lock_release()
def _begin(self, tid, u, d, e):
......@@ -247,7 +247,7 @@ class BaseStorage(UndoLogCompatible.UndoLogCompatible):
else:
restoring = 0
for transaction in other.iterator():
tid=transaction.tid
if _ts is None:
_ts=TimeStamp(tid)
......@@ -265,7 +265,7 @@ class BaseStorage(UndoLogCompatible.UndoLogCompatible):
ok=1
if verbose: print _ts
self.tpc_begin(transaction, tid, transaction.status)
for r in transaction:
oid=r.oid
......@@ -276,7 +276,7 @@ class BaseStorage(UndoLogCompatible.UndoLogCompatible):
pre=preget(oid, None)
s=self.store(oid, pre, r.data, r.version, transaction)
preindex[oid]=s
self.tpc_vote(transaction)
self.tpc_finish(transaction)
......
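The loop above is the generic storage-copy protocol: iterate the source storage's transactions and replay each one through tpc_begin/store/tpc_vote/tpc_finish. A usage sketch for copying one FileStorage into another (file names are placeholders; the create and read_only flags are assumed from the FileStorage code further down in this diff):

    from ZODB.FileStorage import FileStorage

    src = FileStorage('Data.fs', read_only=1)
    dst = FileStorage('Copy.fs', create=1)
    dst.copyTransactionsFrom(src, verbose=1)   # verbose printing as shown above
    dst.close()
    src.close()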
......@@ -2,14 +2,14 @@
#
# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
# All Rights Reserved.
#
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
#
##############################################################################
from cStringIO import StringIO
from cPickle import Unpickler, Pickler
......
......@@ -2,18 +2,18 @@
#
# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
# All Rights Reserved.
#
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
#
##############################################################################
"""Database connection support
$Id: Connection.py,v 1.71 2002/06/14 20:25:06 jeremy Exp $"""
$Id: Connection.py,v 1.72 2002/08/14 22:07:09 mj Exp $"""
from cPickleCache import PickleCache, MUCH_RING_CHECKING
from POSException import ConflictError, ReadConflictError
......@@ -116,7 +116,7 @@ class Connection(ExportImport.ExportImport):
def __getitem__(self, oid, tt=type(())):
obj = self._cache.get(oid, None)
if obj is not None:
return obj
return obj
__traceback_info__ = (oid)
p, serial = self._storage.load(oid, self._version)
......@@ -136,7 +136,7 @@ class Connection(ExportImport.ExportImport):
if type(klass) is tt:
module, name = klass
klass=self._db._classFactory(self, module, name)
if (args is None or
not args and not hasattr(klass,'__getinitargs__')):
object=klass.__basicnew__()
......@@ -152,7 +152,7 @@ class Connection(ExportImport.ExportImport):
self._cache[oid] = object
if oid=='\0\0\0\0\0\0\0\0':
self._root_=object # keep a ref
self._root_=object # keep a ref
return object
def _persistent_load(self,oid,
......@@ -176,12 +176,12 @@ class Connection(ExportImport.ExportImport):
# Maybe there's more current data in the
# object's actual record!
return self[oid]
object=klass.__basicnew__()
object._p_oid=oid
object._p_jar=self
object._p_changed=None
self._cache[oid] = object
return object
......@@ -230,13 +230,13 @@ class Connection(ExportImport.ExportImport):
def cacheFullSweep(self, dt=0):
self._cache.full_sweep(dt)
def cacheMinimize(self, dt=0):
# dt is ignored
self._cache.minimize()
__onCloseCallbacks = None
def onCloseCallback(self, f):
if self.__onCloseCallbacks is None:
self.__onCloseCallbacks = []
......@@ -259,9 +259,9 @@ class Connection(ExportImport.ExportImport):
self._debug_info=()
# Return the connection to the pool.
db._closeConnection(self)
__onCommitActions = None
def onCommitAction(self, method_name, *args, **kw):
if self.__onCommitActions is None:
self.__onCommitActions = []
......@@ -307,26 +307,26 @@ class Connection(ExportImport.ExportImport):
# stackup=stackup, new_oid=self.new_oid):
# if (not hasattr(object, '_p_oid') or
# type(object) is ClassType): return None
#
#
# oid=object._p_oid
#
#
# if oid is None or object._p_jar is not self:
# oid = self.new_oid()
# object._p_jar=self
# object._p_oid=oid
# stackup(object)
#
#
# klass=object.__class__
#
#
# if klass is ExtensionKlass: return oid
#
#
# if hasattr(klass, '__getinitargs__'): return oid
#
#
# module=getattr(klass,'__module__','')
# if module: klass=module, klass.__name__
#
#
# return oid, klass
file=StringIO()
seek=file.seek
pickler=Pickler(file,1)
......@@ -340,7 +340,7 @@ class Connection(ExportImport.ExportImport):
version=self._version
while stack:
object=stack[-1]
del stack[-1]
......@@ -359,9 +359,9 @@ class Connection(ExportImport.ExportImport):
):
raise ConflictError(object=object)
self._invalidating.append(oid)
klass = object.__class__
if klass is ExtensionKlass:
# Yee Ha!
dict={}
......@@ -375,12 +375,12 @@ class Connection(ExportImport.ExportImport):
len(args) # XXX Assert it's a sequence
else:
args = None # New no-constructor protocol!
module=getattr(klass,'__module__','')
if module: klass=module, klass.__name__
__traceback_info__=klass, oid, self._version
state=object.__getstate__()
seek(0)
clear_memo()
dump((klass,args))
......@@ -409,12 +409,12 @@ class Connection(ExportImport.ExportImport):
LOG('ZODB', BLATHER,
'Committing subtransaction of size %s' % src.getSize())
self._storage=tmp
self._tmp=None
tmp.tpc_begin(t)
load=src.load
store=tmp.store
dest=self._version
......@@ -426,7 +426,7 @@ class Connection(ExportImport.ExportImport):
invalidating[len(invalidating):]=oids
creating=self._creating
creating[len(creating):]=src._creating
for oid in oids:
data, serial = load(oid, src)
s=store(oid, serial, data, dest, t)
......@@ -464,7 +464,7 @@ class Connection(ExportImport.ExportImport):
def db(self): return self._db
def getVersion(self): return self._version
def invalidate(self, oid):
"""Invalidate a particular oid
......@@ -575,21 +575,21 @@ class Connection(ExportImport.ExportImport):
file=StringIO(p)
unpickler=Unpickler(file)
unpickler.persistent_load=self._persistent_load
copy = unpickler.load()
klass, args = copy
if klass is not ExtensionKlass:
LOG('ZODB',ERROR,
"Unexpected klass when setting class state on %s"
% getattr(object,'__name__','(?)'))
return
copy=apply(klass,args)
object.__dict__.clear()
object.__dict__.update(copy.__dict__)
object._p_oid=oid
object._p_jar=self
object._p_changed=0
......@@ -647,7 +647,7 @@ class Connection(ExportImport.ExportImport):
# update the _p_changed flag, because the subtransaction
# tpc_vote() calls already did this. The change=1 argument
# exists to allow commit_sub() to avoid setting the flag
# again.
# again.
if not store_return:
return
if isinstance(store_return, StringType):
......@@ -712,7 +712,7 @@ class Connection(ExportImport.ExportImport):
def getDebugInfo(self):
return self._debug_info
def setDebugInfo(self, *args):
self._debug_info = self._debug_info + args
......@@ -737,9 +737,8 @@ class Connection(ExportImport.ExportImport):
new._p_changed=1
get_transaction().register(new)
self._cache[oid]=new
class tConnection(Connection):
def close(self):
self._breakcr()
......@@ -2,19 +2,19 @@
#
# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
# All Rights Reserved.
#
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
#
##############################################################################
"""Database objects
$Id: DB.py,v 1.42 2002/06/10 20:20:44 shane Exp $"""
__version__='$Revision: 1.42 $'[11:-2]
$Id: DB.py,v 1.43 2002/08/14 22:07:09 mj Exp $"""
__version__='$Revision: 1.43 $'[11:-2]
import cPickle, cStringIO, sys, POSException, UndoLogCompatible
from Connection import Connection
......@@ -101,7 +101,7 @@ class DB(UndoLogCompatible.UndoLogCompatible):
if hasattr(storage, 'undoInfo'):
self.undoInfo=storage.undoInfo
def _cacheMean(self, attr):
# XXX this method doesn't work
......@@ -120,7 +120,7 @@ class DB(UndoLogCompatible.UndoLogCompatible):
_silly=('__doc__',), _globals={}):
return getattr(__import__(location, _globals, _globals, _silly),
name)
def _closeConnection(self, connection):
"""Return a connection to the pool"""
self._a()
......@@ -136,7 +136,7 @@ class DB(UndoLogCompatible.UndoLogCompatible):
# Pool now usable again, unlock it.
pool_lock.release()
finally: self._r()
def _connectionMap(self, f):
self._a()
try:
......@@ -171,7 +171,7 @@ class DB(UndoLogCompatible.UndoLogCompatible):
detail[c] = detail[c] + 1
else:
detail[c] = 1
self._connectionMap(f)
detail = detail.items()
detail.sort()
......@@ -194,7 +194,7 @@ class DB(UndoLogCompatible.UndoLogCompatible):
module = getattr(ob.__class__, '__module__', '')
module = module and '%s.' % module or ''
detail.append({
'conn_no': cn,
'oid': oid,
......@@ -252,10 +252,10 @@ class DB(UndoLogCompatible.UndoLogCompatible):
def exportFile(self, oid, file=None):
raise 'Not yet implemented'
def getCacheDeactivateAfter(self):
return self._cache_deactivate_after
def getCacheSize(self):
return self._cache_size
......@@ -267,7 +267,7 @@ class DB(UndoLogCompatible.UndoLogCompatible):
def getVersionCacheDeactivateAfter(self):
return self._version_cache_deactivate_after
def getVersionCacheSize(self):
return self._version_cache_size
......@@ -336,7 +336,7 @@ class DB(UndoLogCompatible.UndoLogCompatible):
def objectCount(self):
return len(self._storage)
def open(self, version='', transaction=None, temporary=0, force=None,
waitflag=1):
"""Return a object space (AKA connection) to work in
......@@ -354,7 +354,7 @@ class DB(UndoLogCompatible.UndoLogCompatible):
"""
if type(version) is not StringType:
raise POSException.Unimplemented, 'temporary versions'
self._a()
try:
......@@ -366,7 +366,7 @@ class DB(UndoLogCompatible.UndoLogCompatible):
else:
transaction._connections=connections={}
transaction=transaction._connections
if temporary:
# This is a temporary connection.
......@@ -407,7 +407,7 @@ class DB(UndoLogCompatible.UndoLogCompatible):
# the last connection from the pool and just after adding
# a connection to an empty pool.
if pools.has_key(version):
pool, allocated, pool_lock = pools[version]
else:
......@@ -432,7 +432,7 @@ class DB(UndoLogCompatible.UndoLogCompatible):
cache_size=self._cache_size)
allocated.append(c)
pool.append(c)
if c is None:
if waitflag:
self._r()
......@@ -482,17 +482,17 @@ class DB(UndoLogCompatible.UndoLogCompatible):
if len(d)==1: d=d[0]
else: d=''
d="%s (%s)" % (d, len(c._cache))
r.append({
'opened': o and ("%s (%.2fs)" % (ctime(o), t-o)),
'info': d,
'version': version,
})
return r
def getActivityMonitor(self):
return self._activity_monitor
def pack(self, t=None, days=0):
if t is None: t=time()
t=t-(days*86400)
......@@ -500,7 +500,7 @@ class DB(UndoLogCompatible.UndoLogCompatible):
except:
LOG("ZODB", ERROR, "packing", error=sys.exc_info())
raise
def setCacheDeactivateAfter(self, v):
self._cache_deactivate_after = v
d = self._pools[0]
......@@ -539,7 +539,7 @@ class DB(UndoLogCompatible.UndoLogCompatible):
if ver:
for c in self._pools[0][ver][1]:
c._cache.cache_size=v
def setVersionPoolSize(self, v): self._version_pool_size=v
def cacheStatistics(self): return () # :(
......@@ -590,7 +590,7 @@ class CommitVersion:
# the code above just invalidated the dest version.
# now we need to invalidate the source!
for oid in oids: db.invalidate(oid, version=self._version)
class AbortVersion(CommitVersion):
"""An object that will see to version abortion
......@@ -610,7 +610,7 @@ class TransactionalUndo(CommitVersion):
in cooperation with a transaction manager.
"""
# I'm lazy. I'm reusing __init__ and abort and reusing the
# version attr for the transaction id. There's such a strong
# similarity of rhythm, that I think it's justified.
......
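DB.pack() above defaults t to the current time and then subtracts days*86400 seconds before handing off to the storage. A usage sketch; MappingStorage is used only because it is assumed to need no constructor arguments:

    from ZODB.DB import DB
    from ZODB.MappingStorage import MappingStorage

    db = DB(MappingStorage())     # any storage with a pack() method will do
    db.pack(days=7)               # same as db.pack(t=time.time() - 7*86400)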
......@@ -2,14 +2,14 @@
#
# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
# All Rights Reserved.
#
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
#
##############################################################################
"""Demo ZODB storage
......@@ -45,7 +45,7 @@ There are three main data structures:
A record is a tuple:
oid, serial, pre, vdata, p,
oid, serial, pre, vdata, p,
where:
......@@ -79,7 +79,7 @@ method::
and call it to monitor the storage.
"""
__version__='$Revision: 1.11 $'[11:-2]
__version__='$Revision: 1.12 $'[11:-2]
import base64, time, string
from ZODB import POSException, BaseStorage, utils
......@@ -109,7 +109,7 @@ class DemoStorage(BaseStorage.BaseStorage):
def __len__(self):
base=self._base
return (base and len(base) or 0) + len(self._index)
def getSize(self):
s=100
for tid, (p, u, d, e, t) in self._data.items():
......@@ -131,12 +131,12 @@ class DemoStorage(BaseStorage.BaseStorage):
raise POSException.StorageTransactionError(self, transaction)
if not src:
raise POSException.VersionCommitError("Invalid version")
self._lock_acquire()
try:
v=self._vindex.get(src, None)
if not v: return
tindex=self._tindex
oids=[]
for r in v.values():
......@@ -147,16 +147,16 @@ class DemoStorage(BaseStorage.BaseStorage):
tindex.append([oid, serial, r, None, p])
else:
# effectively, delete the thing
tindex.append([oid, None, r, None, None])
tindex.append([oid, None, r, None, None])
return oids
finally: self._lock_release()
def commitVersion(self, src, dest, transaction):
if transaction is not self._transaction:
raise POSException.StorageTransactionError(self, transaction)
if not src:
raise POSException.VersionCommitError("Invalid source version")
if src == dest:
......@@ -167,7 +167,7 @@ class DemoStorage(BaseStorage.BaseStorage):
try:
v=self._vindex.get(src, None)
if v is None: return
tindex=self._tindex
oids=[]
for r in v.values():
......@@ -179,7 +179,7 @@ class DemoStorage(BaseStorage.BaseStorage):
else:
new_vdata = None
tindex.append([oid, serial, r, new_vdata, p])
return oids
......@@ -205,10 +205,10 @@ class DemoStorage(BaseStorage.BaseStorage):
if p is None:
raise KeyError, oid
return p, serial
finally: self._lock_release()
def modifiedInVersion(self, oid):
self._lock_acquire()
try:
......@@ -232,22 +232,22 @@ class DemoStorage(BaseStorage.BaseStorage):
except: pass
else:
old= oid, oserial, None, None, p
nv=None
if old:
oid, oserial, pre, vdata, p = old
if vdata:
if vdata[0] != version:
raise POSException.VersionLockError, oid
nv=vdata[1]
else:
nv=old
if serial != oserial:
raise POSException.ConflictError(serials=(oserial, serial))
serial=self._serial
r=[oid, serial, old, version and (version, nv) or None, data]
self._tindex.append(r)
......@@ -274,7 +274,7 @@ class DemoStorage(BaseStorage.BaseStorage):
def _begin(self, tid, u, d, e):
self._tsize=self._size+120+len(u)+len(d)+len(e)
def _finish(self, tid, user, desc, ext):
index=self._index
......@@ -293,9 +293,9 @@ class DemoStorage(BaseStorage.BaseStorage):
v=vindex[oldvdata[0]]
del v[oid]
if not v: del vindex[oldvdata[0]]
index[oid]=r
if vdata:
version=vdata[0]
v=vindex.get(version, None)
......@@ -321,7 +321,7 @@ class DemoStorage(BaseStorage.BaseStorage):
for r in t:
oid, serial, pre, vdata, p = r
if pre:
index[oid] = pre
oids.append(oid)
......@@ -338,7 +338,7 @@ class DemoStorage(BaseStorage.BaseStorage):
v=vindex.get(version, None)
if v is None: v=vindex[version]={}
v[oid]=pre
else:
del index[oid]
if vdata:
......@@ -413,7 +413,7 @@ class DemoStorage(BaseStorage.BaseStorage):
v=vindex[oldvdata[0]]
del v[oid]
if not v: del vindex[oldvdata[0]]
index[oid]=r
if vdata:
......@@ -428,16 +428,16 @@ class DemoStorage(BaseStorage.BaseStorage):
# Packing is hard, at least when undo is supported.
# Even for a simple storage like this one, packing
# is pretty complex.
self._lock_acquire()
try:
stop=`apply(TimeStamp, time.gmtime(t)[:5]+(t%60,))`
_data=self._data
# Build indexes up to the pack time:
index, vindex = self._build_indexes(stop)
# Now build an index of *only* those objects reachable
# from the root.
rootl=['\0\0\0\0\0\0\0\0']
......@@ -447,7 +447,7 @@ class DemoStorage(BaseStorage.BaseStorage):
while rootl:
oid=pop()
if referenced(oid): continue
# Scan non-version pickle for references
r=index.get(oid, None)
if r is None:
......@@ -463,7 +463,7 @@ class DemoStorage(BaseStorage.BaseStorage):
if nv:
oid, serial, pre, vdata, p = nv
referencesf(p, rootl)
# Now we're ready to do the actual packing.
# We'll simply edit the transaction data in place.
# We'll defer deleting transactions till the end
......@@ -484,7 +484,7 @@ class DemoStorage(BaseStorage.BaseStorage):
if vdata:
# Version records are current *only* if they
# are indexed
continue
continue
else:
# OK, this isn't a version record, so it may be the
# non-version record for the indexed record.
......@@ -500,16 +500,16 @@ class DemoStorage(BaseStorage.BaseStorage):
# record for it.
continue
o.append(r)
if o:
if len(o) != len(t):
_data[tid]=1, u, d, e, tuple(o) # Reset data
else:
deleted.append(tid)
# Now delete empty transactions
for tid in deleted: del _data[tid]
# Now reset previous pointers for "current" records:
for r in pindex.values():
r[2]=None # Previous record
......@@ -517,7 +517,7 @@ class DemoStorage(BaseStorage.BaseStorage):
r[3][1][2]=None
pindex=None
# Finally, rebuild indexes from transaction data:
self._index, self._vindex = self._build_indexes()
......@@ -559,6 +559,6 @@ class DemoStorage(BaseStorage.BaseStorage):
for oid, r in vitems:
if r: r=id(r)
o.append(' %s: %s' % (utils.u64(oid), r))
return string.join(o,'\n')
......@@ -2,14 +2,14 @@
#
# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
# All Rights Reserved.
#
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
#
##############################################################################
"""Support for database export and import.
......@@ -60,7 +60,7 @@ class ExportImport:
file=open(file,'rb')
else:
try: file_name=file.name
except: file_name='(unknown)'
except: file_name='(unknown)'
read=file.read
magic=read(4)
......@@ -100,7 +100,7 @@ class ExportImport:
atoi=string.atoi, TupleType=type(()),
oids=oids, wrote_oid=oids.has_key,
new_oid=storage.new_oid):
"Remap a persistent id to a new ID and create a ghost for it."
if type(ooid) is TupleType: ooid, klass = ooid
......@@ -168,4 +168,3 @@ class Ghost: pass
def persistent_id(object, Ghost=Ghost):
if getattr(object, '__class__', None) is Ghost:
return object.oid
......@@ -2,98 +2,98 @@
#
# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
# All Rights Reserved.
#
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
#
##############################################################################
#
#
# File-based ZODB storage
#
#
# Files are arranged as follows.
#
#
# - The first 4 bytes are a file identifier.
#
#
# - The rest of the file consists of a sequence of transaction
# "records".
#
#
# A transaction record consists of:
#
#
# - 8-byte transaction id, which is also a time stamp.
#
#
# - 8-byte transaction record length - 8.
#
#
# - 1-byte status code
#
#
# - 2-byte length of user name
#
# - 2-byte length of description
#
# - 2-byte length of extension attributes
#
#
# - 2-byte length of description
#
# - 2-byte length of extension attributes
#
# - user name
#
#
# - description
#
# - extension attributes
#
#
# * A sequence of data records
#
#
# - 8-byte redundant transaction length -8
#
#
# A data record consists of
#
#
# - 8-byte oid.
#
#
# - 8-byte serial, which is a time stamp that matches the
# transaction timestamp.
#
#
# - 8-byte previous-record file-position.
#
#
# - 8-byte beginning of transaction record file position.
#
#
# - 2-byte version length
#
#
# - 8-byte data length
#
#
# ? 8-byte position of non-version data
# (if version length > 0)
#
#
# ? 8-byte position of previous record in this version
# (if version length > 0)
#
# ? version string
#
# ? version string
# (if version length > 0)
#
#
# ? data
# (data length > 0)
#
#
# ? 8-byte position of data record containing data
# (data length == 0)
#
#
# Note that the lengths and positions are all big-endian.
# Also, the object ids and time stamps are big-endian, so comparisons
# are meaningful.
#
#
# Version handling
#
#
# There isn't a separate store for versions. Each record has a
# version field, indicating what version it is in. The records in a
# version form a linked list. Each record that has a non-empty
# version string has a pointer to the previous record in the version.
# Version back pointers are retained *even* when versions are
# committed or aborted or when transactions are undone.
#
#
# There is a notion of "current" version records, which are the
# records in a version that are the current records for their
# respective objects. When a version is committed, the current records
# are committed to the destination version. When a version is
# aborted, the current records are aborted.
#
#
# When committing or aborting, we search backward through the linked
# list until we find a record for an object that does not have a
# current record in the version. If we find a record for which the
......@@ -101,7 +101,7 @@
# forget that the corresponding object had a current record in the
# version. This strategy allows us to avoid searching backward through
# previously committed or aborted version records.
#
#
# Of course, we ignore records in undone transactions when committing
# or aborting.
#
......@@ -115,7 +115,7 @@
# may have a back pointer to a version record or to a non-version
# record.
#
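The header layout described above corresponds to the struct formats used later in this file (">8s8scHHH" for the 23-byte transaction header, ">8s8s8s8sH8s" for the data record header). An illustrative sketch, not part of the module, that reads the first transaction header from an existing file:

    import struct

    def read_first_txn_header(path):
        f = open(path, 'rb')
        f.read(4)                             # skip the 4-byte file identifier
        h = f.read(23)                        # 8+8+1+2+2+2 byte transaction header
        tid, stl, status, ul, dl, el = struct.unpack(">8s8scHHH", h)
        user = f.read(ul)                     # user name, then description,
        desc = f.read(dl)                     # then extension attributes follow
        f.close()
        return tid, status, user, desc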
__version__='$Revision: 1.94 $'[11:-2]
__version__='$Revision: 1.95 $'[11:-2]
import base64
from cPickle import Pickler, Unpickler, loads
......@@ -235,7 +235,7 @@ class FileStorage(BaseStorage.BaseStorage,
index, vindex, tindex, tvindex = self._newIndexes()
self._initIndex(index, vindex, tindex, tvindex)
# Now open the file
self._file = None
......@@ -256,7 +256,7 @@ class FileStorage(BaseStorage.BaseStorage,
raise
else:
create = 1
if self._file is None and create:
if os.path.exists(file_name):
os.remove(file_name)
......@@ -304,14 +304,14 @@ class FileStorage(BaseStorage.BaseStorage,
def _newIndexes(self):
# hook to use something other than builtin dict
return {}, {}, {}, {}
def abortVersion(self, src, transaction):
return self.commitVersion(src, '', transaction, abort=1)
def _save_index(self):
"""Write the database index to a file to support quick startup
"""
index_name=self.__name__+'.index'
tmp_name=index_name+'.index_tmp'
......@@ -348,7 +348,7 @@ class FileStorage(BaseStorage.BaseStorage,
with the index. Any invalid records or inconsistent
object positions cause zero to be returned.
"""
if pos < 100: return 0
file=self._file
seek=file.seek
......@@ -376,20 +376,20 @@ class FileStorage(BaseStorage.BaseStorage,
if opos==tend: continue # empty trans
while opos < tend:
# Read the data records for this transaction
# Read the data records for this transaction
seek(opos)
h=read(DATA_HDR_LEN)
oid,serial,sprev,stloc,vlen,splen = unpack(">8s8s8s8sH8s", h)
tloc=U64(stloc)
plen=U64(splen)
dlen=DATA_HDR_LEN+(plen or 8)
if vlen: dlen=dlen+(16+vlen)
if opos+dlen > tend or tloc != pos: return 0
if index.get(oid, 0) != opos: return 0
opos=opos+dlen
return ltid
......@@ -399,10 +399,10 @@ class FileStorage(BaseStorage.BaseStorage,
"""
file_name=self.__name__
index_name=file_name+'.index'
try: f=open(index_name,'rb')
except: return None
p=Unpickler(f)
try:
......@@ -422,7 +422,7 @@ class FileStorage(BaseStorage.BaseStorage,
tid=self._sane(index, pos)
if not tid: return None
return index, vindex, pos, oid, tid
def close(self):
......@@ -436,7 +436,7 @@ class FileStorage(BaseStorage.BaseStorage,
except:
# XXX should log the error, though
pass # We don't care if this fails.
def commitVersion(self, src, dest, transaction, abort=None):
# We are going to commit by simply storing back pointers.
if self._is_read_only:
......@@ -452,10 +452,10 @@ class FileStorage(BaseStorage.BaseStorage,
if dest and abort:
raise POSException.VersionCommitError(
"Internal error, can't abort to a version")
if transaction is not self._transaction:
raise POSException.StorageTransactionError(self, transaction)
self._lock_acquire()
try:
return self._commitVersion(src, dest, transaction, abort)
......@@ -572,7 +572,7 @@ class FileStorage(BaseStorage.BaseStorage,
return h[8:16]
finally:
self._lock_release()
def _load(self, oid, version, _index, file):
try:
......@@ -640,7 +640,7 @@ class FileStorage(BaseStorage.BaseStorage,
pnv=read(8)
return _loadBack(file, oid, pnv)[0]
finally: self._lock_release()
def modifiedInVersion(self, oid):
self._lock_acquire()
try:
......@@ -692,7 +692,7 @@ class FileStorage(BaseStorage.BaseStorage,
serials=(oserial, serial))
else:
oserial=serial
tfile=self._tfile
write=tfile.write
pos=self._pos
......@@ -724,7 +724,7 @@ class FileStorage(BaseStorage.BaseStorage,
return (serial == oserial and newserial
or ConflictResolution.ResolvedSerial)
finally:
self._lock_release()
......@@ -806,7 +806,7 @@ class FileStorage(BaseStorage.BaseStorage,
def supportsUndo(self):
return 1
def supportsVersions(self):
return 1
......@@ -876,7 +876,7 @@ class FileStorage(BaseStorage.BaseStorage,
self._nextpos = self._pos + (tl + 8)
finally:
self._lock_release()
def _finish(self, tid, u, d, e):
nextpos=self._nextpos
if nextpos:
......@@ -884,7 +884,7 @@ class FileStorage(BaseStorage.BaseStorage,
# Clear the checkpoint flag
file.seek(self._pos+16)
file.write(self._tstatus)
file.write(self._tstatus)
file.flush()
if fsync is not None: fsync(file.fileno())
......@@ -920,7 +920,7 @@ class FileStorage(BaseStorage.BaseStorage,
unpack=struct.unpack
seek(tpos)
h=read(TRANS_HDR_LEN)
if len(h) != TRANS_HDR_LEN or h[:8] != tid:
if len(h) != TRANS_HDR_LEN or h[:8] != tid:
raise UndoError('Invalid undo transaction id')
if h[16] == 'u': return
if h[16] != ' ': raise UndoError
......@@ -947,7 +947,7 @@ class FileStorage(BaseStorage.BaseStorage,
file.write('u')
file.flush()
self._index.update(t)
return t.keys()
return t.keys()
finally: self._lock_release()
def supportsTransactionalUndo(self):
......@@ -984,7 +984,7 @@ class FileStorage(BaseStorage.BaseStorage,
if tpos: file.seek(tpos) # Restore temp file to end
return serial, pos, data, version
def _getVersion(self, oid, pos):
self._file.seek(pos)
read=self._file.read
......@@ -995,7 +995,7 @@ class FileStorage(BaseStorage.BaseStorage,
return read(vlen), h[:8]
else:
return '',''
def _getSerial(self, oid, pos):
self._file.seek(pos+8)
return self._file.read(8)
......@@ -1008,9 +1008,9 @@ class FileStorage(BaseStorage.BaseStorage,
position. If the pickle is true, then the data pointer must
be 0, but the pickle can be empty *and* the pointer 0.
"""
copy=1 # Can we just copy a data pointer
tpos=self._tindex.get(oid, 0)
tpos=self._tindex.get(oid, 0)
ipos=self._index.get(oid, 0)
tipos=tpos or ipos
if tipos != pos:
......@@ -1055,7 +1055,7 @@ class FileStorage(BaseStorage.BaseStorage,
except KeyError:
# couldn't find oid; what's the real explanation for this?
raise UndoError("_loadBack() failed for %s" % repr(oid))
data=self.tryToResolveConflict(oid, cserial, serial, bdata, cdata)
data=self.tryToResolveConflict(oid, cserial, serial, bdata, cdata)
if data:
return data, 0, version, snv, ipos
......@@ -1129,7 +1129,7 @@ class FileStorage(BaseStorage.BaseStorage,
raise POSException.ReadOnlyError()
if transaction is not self._transaction:
raise POSException.StorageTransactionError(self, transaction)
self._lock_acquire()
try:
return self._txn_undo(transaction_id)
......@@ -1143,7 +1143,7 @@ class FileStorage(BaseStorage.BaseStorage,
tpos = self._txn_find(tid)
tindex = self._txn_undo_write(tpos, tid)
self._tindex.update(tindex)
return tindex.keys()
return tindex.keys()
def _txn_find(self, tid):
pos = self._pos
......@@ -1187,7 +1187,7 @@ class FileStorage(BaseStorage.BaseStorage,
oid, serial, sprev, stloc, vlen, splen = \
struct.unpack(">8s8s8s8sH8s", h)
if failed(oid):
del failures[oid] # second chance!
del failures[oid] # second chance!
plen = U64(splen)
prev = U64(sprev)
if vlen:
......@@ -1205,7 +1205,7 @@ class FileStorage(BaseStorage.BaseStorage,
# Don't fail right away. We may be redeemed later!
failures[oid] = v
else:
plen = len(p)
plen = len(p)
self._tfile.write(pack(">8s8s8s8sH8s",
oid, self._serial, p64(ipos),
ostloc, len(v), p64(plen)))
......@@ -1232,7 +1232,7 @@ class FileStorage(BaseStorage.BaseStorage,
raise UndoError(failures)
return tindex
def versionEmpty(self, version):
if not version:
......@@ -1345,10 +1345,10 @@ class FileStorage(BaseStorage.BaseStorage,
def pack(self, t, referencesf):
"""Copy data from the current database file to a packed file
Non-current records from transactions with time-stamp strings less
than packtss are omitted, as are all undone records.
Also, data back pointers that point before packtss are resolved and
the associated data are copied, since the old records are not copied.
"""
......@@ -1356,7 +1356,7 @@ class FileStorage(BaseStorage.BaseStorage,
if self._is_read_only:
raise POSException.ReadOnlyError()
# Ugh, this seems long
packing=1 # are we in the packing phase (or the copy phase)
locked=0
_lock_acquire=self._lock_acquire
......@@ -1395,7 +1395,7 @@ class FileStorage(BaseStorage.BaseStorage,
raise FileStorageError, (
'The database has already been packed to a later time\n'
'or no changes have been made since the last pack')
rootl=[z64]
pop=rootl.pop
pindex=fsIndex()
......@@ -1412,35 +1412,35 @@ class FileStorage(BaseStorage.BaseStorage,
if nv:
p, serial = _load(oid, '', index, file)
referencesf(p, rootl)
pindex[oid]=index[oid]
except:
pindex[oid]=0
error('Bad reference to %s', `(oid,v)`)
spackpos=p64(packpos)
##################################################################
# Step 2, copy data and compute new index based on new positions.
index, vindex, tindex, tvindex = self._newIndexes()
ofile=open(name+'.pack', 'w+b')
# Index for non-version data. This is a temporary structure
# to reduce I/O during packing
nvindex=fsIndex()
# Cache a bunch of methods
seek=file.seek
read=file.read
oseek=ofile.seek
write=ofile.write
index_get=index.get
vindex_get=vindex.get
pindex_get=pindex.get
# Initialize,
# Initialize,
pv=z64
offset=0L # the amount of space freed by packing
pos=opos=4L
......@@ -1470,7 +1470,7 @@ class FileStorage(BaseStorage.BaseStorage,
file.close()
os.remove(name+'.pack')
return
packing=0
_commit_lock_acquire()
_lock_acquire()
......@@ -1499,7 +1499,7 @@ class FileStorage(BaseStorage.BaseStorage,
tl=tl+8
write(read(tl-TRANS_HDR_LEN))
opos=opos+tl
# Undone transaction, skip it
pos=tend+8
continue
......@@ -1546,13 +1546,13 @@ class FileStorage(BaseStorage.BaseStorage,
if packing:
ppos=pindex_get(oid, 0)
if ppos != pos:
if not ppos:
# This object is no longer referenced
# so skip it.
pos=pos+dlen
continue
# This is not the most current record
# But maybe it's the most current committed
# record.
......@@ -1584,7 +1584,7 @@ class FileStorage(BaseStorage.BaseStorage,
nvindex[oid]=opos
tindex[oid]=opos
opos=opos+dlen
pos=pos+dlen
......@@ -1626,7 +1626,7 @@ class FileStorage(BaseStorage.BaseStorage,
# Just adjust for the offset
p=p-offset
p=p64(p)
sprev=p64(index_get(oid, 0))
write(pack(">8s8s8s8sH8s",
oid,serial,sprev,p64(otpos),vlen,splen))
......@@ -1642,7 +1642,7 @@ class FileStorage(BaseStorage.BaseStorage,
# we just need to adjust the pointer
# with the offset
pnv=pnv-offset
write(p64(pnv))
write(pv)
write(version)
......@@ -1772,7 +1772,7 @@ def shift_transactions_forward(index, vindex, tindex, file, pos, opos):
index_get=index.get
vindex_get=vindex.get
# Initialize,
# Initialize,
pv=z64
p1=opos
p2=pos
......@@ -1795,7 +1795,7 @@ def shift_transactions_forward(index, vindex, tindex, file, pos, opos):
h=read(TRANS_HDR_LEN)
if len(h) < TRANS_HDR_LEN: break
tid, stl, status, ul, dl, el = unpack(">8s8scHHH",h)
if status=='c': break # Oops. we found a checkpoint flag.
if status=='c': break # Oops. we found a checkpoint flag.
tl=U64(stl)
tpos=pos
tend=tpos+tl
......@@ -1856,13 +1856,13 @@ def shift_transactions_forward(index, vindex, tindex, file, pos, opos):
if pnv >= p2: pnv=pnv-offset
elif pnv >= p1:
pnv=index_get(oid, 0)
write(p64(pnv))
write(pv)
write(version)
write(p)
opos=opos+dlen
pos=pos+dlen
......@@ -1897,7 +1897,7 @@ def recover(file_name):
index={}
vindex={}
tindex={}
pos, oid, tid = read_index(
file, file_name, index, vindex, tindex, recover=1)
if oid is not None:
......@@ -1916,7 +1916,7 @@ def recover(file_name):
print "Recovered file, lost %s, ended up with %s bytes" % (
pos-opos, npos)
def read_index(file, name, index, vindex, tindex, stop='\377'*8,
ltid=z64, start=4L, maxoid=z64, recover=0, read_only=0):
......@@ -1939,9 +1939,9 @@ def read_index(file, name, index, vindex, tindex, stop='\377'*8,
The file position returned is the position just after the last
valid transaction record. The oid returned is the maximum object
id in the data. The transaction id is the tid of the last
transaction.
transaction.
"""
read = file.read
seek = file.seek
seek(0, 2)
......@@ -2023,7 +2023,7 @@ def read_index(file, name, index, vindex, tindex, stop='\377'*8,
tpos=pos
tend=tpos+tl
if status=='u':
# Undone transaction, skip it
seek(tend)
......@@ -2045,10 +2045,10 @@ def read_index(file, name, index, vindex, tindex, stop='\377'*8,
prev=U64(sprev)
tloc=U64(stloc)
plen=U64(splen)
dlen=DATA_HDR_LEN+(plen or 8)
tindex[oid]=pos
if vlen:
dlen=dlen+(16+vlen)
seek(8,1)
......@@ -2064,7 +2064,7 @@ def read_index(file, name, index, vindex, tindex, stop='\377'*8,
if recover: return tpos, None, None
panic("%s data record exceeds transaction record at %s",
name, pos)
if index_get(oid, 0) != prev:
if prev:
if recover: return tpos, None, None
......@@ -2099,7 +2099,7 @@ def read_index(file, name, index, vindex, tindex, stop='\377'*8,
def _loadBack(file, oid, back):
## seek=file.seek
## read=file.read
while 1:
old = U64(back)
if not old:
......@@ -2119,7 +2119,7 @@ def _loadBackPOS(file, oid, back):
the record at the given position (back)."""
seek=file.seek
read=file.read
while 1:
old=U64(back)
if not old:
......@@ -2152,7 +2152,7 @@ def _truncate(file, name, pos):
error("couldn\'t write truncated data for %s", name)
raise POSException.StorageSystemError, (
"Couldn't save truncated data")
seek(pos)
file.truncate()
......@@ -2177,7 +2177,7 @@ class FileIterator(Iterator):
"""
_ltid = z64
_file = None
def __init__(self, file, start=None, stop=None):
if isinstance(file, StringType):
file = open(file, 'rb')
......@@ -2201,7 +2201,7 @@ class FileIterator(Iterator):
def _skip_to_start(self, start):
# Scan through the transaction records doing almost no sanity
# checks.
# checks.
while 1:
self._file.seek(self._pos)
h = self._file.read(16)
......@@ -2335,7 +2335,7 @@ class FileIterator(Iterator):
return result
raise IndexError, index
class RecordIterator(Iterator, BaseStorage.TransactionRecord):
"""Iterate over the transactions in a FileStorage file.
"""
......@@ -2391,11 +2391,11 @@ class RecordIterator(Iterator, BaseStorage.TransactionRecord):
p = None
else:
p = _loadBack(file, oid, p)[0]
r = Record(oid, serial, version, p)
return r
raise IndexError, index
class Record(BaseStorage.DataRecord):
......
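FileIterator and RecordIterator above expose the same per-transaction, per-record structure that copyTransactionsFrom() consumes, which makes them handy for offline inspection of a .fs file. A usage sketch (the file name is a placeholder; only attributes that appear elsewhere in this diff are used):

    from ZODB.FileStorage import FileIterator

    for txn in FileIterator('Data.fs'):
        print repr(txn.tid), txn.status
        for rec in txn:
            print ' ', repr(rec.oid), rec.version, len(rec.data or '')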
......@@ -2,14 +2,14 @@
#
# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
# All Rights Reserved.
#
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
#
##############################################################################
"""Very Simple Mapping ZODB storage
......@@ -53,7 +53,7 @@ There are three main data structures:
A record is a tuple:
oid, serial, pre, vdata, p,
oid, serial, pre, vdata, p,
where:
......@@ -87,7 +87,7 @@ method::
and call it to monitor the storage.
"""
__version__='$Revision: 1.6 $'[11:-2]
__version__='$Revision: 1.7 $'[11:-2]
import POSException, BaseStorage, string, utils
from TimeStamp import TimeStamp
......@@ -108,14 +108,14 @@ class MappingStorage(BaseStorage.BaseStorage):
def __len__(self):
return len(self._index)
def getSize(self):
s=32
index=self._index
for oid in index.keys():
p=index[oid]
s=s+56+len(p)
return s
def load(self, oid, version):
......@@ -139,7 +139,7 @@ class MappingStorage(BaseStorage.BaseStorage):
oserial=old[:8]
if serial != oserial:
raise POSException.ConflictError(serials=(oserial, serial))
serial=self._serial
self._tindex.append((oid,serial+data))
finally: self._lock_release()
......@@ -155,9 +155,9 @@ class MappingStorage(BaseStorage.BaseStorage):
for oid, p in self._tindex: index[oid]=p
def pack(self, t, referencesf):
self._lock_acquire()
try:
try:
# Build an index of *only* those objects reachable
# from the root.
index=self._index
......@@ -168,7 +168,7 @@ class MappingStorage(BaseStorage.BaseStorage):
while rootl:
oid=pop()
if referenced(oid): continue
# Scan non-version pickle for references
r=index[oid]
pindex[oid]=r
......@@ -178,7 +178,7 @@ class MappingStorage(BaseStorage.BaseStorage):
# Now delete any unreferenced entries:
for oid in index.keys():
if not referenced(oid): del index[oid]
finally: self._lock_release()
def _splat(self):
......@@ -193,5 +193,5 @@ class MappingStorage(BaseStorage.BaseStorage):
r=index[oid]
o.append(' %s: %s, %s' %
(utils.u64(oid),TimeStamp(r[:8]),`r[8:]`))
return string.join(o,'\n')
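As the store() and _splat() code above shows, a MappingStorage record is just the 8-byte serial concatenated with the pickle, so a record can be split back apart with plain slicing. An illustrative helper:

    def split_record(r):
        # r is the value MappingStorage keeps per oid: 8-byte serial + pickle
        return r[:8], r[8:]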
......@@ -2,19 +2,19 @@
#
# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
# All Rights Reserved.
#
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
#
##############################################################################
"""Mounted database support
$Id: Mount.py,v 1.15 2002/05/23 20:53:22 shane Exp $"""
__version__='$Revision: 1.15 $'[11:-2]
$Id: Mount.py,v 1.16 2002/08/14 22:07:09 mj Exp $"""
__version__='$Revision: 1.16 $'[11:-2]
import thread, Persistence, Acquisition
import ExtensionClass, string, time, sys
......@@ -188,7 +188,7 @@ class MountPoint(Persistence.Persistent, Acquisition.Implicit):
data = t[0]
return data.__of__(parent)
def __of__(self, parent):
# Accesses the database, returning an acquisition
# wrapper around the connected object rather than around self.
......@@ -277,7 +277,7 @@ class MountedConnectionCloser:
try: del conn._mount_parent_jar
except: pass
conn.close()
if close_db:
# Stop using this database. Close it if no other
# MountPoint is using it.
......
......@@ -2,19 +2,19 @@
#
# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
# All Rights Reserved.
#
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
#
##############################################################################
"""BoboPOS-defined exceptions
$Id: POSException.py,v 1.11 2002/02/11 23:40:42 gvanrossum Exp $"""
__version__ = '$Revision: 1.11 $'.split()[-2:][0]
$Id: POSException.py,v 1.12 2002/08/14 22:07:09 mj Exp $"""
__version__ = '$Revision: 1.12 $'.split()[-2:][0]
from string import join
from types import StringType, DictType
......@@ -193,7 +193,7 @@ class Unimplemented(POSError):
class Unsupported(POSError):
"""An feature that is unsupported bt the storage was used.
"""
class InvalidObjectReference(POSError):
"""An object contains an invalid reference to another object.
......
......@@ -2,14 +2,14 @@
#
# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
# All Rights Reserved.
#
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
#
##############################################################################
import POSException
......@@ -53,7 +53,7 @@ class TmpStore:
if h[:8] != oid:
raise POSException.StorageSystemError, 'Bad temporary storage'
return file.read(u64(h[16:])), h[8:16]
def modifiedInVersion(self, oid):
if self._index.has_key(oid): return 1
return self._db._storage.modifiedInVersion(oid)
......@@ -78,7 +78,7 @@ class TmpStore:
self._tindex.append((oid,pos))
self._pos=pos+l+24
return serial
def tpc_abort(self, transaction):
if transaction is not self._transaction: return
del self._tindex[:]
......@@ -103,6 +103,6 @@ class TmpStore:
self._tpos=self._pos
def undoLog(self, first, last, filter=None): return ()
def versionEmpty(self, version):
if version is self: return len(self._index)
......@@ -2,19 +2,19 @@
#
# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
# All Rights Reserved.
#
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
#
##############################################################################
"""Transaction management
$Id: Transaction.py,v 1.36 2002/04/12 19:59:55 jeremy Exp $"""
__version__='$Revision: 1.36 $'[11:-2]
$Id: Transaction.py,v 1.37 2002/08/14 22:07:09 mj Exp $"""
__version__='$Revision: 1.37 $'[11:-2]
import time, sys, struct, POSException
from struct import pack
......@@ -39,7 +39,7 @@ class Transaction:
# commits and aborts to ensure that they are correctly committed
# or aborted in the "outside" transaction.
_non_st_objects=None
def __init__(self, id=None):
self._id=id
self._objects=[]
......@@ -60,7 +60,7 @@ class Transaction:
r.description=self.description
r._extension=self._extension
return r
def __str__(self):
if self._id is None:
return "Transaction user=%s" % `self.user`
......@@ -117,7 +117,7 @@ class Transaction:
while subjars:
j = subjars.pop()
j.abort_sub(self) # This should never fail
if t is not None:
raise t, v, tb
......@@ -207,7 +207,7 @@ class Transaction:
vote(self) # last chance to bail
# Try to finish one jar, since we may be able to
# recover if the first one fails.
# recover if the first one fails.
self._finish_one(jarsv)
# Once a single jar has finished, it's a fatal (hosed)
# error if another jar fails.
......@@ -234,7 +234,7 @@ class Transaction:
i = id(j)
if not jars.has_key(i):
jars[i] = j
if subtransaction:
# If a jar does not support subtransactions,
# we need to save it away to be committed in
......@@ -285,7 +285,7 @@ class Transaction:
while jarsv:
jarsv[-1].tpc_finish(self) # This should never fail
jarsv.pop() # It didn't, so it's taken care of.
except:
except:
# Bug if it does, we need to yell FIRE!
# Someone finished, so don't allow any more
# work without at least a restart!
......@@ -298,12 +298,12 @@ class Transaction:
"until the site/storage is reset by a restart. ",
error=sys.exc_info())
raise
def _commit_error(self, (t, v, tb),
objects, ncommitted, jarsv, subjars):
# handle an exception raised during commit
# takes sys.exc_info() as argument
# First, we have to abort any uncommitted objects.
for o in objects[ncommitted:]:
try:
......@@ -317,11 +317,11 @@ class Transaction:
for j in jarsv:
try:
j.tpc_abort(self) # This should never fail
except:
except:
LOG('ZODB', ERROR,
"A storage error occured during object abort. This "
"shouldn't happen. ", error=sys.exc_info())
# Ugh, we need to abort work done in sub-transactions.
while subjars:
j = subjars.pop()
......@@ -342,9 +342,9 @@ class Transaction:
def note(self, text):
if self.description:
self.description = "%s\n\n%s" % (self.description, strip(text))
else:
else:
self.description = strip(text)
def setUser(self, user_name, path='/'):
self.user="%s %s" % (path, user_name)
......@@ -366,7 +366,7 @@ the application may not come up until you deal with
the system problem. See your application log for
information on the error that led to this problem.
"""
############################################################################
......@@ -377,16 +377,16 @@ try:
except:
_t = Transaction(None)
def get_transaction(_t=_t):
return _t
def free_transaction(_t=_t):
_t.__init__()
else:
_t = {}
def get_transaction(_id=thread.get_ident, _t=_t, get=_t.get):
id = _id()
t = get(id, None)
......@@ -405,6 +405,5 @@ else:
del _t
import __main__
import __main__
__main__.__builtins__.get_transaction=get_transaction
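In the threaded branch above, get_transaction() keys a Transaction on thread.get_ident(), and the function is installed as a builtin. A typical usage sketch with methods shown earlier in this file:

    t = get_transaction()            # the per-thread (or global) Transaction
    t.note('rebuilt the catalog')    # appended to the transaction description
    t.setUser('admin')               # records "/ admin" as the user
    t.commit()                       # or t.abort() to discard the changes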
......@@ -2,14 +2,14 @@
#
# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
# All Rights Reserved.
#
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
#
##############################################################################
"""Provide backward compatability with storages that have undoLog, but not undoInfo."""
......@@ -25,5 +25,5 @@ class UndoLogCompatible:
return 0
return 1
else: filter=None
return self.undoLog(first, last, filter)
......@@ -2,21 +2,21 @@
#
# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
# All Rights Reserved.
#
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
#
##############################################################################
"""Implement an bobo_application object that is BoboPOS3 aware
This module provides a wrapper that causes a database connection to be created
and used when bobo publishes a bobo_application object.
"""
__version__='$Revision: 1.10 $'[11:-2]
__version__='$Revision: 1.11 $'[11:-2]
StringType=type('')
connection_open_hooks = []
......@@ -34,7 +34,7 @@ class ZApplicationWrapper:
get_transaction().commit()
conn.close()
self._klass=klass
# This hack is to overcome a bug in Bobo!
def __getattr__(self, name):
......@@ -57,16 +57,16 @@ class ZApplicationWrapper:
REQUEST._hold(cleanup)
conn.setDebugInfo(REQUEST.environ, REQUEST.other)
v=conn.root()[aname]
if name is not None:
if hasattr(v, '__bobo_traverse__'):
return v.__bobo_traverse__(REQUEST, name)
if hasattr(v,name): return getattr(v,name)
return v[name]
return v
......@@ -77,9 +77,8 @@ class ZApplicationWrapper:
connection=db.open()
elif type(connection) is StringType:
connection=db.open(connection)
return connection.root()[aname]
class Cleanup: pass
class Cleanup: pass
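The lookup order in the hunk above (try __bobo_traverse__, then attribute access, then item access) can be restated as a small standalone helper; this is only a paraphrase of the code shown, not an addition to it:

def traverse(v, request, name):
    # Mirrors the fallback order shown in the ZApplicationWrapper hunk.
    if name is not None:
        if hasattr(v, '__bobo_traverse__'):
            return v.__bobo_traverse__(request, name)
        if hasattr(v, name):
            return getattr(v, name)
        return v[name]
    return v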
......@@ -2,14 +2,14 @@
#
# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
# All Rights Reserved.
#
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
#
##############################################################################
import sys, ExtensionClass, TimeStamp, cPersistence, Persistence
import cStringIO, cPickle
......
......@@ -2,20 +2,20 @@
#
# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
# All Rights Reserved.
#
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
#
##############################################################################
"""Thread abstraction module
With this, we can run with or without threads.
$Id: bpthread.py,v 1.4 2002/02/11 23:40:42 gvanrossum Exp $"""
$Id: bpthread.py,v 1.5 2002/08/14 22:07:09 mj Exp $"""
try:
from thread import *
......
......@@ -2,14 +2,14 @@
#
# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
# All Rights Reserved.
#
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
#
##############################################################################
import PersistentMapping
......
......@@ -2,14 +2,14 @@
#
# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
# All Rights Reserved.
#
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
#
##############################################################################
"""Very Simple dbm-based ZODB storage
......@@ -18,7 +18,7 @@ don't support versions or Undo. This may be useful when implementing
objects like hit counters that don't need or want to participate
in undo or versions.
"""
__version__='$Revision: 1.3 $'[11:-2]
__version__='$Revision: 1.4 $'[11:-2]
import base64, POSException, time, string, utils
......@@ -40,7 +40,7 @@ class anydbmStorage(MappingStorage):
def getSize(self):
# This is a little iffy, since we aren't entirely sure what the file is
self._lock_acquire()
try:
try:
try:
return (os.stat(self.__name__+'.data')[6] +
os.stat(self.__name__+'.dir')[6]
......@@ -73,9 +73,9 @@ class gdbmStorage(anydbmStorage):
finally: self._lock_release()
def pack(self, t, referencesf):
self._lock_acquire()
try:
try:
# Build an index of *only* those objects reachable
# from the root.
index=self._index
......@@ -86,7 +86,7 @@ class gdbmStorage(anydbmStorage):
while rootl:
oid=pop()
if referenced(oid): continue
# Scan non-version pickle for references
r=index[oid]
pindex[oid]=r
......@@ -107,7 +107,7 @@ class gdbmStorage(anydbmStorage):
index.sync()
index.reorganize()
finally: self._lock_release()
......
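The pack() comments above describe a scan that keeps only objects reachable from the root. Roughly, that is a worklist traversal like the following sketch (get_refs stands in for ZODB.referencesf, and the flat oid-to-pickle index dict is an assumption of this example, not the gdbmStorage layout):

def reachable(root_oid, index, get_refs):
    # index maps oid -> pickle; get_refs extracts referenced oids from a pickle.
    keep = {}
    todo = [root_oid]
    while todo:
        oid = todo.pop()
        if oid in keep:
            continue
        keep[oid] = index[oid]             # keep this record
        todo.extend(get_refs(index[oid]))  # follow outgoing references
    return keep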
......@@ -2,33 +2,33 @@
#
# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
# All Rights Reserved.
#
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
#
##############################################################################
"""Implement an OID to File-position (long integer) mapping
"""
#
#
# To save space, we do two things:
#
#
# 1. We split the keys (OIDS) into 6-byte prefixes and 2-byte suffixes.
# We use the prefixes as keys in a mapping from prefix to mappings
# of suffix to data:
#
#
# data is {prefix -> {suffix -> data}}
#
#
# 2. We limit the data size to 48 bits. This should allow databases
# as large as 256 terabytes.
#
#
# Most of the space is consumed by items in the mappings from 2-byte
# suffix to 6-byte data. This should reduce the overall memory usage to
# 8-16 bytes per OID.
#
#
# We use p64 to convert integers to 8-byte strings and lop off the two
# high-order bytes when saving. On loading data, we add the leading
# bytes back before using U64 to convert the data back to (long)
......@@ -51,7 +51,7 @@ def str2num(s):
if h:
return (long(h) << 32) + l
else:
return l
return l
class fsIndex:
......
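The space-saving scheme in the comments above splits each 8-byte key into a 6-byte prefix and a 2-byte suffix and nests the mappings. A small illustration of that keying (insert/lookup here are illustrative helpers, not the fsIndex API; the packing matches the big-endian 8-byte convention p64 uses):

import struct

def p64(n):
    # 8-byte big-endian packing of an integer oid.
    return struct.pack(">Q", n)

index = {}   # {6-byte prefix -> {2-byte suffix -> data}}

def insert(oid8, data):
    prefix, suffix = oid8[:6], oid8[6:]
    index.setdefault(prefix, {})[suffix] = data

def lookup(oid8):
    return index[oid8[:6]][oid8[6:]]

insert(p64(42), 1000)
assert lookup(p64(42)) == 1000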
......@@ -2,14 +2,14 @@
#
# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
# All Rights Reserved.
#
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
#
##############################################################################
......@@ -49,14 +49,14 @@ Options:
Pack data to t seconds in the past. Note that if the "-p"
option is used, then t should be 0.
Important note: The ZODB package must be importable. You may need
to adjust the Python path accordingly.
"""
# Algorithm:
#
#
# position to start of input
# while 1:
# if end of file: break
......@@ -81,7 +81,7 @@ except ImportError:
elif os.path.exists('FileStorage.py'): sys.path.append('..')
import ZODB
import getopt, ZODB.FileStorage, struct, time
from struct import unpack
from ZODB.utils import t32, p64, U64
......@@ -185,7 +185,7 @@ def iprogress(i):
sys.stdout.flush()
def progress(p):
for i in range(p): iprogress(i)
for i in range(p): iprogress(i)
def recover(argv=sys.argv):
......@@ -199,7 +199,7 @@ def recover(argv=sys.argv):
elif opt == '-f': force=1
elif opt == '-P': pack=time.time()-float(v)
force = filter(lambda opt: opt[0]=='-f', opts)
partial = filter(lambda opt: opt[0]=='-p', opts)
verbose = filter(lambda opt: opt[0]=='-v', opts)
......@@ -208,7 +208,7 @@ def recover(argv=sys.argv):
except:
die()
print __doc__ % argv[0]
if os.path.exists(outp) and not force:
die("%s exists" % outp)
......@@ -267,7 +267,7 @@ def recover(argv=sys.argv):
ok=1
if verbose:
print 'begin',
print 'begin',
if verbose > 1: print
sys.stdout.flush()
......@@ -317,14 +317,13 @@ def recover(argv=sys.argv):
print "\n%s bytes removed during recovery" % bad
if undone:
print "%s bytes of undone transaction data were skipped" % undone
if pack is not None:
print "Packing ..."
from ZODB.referencesf import referencesf
ofs.pack(pack, referencesf)
ofs.close()
if __name__=='__main__': recover()
if __name__=='__main__': recover()
......@@ -2,14 +2,14 @@
#
# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
# All Rights Reserved.
#
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
#
##############################################################################
import POSException
......@@ -44,15 +44,14 @@ except:
un=file.fileno()
except:
return # don't care if not a real file
try:
LockFile(un,0,0,1,0) # just lock the first byte, who cares
except:
raise error, (
"Could not lock the database file. There must be\n"
"another process that has opened the file.\n"
"<p>")
"<p>")
except:
def lock_file(file, error=None):
pass
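For context on the hunk above: when the Windows locking primitive is unavailable, lock_file degrades to a no-op. On POSIX systems the same advisory-lock idea is usually expressed with fcntl; a hedged sketch of that variant (not the code ZODB actually ships):

import fcntl

def lock_file(f):
    try:
        # Non-blocking exclusive lock on the open database file.
        fcntl.flock(f.fileno(), fcntl.LOCK_EX | fcntl.LOCK_NB)
    except OSError:
        raise RuntimeError(
            "Could not lock the database file. Another process "
            "may have the file open.")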
......@@ -2,14 +2,14 @@
#
# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
# All Rights Reserved.
#
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
#
##############################################################################
"""Provide a function that can find object references in pickles
"""
......
......@@ -16,7 +16,7 @@ from ZODB.tests.StorageTestBase \
ZERO = '\0'*8
class BasicStorage:
def checkBasics(self):
t = Transaction()
......
......@@ -59,7 +59,7 @@ class ConflictResolvingStorage:
obj.inc()
# The effect of committing two transactions with the same
# pickle is to commit two different transactions relative to
# revid1 that add two to _value.
# revid1 that add two to _value.
revid2 = self._dostoreNP(oid, revid=revid1, data=zodb_pickle(obj))
revid3 = self._dostoreNP(oid, revid=revid1, data=zodb_pickle(obj))
......@@ -79,7 +79,7 @@ class ConflictResolvingStorage:
obj.inc()
# The effect of committing two transactions with the same
# pickle is to commit two different transactions relative to
# revid1 that add two to _value.
# revid1 that add two to _value.
revid2 = self._dostoreNP(oid, revid=revid1, data=zodb_pickle(obj))
self.assertRaises(ConflictError,
self._dostoreNP,
......@@ -89,7 +89,7 @@ class ConflictResolvingStorage:
from ZODB.ConflictResolution import bad_class
dummy_class_tuple = ('*foobar', ())
assert bad_class(dummy_class_tuple) == 1
def checkBuggyResolve1(self):
obj = PCounter3()
obj.inc()
......@@ -102,7 +102,7 @@ class ConflictResolvingStorage:
obj.inc()
# The effect of committing two transactions with the same
# pickle is to commit two different transactions relative to
# revid1 that add two to _value.
# revid1 that add two to _value.
revid2 = self._dostoreNP(oid, revid=revid1, data=zodb_pickle(obj))
self.assertRaises(AttributeError,
self._dostoreNP,
......@@ -120,7 +120,7 @@ class ConflictResolvingStorage:
obj.inc()
# The effect of committing two transactions with the same
# pickle is to commit two different transactions relative to
# revid1 that add two to _value.
# revid1 that add two to _value.
revid2 = self._dostoreNP(oid, revid=revid1, data=zodb_pickle(obj))
self.assertRaises(TypeError,
self._dostoreNP,
......@@ -132,7 +132,7 @@ class ConflictResolvingTransUndoStorage:
# This test is based on checkNotUndoable in the
# TransactionalUndoStorage test suite. Except here, conflict
# resolution should allow us to undo the transaction anyway.
obj = PCounter()
obj.inc()
oid = self._storage.new_oid()
......@@ -153,7 +153,7 @@ class ConflictResolvingTransUndoStorage:
# This test is based on checkNotUndoable in the
# TransactionalUndoStorage test suite. Except here, conflict
# resolution should allow us to undo the transaction anyway.
obj = PCounter2()
obj.inc()
oid = self._storage.new_oid()
......@@ -170,4 +170,3 @@ class ConflictResolvingTransUndoStorage:
self.assertRaises(UndoError, self._storage.transactionalUndo,
tid, t)
self._storage.tpc_abort(t)
......@@ -35,7 +35,7 @@ class FileStorageCorruptTests(StorageTestBase):
def checkTruncatedIndex(self):
oids = self._do_stores()
self._close()
# truncate the index file
path = self.path + '.index'
self.failUnless(os.path.exists(path))
......@@ -52,7 +52,7 @@ class FileStorageCorruptTests(StorageTestBase):
def checkCorruptedIndex(self):
oids = self._do_stores()
self._close()
# truncate the index file
path = self.path + '.index'
self.failUnless(os.path.exists(path))
......
......@@ -9,7 +9,7 @@ from ZODB.tests.MinPO import MinPO
from ZODB.tests.StorageTestBase import zodb_unpickle
class HistoryStorage:
def checkSimpleHistory(self):
eq = self.assertEqual
......
......@@ -9,7 +9,7 @@ from ZODB.tests.StorageTestBase import zodb_unpickle
from ZODB.utils import U64, p64
from ZODB.Transaction import Transaction
class IteratorCompare:
def iter_verify(self, txniter, revids, val0):
......
......@@ -173,7 +173,7 @@ class MTStorage:
t.join(10)
for t in threads:
self.failIf(t.isAlive())
def check2ZODBThreads(self):
db = ZODB.DB(self._storage)
self._checkNThreads(2, ZODBClientThread, db, self)
......@@ -184,10 +184,9 @@ class MTStorage:
def check2StorageThreads(self):
self._checkNThreads(2, StorageClientThread, self._storage, self)
def check7StorageThreads(self):
self._checkNThreads(7, StorageClientThread, self._storage, self)
def check4ExtStorageThread(self):
self._checkNThreads(4, ExtStorageClientThread, self._storage, self)
......@@ -18,7 +18,7 @@ from ZODB.referencesf import referencesf
ZERO = '\0'*8
# This class is for the root object. It must not contain a getoid() method
# (really, attribute). The persistent pickling machinery -- in the dumps()
# function below -- will pickle Root objects as normal, but any attributes
......@@ -64,7 +64,7 @@ def dumps(obj):
return s.getvalue()
class PackableStorageBase:
# We keep a cache of object ids to instances so that the unpickler can
# easily return any persistent object.
......@@ -100,7 +100,7 @@ class PackableStorageBase:
return loads
class PackableStorage(PackableStorageBase):
def _initroot(self):
try:
......@@ -125,11 +125,11 @@ class PackableStorage(PackableStorageBase):
def checkPackTomorrow(self):
self._initroot()
self._storage.pack(time.time() + 10000, referencesf)
def checkPackYesterday(self):
self._initroot()
self._storage.pack(time.time() - 10000, referencesf)
def checkPackAllRevisions(self):
self._initroot()
eq = self.assertEqual
......
......@@ -4,7 +4,7 @@ class PersistentStorage:
def checkUpdatesPersist(self):
oids = []
def new_oid_wrapper(l=oids, new_oid=self._storage.new_oid):
oid = new_oid()
l.append(oid)
......@@ -31,7 +31,7 @@ class PersistentStorage:
if ver:
p, s = self._storage.load(oid, ver)
objects.append((oid, ver, p, s))
self._storage.close()
self.open()
......
......@@ -38,7 +38,7 @@ class ReadOnlyStorage:
self.assertRaises(ReadOnlyError, self._storage.abortVersion,
'', t)
self._storage.tpc_abort(t)
t = Transaction()
self._storage.tpc_begin(t)
self.assertRaises(ReadOnlyError, self._storage.commitVersion,
......@@ -57,5 +57,3 @@ class ReadOnlyStorage:
self.assertRaises(ReadOnlyError, self._storage.transactionalUndo,
'\000' * 8, t)
self._storage.tpc_abort(t)
......@@ -6,7 +6,7 @@ from ZODB.tests.StorageTestBase import zodb_unpickle, zodb_pickle
ZERO = '\0'*8
class RevisionStorage:
def checkLoadSerial(self):
oid = self._storage.new_oid()
revid = ZERO
......@@ -18,4 +18,3 @@ class RevisionStorage:
for revid, value in revisions.items():
data = self._storage.loadSerial(oid, revid)
self.assertEqual(zodb_unpickle(data), value)
......@@ -116,13 +116,13 @@ def removefs(base):
except os.error, err:
if err[0] != errno.ENOENT:
raise
class StorageTestBase(unittest.TestCase):
# XXX It would be simpler if concrete tests didn't need to extend
# setUp() and tearDown().
def setUp(self):
# You need to override this with a setUp that creates self._storage
self._storage = None
......@@ -139,12 +139,12 @@ class StorageTestBase(unittest.TestCase):
def _dostore(self, oid=None, revid=None, data=None, version=None,
already_pickled=0, user=None, description=None):
"""Do a complete storage transaction. The defaults are:
- oid=None, ask the storage for a new oid
- revid=None, use a revid of ZERO
- data=None, pickle up some arbitrary data (the integer 7)
- version=None, use the empty string version
Returns the object's new revision id.
"""
if oid is None:
......@@ -177,7 +177,7 @@ class StorageTestBase(unittest.TestCase):
self._storage.tpc_abort(t)
raise
return revid
def _dostoreNP(self, oid=None, revid=None, data=None, version=None,
user=None, description=None):
return self._dostore(oid, revid, data, version, already_pickled=1)
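The _dostore docstring above summarizes one complete storage transaction. For reference, the two-phase commit sequence it wraps looks roughly like this (a sketch against the old storage API used throughout these tests; error handling is simplified and the helper name is illustrative):

from ZODB.Transaction import Transaction

def store_once(storage, oid, revid, data, version=''):
    t = Transaction()
    storage.tpc_begin(t)
    try:
        newrevid = storage.store(oid, revid, data, version, t)
        storage.tpc_vote(t)
        storage.tpc_finish(t)
    except Exception:
        storage.tpc_abort(t)
        raise
    return newrevid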
......@@ -61,7 +61,7 @@ class SynchronizedStorage:
## def verifyCommitting(self, callable, *args):
## self.assertRaises(StorageTransactionError, callable *args)
def verifyNotCommitting(self, callable, *args):
args = (StorageTransactionError, callable) + args
apply(self.assertRaises, args)
......@@ -92,17 +92,17 @@ class SynchronizedStorage:
def checkStoreNotCommitting(self):
self.verifyNotCommitting(self._storage.store,
OID, SERIALNO, "", "", Transaction())
def checkStoreWrongTrans(self):
self.verifyWrongTrans(self._storage.store,
OID, SERIALNO, "", "", Transaction())
## def checkNewOidNotCommitting(self):
## self.verifyNotCommitting(self._storage.new_oid)
## def checkNewOidWrongTrans(self):
## self.verifyWrongTrans(self._storage.new_oid)
def checkAbortNotCommitting(self):
self._storage.tpc_abort(Transaction())
......@@ -123,7 +123,7 @@ class SynchronizedStorage:
self._storage.tpc_begin(t)
self._storage.tpc_finish(Transaction())
self._storage.tpc_abort(t)
def checkBeginCommitting(self):
t = Transaction()
self._storage.tpc_begin(t)
......
......@@ -51,7 +51,7 @@ class TransactionalUndoStorage:
for oid in newrevs.keys():
newrevs[oid] = self._transaction_newserial(oid)
return newrevs
def checkSimpleTransactionalUndo(self):
eq = self.assertEqual
oid = self._storage.new_oid()
......@@ -366,7 +366,7 @@ class TransactionalUndoStorage:
eq(zodb_unpickle(data), MinPO(33))
data, revid2 = self._storage.load(oid2, '')
eq(zodb_unpickle(data), MinPO(54))
def checkNotUndoable(self):
eq = self.assertEqual
......
......@@ -11,7 +11,7 @@ from ZODB.Transaction import Transaction
from ZODB.tests.MinPO import MinPO
from ZODB.tests.StorageTestBase import zodb_unpickle
class TransactionalUndoVersionStorage:
def _x_dostore(self, *args, **kwargs):
......@@ -28,7 +28,7 @@ class TransactionalUndoVersionStorage:
except KeyError:
pass # not expected
return self._dostore(*args, **kwargs)
def checkUndoInVersion(self):
oid = self._storage.new_oid()
version = 'one'
......@@ -129,7 +129,7 @@ class TransactionalUndoVersionStorage:
revid = self._x_dostore(oid, revid, description='packable%d' % i)
pt = time.time()
time.sleep(1)
oid1 = self._storage.new_oid()
version = 'version'
revid1 = self._x_dostore(oid1, data=MinPO(0), description='create1')
......@@ -176,7 +176,7 @@ class TransactionalUndoVersionStorage:
revid = self._x_dostore(oid, revid, description='packable%d' % i)
pt = time.time()
time.sleep(1)
oid1 = self._storage.new_oid()
version = 'version'
revid1 = self._x_dostore(oid1, data=MinPO(0), description='create1')
......@@ -227,4 +227,3 @@ class TransactionalUndoVersionStorage:
self.assertEqual(load_value(oid1), 0)
# after abort, we should see non-version data
self.assertEqual(load_value(oid1, version), 0)
......@@ -12,7 +12,7 @@ from ZODB.Transaction import Transaction
from ZODB.tests.MinPO import MinPO
from ZODB.tests.StorageTestBase import zodb_unpickle
class VersionStorage:
def checkVersionedStoreAndLoad(self):
eq = self.assertEqual
......@@ -23,7 +23,7 @@ class VersionStorage:
# And now store some new revisions in a version
version = 'test-version'
revid = self._dostore(oid, revid=revid, data=MinPO(13),
version=version)
version=version)
revid = self._dostore(oid, revid=revid, data=MinPO(14),
version=version)
revid = self._dostore(oid, revid=revid, data=MinPO(15),
......@@ -137,12 +137,12 @@ class VersionStorage:
def checkAbortVersion(self):
eq = self.assertEqual
oid, version = self._setup_version()
# XXX Not sure I can write a test for getSerial() in the
# presence of aborted versions, because FileStorage and
# Berkeley storage give a different answer. I think Berkeley
# is right and FS is wrong.
## s1 = self._storage.getSerial(oid)
# Now abort the version -- must be done in a transaction
t = Transaction()
......@@ -178,7 +178,7 @@ class VersionStorage:
self.assertRaises(POSException.VersionError,
self._storage.abortVersion,
'', t)
# But now we really try to abort the version
oids = self._storage.abortVersion(version, t)
self._storage.tpc_vote(t)
......@@ -258,7 +258,7 @@ class VersionStorage:
eq(zodb_unpickle(data), MinPO(51))
data, revid2 = self._storage.load(oid1, '')
eq(zodb_unpickle(data), MinPO(51))
# Okay, now let's commit object1 to version2
t = Transaction()
self._storage.tpc_begin(t)
......@@ -274,7 +274,7 @@ class VersionStorage:
eq(zodb_unpickle(data), MinPO(54))
# an object can only exist in one version, so a load from
# version1 should now give the non-version data
# version1 should now give the non-version data
data, revid2 = self._storage.load(oid1, version1)
eq(zodb_unpickle(data), MinPO(51))
......
......@@ -2,14 +2,14 @@
#
# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
# All Rights Reserved.
#
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
#
##############################################################################
usage="""Test speed of a ZODB storage
......@@ -34,7 +34,7 @@ Options:
-M Output means only
"""
import sys, os, getopt, string, time
sys.path.insert(0, os.getcwd())
......@@ -114,11 +114,11 @@ def main(args):
sys.stderr.write("mean:\t%s\t%.4f\t%.4f (s/o)\n" % (r, t, t/r))
db.close()
def compress(s):
c=zlib.compressobj()
o=c.compress(s)
return o+c.flush()
return o+c.flush()
if __name__=='__main__': main(sys.argv[1:])
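The compress() helper near the end of the speed test is plain streaming zlib; an equivalent, self-contained usage for reference:

import zlib

def compress(data):
    c = zlib.compressobj()
    return c.compress(data) + c.flush()

payload = b"spam" * 2500
packed = compress(payload)
assert zlib.decompress(packed) == payload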
......@@ -15,7 +15,7 @@
See ZODB/ActivityMonitor.py
$Id: testActivityMonitor.py,v 1.2 2002/06/10 20:20:44 shane Exp $
$Id: testActivityMonitor.py,v 1.3 2002/08/14 22:07:09 mj Exp $
"""
import unittest
......@@ -74,7 +74,7 @@ class Tests(unittest.TestCase):
am.setHistoryLength(0.1)
self.assertEqual(am.getHistoryLength(), 0.1)
self.assert_(len(am.log) <= 1)
def testActivityAnalysis(self):
am = ActivityMonitor(history_length=3600)
c = FakeConnection()
......@@ -98,7 +98,7 @@ class Tests(unittest.TestCase):
self.assert_(div['start'] > 0)
self.assert_(div['start'] >= lastend)
self.assert_(div['start'] < div['end'])
def test_suite():
return unittest.makeSuite(Tests)
......
......@@ -50,7 +50,7 @@ class CacheTestBase(unittest.TestCase):
if d is None:
d = r[i] = PersistentMapping()
get_transaction().commit()
for i in range(15):
o = d.get(i)
if o is None:
......@@ -116,7 +116,7 @@ class DBMethods(CacheTestBase):
c.klass_items()
class LRUCacheTests(CacheTestBase):
def checkLRU(self):
# verify the LRU behavior of the cache
dataset_size = 5
......@@ -166,7 +166,7 @@ class LRUCacheTests(CacheTestBase):
CONNS = 3
for i in range(CONNS):
self.noodle_new_connection()
self.assertEquals(self.db.cacheSize(), CACHE_SIZE * CONNS)
details = self.db.cacheDetailSize()
self.assertEquals(len(details), CONNS)
......@@ -183,7 +183,7 @@ class LRUCacheTests(CacheTestBase):
CONNS = 3
for i in range(CONNS):
self.noodle_new_connection()
for klass, count in self.db.cacheDetail():
if klass.endswith('MinPO'):
self.assertEqual(count, CONNS * CACHE_SIZE)
......@@ -236,7 +236,7 @@ class CacheErrors(unittest.TestCase):
def checkBogusObject(self):
def add(key, obj):
self.cache[key] = obj
key = p64(2)
# value isn't persistent
self.assertRaises(TypeError, add, key, 12)
......
......@@ -38,4 +38,3 @@ class DBTests(unittest.TestCase):
def test_suite():
return unittest.makeSuite(DBTests)
......@@ -24,4 +24,3 @@ if __name__ == "__main__":
loader = unittest.TestLoader()
loader.testMethodPrefix = "check"
unittest.main(testLoader=loader)
......@@ -124,7 +124,7 @@ class FileStorageRecoveryTest(
self.assertRaises(IndexError, lambda i:trans[i], 1)
self.assertEqual(data.oid, oid)
self.assertEqual(data.data, None)
def test_suite():
suite = unittest.makeSuite(FileStorageTests, 'check')
......@@ -140,14 +140,14 @@ def main():
runner.run(alltests)
def debug():
test_suite().debug()
test_suite().debug()
def pdebug():
import pdb
pdb.run('debug()')
if __name__=='__main__':
if len(sys.argv) > 1:
globals()[sys.argv[1]]()
else:
main()
if len(sys.argv) > 1:
globals()[sys.argv[1]]()
else:
main()
......@@ -22,4 +22,3 @@ if __name__ == "__main__":
loader = unittest.TestLoader()
loader.testMethodPrefix = "check"
unittest.main(testLoader=loader)
......@@ -31,7 +31,7 @@ class PMTests(unittest.TestCase):
s.store('\000' * 8, None, pickle, '', t)
s.tpc_vote(t)
s.tpc_finish(t)
db = ZODB.DB(s)
# If the root can be loaded successfully, we should be okay.
r = db.open().root()
......
......@@ -2,19 +2,19 @@
#
# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
# All Rights Reserved.
#
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
#
##############################################################################
"""
Revision information:
$Id: testTransaction.py,v 1.9 2002/08/14 15:37:08 jeremy Exp $
$Id: testTransaction.py,v 1.10 2002/08/14 22:07:09 mj Exp $
"""
"""
......@@ -41,7 +41,7 @@ TODO
add in tests for objects which are modified multiple times,
for example an object that gets modified in multiple sub txns.
"""
import random
......@@ -61,9 +61,9 @@ class TransactionTests(unittest.TestCase):
self.nosub1 = DataObject(nost=1)
def tearDown(self):
Transaction.free_transaction()
# basic tests with two sub trans jars
# really we only need one, so tests for
# sub1 should identical to tests for sub2
......@@ -84,7 +84,7 @@ class TransactionTests(unittest.TestCase):
get_transaction().abort()
assert self.sub2._p_jar.cabort == 1
assert self.sub2._p_jar.cabort == 1
def testSubTransactionCommitCommit(self):
......@@ -92,10 +92,10 @@ class TransactionTests(unittest.TestCase):
self.sub2.modify()
get_transaction().commit(1)
assert self.sub1._p_jar.ctpc_vote == 0
assert self.sub1._p_jar.ctpc_finish == 1
get_transaction().commit()
assert self.sub1._p_jar.ccommit_sub == 1
......@@ -108,13 +108,13 @@ class TransactionTests(unittest.TestCase):
get_transaction().commit(1)
get_transaction().abort()
assert self.sub1._p_jar.ctpc_vote == 0
assert self.sub1._p_jar.cabort == 0
assert self.sub1._p_jar.cabort_sub == 1
def testMultipleSubTransactionCommitCommit(self):
# add it
self.sub1.modify()
......@@ -133,7 +133,7 @@ class TransactionTests(unittest.TestCase):
# objects... i don't like this but its an impl artifact
assert self.sub1._p_jar.ctpc_vote == 0
assert self.sub1._p_jar.ctpc_finish > 0
assert self.sub1._p_jar.ctpc_finish > 0
# add another before we do the entire txn commit
self.sub3.modify()
......@@ -160,7 +160,7 @@ class TransactionTests(unittest.TestCase):
sub1 calling method tpc_finish
sub2 calling method tpc_finish
"""
# add it
self.sub1.modify()
......@@ -172,7 +172,7 @@ class TransactionTests(unittest.TestCase):
get_transaction().commit(1)
assert self.sub1._p_jar.ctpc_vote == 0
assert self.sub1._p_jar.ctpc_finish > 0
assert self.sub1._p_jar.ctpc_finish > 0
# add another before we do the entire txn commit
self.sub3.modify()
......@@ -185,7 +185,7 @@ class TransactionTests(unittest.TestCase):
assert self.sub3._p_jar.cabort == 1
assert self.sub1._p_jar.ccommit_sub == 1
assert self.sub1._p_jar.ctpc_finish > 1
assert self.sub1._p_jar.ctpc_finish > 1
# repeat adding in a nonsub trans jars
......@@ -213,7 +213,7 @@ class TransactionTests(unittest.TestCase):
the nosub jar should not have tpc_finish
called on it till the containing txn
ends.
sub calling method commit
nosub calling method tpc_begin
sub calling method tpc_finish
......@@ -221,7 +221,7 @@ class TransactionTests(unittest.TestCase):
nosub calling method abort
sub calling method abort_sub
"""
self.sub1.modify(tracing='sub')
self.nosub1.modify(tracing='nosub')
......@@ -276,10 +276,10 @@ class TransactionTests(unittest.TestCase):
nosub calling method tpc_finish
sub1 calling method tpc_finish
"""
# add it
self.sub1.modify()
get_transaction().commit(1)
# add another
......@@ -288,15 +288,15 @@ class TransactionTests(unittest.TestCase):
get_transaction().commit(1)
assert self.sub1._p_jar.ctpc_vote == 0
assert self.nosub1._p_jar.ctpc_vote == 0
assert self.sub1._p_jar.ctpc_finish > 0
assert self.nosub1._p_jar.ctpc_vote == 0
assert self.sub1._p_jar.ctpc_finish > 0
# add another before we do the entire txn commit
self.sub2.modify()
# commit the container transaction
get_transaction().commit()
# we did an implicit sub commit
assert self.sub2._p_jar.ccommit_sub == 1
assert self.sub1._p_jar.ctpc_finish > 1
......@@ -308,28 +308,28 @@ class TransactionTests(unittest.TestCase):
# error handling by throwing errors from
# various jar methods
###
# first the recoverable errors
def testExceptionInAbort(self):
self.sub1._p_jar = SubTransactionJar(errors='abort')
self.nosub1.modify()
self.sub1.modify(nojar=1)
self.sub2.modify()
try:
try:
get_transaction().abort()
except TestTxnException: pass
assert self.nosub1._p_jar.cabort == 1
assert self.sub2._p_jar.cabort == 1
def testExceptionInCommit(self):
def testExceptionInCommit(self):
self.sub1._p_jar = SubTransactionJar(errors='commit')
self.nosub1.modify()
self.sub1.modify(nojar=1)
......@@ -345,14 +345,14 @@ class TransactionTests(unittest.TestCase):
def testExceptionInTpcVote(self):
self.sub1._p_jar = SubTransactionJar(errors='tpc_vote')
self.nosub1.modify()
self.sub1.modify(nojar=1)
try:
get_transaction().commit()
except TestTxnException: pass
assert self.nosub1._p_jar.ctpc_finish == 0
assert self.nosub1._p_jar.ccommit == 1
assert self.nosub1._p_jar.ctpc_abort == 1
......@@ -363,7 +363,7 @@ class TransactionTests(unittest.TestCase):
"""
ok this test reveals a bug in the TM.py
as the nosub tpc_abort there is ignored.
nosub calling method tpc_begin
nosub calling method commit
sub calling method tpc_begin
......@@ -372,11 +372,11 @@ class TransactionTests(unittest.TestCase):
nosub calling method tpc_abort
"""
self.sub1._p_jar = SubTransactionJar(errors='tpc_begin')
self.nosub1.modify()
self.sub1.modify(nojar=1)
try:
try:
get_transaction().commit()
except TestTxnException: pass
......@@ -401,7 +401,7 @@ class TransactionTests(unittest.TestCase):
### More Failure modes...
# now we mix in some sub transactions
###
def testExceptionInSubCommitSub(self):
"""
this test exhibits some odd behavior,
......@@ -433,19 +433,19 @@ class TransactionTests(unittest.TestCase):
try:
get_transaction().commit()
except TestTxnException: pass
# odd this doesn't seem to be entirely deterministic..
if self.sub1._p_jar.ccommit_sub:
assert self.sub1._p_jar.ctpc_abort == 1
assert self.sub1._p_jar.ctpc_abort == 1
else:
assert self.sub1._p_jar.cabort_sub == 1
assert self.sub1._p_jar.cabort_sub == 1
if self.sub3._p_jar.ccommit_sub:
assert self.sub3._p_jar.ctpc_abort == 1
assert self.sub3._p_jar.ctpc_abort == 1
else:
assert self.sub3._p_jar.cabort_sub == 1
assert self.sub3._p_jar.cabort_sub == 1
assert self.sub2._p_jar.ctpc_abort == 1
assert self.nosub1._p_jar.ctpc_abort == 1
......@@ -458,7 +458,7 @@ class TransactionTests(unittest.TestCase):
self.sub2._p_jar = SubTransactionJar(errors='abort_sub')
self.sub2.modify(nojar=1)
get_transaction().commit(1)
self.sub3.modify()
try:
......@@ -516,7 +516,7 @@ class TransactionTests(unittest.TestCase):
succeed += 1
elif jar.ctpc_abort:
fail += 1
if Transaction.hosed:
self.assert_(fail > 0 and succeed > 0)
break
......@@ -526,7 +526,7 @@ class TransactionTests(unittest.TestCase):
self.setUp()
else:
self.fail("Couldn't provoke hosed state.")
self.sub2.modify()
try:
......@@ -566,22 +566,22 @@ class BasicJar:
self.ctpc_vote = 0
self.ctpc_finish = 0
self.cabort_sub = 0
self.ccommit_sub = 0
self.ccommit_sub = 0
def check(self, method):
if self.tracing:
print '%s calling method %s'%(str(self.tracing),method)
if ((type(self.errors) is TupleType and method in self.errors)
or method == self.errors):
raise TestTxnException("error %s" % method)
## basic jar txn interface
## basic jar txn interface
def abort(self, *args):
self.check('abort')
self.cabort += 1
def commit(self, *args):
self.check('commit')
self.ccommit += 1
......@@ -607,11 +607,11 @@ class SubTransactionJar(BasicJar):
def abort_sub(self, txn):
self.check('abort_sub')
self.cabort_sub = 1
def commit_sub(self, txn):
self.check('commit_sub')
self.ccommit_sub = 1
class NoSubTransactionJar(BasicJar): pass
def test_suite():
......
......@@ -14,7 +14,7 @@ class TestUtils(unittest.TestCase):
large = [random.randrange(1L<<32, 1L<<64, int=long)
for i in range(NUM)]
all = small + large
def checkLongToStringToLong(self):
for num in self.all:
s = p64(num)
......@@ -33,9 +33,8 @@ class TestUtils(unittest.TestCase):
def test_suite():
return unittest.makeSuite(TestUtils, 'check')
if __name__ == "__main__":
loader = unittest.TestLoader()
loader.testMethodPrefix = "check"
unittest.main(testLoader=loader)
......@@ -72,7 +72,7 @@ class ExportImportTests:
def checkDuplicateAborted(self):
self.checkDuplicate(abort_it=1, dup_name='test_duplicate_aborted')
class ZODBTests(unittest.TestCase, ExportImportTests):
......@@ -103,14 +103,14 @@ def main():
runner.run(alltests)
def debug():
test_suite().debug()
test_suite().debug()
def pdebug():
import pdb
pdb.run('debug()')
if __name__=='__main__':
if len(sys.argv) > 1:
globals()[sys.argv[1]]()
else:
main()
if len(sys.argv) > 1:
globals()[sys.argv[1]]()
else:
main()
import unittest, sys
from ZODB.fsIndex import fsIndex
from ZODB.utils import p64
......@@ -14,7 +13,7 @@ class Test(unittest.TestCase):
for i in range(0,200):
self.assertEqual((i,index[p64(i*1000)]), (i,(i*1000L+1)))
self.assertEqual(len(index), 200)
key=p64(2000)
......@@ -38,12 +37,12 @@ class Test(unittest.TestCase):
for i in range(400,600):
d[p64(i*1000)]=(i*1000L+1)
index.update(d)
for i in range(100, 500):
d[p64(i*1000)]=(i*1000L+2)
index.update(d)
self.assertEqual(index.get(p64(2000)), 2001)
......
......@@ -2,14 +2,14 @@
#
# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
# All Rights Reserved.
#
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
#
##############################################################################
import sys
......@@ -64,7 +64,7 @@ def cp(f1, f2, l):
read = f1.read
write = f2.write
n = 8192
while l > 0:
if n > l:
n = l
......
......@@ -2,21 +2,21 @@
#
# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
# All Rights Reserved.
#
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
#
##############################################################################
"""Python implementation of persistent list.
$Id: list.py,v 1.2 2002/02/11 23:49:07 gvanrossum Exp $"""
$Id: list.py,v 1.3 2002/08/14 22:07:09 mj Exp $"""
__version__='$Revision: 1.2 $'[11:-2]
__version__='$Revision: 1.3 $'[11:-2]
import Persistence
from UserList import UserList
......@@ -51,7 +51,7 @@ class PersistentList(UserList, Persistence.Persistent):
def __delslice__(self, i, j):
self.__super_delslice(i, j)
self._p_changed = 1
def __iadd__(self, other):
self.__super_iadd(other)
self._p_changed = 1
......@@ -63,7 +63,7 @@ class PersistentList(UserList, Persistence.Persistent):
def append(self, item):
self.__super_append(item)
self._p_changed = 1
def insert(self, i, item):
self.__super_insert(i, item)
self._p_changed = 1
......@@ -76,11 +76,11 @@ class PersistentList(UserList, Persistence.Persistent):
def remove(self, item):
self.__super_remove(item)
self._p_changed = 1
def reverse(self):
self.__super_reverse()
self._p_changed = 1
def sort(self, *args):
self.__super_sort(*args)
self._p_changed = 1
......
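Every mutator touched in the list.py hunks above follows one pattern: delegate to the superclass, then set _p_changed. A generic sketch of that pattern, detached from the Persistence base class (the modern collections.UserList is used purely for illustration, and the flag here is only recorded, not wired to any transaction machinery):

from collections import UserList

class ChangeTrackingList(UserList):
    _p_changed = 0   # stands in for the persistence machinery's change flag

    def append(self, item):
        super().append(item)
        self._p_changed = 1

    def remove(self, item):
        super().remove(item)
        self._p_changed = 1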
......@@ -2,21 +2,21 @@
#
# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
# All Rights Reserved.
#
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
#
##############################################################################
"""Python implementation of persistent base types
$Id: mapping.py,v 1.19 2002/02/12 22:33:08 gvanrossum Exp $"""
$Id: mapping.py,v 1.20 2002/08/14 22:07:09 mj Exp $"""
__version__='$Revision: 1.19 $'[11:-2]
__version__='$Revision: 1.20 $'[11:-2]
import Persistence
from UserDict import UserDict
......@@ -82,7 +82,7 @@ class PersistentMapping(UserDict, Persistence.Persistent):
# different versions of the code. Compatibility works in both
# directions, because an application may want to share a database
# between applications using different versions of the code.
# Effectively, the original rep is part of the "API." To provide
# full compatibility, the getstate and setstate must read and
# write objects using the old rep.
......
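The compatibility comment that closes the mapping.py diff alludes to a __getstate__/__setstate__ pair that keeps emitting, and keeps accepting, the old representation. A hedged, generic sketch of that technique; the attribute names are hypothetical, not PersistentMapping's actual internals:

class CompatMapping:
    def __init__(self):
        self.data = {}

    def __getstate__(self):
        # Keep writing the old on-disk representation so older code can read it.
        return {'_container': self.data}

    def __setstate__(self, state):
        # Accept both the old and the newer representation.
        self.data = state.get('_container', state.get('data', {}))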