Commit eef59eb3 authored by Jim Fulton's avatar Jim Fulton

Removed lots of version support. More version support, especially in

ZEO, still needs to be removed.
parent 7b044c7d
......@@ -52,51 +52,3 @@ class TransUndoStorageWithCache:
data, revid = self._storage.load(oid, '')
obj = zodb_unpickle(data)
assert obj == MinPO(24)
class StorageWithCache:
def checkAbortVersionInvalidation(self):
oid = self._storage.new_oid()
revid = self._dostore(oid, data=MinPO(1))
revid = self._dostore(oid, revid=revid, data=MinPO(2))
revid = self._dostore(oid, revid=revid, data=MinPO(3), version="foo")
revid = self._dostore(oid, revid=revid, data=MinPO(4), version="foo")
t = Transaction()
self._storage.tpc_begin(t)
self._storage.abortVersion("foo", t)
self._storage.load(oid, "foo")
self._storage.tpc_vote(t)
self._storage.tpc_finish(t)
data, revid = self._storage.load(oid, "foo")
obj = zodb_unpickle(data)
assert obj == MinPO(2), obj
def checkCommitEmptyVersionInvalidation(self):
oid = self._storage.new_oid()
revid = self._dostore(oid, data=MinPO(1))
revid = self._dostore(oid, revid=revid, data=MinPO(2))
revid = self._dostore(oid, revid=revid, data=MinPO(3), version="foo")
t = Transaction()
self._storage.tpc_begin(t)
self._storage.commitVersion("foo", "", t)
self._storage.load(oid, "")
self._storage.tpc_vote(t)
self._storage.tpc_finish(t)
data, revid = self._storage.load(oid, "")
obj = zodb_unpickle(data)
assert obj == MinPO(3), obj
def checkCommitVersionInvalidation(self):
oid = self._storage.new_oid()
revid = self._dostore(oid, data=MinPO(1))
revid = self._dostore(oid, revid=revid, data=MinPO(2))
revid = self._dostore(oid, revid=revid, data=MinPO(3), version="foo")
t = Transaction()
self._storage.tpc_begin(t)
self._storage.commitVersion("foo", "bar", t)
self._storage.load(oid, "")
self._storage.tpc_vote(t)
self._storage.tpc_finish(t)
data, revid = self._storage.load(oid, "bar")
obj = zodb_unpickle(data)
assert obj == MinPO(3), obj
......@@ -234,74 +234,6 @@ class LargeUpdatesThread(FailableThread):
self.added_keys = keys_added.keys()
cn.close()
class VersionStressThread(FailableThread):
def __init__(self, db, stop, threadnum, commitdict, startnum,
step=2, sleep=None):
TestThread.__init__(self)
self.db = db
self.stop = stop
self.threadnum = threadnum
self.startnum = startnum
self.step = step
self.sleep = sleep
self.added_keys = []
self.commitdict = commitdict
def _testrun(self):
commit = 0
key = self.startnum
while not self.stop.isSet():
version = "%s:%s" % (self.threadnum, key)
commit = not commit
if self.oneupdate(version, key, commit):
self.added_keys.append(key)
self.commitdict[self] = 1
key += self.step
def oneupdate(self, version, key, commit=1):
# The mess of sleeps below were added to reduce the number
# of VersionLockErrors, based on empirical observation.
# It looks like the threads don't switch enough without
# the sleeps.
cn = self.db.open(version)
while not self.stop.isSet():
try:
tree = cn.root()["tree"]
break
except (ConflictError, KeyError):
transaction.abort()
while not self.stop.isSet():
try:
tree[key] = self.threadnum
transaction.commit()
if self.sleep:
time.sleep(self.sleep)
break
except (VersionLockError, ReadConflictError, ConflictError), msg:
transaction.abort()
if self.sleep:
time.sleep(self.sleep)
try:
while not self.stop.isSet():
try:
if commit:
self.db.commitVersion(version)
transaction.get().note("commit version %s" % version)
else:
self.db.abortVersion(version)
transaction.get().note("abort version %s" % version)
transaction.commit()
if self.sleep:
time.sleep(self.sleep)
return commit
except ConflictError, msg:
transaction.abort()
finally:
cn.close()
return 0
class InvalidationTests:
level = 2
......
......@@ -82,7 +82,7 @@ class AuthTest(CommonSetupTearDown):
self._storage._connection.poll()
self.assert_(self._storage.is_connected())
# Make a call to make sure the mechanism is working
self._storage.versions()
self._storage.undoInfo()
def testNOK(self):
self._storage = self.openClientStorage(wait=0, username="foo",
......@@ -101,11 +101,11 @@ class AuthTest(CommonSetupTearDown):
# Sleep for 0.2 seconds to give the server some time to start up
# seems to be needed before and after creating the storage
self.wait()
self._storage.versions()
self._storage.undoInfo()
# Manually clear the state of the hmac connection
self._storage._connection._SizedMessageAsyncConnection__hmac_send = None
# Once the client stops using the hmac, it should be disconnected.
self.assertRaises(ClientDisconnected, self._storage.versions)
self.assertRaises(ClientDisconnected, self._storage.undoInfo)
class PlainTextAuth(AuthTest):
......
......@@ -17,10 +17,9 @@ from zope.testing import doctest
class FakeStorageBase:
def __getattr__(self, name):
if name in ('versionEmpty', 'versions', 'getTid',
'history', 'load', 'loadSerial', 'modifiedInVersion',
if name in ('getTid', 'history', 'load', 'loadSerial',
'lastTransaction', 'getSize', 'getName', 'supportsUndo',
'supportsVersions', 'tpc_transaction'):
'tpc_transaction'):
return lambda *a, **k: None
raise AttributeError(name)
......
......@@ -38,8 +38,8 @@ import persistent
import transaction
# ZODB test mixin classes
from ZODB.tests import StorageTestBase, BasicStorage, VersionStorage, \
TransactionalUndoStorage, TransactionalUndoVersionStorage, \
from ZODB.tests import StorageTestBase, BasicStorage, \
TransactionalUndoStorage, \
PackableStorage, Synchronization, ConflictResolution, RevisionStorage, \
MTStorage, ReadOnlyStorage
......@@ -223,7 +223,6 @@ class GenericTests(
class FullGenericTests(
GenericTests,
Cache.StorageWithCache,
Cache.TransUndoStorageWithCache,
CommitLockTests.CommitLockUndoTests,
ConflictResolution.ConflictResolvingStorage,
......@@ -231,8 +230,6 @@ class FullGenericTests(
PackableStorage.PackableUndoStorage,
RevisionStorage.RevisionStorage,
TransactionalUndoStorage.TransactionalUndoStorage,
TransactionalUndoVersionStorage.TransactionalUndoVersionStorage,
VersionStorage.VersionStorage,
):
"""Extend GenericTests with tests that MappingStorage can't pass."""
......@@ -256,8 +253,6 @@ class MappingStorageTests(GenericTests):
class DemoStorageTests(
GenericTests,
Cache.StorageWithCache,
VersionStorage.VersionStorage,
):
def getConfig(self):
......@@ -269,21 +264,6 @@ class DemoStorageTests(
</demostorage>
""" % tempfile.mktemp()
def checkLoadBeforeVersion(self):
# Doesn't implement loadBefore, except as a kind of place holder.
pass
# the next three pack tests depend on undo
def checkPackVersionReachable(self):
pass
def checkPackVersions(self):
pass
def checkPackVersionsInPast(self):
pass
class HeartbeatTests(ZEO.tests.ConnectionTests.CommonSetupTearDown):
"""Make sure a heartbeat is being sent and that it does no harm
......
......@@ -11,7 +11,7 @@
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Basic unit tests for a multi-version client cache."""
"""Basic unit tests for a client cache."""
import os
import random
......
......@@ -338,7 +338,7 @@ def copy(source, dest, verbose=0):
if verbose:
print oid_repr(oid), r.version, len(r.data)
if restoring:
dest.restore(oid, r.tid, r.data, r.version,
dest.restore(oid, r.tid, r.data, '',
r.data_txn, transaction)
else:
pre = preget(oid, None)
......
......@@ -31,7 +31,6 @@ fsync = getattr(os, "fsync", None)
from ZODB import BaseStorage, ConflictResolution, POSException
from ZODB.POSException import UndoError, POSKeyError, MultipleUndoErrors
from ZODB.POSException import VersionLockError
from persistent.TimeStamp import TimeStamp
from ZODB.lock_file import LockFile
from ZODB.utils import p64, u64, cp, z64
......@@ -120,10 +119,8 @@ class FileStorage(BaseStorage.BaseStorage,
BaseStorage.BaseStorage.__init__(self, file_name)
(index, vindex, tindex, tvindex,
oid2tid, toid2tid, toid2tid_delete) = self._newIndexes()
self._initIndex(index, vindex, tindex, tvindex,
oid2tid, toid2tid, toid2tid_delete)
(index, tindex, oid2tid, toid2tid, toid2tid_delete) = self._newIndexes()
self._initIndex(index, tindex, oid2tid, toid2tid, toid2tid_delete)
# Now open the file
......@@ -155,18 +152,17 @@ class FileStorage(BaseStorage.BaseStorage,
r = self._restore_index()
if r is not None:
self._used_index = 1 # Marker for testing
index, vindex, start, ltid = r
index, start, ltid = r
self._initIndex(index, vindex, tindex, tvindex,
oid2tid, toid2tid, toid2tid_delete)
self._initIndex(index, tindex, oid2tid, toid2tid, toid2tid_delete)
self._pos, self._oid, tid = read_index(
self._file, file_name, index, vindex, tindex, stop,
self._file, file_name, index, tindex, stop,
ltid=ltid, start=start, read_only=read_only,
)
else:
self._used_index = 0 # Marker for testing
self._pos, self._oid, tid = read_index(
self._file, file_name, index, vindex, tindex, stop,
self._file, file_name, index, tindex, stop,
read_only=read_only,
)
self._save_index()
......@@ -195,20 +191,14 @@ class FileStorage(BaseStorage.BaseStorage,
# tid cache statistics.
self._oid2tid_nlookups = self._oid2tid_nhits = 0
def _initIndex(self, index, vindex, tindex, tvindex,
oid2tid, toid2tid, toid2tid_delete):
def _initIndex(self, index, tindex, oid2tid, toid2tid, toid2tid_delete):
self._index=index
self._vindex=vindex
self._tindex=tindex
self._tvindex=tvindex
self._index_get=index.get
self._vindex_get=vindex.get
# .store() needs to compare the passed-in serial to the
# current tid in the database. _oid2tid caches the oid ->
# current tid mapping for non-version data (if the current
# record for oid is version data, the oid is not a key in
# _oid2tid). The point is that otherwise seeking into the
# current tid mapping. The point is that otherwise seeking into the
# storage is needed to extract the current tid, and that's
# an expensive operation. For example, if a transaction
# stores 4000 objects, and each random seek + read takes 7ms
......@@ -220,8 +210,7 @@ class FileStorage(BaseStorage.BaseStorage,
# oid->tid map to transactionally add to _oid2tid.
self._toid2tid = toid2tid
# Set of oids to transactionally delete from _oid2tid (e.g.,
# oids reverted by undo, or for which the most recent record
# becomes version data).
# oids reverted by undo).
self._toid2tid_delete = toid2tid_delete
def __len__(self):
......@@ -229,7 +218,7 @@ class FileStorage(BaseStorage.BaseStorage,
def _newIndexes(self):
# hook to use something other than builtin dict
return fsIndex(), {}, {}, {}, {}, {}, {}
return fsIndex(), {}, {}, {}, {}
_saved = 0
def _save_index(self):
......@@ -244,11 +233,7 @@ class FileStorage(BaseStorage.BaseStorage,
f=open(tmp_name,'wb')
p=Pickler(f,1)
# Note: starting with ZODB 3.2.6, the 'oid' value stored is ignored
# by the code that reads the index. We still write it, so that
# .index files can still be read by older ZODBs.
info={'index': self._index, 'pos': self._pos,
'oid': self._oid, 'vindex': self._vindex}
info={'index': self._index, 'pos': self._pos}
p.dump(info)
f.flush()
......@@ -338,10 +323,7 @@ class FileStorage(BaseStorage.BaseStorage,
def _restore_index(self):
"""Load database index to support quick startup."""
# Returns (index, vindex, pos, tid), or None in case of
# error.
# Starting with ZODB 3.2.6, the 'oid' value stored in the index
# is ignored.
# Returns (index, pos, tid), or None in case of error.
# The index returned is always an instance of fsIndex. If the
# index cached in the file is a Python dict, it's converted to
# fsIndex here, and, if we're not in read-only mode, the .index
......@@ -365,8 +347,7 @@ class FileStorage(BaseStorage.BaseStorage,
return None
index = info.get('index')
pos = info.get('pos')
vindex = info.get('vindex')
if index is None or pos is None or vindex is None:
if index is None or pos is None:
return None
pos = long(pos)
......@@ -393,7 +374,7 @@ class FileStorage(BaseStorage.BaseStorage,
if not tid:
return None
return index, vindex, pos, tid
return index, pos, tid
def close(self):
self._file.close()
......@@ -428,83 +409,6 @@ class FileStorage(BaseStorage.BaseStorage,
return result
def abortVersion(self, src, transaction):
return self.commitVersion(src, '', transaction, abort=True)
def commitVersion(self, src, dest, transaction, abort=False):
# We are going to commit by simply storing back pointers.
if self._is_read_only:
raise POSException.ReadOnlyError()
if not (src and isinstance(src, StringType)
and isinstance(dest, StringType)):
raise POSException.VersionCommitError('Invalid source version')
if src == dest:
raise POSException.VersionCommitError(
"Can't commit to same version: %s" % repr(src))
if dest and abort:
raise POSException.VersionCommitError(
"Internal error, can't abort to a version")
if transaction is not self._transaction:
raise POSException.StorageTransactionError(self, transaction)
self._lock_acquire()
try:
return self._commitVersion(src, dest, transaction, abort)
finally:
self._lock_release()
def _commitVersion(self, src, dest, transaction, abort=False):
# call after checking arguments and acquiring lock
srcpos = self._vindex_get(src, 0)
spos = p64(srcpos)
# middle holds bytes 16:34 of a data record:
# pos of transaction, len of version name, data length
# commit version never writes data, so data length is always 0
middle = pack(">8sH8s", p64(self._pos), len(dest), z64)
if dest:
sd = p64(self._vindex_get(dest, 0))
heredelta = 66 + len(dest)
else:
sd = ''
heredelta = 50
here = self._pos + (self._tfile.tell() + self._thl)
oids = []
current_oids = {}
while srcpos:
h = self._read_data_header(srcpos)
if self._index.get(h.oid) == srcpos:
# This is a current record!
self._tindex[h.oid] = here
oids.append(h.oid)
self._tfile.write(h.oid + self._tid + spos + middle)
if dest:
self._tvindex[dest] = here
self._tfile.write(p64(h.pnv) + sd + dest)
sd = p64(here)
self._tfile.write(abort and p64(h.pnv) or spos)
# data backpointer to src data
here += heredelta
current_oids[h.oid] = 1
else:
# Hm. This is a non-current record. Is there a
# current record for this oid?
if not current_oids.has_key(h.oid):
break
srcpos = h.vprev
spos = p64(srcpos)
self._toid2tid_delete.update(current_oids)
return self._tid, oids
def getSize(self):
return self._pos
......@@ -516,15 +420,14 @@ class FileStorage(BaseStorage.BaseStorage,
except TypeError:
raise TypeError("invalid oid %r" % (oid,))
def load(self, oid, version):
def load(self, oid, version=''):
"""Return pickle data and serial number."""
assert not version
self._lock_acquire()
try:
pos = self._lookup_pos(oid)
h = self._read_data_header(pos, oid)
if h.version and h.version != version:
data = self._loadBack_impl(oid, h.pnv)[0]
return data, h.tid
if h.plen:
data = self._file.read(h.plen)
return data, h.tid
......@@ -537,8 +440,6 @@ class FileStorage(BaseStorage.BaseStorage,
self._lock_release()
def loadSerial(self, oid, serial):
# loadSerial must always return non-version data, because it
# is used by conflict resolution.
self._lock_acquire()
try:
pos = self._lookup_pos(oid)
......@@ -549,8 +450,6 @@ class FileStorage(BaseStorage.BaseStorage,
pos = h.prev
if not pos:
raise POSKeyError(oid)
if h.version:
return self._loadBack_impl(oid, h.pnv)[0]
if h.plen:
return self._file.read(h.plen)
else:
......@@ -565,18 +464,6 @@ class FileStorage(BaseStorage.BaseStorage,
end_tid = None
while True:
h = self._read_data_header(pos, oid)
if h.version:
# Just follow the pnv pointer to the previous
# non-version data.
if not h.pnv:
# Object was created in version. There is no
# before data to find.
return None
pos = h.pnv
# The end_tid for the non-version data is not affected
# by versioned data records.
continue
if h.tid < tid:
break
......@@ -594,21 +481,13 @@ class FileStorage(BaseStorage.BaseStorage,
finally:
self._lock_release()
def modifiedInVersion(self, oid):
self._lock_acquire()
try:
pos = self._lookup_pos(oid)
h = self._read_data_header(pos, oid)
return h.version
finally:
self._lock_release()
def store(self, oid, oldserial, data, version, transaction):
if self._is_read_only:
raise POSException.ReadOnlyError()
if transaction is not self._transaction:
raise POSException.StorageTransactionError(self, transaction)
assert not version
self._lock_acquire()
try:
if oid > self._oid:
......@@ -620,10 +499,6 @@ class FileStorage(BaseStorage.BaseStorage,
cached_tid = self._get_cached_tid(oid)
if cached_tid is None:
h = self._read_data_header(old, oid)
if h.version:
if h.version != version:
raise VersionLockError(oid, h.version)
pnv = h.pnv
cached_tid = h.tid
if oldserial != cached_tid:
......@@ -638,20 +513,9 @@ class FileStorage(BaseStorage.BaseStorage,
pos = self._pos
here = pos + self._tfile.tell() + self._thl
self._tindex[oid] = here
new = DataHeader(oid, self._tid, old, pos, len(version),
len(data))
if version:
# Link to last record for this version:
pv = (self._tvindex.get(version, 0)
or self._vindex.get(version, 0))
if pnv is None:
pnv = old
new.setVersion(version, pnv, pv)
self._tvindex[version] = here
self._toid2tid_delete[oid] = 1
else:
self._toid2tid[oid] = self._tid
new = DataHeader(oid, self._tid, old, pos, 0, len(data))
self._toid2tid[oid] = self._tid
self._tfile.write(new.asString())
self._tfile.write(data)
......@@ -737,6 +601,8 @@ class FileStorage(BaseStorage.BaseStorage,
raise POSException.ReadOnlyError()
if transaction is not self._transaction:
raise POSException.StorageTransactionError(self, transaction)
if version:
raise TypeError("Versions are no-longer supported")
self._lock_acquire()
try:
......@@ -761,17 +627,8 @@ class FileStorage(BaseStorage.BaseStorage,
dlen = len(data)
# Write the recovery data record
new = DataHeader(oid, serial, old, self._pos, len(version), dlen)
if version:
pnv = self._restore_pnv(oid, old, version, prev_pos) or old
vprev = self._tvindex.get(version, 0)
if not vprev:
vprev = self._vindex.get(version, 0)
new.setVersion(version, pnv, vprev)
self._tvindex[version] = here
self._toid2tid_delete[oid] = 1
else:
self._toid2tid[oid] = serial
new = DataHeader(oid, serial, old, self._pos, 0, dlen)
self._toid2tid[oid] = serial
self._tfile.write(new.asString())
......@@ -788,38 +645,11 @@ class FileStorage(BaseStorage.BaseStorage,
finally:
self._lock_release()
def _restore_pnv(self, oid, prev, version, bp):
# Find a valid pnv (previous non-version) pointer for this version.
# If there is no previous record, there can't be a pnv.
if not prev:
return None
# Load the record pointed to be prev
h = self._read_data_header(prev, oid)
if h.version:
return h.pnv
if h.back:
# TODO: Not sure the following is always true:
# The previous record is not for this version, yet we
# have a backpointer to it. The current record must
# be an undo of an abort or commit, so the backpointer
# must be to a version record with a pnv.
h2 = self._read_data_header(h.back, oid)
if h2.version:
return h2.pnv
return None
def supportsUndo(self):
return 1
def supportsVersions(self):
return 1
def _clear_temp(self):
self._tindex.clear()
self._tvindex.clear()
self._toid2tid.clear()
self._toid2tid_delete.clear()
if self._tfile is not None:
......@@ -891,7 +721,6 @@ class FileStorage(BaseStorage.BaseStorage,
self._pos = nextpos
self._index.update(self._tindex)
self._vindex.update(self._tvindex)
self._oid2tid.update(self._toid2tid)
for oid in self._toid2tid_delete.keys():
try:
......@@ -916,8 +745,8 @@ class FileStorage(BaseStorage.BaseStorage,
self._nextpos=0
def _undoDataInfo(self, oid, pos, tpos):
"""Return the tid, data pointer, data, and version for the oid
record at pos"""
"""Return the tid, data pointer, and data for the oid record at pos
"""
if tpos:
pos = tpos - self._pos - self._thl
tpos = self._tfile.tell()
......@@ -938,7 +767,7 @@ class FileStorage(BaseStorage.BaseStorage,
if tpos:
self._tfile.seek(tpos) # Restore temp file to end
return h.tid, pos, data, h.version
return h.tid, pos, data
def getTid(self, oid):
self._lock_acquire()
......@@ -957,24 +786,17 @@ class FileStorage(BaseStorage.BaseStorage,
finally:
self._lock_release()
def _getVersion(self, oid, pos):
h = self._read_data_header(pos, oid)
if h.version:
return h.version, h.pnv
else:
return "", None
def _transactionalUndoRecord(self, oid, pos, tid, pre, version):
def _transactionalUndoRecord(self, oid, pos, tid, pre):
"""Get the undo information for a data record
'pos' points to the data header for 'oid' in the transaction
being undone. 'tid' refers to the transaction being undone.
'pre' is the 'prev' field of the same data header.
Return a 5-tuple consisting of a pickle, data pointer,
version, packed non-version data pointer, and current
position. If the pickle is true, then the data pointer must
be 0, but the pickle can be empty *and* the pointer 0.
Return a 3-tuple consisting of a pickle, data pointer, and
current position. If the pickle is true, then the data
pointer must be 0, but the pickle can be empty *and* the
pointer 0.
"""
copy = 1 # Can we just copy a data pointer
......@@ -987,11 +809,8 @@ class FileStorage(BaseStorage.BaseStorage,
if tipos != pos:
# Eek, a later transaction modified the data, but,
# maybe it is pointing at the same data we are.
ctid, cdataptr, cdata, cver = self._undoDataInfo(oid, ipos, tpos)
# Versions of undone record and current record *must* match!
if cver != version:
raise UndoError('Current and undone versions differ', oid)
ctid, cdataptr, cdata = self._undoDataInfo(oid, ipos, tpos)
if cdataptr != pos:
# We aren't sure if we are talking about the same data
try:
......@@ -1018,12 +837,11 @@ class FileStorage(BaseStorage.BaseStorage,
if not pre:
# There is no previous revision, because the object creation
# is being undone.
return "", 0, "", "", ipos
return "", 0, ipos
version, snv = self._getVersion(oid, pre)
if copy:
# we can just copy our previous-record pointer forward
return "", pre, version, snv, ipos
return "", pre, ipos
try:
bdata = self._loadBack_impl(oid, pre)[0]
......@@ -1033,7 +851,7 @@ class FileStorage(BaseStorage.BaseStorage,
data = self.tryToResolveConflict(oid, ctid, tid, bdata, cdata)
if data:
return data, 0, version, snv, ipos
return data, 0, ipos
raise UndoError("Some data were modified by a later transaction", oid)
......@@ -1148,18 +966,13 @@ class FileStorage(BaseStorage.BaseStorage,
assert base + self._tfile.tell() == here, (here, base,
self._tfile.tell())
try:
p, prev, v, snv, ipos = self._transactionalUndoRecord(
h.oid, pos, h.tid, h.prev, h.version)
p, prev, ipos = self._transactionalUndoRecord(
h.oid, pos, h.tid, h.prev)
except UndoError, v:
# Don't fail right away. We may be redeemed later!
failures[h.oid] = v
else:
new = DataHeader(h.oid, self._tid, ipos, otloc, len(v),
len(p))
if v:
vprev = self._tvindex.get(v, 0) or self._vindex.get(v, 0)
new.setVersion(v, snv, vprev)
self._tvindex[v] = here
new = DataHeader(h.oid, self._tid, ipos, otloc, 0, len(p))
# TODO: This seek shouldn't be necessary, but some other
# bit of code is messing with the file pointer.
......@@ -1182,78 +995,17 @@ class FileStorage(BaseStorage.BaseStorage,
return tindex
def versionEmpty(self, version):
if not version:
# The interface is silent on this case. I think that this should
# be an error, but Barry thinks this should return 1 if we have
# any non-version data. This would be excruciatingly painful to
# test, so I must be right. ;)
raise POSException.VersionError(
'The version must be an non-empty string')
self._lock_acquire()
try:
index=self._index
file=self._file
seek=file.seek
read=file.read
srcpos=self._vindex_get(version, 0)
t=tstatus=None
while srcpos:
seek(srcpos)
oid=read(8)
if index[oid]==srcpos: return 0
h=read(50) # serial, prev(oid), tloc, vlen, plen, pnv, pv
tloc=h[16:24]
if t != tloc:
# We haven't checked this transaction before,
# get its status.
t=tloc
seek(u64(t)+16)
tstatus=read(1)
if tstatus != 'u': return 1
spos=h[-8:]
srcpos=u64(spos)
return 1
finally: self._lock_release()
def versions(self, max=None):
r=[]
a=r.append
keys=self._vindex.keys()
if max is not None: keys=keys[:max]
for version in keys:
if self.versionEmpty(version): continue
a(version)
if max and len(r) >= max: return r
return r
def history(self, oid, version=None, size=1, filter=None):
assert not version
self._lock_acquire()
try:
r = []
pos = self._lookup_pos(oid)
wantver = version
while 1:
if len(r) >= size: return r
h = self._read_data_header(pos)
if h.version:
if wantver is not None and h.version != wantver:
if h.prev:
pos = h.prev
continue
else:
return r
else:
version = ""
wantver = None
th = self._read_txn_header(h.tloc)
if th.ext:
d = loads(th.ext)
......@@ -1264,7 +1016,6 @@ class FileStorage(BaseStorage.BaseStorage,
"user_name": th.user,
"description": th.descr,
"tid": h.tid,
"version": h.version,
"size": h.plen,
})
......@@ -1341,7 +1092,7 @@ class FileStorage(BaseStorage.BaseStorage,
# OK, we're beyond the point of no return
os.rename(self._file_name + '.pack', self._file_name)
self._file = open(self._file_name, 'r+b')
self._initIndex(p.index, p.vindex, p.tindex, p.tvindex,
self._initIndex(p.index, p.tindex,
p.oid2tid, p.toid2tid,
p.toid2tid_delete)
self._pos = opos
......@@ -1375,7 +1126,7 @@ class FileStorage(BaseStorage.BaseStorage,
pos = pos - 8 - u64(read(8))
seek(0)
return [(trans.tid, [(r.oid, r.version) for r in trans])
return [(trans.tid, [(r.oid, '') for r in trans])
for trans in FileIterator(self._file, pos=pos)]
finally:
self._lock_release()
......@@ -1412,15 +1163,13 @@ class FileStorage(BaseStorage.BaseStorage,
except ValueError: # "empty tree" error
next_oid = None
# ignore versions
# XXX if the object was created in a version, this will fail.
data, tid = self.load(oid, "")
return oid, tid, data, next_oid
def shift_transactions_forward(index, vindex, tindex, file, pos, opos):
def shift_transactions_forward(index, tindex, file, pos, opos):
"""Copy transactions forward in the data file
This might be done as part of a recovery effort
......@@ -1432,7 +1181,6 @@ def shift_transactions_forward(index, vindex, tindex, file, pos, opos):
write=file.write
index_get=index.get
vindex_get=vindex.get
# Initialize,
pv=z64
......@@ -1482,18 +1230,10 @@ def shift_transactions_forward(index, vindex, tindex, file, pos, opos):
seek(pos)
h=read(DATA_HDR_LEN)
oid,serial,sprev,stloc,vlen,splen = unpack(DATA_HDR, h)
assert not vlen
plen=u64(splen)
dlen=DATA_HDR_LEN+(plen or 8)
if vlen:
dlen=dlen+(16+vlen)
pnv=u64(read(8))
# skip position of previous version record
seek(8,1)
version=read(vlen)
pv=p64(vindex_get(version, 0))
if status != 'u': vindex[version]=opos
tindex[oid]=opos
if plen: p=read(plen)
......@@ -1511,17 +1251,7 @@ def shift_transactions_forward(index, vindex, tindex, file, pos, opos):
seek(opos)
sprev=p64(index_get(oid, 0))
write(pack(DATA_HDR,
oid,serial,sprev,p64(otpos),vlen,splen))
if vlen:
if not pnv: write(z64)
else:
if pnv >= p2: pnv=pnv-offset
elif pnv >= p1:
pnv=index_get(oid, 0)
write(p64(pnv))
write(pv)
write(version)
oid, serial, sprev, p64(otpos), 0, splen))
write(p)
......@@ -1557,11 +1287,9 @@ def search_back(file, pos):
def recover(file_name):
file=open(file_name, 'r+b')
index={}
vindex={}
tindex={}
pos, oid, tid = read_index(
file, file_name, index, vindex, tindex, recover=1)
pos, oid, tid = read_index(file, file_name, index, tindex, recover=1)
if oid is not None:
print "Nothing to recover"
return
......@@ -1569,9 +1297,7 @@ def recover(file_name):
opos=pos
pos, sz = search_back(file, pos)
if pos < sz:
npos = shift_transactions_forward(
index, vindex, tindex, file, pos, opos,
)
npos = shift_transactions_forward(index, tindex, file, pos, opos)
file.truncate(npos)
......@@ -1580,7 +1306,7 @@ def recover(file_name):
def read_index(file, name, index, vindex, tindex, stop='\377'*8,
def read_index(file, name, index, tindex, stop='\377'*8,
ltid=z64, start=4L, maxoid=z64, recover=0, read_only=0):
"""Scan the file storage and update the index.
......@@ -1591,7 +1317,6 @@ def read_index(file, name, index, vindex, tindex, stop='\377'*8,
file -- a file object (the Data.fs)
name -- the name of the file (presumably file.name)
index -- fsIndex, oid -> data record file offset
vindex -- dictionary, oid -> data record offset for version data
tindex -- dictionary, oid -> data record offset
tindex is cleared before return
......@@ -1719,9 +1444,6 @@ def read_index(file, name, index, vindex, tindex, stop='\377'*8,
dlen = h.recordlen()
tindex[h.oid] = pos
if h.version:
vindex[h.version] = pos
if pos + dlen > tend or h.tloc != tpos:
if recover:
return tpos, None, None
......@@ -2006,8 +1728,7 @@ class RecordIterator(Iterator, BaseStorage.TransactionRecord,
else:
if h.back == 0:
# If the backpointer is 0, then this transaction
# undoes the object creation. It either aborts
# the version that created the object or undid the
# undoes the object creation. It undid the
# transaction that created it. Return None
# instead of a pickle to indicate this.
data = None
......@@ -2017,17 +1738,16 @@ class RecordIterator(Iterator, BaseStorage.TransactionRecord,
# Should it go to the original data like BDBFullStorage?
prev_txn = self.getTxnFromData(h.oid, h.back)
r = Record(h.oid, h.tid, h.version, data, prev_txn, pos)
r = Record(h.oid, h.tid, data, prev_txn, pos)
return r
raise IndexError(index)
class Record(BaseStorage.DataRecord):
"""An abstract database record."""
def __init__(self, oid, tid, version, data, prev, pos):
def __init__(self, oid, tid, data, prev, pos):
self.oid = oid
self.tid = tid
self.version = version
self.data = data
self.data_txn = prev
self.pos = pos
......
......@@ -64,19 +64,10 @@
#
# - 8-byte beginning of transaction record file position.
#
# - 2-byte version length
# - 2-bytes with zero values. (Was version length.)
#
# - 8-byte data length
#
# ? 8-byte position of non-version data record
# (if version length > 0)
#
# ? 8-byte position of previous record in this version
# (if version length > 0)
#
# ? version string
# (if version length > 0)
#
# ? data
# (data length > 0)
#
......@@ -87,41 +78,10 @@
# Also, the object ids time stamps are big-endian, so comparisons
# are meaningful.
#
# Version handling
#
# There isn't a separate store for versions. Each record has a
# version field, indicating what version it is in. The records in a
# version form a linked list. Each record that has a non-empty
# version string has a pointer to the previous record in the version.
# Version back pointers are retained *even* when versions are
# committed or aborted or when transactions are undone.
#
# There is a notion of "current" version records, which are the
# records in a version that are the current records for their
# respective objects. When a version is comitted, the current records
# are committed to the destination version. When a version is
# aborted, the current records are aborted.
#
# When committing or aborting, we search backward through the linked
# list until we find a record for an object that does not have a
# current record in the version. If we find a record for which the
# non-version pointer is the same as the previous pointer, then we
# forget that the corresponding object had a current record in the
# version. This strategy allows us to avoid searching backward through
# previously committed or aborted version records.
#
# Of course, we ignore records in undone transactions when committing
# or aborting.
#
# Backpointers
#
# When we commit or abort a version, we don't copy (or delete)
# and data. Instead, we write records with back pointers.
#
# A version record *never* has a back pointer to a non-version
# record, because we never abort to a version. A non-version record
# may have a back pointer to a version record or to a non-version
# record.
# When we undo a record, we don't copy (or delete)
# data. Instead, we write records with back pointers.
import struct
import logging
......@@ -156,7 +116,6 @@ DATA_HDR = ">8s8sQQHQ"
# constants to support various header sizes
TRANS_HDR_LEN = 23
DATA_HDR_LEN = 42
DATA_VERSION_HDR_LEN = 58
assert struct.calcsize(TRANS_HDR) == TRANS_HDR_LEN
assert struct.calcsize(DATA_HDR) == DATA_HDR_LEN
......@@ -180,9 +139,6 @@ class FileStorageFormatter(object):
If ois is not None, raise CorruptedDataError if oid passed
does not match oid in file.
If there is version data, reads the version part of the header.
If there is no pickle data, reads the back pointer.
"""
self._file.seek(pos)
s = self._file.read(DATA_HDR_LEN)
......@@ -191,17 +147,10 @@ class FileStorageFormatter(object):
h = DataHeaderFromString(s)
if oid is not None and oid != h.oid:
raise CorruptedDataError(oid, s, pos)
if h.vlen:
s = self._file.read(16 + h.vlen)
h.parseVersion(s)
if not h.plen:
h.back = u64(self._file.read(8))
return h
def _write_version_header(self, file, pnv, vprev, version):
s = struct.pack(">8s8s", pnv, vprev)
file.write(s + version)
def _read_txn_header(self, pos, tid=None):
self._file.seek(pos)
s = self._file.read(TRANS_HDR_LEN)
......@@ -284,47 +233,26 @@ def DataHeaderFromString(s):
class DataHeader(object):
"""Header for a data record."""
__slots__ = (
"oid", "tid", "prev", "tloc", "vlen", "plen", "back",
# These three attributes are only defined when vlen > 0
"pnv", "vprev", "version")
__slots__ = ("oid", "tid", "prev", "tloc", "plen", "back")
def __init__(self, oid, tid, prev, tloc, vlen, plen):
self.back = 0 # default
self.version = "" # default
if vlen:
raise ValueError(
"Non-zero version length. Versions aren't supported.")
self.oid = oid
self.tid = tid
self.prev = prev
self.tloc = tloc
self.vlen = vlen
self.plen = plen
self.back = 0 # default
def asString(self):
s = struct.pack(DATA_HDR, self.oid, self.tid, self.prev,
self.tloc, self.vlen, self.plen)
if self.version:
v = struct.pack(">QQ", self.pnv, self.vprev)
return s + v + self.version
else:
return s
def setVersion(self, version, pnv, vprev):
self.version = version
self.vlen = len(version)
self.pnv = pnv
self.vprev = vprev
def parseVersion(self, buf):
pnv, vprev = struct.unpack(">QQ", buf[:16])
self.pnv = pnv
self.vprev = vprev
self.version = buf[16:]
return struct.pack(DATA_HDR, self.oid, self.tid, self.prev,
self.tloc, 0, self.plen)
def recordlen(self):
rlen = DATA_HDR_LEN + (self.plen or 8)
if self.version:
rlen += 16 + self.vlen
return rlen
return DATA_HDR_LEN + (self.plen or 8)
def TxnHeaderFromString(s):
return TxnHeader(*struct.unpack(TRANS_HDR, s))
......
......@@ -14,8 +14,8 @@
import struct
from ZODB.FileStorage import FileIterator
from ZODB.FileStorage.format import TRANS_HDR, TRANS_HDR_LEN, DATA_HDR, DATA_HDR_LEN
from ZODB.FileStorage.format import DATA_HDR_LEN
from ZODB.FileStorage.format import TRANS_HDR, TRANS_HDR_LEN
from ZODB.FileStorage.format import DATA_HDR, DATA_HDR_LEN
from ZODB.TimeStamp import TimeStamp
from ZODB.utils import u64, get_pickle_metadata
from ZODB.tests.StorageTestBase import zodb_unpickle
......@@ -41,11 +41,6 @@ def fsdump(path, file=None, with_offset=1):
size = " size=%d" % len(rec.data)
fullclass = "%s.%s" % (modname, classname)
if rec.version:
version = " version=%r" % rec.version
else:
version = ""
if rec.data_txn:
# It would be nice to print the transaction number
# (i) but it would be expensive to keep track of.
......@@ -53,8 +48,8 @@ def fsdump(path, file=None, with_offset=1):
else:
bp = ""
print >> file, (" data #%05d oid=%016x%s%s class=%s%s" %
(j, u64(rec.oid), version, size, fullclass, bp))
print >> file, (" data #%05d oid=%016x%s class=%s%s" %
(j, u64(rec.oid), size, fullclass, bp))
iter.close()
def fmt(p64):
......@@ -117,14 +112,7 @@ class Dumper:
print >> self.dest, "revid: %s" % fmt(revid)
print >> self.dest, "previous record offset: %d" % prev
print >> self.dest, "transaction offset: %d" % tloc
if vlen:
pnv = self.file.read(8)
sprevdata = self.file.read(8)
version = self.file.read(vlen)
print >> self.dest, "version: %r" % version
print >> self.dest, "non-version data offset: %d" % u64(pnv)
print >> self.dest, ("previous version data offset: %d" %
u64(sprevdata))
assert not vlen
print >> self.dest, "len(data): %d" % dlen
self.file.read(dlen)
if not dlen:
......
......@@ -148,7 +148,7 @@ class Tracer(object):
txn._tpos)
def _save_references(self, drec):
# drec has members oid, tid, version, data, data_txn
# drec has members oid, tid, data, data_txn
tid, oid, pick, pos = drec.tid, drec.oid, drec.data, drec.pos
if pick:
if oid in self.oids:
......@@ -159,13 +159,12 @@ class Tracer(object):
self._records_map[oid] = drec
self._records.append(drec)
elif oid in self.oids:
# Or maybe it's a version abort.
self._msg(oid, tid, "creation undo at", pos)
# Process next data record. If a message is produced, self._produced_msg
# will be set True.
def _check_drec(self, drec):
# drec has members oid, tid, version, data, data_txn
# drec has members oid, tid, data, data_txn
tid, oid, pick, pos = drec.tid, drec.oid, drec.data, drec.pos
ref2name = self._ref2name
ref2name_get = ref2name.get
......
......@@ -33,27 +33,13 @@ from ZODB.fsIndex import fsIndex
from ZODB.FileStorage.format import FileStorageFormatter, CorruptedDataError
from ZODB.FileStorage.format import DataHeader, TRANS_HDR_LEN
class DataCopier(FileStorageFormatter):
"""Mixin class for copying transactions into a storage.
class PackCopier(FileStorageFormatter):
The restore() and pack() methods share a need to copy data records
and update pointers to data in earlier transaction records. This
class provides the shared logic.
The mixin extends the FileStorageFormatter with a copy() method.
It also requires that the concrete class provides the following
attributes:
_file -- file with earlier destination data
_tfile -- destination file for copied data
_pos -- file pos of destination transaction
_tindex -- maps oid to data record file pos
_tvindex -- maps version name to data record file pos
_tindex and _tvindex are updated by copy().
The copy() method does not do any locking.
"""
def __init__(self, f, index, tindex):
self._file = f
self._index = index
self._tindex = tindex
self._pos = None
def _txn_find(self, tid, stop_at_pack):
# _pos always points just past the last transaction
......@@ -107,43 +93,7 @@ class DataCopier(FileStorageFormatter):
pos += h.recordlen()
return 0
def _restore_pnv(self, oid, prev, version, bp):
# Find a valid pnv (previous non-version) pointer for this version.
# If there is no previous record, there can't be a pnv.
if not prev:
return None
pnv = None
h = self._read_data_header(prev, oid)
# If the previous record is for a version, it must have
# a valid pnv.
if h.version:
return h.pnv
elif bp:
# Unclear: Not sure the following is always true:
# The previous record is not for this version, yet we
# have a backpointer to it. The current record must
# be an undo of an abort or commit, so the backpointer
# must be to a version record with a pnv.
h2 = self._read_data_header(bp, oid)
if h2.version:
return h2.pnv
else:
warn("restore could not find previous non-version data "
"at %d or %d", prev, bp)
return None
def _resolve_backpointer(self, prev_txn, oid, data):
prev_pos = 0
if prev_txn is not None:
prev_txn_pos = self._txn_find(prev_txn, 0)
if prev_txn_pos:
prev_pos = self._data_find(prev_txn_pos, oid, data)
return prev_pos
def copy(self, oid, serial, data, version, prev_txn,
txnpos, datapos):
def copy(self, oid, serial, data, prev_txn, txnpos, datapos):
prev_pos = self._resolve_backpointer(prev_txn, oid, data)
old = self._index.get(oid, 0)
# Calculate the pos the record will have in the storage.
......@@ -158,31 +108,34 @@ class DataCopier(FileStorageFormatter):
else:
dlen = len(data)
# Write the recovery data record
h = DataHeader(oid, serial, old, txnpos, len(version), dlen)
if version:
h.version = version
pnv = self._restore_pnv(oid, old, version, prev_pos)
if pnv is not None:
h.pnv = pnv
else:
h.pnv = old
# Link to the last record for this version
h.vprev = self._tvindex.get(version, 0)
if not h.vprev:
h.vprev = self._vindex.get(version, 0)
self._tvindex[version] = here
h = DataHeader(oid, serial, old, txnpos, 0, dlen)
self._tfile.write(h.asString())
self._file.write(h.asString())
# Write the data or a backpointer
if data is None:
if prev_pos:
self._tfile.write(p64(prev_pos))
self._file.write(p64(prev_pos))
else:
# Write a zero backpointer, which indicates an
# un-creation transaction.
self._tfile.write(z64)
self._file.write(z64)
else:
self._tfile.write(data)
self._file.write(data)
def setTxnPos(self, pos):
self._pos = pos
def _resolve_backpointer(self, prev_txn, oid, data):
pos = self._file.tell()
try:
prev_pos = 0
if prev_txn is not None:
prev_txn_pos = self._txn_find(prev_txn, 0)
if prev_txn_pos:
prev_pos = self._data_find(prev_txn_pos, oid, data)
return prev_pos
finally:
self._file.seek(pos)
class GC(FileStorageFormatter):
......@@ -194,7 +147,6 @@ class GC(FileStorageFormatter):
# packpos: position of first txn header after pack time
self.packpos = None
self.oid2curpos = fsIndex() # maps oid to current data record position
self.oid2verpos = fsIndex() # maps oid to current version data
# The set of reachable revisions of each object.
#
......@@ -228,7 +180,6 @@ class GC(FileStorageFormatter):
self.findReachableFromFuture()
# These mappings are no longer needed and may consume a lot
# of space.
del self.oid2verpos
del self.oid2curpos
def buildPackIndex(self):
......@@ -254,10 +205,7 @@ class GC(FileStorageFormatter):
while pos < end:
dh = self._read_data_header(pos)
self.checkData(th, tpos, dh, pos)
if dh.version:
self.oid2verpos[dh.oid] = pos
else:
self.oid2curpos[dh.oid] = pos
self.oid2curpos[dh.oid] = pos
pos += dh.recordlen()
tlen = self._read_num(pos)
......@@ -302,11 +250,6 @@ class GC(FileStorageFormatter):
L.append(pos)
todo.extend(self.findrefs(pos))
pos = self.oid2verpos.get(oid)
if pos is not None:
L.append(pos)
todo.extend(self.findrefs(pos))
if not L:
continue
......@@ -344,15 +287,6 @@ class GC(FileStorageFormatter):
else:
self.reachable[dh.oid] = dh.back
if dh.version and dh.pnv:
if self.reachable.has_key(dh.oid):
L = self.reach_ex.setdefault(dh.oid, [])
if dh.pnv not in L:
L.append(dh.pnv)
extra_roots.append(dh.pnv)
else:
self.reachable[dh.oid] = dh.back
pos += dh.recordlen()
tlen = self._read_num(pos)
......@@ -377,43 +311,6 @@ class GC(FileStorageFormatter):
else:
return []
class PackCopier(DataCopier):
# PackCopier has to cope with _file and _tfile being the
# same file. The copy() implementation is written assuming
# that they are different, so that using one object doesn't
# mess up the file pointer for the other object.
# PackCopier overrides _resolve_backpointer() and _restore_pnv()
# to guarantee that they keep the file pointer for _tfile in
# the right place.
def __init__(self, f, index, vindex, tindex, tvindex):
self._file = f
self._tfile = f
self._index = index
self._vindex = vindex
self._tindex = tindex
self._tvindex = tvindex
self._pos = None
def setTxnPos(self, pos):
self._pos = pos
def _resolve_backpointer(self, prev_txn, oid, data):
pos = self._tfile.tell()
try:
return DataCopier._resolve_backpointer(self, prev_txn, oid, data)
finally:
self._tfile.seek(pos)
def _restore_pnv(self, oid, prev, version, bp):
pos = self._tfile.tell()
try:
return DataCopier._restore_pnv(self, oid, prev, version, bp)
finally:
self._tfile.seek(pos)
class FileStoragePacker(FileStorageFormatter):
# path is the storage file path.
......@@ -447,23 +344,15 @@ class FileStoragePacker(FileStorageFormatter):
# The packer will use several indexes.
# index: oid -> pos
# vindex: version -> pos
# tindex: oid -> pos, for current txn
# tvindex: version -> pos, for current txn
# oid2tid: not used by the packer
self.index = fsIndex()
self.vindex = {}
self.tindex = {}
self.tvindex = {}
self.oid2tid = {}
self.toid2tid = {}
self.toid2tid_delete = {}
# Index for non-version data. This is a temporary structure
# to reduce I/O during packing
self.nvindex = fsIndex()
def pack(self):
# Pack copies all data reachable at the pack time or later.
#
......@@ -486,8 +375,7 @@ class FileStoragePacker(FileStorageFormatter):
self._file.seek(0)
self._tfile.write(self._file.read(self._metadata_size))
self._copier = PackCopier(self._tfile, self.index, self.vindex,
self.tindex, self.tvindex)
self._copier = PackCopier(self._tfile, self.index, self.tindex)
ipos, opos = self.copyToPacktime()
assert ipos == self.gc.packpos
......@@ -623,13 +511,7 @@ class FileStoragePacker(FileStorageFormatter):
h.plen = len(data)
h.tloc = new_tpos
pos = self._tfile.tell()
if h.version:
h.pnv = self.index.get(h.oid, 0)
h.vprev = self.vindex.get(h.version, 0)
self.vindex[h.version] = pos
self.index[h.oid] = pos
if h.version:
self.vindex[h.version] = pos
self._tfile.write(h.asString())
self._tfile.write(data)
if not data:
......@@ -681,8 +563,8 @@ class FileStoragePacker(FileStorageFormatter):
if h.back:
prev_txn = self.getTxnFromData(h.oid, h.back)
self._copier.copy(h.oid, h.tid, data, h.version,
prev_txn, pos, self._tfile.tell())
self._copier.copy(h.oid, h.tid, data, prev_txn,
pos, self._tfile.tell())
tlen = self._tfile.tell() - pos
assert tlen == th.tlen
......@@ -691,8 +573,6 @@ class FileStoragePacker(FileStorageFormatter):
self.index.update(self.tindex)
self.tindex.clear()
self.vindex.update(self.tvindex)
self.tvindex.clear()
if self._lock_counter % 20 == 0:
self._commit_lock_acquire()
return ipos
......@@ -338,8 +338,8 @@ def recover(inp, outp, verbose=0, partial=False, force=False, pack=None):
else:
l = len(r.data)
print "%7d %s %s" % (u64(r.oid), l, r.version)
ofs.restore(r.oid, r.tid, r.data, r.version, r.data_txn,
print "%7d %s %s" % (u64(r.oid), l)
ofs.restore(r.oid, r.tid, r.data, '', r.data_txn,
txn)
nrec += 1
except (KeyboardInterrupt, SystemExit):
......
......@@ -22,7 +22,7 @@ import cPickle
import struct
from ZODB.FileStorage.format import TRANS_HDR, DATA_HDR, TRANS_HDR_LEN
from ZODB.FileStorage.format import DATA_HDR_LEN, DATA_VERSION_HDR_LEN
from ZODB.FileStorage.format import DATA_HDR_LEN
from ZODB.utils import u64
from persistent.TimeStamp import TimeStamp
......@@ -103,12 +103,9 @@ class DataHeader:
serial 8-16 object serial numver
prev_rec_pos 16-24 position of previous data record for object
txn_pos 24-32 position of txn header
version_len 32-34 length of version
version_len 32-34 length of version (always 0)
data_len 34-42 length of data
nonversion_pos 42-50* position of nonversion data record
prev_version_pos 50-58* pos of previous version data record
* these attributes are only present if version_len != 0.
"""
def __init__(self, file, pos):
......@@ -118,28 +115,19 @@ class DataHeader:
def _read_header(self):
self._file.seek(self._pos)
self._hdr = self._file.read(DATA_VERSION_HDR_LEN)
self._hdr = self._file.read(DATA_HDR_LEN)
# always read the longer header, just in case
(self.oid, self.serial, prev_rec_pos, txn_pos, self.version_len,
data_len) = struct.unpack(DATA_HDR, self._hdr[:DATA_HDR_LEN])
(self.oid, self.serial, prev_rec_pos, txn_pos, vlen, data_len
) = struct.unpack(DATA_HDR, self._hdr[:DATA_HDR_LEN])
assert not vlen
self.prev_rec_pos = u64(prev_rec_pos)
self.txn_pos = u64(txn_pos)
self.data_len = u64(data_len)
if self.version_len:
s = self._hdr[DATA_HDR_LEN:]
self.nonversion_pos = u64(s[:8])
self.prev_version_pos = u64(s[8:])
else:
self.nonversion_pos = None
self.prev_version_pos = None
def next_offset(self):
"""Return offset of next record."""
off = self._pos + self.data_len
if self.version_len:
off += self.version_len + DATA_VERSION_HDR_LEN
else:
off += DATA_HDR_LEN
off += DATA_HDR_LEN
if self.data_len == 0:
off += 8 # backpointer
return off
......
......@@ -44,40 +44,12 @@ class BasicStorage:
self._storage.store,
0, 0, 0, 0, transaction.Transaction())
if self.__supportsVersions():
try:
self._storage.abortVersion(
'dummy', transaction.Transaction())
except (POSException.StorageTransactionError,
POSException.VersionCommitError):
pass # test passed ;)
else:
assert 0, "Should have failed, invalid transaction."
try:
self._storage.commitVersion('dummy', 'dummer',
transaction.Transaction())
except (POSException.StorageTransactionError,
POSException.VersionCommitError):
pass # test passed ;)
else:
assert 0, "Should have failed, invalid transaction."
self.assertRaises(
POSException.StorageTransactionError,
self._storage.store,
0, 1, 2, 3, transaction.Transaction())
self._storage.tpc_abort(t)
def __supportsVersions(self):
storage = self._storage
try:
supportsVersions = storage.supportsVersions
except AttributeError:
return False
else:
return supportsVersions()
def checkSerialIsNoneForInitialRevision(self):
eq = self.assertEqual
oid = self._storage.new_oid()
......@@ -95,13 +67,13 @@ class BasicStorage:
eq(value, MinPO(11))
eq(revid, newrevid)
def checkNonVersionStore(self):
def checkStore(self):
revid = ZERO
newrevid = self._dostore(revid=None)
# Finish the transaction.
self.assertNotEqual(newrevid, revid)
def checkNonVersionStoreAndLoad(self):
def checkStoreAndLoad(self):
eq = self.assertEqual
oid = self._storage.new_oid()
self._dostore(oid=oid, data=MinPO(7))
......@@ -115,12 +87,6 @@ class BasicStorage:
data, revid = self._storage.load(oid, '')
eq(zodb_unpickle(data), MinPO(21))
def checkNonVersionModifiedInVersion(self):
if self.__supportsVersions():
oid = self._storage.new_oid()
self._dostore(oid=oid)
self.assertEqual(self._storage.modifiedInVersion(oid), '')
def checkConflicts(self):
oid = self._storage.new_oid()
revid1 = self._dostore(oid, data=MinPO(11))
......
......@@ -23,7 +23,7 @@ from transaction import Transaction
class HistoryStorage:
def checkSimpleHistory(self):
eq = self.assertEqual
# Store a couple of non-version revisions of the object
# Store a couple of revisions of the object
oid = self._storage.new_oid()
self.assertRaises(KeyError,self._storage.history,oid)
revid1 = self._dostore(oid, data=MinPO(11))
......@@ -34,197 +34,29 @@ class HistoryStorage:
eq(len(h), 1)
d = h[0]
eq(d['tid'], revid3)
eq(d['version'], '')
# Try to get 2 historical revisions
h = self._storage.history(oid, size=2)
eq(len(h), 2)
d = h[0]
eq(d['tid'], revid3)
eq(d['version'], '')
d = h[1]
eq(d['tid'], revid2)
eq(d['version'], '')
# Try to get all 3 historical revisions
h = self._storage.history(oid, size=3)
eq(len(h), 3)
d = h[0]
eq(d['tid'], revid3)
eq(d['version'], '')
d = h[1]
eq(d['tid'], revid2)
eq(d['version'], '')
d = h[2]
eq(d['tid'], revid1)
eq(d['version'], '')
# There should be no more than 3 revisions
h = self._storage.history(oid, size=4)
eq(len(h), 3)
d = h[0]
eq(d['tid'], revid3)
eq(d['version'], '')
d = h[1]
eq(d['tid'], revid2)
eq(d['version'], '')
d = h[2]
eq(d['tid'], revid1)
eq(d['version'], '')
def checkVersionHistory(self):
if not self._storage.supportsVersions():
return
eq = self.assertEqual
# Store a couple of non-version revisions
oid = self._storage.new_oid()
revid1 = self._dostore(oid, data=MinPO(11))
revid2 = self._dostore(oid, revid=revid1, data=MinPO(12))
revid3 = self._dostore(oid, revid=revid2, data=MinPO(13))
# Now store some new revisions in a version
version = 'test-version'
revid4 = self._dostore(oid, revid=revid3, data=MinPO(14),
version=version)
revid5 = self._dostore(oid, revid=revid4, data=MinPO(15),
version=version)
revid6 = self._dostore(oid, revid=revid5, data=MinPO(16),
version=version)
# Now, try to get the six historical revisions (first three are in
# 'test-version', followed by the non-version revisions).
h = self._storage.history(oid, version, 100)
eq(len(h), 6)
d = h[0]
eq(d['tid'], revid6)
eq(d['version'], version)
d = h[1]
eq(d['tid'], revid5)
eq(d['version'], version)
d = h[2]
eq(d['tid'], revid4)
eq(d['version'], version)
d = h[3]
eq(d['tid'], revid3)
eq(d['version'], '')
d = h[4]
eq(d['tid'], revid2)
eq(d['version'], '')
d = h[5]
eq(d['tid'], revid1)
eq(d['version'], '')
def checkHistoryAfterVersionCommit(self):
if not self._storage.supportsVersions():
return
eq = self.assertEqual
# Store a couple of non-version revisions
oid = self._storage.new_oid()
revid1 = self._dostore(oid, data=MinPO(11))
revid2 = self._dostore(oid, revid=revid1, data=MinPO(12))
revid3 = self._dostore(oid, revid=revid2, data=MinPO(13))
# Now store some new revisions in a version
version = 'test-version'
revid4 = self._dostore(oid, revid=revid3, data=MinPO(14),
version=version)
revid5 = self._dostore(oid, revid=revid4, data=MinPO(15),
version=version)
revid6 = self._dostore(oid, revid=revid5, data=MinPO(16),
version=version)
# Now commit the version
t = Transaction()
self._storage.tpc_begin(t)
self._storage.commitVersion(version, '', t)
self._storage.tpc_vote(t)
self._storage.tpc_finish(t)
# After consultation with Jim, we agreed that the semantics of
# revision id's after a version commit is that the committed object
# gets a new serial number (a.k.a. revision id). Note that
# FileStorage is broken here; the serial number in the post-commit
# non-version revision will be the same as the serial number of the
# previous in-version revision.
#
# BAW: Using load() is the only way to get the serial number of the
# current revision of the object. But at least this works for both
# broken and working storages.
ign, revid7 = self._storage.load(oid, '')
# Now, try to get the six historical revisions (first three are in
# 'test-version', followed by the non-version revisions).
h = self._storage.history(oid, version, 100)
eq(len(h), 7)
d = h[0]
eq(d['tid'], revid7)
eq(d['version'], '')
d = h[1]
eq(d['tid'], revid6)
eq(d['version'], version)
d = h[2]
eq(d['tid'], revid5)
eq(d['version'], version)
d = h[3]
eq(d['tid'], revid4)
eq(d['version'], version)
d = h[4]
eq(d['tid'], revid3)
eq(d['version'], '')
d = h[5]
eq(d['tid'], revid2)
eq(d['version'], '')
d = h[6]
eq(d['tid'], revid1)
eq(d['version'], '')
def checkHistoryAfterVersionAbort(self):
if not self._storage.supportsVersions():
return
eq = self.assertEqual
# Store a couple of non-version revisions
oid = self._storage.new_oid()
revid1 = self._dostore(oid, data=MinPO(11))
revid2 = self._dostore(oid, revid=revid1, data=MinPO(12))
revid3 = self._dostore(oid, revid=revid2, data=MinPO(13))
# Now store some new revisions in a version
version = 'test-version'
revid4 = self._dostore(oid, revid=revid3, data=MinPO(14),
version=version)
revid5 = self._dostore(oid, revid=revid4, data=MinPO(15),
version=version)
revid6 = self._dostore(oid, revid=revid5, data=MinPO(16),
version=version)
# Now commit the version
t = Transaction()
self._storage.tpc_begin(t)
self._storage.abortVersion(version, t)
self._storage.tpc_vote(t)
self._storage.tpc_finish(t)
# After consultation with Jim, we agreed that the semantics of
# revision id's after a version commit is that the committed object
# gets a new serial number (a.k.a. revision id). Note that
# FileStorage is broken here; the serial number in the post-commit
# non-version revision will be the same as the serial number of the
# previous in-version revision.
#
# BAW: Using load() is the only way to get the serial number of the
# current revision of the object. But at least this works for both
# broken and working storages.
ign, revid7 = self._storage.load(oid, '')
# Now, try to get the six historical revisions (first three are in
# 'test-version', followed by the non-version revisions).
h = self._storage.history(oid, version, 100)
eq(len(h), 7)
d = h[0]
eq(d['tid'], revid7)
eq(d['version'], '')
d = h[1]
eq(d['tid'], revid6)
eq(d['version'], version)
d = h[2]
eq(d['tid'], revid5)
eq(d['version'], version)
d = h[3]
eq(d['tid'], revid4)
eq(d['version'], version)
d = h[4]
eq(d['tid'], revid3)
eq(d['version'], '')
d = h[5]
eq(d['tid'], revid2)
eq(d['version'], '')
d = h[6]
eq(d['tid'], revid1)
eq(d['version'], '')
......@@ -34,7 +34,6 @@ class IteratorCompare:
for rec in reciter:
eq(rec.oid, oid)
eq(rec.tid, revid)
eq(rec.version, '')
eq(zodb_unpickle(rec.data), MinPO(val))
val = val + 1
eq(val, val0 + len(revids))
......@@ -59,34 +58,7 @@ class IteratorStorage(IteratorCompare):
txniter.close()
self.assertRaises(IOError, txniter.__getitem__, 0)
def checkVersionIterator(self):
if not self._storage.supportsVersions():
return
self._dostore()
self._dostore(version='abort')
self._dostore()
self._dostore(version='abort')
t = Transaction()
self._storage.tpc_begin(t)
self._storage.abortVersion('abort', t)
self._storage.tpc_vote(t)
self._storage.tpc_finish(t)
self._dostore(version='commit')
self._dostore()
self._dostore(version='commit')
t = Transaction()
self._storage.tpc_begin(t)
self._storage.commitVersion('commit', '', t)
self._storage.tpc_vote(t)
self._storage.tpc_finish(t)
txniter = self._storage.iterator()
for trans in txniter:
for data in trans:
pass
def checkUndoZombieNonVersion(self):
def checkUndoZombie(self):
oid = self._storage.new_oid()
revid = self._dostore(oid, data=MinPO(94))
# Get the undo information
......@@ -215,7 +187,6 @@ class IteratorDeepCompare:
for rec1, rec2 in zip(txn1, txn2):
eq(rec1.oid, rec2.oid)
eq(rec1.tid, rec2.tid)
eq(rec1.version, rec2.version)
eq(rec1.data, rec2.data)
# Make sure there are no more records left in rec1 and rec2,
# meaning they were the same length.
......
......@@ -165,13 +165,6 @@ class ExtStorageClientThread(StorageClientThread):
names = ["do_load"]
storage = self.storage
try:
supportsVersions = storage.supportsVersions
except AttributeError:
pass
else:
if supportsVersions():
names.append("do_modifiedInVersion")
try:
supportsUndo = storage.supportsUndo
......@@ -203,10 +196,6 @@ class ExtStorageClientThread(StorageClientThread):
oid = self.pick_oid()
self.storage.loadSerial(oid, self.oids[oid])
def do_modifiedInVersion(self):
oid = self.pick_oid()
self.storage.modifiedInVersion(oid)
def do_undoLog(self):
self.storage.undoLog(0, -20)
......
......@@ -28,8 +28,6 @@ class PersistentStorage:
self._dostore()
oid = self._storage.new_oid()
revid = self._dostore(oid)
if self._storage.supportsVersions():
self._dostore(oid, revid, data=8, version='b')
oid = self._storage.new_oid()
revid = self._dostore(oid, data=1)
revid = self._dostore(oid, revid, data=2)
......@@ -40,10 +38,6 @@ class PersistentStorage:
for oid in oids:
p, s = self._storage.load(oid, '')
objects.append((oid, '', p, s))
ver = self._storage.modifiedInVersion(oid)
if ver:
p, s = self._storage.load(oid, ver)
objects.append((oid, ver, p, s))
self._storage.close()
self.open()
......
......@@ -36,7 +36,6 @@ class ReadOnlyStorage:
for oid in self.oids.keys():
data, revid = self._storage.load(oid, '')
self.assertEqual(revid, self.oids[oid])
self.assert_(not self._storage.modifiedInVersion(oid))
# Storages without revisions may not have loadSerial().
try:
_data = self._storage.loadSerial(oid, revid)
......@@ -50,12 +49,6 @@ class ReadOnlyStorage:
t = transaction.Transaction()
self.assertRaises(ReadOnlyError, self._storage.tpc_begin, t)
if self._storage.supportsVersions():
self.assertRaises(ReadOnlyError, self._storage.abortVersion,
'', t)
self.assertRaises(ReadOnlyError, self._storage.commitVersion,
'', '', t)
self.assertRaises(ReadOnlyError, self._storage.store,
'\000' * 8, None, '', '', t)
......
......@@ -32,104 +32,6 @@ class RecoveryStorage(IteratorDeepCompare):
self._dst.copyTransactionsFrom(self._storage)
self.compare(self._storage, self._dst)
def checkRecoveryAcrossVersions(self):
oid = self._storage.new_oid()
revid = self._dostore(oid, data=21)
revid = self._dostore(oid, revid=revid, data=22)
revid = self._dostore(oid, revid=revid, data=23, version='one')
revid = self._dostore(oid, revid=revid, data=34, version='one')
# Now commit the version
t = Transaction()
self._storage.tpc_begin(t)
self._storage.commitVersion('one', '', t)
self._storage.tpc_vote(t)
self._storage.tpc_finish(t)
self._dst.copyTransactionsFrom(self._storage)
self.compare(self._storage, self._dst)
def checkRecoverAbortVersion(self):
oid = self._storage.new_oid()
revid = self._dostore(oid, data=21, version="one")
revid = self._dostore(oid, revid=revid, data=23, version='one')
revid = self._dostore(oid, revid=revid, data=34, version='one')
# Now abort the version and the creation
t = Transaction()
self._storage.tpc_begin(t)
tid, oids = self._storage.abortVersion('one', t)
self._storage.tpc_vote(t)
self._storage.tpc_finish(t)
self.assertEqual(oids, [oid])
self._dst.copyTransactionsFrom(self._storage)
self.compare(self._storage, self._dst)
# Also make sure the the last transaction has a data record
# with None for its data attribute, because we've undone the
# object.
for s in self._storage, self._dst:
iter = s.iterator()
for trans in iter:
pass # iterate until we get the last one
data = trans[0]
self.assertRaises(IndexError, lambda i, t=trans: t[i], 1)
self.assertEqual(data.oid, oid)
self.assertEqual(data.data, None)
def checkRecoverUndoInVersion(self):
oid = self._storage.new_oid()
version = "aVersion"
revid_a = self._dostore(oid, data=MinPO(91))
revid_b = self._dostore(oid, revid=revid_a, version=version,
data=MinPO(92))
revid_c = self._dostore(oid, revid=revid_b, version=version,
data=MinPO(93))
self._undo(self._storage.undoInfo()[0]['id'], [oid])
self._commitVersion(version, '')
self._undo(self._storage.undoInfo()[0]['id'], [oid])
# now copy the records to a new storage
self._dst.copyTransactionsFrom(self._storage)
self.compare(self._storage, self._dst)
# The last two transactions were applied directly rather than
# copied. So we can't use compare() to verify that they new
# transactions are applied correctly. (The new transactions
# will have different timestamps for each storage.)
self._abortVersion(version)
self.assert_(self._storage.versionEmpty(version))
self._undo(self._storage.undoInfo()[0]['id'], [oid])
self.assert_(not self._storage.versionEmpty(version))
# check the data is what we expect it to be
data, revid = self._storage.load(oid, version)
self.assertEqual(zodb_unpickle(data), MinPO(92))
data, revid = self._storage.load(oid, '')
self.assertEqual(zodb_unpickle(data), MinPO(91))
# and swap the storages
tmp = self._storage
self._storage = self._dst
self._abortVersion(version)
self.assert_(self._storage.versionEmpty(version))
self._undo(self._storage.undoInfo()[0]['id'], [oid])
self.assert_(not self._storage.versionEmpty(version))
# check the data is what we expect it to be
data, revid = self._storage.load(oid, version)
self.assertEqual(zodb_unpickle(data), MinPO(92))
data, revid = self._storage.load(oid, '')
self.assertEqual(zodb_unpickle(data), MinPO(91))
# swap them back
self._storage = tmp
# Now remove _dst and copy all the transactions a second time.
# This time we will be able to confirm via compare().
self._dst.close()
self._dst.cleanup()
self._dst = self.new_dest()
self._dst.copyTransactionsFrom(self._storage)
self.compare(self._storage, self._dst)
def checkRestoreAcrossPack(self):
db = DB(self._storage)
c = db.open()
......@@ -150,7 +52,7 @@ class RecoveryStorage(IteratorDeepCompare):
final = list(it)[-1]
self._dst.tpc_begin(final, final.tid, final.status)
for r in final:
self._dst.restore(r.oid, r.tid, r.data, r.version, r.data_txn,
self._dst.restore(r.oid, r.tid, r.data, '', r.data_txn,
final)
it.close()
self._dst.tpc_vote(final)
......
......@@ -160,14 +160,13 @@ class StorageTestBase(unittest.TestCase):
def tearDown(self):
self._close()
def _dostore(self, oid=None, revid=None, data=None, version=None,
def _dostore(self, oid=None, revid=None, data=None,
already_pickled=0, user=None, description=None):
"""Do a complete storage transaction. The defaults are:
- oid=None, ask the storage for a new oid
- revid=None, use a revid of ZERO
- data=None, pickle up some arbitrary data (the integer 7)
- version=None, use the empty string version
Returns the object's new revision id.
"""
......@@ -181,8 +180,6 @@ class StorageTestBase(unittest.TestCase):
data = MinPO(data)
if not already_pickled:
data = zodb_pickle(data)
if version is None:
version = ''
# Begin the transaction
t = transaction.Transaction()
if user is not None:
......@@ -192,7 +189,7 @@ class StorageTestBase(unittest.TestCase):
try:
self._storage.tpc_begin(t)
# Store an object
r1 = self._storage.store(oid, revid, data, version, t)
r1 = self._storage.store(oid, revid, data, '', t)
# Finish the transaction
r2 = self._storage.tpc_vote(t)
revid = handle_serials(oid, r1, r2)
......@@ -202,9 +199,9 @@ class StorageTestBase(unittest.TestCase):
raise
return revid
def _dostoreNP(self, oid=None, revid=None, data=None, version=None,
def _dostoreNP(self, oid=None, revid=None, data=None,
user=None, description=None):
return self._dostore(oid, revid, data, version, 1, user, description)
return self._dostore(oid, revid, data, 1, user, description)
# The following methods depend on optional storage features.
......@@ -222,21 +219,3 @@ class StorageTestBase(unittest.TestCase):
for oid in expected_oids:
self.assert_(oid in oids)
return self._storage.lastTransaction()
def _commitVersion(self, src, dst):
t = transaction.Transaction()
t.note("commit %r to %r" % (src, dst))
self._storage.tpc_begin(t)
tid, oids = self._storage.commitVersion(src, dst, t)
self._storage.tpc_vote(t)
self._storage.tpc_finish(t)
return oids
def _abortVersion(self, ver):
t = transaction.Transaction()
t.note("abort %r" % ver)
self._storage.tpc_begin(t)
tid, oids = self._storage.abortVersion(ver, t)
self._storage.tpc_vote(t)
self._storage.tpc_finish(t)
return oids
......@@ -45,7 +45,7 @@ undo(): how's that handled?
Methods that have nothing to do with committing/non-committing:
load(), loadSerial(), getName(), getSize(), __len__(), history(),
undoLog(), modifiedInVersion(), versionEmpty(), versions(), pack().
undoLog(), pack().
Specific questions:
......@@ -65,7 +65,6 @@ tested? Is it a general restriction?
from transaction import Transaction
from ZODB.POSException import StorageTransactionError
VERSION = "testversion"
OID = "\000" * 8
SERIALNO = "\000" * 8
TID = "\000" * 8
......@@ -84,35 +83,6 @@ class SynchronizedStorage:
self.assertRaises(StorageTransactionError, callable, *args)
self._storage.tpc_abort(t)
def __supportsVersions(self):
    """Return whether the storage under test implements the optional
    version API (i.e. has a callable ``supportsVersions`` returning
    a true value)."""
    marker = object()
    probe = getattr(self._storage, 'supportsVersions', marker)
    if probe is marker:
        # The storage doesn't implement the optional API at all.
        return False
    return probe()
def checkAbortVersionNotCommitting(self):
    # abortVersion() outside an active two-phase commit must be
    # rejected; only meaningful for storages that support versions.
    if self.__supportsVersions():
        self.verifyNotCommitting(self._storage.abortVersion,
                                 VERSION, Transaction())
def checkAbortVersionWrongTrans(self):
    # abortVersion() with a transaction other than the one passed to
    # tpc_begin() must be rejected; versions-only storages.
    if self.__supportsVersions():
        self.verifyWrongTrans(self._storage.abortVersion,
                              VERSION, Transaction())
def checkCommitVersionNotCommitting(self):
    # commitVersion() outside an active two-phase commit must be
    # rejected; only meaningful for storages that support versions.
    if self.__supportsVersions():
        self.verifyNotCommitting(self._storage.commitVersion,
                                 VERSION, "", Transaction())
def checkCommitVersionWrongTrans(self):
    # commitVersion() with a transaction other than the one passed to
    # tpc_begin() must be rejected; versions-only storages.
    if self.__supportsVersions():
        self.verifyWrongTrans(self._storage.commitVersion,
                              VERSION, "", Transaction())
def checkStoreNotCommitting(self):
    # store() outside an active two-phase commit must be rejected.
    self.verifyNotCommitting(self._storage.store,
                             OID, SERIALNO, "", "", Transaction())
......
##############################################################################
#
# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
# Check interactions between undo() and versions. Any storage that
# supports both undo() and versions must pass these tests.
import time
import transaction
from ZODB.serialize import referencesf
from ZODB.tests.MinPO import MinPO
from ZODB.tests.StorageTestBase import zodb_unpickle
from ZODB.tests.VersionStorage import loadEx
class TransactionalUndoVersionStorage:
    """Mixin of tests for interactions between undo() and versions.

    Any storage that supports both undo and versions must pass these
    tests.  Relies on helpers defined elsewhere in the test framework
    (_dostore, _undo, _commitVersion, _abortVersion) -- see
    StorageTestBase.
    """

    # `hook` is a callable used by the ZRS tests.
    def checkUndoInVersion(self, hook=None):
        """Undo of version stores, version commits and version aborts
        restores the expected version and non-version data."""
        eq = self.assertEqual
        unless = self.failUnless

        def check_objects(nonversiondata, versiondata):
            # Assert the current pickles of `oid` in `version` and in
            # the non-version ('') revision.
            data, revid = self._storage.load(oid, version)
            self.assertEqual(zodb_unpickle(data), MinPO(versiondata))
            data, revid = self._storage.load(oid, '')
            self.assertEqual(zodb_unpickle(data), MinPO(nonversiondata))

        oid = self._storage.new_oid()
        version = 'one'
        # One non-version revision (91), then two more in the version.
        revid_a = self._dostore(oid, data=MinPO(91))
        revid_b = self._dostore(oid, revid=revid_a, data=MinPO(92),
                                version=version)
        revid_c = self._dostore(oid, revid=revid_b, data=MinPO(93),
                                version=version)

        # Undo the most recent transaction (the MinPO(93) store).
        info = self._storage.undoInfo()
        self._undo(info[0]['id'], [oid])

        data, revid = self._storage.load(oid, '')
        # load() always returns the tid of the most recent reversion in 3.4,
        # so this old test of revid can't work anymore.
        ##eq(revid, revid_a)
        # But the data should be correct for the non-version revision.
        eq(zodb_unpickle(data), MinPO(91))
        data, revid = self._storage.load(oid, version)
        # The undo wrote a new, later revision in the version...
        unless(revid > revid_b and revid > revid_c)
        # ...whose data is back to the first version store.
        eq(zodb_unpickle(data), MinPO(92))

        # Now commit the version...
        oids = self._commitVersion(version, "")
        eq(len(oids), 1)
        eq(oids[0], oid)
        check_objects(92, 92)

        # ...and undo the commit
        info = self._storage.undoInfo()
        self._undo(info[0]['id'], [oid])
        check_objects(91, 92)

        if hook:
            # ZRS passes a hook that arranges to start a secondary at this
            # point in the test.
            hook()

        # Now abort the version.
        oids = self._abortVersion(version)
        assert len(oids) == 1
        assert oids[0] == oid
        check_objects(91, 91)

        # Now undo the abort.
        info = self._storage.undoInfo()
        self._undo(info[0]['id'], [oid])
        # And the object should be back in versions 'one' and ''.
        check_objects(91, 92)

    def checkUndoCommitVersion(self):
        """Undoing a version commit restores the pre-commit non-version
        data while the version keeps its last data."""
        def load_value(oid, version=''):
            # Current MinPO value of `oid` as seen from `version`.
            data, revid = self._storage.load(oid, version)
            return zodb_unpickle(data).value

        # create a bunch of packable transactions
        oid = self._storage.new_oid()
        revid = '\000' * 8
        for i in range(4):
            revid = self._dostore(oid, revid, description='packable%d' % i)
        # Remember a pack time *before* the version work below.
        pt = time.time()
        time.sleep(1)
        oid1 = self._storage.new_oid()
        version = 'version'
        revid1 = self._dostore(oid1, data=MinPO(0), description='create1')
        revid2 = self._dostore(oid1, data=MinPO(1), revid=revid1,
                               version=version, description='version1')
        self._dostore(oid1, data=MinPO(2), revid=revid2,
                      version=version, description='version2')
        self._dostore(description='create2')

        # Commit the version by hand so we control the transaction
        # description that later shows up in undoInfo().
        t = transaction.Transaction()
        t.description = 'commit version'
        self._storage.tpc_begin(t)
        self._storage.commitVersion(version, '', t)
        self._storage.tpc_vote(t)
        self._storage.tpc_finish(t)

        info = self._storage.undoInfo()
        t_id = info[0]['id']

        # After the commit both views see the last version data.
        self.assertEqual(load_value(oid1), 2)
        self.assertEqual(load_value(oid1, version), 2)

        # Packing must not break the subsequent undo.
        self._storage.pack(pt, referencesf)

        self._undo(t_id, note="undo commit version")

        self.assertEqual(load_value(oid1), 0)
        self.assertEqual(load_value(oid1, version), 2)

        data, tid = self._storage.load(oid1, "")
        # After undoing the version commit, the non-version data
        # once again becomes the non-version data from 'create1'.
        self.assertEqual(tid, self._storage.lastTransaction())

        # The current version data comes from an undo record, which
        # means that it gets data via the backpointer but tid from the
        # current txn.
        data, tid, ver = loadEx(self._storage, oid1, version)
        self.assertEqual(ver, version)
        self.assertEqual(tid, self._storage.lastTransaction())

    def checkUndoAbortVersion(self):
        """Undoing a version abort re-creates the version; undoing that
        undo aborts it again."""
        def load_value(oid, version=''):
            # Current MinPO value of `oid` as seen from `version`.
            data, revid = self._storage.load(oid, version)
            return zodb_unpickle(data).value

        # create a bunch of packable transactions
        oid = self._storage.new_oid()
        revid = '\000' * 8
        for i in range(3):
            revid = self._dostore(oid, revid, description='packable%d' % i)
        # Remember a pack time *before* the version work below.
        pt = time.time()
        time.sleep(1)
        oid1 = self._storage.new_oid()
        version = 'version'
        revid1 = self._dostore(oid1, data=MinPO(0), description='create1')
        revid2 = self._dostore(oid1, data=MinPO(1), revid=revid1,
                               version=version, description='version1')
        self._dostore(oid1, data=MinPO(2), revid=revid2,
                      version=version, description='version2')
        self._dostore(description='create2')

        self._abortVersion(version)

        info = self._storage.undoInfo()
        t_id = info[0]['id']

        self.assertEqual(load_value(oid1), 0)
        # after abort, we should see non-version data
        self.assertEqual(load_value(oid1, version), 0)

        self._undo(t_id, note="undo abort version")

        self.assertEqual(load_value(oid1), 0)
        # the undo will re-create the version
        self.assertEqual(load_value(oid1, version), 2)

        info = self._storage.undoInfo()
        t_id = info[0]['id']

        # Pack first: the undo of the undo must survive a pack.
        self._storage.pack(pt, referencesf)

        self._undo(t_id, note="undo undo")

        # undo of undo will put us back where we started
        self.assertEqual(load_value(oid1), 0)
        # after abort, we should see non-version data
        self.assertEqual(load_value(oid1, version), 0)
##############################################################################
#
# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Run the version related tests for a storage.
Any storage that supports versions should be able to pass all these tests.
"""
import time, warnings
import transaction
from transaction import Transaction
from ZODB import POSException
from ZODB.serialize import referencesf
from ZODB.tests.MinPO import MinPO
from ZODB.tests.StorageTestBase import zodb_unpickle, snooze
from ZODB import DB
def loadEx(storage, oid, version):
    """Load `oid` as the old loadEx() storage API would.

    Returns a (data, serial, version) triple.  The version slot names
    the version the data actually came from: the requested `version`
    when the object was modified in it, otherwise '' (the non-version
    revision is returned instead).
    """
    modified_in = storage.modifiedInVersion(oid)
    effective = version if modified_in == version else ''
    data, serial = storage.load(oid, effective)
    return data, serial, effective
# Suppress the DeprecationWarning emitted for version operations when
# they originate from this module: versions are exactly what these
# tests exercise.
warnings.filterwarnings(
    'ignore', message='Versions are deprecated', module=__name__)
class VersionStorage:
def checkCommitVersionSerialno(self):
    """Committing a version must assign a brand-new serial number.

    After commitVersion(), the tid of the non-version revision must
    differ from both the original non-version tid and the version tid.
    """
    oid = self._storage.new_oid()
    revid1 = self._dostore(oid, data=MinPO(12))
    revid2 = self._dostore(oid, revid=revid1, data=MinPO(13),
                           version="version")
    oids = self._commitVersion("version", "")
    self.assertEqual([oid], oids)

    data, revid3 = self._storage.load(oid, "")
    # use repr() to avoid getting binary data in a traceback on error.
    # (repr() replaces the Python 2-only backquote syntax, which was
    # removed in Python 3; the two are equivalent in Python 2.)
    self.assertNotEqual(repr(revid1), repr(revid3))
    self.assertNotEqual(repr(revid2), repr(revid3))
def checkAbortVersionSerialno(self):
    """Aborting a version must assign a brand-new serial number and
    leave the reverted non-version data current."""
    oid = self._storage.new_oid()
    revid1 = self._dostore(oid, data=MinPO(12))
    revid2 = self._dostore(oid, revid=revid1, data=MinPO(13),
                           version="version")
    # Sanity check: the version currently holds the MinPO(13) revision.
    data, tid, ver = loadEx(self._storage, oid, "version")
    self.assertEqual(revid2, tid)
    self.assertEqual(zodb_unpickle(data), MinPO(13))
    oids = self._abortVersion("version")
    self.assertEqual([oid], oids)

    data, revid3 = self._storage.load(oid, "")
    # The abort wrote a fresh revision: its tid differs from both of
    # the earlier tids.
    self.assertNotEqual(revid1, revid3)
    self.assertNotEqual(revid2, revid3)
    # The reverted (pre-version) data is now current.
    data, tid = self._storage.load(oid, "")
    self.assertEqual(revid3, tid)
    self.assertEqual(zodb_unpickle(data), MinPO(12))
    self.assertEqual(tid, self._storage.lastTransaction())
def checkVersionedStoreAndLoad(self):
    """Data stored in a version is visible there but not on the trunk."""
    eq = self.assertEqual
    # Store a couple of non-version revisions of the object
    oid = self._storage.new_oid()
    revid = self._dostore(oid, data=MinPO(11))
    revid1 = self._dostore(oid, revid=revid, data=MinPO(12))
    # And now store some new revisions in a version
    version = 'test-version'
    revid = self._dostore(oid, revid=revid1, data=MinPO(13),
                          version=version)
    revid = self._dostore(oid, revid=revid, data=MinPO(14),
                          version=version)
    revid2 = self._dostore(oid, revid=revid, data=MinPO(15),
                           version=version)
    # Now read back the object in both the non-version and version and
    # make sure the values jive.
    data, revid = self._storage.load(oid, '')
    eq(zodb_unpickle(data), MinPO(12))
    data, vrevid = self._storage.load(oid, version)
    eq(zodb_unpickle(data), MinPO(15))
    # getTid() is optional; when present it must report the newest of
    # the two tids.
    if hasattr(self._storage, 'getTid'):
        s = self._storage.getTid(oid)
        eq(s, max(revid, vrevid))
    data, tid, ver = loadEx(self._storage, oid, version)
    eq(zodb_unpickle(data), MinPO(15))
    eq(tid, revid2)
    # A version the object was never modified in falls back to the
    # trunk data, but still reports the newest tid.
    data, tid, ver = loadEx(self._storage, oid, "other version")
    eq(zodb_unpickle(data), MinPO(12))
    eq(tid, revid2)
    # loadSerial returns non-version data
    try:
        data = self._storage.loadSerial(oid, revid)
        eq(zodb_unpickle(data), MinPO(12))
        data = self._storage.loadSerial(oid, revid2)
        eq(zodb_unpickle(data), MinPO(12))
    except POSException.Unsupported:
        # loadSerial() is optional; storages may raise Unsupported.
        pass
def checkVersionedLoadErrors(self):
    """load() raises KeyError for unknown oids; loading from an
    unknown version falls back to the non-version data."""
    oid = self._storage.new_oid()
    version = 'test-version'
    revid = self._dostore(oid, data=MinPO(11))
    revid = self._dostore(oid, revid=revid, data=MinPO(12),
                          version=version)
    # Try to load a bogus oid
    self.assertRaises(KeyError,
                      self._storage.load,
                      self._storage.new_oid(), '')
    # A version the object was never stored in yields the non-version
    # revision, not an error.
    data, revid = self._storage.load(oid, 'bogus')
    self.assertEqual(zodb_unpickle(data), MinPO(11))
def checkVersionLock(self):
    """An object modified in one version is locked against stores
    from any other version."""
    oid = self._storage.new_oid()
    revid = self._dostore(oid, data=MinPO(11))
    version = 'test-version'
    revid = self._dostore(oid, revid=revid, data=MinPO(12),
                          version=version)
    # Storing the same object under a different version must fail.
    self.assertRaises(POSException.VersionLockError,
                      self._dostore,
                      oid, revid=revid, data=MinPO(14),
                      version='another-version')
def checkVersionEmpty(self):
    """versionEmpty() is true until something is stored in the version."""
    # Before we store anything, these versions ought to be empty
    version = 'test-version'
    self.failUnless(self._storage.versionEmpty(version))
    # Now store some objects
    oid = self._storage.new_oid()
    revid = self._dostore(oid, data=MinPO(11))
    revid = self._dostore(oid, revid=revid, data=MinPO(12))
    revid = self._dostore(oid, revid=revid, data=MinPO(13),
                          version=version)
    revid = self._dostore(oid, revid=revid, data=MinPO(14),
                          version=version)
    # 'test-version' now has revisions, so it must no longer be empty
    self.failUnless(not self._storage.versionEmpty(version))
    # But this non-existant version should be empty
    self.failUnless(self._storage.versionEmpty('bogus'))
def checkVersions(self):
    """versions() lists active versions; the `max` argument caps the
    number of versions returned."""
    unless = self.failUnless
    # Store some objects in the non-version
    oid1 = self._storage.new_oid()
    oid2 = self._storage.new_oid()
    oid3 = self._storage.new_oid()
    revid1 = self._dostore(oid1, data=MinPO(11))
    revid2 = self._dostore(oid2, data=MinPO(12))
    revid3 = self._dostore(oid3, data=MinPO(13))
    # Now create some new versions
    revid1 = self._dostore(oid1, revid=revid1, data=MinPO(14),
                           version='one')
    revid2 = self._dostore(oid2, revid=revid2, data=MinPO(15),
                           version='two')
    revid3 = self._dostore(oid3, revid=revid3, data=MinPO(16),
                           version='three')
    # Ask for the versions
    versions = self._storage.versions()
    unless('one' in versions)
    unless('two' in versions)
    unless('three' in versions)
    # Now flex the `max' argument
    versions = self._storage.versions(1)
    self.assertEqual(len(versions), 1)
    # Which version comes back is unspecified; it just has to be one
    # of the three.
    unless('one' in versions or 'two' in versions or 'three' in versions)
def _setup_version(self, version='test-version'):
    """Create an object with three non-version revisions (49, 50, 51)
    followed by three revisions in `version` (52, 53, 54).

    Returns (oid, version).  Afterwards the non-version data is
    MinPO(51) and the version data is MinPO(54).
    """
    # Store some revisions in the non-version
    oid = self._storage.new_oid()
    revid = self._dostore(oid, data=MinPO(49))
    revid = self._dostore(oid, revid=revid, data=MinPO(50))
    revid = self._dostore(oid, revid=revid, data=MinPO(51))
    # Now do some stores in a version
    revid = self._dostore(oid, revid=revid, data=MinPO(52),
                          version=version)
    revid = self._dostore(oid, revid=revid, data=MinPO(53),
                          version=version)
    revid = self._dostore(oid, revid=revid, data=MinPO(54),
                          version=version)
    return oid, version
def checkAbortVersion(self):
    """Aborting a version reverts its objects to the non-version data."""
    eq = self.assertEqual
    oid, version = self._setup_version()

    # Not sure I can write a test for getTid() in the
    # presence of aborted versions, because FileStorage and
    # Berkeley storage give a different answer. I think Berkeley
    # is right and FS is wrong.

    oids = self._abortVersion(version)
    eq(len(oids), 1)
    eq(oids[0], oid)
    data, revid = self._storage.load(oid, '')
    # Back to the last non-version revision from _setup_version().
    eq(zodb_unpickle(data), MinPO(51))
def checkAbortVersionNonCurrent(self):
    """loadBefore() must return the abort-transaction's data and tid
    once a newer revision makes that transaction non-current."""
    # Make sure the non-current serial number is correct
    # after a version is aborted.
    oid, version = self._setup_version()
    self._abortVersion(version)
    data, tid, ver = loadEx(self._storage, oid, "")
    # write a new revision of oid so that the aborted-version txn
    # is not current
    self._dostore(oid, revid=tid, data=MinPO(17))
    ltid = self._storage.lastTransaction()
    ncdata, ncstart, end = self._storage.loadBefore(oid, ltid)
    # The non-current record must match what the abort wrote.
    self.assertEqual(data, ncdata)
    self.assertEqual(tid, ncstart)
def checkAbortVersionErrors(self):
    """Aborting the empty ('') version is an error; aborting a real
    version in the same transaction then succeeds."""
    eq = self.assertEqual
    oid, version = self._setup_version()
    t = Transaction()
    self._storage.tpc_begin(t)
    # Trying to abort the empty (non-) version must fail
    self.assertRaises(POSException.VersionError,
                      self._storage.abortVersion,
                      '', t)
    # But now we really try to abort the version
    tid, oids = self._storage.abortVersion(version, t)
    self._storage.tpc_vote(t)
    self._storage.tpc_finish(t)
    eq(len(oids), 1)
    eq(oids[0], oid)
    data, revid = self._storage.load(oid, '')
    eq(zodb_unpickle(data), MinPO(51))
def checkCommitVersionErrors(self):
    """Committing a version to itself raises VersionCommitError."""
    eq = self.assertEqual
    oid1, version1 = self._setup_version('one')
    data, revid1 = self._storage.load(oid1, version1)
    eq(zodb_unpickle(data), MinPO(54))
    t = Transaction()
    self._storage.tpc_begin(t)
    try:
        self.assertRaises(POSException.VersionCommitError,
                          self._storage.commitVersion,
                          'one', 'one', t)
    finally:
        # Always release the commit lock, even if the assertion fails.
        self._storage.tpc_abort(t)
def checkNewSerialOnCommitVersionToVersion(self):
    """Committing one version into another assigns a brand-new tid,
    distinct from both the source-version and non-version tids."""
    oid, version = self._setup_version()
    data, vtid = self._storage.load(oid, version)
    data, ntid = self._storage.load(oid, '')

    version2 = 'test version 2'
    self._commitVersion(version, version2)
    data, tid = self._storage.load(oid, version2)

    self.failUnless(tid != vtid and tid != ntid,
                    "New tid, %r, should be different from the old "
                    "version, %r, and non-version, %r, tids."
                    % (tid, vtid, ntid))
def checkModifyAfterAbortVersion(self):
    """The object remains normally writable on the trunk after its
    version has been aborted."""
    eq = self.assertEqual
    oid, version = self._setup_version()
    self._abortVersion(version)
    data, revid = self._storage.load(oid, '')
    # And modify it a few times
    revid = self._dostore(oid, revid=revid, data=MinPO(52))
    revid = self._dostore(oid, revid=revid, data=MinPO(53))
    revid = self._dostore(oid, revid=revid, data=MinPO(54))
    data, newrevid = self._storage.load(oid, '')
    eq(newrevid, revid)
    eq(zodb_unpickle(data), MinPO(54))
def checkCommitToNonVersion(self):
    """Committing a version to '' makes its data the trunk data."""
    eq = self.assertEqual
    oid, version = self._setup_version()
    data, revid = self._storage.load(oid, version)
    eq(zodb_unpickle(data), MinPO(54))
    data, revid = self._storage.load(oid, '')
    eq(zodb_unpickle(data), MinPO(51))
    self._commitVersion(version, '')
    # The trunk now holds the last version revision.
    data, revid = self._storage.load(oid, '')
    eq(zodb_unpickle(data), MinPO(54))
def checkCommitToOtherVersion(self):
    """Committing version1 into version2 moves its objects: they
    become visible in version2 and drop out of version1."""
    eq = self.assertEqual
    oid1, version1 = self._setup_version('one')

    data, revid1 = self._storage.load(oid1, version1)
    eq(zodb_unpickle(data), MinPO(54))
    oid2, version2 = self._setup_version('two')
    data, revid2 = self._storage.load(oid2, version2)
    eq(zodb_unpickle(data), MinPO(54))

    # make sure we see the non-version data when appropriate
    data, revid2 = self._storage.load(oid1, version2)
    eq(zodb_unpickle(data), MinPO(51))
    data, revid2 = self._storage.load(oid2, version1)
    eq(zodb_unpickle(data), MinPO(51))
    data, revid2 = self._storage.load(oid1, '')
    eq(zodb_unpickle(data), MinPO(51))

    # Okay, now let's commit object1 to version2
    oids = self._commitVersion(version1, version2)
    eq(len(oids), 1)
    eq(oids[0], oid1)
    data, revid = self._storage.load(oid1, version2)
    eq(zodb_unpickle(data), MinPO(54))
    data, revid = self._storage.load(oid2, version2)
    eq(zodb_unpickle(data), MinPO(54))

    # an object can only exist in one version, so a load from
    # version1 should now give the non-version data
    data, revid2 = self._storage.load(oid1, version1)
    eq(zodb_unpickle(data), MinPO(51))

    # as should a version that has never been used
    data, revid2 = self._storage.load(oid1, 'bela lugosi')
    eq(zodb_unpickle(data), MinPO(51))
def checkAbortOneVersionCommitTheOther(self):
    """Aborting one version must not disturb another version, which
    can then still be committed to the trunk."""
    eq = self.assertEqual
    oid1, version1 = self._setup_version('one')

    data, revid1 = self._storage.load(oid1, version1)
    eq(zodb_unpickle(data), MinPO(54))
    oid2, version2 = self._setup_version('two')
    data, revid2 = self._storage.load(oid2, version2)
    eq(zodb_unpickle(data), MinPO(54))

    # Let's make sure we can't get object1 in version2
    data, revid2 = self._storage.load(oid1, version2)
    eq(zodb_unpickle(data), MinPO(51))

    oids = self._abortVersion(version1)
    eq(len(oids), 1)
    eq(oids[0], oid1)
    # object1 reverted to its non-version data...
    data, revid = self._storage.load(oid1, '')
    eq(zodb_unpickle(data), MinPO(51))
    data, revid = self._storage.load(oid1, '')
    eq(zodb_unpickle(data), MinPO(51))
    data, revid = self._storage.load(oid1, '')
    eq(zodb_unpickle(data), MinPO(51))
    # ...while object2 still has trunk data on the trunk and version
    # data in version2.
    data, revid = self._storage.load(oid2, '')
    eq(zodb_unpickle(data), MinPO(51))
    data, revid = self._storage.load(oid2, version2)
    eq(zodb_unpickle(data), MinPO(54))

    # Okay, now let's commit version2 back to the trunk
    oids = self._commitVersion(version2, '')
    eq(len(oids), 1)
    eq(oids[0], oid2)
    # object1 is unaffected by the commit of version2
    data, revid = self._storage.load(oid1, '')
    eq(zodb_unpickle(data), MinPO(51))
    # But the trunk should be up to date now
    data, revid = self._storage.load(oid2, '')
    eq(zodb_unpickle(data), MinPO(54))
    data, revid = self._storage.load(oid2, version2)
    eq(zodb_unpickle(data), MinPO(54))
oid = self._storage.new_oid()
revid = self._dostore(oid, revid=revid, data=MinPO(54), version='one')
self.assertRaises(KeyError,
self._storage.load, oid, '')
self.assertRaises(KeyError,
self._storage.load, oid, 'two')
def checkCreateObjectInVersionWithAbort(self):
    """An object created inside a version can be aborted along with
    the version; abortVersion() reports it among the affected oids."""
    oid = self._storage.new_oid()
    revid = self._dostore(oid, data=21, version="one")
    revid = self._dostore(oid, revid=revid, data=23, version='one')
    revid = self._dostore(oid, revid=revid, data=34, version='one')
    # Now abort the version and the creation
    t = Transaction()
    self._storage.tpc_begin(t)
    tid, oids = self._storage.abortVersion('one', t)
    self._storage.tpc_vote(t)
    self._storage.tpc_finish(t)
    self.assertEqual(oids, [oid])
def checkLoadBeforeVersion(self):
    """loadBefore() must ignore version data and return the
    non-version revision."""
    eq = self.assertEqual
    oid = self._storage.new_oid()
    revid1 = self._dostore(oid, data=1)
    revid2 = self._dostore(oid, data=2, revid=revid1, version="kobe")
    revid3 = self._dostore(oid, data=3, revid=revid2, version="kobe")
    data, start_tid, end_tid = self._storage.loadBefore(oid, revid3)
    eq(zodb_unpickle(data), MinPO(1))
    eq(start_tid, revid1)
    # end_tid is None: revid1 is still the current non-version revision.
    eq(end_tid, None)
......@@ -145,9 +145,6 @@ class UserMethodTests(unittest.TestCase):
# add isn't tested here, because there are a bunch of traditional
# unit tests for it.
# The version tests would require a storage that supports versions
# which is a bit more work.
def test_root(self):
r"""doctest of root() method
......@@ -547,7 +544,6 @@ class StubStorage:
Only one concurrent transaction is supported.
Voting is not supported.
Versions are not supported.
Inspect self._stored and self._finished to see how the storage has been
used during a unit test. Whenever an object is stored in the store()
......@@ -607,7 +603,7 @@ class StubStorage:
self._transdata.clear()
self._transstored = []
def load(self, oid, version):
def load(self, oid, version=''):
if version != '':
raise TypeError('StubStorage does not support versions.')
return self._data[oid]
......
......@@ -17,12 +17,11 @@ import transaction
from ZODB.DB import DB
import ZODB.utils
import ZODB.DemoStorage
from ZODB.tests import StorageTestBase, BasicStorage, VersionStorage
from ZODB.tests import StorageTestBase, BasicStorage
from ZODB.tests import Synchronization
class DemoStorageTests(StorageTestBase.StorageTestBase,
BasicStorage.BasicStorage,
VersionStorage.VersionStorage,
Synchronization.SynchronizedStorage,
):
......@@ -38,25 +37,6 @@ class DemoStorageTests(StorageTestBase.StorageTestBase,
# have this limit, so we inhibit this test here.
pass
def checkAbortVersionNonCurrent(self):
# TODO: Need to implement a real loadBefore for DemoStorage?
pass
def checkLoadBeforeVersion(self):
# TODO: Need to implement a real loadBefore for DemoStorage?
pass
# the next three pack tests depend on undo
def checkPackVersionReachable(self):
pass
def checkPackVersions(self):
pass
def checkPackVersionsInPast(self):
pass
def checkLoadDelegation(self):
# Minimal test of loadEX w/o version -- ironically
db = DB(self._storage) # creates object 0. :)
......
......@@ -19,7 +19,6 @@ from ZODB import POSException
from ZODB import DB
from ZODB.tests import StorageTestBase, BasicStorage, TransactionalUndoStorage
from ZODB.tests import VersionStorage, TransactionalUndoVersionStorage
from ZODB.tests import PackableStorage, Synchronization, ConflictResolution
from ZODB.tests import HistoryStorage, IteratorStorage, Corruption
from ZODB.tests import RevisionStorage, PersistentStorage, MTStorage
......@@ -44,8 +43,6 @@ class FileStorageTests(
BasicStorage.BasicStorage,
TransactionalUndoStorage.TransactionalUndoStorage,
RevisionStorage.RevisionStorage,
VersionStorage.VersionStorage,
TransactionalUndoVersionStorage.TransactionalUndoVersionStorage,
PackableStorage.PackableStorage,
PackableStorage.PackableUndoStorage,
Synchronization.SynchronizedStorage,
......@@ -182,45 +179,6 @@ class FileStorageTests(
self.open()
self.assertEqual(self._storage._saved, 1)
def check_index_oid_ignored(self):
    """Reopening must recompute the max oid, ignoring the cached
    'oid' value in the .index file."""
    # Prior to ZODB 3.2.6, the 'oid' value stored in the .index file
    # was believed.  But there were cases where adding larger oids
    # didn't update the FileStorage ._oid attribute -- the restore()
    # method in particular didn't update it, and that's about the only
    # method copyTransactionsFrom() uses.  A database copy created that
    # way then stored an 'oid' of z64 in the .index file.  This created
    # torturous problems, as when that file was opened, "new" oids got
    # generated starting over from 0 again.
    # Now the cached 'oid' value is ignored:  verify that this is so.
    import cPickle as pickle
    from ZODB.utils import z64

    # Create some data.
    db = DB(self._storage)
    conn = db.open()
    conn.root()['xyz'] = 1
    transaction.commit()
    true_max_oid = self._storage._oid

    # Save away the index, and poke in a bad 'oid' value by hand.
    db.close()
    f = open('FileStorageTests.fs.index', 'r+b')
    p = pickle.Unpickler(f)
    data = p.load()
    saved_oid = data['oid']
    self.assertEqual(true_max_oid, saved_oid)
    # Deliberately corrupt the cached max-oid value.
    data['oid'] = z64

    f.seek(0)
    f.truncate()
    p = pickle.Pickler(f, 1)
    p.dump(data)
    f.close()

    # Verify that we get the correct oid again when we reopen, despite
    # that we stored nonsense in the .index file's 'oid'.
    self.open()
    self.assertEqual(self._storage._oid, true_max_oid)
# This would make the unit tests too slow
# check_save_after_load_that_worked_hard(self)
......
......@@ -25,11 +25,6 @@ from persistent import Persistent
from persistent.mapping import PersistentMapping
import transaction
# deprecated39 remove when versions go away
warnings.filterwarnings("ignore",
"Versions are deprecated",
DeprecationWarning, __name__)
class P(Persistent):
pass
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment