Commit 3e5f106f authored by Barry Warsaw's avatar Barry Warsaw

_zaprevision(): Watch out for the key being missing from the metadata

table; use a safer way of pulling out the lrevid.

pack(): The previous logic was broken, now we first gather up all the
oids of the objects affected by packable transactions, then we cruise
through the metadata records of each of those in a separate loop.

Watch out for the call to c.set_range() on the txnoids table to raise
a DBNotFoundError.  This just means we're trying to access a time
stamp past the last one in this table, so just use c.last() if that
happens, and pack all transactions.

When cruising through the txnoids table, stop when you see the first
PROTECTED_TRANSACTION.  That means that we've already packed back to
this transaction (Jim, is this right?  Would we ever want to re-pack a
transaction that's already been packed?)
parent f983c923
...@@ -4,7 +4,7 @@ See Minimal.py for an implementation of Berkeley storage that does not support ...@@ -4,7 +4,7 @@ See Minimal.py for an implementation of Berkeley storage that does not support
undo or versioning. undo or versioning.
""" """
# $Revision: 1.19 $ # $Revision: 1.20 $
__version__ = '0.1' __version__ = '0.1'
import struct import struct
...@@ -846,7 +846,10 @@ class Full(BerkeleyBase): ...@@ -846,7 +846,10 @@ class Full(BerkeleyBase):
# perform cascading decrefs on the referenced objects. # perform cascading decrefs on the referenced objects.
# #
# We need the lrevid which points to the pickle for this revision... # We need the lrevid which points to the pickle for this revision...
lrevid = self._metadata.get(key)[16:24] rec = self._metadata.get(key)
if rec is None:
return
lrevid = rec[1][16:24]
# ...and now delete the metadata record for this object revision # ...and now delete the metadata record for this object revision
self._metadata.delete(key) self._metadata.delete(key)
# Decref the reference count of the pickle pointed to by oid+lrevid. # Decref the reference count of the pickle pointed to by oid+lrevid.
...@@ -924,6 +927,8 @@ class Full(BerkeleyBase): ...@@ -924,6 +927,8 @@ class Full(BerkeleyBase):
c.close() c.close()
def pack(self, t, referencesf): def pack(self, t, referencesf):
# BAW: This doesn't play nicely if you enable the `debugging revids'
#
# t is a TimeTime, or time float, convert this to a TimeStamp object, # t is a TimeTime, or time float, convert this to a TimeStamp object,
# using an algorithm similar to what's used in FileStorage. The # using an algorithm similar to what's used in FileStorage. The
# TimeStamp can then be used as a key in the txnMetadata table, since # TimeStamp can then be used as a key in the txnMetadata table, since
...@@ -932,12 +937,16 @@ class Full(BerkeleyBase): ...@@ -932,12 +937,16 @@ class Full(BerkeleyBase):
self._lock_acquire() self._lock_acquire()
c = None c = None
tidmarks = {} tidmarks = {}
oids = {}
try: try:
# Figure out when to pack to. We happen to know that our # Figure out when to pack to. We happen to know that our
# transaction ids are really timestamps. # transaction ids are really timestamps.
c = self._txnoids.cursor() c = self._txnoids.cursor()
# Need to use the repr of the TimeStamp so we get a string # Need to use the repr of the TimeStamp so we get a string
rec = c.set_range(`t0`) try:
rec = c.set_range(`t0`)
except db.DBNotFoundError:
rec = c.last()
while rec: while rec:
tid, oid = rec tid, oid = rec
rec = c.prev() rec = c.prev()
...@@ -945,16 +954,33 @@ class Full(BerkeleyBase): ...@@ -945,16 +954,33 @@ class Full(BerkeleyBase):
# pack, so that undo will not create a temporal anomaly. # pack, so that undo will not create a temporal anomaly.
if not tidmarks.has_key(tid): if not tidmarks.has_key(tid):
meta = self._txnMetadata[tid] meta = self._txnMetadata[tid]
# Has this transaction already been packed? If so, we can
# stop here... I think!
if meta[0] == PROTECTED_TRANSACTION:
break
self._txnMetadata[tid] = PROTECTED_TRANSACTION + meta[1:] self._txnMetadata[tid] = PROTECTED_TRANSACTION + meta[1:]
tidmarks[tid] = 1 tidmarks[tid] = 1
# Find out if the oid is current, if so skip it. The oid # For now, just remember which objects are touched by the
# record could be missing from serials if it's already been # packable
# garbage collected. oids[oid] = 1
revid = self._serials.get(oid) # Now look at every object revision metadata record for the
if revid in (None, tid): # objects that have been touched in the packable transactions. If
continue # the metadata record points at the current revision of the
self._zaprevision(oid+revid, referencesf) # object, ignore it, otherwise reclaim it.
c.close()
c = self._metadata.cursor()
for oid in oids.keys():
current = self._serials[oid]
rec = c.set_range(oid)
while rec:
key, data = rec
rec = c.next()
if key[8:] == current:
continue
self._zaprevision(key, referencesf)
finally: finally:
if c:
c.close()
self._lock_release() self._lock_release()
# GCable interface, for cyclic garbage collection # GCable interface, for cyclic garbage collection
......
...@@ -4,7 +4,7 @@ See Minimal.py for an implementation of Berkeley storage that does not support ...@@ -4,7 +4,7 @@ See Minimal.py for an implementation of Berkeley storage that does not support
undo or versioning. undo or versioning.
""" """
# $Revision: 1.19 $ # $Revision: 1.20 $
__version__ = '0.1' __version__ = '0.1'
import struct import struct
...@@ -846,7 +846,10 @@ class Full(BerkeleyBase): ...@@ -846,7 +846,10 @@ class Full(BerkeleyBase):
# perform cascading decrefs on the referenced objects. # perform cascading decrefs on the referenced objects.
# #
# We need the lrevid which points to the pickle for this revision... # We need the lrevid which points to the pickle for this revision...
lrevid = self._metadata.get(key)[16:24] rec = self._metadata.get(key)
if rec is None:
return
lrevid = rec[1][16:24]
# ...and now delete the metadata record for this object revision # ...and now delete the metadata record for this object revision
self._metadata.delete(key) self._metadata.delete(key)
# Decref the reference count of the pickle pointed to by oid+lrevid. # Decref the reference count of the pickle pointed to by oid+lrevid.
...@@ -924,6 +927,8 @@ class Full(BerkeleyBase): ...@@ -924,6 +927,8 @@ class Full(BerkeleyBase):
c.close() c.close()
def pack(self, t, referencesf): def pack(self, t, referencesf):
# BAW: This doesn't play nicely if you enable the `debugging revids'
#
# t is a TimeTime, or time float, convert this to a TimeStamp object, # t is a TimeTime, or time float, convert this to a TimeStamp object,
# using an algorithm similar to what's used in FileStorage. The # using an algorithm similar to what's used in FileStorage. The
# TimeStamp can then be used as a key in the txnMetadata table, since # TimeStamp can then be used as a key in the txnMetadata table, since
...@@ -932,12 +937,16 @@ class Full(BerkeleyBase): ...@@ -932,12 +937,16 @@ class Full(BerkeleyBase):
self._lock_acquire() self._lock_acquire()
c = None c = None
tidmarks = {} tidmarks = {}
oids = {}
try: try:
# Figure out when to pack to. We happen to know that our # Figure out when to pack to. We happen to know that our
# transaction ids are really timestamps. # transaction ids are really timestamps.
c = self._txnoids.cursor() c = self._txnoids.cursor()
# Need to use the repr of the TimeStamp so we get a string # Need to use the repr of the TimeStamp so we get a string
rec = c.set_range(`t0`) try:
rec = c.set_range(`t0`)
except db.DBNotFoundError:
rec = c.last()
while rec: while rec:
tid, oid = rec tid, oid = rec
rec = c.prev() rec = c.prev()
...@@ -945,16 +954,33 @@ class Full(BerkeleyBase): ...@@ -945,16 +954,33 @@ class Full(BerkeleyBase):
# pack, so that undo will not create a temporal anomaly. # pack, so that undo will not create a temporal anomaly.
if not tidmarks.has_key(tid): if not tidmarks.has_key(tid):
meta = self._txnMetadata[tid] meta = self._txnMetadata[tid]
# Has this transaction already been packed? If so, we can
# stop here... I think!
if meta[0] == PROTECTED_TRANSACTION:
break
self._txnMetadata[tid] = PROTECTED_TRANSACTION + meta[1:] self._txnMetadata[tid] = PROTECTED_TRANSACTION + meta[1:]
tidmarks[tid] = 1 tidmarks[tid] = 1
# Find out if the oid is current, if so skip it. The oid # For now, just remember which objects are touched by the
# record could be missing from serials if it's already been # packable
# garbage collected. oids[oid] = 1
revid = self._serials.get(oid) # Now look at every object revision metadata record for the
if revid in (None, tid): # objects that have been touched in the packable transactions. If
continue # the metadata record points at the current revision of the
self._zaprevision(oid+revid, referencesf) # object, ignore it, otherwise reclaim it.
c.close()
c = self._metadata.cursor()
for oid in oids.keys():
current = self._serials[oid]
rec = c.set_range(oid)
while rec:
key, data = rec
rec = c.next()
if key[8:] == current:
continue
self._zaprevision(key, referencesf)
finally: finally:
if c:
c.close()
self._lock_release() self._lock_release()
# GCable interface, for cyclic garbage collection # GCable interface, for cyclic garbage collection
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment