Commit 6e5baffd authored by Michael Howitz

Lint the code.

Add support for Python 3.9 and 3.10.
parent 1f3a0d62
......@@ -2,7 +2,7 @@
Change History
================
5.6.1 (unreleased)
5.7.0 (unreleased)
==================
- Fix ``TypeError: can't concat str to bytes`` when running the ``fsoids.py`` script with Python 3.
......@@ -24,6 +24,8 @@
- Fix deprecation warnings occurring on Python 3.10.
- Add support for Python 3.9 and 3.10.
5.6.0 (2020-06-11)
==================
......
......@@ -56,7 +56,7 @@ master_doc = 'index'
# General information about the project.
project = 'ZODB'
copyright = '2009-2020, Zope Foundation'
copyright = '2009-2021, Zope Foundation'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
......
......@@ -13,7 +13,7 @@
##############################################################################
from setuptools import setup, find_packages
version = '5.6.1.dev0'
version = '5.7.0.dev0'
classifiers = """\
Intended Audience :: Developers
......@@ -26,6 +26,8 @@ Programming Language :: Python :: 3.5
Programming Language :: Python :: 3.6
Programming Language :: Python :: 3.7
Programming Language :: Python :: 3.8
Programming Language :: Python :: 3.9
Programming Language :: Python :: 3.10
Programming Language :: Python :: Implementation :: CPython
Programming Language :: Python :: Implementation :: PyPy
Topic :: Database
......@@ -35,10 +37,12 @@ Operating System :: Unix
Framework :: ZODB
"""
def read(path):
with open(path) as f:
return f.read()
long_description = read("README.rst") + "\n\n" + read("CHANGES.rst")
tests_require = [
......@@ -67,6 +71,13 @@ setup(
tests_require=tests_require,
extras_require={
'test': tests_require,
'docs': [
'Sphinx',
'ZODB',
'j1m.sphinxautozconfig',
'sphinx_rtd_theme',
'sphinxcontrib_zopeext',
]
},
install_requires=[
'persistent >= 4.4.0',
......
......@@ -72,7 +72,7 @@ class ActivityMonitor(object):
'loads': 0,
'stores': 0,
'connections': 0,
})
})
div = res[0]
div_end = div['end']
......
......@@ -20,7 +20,6 @@ from __future__ import print_function
import time
import logging
import sys
from struct import pack as _structpack, unpack as _structunpack
import zope.interface
......@@ -35,6 +34,7 @@ from ._compat import py2_hasattr
log = logging.getLogger("ZODB.BaseStorage")
class BaseStorage(UndoLogCompatible):
"""Base class that supports storage implementations.
......@@ -74,12 +74,12 @@ class BaseStorage(UndoLogCompatible):
perhaps other things. It is always held when load() is called, so
presumably the load() implementation should also acquire the lock.
"""
_transaction=None # Transaction that is being committed
_tstatus=' ' # Transaction status, used for copying data
_transaction = None # Transaction that is being committed
_tstatus = ' ' # Transaction status, used for copying data
_is_read_only = False
def __init__(self, name, base=None):
self.__name__= name
self.__name__ = name
log.debug("create storage %s", self.__name__)
# Allocate locks:
......@@ -93,7 +93,7 @@ class BaseStorage(UndoLogCompatible):
self._commit_lock_release = self._commit_lock.release
t = time.time()
t = self._ts = TimeStamp(*(time.gmtime(t)[:5] + (t%60,)))
t = self._ts = TimeStamp(*(time.gmtime(t)[:5] + (t % 60,)))
self._tid = t.raw()
# ._oid is the highest oid in use (0 is always in use -- it's
......@@ -122,7 +122,7 @@ class BaseStorage(UndoLogCompatible):
return self.__name__
def getSize(self):
return len(self)*300 # WAG!
return len(self)*300 # WAG!
def history(self, oid, version, length=1, filter=None):
return ()
......@@ -151,7 +151,7 @@ class BaseStorage(UndoLogCompatible):
self._oid = possible_new_max_oid
def registerDB(self, db):
pass # we don't care
pass # we don't care
def isReadOnly(self):
return self._is_read_only
......@@ -279,6 +279,7 @@ class BaseStorage(UndoLogCompatible):
"""
copy(other, self, verbose)
def copy(source, dest, verbose=0):
"""Copy transactions from a source to a destination storage
......@@ -287,7 +288,7 @@ def copy(source, dest, verbose=0):
"""
_ts = None
ok = 1
preindex = {};
preindex = {}
preget = preindex.get
# restore() is a new storage API method which has an identical
# signature to store() except that it does not return anything.
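# As a side note, a minimal sketch of the pattern (assuming a source/dest
# pair of storages; not the exact implementation):
#
#   restoring = hasattr(dest, 'restore')
#   for txn in source.iterator():
#       dest.tpc_begin(txn, txn.tid, txn.status)
#       for record in txn:
#           if restoring:
#               dest.restore(record.oid, record.tid, record.data,
#                            '', record.data_txn, txn)
#           else:
#               dest.store(record.oid, record.tid, record.data, '', txn)
#       dest.tpc_vote(txn)
#       dest.tpc_finish(txn)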
......@@ -310,7 +311,8 @@ def copy(source, dest, verbose=0):
else:
t = TimeStamp(tid)
if t <= _ts:
if ok: print(('Time stamps out of order %s, %s' % (_ts, t)))
if ok:
print(('Time stamps out of order %s, %s' % (_ts, t)))
ok = 0
_ts = t.laterThan(_ts)
tid = _ts.raw()
......@@ -351,23 +353,24 @@ def checkCurrentSerialInTransaction(self, oid, serial, transaction):
raise POSException.ReadConflictError(
oid=oid, serials=(committed_tid, serial))
BaseStorage.checkCurrentSerialInTransaction = checkCurrentSerialInTransaction
@zope.interface.implementer(ZODB.interfaces.IStorageTransactionInformation)
class TransactionRecord(TransactionMetaData):
"""Abstract base class for iterator protocol"""
def __init__(self, tid, status, user, description, extension):
self.tid = tid
self.status = status
TransactionMetaData.__init__(self, user, description, extension)
@zope.interface.implementer(ZODB.interfaces.IStorageRecordInformation)
class DataRecord(object):
"""Abstract base class for iterator protocol"""
version = ''
def __init__(self, oid, tid, data, prev):
......
......@@ -29,9 +29,11 @@ from pickle import PicklingError
logger = logging.getLogger('ZODB.ConflictResolution')
class BadClassName(Exception):
pass
class BadClass(object):
def __init__(self, *args):
......@@ -40,8 +42,11 @@ class BadClass(object):
def __reduce__(self):
raise BadClassName(*self.args)
_class_cache = {}
_class_cache_get = _class_cache.get
def find_global(*args):
cls = _class_cache_get(args, 0)
if cls == 0:
......@@ -60,23 +65,24 @@ def find_global(*args):
if cls == 1:
# Not importable
if (isinstance(args, tuple) and len(args) == 2 and
isinstance(args[0], six.string_types) and
isinstance(args[1], six.string_types)
):
isinstance(args[0], six.string_types) and
isinstance(args[1], six.string_types)):
return BadClass(*args)
else:
raise BadClassName(*args)
return cls
def state(self, oid, serial, prfactory, p=''):
p = p or self.loadSerial(oid, serial)
p = self._crs_untransform_record_data(p)
file = BytesIO(p)
unpickler = PersistentUnpickler(
find_global, prfactory.persistent_load, file)
unpickler.load() # skip the class tuple
unpickler.load() # skip the class tuple
return unpickler.load()
class IPersistentReference(zope.interface.Interface):
'''public contract for references to persistent objects from an object
with conflicts.'''
......@@ -114,10 +120,10 @@ class IPersistentReference(zope.interface.Interface):
have two references to the same object that are spelled with different
data (for instance, one with a class and one without).'''
@zope.interface.implementer(IPersistentReference)
class PersistentReference(object):
weak = False
oid = database_name = klass = None
......@@ -134,7 +140,7 @@ class PersistentReference(object):
self.data = self.oid, klass.args
elif isinstance(data, (bytes, str)):
self.oid = data
else: # a list
else: # a list
reference_type = data[0]
# 'm' = multi_persistent: (database_name, oid, klass)
# 'n' = multi_oid: (database_name, oid)
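# Illustrative payloads for these two branches (values are made up):
#
#   ref_m = ['m', ('other_db', b'\x00' * 8, ('mymodule', 'MyClass'))]
#   ref_n = ['n', ('other_db', b'\x00' * 8)]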
......@@ -165,11 +171,11 @@ class PersistentReference(object):
def __cmp__(self, other):
if self is other or (
isinstance(other, PersistentReference) and
self.oid == other.oid and
self.database_name == other.database_name and
not self.weak and
not other.weak):
isinstance(other, PersistentReference) and
self.oid == other.oid and
self.database_name == other.database_name and
not self.weak and
not other.weak):
return 0
else:
raise ValueError(
......@@ -211,6 +217,7 @@ class PersistentReference(object):
elif isinstance(data, list) and data[0] == 'm':
return data[1][2]
class PersistentReferenceFactory(object):
data = None
......@@ -218,7 +225,8 @@ class PersistentReferenceFactory(object):
def persistent_load(self, ref):
if self.data is None:
self.data = {}
key = tuple(ref) # lists are not hashable; formats are different enough
# lists are not hashable; formats are different enough
key = tuple(ref)
# even after eliminating list/tuple distinction
r = self.data.get(key, None)
if r is None:
......@@ -227,12 +235,16 @@ class PersistentReferenceFactory(object):
return r
def persistent_id(object):
if getattr(object, '__class__', 0) is not PersistentReference:
return None
return object.data
_unresolvable = {}
def tryToResolveConflict(self, oid, committedSerial, oldSerial, newpickle,
committedData=b''):
# class_tuple, old, committed, newstate = ('',''), 0, 0, 0
......@@ -264,13 +276,12 @@ def tryToResolveConflict(self, oid, committedSerial, oldSerial, newpickle,
_unresolvable[klass] = 1
raise ConflictError
oldData = self.loadSerial(oid, oldSerial)
if not committedData:
committedData = self.loadSerial(oid, committedSerial)
committedData = self.loadSerial(oid, committedSerial)
newstate = unpickler.load()
old = state(self, oid, oldSerial, prfactory, oldData)
old = state(self, oid, oldSerial, prfactory, oldData)
committed = state(self, oid, committedSerial, prfactory, committedData)
resolved = resolve(old, committed, newstate)
......@@ -284,7 +295,7 @@ def tryToResolveConflict(self, oid, committedSerial, oldSerial, newpickle,
logger.debug(
"Conflict resolution on %s failed with %s: %s",
klass, e.__class__.__name__, str(e))
except:
except: # noqa: E722 do not use bare 'except'
# If anything else went wrong, catch it here and avoid passing an
# arbitrary exception back to the client. The error here will mask
# the original ConflictError. A client can recover from a
......@@ -296,6 +307,7 @@ def tryToResolveConflict(self, oid, committedSerial, oldSerial, newpickle,
raise ConflictError(oid=oid, serials=(committedSerial, oldSerial),
data=newpickle)
class ConflictResolvingStorage(object):
"Mix-in class that provides conflict resolution handling for storages"
......
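For context, application classes opt into this machinery by defining ``_p_resolveConflict``; a minimal sketch (assuming the standard ZODB hook):

import persistent

class Counter(persistent.Persistent):
    value = 0

    def _p_resolveConflict(self, old, saved, new):
        # Merge two concurrent increments instead of raising ConflictError.
        resolved = dict(new)
        resolved['value'] = (saved.get('value', 0) + new.get('value', 0)
                             - old.get('value', 0))
        return resolved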
......@@ -41,6 +41,7 @@ from ZODB import valuedoc
logger = logging.getLogger('ZODB.DB')
class AbstractConnectionPool(object):
"""Manage a pool of connections.
......@@ -111,7 +112,7 @@ class AbstractConnectionPool(object):
class ConnectionPool(AbstractConnectionPool):
def __init__(self, size, timeout=1<<31):
def __init__(self, size, timeout=1 << 31):
super(ConnectionPool, self).__init__(size, timeout)
# A stack of connections available to hand out. This is a subset
......@@ -127,9 +128,8 @@ class ConnectionPool(AbstractConnectionPool):
def _append(self, c):
available = self.available
cactive = c._cache.cache_non_ghost_count
if (available and
(available[-1][1]._cache.cache_non_ghost_count > cactive)
):
if (available
and (available[-1][1]._cache.cache_non_ghost_count > cactive)):
i = len(available) - 1
while (i and
(available[i-1][1]._cache.cache_non_ghost_count > cactive)
......@@ -185,7 +185,7 @@ class ConnectionPool(AbstractConnectionPool):
(len(available) > target)
or
(available and available[0][0] < threshhold)
):
):
t, c = available.pop(0)
assert not c.opened
self.all.remove(c)
......@@ -244,7 +244,7 @@ class KeyedConnectionPool(AbstractConnectionPool):
# see the comments in ConnectionPool for method descriptions.
def __init__(self, size, timeout=1<<31):
def __init__(self, size, timeout=1 << 31):
super(KeyedConnectionPool, self).__init__(size, timeout)
self.pools = {}
......@@ -303,6 +303,7 @@ def toTimeStamp(dt):
args = utc_struct[:5]+(utc_struct[5] + dt.microsecond/1000000.0,)
return TimeStamp(*args)
def getTID(at, before):
if at is not None:
if before is not None:
......@@ -319,6 +320,7 @@ def getTID(at, before):
before = TimeStamp(before).raw()
return before
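``getTID`` normalizes these arguments for historical connections; a usage sketch (assuming an open ``db``):

import datetime

# Open a read-only view of the database as of one hour ago.
conn = db.open(at=datetime.datetime.utcnow() - datetime.timedelta(hours=1))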
@implementer(IDatabase)
class DB(object):
"""The Object Database
......@@ -348,7 +350,7 @@ class DB(object):
def __init__(self,
storage,
pool_size=7,
pool_timeout=1<<31,
pool_timeout=1 << 31,
cache_size=400,
cache_size_bytes=0,
historical_pool_size=3,
......@@ -358,7 +360,7 @@ class DB(object):
database_name='unnamed',
databases=None,
xrefs=True,
large_record_size=1<<24,
large_record_size=1 << 24,
**storage_args):
"""Create an object database.
......@@ -425,10 +427,10 @@ class DB(object):
# Setup storage
if isinstance(storage, six.string_types):
from ZODB import FileStorage
from ZODB import FileStorage # noqa: F401 import unused
storage = ZODB.FileStorage.FileStorage(storage, **storage_args)
elif storage is None:
from ZODB import MappingStorage
from ZODB import MappingStorage # noqa: F401 import unused
storage = ZODB.MappingStorage.MappingStorage(**storage_args)
else:
assert not storage_args
......@@ -507,6 +509,7 @@ class DB(object):
"""
detail = {}
def f(con, detail=detail):
for oid, ob in con._cache.items():
module = getattr(ob.__class__, '__module__', '')
......@@ -570,17 +573,18 @@ class DB(object):
'rc': (rc(ob) - 3 - (ob._p_changed is not None)
if rc else False),
'state': ob._p_changed,
#'references': con.references(oid),
})
# 'references': con.references(oid),
})
self._connectionMap(f)
return detail
def cacheFullSweep(self): # XXX this is the same as cacheMinimize
def cacheFullSweep(self): # XXX this is the same as cacheMinimize
self._connectionMap(lambda c: c._cache.full_sweep())
def cacheLastGCTime(self):
m = [0]
def f(con, m=m):
t = con._cache.cache_last_gc_time
if t > m[0]:
......@@ -598,6 +602,7 @@ class DB(object):
"""Return the total count of non-ghost objects in all object caches
"""
m = [0]
def f(con, m=m):
m[0] += con._cache.cache_non_ghost_count
......@@ -608,6 +613,7 @@ class DB(object):
"""Return non-ghost counts sizes for all connections.
"""
m = []
def f(con, m=m):
m.append({'connection': repr(con),
'ngsize': con._cache.cache_non_ghost_count,
......@@ -731,7 +737,7 @@ class DB(object):
before = getTID(at, before)
if (before is not None and
before > self.lastTransaction() and
before > getTID(self.lastTransaction(), None)):
before > getTID(self.lastTransaction(), None)):
raise ValueError(
'cannot open an historical connection in the future.')
......@@ -773,7 +779,6 @@ class DB(object):
self.pool.availableGC()
self.historical_pool.availableGC()
result.open(transaction_manager)
return result
......@@ -808,7 +813,7 @@ class DB(object):
t-o)),
'info': d,
'before': c.before,
})
})
self._connectionMap(get_info)
return result
......@@ -836,7 +841,7 @@ class DB(object):
t -= days * 86400
try:
self.storage.pack(t, self.references)
except:
except: # noqa: E722 do not use bare 'except'
logger.exception("packing")
raise
......@@ -994,7 +999,7 @@ class DB(object):
Kept for backwards compatibility only. New oids should be
allocated in a transaction using an open Connection.
"""
return self.storage.new_oid() # pragma: no cover
return self.storage.new_oid() # pragma: no cover
def open_then_close_db_when_connection_closes(self):
"""Create and return a connection.
......@@ -1029,9 +1034,11 @@ class ContextManager(object):
self.tm.abort()
self.conn.close()
resource_counter_lock = utils.Lock()
resource_counter = 0
class TransactionalUndo(object):
def __init__(self, db, tids):
......@@ -1064,9 +1071,10 @@ class TransactionalUndo(object):
# a new storage instance, and so we must close it to be sure
# to reclaim resources in a timely manner.
#
# Once the tpc_begin method has been called, the transaction manager will
# guarantee to call either `tpc_finish` or `tpc_abort`, so those are the only
# methods we need to be concerned about calling close() from.
# Once the tpc_begin method has been called, the transaction manager
# will guarantee to call either `tpc_finish` or `tpc_abort`, so those
# are the only methods we need to be concerned about calling close()
# from.
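# In sketch form, the guarantee just described (assumed call order):
#
#   storage.tpc_begin(txn)
#   try:
#       storage.tpc_vote(txn)
#       storage.tpc_finish(txn)
#   except Exception:
#       storage.tpc_abort(txn)
#       raise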
db_mvcc_storage = self._db._mvcc_storage
self._storage = getattr(
db_mvcc_storage,
......@@ -1117,7 +1125,10 @@ def connection(*args, **kw):
"""
return DB(*args, **kw).open_then_close_db_when_connection_closes()
_transaction_meta_data_text_variables = 'user_name', 'description'
def _text_transaction_info(info):
for d in info:
for name in _transaction_meta_data_text_variables:
......
......@@ -35,10 +35,11 @@ import zope.interface
from .ConflictResolution import ConflictResolvingStorage
from .utils import load_current, maxtid
@zope.interface.implementer(
ZODB.interfaces.IStorage,
ZODB.interfaces.IStorageIteration,
)
ZODB.interfaces.IStorage,
ZODB.interfaces.IStorageIteration,
)
class DemoStorage(ConflictResolvingStorage):
"""A storage that stores changes against a read-only base database
......@@ -99,7 +100,6 @@ class DemoStorage(ConflictResolvingStorage):
self.base = base
self.close_base_on_close = close_base_on_close
if changes is None:
self._temporary_changes = True
changes = ZODB.MappingStorage.MappingStorage()
......@@ -128,16 +128,15 @@ class DemoStorage(ConflictResolvingStorage):
self._copy_methods_from_changes(changes)
self._next_oid = random.randint(1, 1<<62)
self._next_oid = random.randint(1, 1 << 62)
def _blobify(self):
if (self._temporary_changes and
isinstance(self.changes, ZODB.MappingStorage.MappingStorage)
):
isinstance(self.changes, ZODB.MappingStorage.MappingStorage)):
blob_dir = tempfile.mkdtemp('.demoblobs')
_temporary_blobdirs[
weakref.ref(self, cleanup_temporary_blobdir)
] = blob_dir
] = blob_dir
self.changes = ZODB.blob.BlobStorage(blob_dir, self.changes)
self._copy_methods_from_changes(self.changes)
return True
......@@ -147,6 +146,7 @@ class DemoStorage(ConflictResolvingStorage):
self.changes.cleanup()
__opened = True
def opened(self):
return self.__opened
......@@ -162,7 +162,7 @@ class DemoStorage(ConflictResolvingStorage):
'_lock',
'getSize', 'isReadOnly',
'sortKey', 'tpc_transaction',
):
):
setattr(self, meth, getattr(changes, meth))
supportsUndo = getattr(changes, 'supportsUndo', None)
......@@ -253,7 +253,7 @@ class DemoStorage(ConflictResolvingStorage):
t = self.changes.loadBefore(oid, end_tid)
result = result[:2] + (
end_tid if end_tid != maxtid else None,
)
)
return result
......@@ -296,7 +296,7 @@ class DemoStorage(ConflictResolvingStorage):
def new_oid(self):
with self._lock:
while 1:
oid = ZODB.utils.p64(self._next_oid )
oid = ZODB.utils.p64(self._next_oid)
if oid not in self._issued_oids:
try:
load_current(self.changes, oid)
......@@ -308,7 +308,7 @@ class DemoStorage(ConflictResolvingStorage):
self._issued_oids.add(oid)
return oid
self._next_oid = random.randint(1, 1<<62)
self._next_oid = random.randint(1, 1 << 62)
def pack(self, t, referencesf, gc=None):
if gc is None:
......@@ -325,7 +325,7 @@ class DemoStorage(ConflictResolvingStorage):
self.changes.pack(t, referencesf, gc=False)
except TypeError as v:
if 'gc' in str(v):
pass # The gc arg isn't supported. Don't pack
pass # The gc arg isn't supported. Don't pack
raise
def pop(self):
......@@ -344,7 +344,7 @@ class DemoStorage(ConflictResolvingStorage):
close_base_on_close=False)
def store(self, oid, serial, data, version, transaction):
assert version=='', "versions aren't supported"
assert version == '', "versions aren't supported"
if transaction is not self._transaction:
raise ZODB.POSException.StorageTransactionError(self, transaction)
......@@ -367,7 +367,7 @@ class DemoStorage(ConflictResolvingStorage):
def storeBlob(self, oid, oldserial, data, blobfilename, version,
transaction):
assert version=='', "versions aren't supported"
assert version == '', "versions aren't supported"
if transaction is not self._transaction:
raise ZODB.POSException.StorageTransactionError(self, transaction)
......@@ -425,7 +425,7 @@ class DemoStorage(ConflictResolvingStorage):
"Unexpected resolved conflicts")
return self._resolved
def tpc_finish(self, transaction, func = lambda tid: None):
def tpc_finish(self, transaction, func=lambda tid: None):
with self._lock:
if (transaction is not self._transaction):
raise ZODB.POSException.StorageTransactionError(
......@@ -437,11 +437,14 @@ class DemoStorage(ConflictResolvingStorage):
self._commit_lock.release()
return tid
_temporary_blobdirs = {}
def cleanup_temporary_blobdir(
ref,
_temporary_blobdirs=_temporary_blobdirs, # Make sure it stays around
):
_temporary_blobdirs=_temporary_blobdirs, # Make sure it stays around
):
blob_dir = _temporary_blobdirs.pop(ref, None)
if blob_dir and os.path.exists(blob_dir):
ZODB.blob.remove_committed_dir(blob_dir)
......@@ -29,17 +29,17 @@ from ZODB._compat import PersistentPickler, Unpickler, BytesIO, _protocol
logger = logging.getLogger('ZODB.ExportImport')
class ExportImport(object):
def exportFile(self, oid, f=None, bufsize=64 * 1024):
if f is None:
f = TemporaryFile(prefix="EXP")
elif isinstance(f, six.string_types):
f = open(f,'w+b')
f = open(f, 'w+b')
f.write(b'ZEXP')
oids = [oid]
done_oids = {}
done = done_oids.__contains__
load = self._storage.load
supports_blobs = IBlobStorage.providedBy(self._storage)
while oids:
......@@ -49,7 +49,7 @@ class ExportImport(object):
done_oids[oid] = True
try:
p, serial = load(oid)
except:
except: # noqa: E722 do not use bare 'except'
logger.debug("broken reference for oid %s", repr(oid),
exc_info=True)
else:
......@@ -58,7 +58,7 @@ class ExportImport(object):
if supports_blobs:
if not isinstance(self._reader.getGhost(p), Blob):
continue # not a blob
continue # not a blob
blobfilename = self._storage.loadBlob(oid, serial)
f.write(blob_begin_marker)
......@@ -159,8 +159,7 @@ class ExportImport(object):
return_oid_list.append(oid)
if (b'blob' in data and
isinstance(self._reader.getGhost(data), Blob)
):
isinstance(self._reader.getGhost(data), Blob)):
# Blob support
# Make sure we have a (redundant, overly) blob marker.
......@@ -198,11 +197,14 @@ class ExportImport(object):
export_end_marker = b'\377'*16
blob_begin_marker = b'\000BLOBSTART'
class Ghost(object):
__slots__ = ("oid",)
def __init__(self, oid):
self.oid = oid
def persistent_id(obj):
if isinstance(obj, Ghost):
return obj.oid
......@@ -90,9 +90,11 @@ from ZODB.POSException import POSKeyError
from ZODB.utils import u64, oid_repr, as_bytes
from ZODB._compat import PY3
class CorruptedError(Exception):
pass
class CorruptedDataError(CorruptedError):
def __init__(self, oid=None, buf=None, pos=None):
......@@ -110,6 +112,7 @@ class CorruptedDataError(CorruptedError):
msg += " at %d" % self.pos
return msg
# the struct formats for the headers
TRANS_HDR = ">8sQcHHH"
DATA_HDR = ">8s8sQQHQ"
......@@ -121,6 +124,7 @@ assert struct.calcsize(DATA_HDR) == DATA_HDR_LEN
logger = logging.getLogger('ZODB.FileStorage.format')
class FileStorageFormatter(object):
"""Mixin class that can read and write the low-level format."""
......@@ -211,7 +215,7 @@ class FileStorageFormatter(object):
self.ltid = th.tid
if th.status == "c":
self.fail(pos, "transaction with checkpoint flag set")
if not th.status in " pu": # recognize " ", "p", and "u" as valid
if th.status not in " pu": # recognize " ", "p", and "u" as valid
self.fail(pos, "invalid transaction status: %r", th.status)
if th.tlen < th.headerlen():
self.fail(pos, "invalid transaction header: "
......@@ -232,9 +236,11 @@ class FileStorageFormatter(object):
if dh.plen:
self.fail(pos, "data record has back pointer and data")
def DataHeaderFromString(s):
return DataHeader(*struct.unpack(DATA_HDR, s))
class DataHeader(object):
"""Header for a data record."""
......@@ -250,7 +256,7 @@ class DataHeader(object):
self.prev = prev
self.tloc = tloc
self.plen = plen
self.back = 0 # default
self.back = 0 # default
def asString(self):
return struct.pack(DATA_HDR, self.oid, self.tid, self.prev,
......@@ -259,12 +265,14 @@ class DataHeader(object):
def recordlen(self):
return DATA_HDR_LEN + (self.plen or 8)
def TxnHeaderFromString(s):
res = TxnHeader(*struct.unpack(TRANS_HDR, s))
if PY3:
res.status = res.status.decode('ascii')
return res
class TxnHeader(object):
"""Header for a transaction record."""
......
......@@ -20,19 +20,20 @@ from ZODB.FileStorage.format import DATA_HDR, DATA_HDR_LEN
from ZODB.TimeStamp import TimeStamp
from ZODB.utils import u64, get_pickle_metadata
def fsdump(path, file=None, with_offset=1):
iter = FileIterator(path)
for i, trans in enumerate(iter):
size = trans._tend - trans._tpos
if with_offset:
print(("Trans #%05d tid=%016x size=%d time=%s offset=%d" %
(i, u64(trans.tid), size,
TimeStamp(trans.tid), trans._pos)), file=file)
(i, u64(trans.tid), size,
TimeStamp(trans.tid), trans._pos)), file=file)
else:
print(("Trans #%05d tid=%016x size=%d time=%s" %
(i, u64(trans.tid), size, TimeStamp(trans.tid))), file=file)
(i, u64(trans.tid), size, TimeStamp(trans.tid))), file=file)
print((" status=%r user=%r description=%r" %
(trans.status, trans.user, trans.description)), file=file)
(trans.status, trans.user, trans.description)), file=file)
for j, rec in enumerate(trans):
if rec.data is None:
......@@ -51,13 +52,15 @@ def fsdump(path, file=None, with_offset=1):
bp = ""
print((" data #%05d oid=%016x%s class=%s%s" %
(j, u64(rec.oid), size, fullclass, bp)), file=file)
(j, u64(rec.oid), size, fullclass, bp)), file=file)
iter.close()
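# Usage sketch (path assumed):
#
#   fsdump('Data.fs')                 # one line per transaction, with offsets
#   fsdump('Data.fs', with_offset=0)  # omit file offsets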
def fmt(p64):
# Return a nicely formatted string for a packaged 64-bit value
return "%016x" % u64(p64)
class Dumper(object):
"""A very verbose dumper for debuggin FileStorage problems."""
......@@ -87,13 +90,13 @@ class Dumper(object):
print("transaction id: %s" % fmt(tid), file=self.dest)
print("trec len: %d" % tlen, file=self.dest)
print("status: %r" % status, file=self.dest)
user = descr = extra = ""
user = descr = ""
if ul:
user = self.file.read(ul)
if dl:
descr = self.file.read(dl)
if el:
extra = self.file.read(el)
self.file.read(el)
print("user: %r" % user, file=self.dest)
print("description: %r" % descr, file=self.dest)
print("len(extra): %d" % el, file=self.dest)
......@@ -121,6 +124,7 @@ class Dumper(object):
sbp = self.file.read(8)
print("backpointer: %d" % u64(sbp), file=self.dest)
def main():
import sys
fsdump(sys.argv[1])
......
......@@ -18,10 +18,14 @@ from ZODB.serialize import get_refs
from ZODB.TimeStamp import TimeStamp
# Extract module.class string from pickle.
def get_class(pickle):
return "%s.%s" % get_pickle_metadata(pickle)
# Shorten a string for display.
def shorten(s, size=50):
if len(s) <= size:
return s
......@@ -35,6 +39,7 @@ def shorten(s, size=50):
sep = " ... "
return s[:nleading] + sep + s[-ntrailing:]
class Tracer(object):
"""Trace all occurrences of a set of oids in a FileStorage.
......@@ -84,7 +89,7 @@ class Tracer(object):
self.oids[oid] = 0 # 0 revisions seen so far
def _msg(self, oid, tid, *args):
self.msgs.append( (oid, tid, ' '.join(map(str, args))) )
self.msgs.append((oid, tid, ' '.join(map(str, args))))
self._produced_msg = True
def report(self):
......@@ -98,9 +103,9 @@ class Tracer(object):
NOT_SEEN = "this oid was not defined (no data record for it found)"
for oid in oids:
if oid not in oid2name:
msgs.append( (oid, None, NOT_SEEN) )
msgs.append((oid, None, NOT_SEEN))
msgs.sort() # oids are primary key, tids secondary
msgs.sort() # oids are primary key, tids secondary
current_oid = current_tid = None
for oid, tid, msg in msgs:
if oid != current_oid:
......
......@@ -36,9 +36,11 @@ import ZODB.POSException
logger = logging.getLogger(__name__)
class PackError(ZODB.POSException.POSError):
pass
class PackCopier(FileStorageFormatter):
def __init__(self, f, index, tindex):
......@@ -54,7 +56,7 @@ class PackCopier(FileStorageFormatter):
self._file.seek(pos - 8)
pos = pos - u64(self._file.read(8)) - 8
self._file.seek(pos)
h = self._file.read(TRANS_HDR_LEN) # XXX bytes
h = self._file.read(TRANS_HDR_LEN) # XXX bytes
_tid = h[:8]
if _tid == tid:
return pos
......@@ -144,6 +146,7 @@ class PackCopier(FileStorageFormatter):
finally:
self._file.seek(pos)
class GC(FileStorageFormatter):
def __init__(self, file, eof, packtime, gc, referencesf):
......@@ -330,6 +333,7 @@ class GC(FileStorageFormatter):
else:
return []
class FileStoragePacker(FileStorageFormatter):
# path is the storage file path.
......@@ -409,15 +413,15 @@ class FileStoragePacker(FileStorageFormatter):
# try our best, but don't fail
try:
self._tfile.close()
except:
except: # noqa: E722 do not use bare 'except'
pass
try:
self._file.close()
except:
except: # noqa: E722 do not use bare 'except'
pass
try:
os.remove(self._name + ".pack")
except:
except: # noqa: E722 do not use bare 'except'
pass
if self.blob_removed is not None:
self.blob_removed.close()
......@@ -459,8 +463,8 @@ class FileStoragePacker(FileStorageFormatter):
# argument, and then on every platform except native
# Windows it was observed that we could read stale
# data from the tail end of the file.
self._file.close() # else self.gc keeps the original
# alive & open
self._file.close() # else self.gc keeps the original
# alive & open
self._file = open(self._path, "rb", 0)
self._file.seek(0, 2)
self.file_end = self._file.tell()
......@@ -483,13 +487,12 @@ class FileStoragePacker(FileStorageFormatter):
if self.locked:
self._commit_lock.release()
raise # don't succeed silently
except:
except: # noqa: E722 do not use bare 'except'
if self.locked:
self._commit_lock.release()
raise
def copyToPacktime(self):
offset = 0 # the amount of space freed by packing
pos = self._metadata_size
new_pos = pos
......@@ -506,7 +509,6 @@ class FileStoragePacker(FileStorageFormatter):
self._tfile.seek(new_pos - 8)
self._tfile.write(p64(tlen))
tlen = self._read_num(pos)
if tlen != th.tlen:
self.fail(pos, "redundant transaction length does not "
......@@ -546,8 +548,8 @@ class FileStoragePacker(FileStorageFormatter):
# record. There's a bug in ZEO blob support that causes
# duplicate data records.
rpos = self.gc.reachable.get(h.oid)
is_dup = (rpos
and self._read_data_header(rpos).tid == h.tid)
is_dup = (
rpos and self._read_data_header(rpos).tid == h.tid)
if not is_dup:
if h.oid not in self.gc.reachable:
self.blob_removed.write(
......@@ -569,7 +571,6 @@ class FileStoragePacker(FileStorageFormatter):
s = th.asString()
new_tpos = self._tfile.tell()
self._tfile.write(s)
new_pos = new_tpos + len(s)
copy = 1
if h.plen:
......@@ -578,7 +579,6 @@ class FileStoragePacker(FileStorageFormatter):
data = self.fetchDataViaBackpointer(h.oid, h.back)
self.writePackedDataRecord(h, data, new_tpos)
new_pos = self._tfile.tell()
return new_tpos, pos
......
......@@ -13,6 +13,7 @@
##############################################################################
import zope.interface
class IFileStoragePacker(zope.interface.Interface):
def __call__(storage, referencesf, stop, gc):
......@@ -58,20 +59,21 @@ class IFileStoragePacker(zope.interface.Interface):
corresponding to the file records.
"""
class IFileStorage(zope.interface.Interface):
packer = zope.interface.Attribute(
"The IFileStoragePacker to be used for packing."
)
)
_file = zope.interface.Attribute(
"The file object used to access the underlying data."
)
)
_lock = zope.interface.Attribute(
"The storage lock."
)
)
_commit_lock = zope.interface.Attribute(
"The storage commit lock."
)
)
......@@ -29,10 +29,11 @@ checker = renormalizing.RENormalizing([
# Python 3 adds module name to exceptions.
(re.compile("ZODB.POSException.POSKeyError"), r"POSKeyError"),
(re.compile("ZODB.FileStorage.FileStorage.FileStorageQuotaError"),
"FileStorageQuotaError"),
"FileStorageQuotaError"),
(re.compile('data.fs:[0-9]+'), 'data.fs:<OFFSET>'),
])
def pack_keep_old():
"""Should a copy of the database be kept?
......@@ -106,6 +107,7 @@ directory for blobs is kept.)
>>> db.close()
"""
def pack_with_repeated_blob_records():
"""
There is a bug in ZEO that causes duplicate blob database records
......@@ -144,6 +146,7 @@ def pack_with_repeated_blob_records():
>>> db.close()
"""
def _save_index():
"""
......@@ -187,6 +190,7 @@ cleanup
"""
def pack_disk_full_copyToPacktime():
"""Recover from a disk full situation by removing the `.pack` file
......@@ -239,6 +243,7 @@ check the data we added
>>> db.close()
"""
def pack_disk_full_copyRest():
"""Recover from a disk full situation by removing the `.pack` file
......@@ -307,6 +312,7 @@ check the data we added
>>> db.close()
"""
def test_suite():
return unittest.TestSuite((
doctest.DocFileSuite(
......@@ -319,4 +325,4 @@ def test_suite():
setUp=ZODB.tests.util.setUp,
tearDown=ZODB.tests.util.tearDown,
checker=checker),
))
))
......@@ -28,9 +28,9 @@ import zope.interface
@zope.interface.implementer(
ZODB.interfaces.IStorage,
ZODB.interfaces.IStorageIteration,
)
ZODB.interfaces.IStorage,
ZODB.interfaces.IStorageIteration,
)
class MappingStorage(object):
"""In-memory storage implementation
......@@ -50,7 +50,8 @@ class MappingStorage(object):
"""
self.__name__ = name
self._data = {} # {oid->{tid->pickle}}
self._transactions = BTrees.OOBTree.OOBTree() # {tid->TransactionRecord}
# {tid->TransactionRecord}
self._transactions = BTrees.OOBTree.OOBTree()
self._ltid = ZODB.utils.z64
self._last_pack = None
self._lock = ZODB.utils.RLock()
......@@ -117,14 +118,14 @@ class MappingStorage(object):
tids.reverse()
return [
dict(
time = ZODB.TimeStamp.TimeStamp(tid).timeTime(),
tid = tid,
serial = tid,
user_name = self._transactions[tid].user,
description = self._transactions[tid].description,
extension = self._transactions[tid].extension,
size = len(tid_data[tid])
)
time=ZODB.TimeStamp.TimeStamp(tid).timeTime(),
tid=tid,
serial=tid,
user_name=self._transactions[tid].user,
description=self._transactions[tid].description,
extension=self._transactions[tid].extension,
size=len(tid_data[tid])
)
for tid in tids]
# ZODB.interfaces.IStorage
......@@ -167,8 +168,8 @@ class MappingStorage(object):
else:
raise ZODB.POSException.POSKeyError(oid)
# ZODB.interfaces.IStorage
@ZODB.utils.locked(opened)
def loadSerial(self, oid, serial):
tid_data = self._data.get(oid)
......@@ -192,7 +193,7 @@ class MappingStorage(object):
if not self._data:
return
stop = ZODB.TimeStamp.TimeStamp(*time.gmtime(t)[:5]+(t%60,)).raw()
stop = ZODB.TimeStamp.TimeStamp(*time.gmtime(t)[:5]+(t % 60,)).raw()
if self._last_pack is not None and self._last_pack >= stop:
if self._last_pack == stop:
return
......@@ -298,7 +299,7 @@ class MappingStorage(object):
# ZODB.interfaces.IStorage
@ZODB.utils.locked(opened)
def tpc_finish(self, transaction, func = lambda tid: None):
def tpc_finish(self, transaction, func=lambda tid: None):
if (transaction is not self._transaction):
raise ZODB.POSException.StorageTransactionError(
"tpc_finish called with wrong transaction")
......@@ -332,6 +333,7 @@ class MappingStorage(object):
raise ZODB.POSException.StorageTransactionError(
"tpc_vote called with wrong transaction")
class TransactionRecord(object):
status = ' '
......@@ -357,11 +359,11 @@ class TransactionRecord(object):
del self.data[oid]
return not self.data
@zope.interface.implementer(ZODB.interfaces.IStorageRecordInformation)
class DataRecord(object):
"""Abstract base class for iterator protocol"""
version = ''
data_txn = None
......@@ -370,5 +372,6 @@ class DataRecord(object):
self.tid = tid
self.data = data
def DB(*args, **kw):
return ZODB.DB(MappingStorage(), *args, **kw)
......@@ -18,20 +18,26 @@ $Id$"""
from ZODB.utils import oid_repr, readable_tid_repr
# BBB: We moved the two transactions to the transaction package
from transaction.interfaces import TransactionError, TransactionFailedError
from transaction.interfaces import TransactionError # noqa: F401 import unused
from transaction.interfaces import TransactionFailedError # noqa: F401
import transaction.interfaces
def _fmt_undo(oid, reason):
s = reason and (": %s" % reason) or ""
return "Undo error %s%s" % (oid_repr(oid), s)
def _recon(class_, state):
err = class_.__new__(class_)
err.__setstate__(state)
return err
_recon.__no_side_effects__ = True
class POSError(Exception):
"""Persistent object system error."""
......@@ -49,9 +55,10 @@ class POSError(Exception):
# the args would then get lost, leading to unprintable exceptions
# and worse. Manually assign to args from the state to be sure
# this doesn't happen.
super(POSError,self).__setstate__(state)
super(POSError, self).__setstate__(state)
self.args = state['args']
class POSKeyError(POSError, KeyError):
"""Key not found in database."""
......@@ -143,6 +150,7 @@ class ConflictError(POSError, transaction.interfaces.TransientError):
def get_serials(self):
return self.serials
class ReadConflictError(ConflictError):
"""Conflict detected when object was requested to stay unchanged.
......@@ -156,64 +164,67 @@ class ReadConflictError(ConflictError):
- object is found to be removed, and
- there is a possibility that a database pack was running simultaneously.
"""
def __init__(self, message=None, object=None, serials=None, **kw):
if message is None:
message = "database read conflict error"
ConflictError.__init__(self, message=message, object=object,
serials=serials, **kw)
class BTreesConflictError(ConflictError):
"""A special subclass for BTrees conflict errors."""
msgs = [# 0; i2 or i3 bucket split; positions are all -1
'Conflicting bucket split',
msgs = [
# 0; i2 or i3 bucket split; positions are all -1
'Conflicting bucket split',
# 1; keys the same, but i2 and i3 values differ, and both values
# differ from i1's value
'Conflicting changes',
# 1; keys the same, but i2 and i3 values differ, and both values
# differ from i1's value
'Conflicting changes',
# 2; i1's value changed in i2, but key+value deleted in i3
'Conflicting delete and change',
# 2; i1's value changed in i2, but key+value deleted in i3
'Conflicting delete and change',
# 3; i1's value changed in i3, but key+value deleted in i2
'Conflicting delete and change',
# 3; i1's value changed in i3, but key+value deleted in i2
'Conflicting delete and change',
# 4; i1 and i2 both added the same key, or both deleted the
# same key
'Conflicting inserts or deletes',
# 4; i1 and i2 both added the same key, or both deleted the
# same key
'Conflicting inserts or deletes',
# 5; i2 and i3 both deleted the same key
'Conflicting deletes',
# 5; i2 and i3 both deleted the same key
'Conflicting deletes',
# 6; i2 and i3 both added the same key
'Conflicting inserts',
# 6; i2 and i3 both added the same key
'Conflicting inserts',
# 7; i2 and i3 both deleted the same key, or i2 changed the value
# associated with a key and i3 deleted that key
'Conflicting deletes, or delete and change',
# 7; i2 and i3 both deleted the same key, or i2 changed the value
# associated with a key and i3 deleted that key
'Conflicting deletes, or delete and change',
# 8; i2 and i3 both deleted the same key, or i3 changed the value
# associated with a key and i2 deleted that key
'Conflicting deletes, or delete and change',
# 8; i2 and i3 both deleted the same key, or i3 changed the value
# associated with a key and i2 deleted that key
'Conflicting deletes, or delete and change',
# 9; i2 and i3 both deleted the same key
'Conflicting deletes',
# 9; i2 and i3 both deleted the same key
'Conflicting deletes',
# 10; i2 and i3 deleted all the keys, and didn't insert any,
# leaving an empty bucket; conflict resolution doesn't have
# enough info to unlink an empty bucket from its containing
# BTree correctly
'Empty bucket from deleting all keys',
# 10; i2 and i3 deleted all the keys, and didn't insert any,
# leaving an empty bucket; conflict resolution doesn't have
# enough info to unlink an empty bucket from its containing
# BTree correctly
'Empty bucket from deleting all keys',
# 11; conflicting changes in an internal BTree node
'Conflicting changes in an internal BTree node',
# 11; conflicting changes in an internal BTree node
'Conflicting changes in an internal BTree node',
# 12; i2 or i3 was empty
'Empty bucket in a transaction',
# 12; i2 or i3 was empty
'Empty bucket in a transaction',
# 13; delete of first key, which causes change to parent node
'Delete of first key',
]
# 13; delete of first key, which causes change to parent node
'Delete of first key',
]
def __init__(self, p1, p2, p3, reason):
self.p1 = p1
......@@ -226,11 +237,14 @@ class BTreesConflictError(ConflictError):
self.p2,
self.p3,
self.reason)
def __str__(self):
return "BTrees conflict error at %d/%d/%d: %s" % (
self.p1, self.p2, self.p3, self.msgs[self.reason])
class DanglingReferenceError(POSError, transaction.interfaces.TransactionError):
class DanglingReferenceError(
POSError, transaction.interfaces.TransactionError):
"""An object has a persistent reference to a missing object.
If an object is stored and it has a reference to another object
......@@ -258,9 +272,11 @@ class DanglingReferenceError(POSError, transaction.interfaces.TransactionError):
class VersionError(POSError):
"""An error in handling versions occurred."""
class VersionCommitError(VersionError):
"""An invalid combination of versions was used in a version commit."""
class VersionLockError(VersionError, transaction.interfaces.TransactionError):
"""Modification to an object modified in an unsaved version.
......@@ -269,6 +285,7 @@ class VersionLockError(VersionError, transaction.interfaces.TransactionError):
"""
############################################################################
class UndoError(POSError):
"""An attempt was made to undo a non-undoable transaction."""
......@@ -279,6 +296,7 @@ class UndoError(POSError):
def __str__(self):
return _fmt_undo(self._oid, self._reason)
class MultipleUndoErrors(UndoError):
"""Several undo errors occurred during a single transaction."""
......@@ -290,33 +308,43 @@ class MultipleUndoErrors(UndoError):
def __str__(self):
return "\n".join([_fmt_undo(*pair) for pair in self._errs])
class StorageError(POSError):
"""Base class for storage based exceptions."""
class StorageTransactionError(StorageError):
"""An operation was invoked for an invalid transaction or state."""
class StorageSystemError(StorageError):
"""Panic! Internal storage error!"""
class MountedStorageError(StorageError):
"""Unable to access mounted storage."""
class ReadOnlyError(StorageError):
"""Unable to modify objects in a read-only storage."""
class TransactionTooLargeError(StorageTransactionError):
"""The transaction exhausted some finite storage resource."""
class ExportError(POSError):
"""An export file doesn't have the right format."""
class Unsupported(POSError):
"""A feature was used that is not supported by the storage."""
class ReadOnlyHistoryError(POSError):
"""Unable to add or modify objects in an historical connection."""
class InvalidObjectReference(POSError):
"""An object contains an invalid reference to another object.
......@@ -329,6 +357,7 @@ class InvalidObjectReference(POSError):
TODO: The exception ought to have a member that is the invalid object.
"""
class ConnectionStateError(POSError):
"""A Connection isn't in the required state for an operation.
......
......@@ -12,6 +12,7 @@
#
##############################################################################
from ZODB.DB import DB, connection
import sys
from persistent import TimeStamp
......@@ -24,5 +25,3 @@ sys.modules['ZODB.PersistentMapping'] = sys.modules['persistent.mapping']
sys.modules['ZODB.PersistentList'] = sys.modules['persistent.list']
del mapping, list, sys
from ZODB.DB import DB, connection
......@@ -11,13 +11,13 @@
# FOR A PARTICULAR PURPOSE
#
##############################################################################
from zodbpickle import binary # noqa: F401 import unused
import sys
from six import PY3
IS_JYTHON = sys.platform.startswith('java')
_protocol = 3
from zodbpickle import binary
if not PY3:
# Python 2.x
......@@ -42,7 +42,8 @@ else:
# http://bugs.python.org/issue6784
import zodbpickle.pickle
HIGHEST_PROTOCOL = 3
from _compat_pickle import IMPORT_MAPPING, NAME_MAPPING
from _compat_pickle import IMPORT_MAPPING # noqa: F401 import unused
from _compat_pickle import NAME_MAPPING # noqa: F401 import unused
class Pickler(zodbpickle.pickle.Pickler):
def __init__(self, f, protocol=None):
......@@ -92,6 +93,7 @@ def PersistentPickler(persistent_id, *args, **kwargs):
p.persistent_id = persistent_id
return p
def PersistentUnpickler(find_global, load_persistent, *args, **kwargs):
"""
Returns a :class:`Unpickler` that will use the given `find_global` function
......@@ -104,7 +106,8 @@ def PersistentUnpickler(find_global, load_persistent, *args, **kwargs):
if find_global is not None:
unpickler.find_global = find_global
try:
unpickler.find_class = find_global # PyPy, zodbpickle, the non-c-accelerated version
# PyPy, zodbpickle, the non-c-accelerated version
unpickler.find_class = find_global
except AttributeError:
pass
if load_persistent is not None:
......@@ -118,7 +121,7 @@ try:
from cStringIO import StringIO as BytesIO
except ImportError:
# Python 3.x
from io import BytesIO
from io import BytesIO # noqa: F401 import unused
try:
......@@ -126,14 +129,15 @@ try:
from base64 import decodebytes, encodebytes
except ImportError:
# Python 2.x
from base64 import decodestring as decodebytes, encodestring as encodebytes
from base64 import decodestring as decodebytes # noqa: F401 import unused
from base64 import encodestring as encodebytes # noqa: F401 import unused
# Python 3.x: ``hasattr()`` swallows only AttributeError.
def py2_hasattr(obj, name):
try:
getattr(obj, name)
except:
except: # noqa: E722 do not use bare 'except'
return False
return True
......@@ -151,9 +155,10 @@ else:
try:
TEXT = unicode
except NameError: #pragma NO COVER Py3k
except NameError: # pragma NO COVER Py3k
TEXT = str
def ascii_bytes(x):
if isinstance(x, TEXT):
x = x.encode('ascii')
......
......@@ -35,7 +35,6 @@ from ZODB._compat import BytesIO
from ZODB._compat import PersistentUnpickler
from ZODB._compat import decodebytes
from ZODB._compat import ascii_bytes
from ZODB._compat import INT_TYPES
from ZODB._compat import PY3
......@@ -62,20 +61,21 @@ valid_modes = 'r', 'w', 'r+', 'a', 'c'
# of a weakref when the weakref object dies at the same time
# as the object it refers to. In other words, this doesn't work:
# self._ref = weakref.ref(self, lambda ref: ...)
# because the function never gets called (https://bitbucket.org/pypy/pypy/issue/2030).
# because the function never gets called
# (https://bitbucket.org/pypy/pypy/issue/2030).
# The Blob class used to use that pattern to clean up uncommitted
# files; now we use this module-level global (but still keep a
# reference in the Blob in case we need premature cleanup).
_blob_close_refs = []
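A sketch of the workaround pattern (hypothetical helper):

import weakref

def _track_for_cleanup(obj):
    # The module-level list keeps the weakref object itself alive, so its
    # callback still fires on PyPy when obj and the ref die together.
    ref = weakref.ref(obj, _blob_close_refs.remove)
    _blob_close_refs.append(ref)
    return ref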
@zope.interface.implementer(ZODB.interfaces.IBlob)
class Blob(persistent.Persistent):
"""A BLOB supports efficient handling of large data within ZODB."""
_p_blob_uncommitted = None # Filename of the uncommitted (dirty) data
_p_blob_committed = None # Filename of the committed data
_p_blob_ref = None # weakreference to self; also in _blob_close_refs
_p_blob_committed = None # Filename of the committed data
_p_blob_ref = None # weakreference to self; also in _blob_close_refs
readers = writers = None
......@@ -140,11 +140,10 @@ class Blob(persistent.Persistent):
if mode == 'c':
if (self._p_blob_uncommitted
or
not self._p_blob_committed
or
self._p_blob_committed.endswith(SAVEPOINT_SUFFIX)
):
or
not self._p_blob_committed
or
self._p_blob_committed.endswith(SAVEPOINT_SUFFIX)):
raise BlobError('Uncommitted changes')
return self._p_jar._storage.openCommittedBlobFile(
self._p_oid, self._p_serial)
......@@ -186,7 +185,7 @@ class Blob(persistent.Persistent):
if self._p_blob_uncommitted is None:
self._create_uncommitted_file()
result = BlobFile(self._p_blob_uncommitted, mode, self)
else: # 'r+' and 'a'
else: # 'r+' and 'a'
if self._p_blob_uncommitted is None:
# Create a new working copy
self._create_uncommitted_file()
......@@ -214,11 +213,10 @@ class Blob(persistent.Persistent):
def committed(self):
if (self._p_blob_uncommitted
or
not self._p_blob_committed
or
self._p_blob_committed.endswith(SAVEPOINT_SUFFIX)
):
or
not self._p_blob_committed
or
self._p_blob_committed.endswith(SAVEPOINT_SUFFIX)):
raise BlobError('Uncommitted changes')
result = self._p_blob_committed
......@@ -254,7 +252,7 @@ class Blob(persistent.Persistent):
try:
rename_or_copy_blob(filename, target, chmod=False)
except:
except: # noqa: E722 do not use bare 'except'
# Recover from the failed consumption: First remove the file, it
# might exist and mark the pointer to the uncommitted file.
self._p_blob_uncommitted = None
......@@ -317,6 +315,7 @@ class Blob(persistent.Persistent):
self._p_blob_uncommitted = self._p_blob_ref = None
return filename
class BlobFile(file):
"""A BlobFile that holds a file handle to actual blob data.
......@@ -348,8 +347,10 @@ class BlobFile(file):
# prohibit it on all versions.
raise TypeError("Pickling a BlobFile is not allowed")
_pid = str(os.getpid())
def log(msg, level=logging.INFO, subsys=_pid, exc_info=False):
message = "(%s) %s" % (subsys, msg)
logger.log(level, message, exc_info=exc_info)
......@@ -394,8 +395,8 @@ class FilesystemHelper(object):
layout = layout_marker.read().strip()
if layout != self.layout_name:
raise ValueError(
"Directory layout `%s` selected for blob directory %s, but "
"marker found for layout `%s`" %
"Directory layout `%s` selected for blob directory %s, but"
" marker found for layout `%s`" %
(self.layout_name, self.base_dir, layout))
def isSecure(self, path):
......@@ -541,6 +542,7 @@ class NoBlobsFileSystemHelper(object):
class BlobStorageError(Exception):
"""The blob storage encountered an invalid state."""
def auto_layout_select(path):
# A heuristic to look at a path and determine which directory layout to
# use.
......@@ -593,7 +595,7 @@ class BushyLayout(object):
directories = [b'0x' + hex_bytes[x:x+2]
for x in range(0, 16, 2)]
if bytes is not str: # py3
if bytes is not str: # py3
sep_bytes = os.path.sep.encode('ascii')
path_bytes = sep_bytes.join(directories)
return path_bytes.decode('ascii')
......@@ -618,8 +620,10 @@ class BushyLayout(object):
filename = "%s%s" % (utils.tid_repr(tid), BLOB_SUFFIX)
return os.path.join(oid_path, filename)
LAYOUTS['bushy'] = BushyLayout()
class LawnLayout(BushyLayout):
"""A shallow directory layout for blob directories.
......@@ -640,8 +644,10 @@ class LawnLayout(BushyLayout):
except (TypeError, binascii.Error):
raise ValueError('Not a valid OID path: `%s`' % path)
LAYOUTS['lawn'] = LawnLayout()
class BlobStorageMixin(object):
"""A mix-in to help storages support blobs."""
......@@ -738,7 +744,6 @@ class BlobStorage(BlobStorageMixin):
"""A wrapper/proxy storage to support blobs.
"""
def __init__(self, base_directory, storage, layout='automatic'):
assert not ZODB.interfaces.IBlobStorage.providedBy(storage)
self.__storage = storage
......@@ -780,8 +785,8 @@ class BlobStorage(BlobStorageMixin):
def tpc_abort(self, *arg, **kw):
# We need to override the base storage's abort instead of
# providing an _abort method because methods found on the proxied object
# aren't rebound to the proxy
# providing an _abort method because methods found on the proxied
# object aren't rebound to the proxy
self.__storage.tpc_abort(*arg, **kw)
self._blob_tpc_abort()
......@@ -814,7 +819,7 @@ class BlobStorage(BlobStorageMixin):
if exists:
files = os.listdir(oid_path)
files.sort()
latest = files[-1] # depends on ever-increasing tids
latest = files[-1] # depends on ever-increasing tids
files.remove(latest)
for f in files:
remove_committed(os.path.join(oid_path, f))
......@@ -905,7 +910,10 @@ class BlobStorage(BlobStorageMixin):
res = BlobStorage(base_dir, s)
return res
copied = logging.getLogger('ZODB.blob.copied').debug
def rename_or_copy_blob(f1, f2, chmod=True):
"""Try to rename f1 to f2, fallback to copy.
......@@ -926,6 +934,7 @@ def rename_or_copy_blob(f1, f2, chmod=True):
if chmod:
set_not_writable(f2)
if sys.platform == 'win32':
# On Windows, you can't remove read-only files, so make the
# file writable first.
......@@ -952,6 +961,7 @@ def find_global_Blob(module, class_):
if module == 'ZODB.blob' and class_ == 'Blob':
return Blob
def is_blob_record(record):
"""Check whether a database record is a blob record.
......@@ -960,7 +970,8 @@ def is_blob_record(record):
"""
if record and (b'ZODB.blob' in record):
unpickler = PersistentUnpickler(find_global_Blob, None, BytesIO(record))
unpickler = PersistentUnpickler(
find_global_Blob, None, BytesIO(record))
try:
return unpickler.load() is Blob
......@@ -971,6 +982,7 @@ def is_blob_record(record):
return False
def copyTransactionsFromTo(source, destination):
for trans in source.iterator():
destination.tpc_begin(trans, trans.tid, trans.status)
......@@ -990,10 +1002,10 @@ def copyTransactionsFromTo(source, destination):
with open(name, 'wb') as df:
utils.cp(sf, df)
destination.restoreBlob(record.oid, record.tid, record.data,
name, record.data_txn, trans)
name, record.data_txn, trans)
else:
destination.restore(record.oid, record.tid, record.data,
'', record.data_txn, trans)
'', record.data_txn, trans)
destination.tpc_vote(trans)
destination.tpc_finish(trans)
......@@ -1001,6 +1013,8 @@ def copyTransactionsFromTo(source, destination):
NO_WRITE = ~ (stat.S_IWUSR | stat.S_IWGRP | stat.S_IWOTH)
READ_PERMS = stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH
def set_not_writable(path):
perms = stat.S_IMODE(os.lstat(path).st_mode)
......
......@@ -25,6 +25,7 @@ from ZODB._compat import NAME_MAPPING
broken_cache = {}
@zope.interface.implementer(ZODB.interfaces.IBroken)
class Broken(object):
"""Broken object base class
......@@ -99,7 +100,6 @@ class Broken(object):
>>> broken_cache.clear()
"""
__Broken_state__ = __Broken_initargs__ = None
__name__ = 'broken object'
......@@ -131,6 +131,7 @@ class Broken(object):
def __setattr__(self, name, value):
raise BrokenModified("Can't change broken objects")
def find_global(modulename, globalname,
# These are *not* optimizations. Callers can override these.
Broken=Broken, type=type,
......@@ -220,6 +221,7 @@ def find_global(modulename, globalname,
broken_cache[(modulename, globalname)] = class_
return class_
def rebuild(modulename, globalname, *args):
"""Recreate a broken object, possibly recreating the missing class
......@@ -257,10 +259,12 @@ def rebuild(modulename, globalname, *args):
class_ = find_global(modulename, globalname)
return class_.__new__(class_, *args)
class BrokenModified(TypeError):
"""Attempt to modify a broken object
"""
class PersistentBroken(Broken, persistent.Persistent):
r"""Persistent broken objects
......@@ -347,6 +351,7 @@ class PersistentBroken(Broken, persistent.Persistent):
def __getnewargs__(self):
return self.__Broken_newargs__
def persistentBroken(class_):
try:
return class_.__dict__['__Broken_Persistent__']
......@@ -356,5 +361,5 @@ def persistentBroken(class_):
(PersistentBroken, class_),
{'__module__': class_.__module__},
)
)
)
return class_.__dict__['__Broken_Persistent__']
......@@ -29,18 +29,21 @@ _db_schema = None
s_schema_path = os.path.join(ZODB.__path__[0], "storage.xml")
_s_schema = None
def getDbSchema():
global _db_schema
if _db_schema is None:
_db_schema = ZConfig.loadSchema(db_schema_path)
return _db_schema
def getStorageSchema():
global _s_schema
if _s_schema is None:
_s_schema = ZConfig.loadSchema(s_schema_path)
return _s_schema
def databaseFromString(s):
"""Create a database from a database-configuration string.
......@@ -56,6 +59,7 @@ def databaseFromString(s):
"""
return databaseFromFile(StringIO(s))
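A minimal usage sketch:

db = databaseFromString("<zodb>\n  <mappingstorage/>\n</zodb>")
conn = db.open()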
def databaseFromFile(f):
"""Create a database from a file object that provides configuration.
......@@ -64,6 +68,7 @@ def databaseFromFile(f):
config, handle = ZConfig.loadConfigFile(getDbSchema(), f)
return databaseFromConfig(config.database)
def databaseFromURL(url):
"""Load a database from URL (or file name) that provides configuration.
......@@ -72,6 +77,7 @@ def databaseFromURL(url):
config, handler = ZConfig.loadConfig(getDbSchema(), url)
return databaseFromConfig(config.database)
def databaseFromConfig(database_factories):
databases = {}
first = None
......@@ -82,17 +88,20 @@ def databaseFromConfig(database_factories):
return first
def storageFromString(s):
"""Create a storage from a storage-configuration string.
"""
return storageFromFile(StringIO(s))
def storageFromFile(f):
"""Create a storage from a file object providing storage-configuration.
"""
config, handle = ZConfig.loadConfigFile(getStorageSchema(), f)
return storageFromConfig(config.storage)
def storageFromURL(url):
"""\
Create a storage from a URL (or file name) providing storage-configuration.
......@@ -100,9 +109,11 @@ def storageFromURL(url):
config, handler = ZConfig.loadConfig(getStorageSchema(), url)
return storageFromConfig(config.storage)
def storageFromConfig(section):
return section.open()
class BaseConfig(object):
"""Object representing a configured storage or database.
......@@ -124,6 +135,7 @@ class BaseConfig(object):
"""Open and return the storage object."""
raise NotImplementedError
class ZODBDatabase(BaseConfig):
def open(self, databases=None):
......@@ -150,21 +162,23 @@ class ZODBDatabase(BaseConfig):
cache_size_bytes=section.cache_size_bytes,
historical_pool_size=section.historical_pool_size,
historical_cache_size=section.historical_cache_size,
historical_cache_size_bytes=section.historical_cache_size_bytes,
historical_cache_size_bytes=section.historical_cache_size_bytes, # noqa: E501 line too long
historical_timeout=section.historical_timeout,
database_name=section.database_name or self.name or '',
databases=databases,
**options)
except:
except: # noqa: E722 do not use bare 'except'
storage.close()
raise
class MappingStorage(BaseConfig):
def open(self):
from ZODB.MappingStorage import MappingStorage
return MappingStorage(self.config.name)
class DemoStorage(BaseConfig):
def open(self):
......@@ -181,6 +195,7 @@ class DemoStorage(BaseConfig):
from ZODB.DemoStorage import DemoStorage
return DemoStorage(self.config.name, base=base, changes=changes)
class FileStorage(BaseConfig):
def open(self):
......@@ -206,6 +221,7 @@ class FileStorage(BaseConfig):
return FileStorage(config.path, **options)
class BlobStorage(BaseConfig):
def open(self):
......@@ -225,7 +241,8 @@ class ZEOClient(BaseConfig):
if self.config.blob_cache_size is not None:
options['blob_cache_size'] = self.config.blob_cache_size
if self.config.blob_cache_size_check is not None:
options['blob_cache_size_check'] = self.config.blob_cache_size_check
options['blob_cache_size_check'] = (
self.config.blob_cache_size_check)
if self.config.client_label is not None:
options['client_label'] = self.config.client_label
......@@ -249,6 +266,7 @@ class ZEOClient(BaseConfig):
realm=self.config.realm,
**options)
class BDBStorage(BaseConfig):
def open(self):
......@@ -261,12 +279,14 @@ class BDBStorage(BaseConfig):
setattr(bconf, name, getattr(self.config, name))
return storageclass(self.config.envdir, config=bconf)
class BDBMinimalStorage(BDBStorage):
def get_storageclass(self):
import BDBStorage.BDBMinimalStorage
return BDBStorage.BDBMinimalStorage.BDBMinimalStorage
class BDBFullStorage(BDBStorage):
def get_storageclass(self):
......
......@@ -14,21 +14,29 @@
import persistent.mapping
class fixer(object):
def __of__(self, parent):
def __setstate__(state, self=parent):
self._container=state
self._container = state
del self.__setstate__
return __setstate__
fixer=fixer()
class hack(object): pass
hack=hack()
fixer = fixer()
class hack(object):
pass
hack = hack()
def __basicnew__():
r=persistent.mapping.PersistentMapping()
r.__setstate__=fixer
r = persistent.mapping.PersistentMapping()
r.__setstate__ = fixer
return r
hack.__basicnew__=__basicnew__
hack.__basicnew__ = __basicnew__
......@@ -14,5 +14,5 @@
try:
from zope.event import notify
except ImportError:
notify = lambda event: None
def notify(event):
return None
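The lambda-to-def rewrite above is the fix flake8 prescribes for rule E731 ("do not assign a lambda expression, use a def"), the same linter whose E722 and E501 codes are silenced with noqa comments elsewhere in this commit; both spellings are equivalent no-op fallbacks when zope.event is missing:

# flagged spelling:
notify = lambda event: None    # E731

# preferred spelling, same behavior plus a real __name__ for tracebacks:
def notify(event):
    return None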
......@@ -55,17 +55,21 @@ from ZODB._compat import _protocol
def num2str(n):
return struct.pack(">Q", n)[2:]
def str2num(s):
return struct.unpack(">Q", b"\000\000" + s)[0]
def prefix_plus_one(s):
num = str2num(s)
return num2str(num + 1)
def prefix_minus_one(s):
num = str2num(s)
return num2str(num - 1)
def ensure_bytes(s):
# on Python 3 we might pickle bytes and unpickle unicode strings
return s.encode('ascii') if not isinstance(s, bytes) else s
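These helpers store 64-bit values as 6-byte big-endian strings: num2str() drops the two high-order bytes and str2num() restores them as b"\000\000". A standalone round-trip check mirroring the definitions above:

import struct

def num2str(n):
    return struct.pack(">Q", n)[2:]        # keep the low 6 bytes

def str2num(s):
    return struct.unpack(">Q", b"\000\000" + s)[0]

assert num2str(1) == b'\x00\x00\x00\x00\x00\x01'
assert str2num(num2str(2 ** 40 + 5)) == 2 ** 40 + 5
# prefix_plus_one / prefix_minus_one step to the neighboring 6-byte
# prefix, which fsIndex uses to walk its two-level BTree of oids.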
......@@ -80,11 +84,11 @@ class fsIndex(object):
def __getstate__(self):
return dict(
state_version = 1,
_data = [(k, v.toString())
for (k, v) in six.iteritems(self._data)
]
)
state_version=1,
_data=[(k, v.toString())
for (k, v) in six.iteritems(self._data)
]
)
def __setstate__(self, state):
version = state.pop('state_version', 0)
......@@ -96,13 +100,13 @@ class fsIndex(object):
self._data = OOBTree([
(ensure_bytes(k), v)
for (k, v) in self._data.items()
])
])
def _setstate_1(self, state):
self._data = OOBTree([
(ensure_bytes(k), fsBucket().fromString(ensure_bytes(v)))
for (k, v) in state['_data']
])
])
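The state_version key written by __getstate__ and popped by __setstate__ gives the index a pickle-upgrade path: pickles that predate versioning carry no marker and default to 0, current ones dispatch to _setstate_1. The same pattern in miniature, on a hypothetical class:

class Versioned(object):
    def __getstate__(self):
        return dict(state_version=1, value=self.value)

    def __setstate__(self, state):
        # older pickles have no marker and fall back to version 0
        version = state.pop('state_version', 0)
        getattr(self, '_setstate_%s' % version)(state)

    def _setstate_0(self, state):   # legacy layout
        self.value = state['old_value']

    def _setstate_1(self, state):   # current layout
        self.value = state['value']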
def __getitem__(self, key):
assert isinstance(key, bytes)
......@@ -246,7 +250,7 @@ class fsIndex(object):
else:
try:
smallest_suffix = tree.minKey(key[6:])
except ValueError: # 'empty tree' (no suffix >= arg)
except ValueError: # 'empty tree' (no suffix >= arg)
next_prefix = prefix_plus_one(smallest_prefix)
smallest_prefix = self._data.minKey(next_prefix)
tree = self._data[smallest_prefix]
......@@ -270,7 +274,7 @@ class fsIndex(object):
else:
try:
biggest_suffix = tree.maxKey(key[6:])
except ValueError: # 'empty tree' (no suffix <= arg)
except ValueError: # 'empty tree' (no suffix <= arg)
next_prefix = prefix_minus_one(biggest_prefix)
biggest_prefix = self._data.maxKey(next_prefix)
tree = self._data[biggest_prefix]
......
......@@ -94,12 +94,15 @@ def die(mess='', show_docstring=False):
print(__doc__ % sys.argv[0], file=sys.stderr)
sys.exit(1)
class ErrorFound(Exception):
pass
def error(mess, *args):
raise ErrorFound(mess % args)
def read_txn_header(f, pos, file_size, outp, ltid):
# Read the transaction record
f.seek(pos)
......@@ -107,7 +110,7 @@ def read_txn_header(f, pos, file_size, outp, ltid):
if len(h) < 23:
raise EOFError
tid, stl, status, ul, dl, el = unpack(">8s8scHHH",h)
tid, stl, status, ul, dl, el = unpack(">8s8scHHH", h)
status = as_text(status)
tl = u64(stl)
......@@ -157,6 +160,7 @@ def read_txn_header(f, pos, file_size, outp, ltid):
return pos, result, tid
def truncate(f, pos, file_size, outp):
"""Copy data from pos to end of f to a .trNNN file."""
......@@ -176,6 +180,7 @@ def truncate(f, pos, file_size, outp):
f.seek(pos)
tr.close()
def copy(src, dst, n):
while n:
buf = src.read(8096)
......@@ -186,6 +191,7 @@ def copy(src, dst, n):
dst.write(buf)
n -= len(buf)
def scan(f, pos):
"""Return a potential transaction location following pos in f.
......@@ -206,20 +212,21 @@ def scan(f, pos):
s = 0
while 1:
l = data.find(b".", s)
if l < 0:
l_ = data.find(b".", s)
if l_ < 0:
pos += len(data)
break
# If we are less than 8 bytes from the end of the
# string, we need to read more data.
s = l + 1
s = l_ + 1
if s > len(data) - 8:
pos += l
pos += l_
break
tl = u64(data[s:s+8])
if tl < pos:
return pos + s + 8
def iprogress(i):
if i % 2:
print(".", end=' ')
......@@ -227,10 +234,12 @@ def iprogress(i):
print((i/2) % 10, end=' ')
sys.stdout.flush()
def progress(p):
for i in range(p):
iprogress(i)
def main():
try:
opts, args = getopt.getopt(sys.argv[1:], "fv:pP:")
......@@ -256,6 +265,7 @@ def main():
recover(inp, outp, verbose, partial, force, pack)
def recover(inp, outp, verbose=0, partial=False, force=False, pack=None):
print("Recovering", inp, "into", outp)
......@@ -266,7 +276,7 @@ def recover(inp, outp, verbose=0, partial=False, force=False, pack=None):
if f.read(4) != ZODB.FileStorage.packed_version:
die("input is not a file storage")
f.seek(0,2)
f.seek(0, 2)
file_size = f.tell()
ofs = ZODB.FileStorage.FileStorage(outp, create=1)
......@@ -332,11 +342,11 @@ def recover(inp, outp, verbose=0, partial=False, force=False, pack=None):
for r in txn:
if verbose > 1:
if r.data is None:
l = "bp"
l_ = "bp"
else:
l = len(r.data)
l_ = len(r.data)
print("%7d %s %s" % (u64(r.oid), l))
print("%7d %s" % (u64(r.oid), l_))
ofs.restore(r.oid, r.tid, r.data, '', r.data_txn,
txn)
nrec += 1
......@@ -370,7 +380,6 @@ def recover(inp, outp, verbose=0, partial=False, force=False, pack=None):
prog1 = prog1 + 1
iprogress(prog1)
bad = file_size - undone - ofs._pos
print("\n%s bytes removed during recovery" % bad)
......@@ -385,5 +394,6 @@ def recover(inp, outp, verbose=0, partial=False, force=False, pack=None):
ofs.close()
f.close()
if __name__ == "__main__":
main()
......@@ -100,6 +100,7 @@ class TxnHeader(object):
tlen = u64(self._file.read(8))
return TxnHeader(self._file, self._pos - (tlen + 8))
class DataHeader(object):
"""Object representing a data record header.
......@@ -111,7 +112,7 @@ class DataHeader(object):
txn_pos 24-32 position of txn header
version_len 32-34 length of version (always 0)
data_len 34-42 length of data
"""
def __init__(self, file, pos):
......@@ -129,15 +130,16 @@ class DataHeader(object):
self.prev_rec_pos = u64(prev_rec_pos)
self.txn_pos = u64(txn_pos)
self.data_len = u64(data_len)
def next_offset(self):
"""Return offset of next record."""
off = self._pos + self.data_len
off += DATA_HDR_LEN
if self.data_len == 0:
off += 8 # backpointer
off += 8 # backpointer
return off
def prev_txn(f):
"""Return transaction located before current file position."""
f.seek(-8, 1)
......
......@@ -267,6 +267,7 @@ class IConnection(Interface):
separate object.
"""
class IStorageWrapper(Interface):
"""Storage wrapper interface
......@@ -296,7 +297,7 @@ class IStorageWrapper(Interface):
This interface may be implemented by storage adapters or other
intermediaries. For example, a storage adapter that provides
encryption and/or compresssion will apply record transformations
encryption and/or compression will apply record transformations
in its references method.
"""
......@@ -343,7 +344,8 @@ class IStorageWrapper(Interface):
"""Return untransformed data
"""
IStorageDB = IStorageWrapper # for backward compatibility
IStorageDB = IStorageWrapper # for backward compatibility
class IDatabase(IStorageDB):
......@@ -371,7 +373,6 @@ class IDatabase(IStorageDB):
this attribute.
""")
def open(transaction_manager=None, serial=''):
"""Return an IConnection object for use by application code.
......@@ -421,7 +422,6 @@ class IDatabase(IStorageDB):
also included if they don't conflict with the keys above.
"""
def pack(t=None, days=0):
"""Pack the storage, deleting unused object revisions.
......@@ -433,7 +433,7 @@ class IDatabase(IStorageDB):
usually an expensive operation.
There are two optional arguments that can be used to set the
pack time: t, pack time in seconds since the epcoh, and days,
pack time: t, pack time in seconds since the epoch, and days,
the number of days to subtract from t or from the current
time if t is not specified.
"""
......@@ -539,6 +539,7 @@ class IDatabase(IStorageDB):
should also close all the Connections.
"""
class IStorageTransactionMetaData(Interface):
"""Provide storage transaction meta data.
......@@ -628,13 +629,13 @@ class IStorage(Interface):
The format and interpretation of this name is storage
dependent. It could be a file name, a database name, etc.
This is used soley for informational purposes.
This is used solely for informational purposes.
"""
def getSize():
"""An approximate size of the database, in bytes.
This is used soley for informational purposes.
This is used solely for informational purposes.
"""
def history(oid, size=1):
......@@ -660,7 +661,7 @@ class IStorage(Interface):
user_name
The bytes user identifier, if any (or an empty string) of the
user on whos behalf the revision was committed.
user on whose behalf the revision was committed.
description
The bytes transaction description for the transaction that
......@@ -704,7 +705,7 @@ class IStorage(Interface):
def __len__():
"""The approximate number of objects in the storage
This is used soley for informational purposes.
This is used solely for informational purposes.
"""
def loadBefore(oid, tid):
......@@ -821,7 +822,7 @@ class IStorage(Interface):
This call is ignored if the storage is not participating in
two-phase commit or if the given transaction is not the same
as the transaction the storage is commiting.
as the transaction the storage is committing.
"""
def tpc_begin(transaction):
......@@ -837,7 +838,7 @@ class IStorage(Interface):
current transaction ends (commits or aborts).
"""
def tpc_finish(transaction, func = lambda tid: None):
def tpc_finish(transaction, func=lambda tid: None):
"""Finish the transaction, making any transaction changes permanent.
Changes must be made permanent at this point.
......@@ -863,7 +864,7 @@ class IStorage(Interface):
The argument is the same object passed to tpc_begin.
This call raises a StorageTransactionError if the storage
isn't participating in two-phase commit or if it is commiting
isn't participating in two-phase commit or if it is committing
a different transaction.
If a transaction can be committed by a storage, then the
......@@ -901,7 +902,7 @@ class IMultiCommitStorage(IStorage):
the return value is always None.
"""
def tpc_finish(transaction, func = lambda tid: None):
def tpc_finish(transaction, func=lambda tid: None):
"""Finish the transaction, making any transaction changes permanent.
See IStorage.store. For objects implementing this interface,
......@@ -954,7 +955,6 @@ class IStorageRestoreable(IStorage):
# including the existing FileStorage implementation), that
# failed to take into account records after the pack time.
def restore(oid, serial, data, version, prev_txn, transaction):
"""Write data already committed in a separate database
......@@ -996,6 +996,7 @@ class IStorageRecordInformation(Interface):
data = Attribute("The data record, bytes")
data_txn = Attribute("The previous transaction id, bytes")
class IStorageTransactionInformation(IStorageTransactionMetaData):
"""Provide information about a storage transaction.
......@@ -1003,7 +1004,7 @@ class IStorageTransactionInformation(IStorageTransactionMetaData):
Note that this may contain a status field used by FileStorage to
support packing. At some point, this will go away when FileStorage
has a better pack algoritm.
has a better pack algorithm.
"""
tid = Attribute("Transaction id")
......@@ -1034,6 +1035,7 @@ class IStorageIteration(Interface):
"""
class IStorageUndoable(IStorage):
"""A storage supporting transactional undo.
"""
......@@ -1245,6 +1247,7 @@ class IMVCCStorage(IStorage):
A POSKeyError is raised if there is no record for the object id.
"""
class IMVCCPrefetchStorage(IMVCCStorage):
def prefetch(oids):
......@@ -1254,6 +1257,7 @@ class IMVCCPrefetchStorage(IMVCCStorage):
more than once.
"""
class IMVCCAfterCompletionStorage(IMVCCStorage):
def afterCompletion():
......@@ -1264,6 +1268,7 @@ class IMVCCAfterCompletionStorage(IMVCCStorage):
See ``transaction.interfaces.ISynchronizer.afterCompletion``.
"""
class IStorageCurrentRecordIteration(IStorage):
def record_iternext(next=None):
......@@ -1271,6 +1276,7 @@ class IStorageCurrentRecordIteration(IStorage):
Use like this:
>>> storage = ...
>>> next = None
>>> while 1:
... oid, tid, data, next = storage.record_iternext(next)
......@@ -1280,24 +1286,26 @@ class IStorageCurrentRecordIteration(IStorage):
"""
class IExternalGC(IStorage):
def deleteObject(oid, serial, transaction):
"""Mark an object as deleted
This method marks an object as deleted via a new object
revision. Subsequent attempts to load current data for the
object will fail with a POSKeyError, but loads for
non-current data will succeed if there are previous
non-delete records. The object will be removed from the
storage when all not-delete records are removed.
The serial argument must match the most recently committed
serial for the object. This is a seat belt.
This method can only be called in the first phase of 2-phase
commit.
"""
class ReadVerifyingStorage(IStorage):
......@@ -1315,6 +1323,7 @@ class ReadVerifyingStorage(IStorage):
through the end of the transaction.
"""
class IBlob(Interface):
"""A BLOB supports efficient handling of large data within ZODB."""
......@@ -1325,7 +1334,7 @@ class IBlob(Interface):
mode: Mode to open the file with. Possible values: r,w,r+,a,c
The mode 'c' is similar to 'r', except that an orinary file
The mode 'c' is similar to 'r', except that an ordinary file
object is returned and may be used in a separate transaction
and after the blob's database connection has been closed.
......@@ -1335,8 +1344,8 @@ class IBlob(Interface):
"""Return a file name for committed data.
The returned file name may be opened for reading or handed to
other processes for reading. The file name isn't guarenteed
to be valid indefinately. The file may be removed in the
other processes for reading. The file name isn't guaranteed
to be valid indefinitely. The file may be removed in the
future as a result of garbage collection depending on system
configuration.
......@@ -1412,6 +1421,7 @@ class IBlobStorage(Interface):
If Blobs use this, then commits can be performed with a simple rename.
"""
class IBlobStorageRestoreable(IBlobStorage, IStorageRestoreable):
def restoreBlob(oid, serial, data, blobfilename, prev_txn, transaction):
......@@ -1446,6 +1456,7 @@ class IBroken(Interface):
__Broken_initargs__ = Attribute("Arguments passed to __init__.")
__Broken_state__ = Attribute("Value passed to __setstate__.")
class BlobError(Exception):
pass
......
......@@ -12,6 +12,7 @@ import zope.interface
from . import interfaces, serialize, POSException
from .utils import p64, u64, Lock, oid_repr, tid_repr
class Base(object):
_copy_methods = (
......@@ -19,7 +20,7 @@ class Base(object):
'loadBlob', 'openCommittedBlobFile',
'isReadOnly', 'supportsUndo', 'undoLog', 'undoInfo',
'temporaryDirectory',
)
)
def __init__(self, storage):
self._storage = storage
......@@ -37,6 +38,7 @@ class Base(object):
def __len__(self):
return len(self._storage)
class MVCCAdapter(Base):
def __init__(self, storage):
......@@ -63,6 +65,7 @@ class MVCCAdapter(Base):
self._instances.remove(instance)
closed = False
def close(self):
if not self.closed:
self.closed = True
......@@ -92,14 +95,15 @@ class MVCCAdapter(Base):
def pack(self, pack_time, referencesf):
return self._storage.pack(pack_time, referencesf)
class MVCCAdapterInstance(Base):
_copy_methods = Base._copy_methods + (
'loadSerial', 'new_oid', 'tpc_vote',
'checkCurrentSerialInTransaction', 'tpc_abort',
)
)
_start = None # Transaction start time
_start = None # Transaction start time
_ltid = b'' # Last storage transaction id
def __init__(self, base):
......@@ -107,7 +111,7 @@ class MVCCAdapterInstance(Base):
Base.__init__(self, base._storage)
self._lock = Lock()
self._invalidations = set()
self._sync = getattr(self._storage, 'sync', lambda : None)
self._sync = getattr(self._storage, 'sync', lambda: None)
def release(self):
self._base._release(self)
......@@ -175,8 +179,8 @@ class MVCCAdapterInstance(Base):
# into account, and raise ReadConflictError only in the presence of
# database being simultaneously updated from back of its log.
raise POSException.ReadConflictError(
"load %s @%s: object deleted, likely by simultaneous pack" %
(oid_repr(oid), tid_repr(p64(u64(self._start) - 1))))
"load %s @%s: object deleted, likely by simultaneous pack" %
(oid_repr(oid), tid_repr(p64(u64(self._start) - 1))))
return r[:2]
......@@ -189,8 +193,8 @@ class MVCCAdapterInstance(Base):
else:
raise
_modified = None # Used to keep track of oids modified within a
# transaction, so we can invalidate them later.
_modified = None # Used to keep track of oids modified within a
# transaction, so we can invalidate them later.
def tpc_begin(self, transaction):
self._storage.tpc_begin(transaction)
......@@ -205,7 +209,7 @@ class MVCCAdapterInstance(Base):
oid, serial, data, blobfilename, '', transaction)
self._modified.add(oid)
def tpc_finish(self, transaction, func = lambda tid: None):
def tpc_finish(self, transaction, func=lambda tid: None):
modified = self._modified
self._modified = None
......@@ -216,9 +220,11 @@ class MVCCAdapterInstance(Base):
return self._storage.tpc_finish(transaction, invalidate_finish)
def read_only_writer(self, *a, **kw):
raise POSException.ReadOnlyError
class HistoricalStorageAdapter(Base):
"""Adapt a storage to a historical storage
"""
......@@ -226,7 +232,7 @@ class HistoricalStorageAdapter(Base):
_copy_methods = Base._copy_methods + (
'loadSerial', 'tpc_begin', 'tpc_finish', 'tpc_abort', 'tpc_vote',
'checkCurrentSerialInTransaction',
)
)
def __init__(self, storage, before=None):
Base.__init__(self, storage)
......@@ -267,7 +273,7 @@ class UndoAdapterInstance(Base):
_copy_methods = Base._copy_methods + (
'tpc_abort',
)
)
def __init__(self, base):
self._base = base
......@@ -293,7 +299,7 @@ class UndoAdapterInstance(Base):
if result:
self._undone.update(result)
def tpc_finish(self, transaction, func = lambda tid: None):
def tpc_finish(self, transaction, func=lambda tid: None):
def invalidate_finish(tid):
self._base._invalidate_finish(tid, self._undone, None)
......
......@@ -63,6 +63,7 @@ class _p_DataDescr(object):
def __delete__(self, inst):
raise AttributeError(self.__name__)
class _p_oid_or_jar_Descr(_p_DataDescr):
# Special descr for _p_oid and _p_jar that loads
# state when set if both are set and _p_changed is None
......@@ -78,11 +79,11 @@ class _p_oid_or_jar_Descr(_p_DataDescr):
jar = get('_p_jar')
if (jar is not None
and get('_p_oid') is not None
and get('_p_changed') is None
):
and get('_p_oid') is not None
and get('_p_changed') is None):
jar.setstate(inst)
class _p_ChangedDescr(object):
# descriptor to handle special weird semantics of _p_changed
......@@ -99,6 +100,7 @@ class _p_ChangedDescr(object):
def __delete__(self, inst):
inst._p_invalidate()
class _p_MethodDescr(object):
"""Provide unassignable class attributes
"""
......@@ -120,6 +122,7 @@ class _p_MethodDescr(object):
special_class_descrs = '__dict__', '__weakref__'
class PersistentMetaClass(type):
_p_jar = _p_oid_or_jar_Descr('_p_jar')
......@@ -148,8 +151,8 @@ class PersistentMetaClass(type):
and
(get('_p_oid') is not None)
and
(get('_p_changed') == False)
):
(get('_p_changed') is False)
):
self._p_changed = True
data_manager.register(self)
......@@ -177,7 +180,6 @@ class PersistentMetaClass(type):
_p_invalidate = _p_MethodDescr(_p_invalidate)
def __getstate__(self):
return (self.__bases__,
dict([(k, v) for (k, v) in self.__dict__.items()
......@@ -185,7 +187,7 @@ class PersistentMetaClass(type):
or k.startswith('_v_')
or k in special_class_descrs
)
]),
]),
)
__getstate__ = _p_MethodDescr(__getstate__)
......
......@@ -9,7 +9,6 @@ from ZODB.FileStorage import FileStorage
from ZODB._compat import PersistentUnpickler, BytesIO
class FakeError(Exception):
def __init__(self, module, name):
Exception.__init__(self)
......@@ -41,20 +40,22 @@ class Report(object):
self.FOIDS = 0
self.FBYTES = 0
def shorten(s, n):
l = len(s)
if l <= n:
length = len(s)
if length <= n:
return s
while len(s) + 3 > n: # account for ...
while len(s) + 3 > n: # account for ...
i = s.find(".")
if i == -1:
# In the worst case, just return the rightmost n bytes
return s[-n:]
else:
s = s[i + 1:]
l = len(s)
length = len(s)
return "..." + s
def report(rep):
print("Processed %d records in %d transactions" % (rep.OIDS, rep.TIDS))
print("Average record size is %7.2f bytes" % (rep.DBYTES * 1.0 / rep.OIDS))
......@@ -63,8 +64,8 @@ def report(rep):
print("Types used:")
fmt = "%-46s %7s %9s %6s %7s"
fmtp = "%-46s %7d %9d %5.1f%% %7.2f" # per-class format
fmts = "%46s %7d %8dk %5.1f%% %7.2f" # summary format
fmtp = "%-46s %7d %9d %5.1f%% %7.2f" # per-class format
fmts = "%46s %7d %8dk %5.1f%% %7.2f" # summary format
print(fmt % ("Class Name", "Count", "TBytes", "Pct", "AvgSize"))
print(fmt % ('-'*46, '-'*7, '-'*9, '-'*5, '-'*7))
typemap = sorted(rep.TYPEMAP)
......@@ -76,8 +77,9 @@ def report(rep):
pct, rep.TYPESIZE[t] * 1.0 / rep.TYPEMAP[t]))
print(fmt % ('='*46, '='*7, '='*9, '='*5, '='*7))
print("%46s %7d %9s %6s %6.2fk" % ('Total Transactions', rep.TIDS, ' ',
' ', rep.DBYTES * 1.0 / rep.TIDS / 1024.0))
print("%46s %7d %9s %6s %6.2fk" % (
'Total Transactions', rep.TIDS, ' ', ' ',
rep.DBYTES * 1.0 / rep.TIDS / 1024.0))
print(fmts % ('Total Records', rep.OIDS, rep.DBYTES / 1024.0, cumpct,
rep.DBYTES * 1.0 / rep.OIDS))
......@@ -89,6 +91,7 @@ def report(rep):
rep.FBYTES * 100.0 / rep.DBYTES,
rep.FBYTES * 1.0 / rep.FOIDS))
def analyze(path):
fs = FileStorage(path, read_only=1)
fsi = fs.iterator()
......@@ -97,11 +100,13 @@ def analyze(path):
analyze_trans(report, txn)
return report
def analyze_trans(report, txn):
report.TIDS += 1
for rec in txn:
analyze_rec(report, rec)
def get_type(record):
try:
unpickled = FakeUnpickler(BytesIO(record.data)).load()
......@@ -114,6 +119,7 @@ def get_type(record):
else:
return str(classinfo)
def analyze_rec(report, record):
oid = record.oid
report.OIDS += 1
......@@ -121,7 +127,7 @@ def analyze_rec(report, record):
# No pickle -- aborted version or undo of object creation.
return
try:
size = len(record.data) # Ignores various overhead
size = len(record.data) # Ignores various overhead
report.DBYTES += size
if oid not in report.OIDMAP:
type = get_type(record)
......@@ -142,6 +148,7 @@ def analyze_rec(report, record):
except Exception as err:
print(err)
if __name__ == "__main__":
path = sys.argv[1]
report(analyze(path))
......@@ -19,18 +19,21 @@ oids_seen = {}
# Append (obj, path) to L if and only if obj is a persistent object
# and we haven't seen it before.
def add_if_new_persistent(L, obj, path):
global oids_seen
getattr(obj, '_', None) # unghostify
getattr(obj, '_', None) # unghostify
if hasattr(obj, '_p_oid'):
oid = obj._p_oid
if oid not in oids_seen:
L.append((obj, path))
oids_seen[oid] = 1
def get_subobjects(obj):
getattr(obj, '_', None) # unghostify
getattr(obj, '_', None) # unghostify
sub = []
try:
attrs = obj.__dict__.items()
......@@ -55,22 +58,23 @@ def get_subobjects(obj):
while 1:
try:
elt = obj[i]
except:
except: # noqa: E722 do not use bare 'except'
break
sub.append(("[%d]" % i, elt))
i += 1
return sub
def main(fname=None):
if fname is None:
import sys
try:
fname, = sys.argv[1:]
except:
except: # noqa: E722 do not use bare 'except'
print(__doc__)
sys.exit(2)
fs = FileStorage(fname, read_only=1)
cn = ZODB.DB(fs).open()
rt = cn.root()
......@@ -116,5 +120,6 @@ def main(fname=None):
print("total", len(fs._index), "found", found)
if __name__ == "__main__":
main()
......@@ -43,9 +43,11 @@ import sys
from ZODB.FileStorage.fsoids import Tracer
def usage():
print(__doc__)
def main():
import getopt
......@@ -64,7 +66,7 @@ def main():
c = Tracer(args[0])
for oid in args[1:]:
as_int = int(oid, 0) # 0 == auto-detect base
as_int = int(oid, 0) # 0 == auto-detect base
c.register_oids(as_int)
if path is not None:
for line in open(path):
......@@ -75,5 +77,6 @@ def main():
c.run()
c.report()
if __name__ == "__main__":
main()
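Base 0 in the int(oid, 0) call in main() above means "infer the base from the literal's prefix", so oids may be given on the command line in decimal, hex, or octal alike:

assert int("16", 0) == 16      # decimal
assert int("0x10", 0) == 16    # hex
assert int("0o20", 0) == 16    # octal, Python 3 spelling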
......@@ -74,6 +74,8 @@ from BTrees.QQBTree import QQBTree
# There's a problem with oid. 'data' is its pickle, and 'serial' its
# serial number. 'missing' is a list of (oid, class, reason) triples,
# explaining what the problem(s) is(are).
def report(oid, data, serial, missing):
from_mod, from_class = get_pickle_metadata(data)
if len(missing) > 1:
......@@ -92,6 +94,7 @@ def report(oid, data, serial, missing):
print("\toid %s %s: %r" % (oid_repr(oid), reason, description))
print()
def main(path=None):
verbose = 0
if path is None:
......@@ -105,7 +108,6 @@ def main(path=None):
path, = args
fs = FileStorage(path, read_only=1)
# Set of oids in the index that failed to load due to POSKeyError.
......@@ -122,7 +124,7 @@ def main(path=None):
# build {pos -> oid} index that is reverse to {oid -> pos} fs._index
# we'll need this to iterate objects in order of ascending file position to
# optimize disk IO.
pos2oid = QQBTree() # pos -> u64(oid)
pos2oid = QQBTree() # pos -> u64(oid)
for oid, pos in fs._index.iteritems():
pos2oid[pos] = u64(oid)
......@@ -137,14 +139,14 @@ def main(path=None):
raise
except POSKeyError:
undone[oid] = 1
except:
except: # noqa: E722 do not use bare 'except'
if verbose:
traceback.print_exc()
noload[oid] = 1
# pass 2: go through all objects again and verify that their references do
# not point to problematic object set. Iterate objects in order of ascending
# file position to optimize disk IO.
# not point to problematic object set. Iterate objects in order of
# ascending file position to optimize disk IO.
inactive = noload.copy()
inactive.update(undone)
for oid64 in pos2oid.itervalues():
......@@ -153,7 +155,7 @@ def main(path=None):
continue
data, serial = load_current(fs, oid)
refs = get_refs(data)
missing = [] # contains 3-tuples of oid, klass-metadata, reason
missing = [] # contains 3-tuples of oid, klass-metadata, reason
for ref, klass in refs:
if klass is None:
klass = '<unknown>'
......@@ -166,5 +168,6 @@ def main(path=None):
if missing:
report(oid, data, serial, missing)
if __name__ == "__main__":
main()
......@@ -9,6 +9,7 @@ from six.moves import filter
rx_txn = re.compile(r"tid=([0-9a-f]+).*size=(\d+)")
rx_data = re.compile(r"oid=([0-9a-f]+) size=(\d+) class=(\S+)")
def sort_byhsize(seq, reverse=False):
L = [(v.size(), k, v) for k, v in seq]
L.sort()
......@@ -16,6 +17,7 @@ def sort_byhsize(seq, reverse=False):
L.reverse()
return [(k, v) for n, k, v in L]
class Histogram(dict):
def add(self, size):
......@@ -93,6 +95,7 @@ class Histogram(dict):
i * binsize, n, p, pc, "*" * (n // dot)))
print()
def class_detail(class_size):
# summary of classes
fmt = "%5s %6s %6s %6s %-50.50s"
......@@ -110,6 +113,7 @@ def class_detail(class_size):
continue
h.report("Object size for %s" % klass, usebins=True)
def revision_detail(lifetimes, classes):
# Report per-class details for any object modified more than once
for name, oids in six.iteritems(classes):
......@@ -124,17 +128,18 @@ def revision_detail(lifetimes, classes):
if keep:
h.report("Number of revisions for %s" % name, binsize=10)
def main(path=None):
if path is None:
path = sys.argv[1]
txn_objects = Histogram() # histogram of txn size in objects
txn_bytes = Histogram() # histogram of txn size in bytes
obj_size = Histogram() # histogram of object size
n_updates = Histogram() # oid -> num updates
n_classes = Histogram() # class -> num objects
lifetimes = {} # oid -> list of tids
class_size = {} # class -> histogram of object size
classes = {} # class -> list of oids
txn_objects = Histogram() # histogram of txn size in objects
txn_bytes = Histogram() # histogram of txn size in bytes
obj_size = Histogram() # histogram of object size
n_updates = Histogram() # oid -> num updates
n_classes = Histogram() # class -> num objects
lifetimes = {} # oid -> list of tids
class_size = {} # class -> histogram of object size
classes = {} # class -> list of oids
MAX = 0
objects = 0
......@@ -203,5 +208,6 @@ def main(path=None):
class_detail(class_size)
if __name__ == "__main__":
main()
......@@ -25,6 +25,7 @@ try:
except ImportError:
from sha import sha as sha1
def main(path, ntxn):
with open(path, "rb") as f:
f.seek(0, 2)
......@@ -32,7 +33,6 @@ def main(path, ntxn):
i = ntxn
while th and i > 0:
hash = sha1(th.get_raw_data()).digest()
l = len(str(th.get_timestamp())) + 1
th.read_meta()
print("%s: hash=%s" % (th.get_timestamp(),
binascii.hexlify(hash).decode()))
......@@ -42,6 +42,7 @@ def main(path, ntxn):
th = th.prev_txn()
i -= 1
def Main():
ntxn = 10
opts, args = getopt.getopt(sys.argv[1:], "n:")
......@@ -51,5 +52,6 @@ def Main():
ntxn = int(v)
main(path, ntxn)
if __name__ == "__main__":
Main()
......@@ -41,13 +41,16 @@ import struct
import sys
from ZODB._compat import FILESTORAGE_MAGIC
class FormatError(ValueError):
"""There is a problem with the format of the FileStorage."""
class Status(object):
checkpoint = b'c'
undone = b'u'
packed_version = FILESTORAGE_MAGIC
TREC_HDR_LEN = 23
......@@ -55,6 +58,7 @@ DREC_HDR_LEN = 42
VERBOSE = 0
def hexify(s):
r"""Format an 8-bit string as hex
......@@ -64,17 +68,20 @@ def hexify(s):
"""
return '0x' + binascii.hexlify(s).decode()
def chatter(msg, level=1):
if VERBOSE >= level:
sys.stdout.write(msg)
def U64(v):
"""Unpack an 8-byte string as a 64-bit long"""
h, l = struct.unpack(">II", v)
h, l_ = struct.unpack(">II", v)
if h:
return (h << 32) + l
return (h << 32) + l_
else:
return l
return l_
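U64() splits the 8 bytes into two big-endian 32-bit halves and recombines them; a standalone round-trip check of the renamed code:

import struct

def U64(v):
    h, l_ = struct.unpack(">II", v)
    return (h << 32) + l_ if h else l_

assert U64(struct.pack(">Q", 7)) == 7                      # high half zero
assert U64(struct.pack(">Q", 2 ** 40 + 7)) == 2 ** 40 + 7  # both halves set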
def check(path):
with open(path, 'rb') as file:
......@@ -87,7 +94,7 @@ def check(path):
raise FormatError("invalid file header")
pos = 4
tid = b'\000' * 8 # lowest possible tid to start
tid = b'\000' * 8 # lowest possible tid to start
i = 0
while pos:
_pos = pos
......@@ -106,7 +113,7 @@ def check_trec(path, file, pos, ltid, file_size):
used for generating error messages.
"""
h = file.read(TREC_HDR_LEN) #XXX must be bytes under Py3k
h = file.read(TREC_HDR_LEN) # XXX must be bytes under Py3k
if not h:
return None, None
if len(h) != TREC_HDR_LEN:
......@@ -120,7 +127,7 @@ def check_trec(path, file, pos, ltid, file_size):
(path, pos, hexify(tid), hexify(ltid)))
ltid = tid
tl = U64(stl) # transaction record length - 8
tl = U64(stl) # transaction record length - 8
if pos + tl + 8 > file_size:
raise FormatError("%s truncated possibly because of"
" damaged records at %s" % (path, pos))
......@@ -140,7 +147,7 @@ def check_trec(path, file, pos, ltid, file_size):
if status != Status.undone:
pos = tpos + tmeta_len
file.read(ul + dl + el) # skip transaction metadata
file.read(ul + dl + el) # skip transaction metadata
i = 0
while pos < tend:
......@@ -162,6 +169,7 @@ def check_trec(path, file, pos, ltid, file_size):
pos = tend + 8
return pos, tid
def check_drec(path, file, pos, tpos, tid):
"""Check a data record for the current transaction record"""
......@@ -170,7 +178,7 @@ def check_drec(path, file, pos, tpos, tid):
raise FormatError("%s truncated at %s" % (path, pos))
oid, serial, _prev, _tloc, vlen, _plen = (
struct.unpack(">8s8s8s8sH8s", h))
prev = U64(_prev)
U64(_prev)
tloc = U64(_tloc)
plen = U64(_plen)
dlen = DREC_HDR_LEN + (plen or 8)
......@@ -178,8 +186,8 @@ def check_drec(path, file, pos, tpos, tid):
if vlen:
dlen = dlen + 16 + vlen
file.seek(8, 1)
pv = U64(file.read(8))
file.seek(vlen, 1) # skip the version data
U64(file.read(8))
file.seek(vlen, 1) # skip the version data
if tloc != tpos:
raise FormatError("%s data record exceeds transaction record "
......@@ -195,9 +203,11 @@ def check_drec(path, file, pos, tpos, tid):
return pos, oid
def usage():
sys.exit(__doc__)
def main(args=None):
if args is None:
args = sys.argv[1:]
......@@ -221,5 +231,6 @@ def main(args=None):
chatter("no errors detected")
if __name__ == "__main__":
main()
......@@ -6,12 +6,12 @@ Note: To run this test script fstest.py must be on your PYTHONPATH.
from cStringIO import StringIO
import re
import struct
import unittest
import ZODB.tests.util
import fstest
from fstest import FormatError, U64
class TestCorruptedFS(ZODB.tests.util.TestCase):
f = open('test-checker.fs', 'rb')
......@@ -117,7 +117,7 @@ class TestCorruptedFS(ZODB.tests.util.TestCase):
self._file.write(data)
buf = self._datafs.read(tl - 8)
self._file.write(buf[0])
assert tl <= 1<<16, "can't use this transaction for this test"
assert tl <= 1 << 16, "can't use this transaction for this test"
self._file.write("\777\777")
self._file.write(buf[3:])
self.detectsError("invalid transaction header")
......@@ -172,6 +172,3 @@ class TestCorruptedFS(ZODB.tests.util.TestCase):
self._file.write("\000" * 4 + "\077" + "\000" * 3)
self._file.write(data[32:])
self.detectsError("record exceeds transaction")
if __name__ == "__main__":
unittest.main()
......@@ -82,7 +82,7 @@ import profile
from persistent.timestamp import TimeStamp
from ZODB import utils
from ZODB import StorageTypes # XXX: This import does not exist
from ZODB import StorageTypes # XXX: This import does not exist
PROGRAM = sys.argv[0]
......@@ -130,7 +130,7 @@ def main():
elif opt in ('-v', '--verbose'):
options.verbose += 1
elif opt in ('-T', '--storage_types'):
print_types()
print('Unknown option.')
sys.exit(0)
elif opt in ('-S', '--stype'):
options.stype = arg
......@@ -247,16 +247,16 @@ def doit(srcdb, dstdb, options):
t = TimeStamp(tid)
if t <= ts:
if ok:
print((
'Time stamps are out of order %s, %s' % (ts, t)), file=sys.stderr)
print('Time stamps are out of order %s, %s' % (ts, t),
file=sys.stderr)
ok = False
ts = t.laterThan(ts)
tid = ts.raw()
else:
ts = t
if not ok:
print((
'Time stamps are back in order %s' % t), file=sys.stderr)
print('Time stamps are back in order %s' % t,
file=sys.stderr)
ok = True
if verbose > 1:
print(ts)
......@@ -310,7 +310,7 @@ def doit(srcdb, dstdb, options):
tidstr = utils.U64(tid)
format = "%4d. %20s %6d %8d %6.4f %6.4f %6.4f %6.4f %6.4f"
print(format % (skipper, tidstr, objects, size,
t4-t0, t1-t0, t2-t1, t3-t2, t4-t3), file=outfp)
t4-t0, t1-t0, t2-t1, t3-t2, t4-t3), file=outfp)
total_pickle_size += size
total_object_count += objects
......
......@@ -23,17 +23,17 @@ from ZODB.blob import FilesystemHelper
from ZODB.utils import oid_repr
def link_or_copy(f1, f2):
try:
os.link(f1, f2)
except OSError:
shutil.copy(f1, f2)
# Check if we actually have link
try:
os.link
except AttributeError:
link_or_copy = shutil.copy
else:
def link_or_copy(f1, f2):
try:
os.link(f1, f2)
except OSError:
shutil.copy(f1, f2)
def migrate(source, dest, layout):
......
......@@ -13,6 +13,7 @@ from ZODB.utils import U64, get_pickle_metadata, load_current
from ZODB.serialize import referencesf
from six.moves import filter
def find_paths(root, maxdist):
"""Find Python attribute traversal paths for objects to maxdist distance.
......@@ -37,7 +38,7 @@ def find_paths(root, maxdist):
if oid is not None:
paths[oid] = path
if dist < maxdist:
getattr(obj, 'foo', None) # unghostify
getattr(obj, 'foo', None) # unghostify
try:
items = obj.__dict__.items()
except AttributeError:
......@@ -48,6 +49,7 @@ def find_paths(root, maxdist):
return paths
def main(path):
fs = FileStorage(path, read_only=1)
if PACK:
......@@ -60,6 +62,7 @@ def main(path):
def total_size(oid):
cache = {}
cache_size = 1000
def _total_size(oid, seen):
v = cache.get(oid)
if v is not None:
......@@ -91,10 +94,11 @@ def main(path):
for oid in keys:
data, serialno = load_current(fs, oid)
mod, klass = get_pickle_metadata(data)
refs = referencesf(data)
referencesf(data)
path = paths.get(oid, '-')
print(fmt % (U64(oid), len(data), total_size(oid), path, mod, klass))
def Main():
import sys
import getopt
......@@ -122,5 +126,6 @@ def Main():
VERBOSE += 1
main(path)
if __name__ == "__main__":
Main()
......@@ -18,6 +18,7 @@ $Id$
from ZODB.serialize import referencesf
def referrers(storage):
result = {}
for transaction in storage.iterator():
......
......@@ -85,6 +85,7 @@ Options for -V/--verify:
Verify file sizes only (skip md5 checksums).
"""
from __future__ import print_function
import re
import os
import shutil
import sys
......@@ -176,7 +177,7 @@ def parseargs(argv):
'date=',
'output=',
'with-verification',
])
])
except getopt.error as msg:
usage(1, msg)
......@@ -299,6 +300,8 @@ def fsync(afile):
# Return the total number of bytes read == the total number of bytes
# passed in all to func(). Leaves the file position just after the
# last byte read.
def dofile(func, fp, n=None):
bytesread = 0
while n is None or n > 0:
......@@ -320,6 +323,7 @@ def dofile(func, fp, n=None):
def checksum(fp, n):
# Checksum the first n bytes of the specified file
sum = md5()
def func(data):
sum.update(data)
dofile(func, fp, n)
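dofile() is the chunked-read workhorse the comment above describes: it feeds the file to func one block at a time and returns the byte count. A sketch of that contract (the 8 KB chunk size is an assumption, not necessarily repozo's exact figure):

import io
from hashlib import md5

def dofile(func, fp, n=None):
    """Pass up to n bytes of fp (all of it if n is None) to func in chunks."""
    bytesread = 0
    while n is None or n > 0:
        todo = 8192 if n is None else min(n, 8192)
        data = fp.read(todo)
        if not data:
            break
        func(data)
        bytesread += len(data)
        if n is not None:
            n -= len(data)
    return bytesread

# usage mirroring checksum(): digest the first kilobyte of a file
fp = io.BytesIO(b"x" * 5000)
digest = md5()
assert dofile(digest.update, fp, 1024) == 1024
assert fp.tell() == 1024   # position left just past the last byte read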
......@@ -336,6 +340,7 @@ def file_size(fp):
def checksum_and_size(fp):
# Checksum and return it with the size of the file
sum = md5()
def func(data):
sum.update(data)
size = dofile(func, fp, None)
......@@ -374,6 +379,7 @@ def concat(files, ofp=None):
# given. Return the number of bytes written and the md5 checksum of the
# bytes.
sum = md5()
def func(data):
sum.update(data)
if ofp:
......@@ -393,6 +399,7 @@ def concat(files, ofp=None):
def gen_filedate(options):
return getattr(options, 'test_now', time.gmtime()[:6])
def gen_filename(options, ext=None, now=None):
if ext is None:
if options.full:
......@@ -412,10 +419,11 @@ def gen_filename(options, ext=None, now=None):
# files, from the time of the most recent full backup preceding
# options.date, up to options.date.
import re
is_data_file = re.compile(r'\d{4}(?:-\d\d){5}\.(?:delta)?fsz?$').match
del re
def find_files(options):
when = options.date
if not when:
......@@ -455,10 +463,11 @@ def find_files(options):
#
# None, None, None, None
def scandat(repofiles):
fullfile = repofiles[0]
datfile = os.path.splitext(fullfile)[0] + '.dat'
fn = startpos = endpos = sum = None # assume .dat file missing or empty
fn = startpos = endpos = sum = None # assume .dat file missing or empty
try:
fp = open(datfile)
except IOError as e:
......@@ -475,6 +484,7 @@ def scandat(repofiles):
return fn, startpos, endpos, sum
def delete_old_backups(options):
# Delete all full backup files except for the most recent full backup file
all = sorted(filter(is_data_file, os.listdir(options.repository)))
......@@ -515,6 +525,7 @@ def delete_old_backups(options):
pass
os.unlink(os.path.join(options.repository, fname))
def do_full_backup(options):
options.full = True
tnow = gen_filedate(options)
......@@ -714,7 +725,8 @@ def do_recover(options):
"%s has checksum %s instead of %s" % (
repofile, reposum, expected_truth['sum']))
totalsz += reposz
log("Recovered chunk %s : %s bytes, md5: %s", repofile, reposz, reposum)
log("Recovered chunk %s : %s bytes, md5: %s",
repofile, reposz, reposum)
log("Recovered a total of %s bytes", totalsz)
else:
reposz, reposum = concat(repofiles, outfp)
......@@ -725,7 +737,8 @@ def do_recover(options):
source_index = '%s.index' % last_base
target_index = '%s.index' % options.output
if os.path.exists(source_index):
log('Restoring index file %s to %s', source_index, target_index)
log('Restoring index file %s to %s',
source_index, target_index)
shutil.copyfile(source_index, target_index)
else:
log('No index file to restore: %s', source_index)
......@@ -737,8 +750,8 @@ def do_recover(options):
try:
os.rename(temporary_output_file, options.output)
except OSError:
log("ZODB has been fully recovered as %s, but it cannot be renamed into : %s",
temporary_output_file, options.output)
log("ZODB has been fully recovered as %s, but it cannot be renamed"
" into : %s", temporary_output_file, options.output)
raise
......@@ -759,10 +772,12 @@ def do_verify(options):
log("Verifying %s", filename)
try:
if filename.endswith('fsz'):
actual_sum, size = get_checksum_and_size_of_gzipped_file(filename, options.quick)
actual_sum, size = get_checksum_and_size_of_gzipped_file(
filename, options.quick)
when_uncompressed = ' (when uncompressed)'
else:
actual_sum, size = get_checksum_and_size_of_file(filename, options.quick)
actual_sum, size = get_checksum_and_size_of_file(
filename, options.quick)
when_uncompressed = ''
except IOError:
error("%s is missing", filename)
......
......@@ -12,6 +12,7 @@ from ZODB.FileStorage import FileStorage
from ZODB.utils import U64, get_pickle_metadata, load_current
import six
def run(path, v=0):
fs = FileStorage(path, read_only=1)
# break into the file implementation
......@@ -31,12 +32,13 @@ def run(path, v=0):
if v:
print("%8s %5d %s" % (U64(oid), len(data), key))
L = totals.items()
L.sort(lambda a, b: cmp(a[1], b[1]))
L.sort(key=lambda x: x[1])
L.reverse()
print("Totals per object class:")
for key, (bytes, count) in L:
print("%8d %8d %s" % (count, bytes, key))
def main():
import sys
import getopt
......@@ -56,5 +58,6 @@ def main():
path = args[0]
run(path, v)
if __name__ == "__main__":
main()
......@@ -38,6 +38,7 @@ checker = zope.testing.renormalizing.RENormalizing([
"length=<LENGTH> offset=4 (+48)"),
])
def test_suite():
return unittest.TestSuite((
doctest.DocFileSuite(
......@@ -45,4 +46,4 @@ def test_suite():
'fstail.txt',
setUp=ZODB.tests.util.setUp, tearDown=ZODB.tests.util.tearDown,
checker=checker),
))
))
......@@ -57,6 +57,3 @@ class FsdumpFsstatsTests(TestCase):
with open("stdout") as f:
self.assertEqual(f.readline().strip(),
"Summary: 1 txns, 1 objects, 1 revisions")
......@@ -19,6 +19,7 @@ import ZODB
from zope.testing import setupstack
from zope.testing.renormalizing import RENormalizing
def test_fstest_verbose():
r"""
>>> db = ZODB.DB('data.fs')
......@@ -52,4 +53,3 @@ def test_suite():
doctest.DocTestSuite(setUp=setupstack.setUpDirectory,
tearDown=setupstack.tearDown),
])
......@@ -23,6 +23,7 @@ from ZODB.utils import load_current
from .StorageTestBase import StorageTestBase
class FileStorageCorruptTests(StorageTestBase):
def setUp(self):
......
......@@ -21,6 +21,7 @@ import sys
from time import time, sleep
from ZODB.tests.MinPO import MinPO
class HistoryStorage(object):
def checkSimpleHistory(self):
self._checkHistory((11, 12, 13))
......@@ -29,7 +30,7 @@ class HistoryStorage(object):
start = time()
# Store a couple of revisions of the object
oid = self._storage.new_oid()
self.assertRaises(KeyError,self._storage.history,oid)
self.assertRaises(KeyError, self._storage.history, oid)
revids = [None]
for data in data:
if sys.platform == 'win32':
......
......@@ -110,7 +110,7 @@ class MVCCMappingStorage(MappingStorage):
self._polled_tid = self._ltid = new_tid
return list(changed_oids)
def tpc_finish(self, transaction, func = lambda tid: None):
def tpc_finish(self, transaction, func=lambda tid: None):
self._data_snapshot = None
with self._main_lock:
return MappingStorage.tpc_finish(self, transaction, func)
......