Commit 6f2882ed authored by Jim Fulton's avatar Jim Fulton

Databases now warn when committing very large records (> 16MB).

This is to try to warn people of likely design mistakes.  There is a
new option (large_record_size/large-record-size) to control the
Index: src/CHANGES.txt
parent 58769030
......@@ -32,6 +32,11 @@ New Features
- You can now pass None (rather than a storage or file name) to get
a database with a mapping storage.
- Databases now warn when committing very large records (> 16MB).
This is to try to warn people of likely design mistakes. There is a
new option (large_record_size/large-record-size) to control the
record size at which the warning is issued.
Bugs Fixed
----------
......
......@@ -88,6 +88,7 @@ class Connection(ExportImport, object):
self._debug_info = ()
self._db = db
self.large_record_size = db.large_record_size
# historical connection
self.before = before
......@@ -632,6 +633,8 @@ class Connection(ExportImport, object):
raise ConflictError(object=obj)
self._modified.append(oid)
p = writer.serialize(obj) # This calls __getstate__ of obj
if len(p) >= self.large_record_size:
warnings.warn(large_object_message % (obj.__class__, len(p)))
if isinstance(obj, Blob):
if not IBlobStorage.providedBy(self._storage):
......@@ -1319,3 +1322,23 @@ class RootConvenience(object):
if len(names) > 60:
names = names[:57].rsplit(' ', 1)[0] + ' ...'
return "<root: %s>" % names
# Warning text issued at commit time when a serialized data record is at
# least Connection.large_record_size bytes.  Interpolated as
# large_object_message % (obj.__class__, len(p)) — see the serialize hunk
# above.  NOTE: this string is matched verbatim by the doctest in
# warn_when_data_records_are_big, so its wording must not drift.
large_object_message = """The %s
object you're saving is large. (%s bytes.)
Perhaps you're storing media which should be stored in blobs.
Perhaps you're using a non-scalable data structure, such as a
PersistentMapping or PersistentList.
Perhaps you're storing data in objects that aren't persistent at
all. In cases like that, the data is stored in the record of the
containing persistent object.
In any case, storing records this big is probably a bad idea.
If you insist and want to get rid of this warning, use the
large_record_size option of the ZODB.DB constructor (or the
large-record-size option in a configuration file) to specify a larger
size.
"""
......@@ -386,6 +386,7 @@ class DB(object):
databases=None,
xrefs=True,
max_saved_oids=999,
large_record_size=1<<24,
**storage_args):
"""Create an object database.
......@@ -487,6 +488,7 @@ class DB(object):
self._saved_oids = []
self._max_saved_oids = max_saved_oids
self.large_record_size = large_record_size
def _setupUndoMethods(self):
storage = self.storage
......
......@@ -245,6 +245,7 @@
"0" means no limit.
</description>
</key>
<key name="large-record-size" datatype="byte-size" />
<key name="pool-size" datatype="integer" default="7"/>
<description>
The expected maximum number of simultaneously open connections.
......
......@@ -110,6 +110,7 @@ class ZODBDatabase(BaseConfig):
_option('pool_timeout')
_option('allow_implicit_cross_references', 'xrefs')
_option('large_record_size')
try:
return ZODB.DB(
......
......@@ -873,6 +873,8 @@ class StubDatabase:
save_oid = lambda self, oid: None
large_record_size = 1<<30
def test_suite():
s = unittest.makeSuite(ConnectionDotAdd, 'check')
s.addTest(doctest.DocTestSuite())
......
......@@ -284,6 +284,85 @@ def connection_allows_empty_version_for_idiots():
>>> db.close()
"""
def warn_when_data_records_are_big():
    # Doctest-only function (collected via doctest.DocTestSuite in
    # test_suite below — the docstring IS the test; do not edit its text).
    # It checks three things:
    #   1. committing a record of >= the default threshold (1<<24 bytes)
    #      emits large_object_message via warnings.warn;
    #   2. the threshold can be lowered with the large_record_size
    #      keyword to ZODB.DB;
    #   3. the threshold can be set with the large-record-size option in
    #      a ZConfig database section.
    # warnings.warn is monkey-patched to print so doctest can capture it,
    # and restored at the end.
    """
When data records are large, a warning is issued to try to prevent new
users from shooting themselves in the foot.
>>> import warnings
>>> old_warn = warnings.warn
>>> def faux_warn(message, *args):
... print message,
... if args: print args
>>> warnings.warn = faux_warn
>>> db = ZODB.DB('t.fs', create=True)
>>> conn = db.open()
>>> conn.root.x = 'x'*(1<<24)
>>> transaction.commit()
The <class 'persistent.mapping.PersistentMapping'>
object you're saving is large. (16777284 bytes.)
<BLANKLINE>
Perhaps you're storing media which should be stored in blobs.
<BLANKLINE>
Perhaps you're using a non-scalable data structure, such as a
PersistentMapping or PersistentList.
<BLANKLINE>
Perhaps you're storing data in objects that aren't persistent at
all. In cases like that, the data is stored in the record of the
containing persistent object.
<BLANKLINE>
In any case, storing records this big is probably a bad idea.
<BLANKLINE>
If you insist and want to get rid of this warning, use the
large_record_size option of the ZODB.DB constructor (or the
large-record-size option in a configuration file) to specify a larger
size.
>>> db.close()
The large_record_size option can be used to control the record size:
>>> db = ZODB.DB('t.fs', create=True, large_record_size=999)
>>> conn = db.open()
>>> conn.root.x = 'x'
>>> transaction.commit()
>>> conn.root.x = 'x'*999
>>> transaction.commit() # doctest: +ELLIPSIS
The <class 'persistent.mapping.PersistentMapping'>
object you're saving is large. (1067 bytes.)
...
>>> db.close()
We can also specify it using a configuration option:
>>> import ZODB.config
>>> db = ZODB.config.databaseFromString('''
... <zodb>
... large-record-size 1MB
... <filestorage>
... path t.fs
... create true
... </filestorage>
... </zodb>
... ''')
>>> conn = db.open()
>>> conn.root.x = 'x'
>>> transaction.commit()
>>> conn.root.x = 'x'*(1<<20)
>>> transaction.commit() # doctest: +ELLIPSIS
The <class 'persistent.mapping.PersistentMapping'>
object you're saving is large. (1048644 bytes.)
...
>>> db.close()
>>> warnings.warn = old_warn
    """
def test_suite():
s = unittest.makeSuite(DBTests)
s.addTest(doctest.DocTestSuite(
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment