Commit 1ae0ba36 authored by Christian Theune's avatar Christian Theune

Fixed bug #130459 by providing a well known temporary directory to place

uncommitted data in (ported from 3.8).
--This line, and those below, will be ignored--

M    src/ZODB/tests/blob_packing.txt
M    src/ZODB/tests/blob_tempdir.txt
M    src/ZODB/tests/testblob.py
M    src/ZODB/blob.py
parent 61fffa02
......@@ -244,7 +244,7 @@ class Blob(persistent.Persistent):
def cleanup(ref):
if os.path.exists(filename):
os.remove(filename)
self._p_blob_ref = weakref.ref(self, cleanup)
return filename
......@@ -294,6 +294,7 @@ class FilesystemHelper:
def __init__(self, base_dir):
    # Root directory that holds one subdirectory per blob OID.
    self.base_dir = base_dir
    # Well-known subdirectory for uncommitted (temporary) blob data;
    # kept inside base_dir but ignored by packing (bug #130459).
    self.temp_dir = os.path.join(self.base_dir, 'tmp')
def create(self):
if not os.path.exists(self.base_dir):
......@@ -301,6 +302,11 @@ class FilesystemHelper:
log("Blob cache directory '%s' does not exist. "
"Created new directory." % self.base_dir,
level=logging.INFO)
if not os.path.exists(self.temp_dir):
os.makedirs(self.temp_dir, 0700)
log("Blob temporary directory '%s' does not exist. "
"Created new directory." % self.temp_dir,
level=logging.INFO)
def isSecure(self, path):
"""Ensure that (POSIX) path mode bits are 0700."""
......@@ -375,6 +381,17 @@ class FilesystemHelper:
oids.append(oid)
return oids
def listOIDs(self):
    """Iterate over (oid, path) pairs for every blob stored here.

    The well-known 'tmp' subdirectory holds uncommitted data and is
    therefore not treated as an OID directory.
    """
    for entry in os.listdir(self.base_dir):
        if entry != 'tmp':
            oid = utils.repr_to_oid(entry)
            yield oid, self.getPathForOID(oid)
class BlobStorage(SpecificationDecoratorBase):
"""A storage to support blobs."""
......@@ -404,8 +421,7 @@ class BlobStorage(SpecificationDecoratorBase):
@non_overridable
def temporaryDirectory(self):
    """Return the well-known directory for uncommitted blob data.

    Uncommitted blobs are placed under <base_dir>/tmp so that packing,
    which scans base_dir for OID directories, never sees them
    (fix for bug #130459).
    """
    # NOTE(review): the scraped diff showed both the old (base_dir) and
    # new (temp_dir) return lines; the first made the second unreachable.
    # Only the commit's intended return is kept.
    return self.fshelper.temp_dir
@non_overridable
def __repr__(self):
......@@ -471,18 +487,8 @@ class BlobStorage(SpecificationDecoratorBase):
# if they are still needed by attempting to load the revision
# of that object from the database. This is maybe the slowest
# possible way to do this, but it's safe.
# XXX we should be tolerant of "garbage" directories/files in
# the base_directory here.
# XXX If this method gets refactored we have to watch out for extra
# files from uncommitted transactions. The current implementation
# doesn't have a problem, but future refactorings likely will.
base_dir = self.fshelper.base_dir
for oid_repr in os.listdir(base_dir):
oid = utils.repr_to_oid(oid_repr)
oid_path = os.path.join(base_dir, oid_repr)
for oid, oid_path in self.fshelper.listOIDs():
files = os.listdir(oid_path)
files.sort()
......@@ -501,11 +507,8 @@ class BlobStorage(SpecificationDecoratorBase):
@non_overridable
def _packNonUndoing(self, packtime, referencesf):
base_dir = self.fshelper.base_dir
for oid_repr in os.listdir(base_dir):
oid = utils.repr_to_oid(oid_repr)
oid_path = os.path.join(base_dir, oid_repr)
for oid, oid_path in self.fshelper.listOIDs():
exists = True
try:
self.load(oid, None) # no version support
except (POSKeyError, KeyError):
......
......@@ -29,18 +29,9 @@ Set up:
>>> storagefile = mktemp()
>>> blob_dir = mkdtemp()
A helper method to assure a unique timestamp across multiple platforms. This
method also makes sure that after retrieving a timestamp that was *before* a
transaction was committed, that at least one second passes so the packing time
actually is before the commit time.
>>> import time
>>> def new_time():
... now = new_time = time.time()
... while new_time <= now:
... new_time = time.time()
... time.sleep(1)
... return new_time
A helper method to assure a unique timestamp across multiple platforms:
>>> from ZODB.tests.testblob import new_time
UNDOING
=======
......@@ -170,7 +161,7 @@ We need an database with a NON-undoing blob supporting storage:
>>> base_storage = MappingStorage('storage')
>>> blob_storage = BlobStorage(blob_dir, base_storage)
>>> database = DB(blob_storage)
Create our root object:
>>> connection1 = database.open()
......@@ -228,7 +219,7 @@ Do a pack to the slightly before the first revision was written:
>>> blob_storage.pack(packtime, referencesf)
>>> [ os.path.exists(x) for x in fns ]
[False, False, False, False, True]
Do a pack to now:
>>> packtime = new_time()
......
......@@ -31,17 +31,18 @@ First, we need a database with blob support::
>>> from ZODB.blob import BlobStorage
>>> from ZODB.DB import DB
>>> from tempfile import mkdtemp
>>> import os.path
>>> base_storage = MappingStorage("test")
>>> blob_dir = mkdtemp()
>>> blob_storage = BlobStorage(blob_dir, base_storage)
>>> database = DB(blob_storage)
Now we create a blob and put it in the database. After that we open it for
writing and expect the file to be in the blob directory::
writing and expect the file to be in the blob temporary directory::
>>> blob = Blob()
>>> connection = database.open()
>>> connection.add(blob)
>>> w = blob.open('w')
>>> w.name.startswith(blob_dir)
>>> w.name.startswith(os.path.join(blob_dir, 'tmp'))
True
......@@ -13,6 +13,7 @@
##############################################################################
import base64, os, shutil, tempfile, unittest
import time
from zope.testing import doctest
import ZODB.tests.util
......@@ -26,6 +27,22 @@ import transaction
from ZODB.tests.testConfig import ConfigTestBase
from ZConfig import ConfigurationSyntaxError
def new_time():
    """Create a _new_ time stamp.

    Busy-waits until the clock has advanced past the moment this function
    was entered, then sleeps a full second so that a timestamp retrieved
    *before* a later commit really precedes that commit's time — i.e. a
    pack time taken here is safely earlier than the next commit.
    """
    entered = time.time()
    stamp = entered
    # Spin until the clock visibly moves forward.
    while stamp <= entered:
        stamp = time.time()
    # Guarantee at least one second between this stamp and whatever
    # the caller commits next.
    time.sleep(1)
    return stamp
class BlobConfigTestBase(ConfigTestBase):
def setUp(self):
......@@ -284,7 +301,7 @@ Works with savepoints too:
>>> root['blob2'].open().read()
'test2'
>>> os.rename = os_rename
>>> logger.propagate = True
>>> logger.setLevel(0)
......@@ -292,6 +309,86 @@ Works with savepoints too:
"""
def packing_with_uncommitted_data_non_undoing():
    """
    This covers regression for bug #130459.

    When uncommitted data exists it formerly was written to the root of the
    blob_directory and confused our packing strategy. We now use a separate
    temporary directory that is ignored while packing.

    >>> import transaction
    >>> from ZODB.MappingStorage import MappingStorage
    >>> from ZODB.blob import BlobStorage
    >>> from ZODB.DB import DB
    >>> from ZODB.serialize import referencesf
    >>> from tempfile import mkdtemp

    >>> base_storage = MappingStorage("test")
    >>> blob_dir = mkdtemp()
    >>> blob_storage = BlobStorage(blob_dir, base_storage)
    >>> database = DB(blob_storage)
    >>> connection = database.open()
    >>> root = connection.root()
    >>> from ZODB.blob import Blob

    >>> root['blob'] = Blob()
    >>> connection.add(root['blob'])
    >>> root['blob'].open('w').write('test')

    Packing must succeed even though the blob write above is uncommitted:

    >>> blob_storage.pack(new_time(), referencesf)

    Clean up:

    >>> database.close()
    >>> import shutil
    >>> shutil.rmtree(blob_dir)

    """
def packing_with_uncommitted_data_undoing():
    """
    This covers regression for bug #130459.

    When uncommitted data exists it formerly was written to the root of the
    blob_directory and confused our packing strategy. We now use a separate
    temporary directory that is ignored while packing.

    >>> import transaction
    >>> from ZODB.FileStorage.FileStorage import FileStorage
    >>> from ZODB.blob import BlobStorage
    >>> from ZODB.DB import DB
    >>> from ZODB.serialize import referencesf
    >>> from tempfile import mkdtemp, mktemp

    >>> storagefile = mktemp()
    >>> base_storage = FileStorage(storagefile)
    >>> blob_dir = mkdtemp()
    >>> blob_storage = BlobStorage(blob_dir, base_storage)
    >>> database = DB(blob_storage)
    >>> connection = database.open()
    >>> root = connection.root()
    >>> from ZODB.blob import Blob

    >>> root['blob'] = Blob()
    >>> connection.add(root['blob'])
    >>> root['blob'].open('w').write('test')

    Packing must succeed even though the blob write above is uncommitted:

    >>> blob_storage.pack(new_time(), referencesf)

    Clean up:

    >>> database.close()
    >>> import shutil
    >>> shutil.rmtree(blob_dir)
    >>> os.unlink(storagefile)
    >>> os.unlink(storagefile+".index")
    >>> os.unlink(storagefile+".tmp")

    """
def test_suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(ZODBBlobConfigTest))
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment