Commit ce8372cf authored by Christian Theune

Fix bug 251037: make packing of blob storages non-blocking.

parent 06d93892
......@@ -37,6 +37,8 @@ New Features
Bugs Fixed
----------
- Fix for bug #251037: Make packing of blob storages non-blocking.
- Fix for bug #220856: Completed implementation of ZEO authentication.
- Fix for bug #184057: Make initialisation of small ZEO client file cache
......
......@@ -392,6 +392,10 @@ class FilesystemHelper:
            yield oid, self.getPathForOID(oid)


class BlobStorageError(Exception):
    """The blob storage encountered an invalid state."""


class BlobStorage(SpecificationDecoratorBase):
    """A storage to support blobs."""
......@@ -399,7 +403,8 @@ class BlobStorage(SpecificationDecoratorBase):
    # Proxies can't have a __dict__ so specifying __slots__ here allows
    # us to have instance attributes explicitly on the proxy.
    __slots__ = ('fshelper', 'dirty_oids', '_BlobStorage__supportsUndo')
    __slots__ = ('fshelper', 'dirty_oids', '_BlobStorage__supportsUndo',
                 '_blobs_pack_is_in_progress', )

    def __new__(self, base_directory, storage):
        return SpecificationDecoratorBase.__new__(self, storage)
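The reason the new attribute must be listed in __slots__ at all: a class that
defines __slots__ (and therefore has no instance __dict__) rejects assignment
to undeclared attributes. A tiny illustration of that constraint, unrelated to
the proxy machinery itself::

    class Slotted(object):
        __slots__ = ('declared',)

    s = Slotted()
    s.declared = 1    # allowed: listed in __slots__
    s.undeclared = 2  # raises AttributeError: no such slot and no __dict__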
......@@ -418,6 +423,7 @@ class BlobStorage(SpecificationDecoratorBase):
        else:
            supportsUndo = supportsUndo()
        self.__supportsUndo = supportsUndo
        self._blobs_pack_is_in_progress = False

    @non_overridable
    def temporaryDirectory(self):
......@@ -528,21 +534,29 @@ class BlobStorage(SpecificationDecoratorBase):
    @non_overridable
    def pack(self, packtime, referencesf):
        """Remove all unused oid/tid combinations."""
        unproxied = getProxiedObject(self)
        """Remove all unused OID/TID combinations."""
        self._lock_acquire()
        try:
            if self._blobs_pack_is_in_progress:
                raise BlobStorageError('Already packing')
            self._blobs_pack_is_in_progress = True
        finally:
            self._lock_release()

        # pack the underlying storage, which will allow us to determine
        try:
            # Pack the underlying storage, which will allow us to determine
            # which serials are current.
            unproxied = getProxiedObject(self)
            result = unproxied.pack(packtime, referencesf)

            # perform a pack on blob data
            self._lock_acquire()
            try:
                # Perform a pack on the blob data.
                if self.__supportsUndo:
                    self._packUndoing(packtime, referencesf)
                else:
                    self._packNonUndoing(packtime, referencesf)
            finally:
                self._lock_acquire()
                self._blobs_pack_is_in_progress = False
                self._lock_release()

        return result
......
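The shape of the change above is a standard pattern: the in-progress flag is
tested and set while holding the storage lock, the expensive pack work then
runs with the lock released (which is what makes packing non-blocking), and
the flag is cleared under the lock again in a finally block. A minimal
standalone sketch of the same pattern, using threading.Lock and hypothetical
names rather than the real BlobStorage internals::

    import threading

    class PackInProgress(Exception):
        """Raised when a pack is requested while one is already running."""

    class Packer(object):

        def __init__(self):
            self._lock = threading.Lock()
            self._pack_in_progress = False

        def pack(self):
            # Test and set the flag while holding the lock.
            self._lock.acquire()
            try:
                if self._pack_in_progress:
                    raise PackInProgress('Already packing')
                self._pack_in_progress = True
            finally:
                self._lock.release()
            try:
                # The long-running work happens without the lock, so other
                # storage operations are not blocked while packing runs.
                self._do_expensive_pack()
            finally:
                # Always clear the flag, even if the pack failed.
                self._lock.acquire()
                try:
                    self._pack_in_progress = False
                finally:
                    self._lock.release()

        def _do_expensive_pack(self):
            # Placeholder for the actual pack work.
            pass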
......@@ -240,6 +240,37 @@ revision as well as the entire directory:
>>> os.path.exists(os.path.split(fns[0])[0])
False
Avoiding parallel packs
=======================
Blob packing (similar to FileStorage) can only run once at a time. To enforce
this, a flag (_blobs_pack_is_in_progress) is set while a pack is in progress.
If the pack method is called while this flag is set, it refuses to start
another pack until the flag has been reset:
>>> blob_storage._blobs_pack_is_in_progress
False
>>> blob_storage._blobs_pack_is_in_progress = True
>>> blob_storage.pack(packtime, referencesf)
Traceback (most recent call last):
BlobStorageError: Already packing
>>> blob_storage._blobs_pack_is_in_progress = False
>>> blob_storage.pack(packtime, referencesf)
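A caller that prefers to treat a refused pack as a no-op rather than an error
could catch the exception itself; this is just an illustrative sketch (assuming
the BlobStorageError class shown above is importable), not part of the change::

    def try_pack(storage, packtime, referencesf):
        # Return False instead of raising if a pack is already running.
        try:
            storage.pack(packtime, referencesf)
        except BlobStorageError:
            return False
        return True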
We can also see that the flag is set during the pack by leveraging the
knowledge that the underlying storage's pack method is also called:
>>> def dummy_pack(time, ref):
... print "_blobs_pack_is_in_progress =", blob_storage._blobs_pack_is_in_progress
... return base_pack(time, ref)
>>> base_pack = base_storage.pack
>>> base_storage.pack = dummy_pack
>>> blob_storage.pack(packtime, referencesf)
_blobs_pack_is_in_progress = True
>>> blob_storage._blobs_pack_is_in_progress
False
>>> base_storage.pack = base_pack
Clean up our blob directory:
>>> shutil.rmtree(blob_dir)