Commit 3037f42a authored by Christian Theune

Back out my changes that only belong onto the branch right now.

parent 44900f9c
@@ -8,12 +8,6 @@ Change History
 New Features
 ------------
 
-- Changed layout strategy for the blob directory to a bushy approach (8 levels
-  deep, at most ~256 entries per directory level, one directory for each
-  blob). Old directories are automatically detected and will be handled with
-  the old strategy. A migration script (`migrateblobs.py`) is provided to
-  convert the different layouts.
-
 - Versions are no-longer supported.
 
 - ZEO cache files can be larger than 4G. Note that older ZEO cache
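The backed-out changelog entry above describes the bushy layout only in prose. Purely as an illustration (this is not the ZODB implementation being reverted here), a minimal sketch of how such a bushy path could be derived from a packed 8-byte OID, next to the flat per-OID layout it replaces. The helper names `bushy_path` and `lawn_path` and the byte order are assumptions; `ZODB.utils.oid_repr` is a real helper, also used by the migration script reproduced further down.

    import os
    from ZODB.utils import oid_repr

    def bushy_path(blob_dir, oid):
        # One '0xNN' directory per byte of the packed 8-byte OID: 8 levels
        # deep, at most 256 entries per level, one directory per blob.
        return os.path.join(blob_dir, *['0x%02x' % ord(b) for b in oid])

    def lawn_path(blob_dir, oid):
        # The old flat layout: one directory named after the whole OID,
        # e.g. '0x01' for OID 1, holding that blob's revision files.
        return os.path.join(blob_dir, oid_repr(oid))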
@@ -855,7 +855,9 @@ class ClientStorage(object):
 
     def _storeBlob_shared(self, oid, serial, data, filename, txn):
         # First, move the blob into the blob directory
-        self.fshelper.getPathForOID(oid, create=True)
+        dir = self.fshelper.getPathForOID(oid)
+        if not os.path.exists(dir):
+            os.mkdir(dir)
 
         fd, target = self.fshelper.blob_mkstemp(oid, serial)
         os.close(fd)
@@ -922,7 +924,14 @@ class ClientStorage(object):
             raise POSException.POSKeyError("No blob file", oid, serial)
 
         # First, we'll create the directory for this oid, if it doesn't exist.
-        targetpath = self.fshelper.getPathForOID(oid, create=True)
+        targetpath = self.fshelper.getPathForOID(oid)
+        if not os.path.exists(targetpath):
+            try:
+                os.makedirs(targetpath, 0700)
+            except OSError:
+                # We might have lost a race. If so, the directory
+                # must exist now
+                assert os.path.exists(targetpath)
 
         # OK, it's not here and we (or someone) needs to get it. We
         # want to avoid getting it multiple times. We want to avoid
@@ -1109,15 +1118,19 @@ class ClientStorage(object):
                 assert s == tid, (s, tid)
                 self._cache.store(oid, s, None, data)
 
         if self.fshelper is not None:
             blobs = self._tbuf.blobs
             while blobs:
                 oid, blobfilename = blobs.pop()
-                targetpath = self.fshelper.getPathForOID(oid, create=True)
+                targetpath = self.fshelper.getPathForOID(oid)
+                if not os.path.exists(targetpath):
+                    os.makedirs(targetpath, 0700)
                 rename_or_copy_blob(blobfilename,
                                     self.fshelper.getBlobFilename(oid, tid),
                                     )
 
         self._tbuf.clear()
 
     def undo(self, trans_id, txn):
@@ -515,7 +515,8 @@ class CommonBlobTests:
             self._storage.tpc_abort(t)
             raise
         self.assert_(not os.path.exists(tfname))
-        filename = self._storage.fshelper.getBlobFilename(oid, revid)
+        filename = os.path.join(self.blobdir, oid_repr(oid),
+                                tid_repr(revid) + BLOB_SUFFIX)
         self.assert_(os.path.exists(filename))
         self.assertEqual(somedata, open(filename).read())
 
@@ -630,14 +631,16 @@ class BlobAdaptedFileStorageTests(GenericTests, CommonBlobTests):
                 d2 = somedata.read(8096)
                 self.assertEqual(d1, d2)
 
-        # The file should be in the cache ...
-        filename = self._storage.fshelper.getBlobFilename(oid, revid)
+        # The file should have been copied to the server:
+        filename = os.path.join(self.blobdir, oid_repr(oid),
+                                tid_repr(revid) + BLOB_SUFFIX)
         check_data(filename)
 
-        # ... and on the server
-        server_filename = filename.replace(self.blob_cache_dir, self.blobdir)
-        self.assert_(server_filename.startswith(self.blobdir))
-        check_data(server_filename)
+        # It should also be in the cache:
+        filename = os.path.join(self.blob_cache_dir, oid_repr(oid),
+                                tid_repr(revid) + BLOB_SUFFIX)
+        check_data(filename)
 
         # If we remove it from the cache and call loadBlob, it should
         # come back. We can do this in many threads. We'll instrument
##############################################################################
#
# Copyright (c) 2008 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################
"""A script to migrate a blob directory into a different layout.
"""
import logging
import optparse
import os
import shutil

from ZODB.blob import FilesystemHelper, rename_or_copy_blob
from ZODB.utils import cp, oid_repr


def link_or_copy(f1, f2):
    try:
        os.link(f1, f2)
    except OSError:
        shutil.copy(f1, f2)


def migrate(source, dest, layout):
    source_fsh = FilesystemHelper(source)
    source_fsh.create()
    dest_fsh = FilesystemHelper(dest, layout)
    dest_fsh.create()
    print "Migrating blob data from `%s` (%s) to `%s` (%s)" % (
        source, source_fsh.layout_name, dest, dest_fsh.layout_name)
    for oid, path in source_fsh.listOIDs():
        dest_path = dest_fsh.getPathForOID(oid, create=True)
        files = os.listdir(path)
        for file in files:
            source_file = os.path.join(path, file)
            dest_file = os.path.join(dest_path, file)
            link_or_copy(source_file, dest_file)
        print "\tOID: %s - %s files " % (oid_repr(oid), len(files))


def main(source=None, dest=None, layout="bushy"):
    usage = "usage: %prog [options] <source> <dest>"
    description = ("Create the new directory <dest> and migrate all blob "
                   "data from <source> to <dest> while using the new "
                   "<layout> for <dest>")

    parser = optparse.OptionParser(usage=usage, description=description)
    parser.add_option("-l", "--layout",
                      default=layout, type='choice',
                      choices=['bushy', 'lawn'],
                      help="Define the layout to use for the new directory "
                           "(bushy or lawn). Default: %default")
    options, args = parser.parse_args()

    if not len(args) == 2:
        parser.error("source and destination must be given")

    logging.getLogger().addHandler(logging.StreamHandler())
    logging.getLogger().setLevel(0)

    source, dest = args
    migrate(source, dest, options.layout)


if __name__ == '__main__':
    main()
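For reference, a hypothetical invocation of the script above (the two blob directory paths are placeholders; `--layout` defaults to `bushy`):

    python migrateblobs.py --layout bushy /path/to/old-blobs /path/to/new-blobs

The destination directory is created by `FilesystemHelper.create()` with the chosen layout, and each blob file is hard-linked into place, falling back to a plain copy when `os.link` fails (e.g. across filesystems).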
@@ -32,7 +32,7 @@ First, we need a datatabase with blob support::
     >>> from ZODB.DB import DB
     >>> from tempfile import mkdtemp
     >>> import os.path
-    >>> base_storage = MappingStorage('test')
+    >>> base_storage = MappingStorage("test")
     >>> blob_dir = mkdtemp()
     >>> blob_storage = BlobStorage(blob_dir, base_storage)
     >>> database = DB(blob_storage)
@@ -322,9 +322,9 @@ clean up dirty files:
     >>> base_storage = DummyBaseStorage()
     >>> blob_dir2 = mkdtemp()
     >>> blob_storage2 = BlobStorage(blob_dir2, base_storage)
-    >>> committed_blob_dir = blob_storage2.fshelper.getPathForOID(0)
-    >>> os.makedirs(committed_blob_dir)
-    >>> committed_blob_file = blob_storage2.fshelper.getBlobFilename(0, 0)
+    >>> committed_blob_dir = os.path.join(blob_dir2, '0')
+    >>> committed_blob_file = os.path.join(committed_blob_dir, '0.blob')
+    >>> os.mkdir(committed_blob_dir)
     >>> open(os.path.join(committed_blob_file), 'w').write('foo')
     >>> os.path.exists(committed_blob_file)
     True
@@ -105,6 +105,7 @@ class BlobUndoTests(unittest.TestCase):
         self.here = os.getcwd()
         os.chdir(self.test_dir)
         self.storagefile = 'Data.fs'
+        os.mkdir('blobs')
         self.blob_dir = 'blobs'
 
     def tearDown(self):
@@ -482,7 +483,7 @@ def loadblob_tmpstore():
 
     We can access the blob correctly:
 
         >>> tmpstore.loadBlob(blob_oid, tid) # doctest: +ELLIPSIS
-        '.../0x01/0x00/0x00/0x00/0x00/0x00/0x00/0x00/0x...blob'
+        '.../0x01/0x...blob'
 
     Clean up:
@@ -506,12 +507,6 @@ def test_suite():
         setUp=ZODB.tests.util.setUp,
         tearDown=ZODB.tests.util.tearDown,
         ))
-    suite.addTest(doctest.DocFileSuite(
-        "blob_layout.txt",
-        optionflags=doctest.ELLIPSIS|doctest.NORMALIZE_WHITESPACE|doctest.REPORT_NDIFF,
-        setUp=ZODB.tests.util.setUp,
-        tearDown=ZODB.tests.util.tearDown,
-        ))
     suite.addTest(doctest.DocTestSuite(
         setUp=ZODB.tests.util.setUp,
         tearDown=ZODB.tests.util.tearDown,