Commit d3f87f26
authored Dec 09, 2008 by Jim Fulton
Split blob packing tests into generic reusable and non-generic tests.
parent 1f6813ce
Showing 4 changed files with 176 additions and 159 deletions (+176 -159)
src/ZEO/tests/testZEO.py                 +2    -4
src/ZODB/tests/blob_packing.txt          +7    -150
src/ZODB/tests/blobstorage_packing.txt   +155  -0
src/ZODB/tests/testblob.py               +12   -5
src/ZEO/tests/testZEO.py
...
@@ -1233,11 +1233,9 @@ def test_suite():
     suite.addTest(sub)
     suite.addTest(ZODB.tests.testblob.storage_reusable_suite(
-        'ClientStorageNonSharedBlobs', ServerManagingClientStorage,
-        test_blob_storage_recovery=False))
+        'ClientStorageNonSharedBlobs', ServerManagingClientStorage))
     suite.addTest(ZODB.tests.testblob.storage_reusable_suite(
-        'ClientStorageSharedBlobs', create_storage_shared,
-        test_blob_storage_recovery=False))
+        'ClientStorageSharedBlobs', create_storage_shared))
     return suite
...
src/ZODB/tests/blob_packing.txt
-##############################################################################
-#
-# Copyright (c) 2005 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
 Packing support for blob data
 =============================
 
 Set up:
 
-    >>> from ZODB.FileStorage import FileStorage
-    >>> from ZODB.MappingStorage import MappingStorage
     >>> from ZODB.serialize import referencesf
-    >>> from ZODB.blob import Blob, BlobStorage
+    >>> from ZODB.blob import Blob
     >>> from ZODB import utils
     >>> from ZODB.DB import DB
     >>> import transaction
-    >>> storagefile = 'Data.fs'
-    >>> blob_dir = 'blobs'
 
 A helper method to assure a unique timestamp across multiple platforms:
...
...
@@ -36,8 +18,7 @@ UNDOING
 We need a database with an undoing blob supporting storage:
 
-    >>> base_storage = FileStorage(storagefile)
-    >>> blob_storage = BlobStorage(blob_dir, base_storage)
+    >>> blob_storage = create_storage()
     >>> database = DB(blob_storage)
 
 Create our root object:
...
...
@@ -62,25 +43,25 @@ Put some revisions of a blob object in our database and on the filesystem:
     >>> times.append(new_time())
     >>> root['blob'].open('w').write('this is blob data 1')
     >>> transaction.commit()
-    >>> tids.append(blob_storage._tid)
+    >>> tids.append(blob._p_serial)
 
     >>> nothing = transaction.begin()
     >>> times.append(new_time())
     >>> root['blob'].open('w').write('this is blob data 2')
     >>> transaction.commit()
-    >>> tids.append(blob_storage._tid)
+    >>> tids.append(blob._p_serial)
 
     >>> nothing = transaction.begin()
     >>> times.append(new_time())
     >>> root['blob'].open('w').write('this is blob data 3')
     >>> transaction.commit()
-    >>> tids.append(blob_storage._tid)
+    >>> tids.append(blob._p_serial)
 
     >>> nothing = transaction.begin()
     >>> times.append(new_time())
     >>> root['blob'].open('w').write('this is blob data 4')
     >>> transaction.commit()
-    >>> tids.append(blob_storage._tid)
+    >>> tids.append(blob._p_serial)
 
     >>> oid = root['blob']._p_oid
     >>> fns = [ blob_storage.fshelper.getBlobFilename(oid, x) for x in tids ]
...
...
@@ -144,128 +125,4 @@ revision as well as the entire directory:
 Clean up our blob directory and database:
 
-    >>> rmtree(blob_dir)
+    >>> blob_storage.close()
-    >>> base_storage.close()
-    >>> os.unlink(storagefile)
-    >>> os.unlink(storagefile+".index")
-    >>> os.unlink(storagefile+".tmp")
-    >>> os.unlink(storagefile+".old")
-
-NON-UNDOING
-===========
-
-We need an database with a NON-undoing blob supporting storage:
-
-    >>> base_storage = MappingStorage('storage')
-    >>> blob_storage = BlobStorage(blob_dir, base_storage)
-    >>> database = DB(blob_storage)
-
-Create our root object:
-
-    >>> connection1 = database.open()
-    >>> root = connection1.root()
-
-Put some revisions of a blob object in our database and on the filesystem:
-
-    >>> import time, os
-    >>> tids = []
-    >>> times = []
-    >>> nothing = transaction.begin()
-    >>> times.append(new_time())
-    >>> blob = Blob()
-    >>> blob.open('w').write('this is blob data 0')
-    >>> root['blob'] = blob
-    >>> transaction.commit()
-    >>> tids.append(blob_storage._tid)
-    >>> nothing = transaction.begin()
-    >>> times.append(new_time())
-    >>> root['blob'].open('w').write('this is blob data 1')
-    >>> transaction.commit()
-    >>> tids.append(blob_storage._tid)
-    >>> nothing = transaction.begin()
-    >>> times.append(new_time())
-    >>> root['blob'].open('w').write('this is blob data 2')
-    >>> transaction.commit()
-    >>> tids.append(blob_storage._tid)
-    >>> nothing = transaction.begin()
-    >>> times.append(new_time())
-    >>> root['blob'].open('w').write('this is blob data 3')
-    >>> transaction.commit()
-    >>> tids.append(blob_storage._tid)
-    >>> nothing = transaction.begin()
-    >>> times.append(new_time())
-    >>> root['blob'].open('w').write('this is blob data 4')
-    >>> transaction.commit()
-    >>> tids.append(blob_storage._tid)
-    >>> oid = root['blob']._p_oid
-    >>> fns = [ blob_storage.fshelper.getBlobFilename(oid, x) for x in tids ]
-    >>> [ os.path.exists(x) for x in fns ]
-    [True, True, True, True, True]
-
-Get our blob filenames for this oid.
-
-    >>> fns = [ blob_storage.fshelper.getBlobFilename(oid, x) for x in tids ]
-
-Do a pack to the slightly before the first revision was written:
-
-    >>> packtime = times[0]
-    >>> blob_storage.pack(packtime, referencesf)
-    >>> [ os.path.exists(x) for x in fns ]
-    [False, False, False, False, True]
-
-Do a pack to now:
-
-    >>> packtime = new_time()
-    >>> blob_storage.pack(packtime, referencesf)
-    >>> [ os.path.exists(x) for x in fns ]
-    [False, False, False, False, True]
-
-Delete the object and do a pack, it should get rid of the most current
-revision as well as the entire directory:
-
-    >>> nothing = transaction.begin()
-    >>> del root['blob']
-    >>> transaction.commit()
-    >>> packtime = new_time()
-    >>> blob_storage.pack(packtime, referencesf)
-    >>> [ os.path.exists(x) for x in fns ]
-    [False, False, False, False, False]
-    >>> os.path.exists(os.path.split(fns[0])[0])
-    False
-
-Avoiding parallel packs
-=======================
-
-Blob packing (similar to FileStorage) can only be run once at a time. For
-this, a flag (_blobs_pack_is_in_progress) is set. If the pack method is called
-while this flag is set, it will refuse to perform another pack, until the flag
-is reset:
-
-    >>> blob_storage._blobs_pack_is_in_progress
-    False
-    >>> blob_storage._blobs_pack_is_in_progress = True
-    >>> blob_storage.pack(packtime, referencesf)
-    Traceback (most recent call last):
-    BlobStorageError: Already packing
-    >>> blob_storage._blobs_pack_is_in_progress = False
-    >>> blob_storage.pack(packtime, referencesf)
-
-We can also see, that the flag is set during the pack, by leveraging the
-knowledge that the underlying storage's pack method is also called:
-
-    >>> def dummy_pack(time, ref):
-    ...     print "_blobs_pack_is_in_progress =",
-    ...     print blob_storage._blobs_pack_is_in_progress
-    ...     return base_pack(time, ref)
-    >>> base_pack = base_storage.pack
-    >>> base_storage.pack = dummy_pack
-    >>> blob_storage.pack(packtime, referencesf)
-    _blobs_pack_is_in_progress = True
-    >>> blob_storage._blobs_pack_is_in_progress
-    False
-    >>> base_storage.pack = base_pack
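Both packing doctests call a new_time() helper ("a helper method to assure a unique timestamp across multiple platforms"); the new blobstorage_packing.txt imports it from ZODB.tests.testblob, and its definition is not part of this diff. A minimal sketch of such a helper, assuming all it has to guarantee is a timestamp strictly newer than the previous one and a safe gap before the next commit, might look like this (illustrative only, not the committed implementation):

    import time

    def new_time():
        # Spin until the clock visibly advances, so two calls can never
        # return the same value even on platforms with coarse timers.
        now = new = time.time()
        while new <= now:
            new = time.time()
        # Leave a gap so a pack time taken now is clearly earlier than
        # the commit that follows it.
        time.sleep(1)
        return new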
src/ZODB/tests/blobstorage_packing.txt
new file (0 → 100644)
##############################################################################
#
# Copyright (c) 2005 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
Packing support for blob data
=============================
Set up:
>>> from ZODB.MappingStorage import MappingStorage
>>> from ZODB.serialize import referencesf
>>> from ZODB.blob import Blob, BlobStorage
>>> from ZODB import utils
>>> from ZODB.DB import DB
>>> import transaction
>>> storagefile = 'Data.fs'
>>> blob_dir = 'blobs'
A helper method to assure a unique timestamp across multiple platforms:
>>> from ZODB.tests.testblob import new_time
UNDOING
=======
See blob_packing.txt.
NON-UNDOING
===========
We need a database with a NON-undoing blob supporting storage:
>>> base_storage = MappingStorage('storage')
>>> blob_storage = BlobStorage(blob_dir, base_storage)
>>> database = DB(blob_storage)
Create our root object:
>>> connection1 = database.open()
>>> root = connection1.root()
Put some revisions of a blob object in our database and on the filesystem:
>>> import time, os
>>> tids = []
>>> times = []
>>> nothing = transaction.begin()
>>> times.append(new_time())
>>> blob = Blob()
>>> blob.open('w').write('this is blob data 0')
>>> root['blob'] = blob
>>> transaction.commit()
>>> tids.append(blob_storage._tid)
>>> nothing = transaction.begin()
>>> times.append(new_time())
>>> root['blob'].open('w').write('this is blob data 1')
>>> transaction.commit()
>>> tids.append(blob_storage._tid)
>>> nothing = transaction.begin()
>>> times.append(new_time())
>>> root['blob'].open('w').write('this is blob data 2')
>>> transaction.commit()
>>> tids.append(blob_storage._tid)
>>> nothing = transaction.begin()
>>> times.append(new_time())
>>> root['blob'].open('w').write('this is blob data 3')
>>> transaction.commit()
>>> tids.append(blob_storage._tid)
>>> nothing = transaction.begin()
>>> times.append(new_time())
>>> root['blob'].open('w').write('this is blob data 4')
>>> transaction.commit()
>>> tids.append(blob_storage._tid)
>>> oid = root['blob']._p_oid
>>> fns = [ blob_storage.fshelper.getBlobFilename(oid, x) for x in tids ]
>>> [ os.path.exists(x) for x in fns ]
[True, True, True, True, True]
Get our blob filenames for this oid.
>>> fns = [ blob_storage.fshelper.getBlobFilename(oid, x) for x in tids ]
Do a pack to slightly before the first revision was written:
>>> packtime = times[0]
>>> blob_storage.pack(packtime, referencesf)
>>> [ os.path.exists(x) for x in fns ]
[False, False, False, False, True]
Do a pack to now:
>>> packtime = new_time()
>>> blob_storage.pack(packtime, referencesf)
>>> [ os.path.exists(x) for x in fns ]
[False, False, False, False, True]
Delete the object and do a pack; this should get rid of the most current
revision as well as the entire directory:
>>> nothing = transaction.begin()
>>> del root['blob']
>>> transaction.commit()
>>> packtime = new_time()
>>> blob_storage.pack(packtime, referencesf)
>>> [ os.path.exists(x) for x in fns ]
[False, False, False, False, False]
>>> os.path.exists(os.path.split(fns[0])[0])
False
Avoiding parallel packs
=======================
Blob packing (similar to FileStorage packing) can only be run once at a time. To
enforce this, a flag (_blobs_pack_is_in_progress) is set. If the pack method is
called while this flag is set, it will refuse to perform another pack until the
flag is reset:
>>> blob_storage._blobs_pack_is_in_progress
False
>>> blob_storage._blobs_pack_is_in_progress = True
>>> blob_storage.pack(packtime, referencesf)
Traceback (most recent call last):
BlobStorageError: Already packing
>>> blob_storage._blobs_pack_is_in_progress = False
>>> blob_storage.pack(packtime, referencesf)
We can also see that the flag is set during the pack by leveraging the
knowledge that the underlying storage's pack method is also called:
>>> def dummy_pack(time, ref):
... print "_blobs_pack_is_in_progress =",
... print blob_storage._blobs_pack_is_in_progress
... return base_pack(time, ref)
>>> base_pack = base_storage.pack
>>> base_storage.pack = dummy_pack
>>> blob_storage.pack(packtime, referencesf)
_blobs_pack_is_in_progress = True
>>> blob_storage._blobs_pack_is_in_progress
False
>>> base_storage.pack = base_pack
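The "Avoiding parallel packs" section above only exercises the _blobs_pack_is_in_progress flag from the outside. As a rough illustration of the guard it is testing, a pack() method can serialize itself as sketched below; apart from _blobs_pack_is_in_progress and the "Already packing" error seen in the doctest, the class and attribute names are assumptions for the sketch, not the actual ZODB.blob.BlobStorage code:

    class BlobStorageError(Exception):
        """Stand-in for the error type seen in the doctest traceback."""

    class PackGuard:
        """Sketch of the single-pack-at-a-time guard the doctest observes."""

        _blobs_pack_is_in_progress = False

        def __init__(self, base_storage):
            self._base = base_storage

        def pack(self, packtime, referencesf):
            if self._blobs_pack_is_in_progress:
                # A concurrent caller is refused until the running pack ends.
                raise BlobStorageError('Already packing')
            self._blobs_pack_is_in_progress = True
            try:
                # The wrapped storage packs while the flag is set, which is
                # what the dummy_pack() doctest above prints.
                self._base.pack(packtime, referencesf)
                # ...removal of blob files for packed-away revisions would
                # happen here...
            finally:
                # Always clear the flag, even if packing raised an error.
                self._blobs_pack_is_in_progress = False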
src/ZODB/tests/testblob.py
...
@@ -552,7 +552,8 @@ def setUpBlobAdaptedFileStorage(test):
     test.globs['create_storage'] = create_storage
 
 
 def storage_reusable_suite(prefix, factory,
-                           test_blob_storage_recovery=True,
+                           test_blob_storage_recovery=False,
+                           test_packing=False,
                            ):
     """Return a test suite for a generic IBlobStorage.
...
@@ -575,6 +576,11 @@ def storage_reusable_suite(prefix, factory,
         setUp=setup, tearDown=zope.testing.setupstack.tearDown,
         optionflags=doctest.ELLIPSIS,
         ))
+    if test_packing:
+        suite.addTest(doctest.DocFileSuite(
+            "blob_packing.txt",
+            setUp=setup, tearDown=zope.testing.setupstack.tearDown,
+            ))
     suite.addTest(doctest.DocTestSuite(
         setUp=setup, tearDown=zope.testing.setupstack.tearDown,
         checker=zope.testing.renormalizing.RENormalizing([
...
@@ -608,9 +614,8 @@ def test_suite():
     suite.addTest(unittest.makeSuite(ZODBBlobConfigTest))
     suite.addTest(unittest.makeSuite(BlobCloneTests))
     suite.addTest(doctest.DocFileSuite(
-        "blob_basic.txt",
-        "blob_packing.txt", "blob_consume.txt",
-        "blob_tempdir.txt",
+        "blob_basic.txt", "blob_consume.txt", "blob_tempdir.txt",
+        "blobstorage_packing.txt",
         setUp=setUp,
         tearDown=zope.testing.setupstack.tearDown,
         optionflags=doctest.ELLIPSIS,
...
@@ -629,7 +634,9 @@ def test_suite():
     suite.addTest(storage_reusable_suite(
         'BlobAdaptedFileStorage',
         lambda name, blob_dir:
-        ZODB.blob.BlobStorage(blob_dir, FileStorage('%s.fs' % name))
+        ZODB.blob.BlobStorage(blob_dir, FileStorage('%s.fs' % name)),
+        test_blob_storage_recovery=True,
+        test_packing=True,
         ))
     return suite
...
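The testblob.py changes above make the recovery and packing doctests opt-in for users of storage_reusable_suite. A hedged usage sketch for some other blob storage's test module follows; MyBlobStorage and create_storage are placeholder names, while the factory(name, blob_dir) signature and the keyword arguments mirror the 'BlobAdaptedFileStorage' call in the diff:

    import unittest

    import ZODB.blob
    import ZODB.tests.testblob
    from ZODB.FileStorage import FileStorage

    def create_storage(name, blob_dir):
        # Placeholder factory: wrap a FileStorage in a BlobStorage, just as
        # the lambda for 'BlobAdaptedFileStorage' does in testblob.test_suite().
        return ZODB.blob.BlobStorage(blob_dir, FileStorage('%s.fs' % name))

    def test_suite():
        suite = unittest.TestSuite()
        suite.addTest(ZODB.tests.testblob.storage_reusable_suite(
            'MyBlobStorage', create_storage,
            test_blob_storage_recovery=True,  # opt in; the default is now False
            test_packing=True,                # also run the generic blob_packing.txt
            ))
        return suite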