Skip to content
Projects
Groups
Snippets
Help
Loading...
Help
Support
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in / Register
Toggle navigation
Z
ZEO
Project overview
Project overview
Details
Activity
Releases
Repository
Repository
Files
Commits
Branches
Tags
Contributors
Graph
Compare
Issues
0
Issues
0
List
Boards
Labels
Milestones
Merge Requests
0
Merge Requests
0
CI / CD
CI / CD
Pipelines
Jobs
Schedules
Analytics
Analytics
CI / CD
Repository
Value Stream
Snippets
Snippets
Members
Members
Collapse sidebar
Close sidebar
Activity
Graph
Create a new issue
Jobs
Commits
Issue Boards
Open sidebar
nexedi
ZEO
Commits
d4ee116e
Commit
d4ee116e
authored
Nov 12, 2008
by
Jim Fulton
Browse files
Options
Browse Files
Download
Email Patches
Plain Diff
Allow client caches larger than 4G.
parent
55b8dcd5
Changes
3
Show whitespace changes
Inline
Side-by-side
Showing
3 changed files
with
57 additions
and
6 deletions
+57
-6
src/CHANGES.txt
src/CHANGES.txt
+2
-2
src/ZEO/cache.py
src/ZEO/cache.py
+21
-3
src/ZEO/tests/test_cache.py
src/ZEO/tests/test_cache.py
+34
-1
No files found.
src/CHANGES.txt
View file @
d4ee116e
...
...
@@ -37,9 +37,9 @@ New Features
- Object saves are a little faster
- The previous (ZODB 3.8) ZEO clien-cache format is supported.
- The previous (ZODB 3.8) ZEO client-cache format is supported.
The newer cache format introduced in ZODB 3.9.0a1 is no-longer
supported
and cache files are limited to
4G.
supported
. Cache files can still be larger than
4G.
3.9.0a4 (2008-11-06)
====================
...
...
src/ZEO/cache.py
View file @
d4ee116e
...
...
@@ -82,6 +82,12 @@ logger = logging.getLogger("ZEO.cache")
magic
=
"ZEC3"
ZEC_HEADER_SIZE
=
12
# Maximum block size. Note that while we are doing a store, we may
# need to write a free block that is almost twice as big. If we die
# in the middle of a store, then we need to split the large free records
# while opening.
max_block_size
=
(
1
<<
31
)
-
1
# After the header, the file contains a contiguous sequence of blocks. All
# blocks begin with a one-byte status indicator:
#
...
...
@@ -203,8 +209,12 @@ class ClientCache(object):
self
.
f
.
seek
(
0
)
self
.
f
.
write
(
magic
)
self
.
f
.
write
(
z64
)
# and one free block.
self
.
f
.
write
(
'f'
+
pack
(
">I"
,
self
.
maxsize
-
ZEC_HEADER_SIZE
))
# add as many free blocks as are needed to fill the space
nfree
=
self
.
maxsize
-
ZEC_HEADER_SIZE
for
i
in
range
(
0
,
nfree
,
max_block_size
):
block_size
=
min
(
max_block_size
,
nfree
-
i
)
self
.
f
.
write
(
'f'
+
pack
(
">I"
,
block_size
))
self
.
f
.
seek
(
block_size
-
5
,
1
)
sync
(
self
.
f
)
# Statistics: _n_adds, _n_added_bytes,
...
...
@@ -273,6 +283,14 @@ class ClientCache(object):
l
+=
1
elif
status
==
'f'
:
size
,
=
unpack
(
">I"
,
read
(
4
))
if
size
>
max_block_size
:
# Oops, we either have an old cache, or a we
# crashed while storing. Split this block into two.
assert
size
<=
max_block_size
*
2
seek
(
ofs
+
max_block_size
)
self
.
f
.
write
(
'f'
+
pack
(
">I"
,
size
-
max_block_size
))
seek
(
ofs
)
self
.
f
.
write
(
'f'
+
pack
(
">I"
,
max_block_size
))
elif
status
in
'1234'
:
size
=
int
(
status
)
else
:
...
...
@@ -506,7 +524,7 @@ class ClientCache(object):
# 2nd-level ZEO cache got a much higher hit rate if "very large"
# objects simply weren't cached. For now, we ignore the request
# only if the entire cache file is too small to hold the object.
if
size
>
self
.
maxsize
-
ZEC_HEADER_SIZE
:
if
size
>
min
(
max_block_size
,
self
.
maxsize
-
ZEC_HEADER_SIZE
)
:
return
self
.
_n_adds
+=
1
...
...
src/ZEO/tests/test_cache.py
View file @
d4ee116e
...
...
@@ -18,10 +18,12 @@ from zope.testing import doctest
import
os
import
random
import
string
import
struct
import
sys
import
tempfile
import
unittest
import
ZEO.cache
import
ZODB.tests.util
import
zope.testing.setupstack
import
ZEO.cache
...
...
@@ -61,17 +63,19 @@ def oid(o):
return
repr_to_oid
(
repr
)
tid
=
oid
class
CacheTests
(
unittest
.
TestCase
):
class
CacheTests
(
ZODB
.
tests
.
util
.
TestCase
):
def
setUp
(
self
):
# The default cache size is much larger than we need here. Since
# testSerialization reads the entire file into a string, it's not
# good to leave it that big.
ZODB
.
tests
.
util
.
TestCase
.
setUp
(
self
)
self
.
cache
=
ZEO
.
cache
.
ClientCache
(
size
=
1024
**
2
)
def
tearDown
(
self
):
if
self
.
cache
.
path
:
os
.
remove
(
self
.
cache
.
path
)
ZODB
.
tests
.
util
.
TestCase
.
tearDown
(
self
)
def
testLastTid
(
self
):
self
.
assertEqual
(
self
.
cache
.
getLastTid
(),
None
)
...
...
@@ -192,6 +196,35 @@ class CacheTests(unittest.TestCase):
# recorded as non-current.
self
.
assert_
(
1
not
in
cache
.
noncurrent
)
def
testVeryLargeCaches
(
self
):
cache
=
ZEO
.
cache
.
ClientCache
(
'cache'
,
size
=
(
1
<<
33
))
cache
.
store
(
n1
,
n2
,
None
,
"x"
)
cache
.
close
()
cache
=
ZEO
.
cache
.
ClientCache
(
'cache'
,
size
=
(
1
<<
33
))
self
.
assertEquals
(
cache
.
load
(
n1
),
(
'x'
,
n2
))
cache
.
close
()
def
testConversionOfLargeFreeBlocks
(
self
):
f
=
open
(
'cache'
,
'wb'
)
f
.
write
(
ZEO
.
cache
.
magic
+
'
\
0
'
*
8
+
'f'
+
struct
.
pack
(
">I"
,
(
1
<<
32
)
-
12
)
)
f
.
seek
((
1
<<
32
)
-
1
)
f
.
write
(
'x'
)
f
.
close
()
cache
=
ZEO
.
cache
.
ClientCache
(
'cache'
,
size
=
1
<<
32
)
cache
.
close
()
cache
=
ZEO
.
cache
.
ClientCache
(
'cache'
,
size
=
1
<<
32
)
cache
.
close
()
f
=
open
(
'cache'
,
'rb'
)
f
.
seek
(
12
)
self
.
assertEquals
(
f
.
read
(
1
),
'f'
)
self
.
assertEquals
(
struct
.
unpack
(
">I"
,
f
.
read
(
4
))[
0
],
ZEO
.
cache
.
max_block_size
)
f
.
close
()
__test__
=
dict
(
kill_does_not_cause_cache_corruption
=
r"""
...
...
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment