ZODB, commit a3b47217
authored Nov 12, 2008 by Jim Fulton
parent 911e8f5d

Allow client caches larger than 4G.

Showing 3 changed files, with 57 additions and 6 deletions (+57 -6):
src/CHANGES.txt               +2  -2
src/ZEO/cache.py              +21 -3
src/ZEO/tests/test_cache.py   +34 -1
src/CHANGES.txt

@@ -37,9 +37,9 @@ New Features

 - Object saves are a little faster

-- The previous (ZODB 3.8) ZEO clien-cache format is supported.
+- The previous (ZODB 3.8) ZEO client-cache format is supported.
   The newer cache format introduced in ZODB 3.9.0a1 is no-longer
-  supported and cache files are limited to 4G.
+  supported.  Cache files can still be larger than 4G.

 3.9.0a4 (2008-11-06)
 ====================
src/ZEO/cache.py

@@ -82,6 +82,12 @@ logger = logging.getLogger("ZEO.cache")

 magic = "ZEC3"
 ZEC_HEADER_SIZE = 12

+# Maximum block size. Note that while we are doing a store, we may
+# need to write a free block that is almost twice as big.  If we die
+# in the middle of a store, then we need to split the large free records
+# while opening.
+max_block_size = (1 << 31) - 1
+
 # After the header, the file contains a contiguous sequence of blocks.  All
 # blocks begin with a one-byte status indicator:
 #
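
The cap follows from the on-disk format: every block header stores its size in a 4-byte big-endian unsigned field (pack(">I", ...)), whose largest value is 2**32 - 1. Since a store in progress may briefly need a free block almost twice as large as the biggest record, capping records at (1 << 31) - 1 bytes keeps even that doubled block representable. A standalone sketch of the arithmetic (illustration only, not part of the commit; Python 2 strings, matching the codebase):

    from struct import pack, unpack

    max_block_size = (1 << 31) - 1

    # A doubled free block must still fit the 4-byte ">I" size field,
    # whose maximum value is 2**32 - 1.
    assert 2 * max_block_size <= (1 << 32) - 1

    # Round-trip a worst-case 'f' block header, as the opening scan
    # would read it back.
    header = 'f' + pack(">I", 2 * max_block_size)
    size, = unpack(">I", header[1:])
    assert size == 2 * max_block_size
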
@@ -203,8 +209,12 @@ class ClientCache(object):

             self.f.seek(0)
             self.f.write(magic)
             self.f.write(z64)
-            # and one free block.
-            self.f.write('f' + pack(">I", self.maxsize - ZEC_HEADER_SIZE))
+            # add as many free blocks as are needed to fill the space
+            nfree = self.maxsize - ZEC_HEADER_SIZE
+            for i in range(0, nfree, max_block_size):
+                block_size = min(max_block_size, nfree - i)
+                self.f.write('f' + pack(">I", block_size))
+                self.f.seek(block_size - 5, 1)
             sync(self.f)

         # Statistics:  _n_adds, _n_added_bytes,
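
In isolation, the new loop covers the maxsize - ZEC_HEADER_SIZE bytes after the file header with consecutive free blocks of at most max_block_size bytes each, seeking past each block body rather than writing it out. A minimal standalone sketch (the helper name write_free_blocks is hypothetical, not from the commit):

    from struct import pack

    ZEC_HEADER_SIZE = 12
    max_block_size = (1 << 31) - 1

    def write_free_blocks(f, maxsize):
        # Cover the space after the 12-byte file header with 'f'
        # blocks, each holding a 5-byte header (status byte plus
        # 4-byte size) followed by unwritten body bytes.
        nfree = maxsize - ZEC_HEADER_SIZE
        for i in range(0, nfree, max_block_size):
            block_size = min(max_block_size, nfree - i)
            f.write('f' + pack(">I", block_size))
            f.seek(block_size - 5, 1)  # skip the rest of the block

For an 8G file (1 << 33 bytes) this writes four free blocks: three of 2**31 - 1 bytes and a final smaller one covering the remainder.
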
@@ -273,6 +283,14 @@ class ClientCache(object):
                 l += 1
             elif status == 'f':
                 size, = unpack(">I", read(4))
+                if size > max_block_size:
+                    # Oops, we either have an old cache, or we
+                    # crashed while storing.  Split this block into two.
+                    assert size <= max_block_size * 2
+                    seek(ofs + max_block_size)
+                    self.f.write('f' + pack(">I", size - max_block_size))
+                    seek(ofs)
+                    self.f.write('f' + pack(">I", max_block_size))
             elif status in '1234':
                 size = int(status)
             else:
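
The split rewrites the oversized block in place. A standalone sketch of the same steps (split_oversized_free_block is a hypothetical name; f is assumed open for update):

    from struct import pack

    max_block_size = (1 << 31) - 1

    def split_oversized_free_block(f, ofs, size):
        # An 'f' block bigger than max_block_size (an old-format cache,
        # or a crash mid-store) becomes two adjacent free blocks whose
        # sizes sum to the original.
        assert size <= max_block_size * 2
        f.seek(ofs + max_block_size)
        f.write('f' + pack(">I", size - max_block_size))  # second half
        f.seek(ofs)
        f.write('f' + pack(">I", max_block_size))         # first half

The ordering matters: the second header is written before the first is shrunk, so a crash between the two writes leaves the original oversized header intact and the split simply runs again on the next open.
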
@@ -506,7 +524,7 @@ class ClientCache(object):
         # 2nd-level ZEO cache got a much higher hit rate if "very large"
         # objects simply weren't cached.  For now, we ignore the request
         # only if the entire cache file is too small to hold the object.
-        if size > self.maxsize - ZEC_HEADER_SIZE:
+        if size > min(max_block_size, self.maxsize - ZEC_HEADER_SIZE):
             return

         self._n_adds += 1
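
A consequence worth spelling out: even in a cache file larger than 4G, one object can never span blocks, so anything over 2**31 - 1 bytes is skipped rather than cached. A standalone sketch of the predicate (would_cache is a hypothetical name, not from the commit):

    ZEC_HEADER_SIZE = 12
    max_block_size = (1 << 31) - 1

    def would_cache(size, maxsize):
        # Mirror the new guard in store(): skip objects that cannot
        # fit in a single block, even when the file itself could.
        return size <= min(max_block_size, maxsize - ZEC_HEADER_SIZE)

    assert would_cache(1 << 30, 1 << 33)      # 1G object, 8G cache
    assert not would_cache(3 << 30, 1 << 33)  # 3G object exceeds a block
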
src/ZEO/tests/test_cache.py

@@ -18,10 +18,12 @@ from zope.testing import doctest
 import os
 import random
 import string
+import struct
 import sys
 import tempfile
 import unittest
 import ZEO.cache
+import ZODB.tests.util
 import zope.testing.setupstack

 import ZEO.cache
@@ -61,17 +63,19 @@ def oid(o):
     return repr_to_oid(repr)

 tid = oid

-class CacheTests(unittest.TestCase):
+class CacheTests(ZODB.tests.util.TestCase):

     def setUp(self):
         # The default cache size is much larger than we need here.  Since
         # testSerialization reads the entire file into a string, it's not
         # good to leave it that big.
+        ZODB.tests.util.TestCase.setUp(self)
         self.cache = ZEO.cache.ClientCache(size=1024**2)

     def tearDown(self):
         if self.cache.path:
             os.remove(self.cache.path)
+        ZODB.tests.util.TestCase.tearDown(self)

     def testLastTid(self):
         self.assertEqual(self.cache.getLastTid(), None)
@@ -192,6 +196,35 @@ class CacheTests(unittest.TestCase):
         # recorded as non-current.
         self.assert_(1 not in cache.noncurrent)

+    def testVeryLargeCaches(self):
+        cache = ZEO.cache.ClientCache('cache', size=(1 << 33))
+        cache.store(n1, n2, None, "x")
+        cache.close()
+        cache = ZEO.cache.ClientCache('cache', size=(1 << 33))
+        self.assertEquals(cache.load(n1), ('x', n2))
+        cache.close()
+
+    def testConversionOfLargeFreeBlocks(self):
+        f = open('cache', 'wb')
+        f.write(ZEO.cache.magic +
+                '\0' * 8 +
+                'f' + struct.pack(">I", (1 << 32) - 12)
+                )
+        f.seek((1 << 32) - 1)
+        f.write('x')
+        f.close()
+        cache = ZEO.cache.ClientCache('cache', size=1 << 32)
+        cache.close()
+        cache = ZEO.cache.ClientCache('cache', size=1 << 32)
+        cache.close()
+        f = open('cache', 'rb')
+        f.seek(12)
+        self.assertEquals(f.read(1), 'f')
+        self.assertEquals(struct.unpack(">I", f.read(4))[0],
+                          ZEO.cache.max_block_size)
+        f.close()
+
 __test__ = dict(
     kill_does_not_cause_cache_corruption =
     r"""
...