Commit 1a451be0 authored by Jim Fulton

Fixed numerous bugs in the ZEO cache tracing and analysis code.

Cache simulation, while not perfect, seems to be much more accurate
now than it was before.

The ZEO cache trace statistics and simulation scripts have been
given more descriptive names and moved to the ZEO scripts package.
parent aa0e58c2
...@@ -26,6 +26,13 @@ Bugs fixed
https://bugs.launchpad.net/zodb/+bug/143237
- There were numerous bugs in the ZEO cache tracing and analysis code.
Cache simulation, while not perfect, seems to be much more accurate
now than it was before.
The ZEO cache trace statistics and simulation scripts have been
given more descriptive names and moved to the ZEO scripts package.
3.10.0b1 (2010-05-18)
=====================
......
...@@ -379,6 +379,7 @@ class ClientCache(object):
# Close the underlying file. No methods accessing the cache should be
# used after this.
def close(self):
self._unsetup_trace()
f = self.f
self.f = None
if f is not None:
...@@ -726,6 +727,10 @@ class ClientCache(object):
# self._tracefile. If not, or we can't write to the trace file, disable
# tracing by setting self._trace to a dummy function, and set
# self._tracefile to None.
_tracefile = None
def _trace(self, *a, **kw):
pass
def _setup_trace(self, path):
_tracefile = None
if path and os.environ.get("ZEO_CACHE_TRACE"):
...@@ -738,7 +743,6 @@ class ClientCache(object):
logger.info("opened tracefile %r", tfn)
if _tracefile is None:
self._trace = lambda *a, **k: None
return
now = time.time
...@@ -747,7 +751,7 @@ class ClientCache(object):
# The first hex digit shows the operation, the second the outcome.
# This method has been carefully tuned to be as fast as possible.
# Note: when tracing is disabled, this method is hidden by a dummy.
encoded = (dlen + 255) & 0x7fffff00 | code encoded = (dlen << 8) + code
if tid is None:
tid = z64
if end_tid is None:
...@@ -762,8 +766,15 @@ class ClientCache(object):
raise
self._trace = _trace
self._tracefile = _tracefile
_trace(0x00)
def _unsetup_trace(self):
if self._tracefile is not None:
del self._trace
self._tracefile.close()
del self._tracefile
def sync(f):
f.flush()
......
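For orientation (not part of the commit), here is a minimal sketch of how a trace record is packed and unpacked, based on the ">iiH8s8s" layout and the new (dlen << 8) + code encoding shown above; the helper names are made up for illustration.

import struct
import time
from ZODB.utils import z64

FMT = ">iiH8s8s"                 # timestamp, dlen<<8 | code, oid length, tid, end_tid
FMT_SIZE = struct.calcsize(FMT)  # 26 bytes; the oid bytes follow the fixed header

def pack_trace_record(code, oid, tid=z64, end_tid=z64, dlen=0):
    # Mirrors what ClientCache._trace now writes: the data length lives in
    # the high bits and the event code in the low byte.
    encoded = (dlen << 8) + code
    return struct.pack(FMT, int(time.time()), encoded, len(oid), tid, end_tid) + oid

def unpack_trace_header(raw):
    # Mirrors the decoding done by the stats and simulation scripts.
    ts, encoded, oidlen, tid, end_tid = struct.unpack(FMT, raw[:FMT_SIZE])
    dlen = (encoded & 0x7fffff00) >> 8
    code = encoded & 0xff
    return ts, dlen, code, oidlen, tid, end_tid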
#! /usr/bin/env python
##############################################################################
#
# Copyright (c) 2001-2005 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################
"""Cache simulation.
Usage: cache_simul.py [-s size] [-i interval] tracefile
Options:
-s size: cache size in MB (default 20 MB)
-i: summarizing interval in minutes (default 15; max 60)
Note:
- The simulation isn't perfect.
- The simulation will be far off if the trace file
was created starting with a non-empty cache
"""
from sets import Set
import bisect
import getopt
import math
import struct
import re
import sys
import time
import ZEO.cache
from ZODB.utils import z64, u64
def usage(msg):
print >> sys.stderr, msg
print >> sys.stderr, __doc__
def main(args=None):
if args is None:
args = sys.argv[1:]
# Parse options.
MB = 1<<20
cachelimit = 20*MB
simclass = CircularCacheSimulation
interval_step = 15
try:
opts, args = getopt.getopt(args, "s:i:")
except getopt.error, msg:
usage(msg)
return 2
for o, a in opts:
if o == '-s':
cachelimit = int(float(a)*MB)
elif o == '-i':
interval_step = int(a)
else:
assert False, (o, a)
interval_step *= 60
if interval_step <= 0:
interval_step = 60
elif interval_step > 3600:
interval_step = 3600
if len(args) != 1:
usage("exactly one file argument required")
return 2
filename = args[0]
# Open file.
if filename.endswith(".gz"):
# Open gzipped file.
try:
import gzip
except ImportError:
print >> sys.stderr, "can't read gzipped files (no module gzip)"
return 1
try:
f = gzip.open(filename, "rb")
except IOError, msg:
print >> sys.stderr, "can't open %s: %s" % (filename, msg)
return 1
elif filename == "-":
# Read from stdin.
f = sys.stdin
else:
# Open regular file.
try:
f = open(filename, "rb")
except IOError, msg:
print >> sys.stderr, "can't open %s: %s" % (filename, msg)
return 1
# Create simulation object.
sim = simclass(cachelimit)
interval_sim = simclass(cachelimit)
# Print output header.
sim.printheader()
# Read trace file, simulating cache behavior.
f_read = f.read
unpack = struct.unpack
FMT = ">iiH8s8s"
FMT_SIZE = struct.calcsize(FMT)
assert FMT_SIZE == 26
last_interval = None
while 1:
# Read a record and decode it.
r = f_read(FMT_SIZE)
if len(r) < FMT_SIZE:
break
ts, code, oidlen, start_tid, end_tid = unpack(FMT, r)
if ts == 0:
# Must be a misaligned record caused by a crash; skip 8 bytes
# and try again. Why 8? Lost in the mist of history.
f.seek(f.tell() - FMT_SIZE + 8)
continue
oid = f_read(oidlen)
if len(oid) < oidlen:
break
# Decode the code.
dlen, version, code = ((code & 0x7fffff00) >> 8,
code & 0x80,
code & 0x7e)
# And pass it to the simulation.
this_interval = int(ts)/interval_step
if this_interval != last_interval:
if last_interval is not None:
interval_sim.report()
interval_sim.restart()
last_interval = this_interval
sim.event(ts, dlen, version, code, oid, start_tid, end_tid)
interval_sim.event(ts, dlen, version, code, oid, start_tid, end_tid)
f.close()
# Finish simulation.
interval_sim.report()
sim.finish()
class Simulation(object):
"""Base class for simulations.
The driver program calls: event(), printheader(), finish().
The standard event() method calls these additional methods:
write(), load(), inval(), report(), restart(); the standard
finish() method also calls report().
"""
def __init__(self, cachelimit):
self.cachelimit = cachelimit
# Initialize global statistics.
self.epoch = None
self.total_loads = 0
self.total_hits = 0 # subclass must increment
self.total_invals = 0 # subclass must increment
self.total_writes = 0
if not hasattr(self, "extras"):
self.extras = (self.extraname,)
self.format = self.format + " %7s" * len(self.extras)
# Reset per-run statistics and set up simulation data.
self.restart()
def restart(self):
# Reset per-run statistics.
self.loads = 0
self.hits = 0 # subclass must increment
self.invals = 0 # subclass must increment
self.writes = 0
self.ts0 = None
def event(self, ts, dlen, _version, code, oid,
start_tid, end_tid):
# Record first and last timestamp seen.
if self.ts0 is None:
self.ts0 = ts
if self.epoch is None:
self.epoch = ts
self.ts1 = ts
# Simulate cache behavior. Caution: the codes in the trace file
# record whether the actual cache missed or hit on each load, but
# that bears no necessary relationship to whether the simulated cache
# will hit or miss. Relatedly, if the actual cache needed to store
# an object, the simulated cache may not need to (it may already
# have the data).
action = code & 0x70
if action & 0x20:
# Load.
self.loads += 1
self.total_loads += 1
# Asserting that dlen is 0 iff it's a load miss.
# assert (dlen == 0) == (code in (0x20, 0x24))
self.load(oid, dlen, start_tid, code)
elif action & 0x40:
# Store.
assert dlen
self.write(oid, dlen, start_tid, end_tid)
elif action & 0x10:
# Invalidate.
self.inval(oid, start_tid)
elif action == 0x00:
# Restart.
self.restart()
else:
raise ValueError("unknown trace code 0x%x" % code)
def write(self, oid, size, start_tid, end_tid):
pass
def load(self, oid, size, start_tid, code):
# Must increment .hits and .total_hits as appropriate.
pass
def inval(self, oid, start_tid):
# Must increment .invals and .total_invals as appropriate.
pass
format = "%12s %6s %7s %7s %6s %6s %7s"
# Subclass should override extraname to name known instance variables;
# if extraname is 'foo', both self.foo and self.total_foo must exist:
extraname = "*** please override ***"
def printheader(self):
print "%s, cache size %s bytes" % (self.__class__.__name__,
addcommas(self.cachelimit))
self.extraheader()
extranames = tuple([s.upper() for s in self.extras])
args = ("START TIME", "DUR.", "LOADS", "HITS",
"INVALS", "WRITES", "HITRATE") + extranames
print self.format % args
def extraheader(self):
pass
nreports = 0
def report(self):
self.nreports += 1
args = (time.ctime(self.ts0)[4:-8],
duration(self.ts1 - self.ts0),
self.loads, self.hits, self.invals, self.writes,
hitrate(self.loads, self.hits))
args += tuple([getattr(self, name) for name in self.extras])
print self.format % args
def finish(self):
# Make sure that the last line of output ends with "OVERALL". This
# makes it much easier for another program parsing the output to
# find summary statistics.
print '-'*74
if self.nreports < 2:
self.report()
else:
self.report()
args = (
time.ctime(self.epoch)[4:-8],
duration(self.ts1 - self.epoch),
self.total_loads,
self.total_hits,
self.total_invals,
self.total_writes,
hitrate(self.total_loads, self.total_hits))
args += tuple([getattr(self, "total_" + name)
for name in self.extras])
print self.format % args
# For use in CircularCacheSimulation.
class CircularCacheEntry(object):
__slots__ = (# object key: an (oid, start_tid) pair, where
# start_tid is the tid of the transaction that created
# this revision of oid
'key',
# tid of transaction that created the next revision;
# z64 iff this is the current revision
'end_tid',
# Offset from start of file to the object's data
# record; this includes all overhead bytes (status
# byte, size bytes, etc).
'offset',
)
def __init__(self, key, end_tid, offset):
self.key = key
self.end_tid = end_tid
self.offset = offset
from ZEO.cache import ZEC_HEADER_SIZE
class CircularCacheSimulation(Simulation):
"""Simulate the ZEO 3.0 cache."""
# The cache is managed as a single file with a pointer that
# goes around the file, circularly, forever. New objects
# are written at the current pointer, evicting whatever was
# there previously.
extras = "evicts", "inuse"
def __init__(self, cachelimit):
from ZEO import cache
Simulation.__init__(self, cachelimit)
self.total_evicts = 0 # number of cache evictions
# Current offset in file.
self.offset = ZEC_HEADER_SIZE
# Map offset in file to (size, CircularCacheEntry) pair, or to
# (size, None) if the offset starts a free block.
self.filemap = {ZEC_HEADER_SIZE: (self.cachelimit - ZEC_HEADER_SIZE,
None)}
# Map key to CircularCacheEntry. A key is an (oid, tid) pair.
self.key2entry = {}
# Map oid to tid of current revision.
self.current = {}
# Map oid to list of (start_tid, end_tid) pairs in sorted order.
# Used to find matching key for load of non-current data.
self.noncurrent = {}
# The number of overhead bytes needed to store an object pickle
# on disk (all bytes beyond those needed for the object pickle).
self.overhead = ZEO.cache.allocated_record_overhead
# save evictions so we can replay them, if necessary
self.evicted = {}
def restart(self):
Simulation.restart(self)
self.evicts = 0
self.evicted_hit = self.evicted_miss = 0
evicted_hit = evicted_miss = 0
def load(self, oid, size, tid, code):
if (code == 0x20) or (code == 0x22):
# Trying to load current revision.
if oid in self.current: # else it's a cache miss
self.hits += 1
self.total_hits += 1
elif oid in self.evicted:
size, e = self.evicted[oid]
self.write(oid, size, e.key[1], z64, 1)
self.evicted_hit += 1
else:
self.evicted_miss += 1
return
# May or may not be trying to load current revision.
cur_tid = self.current.get(oid)
if cur_tid == tid:
self.hits += 1
self.total_hits += 1
return
# It's a load for non-current data. Do we know about this oid?
L = self.noncurrent.get(oid)
if L is None:
return # cache miss
i = bisect.bisect_left(L, (tid, None))
if i == 0:
# This tid is smaller than any we know about -- miss.
return
lo, hi = L[i-1]
assert lo < tid
if tid > hi:
# No data in the right tid range -- miss.
return
# Cache hit.
self.hits += 1
self.total_hits += 1
# (oid, tid) is in the cache. Remove it: take it out of key2entry,
# and in `filemap` mark the space it occupied as being free. The
# caller is responsible for removing it from `current` or `noncurrent`.
def _remove(self, oid, tid):
key = oid, tid
e = self.key2entry.pop(key)
pos = e.offset
size, _e = self.filemap[pos]
assert e is _e
self.filemap[pos] = size, None
def _remove_noncurrent_revisions(self, oid):
noncurrent_list = self.noncurrent.get(oid)
if noncurrent_list:
self.invals += len(noncurrent_list)
self.total_invals += len(noncurrent_list)
for start_tid, end_tid in noncurrent_list:
self._remove(oid, start_tid)
del self.noncurrent[oid]
def inval(self, oid, tid):
if tid == z64:
# This is part of startup cache verification: forget everything
# about this oid.
self._remove_noncurrent_revisions(oid)
cur_tid = self.current.get(oid)
if cur_tid is None:
# We don't have current data, so nothing more to do.
return
# We had current data for oid, but no longer.
self.invals += 1
self.total_invals += 1
del self.current[oid]
if tid == z64:
# Startup cache verification: forget this oid entirely.
self._remove(oid, cur_tid)
return
# Our current data becomes non-current data.
# Add the validity range to the list of non-current data for oid.
assert cur_tid < tid
L = self.noncurrent.setdefault(oid, [])
bisect.insort_left(L, (cur_tid, tid))
# Update the end of oid's validity range in its CircularCacheEntry.
e = self.key2entry[oid, cur_tid]
assert e.end_tid == z64
e.end_tid = tid
def write(self, oid, size, start_tid, end_tid, evhit=0):
if end_tid == z64:
# Storing current revision.
if oid in self.current: # we already have it in cache
if evhit:
import pdb; pdb.set_trace()
raise ValueError('WTF')
return
self.current[oid] = start_tid
self.writes += 1
self.total_writes += 1
self.add(oid, size, start_tid)
return
if evhit:
import pdb; pdb.set_trace()
raise ValueError('WTF')
# Storing non-current revision.
L = self.noncurrent.setdefault(oid, [])
p = start_tid, end_tid
if p in L:
return # we already have it in cache
bisect.insort_left(L, p)
self.writes += 1
self.total_writes += 1
self.add(oid, size, start_tid, end_tid)
# Add `oid` to the cache, evicting objects as needed to make room.
# This updates `filemap` and `key2entry`; it's the caller's
# responsibility to update `current` or `noncurrent` appropriately.
def add(self, oid, size, start_tid, end_tid=z64):
key = oid, start_tid
assert key not in self.key2entry
size += self.overhead
avail = self.makeroom(size+1) # see cache.py
e = CircularCacheEntry(key, end_tid, self.offset)
self.filemap[self.offset] = size, e
self.key2entry[key] = e
self.offset += size
# All the space made available must be accounted for in filemap.
excess = avail - size
if excess:
self.filemap[self.offset] = excess, None
# Evict enough objects to make at least `need` contiguous bytes, starting
# at `self.offset`, available. Evicted objects are removed from
# `filemap`, `key2entry`, `current` and `noncurrent`. The caller is
# responsible for adding new entries to `filemap` to account for all
# the freed bytes, and for advancing `self.offset`. The number of bytes
# freed is the return value, and will be >= need.
def makeroom(self, need):
if self.offset + need > self.cachelimit:
self.offset = ZEC_HEADER_SIZE
pos = self.offset
while need > 0:
assert pos < self.cachelimit
size, e = self.filemap.pop(pos)
if e: # there is an object here (else it's already free space)
self.evicts += 1
self.total_evicts += 1
assert pos == e.offset
_e = self.key2entry.pop(e.key)
assert e is _e
oid, start_tid = e.key
if e.end_tid == z64:
del self.current[oid]
self.evicted[oid] = size-self.overhead, e
else:
L = self.noncurrent[oid]
L.remove((start_tid, e.end_tid))
need -= size
pos += size
return pos - self.offset # total number of bytes freed
def report(self):
self.check()
free = used = total = 0
for size, e in self.filemap.itervalues():
total += size
if e:
used += size
else:
free += size
self.inuse = round(100.0 * used / total, 1)
self.total_inuse = self.inuse
Simulation.report(self)
#print self.evicted_hit, self.evicted_miss
def check(self):
oidcount = 0
pos = ZEC_HEADER_SIZE
while pos < self.cachelimit:
size, e = self.filemap[pos]
if e:
oidcount += 1
assert self.key2entry[e.key].offset == pos
pos += size
assert oidcount == len(self.key2entry)
assert pos == self.cachelimit
def dump(self):
print len(self.filemap)
L = list(self.filemap)
L.sort()
for k in L:
v = self.filemap[k]
print k, v[0], repr(v[1])
def roundup(size):
k = MINSIZE
while k < size:
k += k
return k
def hitrate(loads, hits):
if loads < 1:
return 'n/a'
return "%5.1f%%" % (100.0 * hits / loads)
def duration(secs):
mm, ss = divmod(secs, 60)
hh, mm = divmod(mm, 60)
if hh:
return "%d:%02d:%02d" % (hh, mm, ss)
if mm:
return "%d:%02d" % (mm, ss)
return "%d" % ss
nre = re.compile('([=-]?)(\d+)([.]\d*)?').match
def addcommas(n):
sign, s, d = nre(str(n)).group(1, 2, 3)
if d == '.0':
d = ''
result = s[-3:]
s = s[:-3]
while s:
result = s[-3:]+','+result
s = s[:-3]
return (sign or '') + result + (d or '')
import random
def maybe(f, p=0.5):
if random.random() < p:
f()
if __name__ == "__main__":
sys.exit(main())
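As a usage note (the trace file path is illustrative), the simulator can be run from the command line or driven programmatically, as the new tests below do:

# Simulate a 2 MB cache with 5-minute report intervals against a trace
# produced with the ZEO_CACHE_TRACE environment variable set:
#
#   python cache_simul.py -s 2 -i 5 cache.trace
#
# or, equivalently, from Python:
import ZEO.scripts.cache_simul
ZEO.scripts.cache_simul.main('-s 2 -i 5 cache.trace'.split())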
...@@ -66,7 +66,9 @@ def usage(msg):
print >> sys.stderr, msg
print >> sys.stderr, __doc__
def main(): def main(args=None):
if args is None:
args = sys.argv[1:]
# Parse options
verbose = False
quiet = False
...@@ -76,7 +78,7 @@ def main():
interval = 15*60 # Every 15 minutes
heuristic = False
try:
opts, args = getopt.getopt(sys.argv[1:], "hi:qsSvX") opts, args = getopt.getopt(args, "hi:qsSvX")
except getopt.error, msg:
usage(msg)
return 2
...@@ -154,6 +156,8 @@ def main():
FMT_SIZE = struct.calcsize(FMT)
assert FMT_SIZE == 26
# Read file, gathering statistics, and printing each record if verbose.
print ' '*16, "%7s %7s %7s %7s" % ('loads', 'hits', 'inv(h)', 'writes'),
print 'hitrate'
try:
while 1:
r = f_read(FMT_SIZE)
...@@ -182,7 +186,7 @@ def main():
thisinterval = ts // interval
h0 = ts
he = ts
dlen, code = code & 0x7fffff00, code & 0xff dlen, code = (code & 0x7fffff00) >> 8, code & 0xff
if dlen:
datarecords += 1
datasize += dlen
...@@ -245,10 +249,10 @@ def main():
print "First time: %s" % time.ctime(t0)
print "Last time: %s" % time.ctime(te)
print "Duration: %s seconds" % addcommas(te-t0)
print "Data recs: %s (%.1f%%), average size %.1f KB" % ( print "Data recs: %s (%.1f%%), average size %d bytes" % (
addcommas(datarecords),
100.0 * datarecords / records,
datasize / 1024.0 / datarecords) datasize / datarecords)
print "Hit rate: %.1f%% (load hits / loads)" % hitrate(bycode) print "Hit rate: %.1f%% (load hits / loads)" % hitrate(bycode)
print print
codes = bycode.keys() codes = bycode.keys()
...@@ -303,22 +307,27 @@ def dumpbysize(bysize, how, how2):
loads)
def dumpbyinterval(byinterval, h0, he):
loads = hits = 0 loads = hits = invals = writes = 0
for code in byinterval:
if code & 0x70 == 0x20: if code & 0x20:
n = byinterval[code]
loads += n
if code in (0x22, 0x26):
hits += n
if not loads: elif code & 0x40:
return writes += byinterval[code]
elif code & 0x10:
if code != 0x10:
invals += byinterval[code]
if loads:
hr = 100.0 * hits / loads hr = "%5.1f%%" % (100.0 * hits / loads)
else:
hr = 0.0 hr = 'n/a'
print "%s-%s %10s loads, %10s hits,%5.1f%% hit rate" % (
print "%s-%s %7s %7s %7s %7s %7s" % (
time.ctime(h0)[4:-8], time.ctime(he)[14:-8],
addcommas(loads), addcommas(hits), hr) loads, hits, invals, writes, hr)
def hitrate(bycode):
loads = hits = 0
......
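As a reading aid (not part of either script), the per-interval bucketing above and the simulator's event dispatch classify trace codes roughly as follows; the classify helper is made up for illustration.

def classify(code):
    # The 0x70 bits select the operation; the low bits distinguish hit/miss
    # variants, matching dumpbyinterval() and Simulation.event() above.
    if code & 0x20:                      # loads; 0x22 and 0x26 are load hits
        return 'load hit' if code in (0x22, 0x26) else 'load miss'
    elif code & 0x40:                    # stores, e.g. 0x52 (current data)
        return 'write'
    elif code & 0x10:                    # invalidations; 0x10 alone is a miss
        return 'inval hit' if code != 0x10 else 'inval miss'
    return 'other (restart or _setup_trace)'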
...@@ -248,7 +248,7 @@ class CacheTests(ZODB.tests.util.TestCase):
self.assertEqual(len(cache), 0)
self.assertEqual(cache.load(n3), None)
self.assertEqual(cache.loadBefore(n3, n2), None)
def testChangingCacheSize(self):
# start with a small cache
data = 'x'
...@@ -309,7 +309,7 @@ class CacheTests(ZODB.tests.util.TestCase):
for i in range(200, 305):
cache.store(p64(i), n1, None, data)
# We use large-2 for the same reason we used small-1 above.
expected_len = large-2
self.assertEquals(len(cache), expected_len)
...@@ -335,169 +335,695 @@ class CacheTests(ZODB.tests.util.TestCase):
self.cache.setLastTid(p64(3))
self.cache.setLastTid(p64(4))
__test__ = dict( def kill_does_not_cause_cache_corruption():
kill_does_not_cause_cache_corruption =
r""" r"""
If we kill a process while a cache is being written to, the cache If we kill a process while a cache is being written to, the cache
isn't corrupted. To see this, we'll write a little script that isn't corrupted. To see this, we'll write a little script that
writes records to a cache file repeatedly. writes records to a cache file repeatedly.
>>> import os, random, sys, time >>> import os, random, sys, time
>>> open('t', 'w').write(''' >>> open('t', 'w').write('''
... import os, random, sys, thread, time ... import os, random, sys, thread, time
... sys.path = %r ... sys.path = %r
... ...
... def suicide(): ... def suicide():
... time.sleep(random.random()/10) ... time.sleep(random.random()/10)
... os._exit(0) ... os._exit(0)
... ...
... import ZEO.cache ... import ZEO.cache
... from ZODB.utils import p64 ... from ZODB.utils import p64
... cache = ZEO.cache.ClientCache('cache') ... cache = ZEO.cache.ClientCache('cache')
... oid = 0 ... oid = 0
... t = 0 ... t = 0
... thread.start_new_thread(suicide, ()) ... thread.start_new_thread(suicide, ())
... while 1: ... while 1:
... oid += 1 ... oid += 1
... t += 1 ... t += 1
... data = 'X' * random.randint(5000,25000) ... data = 'X' * random.randint(5000,25000)
... cache.store(p64(oid), p64(t), None, data) ... cache.store(p64(oid), p64(t), None, data)
... ...
... ''' % sys.path) ... ''' % sys.path)
>>> for i in range(10): >>> for i in range(10):
... _ = os.spawnl(os.P_WAIT, sys.executable, sys.executable, 't') ... _ = os.spawnl(os.P_WAIT, sys.executable, sys.executable, 't')
... if os.path.exists('cache'): ... if os.path.exists('cache'):
... cache = ZEO.cache.ClientCache('cache') ... cache = ZEO.cache.ClientCache('cache')
... cache.close() ... cache.close()
... os.remove('cache') ... os.remove('cache')
... os.remove('cache.lock') ... os.remove('cache.lock')
""", """
full_cache_is_valid = def full_cache_is_valid():
r""" r"""
If we fill up the cache without any free space, the cache can If we fill up the cache without any free space, the cache can
still be used. still be used.
>>> import ZEO.cache >>> import ZEO.cache
>>> cache = ZEO.cache.ClientCache('cache', 1000) >>> cache = ZEO.cache.ClientCache('cache', 1000)
>>> data = 'X' * (1000 - ZEO.cache.ZEC_HEADER_SIZE - 41) >>> data = 'X' * (1000 - ZEO.cache.ZEC_HEADER_SIZE - 41)
>>> cache.store(p64(1), p64(1), None, data) >>> cache.store(p64(1), p64(1), None, data)
>>> cache.close() >>> cache.close()
>>> cache = ZEO.cache.ClientCache('cache', 1000) >>> cache = ZEO.cache.ClientCache('cache', 1000)
>>> cache.store(p64(2), p64(2), None, 'XXX') >>> cache.store(p64(2), p64(2), None, 'XXX')
>>> cache.close() >>> cache.close()
""", """
cannot_open_same_cache_file_twice = def cannot_open_same_cache_file_twice():
r""" r"""
>>> import ZEO.cache >>> import ZEO.cache
>>> cache = ZEO.cache.ClientCache('cache', 1000) >>> cache = ZEO.cache.ClientCache('cache', 1000)
>>> cache2 = ZEO.cache.ClientCache('cache', 1000) >>> cache2 = ZEO.cache.ClientCache('cache', 1000)
Traceback (most recent call last): Traceback (most recent call last):
... ...
LockError: Couldn't lock 'cache.lock' LockError: Couldn't lock 'cache.lock'
>>> cache.close() >>> cache.close()
""", """
thread_safe = def thread_safe():
r""" r"""
>>> import ZEO.cache, ZODB.utils >>> import ZEO.cache, ZODB.utils
>>> cache = ZEO.cache.ClientCache('cache', 1000000) >>> cache = ZEO.cache.ClientCache('cache', 1000000)
>>> for i in range(100): >>> for i in range(100):
... cache.store(ZODB.utils.p64(i), ZODB.utils.p64(1), None, '0') ... cache.store(ZODB.utils.p64(i), ZODB.utils.p64(1), None, '0')
>>> import random, sys, threading >>> import random, sys, threading
>>> random = random.Random(0) >>> random = random.Random(0)
>>> stop = False >>> stop = False
>>> read_failure = None >>> read_failure = None
>>> def read_thread():
... def pick_oid():
... return ZODB.utils.p64(random.randint(0,99))
...
... try:
... while not stop:
... cache.load(pick_oid())
... cache.loadBefore(pick_oid(), ZODB.utils.p64(2))
... except:
... global read_failure
... read_failure = sys.exc_info()
>>> thread = threading.Thread(target=read_thread)
>>> thread.start()
>>> for tid in range(2,10):
... for oid in range(100):
... oid = ZODB.utils.p64(oid)
... cache.invalidate(oid, ZODB.utils.p64(tid))
... cache.store(oid, ZODB.utils.p64(tid), None, str(tid))
>>> stop = True
>>> thread.join()
>>> if read_failure:
... print 'Read failure:'
... import traceback
... traceback.print_exception(*read_failure)
>>> expected = '9', ZODB.utils.p64(9)
>>> for oid in range(100):
... loaded = cache.load(ZODB.utils.p64(oid))
... if loaded != expected:
... print oid, loaded
>>> cache.close()
"""
def broken_non_current():
r"""
>>> def read_thread(): In production, we saw a situation where an _del_noncurrent raused
... def pick_oid(): a key error when trying to free space, causing the cache to become
... return ZODB.utils.p64(random.randint(0,99)) unusable. I can't see why this would occur, but added a logging
... exception handler so, in the future, we'll still see cases in the
... try: log, but will ignore the error and keep going.
... while not stop:
... cache.load(pick_oid()) >>> import ZEO.cache, ZODB.utils, logging, sys
... cache.loadBefore(pick_oid(), ZODB.utils.p64(2)) >>> logger = logging.getLogger('ZEO.cache')
... except: >>> logger.setLevel(logging.ERROR)
... global read_failure >>> handler = logging.StreamHandler(sys.stdout)
... read_failure = sys.exc_info() >>> logger.addHandler(handler)
>>> cache = ZEO.cache.ClientCache('cache', 1000)
>>> thread = threading.Thread(target=read_thread) >>> cache.store(ZODB.utils.p64(1), ZODB.utils.p64(1), None, '0')
>>> thread.start() >>> cache.invalidate(ZODB.utils.p64(1), ZODB.utils.p64(2))
>>> cache._del_noncurrent(ZODB.utils.p64(1), ZODB.utils.p64(2))
>>> for tid in range(2,10): ... # doctest: +NORMALIZE_WHITESPACE
... for oid in range(100): Couldn't find non-current
... oid = ZODB.utils.p64(oid) ('\x00\x00\x00\x00\x00\x00\x00\x01', '\x00\x00\x00\x00\x00\x00\x00\x02')
... cache.invalidate(oid, ZODB.utils.p64(tid)) >>> cache._del_noncurrent(ZODB.utils.p64(1), ZODB.utils.p64(1))
... cache.store(oid, ZODB.utils.p64(tid), None, str(tid)) >>> cache._del_noncurrent(ZODB.utils.p64(1), ZODB.utils.p64(1)) #
... # doctest: +NORMALIZE_WHITESPACE
>>> stop = True Couldn't find non-current
>>> thread.join() ('\x00\x00\x00\x00\x00\x00\x00\x01', '\x00\x00\x00\x00\x00\x00\x00\x01')
>>> if read_failure:
... print 'Read failure:' >>> logger.setLevel(logging.NOTSET)
... import traceback >>> logger.removeHandler(handler)
... traceback.print_exception(*read_failure)
>>> cache.close()
>>> expected = '9', ZODB.utils.p64(9) """
>>> for oid in range(100):
... loaded = cache.load(ZODB.utils.p64(oid)) def bad_magic_number():
... if loaded != expected:
... print oid, loaded
>>> cache.close()
""",
broken_non_current =
r""" r"""
>>> open('cache', 'w').write("Hi world!")
>>> try: cache = ZEO.cache.ClientCache('cache', 1000)
... except Exception, v: print v
unexpected magic number: 'Hi w'
"""
In production, we saw a situation where an _del_noncurrent raused def cache_trace_analysis():
a key error when trying to free space, causing the cache to become
unusable. I can't see why this would occur, but added a logging
exception handler so, in the future, we'll still see cases in the
log, but will ignore the error and keep going.
>>> import ZEO.cache, ZODB.utils, logging, sys
>>> logger = logging.getLogger('ZEO.cache')
>>> logger.setLevel(logging.ERROR)
>>> handler = logging.StreamHandler(sys.stdout)
>>> logger.addHandler(handler)
>>> cache = ZEO.cache.ClientCache('cache', 1000)
>>> cache.store(ZODB.utils.p64(1), ZODB.utils.p64(1), None, '0')
>>> cache.invalidate(ZODB.utils.p64(1), ZODB.utils.p64(2))
>>> cache._del_noncurrent(ZODB.utils.p64(1), ZODB.utils.p64(2))
... # doctest: +NORMALIZE_WHITESPACE
Couldn't find non-current
('\x00\x00\x00\x00\x00\x00\x00\x01', '\x00\x00\x00\x00\x00\x00\x00\x02')
>>> cache._del_noncurrent(ZODB.utils.p64(1), ZODB.utils.p64(1))
>>> cache._del_noncurrent(ZODB.utils.p64(1), ZODB.utils.p64(1)) #
... # doctest: +NORMALIZE_WHITESPACE
Couldn't find non-current
('\x00\x00\x00\x00\x00\x00\x00\x01', '\x00\x00\x00\x00\x00\x00\x00\x01')
>>> logger.setLevel(logging.NOTSET)
>>> logger.removeHandler(handler)
>>> cache.close()
""",
bad_magic_number =
r""" r"""
>>> open('cache', 'w').write("Hi world!") Check to make sure the cache analysis scripts work.
>>> try: cache = ZEO.cache.ClientCache('cache', 1000)
... except Exception, v: print v >>> import time
unexpected magic number: 'Hi w' >>> timetime = time.time
""" >>> now = 1278864701.5
) >>> time.time = lambda : now
>>> os.environ["ZEO_CACHE_TRACE"] = 'yes'
>>> import random
>>> random = random.Random(42)
>>> history = []
>>> serial = 1
>>> for i in range(1000):
... serial += 1
... oid = random.randint(i+1000, i+6000)
... history.append(('s', p64(oid), p64(serial),
... 'x'*random.randint(200,2000)))
... for j in range(10):
... oid = random.randint(i+1000, i+6000)
... history.append(('l', p64(oid), p64(serial),
... 'x'*random.randint(200,2000)))
>>> def cache_run(name, size):
... serial = 1
... random.seed(42)
... global now
... now = 1278864701.5
... cache = ZEO.cache.ClientCache(name, size*(1<<20))
... for action, oid, serial, data in history:
... now += 1
... if action == 's':
... cache.invalidate(oid, serial)
... cache.store(oid, serial, None, data)
... else:
... v = cache.load(oid)
... if v is None:
... cache.store(oid, serial, None, data)
... cache.close()
>>> cache_run('cache', 2)
>>> import ZEO.scripts.cache_stats, ZEO.scripts.cache_simul
############################################################
Stats
>>> ZEO.scripts.cache_stats.main(['cache.trace'])
loads hits inv(h) writes hitrate
Jul 11 12:11-11 0 0 0 0 n/a
Jul 11 12:11:41 ==================== Restart ====================
Jul 11 12:11-14 180 1 2 197 0.6%
Jul 11 12:15-29 818 107 9 793 13.1%
Jul 11 12:30-44 818 213 22 687 26.0%
Jul 11 12:45-59 818 297 20 603 36.3%
Jul 11 13:00-14 818 297 35 603 36.3%
Jul 11 13:15-29 818 280 29 620 34.2%
Jul 11 13:30-44 819 275 26 625 33.6%
Jul 11 13:45-59 818 254 24 646 31.1%
Jul 11 14:00-14 818 300 32 600 36.7%
Jul 11 14:15-29 818 267 35 633 32.6%
Jul 11 14:30-44 818 309 37 591 37.8%
Jul 11 14:45-59 819 271 26 629 33.1%
Jul 11 15:00-14 818 292 30 608 35.7%
Jul 11 15:15-15 2 1 0 1 50.0%
<BLANKLINE>
Read 18,837 trace records (640,450 bytes) in 0.0 seconds
Versions: 0 records used a version
First time: Sun Jul 11 12:11:41 2010
Last time: Sun Jul 11 15:15:01 2010
Duration: 11,000 seconds
Data recs: 11,000 (58.4%), average size 1106 bytes
Hit rate: 31.6% (load hits / loads)
<BLANKLINE>
Count Code Function (action)
1 00 _setup_trace (initialization)
673 10 invalidate (miss)
327 1c invalidate (hit, saving non-current)
6,836 20 load (miss)
3,164 22 load (hit)
7,836 52 store (current, non-version)
>>> ZEO.scripts.cache_stats.main('-q cache.trace'.split())
loads hits inv(h) writes hitrate
<BLANKLINE>
Read 18,837 trace records (640,450 bytes) in 0.0 seconds
Versions: 0 records used a version
First time: Sun Jul 11 12:11:41 2010
Last time: Sun Jul 11 15:15:01 2010
Duration: 11,000 seconds
Data recs: 11,000 (58.4%), average size 1106 bytes
Hit rate: 31.6% (load hits / loads)
<BLANKLINE>
Count Code Function (action)
1 00 _setup_trace (initialization)
673 10 invalidate (miss)
327 1c invalidate (hit, saving non-current)
6,836 20 load (miss)
3,164 22 load (hit)
7,836 52 store (current, non-version)
>>> ZEO.scripts.cache_stats.main('-v cache.trace'.split())
... # doctest: +ELLIPSIS
loads hits inv(h) writes hitrate
Jul 11 12:11:41 00 '' 0000000000000000 0000000000000000 -
Jul 11 12:11-11 0 0 0 0 n/a
Jul 11 12:11:41 ==================== Restart ====================
Jul 11 12:11:42 10 1065 0000000000000002 0000000000000000 -
Jul 11 12:11:42 52 1065 0000000000000002 0000000000000000 - 245
Jul 11 12:11:43 20 947 0000000000000000 0000000000000000 -
Jul 11 12:11:43 52 947 0000000000000002 0000000000000000 - 602
Jul 11 12:11:44 20 124b 0000000000000000 0000000000000000 -
Jul 11 12:11:44 52 124b 0000000000000002 0000000000000000 - 1418
...
Jul 11 15:14:55 52 10cc 00000000000003e9 0000000000000000 - 1306
Jul 11 15:14:56 20 18a7 0000000000000000 0000000000000000 -
Jul 11 15:14:56 52 18a7 00000000000003e9 0000000000000000 - 1610
Jul 11 15:14:57 22 18b5 000000000000031d 0000000000000000 - 1636
Jul 11 15:14:58 20 b8a 0000000000000000 0000000000000000 -
Jul 11 15:14:58 52 b8a 00000000000003e9 0000000000000000 - 838
Jul 11 15:14:59 22 1085 0000000000000357 0000000000000000 - 217
Jul 11 15:00-14 818 292 30 608 35.7%
Jul 11 15:15:00 22 1072 000000000000037e 0000000000000000 - 204
Jul 11 15:15:01 20 16c5 0000000000000000 0000000000000000 -
Jul 11 15:15:01 52 16c5 00000000000003e9 0000000000000000 - 1712
Jul 11 15:15-15 2 1 0 1 50.0%
<BLANKLINE>
Read 18,837 trace records (640,450 bytes) in 0.0 seconds
Versions: 0 records used a version
First time: Sun Jul 11 12:11:41 2010
Last time: Sun Jul 11 15:15:01 2010
Duration: 11,000 seconds
Data recs: 11,000 (58.4%), average size 1106 bytes
Hit rate: 31.6% (load hits / loads)
<BLANKLINE>
Count Code Function (action)
1 00 _setup_trace (initialization)
673 10 invalidate (miss)
327 1c invalidate (hit, saving non-current)
6,836 20 load (miss)
3,164 22 load (hit)
7,836 52 store (current, non-version)
>>> ZEO.scripts.cache_stats.main('-h cache.trace'.split())
loads hits inv(h) writes hitrate
Jul 11 12:11-11 0 0 0 0 n/a
Jul 11 12:11:41 ==================== Restart ====================
Jul 11 12:11-14 180 1 2 197 0.6%
Jul 11 12:15-29 818 107 9 793 13.1%
Jul 11 12:30-44 818 213 22 687 26.0%
Jul 11 12:45-59 818 297 20 603 36.3%
Jul 11 13:00-14 818 297 35 603 36.3%
Jul 11 13:15-29 818 280 29 620 34.2%
Jul 11 13:30-44 819 275 26 625 33.6%
Jul 11 13:45-59 818 254 24 646 31.1%
Jul 11 14:00-14 818 300 32 600 36.7%
Jul 11 14:15-29 818 267 35 633 32.6%
Jul 11 14:30-44 818 309 37 591 37.8%
Jul 11 14:45-59 819 271 26 629 33.1%
Jul 11 15:00-14 818 292 30 608 35.7%
Jul 11 15:15-15 2 1 0 1 50.0%
<BLANKLINE>
Read 18,837 trace records (640,450 bytes) in 0.0 seconds
Versions: 0 records used a version
First time: Sun Jul 11 12:11:41 2010
Last time: Sun Jul 11 15:15:01 2010
Duration: 11,000 seconds
Data recs: 11,000 (58.4%), average size 1106 bytes
Hit rate: 31.6% (load hits / loads)
<BLANKLINE>
Count Code Function (action)
1 00 _setup_trace (initialization)
673 10 invalidate (miss)
327 1c invalidate (hit, saving non-current)
6,836 20 load (miss)
3,164 22 load (hit)
7,836 52 store (current, non-version)
<BLANKLINE>
Histogram of object load frequency
Unique oids: 4,585
Total loads: 10,000
loads objects %obj %load %cum
1 1,645 35.9% 16.4% 16.4%
2 1,465 32.0% 29.3% 45.8%
3 809 17.6% 24.3% 70.0%
4 430 9.4% 17.2% 87.2%
5 167 3.6% 8.3% 95.6%
6 49 1.1% 2.9% 98.5%
7 12 0.3% 0.8% 99.3%
8 7 0.2% 0.6% 99.9%
9 1 0.0% 0.1% 100.0%
>>> ZEO.scripts.cache_stats.main('-s cache.trace'.split())
... # doctest: +ELLIPSIS
loads hits inv(h) writes hitrate
Jul 11 12:11-11 0 0 0 0 n/a
Jul 11 12:11:41 ==================== Restart ====================
Jul 11 12:11-14 180 1 2 197 0.6%
Jul 11 12:15-29 818 107 9 793 13.1%
Jul 11 12:30-44 818 213 22 687 26.0%
Jul 11 12:45-59 818 297 20 603 36.3%
Jul 11 13:00-14 818 297 35 603 36.3%
Jul 11 13:15-29 818 280 29 620 34.2%
Jul 11 13:30-44 819 275 26 625 33.6%
Jul 11 13:45-59 818 254 24 646 31.1%
Jul 11 14:00-14 818 300 32 600 36.7%
Jul 11 14:15-29 818 267 35 633 32.6%
Jul 11 14:30-44 818 309 37 591 37.8%
Jul 11 14:45-59 819 271 26 629 33.1%
Jul 11 15:00-14 818 292 30 608 35.7%
Jul 11 15:15-15 2 1 0 1 50.0%
<BLANKLINE>
Read 18,837 trace records (640,450 bytes) in 0.0 seconds
Versions: 0 records used a version
First time: Sun Jul 11 12:11:41 2010
Last time: Sun Jul 11 15:15:01 2010
Duration: 11,000 seconds
Data recs: 11,000 (58.4%), average size 1106 bytes
Hit rate: 31.6% (load hits / loads)
<BLANKLINE>
Count Code Function (action)
1 00 _setup_trace (initialization)
673 10 invalidate (miss)
327 1c invalidate (hit, saving non-current)
6,836 20 load (miss)
3,164 22 load (hit)
7,836 52 store (current, non-version)
<BLANKLINE>
Histograms of object sizes
<BLANKLINE>
<BLANKLINE>
Unique sizes written: 1,781
size objs writes
200 6 6
201 4 4
202 5 5
203 1 1
204 1 1
205 6 6
206 8 8
...
1,995 1 2
1,996 2 2
1,997 2 3
1,998 2 2
1,999 2 2
2,000 1 1
>>> ZEO.scripts.cache_stats.main('-S cache.trace'.split())
loads hits inv(h) writes hitrate
Jul 11 12:11-11 0 0 0 0 n/a
Jul 11 12:11:41 ==================== Restart ====================
Jul 11 12:11-14 180 1 2 197 0.6%
Jul 11 12:15-29 818 107 9 793 13.1%
Jul 11 12:30-44 818 213 22 687 26.0%
Jul 11 12:45-59 818 297 20 603 36.3%
Jul 11 13:00-14 818 297 35 603 36.3%
Jul 11 13:15-29 818 280 29 620 34.2%
Jul 11 13:30-44 819 275 26 625 33.6%
Jul 11 13:45-59 818 254 24 646 31.1%
Jul 11 14:00-14 818 300 32 600 36.7%
Jul 11 14:15-29 818 267 35 633 32.6%
Jul 11 14:30-44 818 309 37 591 37.8%
Jul 11 14:45-59 819 271 26 629 33.1%
Jul 11 15:00-14 818 292 30 608 35.7%
Jul 11 15:15-15 2 1 0 1 50.0%
>>> ZEO.scripts.cache_stats.main('-X cache.trace'.split())
loads hits inv(h) writes hitrate
Jul 11 12:11-11 0 0 0 0 n/a
Jul 11 12:11:41 ==================== Restart ====================
Jul 11 12:11-14 180 1 2 197 0.6%
Jul 11 12:15-29 818 107 9 793 13.1%
Jul 11 12:30-44 818 213 22 687 26.0%
Jul 11 12:45-59 818 297 20 603 36.3%
Jul 11 13:00-14 818 297 35 603 36.3%
Jul 11 13:15-29 818 280 29 620 34.2%
Jul 11 13:30-44 819 275 26 625 33.6%
Jul 11 13:45-59 818 254 24 646 31.1%
Jul 11 14:00-14 818 300 32 600 36.7%
Jul 11 14:15-29 818 267 35 633 32.6%
Jul 11 14:30-44 818 309 37 591 37.8%
Jul 11 14:45-59 819 271 26 629 33.1%
Jul 11 15:00-14 818 292 30 608 35.7%
Jul 11 15:15-15 2 1 0 1 50.0%
<BLANKLINE>
Read 18,837 trace records (640,450 bytes) in 0.0 seconds
Versions: 0 records used a version
First time: Sun Jul 11 12:11:41 2010
Last time: Sun Jul 11 15:15:01 2010
Duration: 11,000 seconds
Data recs: 11,000 (58.4%), average size 1106 bytes
Hit rate: 31.6% (load hits / loads)
<BLANKLINE>
Count Code Function (action)
1 00 _setup_trace (initialization)
673 10 invalidate (miss)
327 1c invalidate (hit, saving non-current)
6,836 20 load (miss)
3,164 22 load (hit)
7,836 52 store (current, non-version)
>>> ZEO.scripts.cache_stats.main('-i 5 cache.trace'.split())
loads hits inv(h) writes hitrate
Jul 11 12:11-11 0 0 0 0 n/a
Jul 11 12:11:41 ==================== Restart ====================
Jul 11 12:11-14 180 1 2 197 0.6%
Jul 11 12:15-19 272 19 2 281 7.0%
Jul 11 12:20-24 273 35 5 265 12.8%
Jul 11 12:25-29 273 53 2 247 19.4%
Jul 11 12:30-34 272 60 8 240 22.1%
Jul 11 12:35-39 273 68 6 232 24.9%
Jul 11 12:40-44 273 85 8 215 31.1%
Jul 11 12:45-49 273 85 7 215 31.1%
Jul 11 12:50-54 272 105 9 195 38.6%
Jul 11 12:55-59 273 107 4 193 39.2%
Jul 11 13:00-04 273 93 12 207 34.1%
Jul 11 13:05-09 273 103 7 197 37.7%
Jul 11 13:10-14 272 101 16 199 37.1%
Jul 11 13:15-19 273 93 9 207 34.1%
Jul 11 13:20-24 273 94 9 206 34.4%
Jul 11 13:25-29 272 93 11 207 34.2%
Jul 11 13:30-34 273 84 14 216 30.8%
Jul 11 13:35-39 273 102 6 198 37.4%
Jul 11 13:40-44 273 89 6 211 32.6%
Jul 11 13:45-49 272 81 6 219 29.8%
Jul 11 13:50-54 273 87 8 213 31.9%
Jul 11 13:55-59 273 86 10 214 31.5%
Jul 11 14:00-04 273 96 12 204 35.2%
Jul 11 14:05-09 272 95 11 205 34.9%
Jul 11 14:10-14 273 109 9 191 39.9%
Jul 11 14:15-19 273 92 9 208 33.7%
Jul 11 14:20-24 272 84 17 216 30.9%
Jul 11 14:25-29 273 91 9 209 33.3%
Jul 11 14:30-34 273 106 13 194 38.8%
Jul 11 14:35-39 273 90 13 210 33.0%
Jul 11 14:40-44 272 113 11 187 41.5%
Jul 11 14:45-49 273 83 10 217 30.4%
Jul 11 14:50-54 273 101 9 199 37.0%
Jul 11 14:55-59 273 87 7 213 31.9%
Jul 11 15:00-04 272 99 6 201 36.4%
Jul 11 15:05-09 273 95 12 205 34.8%
Jul 11 15:10-14 273 98 12 202 35.9%
Jul 11 15:15-15 2 1 0 1 50.0%
<BLANKLINE>
Read 18,837 trace records (640,450 bytes) in 0.0 seconds
Versions: 0 records used a version
First time: Sun Jul 11 12:11:41 2010
Last time: Sun Jul 11 15:15:01 2010
Duration: 11,000 seconds
Data recs: 11,000 (58.4%), average size 1106 bytes
Hit rate: 31.6% (load hits / loads)
<BLANKLINE>
Count Code Function (action)
1 00 _setup_trace (initialization)
673 10 invalidate (miss)
327 1c invalidate (hit, saving non-current)
6,836 20 load (miss)
3,164 22 load (hit)
7,836 52 store (current, non-version)
>>> ZEO.scripts.cache_simul.main('-s 2 -i 5 cache.trace'.split())
CircularCacheSimulation, cache size 2,097,152 bytes
START TIME DUR. LOADS HITS INVALS WRITES HITRATE EVICTS INUSE
Jul 11 12:11 3:17 180 1 2 197 0.6% 0 10.7
Jul 11 12:15 4:59 272 19 2 281 7.0% 0 26.4
Jul 11 12:20 4:59 273 35 5 265 12.8% 0 40.4
Jul 11 12:25 4:59 273 53 2 247 19.4% 0 54.8
Jul 11 12:30 4:59 272 60 8 240 22.1% 0 67.1
Jul 11 12:35 4:59 273 68 6 232 24.9% 0 79.8
Jul 11 12:40 4:59 273 85 8 215 31.1% 0 91.4
Jul 11 12:45 4:59 273 85 7 215 31.1% 61 100.0
Jul 11 12:50 4:59 272 105 9 195 38.6% 193 100.0
Jul 11 12:55 4:59 273 107 4 193 39.2% 184 100.0
Jul 11 13:00 4:59 273 93 12 207 34.1% 215 99.9
Jul 11 13:05 4:59 273 103 7 197 37.7% 191 99.9
Jul 11 13:10 4:59 272 101 16 199 37.1% 191 99.9
Jul 11 13:15 4:59 273 93 9 207 34.1% 227 99.9
Jul 11 13:20 4:59 273 94 9 206 34.4% 211 99.9
Jul 11 13:25 4:59 272 93 11 207 34.2% 208 99.9
Jul 11 13:30 4:59 273 84 14 216 30.8% 221 99.9
Jul 11 13:35 4:59 273 102 6 198 37.4% 201 99.9
Jul 11 13:40 4:59 273 88 6 212 32.2% 215 100.0
Jul 11 13:45 4:59 272 81 5 219 29.8% 209 100.0
Jul 11 13:50 4:59 273 87 8 213 31.9% 223 100.0
Jul 11 13:55 4:59 273 86 10 214 31.5% 196 99.9
Jul 11 14:00 4:59 273 96 12 204 35.2% 195 100.0
Jul 11 14:05 4:59 272 95 11 205 34.9% 201 99.9
Jul 11 14:10 4:59 273 110 9 190 40.3% 189 99.9
Jul 11 14:15 4:59 273 94 9 206 34.4% 210 99.9
Jul 11 14:20 4:59 272 87 17 213 32.0% 207 100.0
Jul 11 14:25 4:59 273 91 10 209 33.3% 214 99.9
Jul 11 14:30 4:59 273 106 13 194 38.8% 210 100.0
Jul 11 14:35 4:59 273 90 13 210 33.0% 206 100.0
Jul 11 14:40 4:59 272 113 11 187 41.5% 180 100.0
Jul 11 14:45 4:59 273 83 10 217 30.4% 230 99.9
Jul 11 14:50 4:59 273 101 9 199 37.0% 204 100.0
Jul 11 14:55 4:59 273 87 7 213 31.9% 223 100.0
Jul 11 15:00 4:59 272 99 6 201 36.4% 192 99.9
Jul 11 15:05 4:59 273 96 12 204 35.2% 203 99.9
Jul 11 15:10 4:59 273 98 12 202 35.9% 182 99.9
Jul 11 15:15 1 2 1 0 1 50.0% 1 99.9
--------------------------------------------------------------------------
Jul 11 12:11 3:03:19 10000 3170 327 7830 31.7% 5993 99.9
>>> cache_run('cache4', 4)
>>> ZEO.scripts.cache_stats.main('cache4.trace'.split())
loads hits inv(h) writes hitrate
Jul 11 12:11-11 0 0 0 0 n/a
Jul 11 12:11:41 ==================== Restart ====================
Jul 11 12:11-14 180 1 2 197 0.6%
Jul 11 12:15-29 818 107 9 793 13.1%
Jul 11 12:30-44 818 213 22 687 26.0%
Jul 11 12:45-59 818 322 23 578 39.4%
Jul 11 13:00-14 818 381 43 519 46.6%
Jul 11 13:15-29 818 450 44 450 55.0%
Jul 11 13:30-44 819 503 47 397 61.4%
Jul 11 13:45-59 818 501 51 399 61.2%
Jul 11 14:00-14 818 523 50 377 63.9%
Jul 11 14:15-29 818 528 61 372 64.5%
Jul 11 14:30-44 818 509 49 391 62.2%
Jul 11 14:45-59 819 522 51 378 63.7%
Jul 11 15:00-14 818 504 52 396 61.6%
Jul 11 15:15-15 2 2 0 0 100.0%
<BLANKLINE>
Read 16,935 trace records (575,782 bytes) in 0.0 seconds
Versions: 0 records used a version
First time: Sun Jul 11 12:11:41 2010
Last time: Sun Jul 11 15:15:01 2010
Duration: 11,000 seconds
Data recs: 11,000 (65.0%), average size 1105 bytes
Hit rate: 50.7% (load hits / loads)
<BLANKLINE>
Count Code Function (action)
1 00 _setup_trace (initialization)
496 10 invalidate (miss)
504 1c invalidate (hit, saving non-current)
4,934 20 load (miss)
5,066 22 load (hit)
5,934 52 store (current, non-version)
>>> ZEO.scripts.cache_simul.main('-s 4 cache.trace'.split())
CircularCacheSimulation, cache size 4,194,304 bytes
START TIME DUR. LOADS HITS INVALS WRITES HITRATE EVICTS INUSE
Jul 11 12:11 3:17 180 1 2 197 0.6% 0 5.4
Jul 11 12:15 14:59 818 107 9 793 13.1% 0 27.4
Jul 11 12:30 14:59 818 213 22 687 26.0% 0 45.7
Jul 11 12:45 14:59 818 322 23 578 39.4% 0 61.4
Jul 11 13:00 14:59 818 381 43 519 46.6% 0 75.8
Jul 11 13:15 14:59 818 450 44 450 55.0% 0 88.2
Jul 11 13:30 14:59 819 503 47 397 61.4% 0 98.9
Jul 11 13:45 14:59 818 501 51 399 61.2% 368 100.0
Jul 11 14:00 14:59 818 523 50 377 63.9% 372 100.0
Jul 11 14:15 14:59 818 528 61 372 64.5% 361 100.0
Jul 11 14:30 14:59 818 507 49 393 62.0% 404 100.0
Jul 11 14:45 14:59 819 523 51 377 63.9% 387 100.0
Jul 11 15:00 14:59 818 503 52 397 61.5% 384 100.0
Jul 11 15:15 1 2 2 0 0 100.0% 0 100.0
--------------------------------------------------------------------------
Jul 11 12:11 3:03:19 10000 5064 504 5936 50.6% 2276 100.0
>>> cache_run('cache1', 1)
>>> ZEO.scripts.cache_stats.main('cache1.trace'.split())
loads hits inv(h) writes hitrate
Jul 11 12:11-11 0 0 0 0 n/a
Jul 11 12:11:41 ==================== Restart ====================
Jul 11 12:11-14 180 1 2 197 0.6%
Jul 11 12:15-29 818 107 9 793 13.1%
Jul 11 12:30-44 818 159 16 741 19.4%
Jul 11 12:45-59 818 155 7 745 18.9%
Jul 11 13:00-14 818 141 21 759 17.2%
Jul 11 13:15-29 818 128 17 772 15.6%
Jul 11 13:30-44 819 152 12 748 18.6%
Jul 11 13:45-59 818 121 17 779 14.8%
Jul 11 14:00-14 818 157 17 743 19.2%
Jul 11 14:15-29 818 149 14 751 18.2%
Jul 11 14:30-44 818 159 16 741 19.4%
Jul 11 14:45-59 819 140 14 760 17.1%
Jul 11 15:00-14 818 155 10 745 18.9%
Jul 11 15:15-15 2 0 0 2 0.0%
<BLANKLINE>
Read 20,277 trace records (689,410 bytes) in 0.0 seconds
Versions: 0 records used a version
First time: Sun Jul 11 12:11:41 2010
Last time: Sun Jul 11 15:15:01 2010
Duration: 11,000 seconds
Data recs: 11,000 (54.2%), average size 1105 bytes
Hit rate: 17.2% (load hits / loads)
<BLANKLINE>
Count Code Function (action)
1 00 _setup_trace (initialization)
828 10 invalidate (miss)
172 1c invalidate (hit, saving non-current)
8,276 20 load (miss)
1,724 22 load (hit)
9,276 52 store (current, non-version)
>>> ZEO.scripts.cache_simul.main('-s 1 cache.trace'.split())
CircularCacheSimulation, cache size 1,048,576 bytes
START TIME DUR. LOADS HITS INVALS WRITES HITRATE EVICTS INUSE
Jul 11 12:11 3:17 180 1 2 197 0.6% 0 21.5
Jul 11 12:15 14:59 818 107 9 793 13.1% 94 99.9
Jul 11 12:30 14:59 818 159 16 741 19.4% 722 99.9
Jul 11 12:45 14:59 818 156 8 744 19.1% 743 99.9
Jul 11 13:00 14:59 818 141 21 759 17.2% 767 99.9
Jul 11 13:15 14:59 818 126 16 774 15.4% 786 99.9
Jul 11 13:30 14:59 819 148 12 752 18.1% 736 100.0
Jul 11 13:45 14:59 818 123 17 777 15.0% 764 99.9
Jul 11 14:00 14:59 818 158 18 742 19.3% 739 100.0
Jul 11 14:15 14:59 818 148 14 752 18.1% 776 99.9
Jul 11 14:30 14:59 818 156 15 744 19.1% 745 99.9
Jul 11 14:45 14:59 819 142 15 758 17.3% 749 99.9
Jul 11 15:00 14:59 818 150 10 750 18.3% 740 99.8
Jul 11 15:15 1 2 1 0 1 50.0% 0 99.9
--------------------------------------------------------------------------
Jul 11 12:11 3:03:19 10000 1716 173 9284 17.2% 8361 99.9
Cleanup:
>>> del os.environ["ZEO_CACHE_TRACE"]
>>> time.time = timetime
"""
def test_suite():
suite = unittest.TestSuite()
......
#! /usr/bin/env python
##############################################################################
#
# Copyright (c) 2001-2005 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################
"""Cache simulation.
Usage: simul.py [-s size] tracefile
Options:
-s size: cache size in MB (default 20 MB)
"""
import sys
import time
import getopt
import struct
import math
import bisect
from sets import Set
from ZODB.utils import z64
def usage(msg):
print >> sys.stderr, msg
print >> sys.stderr, __doc__
def main():
# Parse options.
MB = 1024**2
cachelimit = 20*MB
simclass = CircularCacheSimulation
try:
opts, args = getopt.getopt(sys.argv[1:], "bflyz2cOaTUs:")
except getopt.error, msg:
usage(msg)
return 2
for o, a in opts:
if o == '-b':
simclass = BuddyCacheSimulation
elif o == '-f':
simclass = SimpleCacheSimulation
elif o == '-l':
simclass = LRUCacheSimulation
elif o == '-y':
simclass = AltZEOCacheSimulation
elif o == '-z':
simclass = ZEOCacheSimulation
elif o == '-s':
cachelimit = int(float(a)*MB)
elif o == '-2':
simclass = TwoQSimluation
elif o == '-c':
simclass = CircularCacheSimulation
elif o == '-O':
simclass = OracleSimulation
elif o == '-a':
simclass = ARCCacheSimulation
elif o == '-T':
simclass = ThorSimulation
elif o == '-U':
simclass = UnboundedSimulation
else:
assert False, (o, a)
if len(args) != 1:
usage("exactly one file argument required")
return 2
filename = args[0]
# Open file.
if filename.endswith(".gz"):
# Open gzipped file.
try:
import gzip
except ImportError:
print >> sys.stderr, "can't read gzipped files (no module gzip)"
return 1
try:
f = gzip.open(filename, "rb")
except IOError, msg:
print >> sys.stderr, "can't open %s: %s" % (filename, msg)
return 1
elif filename == "-":
# Read from stdin.
f = sys.stdin
else:
# Open regular file.
try:
f = open(filename, "rb")
except IOError, msg:
print >> sys.stderr, "can't open %s: %s" % (filename, msg)
return 1
# Create simulation object.
if simclass is OracleSimulation:
sim = simclass(cachelimit, filename)
else:
sim = simclass(cachelimit)
# Print output header.
sim.printheader()
# Read trace file, simulating cache behavior.
f_read = f.read
unpack = struct.unpack
FMT = ">iiH8s8s"
FMT_SIZE = struct.calcsize(FMT)
assert FMT_SIZE == 26
while 1:
# Read a record and decode it.
r = f_read(FMT_SIZE)
if len(r) < FMT_SIZE:
break
ts, code, oidlen, start_tid, end_tid = unpack(FMT, r)
if ts == 0:
# Must be a misaligned record caused by a crash; skip 8 bytes
# and try again. Why 8? Lost in the mist of history.
f.seek(f.tell() - FMT_SIZE + 8)
continue
oid = f_read(oidlen)
if len(oid) < oidlen:
break
# Decode the code.
dlen, version, code = (code & 0x7fffff00,
code & 0x80,
code & 0x7e)
# And pass it to the simulation.
sim.event(ts, dlen, version, code, oid, start_tid, end_tid)
f.close()
# Finish simulation.
sim.finish()
# Exit code from main().
return 0
class Simulation(object):
"""Base class for simulations.
The driver program calls: event(), printheader(), finish().
The standard event() method calls these additional methods:
write(), load(), inval(), report(), restart(); the standard
finish() method also calls report().
"""
def __init__(self, cachelimit):
self.cachelimit = cachelimit
# Initialize global statistics.
self.epoch = None
self.total_loads = 0
self.total_hits = 0 # subclass must increment
self.total_invals = 0 # subclass must increment
self.total_writes = 0
if not hasattr(self, "extras"):
self.extras = (self.extraname,)
self.format = self.format + " %7s" * len(self.extras)
# Reset per-run statistics and set up simulation data.
self.restart()
def restart(self):
# Reset per-run statistics.
self.loads = 0
self.hits = 0 # subclass must increment
self.invals = 0 # subclass must increment
self.writes = 0
self.ts0 = None
def event(self, ts, dlen, _version, code, oid,
start_tid, end_tid):
# Record first and last timestamp seen.
if self.ts0 is None:
self.ts0 = ts
if self.epoch is None:
self.epoch = ts
self.ts1 = ts
# Simulate cache behavior. Caution: the codes in the trace file
# record whether the actual cache missed or hit on each load, but
# that bears no necessary relationship to whether the simulated cache
# will hit or miss. Relatedly, if the actual cache needed to store
# an object, the simulated cache may not need to (it may already
# have the data).
action = code & 0x70
if action == 0x20:
# Load.
self.loads += 1
self.total_loads += 1
# Asserting that dlen is 0 iff it's a load miss.
# assert (dlen == 0) == (code in (0x20, 0x24))
self.load(oid, dlen, start_tid)
elif action == 0x50:
# Store.
assert dlen
self.write(oid, dlen, start_tid, end_tid)
elif action == 0x10:
# Invalidate.
self.inval(oid, start_tid)
elif action == 0x00:
# Restart.
self.report()
self.restart()
else:
raise ValueError("unknown trace code 0x%x" % code)
def write(self, oid, size, start_tid, end_tid):
pass
def load(self, oid, size, start_tid):
# Must increment .hits and .total_hits as appropriate.
pass
def inval(self, oid, start_tid):
# Must increment .invals and .total_invals as appropriate.
pass
format = "%12s %9s %8s %8s %6s %6s %7s"
# Subclass should override extraname to name known instance variables;
# if extraname is 'foo', both self.foo and self.total_foo must exist:
extraname = "*** please override ***"
def printheader(self):
print "%s, cache size %s bytes" % (self.__class__.__name__,
addcommas(self.cachelimit))
self.extraheader()
extranames = tuple([s.upper() for s in self.extras])
args = ("START TIME", "DURATION", "LOADS", "HITS",
"INVALS", "WRITES", "HITRATE") + extranames
print self.format % args
def extraheader(self):
pass
nreports = 0
def report(self, extratext=''):
if self.loads:
self.nreports += 1
args = (time.ctime(self.ts0)[4:-8],
duration(self.ts1 - self.ts0),
self.loads, self.hits, self.invals, self.writes,
hitrate(self.loads, self.hits))
args += tuple([getattr(self, name) for name in self.extras])
print self.format % args, extratext
def finish(self):
# Make sure that the last line of output ends with "OVERALL". This
# makes it much easier for another program parsing the output to
# find summary statistics.
if self.nreports < 2:
self.report('OVERALL')
else:
self.report()
args = (
time.ctime(self.epoch)[4:-8],
duration(self.ts1 - self.epoch),
self.total_loads,
self.total_hits,
self.total_invals,
self.total_writes,
hitrate(self.total_loads, self.total_hits))
args += tuple([getattr(self, "total_" + name)
for name in self.extras])
print (self.format + " OVERALL") % args
# For use in CircularCacheSimulation.
class CircularCacheEntry(object):
__slots__ = (# object key: an (oid, start_tid) pair, where
# start_tid is the tid of the transaction that created
# this revision of oid
'key',
# tid of transaction that created the next revision;
# z64 iff this is the current revision
'end_tid',
# Offset from start of file to the object's data
# record; this includes all overhead bytes (status
# byte, size bytes, etc).
'offset',
)
def __init__(self, key, end_tid, offset):
self.key = key
self.end_tid = end_tid
self.offset = offset
from ZEO.cache import ZEC3_HEADER_SIZE
class CircularCacheSimulation(Simulation):
"""Simulate the ZEO 3.0 cache."""
# The cache is managed as a single file with a pointer that
# goes around the file, circularly, forever. New objects
# are written at the current pointer, evicting whatever was
# there previously.
extras = "evicts", "inuse"
def __init__(self, cachelimit):
from ZEO import cache
Simulation.__init__(self, cachelimit)
self.total_evicts = 0 # number of cache evictions
# Current offset in file.
self.offset = ZEC3_HEADER_SIZE
# Map offset in file to (size, CircularCacheEntry) pair, or to
# (size, None) if the offset starts a free block.
self.filemap = {ZEC3_HEADER_SIZE: (self.cachelimit - ZEC3_HEADER_SIZE,
None)}
# Map key to CircularCacheEntry. A key is an (oid, tid) pair.
self.key2entry = {}
# Map oid to tid of current revision.
self.current = {}
# Map oid to list of (start_tid, end_tid) pairs in sorted order.
# Used to find matching key for load of non-current data.
self.noncurrent = {}
# The number of overhead bytes needed to store an object pickle
# on disk (all bytes beyond those needed for the object pickle).
self.overhead = (cache.Object.TOTAL_FIXED_SIZE +
cache.OBJECT_HEADER_SIZE)
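# Illustration of how these maps interact: after current data for oid A is
# written at tid T1 and later invalidated at tid T2, A disappears from
# `current`, `noncurrent[A]` gains the pair (T1, T2), and the entry
# key2entry[A, T1] gets end_tid == T2 (see inval() below).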
def restart(self):
Simulation.restart(self)
self.evicts = 0
def load(self, oid, size, tid):
if tid == z64:
# Trying to load current revision.
if oid in self.current: # else it's a cache miss
self.hits += 1
self.total_hits += 1
return
# May or may not be trying to load current revision.
cur_tid = self.current.get(oid)
if cur_tid == tid:
self.hits += 1
self.total_hits += 1
return
# It's a load for non-current data. Do we know about this oid?
L = self.noncurrent.get(oid)
if L is None:
return # cache miss
i = bisect.bisect_left(L, (tid, None))
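# Illustration: with L == [(t10, t20), (t30, t40)] and a load as of a tid
# strictly between t10 and t20, bisect gives i == 1, L[0] covers the
# requested tid, and we count a hit below.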
if i == 0:
# This tid is smaller than any we know about -- miss.
return
lo, hi = L[i-1]
assert lo < tid
if tid > hi:
# No data in the right tid range -- miss.
return
# Cache hit.
self.hits += 1
self.total_hits += 1
# (oid, tid) is in the cache. Remove it: take it out of key2entry,
# and in `filemap` mark the space it occupied as being free. The
# caller is responsible for removing it from `current` or `noncurrent`.
def _remove(self, oid, tid):
key = oid, tid
e = self.key2entry.pop(key)
pos = e.offset
size, _e = self.filemap[pos]
assert e is _e
self.filemap[pos] = size, None
def _remove_noncurrent_revisions(self, oid):
noncurrent_list = self.noncurrent.get(oid)
if noncurrent_list:
self.invals += len(noncurrent_list)
self.total_invals += len(noncurrent_list)
for start_tid, end_tid in noncurrent_list:
self._remove(oid, start_tid)
del self.noncurrent[oid]
def inval(self, oid, tid):
if tid == z64:
# This is part of startup cache verification: forget everything
# about this oid.
self._remove_noncurrent_revisions(oid)
cur_tid = self.current.get(oid)
if cur_tid is None:
# We don't have current data, so nothing more to do.
return
# We had current data for oid, but no longer.
self.invals += 1
self.total_invals += 1
del self.current[oid]
if tid == z64:
# Startup cache verification: forget this oid entirely.
self._remove(oid, cur_tid)
return
# Our current data becomes non-current data.
# Add the validity range to the list of non-current data for oid.
assert cur_tid < tid
L = self.noncurrent.setdefault(oid, [])
bisect.insort_left(L, (cur_tid, tid))
# Update the end of oid's validity range in its CircularCacheEntry.
e = self.key2entry[oid, cur_tid]
assert e.end_tid == z64
e.end_tid = tid
def write(self, oid, size, start_tid, end_tid):
if end_tid == z64:
# Storing current revision.
if oid in self.current: # we already have it in cache
return
self.current[oid] = start_tid
self.writes += 1
self.total_writes += 1
self.add(oid, size, start_tid)
return
# Storing non-current revision.
L = self.noncurrent.setdefault(oid, [])
p = start_tid, end_tid
if p in L:
return # we already have it in cache
bisect.insort_left(L, p)
self.writes += 1
self.total_writes += 1
self.add(oid, size, start_tid, end_tid)
# Add `oid` to the cache, evicting objects as needed to make room.
# This updates `filemap` and `key2entry`; it's the caller's
# responsibility to update `current` or `noncurrent` appropriately.
def add(self, oid, size, start_tid, end_tid=z64):
key = oid, start_tid
assert key not in self.key2entry
size += self.overhead
avail = self.makeroom(size)
e = CircularCacheEntry(key, end_tid, self.offset)
self.filemap[self.offset] = size, e
self.key2entry[key] = e
self.offset += size
# All the space made available must be accounted for in filemap.
excess = avail - size
if excess:
self.filemap[self.offset] = excess, None
# Evict enough objects to make at least `need` contiguous bytes, starting
# at `self.offset`, available. Evicted objects are removed from
# `filemap`, `key2entry`, `current` and `noncurrent`. The caller is
# responsible for adding new entries to `filemap` to account for all
# the freed bytes, and for advancing `self.offset`. The number of bytes
# freed is the return value, and will be >= need.
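# For example: if `need` is 100 bytes and the record at self.offset is a
# 300-byte object, that one object is evicted and 300 is returned; add()
# then records the 200 unused bytes as a free block.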
def makeroom(self, need):
if self.offset + need > self.cachelimit:
self.offset = ZEC3_HEADER_SIZE
pos = self.offset
while need > 0:
assert pos < self.cachelimit
size, e = self.filemap.pop(pos)
if e: # there is an object here (else it's already free space)
self.evicts += 1
self.total_evicts += 1
assert pos == e.offset
_e = self.key2entry.pop(e.key)
assert e is _e
oid, start_tid = e.key
if e.end_tid == z64:
del self.current[oid]
else:
L = self.noncurrent[oid]
L.remove((start_tid, e.end_tid))
need -= size
pos += size
return pos - self.offset # total number of bytes freed
def report(self):
self.check()
free = used = total = 0
for size, e in self.filemap.itervalues():
total += size
if e:
used += size
else:
free += size
self.inuse = round(100.0 * used / total, 1)
self.total_inuse = self.inuse
Simulation.report(self)
def check(self):
oidcount = 0
pos = ZEC3_HEADER_SIZE
while pos < self.cachelimit:
size, e = self.filemap[pos]
if e:
oidcount += 1
assert self.key2entry[e.key].offset == pos
pos += size
assert oidcount == len(self.key2entry)
assert pos == self.cachelimit
def dump(self):
print len(self.filemap)
L = list(self.filemap)
L.sort()
for k in L:
v = self.filemap[k]
print k, v[0], repr(v[1])
#############################################################################
# CAUTION: It's most likely that none of the simulators below this
# point work anymore. A great many changes were needed to teach
# CircularCacheSimulation (above) about MVCC, including method signature
# changes and changes in cache file format, and none of the other simulator
# classes were changed.
#############################################################################
class ZEOCacheSimulation(Simulation):
"""Simulate the ZEO 1.0 and 2.0 cache behavior.
This assumes the cache is not persistent (we don't know how to
simulate cache validation).
"""
extraname = "flips"
def __init__(self, cachelimit):
# Initialize base class
Simulation.__init__(self, cachelimit)
# Initialize additional global statistics
self.total_flips = 0
def restart(self):
# Reset base class
Simulation.restart(self)
# Reset additional per-run statistics
self.flips = 0
# Set up simulation
self.filesize = [4, 4] # account for magic number
self.fileoids = [{}, {}]
self.current = 0 # index into filesize, fileoids
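# The simulated cache is split into two halves; writes go to the current
# half until adding an object would push it past cachelimit/2, at which
# point the cache "flips": the other half is cleared and becomes current
# (see write() below).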
def load(self, oid, size):
if (self.fileoids[self.current].get(oid) or
self.fileoids[1 - self.current].get(oid)):
self.hits += 1
self.total_hits += 1
else:
self.write(oid, size)
def write(self, oid, size):
# Fudge because size is rounded up to multiples of 256. (31
# is header overhead per cache record; 127 is to compensate
# for rounding up to multiples of 256.)
size = size + 31 - 127
if self.filesize[self.current] + size > self.cachelimit / 2:
# Cache flip
self.flips += 1
self.total_flips += 1
self.current = 1 - self.current
self.filesize[self.current] = 4
self.fileoids[self.current] = {}
self.filesize[self.current] += size
self.fileoids[self.current][oid] = 1
def inval(self, oid):
if self.fileoids[self.current].get(oid):
self.invals += 1
self.total_invals += 1
del self.fileoids[self.current][oid]
elif self.fileoids[1 - self.current].get(oid):
self.invals += 1
self.total_invals += 1
del self.fileoids[1 - self.current][oid]
class AltZEOCacheSimulation(ZEOCacheSimulation):
"""A variation of the ZEO cache that copies to the current file.
When a hit is found in the non-current cache file, it is copied to
the current cache file. Exception: when the copy would cause a
cache flip, we don't copy (this is part laziness, part concern
over causing extraneous flips).
"""
def load(self, oid, size):
if self.fileoids[self.current].get(oid):
self.hits += 1
self.total_hits += 1
elif self.fileoids[1 - self.current].get(oid):
self.hits += 1
self.total_hits += 1
# Simulate a write, unless it would cause a flip
size = size + 31 - 127
if self.filesize[self.current] + size <= self.cachelimit / 2:
self.filesize[self.current] += size
self.fileoids[self.current][oid] = 1
del self.fileoids[1 - self.current][oid]
else:
self.write(oid, size)
class LRUCacheSimulation(Simulation):
extraname = "evicts"
def __init__(self, cachelimit):
# Initialize base class
Simulation.__init__(self, cachelimit)
# Initialize additional global statistics
self.total_evicts = 0
def restart(self):
# Reset base class
Simulation.restart(self)
# Reset additional per-run statistics
self.evicts = 0
# Set up simulation
self.cache = {}
self.size = 0
self.head = Node(None, None)
self.head.linkbefore(self.head)
def load(self, oid, size):
node = self.cache.get(oid)
if node is not None:
self.hits += 1
self.total_hits += 1
node.linkbefore(self.head)
else:
self.write(oid, size)
def write(self, oid, size):
node = self.cache.get(oid)
if node is not None:
node.unlink()
assert self.head.next is not None
self.size -= node.size
node = Node(oid, size)
self.cache[oid] = node
node.linkbefore(self.head)
self.size += size
# Evict LRU nodes
while self.size > self.cachelimit:
self.evicts += 1
self.total_evicts += 1
node = self.head.next
assert node is not self.head
node.unlink()
assert self.head.next is not None
del self.cache[node.oid]
self.size -= node.size
def inval(self, oid):
node = self.cache.get(oid)
if node is not None:
assert node.oid == oid
self.invals += 1
self.total_invals += 1
node.unlink()
assert self.head.next is not None
del self.cache[oid]
self.size -= node.size
assert self.size >= 0
class Node(object):
"""Node in a doubly-linked list, storing oid and size as payload.
A node can be linked or unlinked; in the latter case, next and
prev are None. Initially a node is unlinked.
"""
__slots__ = ['prev', 'next', 'oid', 'size']
def __init__(self, oid, size):
self.oid = oid
self.size = size
self.prev = self.next = None
def unlink(self):
prev = self.prev
next = self.next
if prev is not None:
assert next is not None
assert prev.next is self
assert next.prev is self
prev.next = next
next.prev = prev
self.prev = self.next = None
else:
assert next is None
def linkbefore(self, next):
self.unlink()
prev = next.prev
if prev is None:
assert next.next is None
prev = next
self.prev = prev
self.next = next
prev.next = next.prev = self
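# Sentinel objects used as Node2Q.kind markers: they identify which of the
# 2Q queues (am, a1in, a1out, set up in TwoQSimluation below) a node is
# currently linked into.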
am = object()
a1in = object()
a1out = object()
class Node2Q(Node):
__slots__ = ["kind", "hits"]
def __init__(self, oid, size, kind=None):
Node.__init__(self, oid, size)
self.kind = kind
self.hits = 0
def linkbefore(self, next):
if next.kind != self.kind:
self.kind = next.kind
Node.linkbefore(self, next)
class TwoQSimluation(Simulation):
# The original 2Q algorithm is page based and the authors offer
# tuning guidelines based on a page-based cache.  Our cache is
# object based, so, for example, it's hard to compute the number
# of oids to store in a1out based on the size of a1in.
extras = "evicts", "hothit", "am_add"
NodeClass = Node2Q
def __init__(self, cachelimit, outlen=10000, threshold=0):
Simulation.__init__(self, cachelimit)
# The promotion threshold: If a hit occurs in a1out, it is
# promoted to am if the number of hits on the object while it
# was in a1in is at least threshold. The standard 2Q scheme
# uses a threshold of 0.
self.threshold = threshold
self.am_limit = 3 * self.cachelimit / 4
self.a1in_limit = self.cachelimit / 4
self.cache = {}
self.am_size = 0
self.a1in_size = 0
self.a1out_size = 0
self.total_evicts = 0
self.total_hothit = 0
self.total_am_add = 0
self.a1out_limit = outlen
# An LRU queue of hot objects
self.am = self.NodeClass(None, None, am)
self.am.linkbefore(self.am)
# A FIFO queue of recently referenced objects.  Its purpose
# is to absorb references to objects that are accessed a few
# times in short order, then forgotten about.
self.a1in = self.NodeClass(None, None, a1in)
self.a1in.linkbefore(self.a1in)
# A FIFO queue of recently referenced oids.
# This queue only stores the oids, not any data. If we get a
# hit in this queue, promote the object to am.
self.a1out = self.NodeClass(None, None, a1out)
self.a1out.linkbefore(self.a1out)
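# Rough flow of the 2Q scheme as implemented here: new objects enter a1in;
# when evicted from a1in only the oid is remembered in a1out; a later hit
# in a1out promotes the object to am, provided it was hit at least
# `threshold` times beforehand.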
def makespace(self, size):
for space in 0, size:
if self.enoughspace(size):
return
self.evict_a1in(space)
if self.enoughspace(size):
return
self.evict_am(space)
def enoughspace(self, size):
totalsize = self.a1in_size + self.am_size
return totalsize + size < self.cachelimit
def evict_a1in(self, extra):
while self.a1in_size + extra > self.a1in_limit:
if self.a1in.next is self.a1in:
return
assert self.a1in.next is not None
node = self.a1in.next
self.evicts += 1
self.total_evicts += 1
node.linkbefore(self.a1out)
self.a1out_size += 1
self.a1in_size -= node.size
if self.a1out_size > self.a1out_limit:
assert self.a1out.next is not None
node = self.a1out.next
node.unlink()
del self.cache[node.oid]
self.a1out_size -= 1
def evict_am(self, extra):
while self.am_size + extra > self.am_limit:
if self.am.next is self.am:
return
assert self.am.next is not None
node = self.am.next
self.evicts += 1
self.total_evicts += 1
# This node hasn't been accessed in a while, so just
# forget about it.
node.unlink()
del self.cache[node.oid]
self.am_size -= node.size
def write(self, oid, size):
# A write always follows a read (ZODB doesn't allow blind writes).
# So this write must have followed a recent read of the object.
# Don't change its position, but do update the size.
# XXX For now, don't evict pages if the new version of the object
# is big enough to require eviction.
node = self.cache.get(oid)
if node is None or node.kind is a1out:
return
if node.kind is am:
self.am_size = self.am_size - node.size + size
node.size = size
else:
self.a1in_size = self.a1in_size - node.size + size
node.size = size
def load(self, oid, size):
node = self.cache.get(oid)
if node is not None:
if node.kind is am:
self.hits += 1
self.total_hits += 1
self.hothit += 1
self.total_hothit += 1
node.hits += 1
node.linkbefore(self.am)
elif node.kind is a1in:
self.hits += 1
self.total_hits += 1
node.hits += 1
elif node.kind is a1out:
self.a1out_size -= 1
if node.hits >= self.threshold:
self.makespace(node.size)
self.am_size += node.size
node.linkbefore(self.am)
self.cache[oid] = node
self.am_add += 1
self.total_am_add += 1
else:
node.unlink()
self.insert(oid, size)
else:
self.insert(oid, size)
def insert(self, oid, size):
# New objects enter the cache via a1in. If they
# are frequently used over a long enough time, they
# will be promoted to am -- but only via a1out.
self.makespace(size)
node = self.NodeClass(oid, size, a1in)
node.linkbefore(self.a1in)
self.cache[oid] = node
self.a1in_size += node.size
def inval(self, oid):
# The original 2Q algorithm didn't have to deal with
# invalidations. My own solution: Move it to the head of
# a1out.
node = self.cache.get(oid)
if node is None:
return
self.invals += 1
self.total_invals += 1
# XXX Should an invalidation to a1out count?
if node.kind is a1out:
return
node.linkbefore(self.a1out)
if node.kind is am:
self.am_size -= node.size
else:
self.a1in_size -= node.size
def restart(self):
Simulation.restart(self)
self.evicts = 0
self.hothit = 0
self.am_add = 0
lruT = object()
lruB = object()
fifoT = object()
fifoB = object()
class ARCCacheSimulation(Simulation):
# Based on the paper ARC: A Self-Tuning, Low Overhead Replacement
# Cache by Nimrod Megiddo and Dharmendra S. Modha, USENIX FAST
# 2003. The paper describes a block-based cache. A lot of the
# details need to be fiddled to work with an object-based cache.
# For size issues, the key insight ended up being conditions
# A.1-A.5 rather than the details of the algorithm in Fig. 4.
extras = "lruThits", "evicts", "p"
def __init__(self, cachelimit):
Simulation.__init__(self, cachelimit)
# There are two pairs of linked lists. Each pair has a top and
# bottom half. The bottom half contains metadata, but not actual
# objects.
# LRU list of frequently used objects
self.lruT = Node2Q(None, None, lruT)
self.lruT.linkbefore(self.lruT)
self.lruT_len = 0
self.lruT_size = 0
self.lruB = Node2Q(None, None, lruB)
self.lruB.linkbefore(self.lruB)
self.lruB_len = 0
self.lruB_size = 0
# FIFO list of objects seen once
self.fifoT = Node2Q(None, None, fifoT)
self.fifoT.linkbefore(self.fifoT)
self.fifoT_len = 0
self.fifoT_size = 0
self.fifoB = Node2Q(None, None, fifoB)
self.fifoB.linkbefore(self.fifoB)
self.fifoB_len = 0
self.fifoB_size = 0
# maps oid to node
self.cache = {}
# The paper says that p should be adjusted by 1 at a minimum:
# "The compound effect of such small increments and decrements
# to p is quite profound as we will demonstrate in the next
# section."  Not really, as far as I can tell.  In my traces
# with a very small cache, it was taking far too long to adjust
# towards favoring some FIFO component. I would guess that the
# chief difference is that our caches are much bigger than the
# ones they experimented with. Their biggest cache had 512K
# entries, while our smallest cache will have 40 times that many
# entries.
self.p = 0
# XXX multiply computed adjustments to p by walk_factor
self.walk_factor = 500
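# Here p is the target size of the FIFO ("seen once") side: replace()
# evicts from fifoT while its size exceeds p, otherwise from lruT.  A hit
# in fifoB raises p (protecting the FIFO side); a hit in lruB lowers it.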
# statistics
self.total_hits = 0
self.total_lruThits = 0
self.total_fifoThits = 0
self.total_evicts = 0
def restart(self):
Simulation.restart(self)
self.hits = 0
self.lruThits = 0
self.fifoThits = 0
self.evicts = 0
def write(self, oid, size):
pass
def replace(self, lruB=False):
self.evicts += 1
self.total_evicts += 1
if self.fifoT_size > self.p or (lruB and self.fifoT_size == self.p):
node = self.fifoT.next
if node is self.fifoT:
return 0
assert node is not self.fifoT, self.stats()
node.linkbefore(self.fifoB)
self.fifoT_len -= 1
self.fifoT_size -= node.size
self.fifoB_len += 1
self.fifoB_size += node.size
else:
node = self.lruT.next
if node is self.lruT:
return 0
assert node is not self.lruT, self.stats()
node.linkbefore(self.lruB)
self.lruT_len -= 1
self.lruT_size -= node.size
self.lruB_len += 1
self.lruB_size += node.size
return node.size
def stats(self):
self.totalsize = self.lruT_size + self.fifoT_size
self.allsize = self.totalsize + self.lruB_size + self.fifoB_size
print "cachelimit = %s totalsize=%s allsize=%s" % (
addcommas(self.cachelimit),
addcommas(self.totalsize),
addcommas(self.allsize))
fmt = (
"p=%(p)d\n"
"lruT = %(lruT_len)5d / %(lruT_size)8d / %(lruThits)d\n"
"fifoT = %(fifoT_len)5d / %(fifoT_size)8d / %(fifoThits)d\n"
"lruB = %(lruB_len)5d / %(lruB_size)8d\n"
"fifoB = %(fifoB_len)5d / %(fifoB_size)8d\n"
"loads=%(loads)d hits=%(hits)d evicts=%(evicts)d\n"
)
print fmt % self.__dict__
def report(self):
self.total_p = self.p
Simulation.report(self)
## self.stats()
def load(self, oid, size):
## maybe(self.stats, p=0.002)
node = self.cache.get(oid)
if node is None:
# cache miss: We're going to insert a new object in fifoT.
# If fifo is full, we'll need to evict something to make
# room for it.
prev = need = size
while need > 0:
if size + self.fifoT_size + self.fifoB_size >= self.cachelimit:
if need + self.fifoT_size >= self.cachelimit:
node = self.fifoB.next
assert node is not self.fifoB, self.stats()
node.unlink()
del self.cache[node.oid]
self.fifoB_size -= node.size
self.fifoB_len -= 1
self.evicts += 1
self.total_evicts += 1
else:
node = self.fifoB.next
assert node is not self.fifoB, self.stats()
node.unlink()
del self.cache[node.oid]
self.fifoB_size -= node.size
self.fifoB_len -= 1
if self.fifoT_size + self.lruT_size > self.cachelimit:
need -= self.replace()
else:
incache_size = self.fifoT_size + self.lruT_size + need
total_size = (incache_size + self.fifoB_size
+ self.lruB_size)
if total_size >= self.cachelimit * 2:
node = self.lruB.next
if node is self.lruB:
break
assert node is not self.lruB
node.unlink()
del self.cache[node.oid]
self.lruB_size -= node.size
self.lruB_len -= 1
elif incache_size > self.cachelimit:
need -= self.replace()
else:
break
if need == prev:
# XXX hack, apparently we can't get rid of anything else
break
prev = need
node = Node2Q(oid, size)
node.linkbefore(self.fifoT)
self.fifoT_len += 1
self.fifoT_size += size
self.cache[oid] = node
else:
# a cache hit, but possibly in a bottom list that doesn't
# actually hold the object
if node.kind is lruT:
node.linkbefore(self.lruT)
self.hits += 1
self.total_hits += 1
self.lruThits += 1
self.total_lruThits += 1
elif node.kind is fifoT:
node.linkbefore(self.lruT)
self.fifoT_len -= 1
self.lruT_len += 1
self.fifoT_size -= node.size
self.lruT_size += node.size
self.hits += 1
self.total_hits += 1
self.fifoThits += 1
self.total_fifoThits += 1
elif node.kind is fifoB:
node.linkbefore(self.lruT)
self.fifoB_len -= 1
self.lruT_len += 1
self.fifoB_size -= node.size
self.lruT_size += node.size
# XXX need a better min than 1?
## print "adapt+", max(1, self.lruB_size // self.fifoB_size)
delta = max(1, self.lruB_size / max(1, self.fifoB_size))
self.p += delta * self.walk_factor
if self.p > self.cachelimit:
self.p = self.cachelimit
need = node.size
if self.lruT_size + self.fifoT_size + need > self.cachelimit:
while need > 0:
r = self.replace()
if not r:
break
need -= r
elif node.kind is lruB:
node.linkbefore(self.lruT)
self.lruB_len -= 1
self.lruT_len += 1
self.lruB_size -= node.size
self.lruT_size += node.size
# XXX need a better min than 1?
## print "adapt-", max(1, self.fifoB_size // self.lruB_size)
delta = max(1, self.fifoB_size / max(1, self.lruB_size))
self.p -= delta * self.walk_factor
if self.p < 0:
self.p = 0
need = node.size
if self.lruT_size + self.fifoT_size + need > self.cachelimit:
while need > 0:
r = self.replace(lruB=True)
if not r:
break
need -= r
def inval(self, oid):
pass
def extraheader(self):
pass
class OracleSimulation(LRUCacheSimulation):
# Not sure how to implement this yet. This is a cache where I
# cheat to see how well we could actually do.  The cache
# replacement problem for multi-size caches is NP-hard, so we're
# not going to have an optimal solution.
# At the moment, the oracle is mostly blind. It knows which
# objects will be referenced more than once, so that it can
# ignore objects referenced only once. In most traces, these
# objects account for about 20% of references.
def __init__(self, cachelimit, filename):
LRUCacheSimulation.__init__(self, cachelimit)
self.count = {}
self.scan(filename)
def load(self, oid, size):
node = self.cache.get(oid)
if node is not None:
self.hits += 1
self.total_hits += 1
node.linkbefore(self.head)
else:
if oid in self.count:
self.write(oid, size)
def scan(self, filename):
# scan the file in advance to figure out which objects will
# be referenced more than once.
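# Note: this parses what appears to be the older fixed 24-byte trace record
# layout (4-byte time, 4-byte code, 8-byte oid, 8-byte serial).  The current
# tracing code writes a different, variable-length record, so this falls
# under the "probably doesn't work anymore" caution above.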
f = open(filename, "rb")
struct_unpack = struct.unpack
f_read = f.read
offset = 0
while 1:
# Read a record and decode it
r = f_read(8)
if len(r) < 8:
break
offset += 8
ts, code = struct_unpack(">ii", r)
if ts == 0:
# Must be a misaligned record caused by a crash
##print "Skipping 8 bytes at offset", offset-8
continue
r = f_read(16)
if len(r) < 16:
break
offset += 16
oid, serial = struct_unpack(">8s8s", r)
if code & 0x70 == 0x20:
# only look at loads
self.count[oid] = self.count.get(oid, 0) + 1
all = len(self.count)
# Now remove everything with count == 1
once = [oid for oid, count in self.count.iteritems()
if count == 1]
for oid in once:
del self.count[oid]
print "Scanned file, %d unique oids, %d repeats" % (
all, len(self.count))
class BuddyCacheSimulation(LRUCacheSimulation):
def __init__(self, cachelimit):
LRUCacheSimulation.__init__(self, roundup(cachelimit))
def restart(self):
LRUCacheSimulation.restart(self)
self.allocator = self.allocatorFactory(self.cachelimit)
def allocatorFactory(self, size):
return BuddyAllocator(size)
# LRUCacheSimulation.load() is just fine
def write(self, oid, size):
node = self.cache.get(oid)
if node is not None:
node.unlink()
assert self.head.next is not None
self.size -= node.size
self.allocator.free(node)
while 1:
node = self.allocator.alloc(size)
if node is not None:
break
# Failure to allocate. Evict something and try again.
node = self.head.next
assert node is not self.head
self.evicts += 1
self.total_evicts += 1
node.unlink()
assert self.head.next is not None
del self.cache[node.oid]
self.size -= node.size
self.allocator.free(node)
node.oid = oid
self.cache[oid] = node
node.linkbefore(self.head)
self.size += node.size
def inval(self, oid):
node = self.cache.get(oid)
if node is not None:
assert node.oid == oid
self.invals += 1
self.total_invals += 1
node.unlink()
assert self.head.next is not None
del self.cache[oid]
self.size -= node.size
assert self.size >= 0
self.allocator.free(node)
class SimpleCacheSimulation(BuddyCacheSimulation):
def allocatorFactory(self, size):
return SimpleAllocator(size)
def finish(self):
BuddyCacheSimulation.finish(self)
self.allocator.report()
MINSIZE = 256
class BuddyAllocator:
def __init__(self, cachelimit):
cachelimit = roundup(cachelimit)
self.cachelimit = cachelimit
self.avail = {} # Map rounded-up sizes to free list node heads
self.nodes = {} # Map address to node
k = MINSIZE
while k <= cachelimit:
self.avail[k] = n = Node(None, None) # Not BlockNode; has no addr
n.linkbefore(n)
k += k
node = BlockNode(None, cachelimit, 0)
self.nodes[0] = node
node.linkbefore(self.avail[cachelimit])
def alloc(self, size):
size = roundup(size)
k = size
while k <= self.cachelimit:
head = self.avail[k]
node = head.next
if node is not head:
break
k += k
else:
return None # Store is full, or block is too large
node.unlink()
size2 = node.size
while size2 > size:
size2 = size2 / 2
assert size2 >= size
node.size = size2
buddy = BlockNode(None, size2, node.addr + size2)
self.nodes[buddy.addr] = buddy
buddy.linkbefore(self.avail[size2])
node.oid = 1 # Flag as in-use
return node
def free(self, node):
assert node is self.nodes[node.addr]
assert node.prev is node.next is None
node.oid = None # Flag as free
while node.size < self.cachelimit:
buddy_addr = node.addr ^ node.size
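# Buddy addresses differ only in the bit corresponding to the block size,
# e.g. a 256-byte block at address 512 has its buddy at 512 ^ 256 == 768,
# and the 256-byte block at 768 has its buddy back at 512.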
buddy = self.nodes[buddy_addr]
assert buddy.addr == buddy_addr
if buddy.oid is not None or buddy.size != node.size:
break
# Merge node with buddy
buddy.unlink()
if buddy.addr < node.addr: # buddy prevails
del self.nodes[node.addr]
node = buddy
else: # node prevails
del self.nodes[buddy.addr]
node.size *= 2
assert node is self.nodes[node.addr]
node.linkbefore(self.avail[node.size])
def dump(self, msg=""):
if msg:
print msg,
size = MINSIZE
blocks = bytes = 0
while size <= self.cachelimit:
head = self.avail[size]
node = head.next
count = 0
while node is not head:
count += 1
node = node.next
if count:
print "%d:%d" % (size, count),
blocks += count
bytes += count*size
size += size
print "-- %d, %d" % (bytes, blocks)
def roundup(size):
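# Round up to the next power of two, but never below MINSIZE;
# e.g. roundup(300) == 512 and roundup(256) == 256.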
k = MINSIZE
while k < size:
k += k
return k
class SimpleAllocator:
def __init__(self, arenasize):
self.arenasize = arenasize
self.avail = BlockNode(None, 0, 0) # Weird: empty block as list head
self.rover = self.avail
node = BlockNode(None, arenasize, 0)
node.linkbefore(self.avail)
self.taglo = {0: node}
self.taghi = {arenasize: node}
# Allocator statistics
self.nallocs = 0
self.nfrees = 0
self.allocloops = 0
self.freebytes = arenasize
self.freeblocks = 1
self.allocbytes = 0
self.allocblocks = 0
def report(self):
print ("NA=%d AL=%d NF=%d ABy=%d ABl=%d FBy=%d FBl=%d" %
(self.nallocs, self.allocloops,
self.nfrees,
self.allocbytes, self.allocblocks,
self.freebytes, self.freeblocks))
def alloc(self, size):
self.nallocs += 1
# First fit algorithm
rover = stop = self.rover
while 1:
self.allocloops += 1
if rover.size >= size:
break
rover = rover.next
if rover is stop:
return None # We went round the list without finding space
if rover.size == size:
self.rover = rover.next
rover.unlink()
del self.taglo[rover.addr]
del self.taghi[rover.addr + size]
self.freeblocks -= 1
self.allocblocks += 1
self.freebytes -= size
self.allocbytes += size
return rover
# Take space from the beginning of the roving pointer
assert rover.size > size
node = BlockNode(None, size, rover.addr)
del self.taglo[rover.addr]
rover.size -= size
rover.addr += size
self.taglo[rover.addr] = rover
#self.freeblocks += 0 # No change here
self.allocblocks += 1
self.freebytes -= size
self.allocbytes += size
return node
def free(self, node):
self.nfrees += 1
self.freeblocks += 1
self.allocblocks -= 1
self.freebytes += node.size
self.allocbytes -= node.size
node.linkbefore(self.avail)
self.taglo[node.addr] = node
self.taghi[node.addr + node.size] = node
x = self.taghi.get(node.addr)
if x is not None:
# Merge x into node
x.unlink()
self.freeblocks -= 1
del self.taglo[x.addr]
del self.taghi[x.addr + x.size]
del self.taglo[node.addr]
node.addr = x.addr
node.size += x.size
self.taglo[node.addr] = node
x = self.taglo.get(node.addr + node.size)
if x is not None:
# Merge x into node
x.unlink()
self.freeblocks -= 1
del self.taglo[x.addr]
del self.taghi[x.addr + x.size]
del self.taghi[node.addr + node.size]
node.size += x.size
self.taghi[node.addr + node.size] = node
# It's possible that either one of the merges above invalidated
# the rover.
# It's simplest to simply reset the rover to the newly freed block.
self.rover = node
def dump(self, msg=""):
if msg:
print msg,
count = 0
bytes = 0
node = self.avail.next
while node is not self.avail:
bytes += node.size
count += 1
node = node.next
print count, "free blocks,", bytes, "free bytes"
self.report()
class BlockNode(Node):
__slots__ = ['addr']
def __init__(self, oid, size, addr):
Node.__init__(self, oid, size)
self.addr = addr
def testallocator(factory=BuddyAllocator):
# Run one of Knuth's experiments as a test
import random
import heapq # This only runs with Python 2.3, folks :-)
reportfreq = 100
cachelimit = 2**17
cache = factory(cachelimit)
queue = []
T = 0
blocks = 0
while T < 5000:
while queue and queue[0][0] <= T:
time, node = heapq.heappop(queue)
assert time == T
##print "free addr=%d, size=%d" % (node.addr, node.size)
cache.free(node)
blocks -= 1
size = random.randint(100, 2000)
lifetime = random.randint(1, 100)
node = cache.alloc(size)
if node is None:
print "out of mem"
cache.dump("T=%4d: %d blocks;" % (T, blocks))
break
else:
##print "alloc addr=%d, size=%d" % (node.addr, node.size)
blocks += 1
heapq.heappush(queue, (T + lifetime, node))
T = T+1
if T % reportfreq == 0:
cache.dump("T=%4d: %d blocks;" % (T, blocks))
def hitrate(loads, hits):
return "%5.1f%%" % (100.0 * hits / max(1, loads))
def duration(secs):
mm, ss = divmod(secs, 60)
hh, mm = divmod(mm, 60)
if hh:
return "%d:%02d:%02d" % (hh, mm, ss)
if mm:
return "%d:%02d" % (mm, ss)
return "%d" % ss
def addcommas(n):
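# Insert thousands separators into an integer's string form,
# e.g. addcommas(1234567) -> '1,234,567' and addcommas(-1234) -> '-1,234'.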
sign, s = '', str(n)
if s[0] == '-':
sign, s = '-', s[1:]
i = len(s) - 3
while i > 0:
s = s[:i] + ',' + s[i:]
i -= 3
return sign + s
import random
def maybe(f, p=0.5):
if random.random() < p:
f()
#############################################################################
# Thor-like eviction scheme.
#
# The cache keeps a list of all objects, and uses a travelling pointer
# to decay the worth of objects over time.
class ThorNode(Node):
__slots__ = ['worth']
def __init__(self, oid, size, worth=None):
Node.__init__(self, oid, size)
self.worth = worth
class ThorListHead(Node):
def __init__(self):
Node.__init__(self, 0, 0)
self.next = self.prev = self
class ThorSimulation(Simulation):
extras = "evicts", "trips"
def __init__(self, cachelimit):
Simulation.__init__(self, cachelimit)
# Maximum total of object sizes we keep in cache.
self.maxsize = cachelimit
# Current total of object sizes in cache.
self.currentsize = 0
# A worth byte maps to a set of all objects with that worth.
# This is cheap to keep updated, and makes finding low-worth
# objects for eviction trivial (just march over the worthsets
# list, in order).
self.worthsets = [Set() for dummy in range(256)]
# We keep a circular list of all objects in cache. currentobj
# walks around it forever. Each time _tick() is called, the
# worth of currentobj is decreased, basically by shifting
# right 1, and currentobj moves on to the next object. When
# an object is first inserted, it enters the list right before
# currentobj. When an object is accessed, its worth is
# increased by or'ing in 0x80. This scheme comes from the
# Thor system, and is an inexpensive way to account for both
# recency and frequency of access: recency is reflected in
# the leftmost bit set, and frequency by how many bits are
# set.
#
# Note: because evictions are interleaved with ticks,
# unlinking an object is tricky, lest we evict currentobj. The
# class _unlink method takes care of this properly.
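# Illustration of the decay: an object just accessed has worth 0x80 (or
# higher); successive visits by the pointer roughly halve it to 0x40,
# 0x20, 0x10, ...; another access ORs 0x80 back in, so recently *and*
# frequently used objects keep more bits set.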
self.listhead = ThorListHead()
self.currentobj = self.listhead
# Map an object.oid to its ThorNode.
self.oid2object = {}
self.total_evicts = self.total_trips = 0
# Unlink object from the circular list, taking care not to lose
# track of the current object. Always call this instead of
# invoking obj.unlink() directly.
def _unlink(self, obj):
assert obj is not self.listhead
if obj is self.currentobj:
self.currentobj = obj.next
obj.unlink()
# Change obj.worth to newworth, maintaining invariants.
def _change_worth(self, obj, newworth):
if obj.worth != newworth:
self.worthsets[obj.worth].remove(obj)
obj.worth = newworth
self.worthsets[newworth].add(obj)
def add(self, object):
assert object.oid not in self.oid2object
self.oid2object[object.oid] = object
newsize = self.currentsize + object.size
if newsize > self.maxsize:
self._evictbytes(newsize - self.maxsize)
self.currentsize += object.size
object.linkbefore(self.currentobj)
if object.worth is None:
# Give smaller objects higher initial worth. This favors kicking
# out unreferenced large objects before kicking out unreferenced
# small objects. On real life traces, this is a significant
# win for the hit rate.
object.worth = 32 - int(round(math.log(object.size, 2)))
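# e.g. a 256-byte object starts with worth 32 - 8 == 24, while a
# 64KB object starts with worth 32 - 16 == 16.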
self.worthsets[object.worth].add(object)
# Decrease the worth of the current object, and advance the
# current object.
def _tick(self):
c = self.currentobj
if c is self.listhead:
c = c.next
if c is self.listhead: # list is empty
return
self.total_trips += 1
self.trips += 1
self._change_worth(c, (c.worth + 1) >> 1)
self.currentobj = c.next
def access(self, oid):
self._tick()
obj = self.oid2object.get(oid)
if obj is None:
return None
self._change_worth(obj, obj.worth | 0x80)
return obj
# Evict objects of least worth first, until at least nbytes bytes
# have been freed.
def _evictbytes(self, nbytes):
for s in self.worthsets:
while s:
if nbytes <= 0:
return
obj = s.pop()
nbytes -= obj.size
self._evictobj(obj)
def _evictobj(self, obj):
self.currentsize -= obj.size
self.worthsets[obj.worth].discard(obj)
del self.oid2object[obj.oid]
self._unlink(obj)
self.evicts += 1
self.total_evicts += 1
def _evict_without_bumping_evict_stats(self, obj):
self._evictobj(obj)
self.evicts -= 1
self.total_evicts -= 1
# Simulator overrides from here on.
def restart(self):
# Reset base class
Simulation.restart(self)
# Reset additional per-run statistics
self.evicts = self.trips = 0
def write(self, oid, size):
obj = self.oid2object.get(oid)
worth = None
if obj is not None:
worth = obj.worth
self._evict_without_bumping_evict_stats(obj)
self.add(ThorNode(oid, size, worth))
def load(self, oid, size):
if self.access(oid) is not None:
self.hits += 1
self.total_hits += 1
else:
self.write(oid, size)
def inval(self, oid):
obj = self.oid2object.get(oid)
if obj is not None:
self.invals += 1
self.total_invals += 1
self._evict_without_bumping_evict_stats(obj)
# Take the "x" off to see additional stats after each restart period.
def xreport(self):
Simulation.report(self)
print 'non-empty worth sets', sum(map(bool, self.worthsets)),
print 'objects', len(self.oid2object),
print 'size', self.currentsize
#############################################################################
# Perfection: What if the cache were unbounded, and never forgot anything?
# This simulator answers that question directly; the cache size parameter
# isn't used.
class UnboundedSimulation(Simulation):
extraname = 'evicts' # the Simulation base class requires at least one extra statistic
def __init__(self, cachelimit):
Simulation.__init__(self, cachelimit)
self.oids = Set()
self.evicts = self.total_evicts = 0
def write(self, oid, size):
self.oids.add(oid)
def load(self, oid, size):
if oid in self.oids:
self.hits += 1
self.total_hits += 1
else:
self.oids.add(oid)
def inval(self, oid):
if oid in self.oids:
self.invals += 1
self.total_invals += 1
self.oids.remove(oid)
if __name__ == "__main__":
sys.exit(main())
...@@ -138,3 +138,20 @@ def store(storage, oid, value='x', serial=ZODB.utils.z64):
storage.store(oid, serial, value, '', t)
storage.tpc_vote(t)
storage.tpc_finish(t)
def mess_with_time(test=None, globs=None, now=1278864701.5):
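# Replace time.time with a fake clock that starts just after `now` and
# advances by one second per call; zope.testing.setupstack registers a
# teardown that restores the real time.time afterwards.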
now = [now]
def faux_time():
now[0] += 1
return now[0]
if test is None and globs is not None:
# sigh
faux_time.globs = globs
test = faux_time
import time
zope.testing.setupstack.register(test, setattr, time, 'time', time.time)
time.time = faux_time