# bench_1filezodb.py
# Wendelin.bigfile | benchmarks for zodb backend
#
# TODO text

from wendelin.bigfile.file_zodb import ZBigFile
from wendelin.lib.mem import memset
from wendelin.lib.testing import getTestDB, Adler32, nulladler32_bysize, ffadler32_bysize
from wendelin.lib.zodb import dbclose
import transaction

testdb = None
from wendelin.bigfile.tests.bench_0virtmem import filesize, blksize # to get comparable timings
blen = filesize // blksize
nulladler32 = nulladler32_bysize(blen * blksize)
ffadler32   = ffadler32_bysize(blen * blksize)
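
# For reference - a minimal sketch of what the *_bysize helpers above are
# assumed to compute: the adler32 checksum of `size` zero bytes (resp. `size`
# 0xff bytes). The _sketch_* names are illustrative only - they are not part
# of wendelin.lib.testing and are not used by the benchmarks:
from zlib import adler32 as _adler32

def _sketch_nulladler32_bysize(size):
    return _adler32(b'\x00' * size)

def _sketch_ffadler32_bysize(size):
    return _adler32(b'\xff' * size)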


def setup_module():
    global testdb
    testdb = getTestDB()
    testdb.setup()
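    # getTestDB comes from wendelin.lib.testing (imported above); the
    # assumption here is that it decides which ZODB storage backend the
    # benchmark runs against, so the same code can be timed e.g. on
    # FileStorage, ZEO or NEO.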

    root = testdb.dbopen()
    root['zfile'] = ZBigFile(blksize)
    transaction.commit()

    dbclose(root)


def teardown_module():
    testdb.teardown()


# NOTE this benchmark has to run before bench_bigz_writeff - it reads the
# file while all blocks are still unwritten holes, which must read as zeros
# and thus hash to nulladler32.
def bench_bigz_readhole():  _bench_bigz_hash(Adler32,   nulladler32)

def bench_bigz_writeff():
    root = testdb.dbopen()
    f   = root['zfile']
    fh  = f.fileh_open()    # TODO + ram
    vma = fh.mmap(0, blen)  # XXX assumes blksize == pagesize
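    # A hedged note on the XXX above: fh.mmap(0, blen) maps blen virtmem
    # pages starting at page 0, which covers the whole file only if one
    # ZBigFile block is exactly one page. If the sizes ever diverge, the
    # mapping length would need scaling, e.g. (hypothetical pagesize
    # attribute, not an actual fileh API):
    #
    #   vma = fh.mmap(0, blen * blksize // fh.pagesize)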

    memset(vma, 0xff)       # fill the whole file with 0xff via the mapping
    transaction.commit()    # persist the dirtied pages to the database

    del vma # TODO vma.close()
    del fh  # TODO fh.close()
    del f   # XXX  f.close() ?
    dbclose(root)


def _bench_bigz_hash(hasher, expect):
    root = testdb.dbopen()
    f   = root['zfile']
    fh  = f.fileh_open()    # TODO + ram
    vma = fh.mmap(0, blen)  # XXX assumes blksize == pagesize

    h = hasher()
    h.update(vma)           # hash the whole file content through the mapping

    del vma # vma.close()
    del fh  # fh.close()
    del f   # f.close()
    dbclose(root)
    assert h.digest() == expect


def bench_bigz_read():      _bench_bigz_hash(Adler32,   ffadler32)
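

# Minimal sketch of the hasher interface _bench_bigz_hash relies on: an
# object created via hasher(), fed with update(buf) and compared through
# digest(). The assumption is that wendelin.lib.testing's Adler32 wraps
# zlib.adler32 roughly like this (illustrative only, unused here):
import zlib as _zlib

class _SketchAdler32(object):
    def __init__(self):
        self._h = _zlib.adler32(b'')

    def update(self, data):
        self._h = _zlib.adler32(data, self._h)

    def digest(self):
        return self._h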