Commit ec3ae866 authored by Kirill Smelkov's avatar Kirill Smelkov

.

parent 00ab5fb4
......@@ -27,6 +27,7 @@ import transaction
from transaction import TransactionManager
from ZODB.POSException import ConflictError
from numpy import dtype, uint8, all, array_equal, arange
from golang import defer, func
from threading import Thread
from six.moves import _thread
......@@ -240,12 +241,14 @@ def test_zbigarray_order():
# the same as test_bigfile_filezodb_vs_conn_migration but explicitly for ZBigArray
# ( NOTE this test is almost dup of test_zbigarray_vs_conn_migration() )
@func
def test_zbigarray_vs_conn_migration():
root01 = testdb.dbopen()
conn01 = root01._p_jar
db = conn01.db()
conn01.close()
del root01
defer(db.close)
c12_1 = NotifyChannel() # T11 -> T21
c21_1 = NotifyChannel() # T21 -> T11
......@@ -342,12 +345,14 @@ def test_zbigarray_vs_conn_migration():
c21_2 = NotifyChannel() # T22 -> T12
# open, abort
@func
def T12():
tell, wait = c12_2.tell, c21_2.wait
wait('T2-conn22-opened')
conn12 = db.open()
defer(conn12.close)
tell('T1-conn12-opened')
wait('T2-zarray2-modified')
......@@ -357,10 +362,9 @@ def test_zbigarray_vs_conn_migration():
tell('T1-txn-aborted')
wait('T2-txn-committed')
conn12.close()
# open, modify, commit
@func
def T22():
tell, wait = c21_2.tell, c12_2.wait
......@@ -369,6 +373,7 @@ def test_zbigarray_vs_conn_migration():
assert _thread.get_ident() != t11_ident
conn22 = db.open()
defer(conn22.close)
assert conn22 is conn01
tell('T2-conn22-opened')
......@@ -388,8 +393,6 @@ def test_zbigarray_vs_conn_migration():
tell('T2-txn-committed')
conn22.close()
t12, t22 = Thread(target=T12), Thread(target=T22)
t12.start(); t22.start()
......@@ -401,6 +404,7 @@ def test_zbigarray_vs_conn_migration():
# now verify that zarray2 changed to 22 state, i.e. T22 was really committed
conn03 = db.open()
defer(conn03.close)
    # NOTE top of connection stack is conn22(=conn01), because it has most # of
    # active objects
assert conn03 is conn01
......@@ -410,23 +414,25 @@ def test_zbigarray_vs_conn_migration():
assert a03[0] == 22
del a03
dbclose(root03)
# underlying ZBigFile/ZBigFileH should properly handle 'invalidate' messages from DB
# ( NOTE this test is almost dup of test_zbigarray_vs_cache_invalidation() )
@func
def test_zbigarray_vs_cache_invalidation():
root = testdb.dbopen()
conn = root._p_jar
db = conn.db()
conn.close()
del root, conn
defer(db.close)
tm1 = TransactionManager()
tm2 = TransactionManager()
conn1 = db.open(transaction_manager=tm1)
root1 = conn1.root()
defer(conn1.close)
# setup zarray
root1['zarray3'] = a1 = ZBigArray((10,), uint8)
......@@ -440,6 +446,7 @@ def test_zbigarray_vs_cache_invalidation():
# read zarray in conn2
conn2 = db.open(transaction_manager=tm2)
root2 = conn2.root()
defer(conn2.close)
a2 = root2['zarray3']
assert a2[0:1] == [1] # read data in conn2 + make sure read correctly
......@@ -465,25 +472,26 @@ def test_zbigarray_vs_cache_invalidation():
# data from tm1 should propagate -> ZODB -> ram pages for _ZBigFileH in conn2
assert a2[0] == 2
conn2.close()
del conn2, root2
dbclose(root1)
# verify that conflicts on array content are handled properly
# ( NOTE this test is almost dup of test_bigfile_filezodb_vs_conflicts() )
@func
def test_zbigarray_vs_conflicts():
root = testdb.dbopen()
conn = root._p_jar
db = conn.db()
conn.close()
del root, conn
defer(db.close)
tm1 = TransactionManager()
tm2 = TransactionManager()
conn1 = db.open(transaction_manager=tm1)
root1 = conn1.root()
defer(conn1.close)
# setup zarray
root1['zarray3a'] = a1 = ZBigArray((10,), uint8)
......@@ -496,6 +504,7 @@ def test_zbigarray_vs_conflicts():
# read zarray in conn2
conn2 = db.open(transaction_manager=tm2)
root2 = conn2.root()
defer(conn2.close)
a2 = root2['zarray3a']
assert a2[0:1] == [1] # read data in conn2 + make sure read correctly
......@@ -525,24 +534,24 @@ def test_zbigarray_vs_conflicts():
assert a1[0:1] == [13] # re-read in conn1 XXX -> [0] == 13
conn2.close()
dbclose(root1)
# verify that conflicts on array metadata are handled properly
# ( NOTE this test is close to test_zbigarray_vs_conflicts() )
@func
def test_zbigarray_vs_conflicts_metadata():
root = testdb.dbopen()
conn = root._p_jar
db = conn.db()
conn.close()
del root, conn
defer(db.close)
tm1 = TransactionManager()
tm2 = TransactionManager()
conn1 = db.open(transaction_manager=tm1)
root1 = conn1.root()
defer(conn1.close)
# setup zarray
root1['zarray3b'] = a1 = ZBigArray((10,), uint8)
......@@ -555,6 +564,7 @@ def test_zbigarray_vs_conflicts_metadata():
# read zarray in conn2
conn2 = db.open(transaction_manager=tm2)
root2 = conn2.root()
defer(conn2.close)
a2 = root2['zarray3b']
assert a2[0:1] == [1] # read data in conn2 + make sure read correctly
......@@ -584,17 +594,16 @@ def test_zbigarray_vs_conflicts_metadata():
assert len(a1) == 13 # re-read in conn1
conn2.close()
dbclose(root1)
# verify how ZBigArray behaves when plain properties are changed / invalidated
@func
def test_zbigarray_invalidate_shape():
root = testdb.dbopen()
conn = root._p_jar
db = conn.db()
conn.close()
del root, conn
defer(db.close)
print
tm1 = TransactionManager()
......@@ -602,6 +611,7 @@ def test_zbigarray_invalidate_shape():
conn1 = db.open(transaction_manager=tm1)
root1 = conn1.root()
defer(conn1.close)
# setup zarray
root1['zarray4'] = a1 = ZBigArray((10,), uint8)
......@@ -614,6 +624,7 @@ def test_zbigarray_invalidate_shape():
# read zarray in conn2
conn2 = db.open(transaction_manager=tm2)
root2 = conn2.root()
defer(conn2.close)
a2 = root2['zarray4']
assert a2[0:1] == [1] # read data in conn2 + make sure read correctly
......@@ -632,6 +643,4 @@ def test_zbigarray_invalidate_shape():
assert a2[10:11] == [123] # XXX -> [10] = 123 after BigArray can
conn2.close()
del conn2, root2, a2
dbclose(root1)
# Wendelin.bigfile | benchmarks for zodb backend
# Copyright (C) 2014-2015 Nexedi SA and Contributors.
# Copyright (C) 2014-2019 Nexedi SA and Contributors.
# Kirill Smelkov <kirr@nexedi.com>
#
# This program is free software: you can Use, Study, Modify and Redistribute
......@@ -24,6 +24,7 @@ from wendelin.lib.mem import memset
from wendelin.lib.testing import getTestDB, Adler32, nulladler32_bysize, ffadler32_bysize
from wendelin.lib.zodb import dbclose
import transaction
from pygolang import defer, func
testdb = None
from wendelin.bigfile.tests.bench_0virtmem import filesize, blksize # to get comparable timings
......@@ -51,8 +52,10 @@ def teardown_module():
# NOTE runs before _writeff
def bench_bigz_readhole(): _bench_bigz_hash(Adler32, nulladler32)  # reads not-yet-written (hole) data; expects adler32 of all-zero bytes — TODO confirm against nulladler32_bysize
@func
def bench_bigz_writeff():
root = testdb.dbopen()
defer(lambda: dbclose(root))
f = root['zfile']
fh = f.fileh_open() # TODO + ram
vma = fh.mmap(0, blen) # XXX assumes blksize == pagesize
......@@ -63,11 +66,12 @@ def bench_bigz_writeff():
del vma # TODO vma.close()
del fh # TODO fh.close()
del f # XXX f.close() ?
dbclose(root)
@func
def _bench_bigz_hash(hasher, expect):
root = testdb.dbopen()
defer(lambda: dbclose(root))
f = root['zfile']
fh = f.fileh_open() # TODO + ram
vma = fh.mmap(0, blen) # XXX assumes blksize == pagesize
......@@ -78,7 +82,6 @@ def _bench_bigz_hash(hasher, expect):
del vma # vma.close()
del fh # fh.close()
del f # f.close()
dbclose(root)
assert h.digest() == expect
......
......@@ -75,6 +75,7 @@ def cacheInfo(db):
def kkey(klass):
    """Return the fully-qualified dotted name of *klass*, i.e. "<module>.<classname>"."""
    mod = klass.__module__
    name = klass.__name__
    return '%s.%s' % (mod, name)
@func
def test_livepersistent():
root = dbopen()
transaction.commit() # set root._p_jar
......@@ -153,10 +154,12 @@ def test_livepersistent():
conn1 = db.open(transaction_manager=tm1)
root1 = conn1.root()
defer(lambda: dbclose(root1))
lp1 = root1['live']
conn2 = db.open(transaction_manager=tm2)
root2 = conn2.root()
defer(conn2.close)
lp2 = root2['live']
# 2 connections are setup running in parallel with initial obj state as ghost
......@@ -203,9 +206,7 @@ def test_livepersistent():
assert a == 1
conn2.close()
del conn2, root2
dbclose(root1)
......@@ -536,6 +537,7 @@ def test_bigfile_filezodb_vs_conn_migration():
# now verify that zfile2 changed to 22 state, i.e. T22 was really committed
conn03 = db.open()
defer(conn03.close)
# NOTE top of connection stack is conn22(=conn01), because it has most # of
# active objects
assert conn03 is conn01
......@@ -566,6 +568,7 @@ def test_bigfile_filezodb_vs_cache_invalidation():
conn1 = db.open(transaction_manager=tm1)
root1 = conn1.root()
defer(conn1.close)
# setup zfile with fileh view to it
root1['zfile3'] = f1 = ZBigFile(blksize)
......@@ -583,6 +586,7 @@ def test_bigfile_filezodb_vs_cache_invalidation():
# read zfile and setup fileh for it in conn2
conn2 = db.open(transaction_manager=tm2)
root2 = conn2.root()
defer(conn2.close)
f2 = root2['zfile3']
fh2 = f2.fileh_open()
......@@ -620,6 +624,8 @@ def test_bigfile_filezodb_vs_cache_invalidation():
# data from tm1 should propagate -> ZODB -> ram pages for _ZBigFileH in conn2
assert Blk(vma2, 0)[0] == 2
del conn2, root2
# verify that conflicts on ZBlk are handled properly
# ( NOTE this test is almost dupped at test_zbigarray_vs_conflicts() )
......@@ -637,6 +643,7 @@ def test_bigfile_filezodb_vs_conflicts():
conn1 = db.open(transaction_manager=tm1)
root1 = conn1.root()
defer(conn1.close)
# setup zfile with fileh view to it
root1['zfile3a'] = f1 = ZBigFile(blksize)
......@@ -653,6 +660,7 @@ def test_bigfile_filezodb_vs_conflicts():
# read zfile and setup fileh for it in conn2
conn2 = db.open(transaction_manager=tm2)
root2 = conn2.root()
defer(conn2.close)
f2 = root2['zfile3a']
fh2 = f2.fileh_open()
......@@ -707,6 +715,7 @@ def test_bigfile_filezodb_fileh_gc():
conn2 = db.open()
root2 = conn2.root()
defer(conn2.close)
f2 = root2['zfile4']
fh2 = f2.fileh_open()
......@@ -731,43 +740,40 @@ def test_bigfile_filezodb_fmt_change():
# save/restore original ZBlk_fmt_write
fmt_write_save = file_zodb.ZBlk_fmt_write
def _():
file_zodb.ZBlk_fmt_write = fmt_write_save
defer(_)
try:
# check all combinations of format pairs via working with blk #0 and
# checking internal f structure
for src_fmt, src_type in ZBlk_fmt_registry.items():
for dst_fmt, dst_type in ZBlk_fmt_registry.items():
if src_fmt == dst_fmt:
continue # skip checking e.g. ZBlk0 -> ZBlk0
file_zodb.ZBlk_fmt_write = src_fmt
struct.pack_into('p', vma, 0, b(src_fmt))
transaction.commit()
# check all combinations of format pairs via working with blk #0 and
# checking internal f structure
for src_fmt, src_type in ZBlk_fmt_registry.items():
for dst_fmt, dst_type in ZBlk_fmt_registry.items():
if src_fmt == dst_fmt:
continue # skip checking e.g. ZBlk0 -> ZBlk0
assert type(f.blktab[0]) is src_type
file_zodb.ZBlk_fmt_write = src_fmt
struct.pack_into('p', vma, 0, b(src_fmt))
transaction.commit()
file_zodb.ZBlk_fmt_write = dst_fmt
struct.pack_into('p', vma, 0, b(dst_fmt))
transaction.commit()
assert type(f.blktab[0]) is src_type
assert type(f.blktab[0]) is dst_type
file_zodb.ZBlk_fmt_write = dst_fmt
struct.pack_into('p', vma, 0, b(dst_fmt))
transaction.commit()
finally:
file_zodb.ZBlk_fmt_write = fmt_write_save
assert type(f.blktab[0]) is dst_type
# test that ZData are reused for changed chunks in ZBlk1 format
@func
def test_bigfile_zblk1_zdata_reuse():
# set ZBlk_fmt_write to ZBlk1 for this test
fmt_write_save = file_zodb.ZBlk_fmt_write
file_zodb.ZBlk_fmt_write = 'ZBlk1'
try:
_test_bigfile_zblk1_zdata_reuse()
finally:
def _():
file_zodb.ZBlk_fmt_write = fmt_write_save
defer(_)
@func
def _test_bigfile_zblk1_zdata_reuse():
root = dbopen()
defer(lambda: dbclose(root))
root['zfile6'] = f = ZBigFile(blksize)
......
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2014-2017 Nexedi SA and Contributors.
# Copyright (C) 2014-2019 Nexedi SA and Contributors.
# Kirill Smelkov <kirr@nexedi.com>
#
# This program is free software: you can Use, Study, Modify and Redistribute
......@@ -37,6 +37,7 @@ from __future__ import print_function
from wendelin.bigarray.array_zodb import ZBigArray
from wendelin.lib.zodb import dbopen, dbclose
import transaction
from golang import defer, func
from numpy import float64, dtype, cumsum, sin
import psutil
......@@ -96,7 +97,7 @@ options:
""" % sys.argv[0], file=sys.stderr)
sys.exit(1)
@func
def main():
worksize = None
optv, argv = getopt.getopt(sys.argv[1:], '', ['worksize='])
......@@ -120,6 +121,7 @@ def main():
print('I: RAM: %.2fGB' % (float(ram_nbytes) / GB))
root = dbopen(dburi)
defer(lambda: dbclose(root))
if act == 'gen':
if worksize is None:
......@@ -145,8 +147,6 @@ def main():
m = p.memory_info()
print('VIRT: %i MB\tRSS: %iMB' % (m.vms//MB, m.rss//MB))
dbclose(root)
if __name__ == '__main__':
main()
# Wendelin.core.bigfile | Tests for ZODB utilities
# Copyright (C) 2014-2016 Nexedi SA and Contributors.
# Copyright (C) 2014-2019 Nexedi SA and Contributors.
# Kirill Smelkov <kirr@nexedi.com>
#
# This program is free software: you can Use, Study, Modify and Redistribute
......@@ -22,6 +22,7 @@ from wendelin.lib.testing import getTestDB
from persistent import Persistent, UPTODATE, GHOST
from BTrees.IOBTree import IOBTree
import transaction
from golang import defer, func
import gc
testdb = None
......@@ -45,8 +46,11 @@ class XInt(Persistent):
def objscachedv(jar):
    """Return the list of objects cached in ZODB connection *jar* (LRU order)."""
    cachedv = []
    for _oid, obj in jar._cache.lru_items():
        cachedv.append(obj)
    return cachedv
@func
def test_deactivate_btree():
root = dbopen()
defer(lambda: dbclose(root))
# init btree with many leaf nodes
leafv = []
root['btree'] = B = IOBTree()
......@@ -81,5 +85,3 @@ def test_deactivate_btree():
for obj in [B] + leafv:
assert obj._p_state == GHOST
assert obj not in cached
dbclose(root)
......@@ -244,12 +244,12 @@ setup(
# specify either to use e.g. ZODB3.10 or ZODB4 )
'ZODB3 >= 3.10',
'pygolang >= 0.0.2', # defer, sync.WaitGroup, ...
'six', # compat py2/py3
'psutil', # demo_zbigarray
'pygolang >= 0.0.2', # wcfs
'zodbtools', # XXX clarify whether we can always require it (check lib/zodb.py)
],
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment