Commit ec103e5c authored by Kirill Smelkov

.

parent fb35f696
......@@ -73,7 +73,7 @@ def teardown_function(f):
os.rmdir(testmntpt)
# ---- tests ----
# ---- test join/autostart ----
# test that zurl does not change from one storage open to another.
def test_zurlstable():
......@@ -116,9 +116,15 @@ def test_join_autostart():
assert os.path.isdir(wc.mountpoint + "/head/bigfile")
# --- test access to data ----
# tDB provides a database/wcfs testing environment.
#
# XXX link -> tFile + tWatch.
# BigFiles opened under tDB are represented as tFile - see .open for details.
# Watches opened under tDB are represented as tWatch - see .openwatch for details.
#
# XXX .open -> .topen
# XXX .openwatch -> .topenwatch ?
#
# XXX print -> t.trace/debug() + t.verbose depending on py.test -v -v ?
class tDB:
......@@ -144,6 +150,7 @@ class tDB:
# tracked tFiles & tWatches
t._tracked = set()
# close closes test database as well as all tracked files, watches and wcfs.
def close(t):
for tf in t._tracked.copy():
tf.close()
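# Editor's illustration (not part of this commit): a hypothetical test showing
# how tDB is meant to be driven, assuming .open/.openwatch/.close behave as the
# comments above describe. The names zf and test_example are made up.
#
#   @func
#   def test_example():
#       t = tDB(); defer(t.close)     # close() also closes tracked tFiles/tWatches
#       f = t.open(zf)                # ZBigFile -> tFile   (XXX .open -> .topen)
#       w = t.openwatch()             # -> tWatch           (XXX .openwatch -> .topenwatch ?)
#       f.assertCache([0]*f._sizeinblk())   # nothing in OS pagecache yet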
......@@ -251,6 +258,10 @@ class tDB:
# tFile provides a testing environment for one bigfile on wcfs.
#
# .blk() provides access to the data of one block. .cached() reports which
# blocks are currently in the OS pagecache. .assertCache and .assertBlk/.assertData
# assert on the state of the cache and of the data.
class tFile:
# maximum number of blocks we mmap for 1 file.
# this should not be big, so as not to exceed the mlock limit.
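# Editor's illustration (not part of this commit): the mlock limit referred to
# above is RLIMIT_MEMLOCK; it can be inspected with the stdlib resource module.
import resource
soft, hard = resource.getrlimit(resource.RLIMIT_MEMLOCK)
# _max_tracked*blksize has to stay under `soft` for locked mappings to succeed.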
......@@ -277,51 +288,15 @@ class tFile:
mm.unmap(t.fmmap)
t.f.close()
# sizeblk returns file size in blocks.
def sizeblk(t):
st = os.fstat(t.f.fileno())
assert st.st_size % t.blksize == 0
assert st.st_size // t.blksize <= t._max_tracked
return st.st_size // t.blksize
"""
# readblk reads ZBigFile[blk] from wcfs.
# XXX not needed?
@func
def readblk(t, zf, blk, at=None):
assert isinstance(zf, ZBigFile)
f = t._open(zf, at=at) # XXX binary, !buffered
defer(f.close)
blksize = zf.blksize
f.seek(blk*blksize)
n = blksize
data = b''
while n > 0:
chunk = f.read(n)
assert len(chunk) > 0
data += chunk
n -= len(chunk)
return data
"""
# assertCache asserts state of OS file cache.
#
# incorev is [] of 1/0 representing whether block data is present or not.
def assertCache(t, incorev):
assert t.cached() == incorev
# blk returns a bytearray copy of file[blk] data.
def blk(t, blk):
assert blk <= t._max_tracked
return bytearray(t.fmmap[blk*t.blksize:(blk+1)*t.blksize])
# cached returns [] with indicating whether of file block is cached or not.
# cached returns [] indicating whether each file block is cached or not.
# 1 - cached, 0 - not cached, fractional (0,1) - some pages of the block are cached, some are not.
def cached(t):
l = t.sizeblk()
l = t._sizeinblk()
incorev = mm.incore(t.fmmap[:l*t.blksize])
# incorev is in pages; convert to blocks
assert t.blksize % mm.PAGE_SIZE == 0
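# Editor's illustration (not part of this commit): the per-page -> per-block
# aggregation that cached() performs, as a self-contained function. mm.incore
# output and mm.PAGE_SIZE are modeled with a plain list and int here; this
# sketches only the logic.
def _pages_to_blocks(pagev, pages_per_blk):
    # pagev: [] of 0/1 per OS page; returns [] per block with
    # 0, 1, or fractional (0,1) meaning partially cached.
    cachev = []
    for i in range(0, len(pagev), pages_per_blk):
        frac = sum(pagev[i:i+pages_per_blk]) / float(pages_per_blk)
        cachev.append(int(frac) if frac in (0.0, 1.0) else frac)
    return cachev

# e.g. with 2 pages per block: [1,1, 1,0, 0,0] -> [1, 0.5, 0]
assert _pages_to_blocks([1,1, 1,0, 0,0], 2) == [1, 0.5, 0]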
......@@ -336,6 +311,19 @@ class tFile:
cachev[blk] = int(cachev[blk]) # 0.0 -> 0, 1.0 -> 1
return cachev
# _sizeinblk returns file size in blocks.
def _sizeinblk(t):
st = os.fstat(t.f.fileno())
assert st.st_size % t.blksize == 0
assert st.st_size // t.blksize <= t._max_tracked
return st.st_size // t.blksize
# assertCache asserts the state of the OS cache for the file.
#
# incorev is [] of 1/0 representing whether block data is present or not.
def assertCache(t, incorev):
assert t.cached() == incorev
# assertBlk asserts that file block #blk has data as expected.
#
# Expected data may be given with size < t.blksize. In such a case the data
......@@ -343,10 +331,7 @@ class tFile:
def assertBlk(t, blk, data):
assert len(data) <= t.blksize
data += b'\0'*(t.blksize - len(data)) # trailing zeros
st = os.fstat(t.f.fileno())
assert st.st_size % t.blksize == 0
assert blk < (st.st_size // t.blksize)
assert st.st_size // t.blksize <= t._max_tracked
assert blk < t._sizeinblk()
# XXX assert individually for every block's page? (easier debugging?)
assert t.blk(blk) == data, ("#blk: %d" % blk)
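# Editor's illustration (not part of this commit): the zero-padding that
# assertBlk applies to expected data, as a standalone helper. blksize is a
# parameter here, while in tFile it comes from the opened file.
def _pad_block(data, blksize):
    assert len(data) <= blksize
    return data + b'\0'*(blksize - len(data))   # pad with trailing zeros

assert _pad_block(b'abc', 8) == b'abc\x00\x00\x00\x00\x00'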
......