Commit ef6d0669 authored by Kirill Smelkov's avatar Kirill Smelkov

.

parent e76f9f9a
......@@ -17,7 +17,7 @@
#
# See COPYING file for full licensing terms.
# See https://www.nexedi.com/licensing for rationale and options.
"""test wcfs filesystem from outside as python client process"""
"""wcfs_test tests wcfs filesystem from outside as python client process"""
from __future__ import print_function
......@@ -116,7 +116,11 @@ def test_join_autostart():
assert os.path.isdir(wc.mountpoint + "/head/bigfile")
# tDB is database/wcfs testing environment.
# tDB provides database/wcfs testing environment.
#
# XXX link -> tFile + tWatch.
#
# XXX print -> t.trace/debug() + t.verbose depending on py.test -v -v ?
class tDB:
def __init__(t):
t.root = testdb.dbopen()
......@@ -126,6 +130,7 @@ class tDB:
t._changed = {} # ZBigFile -> {} blk -> data
# committed: head + head history
# XXX -> vδF (committed changes to files)
t.head = None
t._headv = []
......@@ -157,7 +162,9 @@ class tDB:
assert len(data) <= zf.blksize
zfDelta[blk] = data
# commit commits transaction and remembers/returns committed transaction ID.
# commit commits transaction and makes sure wcfs is synchronized to it.
#
# It remembers/returns committed transaction ID.
def commit(t):
# perform modifications scheduled by change.
# use !wcfs mode so that we prepare data independently of wcfs code paths.
......@@ -171,7 +178,7 @@ class tDB:
t._changed = {}
# NOTE there is no clean way to retrieve tid of just committed transaction
# we are using last._p_serial as workaround.
# we use last._p_serial as workaround.
t.root['_last'] = last = Persistent()
last._p_changed = 1
......@@ -181,10 +188,14 @@ class tDB:
t.ncommit += 1
t.head = head
t._headv.append(head)
# sync wcfs
t._wcsync()
return head
# wcsync makes sure wcfs synchronized to latest committed transaction.
def wcsync(t):
# _wcsync makes sure wcfs is synchronized to latest committed transaction.
def _wcsync(t):
while len(t._wc_zheadv) < len(t._headv):
l = t._wc_zheadfh.readline()
#print('> zhead read: %r' % l)
......@@ -239,7 +250,7 @@ class tDB:
return tWatch(t)
# tFile is testing environment for one bigfile on wcfs.
# tFile provides testing environment for one bigfile on wcfs.
class tFile:
# maximum number of pages we mmap for 1 file.
# this should not be too big, so as not to exceed the mlock limit.
......@@ -251,10 +262,10 @@ class tFile:
t.f = tdb._open(zf, at=at)
t.blksize = zf.blksize
# mmap the file past the end up to XXX pages and lock the pages with
# MLOCK_ONFAULT. This way when a page is read by mmap access we have
# the guarantee from kernel that the page will stay in pagecache. We
# rely on this to verify OS cache state.
# mmap the file past the end up to _max_tracked pages and lock the
# pages with MLOCK_ONFAULT. This way when a page is read by mmap access
# we have the guarantee from kernel that the page will stay in
# pagecache. We rely on this to verify OS cache state.
assert t.blksize % mm.PAGE_SIZE == 0
t.fmmap = mm.map_ro(t.f.fileno(), 0, t._max_tracked*t.blksize)
mm.lock(t.fmmap, mm.MLOCK_ONFAULT)
......@@ -302,7 +313,7 @@ class tFile:
assert t.cached() == incorev
# blk returns bytearray connected to view of file[blk].
# blk returns bytearray view of file[blk].
def blk(t, blk):
assert blk <= t._max_tracked
return bytearray(t.fmmap[blk*t.blksize:(blk+1)*t.blksize])
......@@ -348,9 +359,7 @@ class tFile:
# Expected blocks may be given with size < zf.blksize. In such case they
# are implicitly appended with trailing zeros.
#
# It also check file size and optionally mtime.
#
# XXX also check pagecache state?
# It also checks file size and optionally mtime.
def assertData(t, datav, mtime=None):
st = os.fstat(t.f.fileno())
assert st.st_size == len(datav)*t.blksize
......@@ -364,7 +373,7 @@ class tFile:
t.assertCache([1]*len(datav))
# tWatch is testing environment for /head/watch opened on wcfs.
# tWatch provides testing environment for /head/watch opened on wcfs.
class tWatch:
def __init__(t, tdb):
......@@ -540,7 +549,6 @@ def test_wcfs():
t.ncommit = 0 # so that atX in the code correspond with debug output
at0_ = t.commit()
assert tidtime(at0_) > tidtime(at0)
t.wcsync()
# >>> lookup non-BigFile -> must be rejected
with raises(OSError) as exc:
......@@ -556,7 +564,6 @@ def test_wcfs():
t.change(zf, {2: b'alpha'})
at1 = t.commit()
t.wcsync()
f.assertCache([0,0,0]) # initially not cached
f.assertData ([b'',b'',b'alpha'], mtime=t.head)
......@@ -564,8 +571,6 @@ def test_wcfs():
t.change(zf, {2: b'beta', 3: b'gamma'})
at2 = t.commit()
t.wcsync()
# f @head
f.assertCache([1,1,0,0])
f.assertData ([b'',b'', b'beta', b'gamma'], mtime=t.head)
......@@ -581,7 +586,6 @@ def test_wcfs():
t.change(zf, {2: b'kitty'})
at3 = t.commit()
t.wcsync()
f.assertCache([1,1,0,1])
# f @head is opened again -> cache must not be lost
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment