Commit 899b6102 authored by Kirill Smelkov

.

parent 29c9f13d
@@ -535,7 +535,7 @@ func (bfdata *BigFileData) Read(_ nodefs.File, dest []byte, off int64, fctx *fus
 		blk := blkoff / zbf.blksize
 		wg.Go(func() error {
 			δ := blkoff-aoff	// blk position in dest
-			log.Infof("readBlk #%d dest[%d:+%d]", blk, δ, zbf.blksize)
+			//log.Infof("readBlk #%d dest[%d:+%d]", blk, δ, zbf.blksize)
 			return bfdata.readBlk(ctx, blk, dest[δ:δ+zbf.blksize])
 		})
 	}
@@ -697,6 +697,11 @@ func main() {
 	zurl := flag.Args()[0]
 	mntpt := flag.Args()[1]
 
+	// debug -> precise t, no dates	(XXX -> always precise t?)
+	if *debug {
+		stdlog.SetFlags(stdlog.Lmicroseconds)
+	}
+
 	// open zodb storage
 	ctx := context.Background()	// XXX + timeout?
 	zstor, err := zodb.OpenStorage(ctx, zurl, &zodb.OpenOptions{ReadOnly: true})
...
@@ -187,7 +187,8 @@ def test_bigfile_empty():
 
     # commit data to f and make sure we can see it on wcfs
-    hole = 1    # XXX -> back to 10
+    #hole = 10 XXX reenable
+    hole = 1
     fh = f.fileh_open()
     vma = fh.mmap(hole, 1)      # 1 page at offset=10
    s = b"hello world"
@@ -200,7 +201,7 @@ def test_bigfile_empty():
 
     # sync wcfs to ZODB
     wc._sync()
 
-    # we wrote "hello world" afte 10th block, but size is always mutiple of blksize.
+    # we wrote "hello world" after 10th block, but size is always mutiple of blksize.
     fsize = (hole + 1)*blksize
     st = os.stat(fpath + "/head/data")
@@ -217,5 +218,59 @@ def test_bigfile_empty():
     assert tail[len(s):] == b'\0'*(blksize - len(s))
+
+
+    # commit data again and make sure we can see both latest and snapshotted states.
+    tcommit1 = tidlast
+    fh = f.fileh_open()
+    vma1 = fh.mmap(hole, 1)
+    vma2 = fh.mmap(hole+1, 1)
+    s1 = b"hello 123"
+    s2 = b"alpha"
+    memcpy(vma1,s1)
+    memcpy(vma2,s2)
+    last._p_changed = 1
+    transaction.commit()
+    tidlast = last._p_serial
+
+    wc._sync()
+
+    fsize1 = fsize
+    fsize = fsize1 + blksize    # we added one more block
+    st = os.stat(fpath + "/head/data")
+    assert st.st_size == fsize
+    #assert st.st_mtime == tidtime(tidlast)    FIXME proper sync
+
+    assert readfile(fpath + "/head/at") == tidlast.encode("hex")
+
+    data = readfile(fpath + "/head/data")
+    assert len(data) == fsize
+    for i in range(hole):
+        assert data[i*blksize:(i+1)*blksize] == b'\0'*blksize
+
+    tail1 = data[hole*blksize:(hole+1)*blksize]
+    assert tail1[:len(s1)] == s1
+    assert tail1[len(s1):len(s)] == "ld"
+    assert tail1[len(s):] == b'\0'*(blksize - len(s))
+
+    tail2 = data[(hole+1)*blksize:]
+    assert tail2[:len(s2)] == s2
+    assert tail2[len(s2):] == b'\0'*(blksize - len(s2))
+
+
+    # path to f's state @tcommit1
+    fpath1 = fpath + ("/@%s" % tcommit1.encode("hex"))
+    os.mkdir(fpath1)
+
+    st = os.stat(fpath1 + "/data")
+    assert st.st_size == fsize1
+    #assert st.st_mtime == tidtime(tcommit1)   FIXME proper sync
+    #assert readfile(fpath + "/at") == tcommit1.encode("hex")  XXX do we need it?
+
+    data = readfile(fpath1 + "/data")
+    assert len(data) == fsize1
+    for i in range(hole):
+        assert data[i*blksize:(i+1)*blksize] == b'\0'*blksize
+
+    tail = data[hole*blksize:]
+    assert tail[:len(s)] == s
+    assert tail[len(s):] == b'\0'*(blksize - len(s))
 
     # TODO pagecache state after loading (via mincore)
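The new test reads a BigFile both at head and at a snapshotted revision purely through the filesystem. The following is a minimal illustrative sketch (not part of the commit) of that access pattern from any client; the helper name read_bigfile and its parameters are assumptions, while fpath and the "/@<tid-hex>" convention come from the test above.

    # sketch: read latest and snapshotted data of a BigFile exposed by wcfs
    # (illustrative only; read_bigfile is a hypothetical helper, not wcfs API)
    import os

    def read_bigfile(fpath, at=None):
        # fpath: directory of the file under the wcfs mount, as in the test
        # at:    hex-encoded ZODB tid of the wanted revision, or None for head
        if at is None:
            path = fpath + "/head/data"
        else:
            rev = fpath + "/@" + at
            if not os.path.exists(rev):
                os.mkdir(rev)       # as in the test: mkdir requests the snapshot
            path = rev + "/data"
        with open(path, "rb") as f:
            return f.read()

    # usage (mirrors the test):
    #   data_head = read_bigfile(fpath)
    #   data_old  = read_bigfile(fpath, at=tcommit1.encode("hex"))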