Commit 6a3a590a authored by Kirill Smelkov

.

parent a41635da
@@ -100,6 +100,36 @@ func asctx(fctx *fuse.Context) context.Context {
}
// nodefs.DefaultNode.Open returns ENOSYS. This is convenient for filesystems
// that have no dynamic files at all. But for filesystems where there are some
// dynamic files - i.e. nodes which do need to support Open - returning ENOSYS
// from any single node will make the kernel think that the filesystem does not
// support Open at all.
//
// In wcfs we have dynamic files (e.g. /head/watch), so we have to avoid
// returning ENOSYS from nodes that do not need file handles.
//
// defaultNode is like nodefs.defaultNode, but its Open returns fh=0 and
// FOPEN_KEEP_CACHE to the kernel - similarly to how the openless case is handled there.
type defaultNode struct {
nodefs.Node
}
func (n *defaultNode) Open(flags uint32, fctx *fuse.Context) (nodefs.File, fuse.Status) {
// XXX return something else for directory?
return &nodefs.WithFlags{
File: nil,
FuseFlags: fuse.FOPEN_KEEP_CACHE,
}, fuse.OK
}
func newDefaultNode() nodefs.Node {
return &defaultNode{
Node: nodefs.NewDefaultNode(),
}
}
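For illustration, a node that does serve dynamic content keeps implementing Open itself; the openless fallback above only applies to nodes that merely embed defaultNode. A minimal sketch under the same go-fuse imports wcfs.go already uses (fuse, nodefs); watchNode here is hypothetical and not part of this change:

// watchNode is a hypothetical dynamic node - it overrides Open itself,
// so defaultNode's openless fallback does not apply to it.
type watchNode struct {
	nodefs.Node
}

func (w *watchNode) Open(flags uint32, fctx *fuse.Context) (nodefs.File, fuse.Status) {
	// hand out a real file handle to carry per-open dynamic state
	return nodefs.NewDefaultFile(), fuse.OK
}

func newWatchNode() nodefs.Node {
	return &watchNode{Node: newDefaultNode()}
}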
// NewStaticFile creates nodefs.Node for file with static data.
func NewStaticFile(data []byte) *SmallFile {
return NewSmallFile(func() []byte {
@@ -116,7 +146,7 @@ type SmallFile struct {
}
func NewSmallFile(readData func() []byte) *SmallFile {
return &SmallFile{Node: nodefs.NewDefaultNode(), readData: readData}
return &SmallFile{Node: newDefaultNode(), readData: readData}
}
func (f *SmallFile) GetAttr(out *fuse.Attr, _ nodefs.File, _ *fuse.Context) fuse.Status {
@@ -843,13 +843,13 @@ func (root *Root) mkdir(name string, fctx *fuse.Context) (_ *nodefs.Inode, err e
// XXX -> newHead()
revDir := &Head{
Node: nodefs.NewDefaultNode(),
Node: newDefaultNode(),
rev: rev,
zconn: zconnRev,
}
bfdir := &BigFileDir{
Node: nodefs.NewDefaultNode(),
Node: newDefaultNode(),
head: revDir,
fileTab: make(map[zodb.Oid]*BigFile),
}
@@ -915,7 +915,7 @@ func (head *Head) bigopen(ctx context.Context, oid zodb.Oid) (_ *BigFile, err er
// zconn.Incref()
return &BigFile{
Node: nodefs.NewDefaultNode(),
Node: newDefaultNode(),
head: head,
zbf: zbf,
zbfSize: zbfSize,
@@ -1161,6 +1161,10 @@ var gdebug = struct {
zheadSockTab map[*FileSock]struct{}
}{}
func init() {
gdebug.zheadSockTab = make(map[*FileSock]struct{})
}
// _wcfs_Zhead serves .wcfs/zhead opens.
type _wcfs_Zhead struct {
nodefs.Node
@@ -1221,19 +1225,19 @@ func main() {
// mount root + head/
// XXX -> newHead()
head := &Head{
Node: nodefs.NewDefaultNode(),
Node: newDefaultNode(),
rev: 0,
zconn: zhead,
}
bfdir := &BigFileDir{
Node: nodefs.NewDefaultNode(),
Node: newDefaultNode(),
head: head,
fileTab: make(map[zodb.Oid]*BigFile),
}
head.bfdir = bfdir
root := &Root{
Node: nodefs.NewDefaultNode(),
Node: newDefaultNode(),
zstor: zstor,
zdb: zdb,
head: head,
@@ -1269,7 +1273,7 @@ func main() {
// XXX ^^^ invalidate cache or direct io
// for debugging/testing
_wcfs := nodefs.NewDefaultNode()
_wcfs := newDefaultNode()
mkdir(root, ".wcfs", _wcfs)
mkfile(_wcfs, "zurl", NewStaticFile([]byte(zurl)))
@@ -1280,7 +1284,7 @@ func main() {
// There can be multiple openers. Once opened, the file must be read,
// as wcfs blocks waiting for data to be read.
mkfile(_wcfs, "zhead", &_wcfs_Zhead{
Node: nodefs.NewDefaultNode(),
Node: newDefaultNode(),
})
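To illustrate that protocol from the client side, here is a hedged sketch of a reader loop in Go (watchZhead and its logging are illustrative only and not part of wcfs; it assumes the standard library bufio, fmt and os packages, and that each line carries one hex tid, as the test below reads it):

// watchZhead is an illustrative client-side sketch: it opens .wcfs/zhead
// under the given mountpoint and keeps consuming the tid lines that wcfs
// emits - once opened, the stream must be read, since wcfs blocks on it.
func watchZhead(mntpt string) error {
	f, err := os.Open(mntpt + "/.wcfs/zhead")
	if err != nil {
		return err
	}
	defer f.Close()

	r := bufio.NewReader(f)
	for {
		line, err := r.ReadString('\n')
		if err != nil {
			return err
		}
		fmt.Printf("zhead is now at %s", line) // line is a hex tid + "\n"
	}
}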
// TODO handle autoexit
@@ -121,6 +121,7 @@ def test_join_autostart():
assert os.path.isdir(wc.mountpoint + "/head/bigfile")
"""
# XXX hack -> kill (?) or read-wait on head/at
# Conn._sync makes sure that the underlying wcfs is synced to the latest ZODB data
@method(wcfs.Conn)
@@ -140,15 +141,19 @@ def _sync(self):
os.stat("%s/head/bigfile/%s" % (self.mountpoint, bf))
print >>sys.stderr, "# <<< wcfs.Conn.sync\n"
"""
# XXX parametrize zblk0, zblk1
# XXX select !wcfs mode so that we prepare data through !wcfs path.
@func
def test_bigfile_empty(): # XXX name
def test_wcfs():
root = testdb.dbopen()
defer(lambda: dbclose(root))
wc = wcfs.join(testzurl, autostart=True)
defer(wc.close)
root['zfile'] = f = ZBigFile(blksize)
# NOTE there is no clean way to retrieve tid of just committed transaction
@@ -170,12 +175,26 @@ def test_bigfile_empty(): # XXX name
Z.tidv.append(tid)
return tid
# wcsync makes sure wc is synchronized to the latest committed transaction
Z.wctidv = []
wc_zhead = open(wc.mountpoint + "/.wcfs/zhead")
defer(wc_zhead.close)
def wcsync():
while len(Z.wctidv) < len(Z.tidv):
l = wc_zhead.readline()
l = l.rstrip('\n')
wctid = fromhex(l)
i = len(Z.wctidv)
if wctid != Z.tidv[i]:
raise RuntimeError("wcsync #%d: Z.wctid (%s) != Z.tid (%s)" % (i, wctid, Z.tidv[i]))
Z.wctidv.append(wctid)
tid1 = commit()
tid2 = commit()
assert tidtime(tid2) > tidtime(tid1)
wc = wcfs.join(testzurl, autostart=True)
defer(wc.close)
wcsync()
return
# path to head/ and head/bigfile/ under wcfs
head = wc.mountpoint + "/head"