Commit 6a3a590a authored by Kirill Smelkov

.

parent a41635da
@@ -100,6 +100,36 @@ func asctx(fctx *fuse.Context) context.Context {
 }
 
+// nodefs.DefaultNode.Open returns ENOSYS. This is convenient for filesystems
+// that have no dynamic files at all. But for filesystems where there are some
+// dynamic files - i.e. nodes which do need to support Open - returning ENOSYS
+// from any single node will make the kernel think that the filesystem does not
+// support Open at all.
+//
+// In wcfs we have dynamic files (e.g. /head/watch), so we have to avoid
+// returning ENOSYS on nodes that do not need file handles.
+//
+// defaultNode is like nodefs.defaultNode, but Open returns to the kernel fh=0
+// and FOPEN_KEEP_CACHE - similarly to how the openless case is handled there.
+type defaultNode struct {
+	nodefs.Node
+}
+
+func (n *defaultNode) Open(flags uint32, fctx *fuse.Context) (nodefs.File, fuse.Status) {
+	// XXX return something else for directory?
+	return &nodefs.WithFlags{
+		File:      nil,
+		FuseFlags: fuse.FOPEN_KEEP_CACHE,
+	}, fuse.OK
+}
+
+func newDefaultNode() nodefs.Node {
+	return &defaultNode{
+		Node: nodefs.NewDefaultNode(),
+	}
+}
+
 // NewStaticFile creates nodefs.Node for file with static data.
 func NewStaticFile(data []byte) *SmallFile {
 	return NewSmallFile(func() []byte {
@@ -116,7 +146,7 @@ type SmallFile struct {
 }
 
 func NewSmallFile(readData func() []byte) *SmallFile {
-	return &SmallFile{Node: nodefs.NewDefaultNode(), readData: readData}
+	return &SmallFile{Node: newDefaultNode(), readData: readData}
 }
 
 func (f *SmallFile) GetAttr(out *fuse.Attr, _ nodefs.File, _ *fuse.Context) fuse.Status {
......
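Aside - a minimal sketch, not part of this commit, of how a node built on newDefaultNode() behaves: Open is inherited from defaultNode and answers with fh=0 + FOPEN_KEEP_CACHE instead of ENOSYS, so the kernel keeps sending OPEN for the nodes that really do implement it (e.g. /head/watch). pingNode, its payload and the package placement are assumptions for illustration only.

// Sketch, not from the commit: a read-only node that needs no file handles.
// It is assumed to live in the same package (package main) as the code above,
// so it can reuse newDefaultNode().
package main

import (
	"github.com/hanwen/go-fuse/fuse"
	"github.com/hanwen/go-fuse/fuse/nodefs"
)

// pingData is a hypothetical static payload.
var pingData = []byte("pong\n")

type pingNode struct {
	nodefs.Node // set to newDefaultNode(), not nodefs.NewDefaultNode()
}

func newPingNode() *pingNode {
	return &pingNode{Node: newDefaultNode()}
}

func (p *pingNode) GetAttr(out *fuse.Attr, _ nodefs.File, _ *fuse.Context) fuse.Status {
	out.Mode = fuse.S_IFREG | 0444
	out.Size = uint64(len(pingData))
	return fuse.OK
}

// Read serves the payload directly from the node - no per-open file handle is
// needed, which is exactly the "openless" case defaultNode targets.
func (p *pingNode) Read(_ nodefs.File, dest []byte, off int64, _ *fuse.Context) (fuse.ReadResult, fuse.Status) {
	if off >= int64(len(pingData)) {
		return fuse.ReadResultData(nil), fuse.OK
	}
	return fuse.ReadResultData(pingData[off:]), fuse.OK
}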
@@ -843,13 +843,13 @@ func (root *Root) mkdir(name string, fctx *fuse.Context) (_ *nodefs.Inode, err e
 	// XXX -> newHead()
 	revDir := &Head{
-		Node:  nodefs.NewDefaultNode(),
+		Node:  newDefaultNode(),
		rev:   rev,
		zconn: zconnRev,
 	}
 
 	bfdir := &BigFileDir{
-		Node:    nodefs.NewDefaultNode(),
+		Node:    newDefaultNode(),
		head:    revDir,
		fileTab: make(map[zodb.Oid]*BigFile),
 	}
@@ -915,7 +915,7 @@ func (head *Head) bigopen(ctx context.Context, oid zodb.Oid) (_ *BigFile, err er
 
 	// zconn.Incref()
 	return &BigFile{
-		Node:    nodefs.NewDefaultNode(),
+		Node:    newDefaultNode(),
		head:    head,
		zbf:     zbf,
		zbfSize: zbfSize,
@@ -1161,6 +1161,10 @@ var gdebug = struct {
	zheadSockTab map[*FileSock]struct{}
 }{}
 
+func init() {
+	gdebug.zheadSockTab = make(map[*FileSock]struct{})
+}
+
 // _wcfs_Zhead serves .wcfs/zhead opens.
 type _wcfs_Zhead struct {
	nodefs.Node
@@ -1221,19 +1225,19 @@ func main() {
 	// mount root + head/
 	// XXX -> newHead()
 	head := &Head{
-		Node:  nodefs.NewDefaultNode(),
+		Node:  newDefaultNode(),
		rev:   0,
		zconn: zhead,
 	}
 
 	bfdir := &BigFileDir{
-		Node:    nodefs.NewDefaultNode(),
+		Node:    newDefaultNode(),
		head:    head,
		fileTab: make(map[zodb.Oid]*BigFile),
 	}
 	head.bfdir = bfdir
 
 	root := &Root{
-		Node:  nodefs.NewDefaultNode(),
+		Node:  newDefaultNode(),
		zstor: zstor,
		zdb:   zdb,
		head:  head,
@@ -1269,7 +1273,7 @@ func main() {
 	// XXX ^^^ invalidate cache or direct io
 
 	// for debugging/testing
-	_wcfs := nodefs.NewDefaultNode()
+	_wcfs := newDefaultNode()
 	mkdir(root, ".wcfs", _wcfs)
 	mkfile(_wcfs, "zurl", NewStaticFile([]byte(zurl)))
@@ -1280,7 +1284,7 @@ func main() {
 	// There can be multiple openers. Once opened, the file must be read,
 	// as wcfs blocks waiting for data to be read.
 	mkfile(_wcfs, "zhead", &_wcfs_Zhead{
-		Node: nodefs.NewDefaultNode(),
+		Node: newDefaultNode(),
 	})
 
 	// TODO handle autoexit
......
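Aside - a sketch, not part of this commit, of how a client could follow the .wcfs/zhead debugging file from outside the test suite. It assumes the format the test below relies on (one hex-encoded transaction id per line, with the read blocking until wcfs sees the next ZODB commit); the mountpoint path is hypothetical.

// Sketch: follow .wcfs/zhead of a mounted wcfs and report each head revision.
package main

import (
	"bufio"
	"fmt"
	"log"
	"os"
)

func main() {
	zhead, err := os.Open("/tmp/wcfs/test/.wcfs/zhead") // hypothetical mountpoint
	if err != nil {
		log.Fatal(err)
	}
	defer zhead.Close()

	// wcfs blocks the read until new data is available, so this loop wakes up
	// roughly once per ZODB transaction that wcfs has synced to.
	r := bufio.NewReader(zhead)
	for {
		line, err := r.ReadString('\n')
		if err != nil {
			log.Fatal(err) // EOF is not expected while wcfs stays mounted
		}
		fmt.Printf("wcfs synced to tid %s", line)
	}
}

Several such readers can be attached at the same time, matching the "multiple openers" note in the hunk above.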
@@ -121,6 +121,7 @@ def test_join_autostart():
     assert os.path.isdir(wc.mountpoint + "/head/bigfile")
 
 
+"""
 # XXX hack -> kill (?) or read-wait on head/at
 # Conn._sync makes sure that underlying wcfs is synced to last ZODB data
 @method(wcfs.Conn)
@@ -140,15 +141,19 @@ def _sync(self):
         os.stat("%s/head/bigfile/%s" % (self.mountpoint, bf))
 
     print >>sys.stderr, "# <<< wcfs.Conn.sync\n"
+"""
 
 
 # XXX parametrize zblk0, zblk1
 # XXX select !wcfs mode so that we prepare data through !wcfs path.
 @func
-def test_bigfile_empty(): # XXX name
+def test_wcfs():
     root = testdb.dbopen()
     defer(lambda: dbclose(root))
 
+    wc = wcfs.join(testzurl, autostart=True)
+    defer(wc.close)
+
     root['zfile'] = f = ZBigFile(blksize)
 
     # NOTE there is no clean way to retrieve tid of just committed transaction
@@ -170,12 +175,26 @@ def test_bigfile_empty(): # XXX name
         Z.tidv.append(tid)
         return tid
 
+    # wcsync makes sure wc is synchronized to the latest committed transaction
+    Z.wctidv = []
+    wc_zhead = open(wc.mountpoint + "/.wcfs/zhead")
+    defer(wc_zhead.close)
+    def wcsync():
+        while len(Z.wctidv) < len(Z.tidv):
+            l = wc_zhead.readline()
+            l = l.rstrip('\n')
+            wctid = fromhex(l)
+            i = len(Z.wctidv)
+            if wctid != Z.tidv[i]:
+                raise RuntimeError("wcsync #%d: Z.wctid (%s) != Z.tid (%s)" % (i, wctid, Z.tidv[i]))
+            Z.wctidv.append(wctid)
+
     tid1 = commit()
     tid2 = commit()
     assert tidtime(tid2) > tidtime(tid1)
 
-    wc = wcfs.join(testzurl, autostart=True)
-    defer(wc.close)
+    wcsync()
+    return
 
     # path to head/ and head/bigfile/ under wcfs
     head = wc.mountpoint + "/head"
......