Commit f6e10c9d authored by Kirill Smelkov

.

parent 4bcde975
@@ -218,7 +218,16 @@
 // data directly into the file.
 package main

+// WCFS organization
+//
+// TODO
+//
+// - 1 ZODB connection per 1 bigfile (each bigfile can be at its different @at,
+//   because invalidations for different bigfiles can be processed with different
+//   timings depending on clients). No harm here as different bigfiles use
+//   completely different ZODB BTree and data objects.
+//
+//
 // Notes on OS pagecache control:
 //
 // the cache of snapshotted bigfile can be pre-made hot, if invalidated region
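As a rough sketch of the "1 ZODB connection per 1 bigfile" idea added above: a hypothetical helper (the name and exact zodb API details are assumed, following lab.nexedi.com/kirr/neo/go/zodb) that opens a dedicated connection pinned to one bigfile's @at:

// openBigFileConn is a sketch, not code from this commit: it opens a
// dedicated ZODB connection for one bigfile, pinned to that bigfile's own
// @at view. zodb.DB.Open and zodb.ConnOptions are assumed to be as in
// lab.nexedi.com/kirr/neo/go/zodb; imports are as in wcfs.go itself.
func openBigFileConn(ctx context.Context, zdb *zodb.DB, at zodb.Tid) (*zodb.Connection, error) {
	// different bigfiles use disjoint BTree and data objects, so no
	// sharing between their connections is needed.
	return zdb.Open(ctx, &zodb.ConnOptions{At: at})
}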
@@ -241,12 +250,6 @@ package main
 // link above), but better we have proper FUSE flag for filesystem server to
 // tell the kernel it is fully responsible for invalidating pagecache.

-// TODO implementation organization.
-// - 1 ZODB connection per 1 bigfile (each bigfile can be at its different @at,
-//   because invalidations for different bigfiles can be processed with different
-//   timings depending on clients). No harm here as different bigfiles use
-//   completely different ZODB BTree and data objects.

 import (
 	"context"
 	"flag"
@@ -283,49 +286,44 @@ type BigFileRoot struct {
 type BigFileDir struct {
 	nodefs.Node
-	zdb *zodb.DB
-	// zconn *zodb.Conn
-	//oid zodb.Oid
-	//root *BigFileRoot
+	zdb *zodb.DB
 }
 // BigFileHead represents "/bigfile/<bigfileX>/head"
 // XXX -> BigFileRev (with head | @tid) ?
 type BigFileHead struct {
 	nodefs.Node
-	//x *BigFileDir
 	data *BigFileData
-	//inv *BigFileInvalidations
 }
-// BigFile is object that serves "/bigfile/<bigfileX>/<rev>/{data,at}"
+// BigFile is object that serves "/bigfile/<bigfileX>/(head|<rev>)/{data,at}"
 type BigFile struct {
 	// current read-only transaction under which we access ZODB data
 	txnCtx context.Context // XXX -> better directly store txn

-	// connection via which ZODB object for this bigfile are accessed
+	// connection via which ZODB objects for this bigfile are accessed
 	// XXX do we need to keep it here explicitly?
 	zconn *zodb.Connection

-	zbf *ZBigFile // XXX kept always activated
+	// ZBigFile top-level object. Kept always activated (XXX clarify)
+	zbf *ZBigFile

 	// TODO
 	// lastChange zodb.Tid // last change to whole bigfile as of .zconn.At view
 }
-// BigFileData represents "/bigfile/<bigfileX>/head/data"
-// XXX also @<tidX>/data ?
+// BigFileData represents "/bigfile/<bigfileX>/(head|<rev>)/data"
 type BigFileData struct {
 	nodefs.Node
 	bigfile *BigFile

-	// inflight loadings of ZBigFile from ZODB
+	// inflight loadings of ZBigFile from ZODB.
+	// successful load results are kept here until blkdata is put to OS pagecache.
 	loadMu sync.Mutex
-	loading map[int64]*blkLoadState // #blk -> ...
+	loading map[int64]*blkLoadState // #blk -> {... blkdata}
 }
 // blkLoadState represents a ZBlk load state/result.
@@ -340,6 +338,9 @@ type blkLoadState struct {
 }

 // ----------------------------------------

+// /bigfile -> Mkdir receives client request to create /bigfile/<bigfileX>.
+//
+// It creates <bigfileX>/head/data along the way.
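The loading map above is what deduplicates concurrent loads of one block. A sketch of the usual pattern, assuming blkLoadState carries ready/blkdata/err fields and a ZBigFile.LoadBlk loader (both are assumptions here):

// loadBlk is a sketch, not code from this commit: the first reader of a
// block creates the loading entry and performs the ZODB load; concurrent
// readers of the same block find the entry and wait on its ready channel.
func (f *BigFileData) loadBlk(ctx context.Context, blk int64) ([]byte, error) {
	f.loadMu.Lock()
	ls, ok := f.loading[blk]
	if ok {
		f.loadMu.Unlock()
		<-ls.ready // another reader is already loading #blk
		return ls.blkdata, ls.err
	}
	ls = &blkLoadState{ready: make(chan struct{})}
	f.loading[blk] = ls
	f.loadMu.Unlock()

	// we are the first reader of #blk - do the actual load
	ls.blkdata, ls.err = f.bigfile.zbf.LoadBlk(ctx, blk) // LoadBlk assumed
	close(ls.ready) // wake waiters; entry stays until blkdata reaches pagecache
	return ls.blkdata, ls.err
}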
@@ -391,7 +392,7 @@ func (bfroot *BigFileRoot) Mkdir(name string, mode uint32, fctx *fuse.Context) (
 	case *zodb.NoObjectError:
 		return nil, fuse.EINVAL
 	case *zodb.NoDataError:
-		return nil, fuse.EINVAL // XXX ok?
+		return nil, fuse.EINVAL // XXX what to do if it was existing and got deleted?
 	default:
 		log.Printf("/bigfile: mkdir %q: %s", name, err)
 		return nil, fuse.EIO
@@ -677,12 +678,10 @@ func main() {
 		Name:          "wcfs",
 		DisableXAttrs: true, // we don't use
-		Debug: *debug,
+		Debug:         *debug,
 	}

 	root := nodefs.NewDefaultNode()
 	fssrv, fsconn, err := mount(mntpt, root, opts)
 	if err != nil {
 		log.Fatal(err)
@@ -692,7 +691,7 @@ func main() {
 	// we require proper pagecache control (added to Linux 2.6.36 in 2010)
 	supports := fssrv.KernelSettings().SupportsNotify
 	if !(supports(fuse.NOTIFY_STORE) && supports(fuse.NOTIFY_RETRIEVE)) {
-		log.Fatalf("kernel FUSE does not support pagecache control") // XXX more details?
+		log.Fatalf("kernel FUSE does not support pagecache control")
 	}

 	// add entries to /