Commit 372fab58 authored by Kirill Smelkov

.

parent 71bb99b2
// Copyright (C) 2021 Nexedi SA and Contributors.
// Kirill Smelkov <kirr@nexedi.com>
//
// This program is free software: you can Use, Study, Modify and Redistribute
// it under the terms of the GNU General Public License version 3, or (at your
// option) any later version, as published by the Free Software Foundation.
//
// You can also Link and Combine this program with other software covered by
// the terms of any of the Free Software licenses or any of the Open Source
// Initiative approved licenses and Convey the resulting work. Corresponding
// source of such a combination shall include the source code for all other
// software used.
//
// This program is distributed WITHOUT ANY WARRANTY; without even the implied
// warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
//
// See COPYING file for full licensing terms.
// See https://www.nexedi.com/licensing for rationale and options.
package set
//go:generate ./gen-set set I64 int64 zset_i64.go
//go:generate ./gen-set set Oid _Oid zset_oid.go
import (
"lab.nexedi.com/kirr/neo/go/zodb"
)
type _Oid = zodb.Oid
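Judging from these directives and from the generated file headers later in this commit, gen-set appears to take four arguments: target package, type-name suffix, element type and output file. As a hedged illustration only (not part of the commit), one more element type could presumably be produced with a directive like
//go:generate ./gen-set set Str string zset_str.go
after which running "go generate ./..." in this directory would (re)create zset_str.go with a set.SetStr type mirroring SetI64 and SetOid.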
@@ -19,6 +19,12 @@
package PACKAGE
import (
"fmt"
"sort"
"strings"
)
// Set is a set of VALUE.
type Set map[VALUE]struct{}
@@ -129,3 +135,22 @@ func (a Set) Equal(b Set) bool {
return true
}
// --------
func (s Set) SortedElements() []VALUE {
ev := s.Elements()
sort.Slice(ev, func(i, j int) bool {
return ev[i] < ev[j]
})
return ev
}
func (s Set) String() string {
ev := s.SortedElements()
strv := make([]string, len(ev))
for i, v := range ev {
strv[i] = fmt.Sprintf("%v", v)
}
return "{" + strings.Join(strv, " ") + "}"
}
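For illustration, a minimal sketch of how the new SortedElements/String template methods behave once instantiated (shown here through the generated set.SetI64; this snippet is illustrative only, not part of the commit):

package main

import (
	"fmt"

	"lab.nexedi.com/nexedi/wendelin.core/wcfs/internal/set"
)

func main() {
	s := set.SetI64{}
	s.Add(3)
	s.Add(1)
	s.Add(2)
	// String sorts elements first, so the output is stable across runs.
	fmt.Println(s) // {1 2 3}
}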
@@ -19,7 +19,13 @@
// See COPYING file for full licensing terms.
// See https://www.nexedi.com/licensing for rationale and options.
-package main
+package set
import (
"fmt"
"sort"
"strings"
)
// SetI64 is a set of int64.
type SetI64 map[int64]struct{}
@@ -131,3 +137,22 @@ func (a SetI64) Equal(b SetI64) bool {
return true
}
// --------
func (s SetI64) SortedElements() []int64 {
ev := s.Elements()
sort.Slice(ev, func(i, j int) bool {
return ev[i] < ev[j]
})
return ev
}
func (s SetI64) String() string {
ev := s.SortedElements()
strv := make([]string, len(ev))
for i, v := range ev {
strv[i] = fmt.Sprintf("%v", v)
}
return "{" + strings.Join(strv, " ") + "}"
}
-// Code generated by gen-set Oid Oid; DO NOT EDIT.
+// Code generated by gen-set Oid _Oid; DO NOT EDIT.
// Copyright (C) 2015-2021 Nexedi SA and Contributors.
// Kirill Smelkov <kirr@nexedi.com>
@@ -19,24 +19,30 @@
// See COPYING file for full licensing terms.
// See https://www.nexedi.com/licensing for rationale and options.
-package main
+package set
import (
"fmt"
"sort"
"strings"
)
-// SetOid is a set of Oid.
+// SetOid is a set of _Oid.
-type SetOid map[Oid]struct{}
+type SetOid map[_Oid]struct{}
// Add adds v to the set.
-func (s SetOid) Add(v Oid) {
+func (s SetOid) Add(v _Oid) {
s[v] = struct{}{}
}
// Del removes v from the set.
// it is noop if v was not in the set.
-func (s SetOid) Del(v Oid) {
+func (s SetOid) Del(v _Oid) {
delete(s, v)
}
// Has checks whether the set contains v.
-func (s SetOid) Has(v Oid) bool {
+func (s SetOid) Has(v _Oid) bool {
_, ok := s[v]
return ok
}
@@ -49,8 +55,8 @@ func (s SetOid) Update(t SetOid) {
}
// Elements returns all elements of set as slice.
-func (s SetOid) Elements() []Oid {
+func (s SetOid) Elements() []_Oid {
-ev := make([]Oid, len(s))
+ev := make([]_Oid, len(s))
i := 0
for e := range s {
ev[i] = e
@@ -131,3 +137,22 @@ func (a SetOid) Equal(b SetOid) bool {
return true
}
// --------
func (s SetOid) SortedElements() []_Oid {
ev := s.Elements()
sort.Slice(ev, func(i, j int) bool {
return ev[i] < ev[j]
})
return ev
}
func (s SetOid) String() string {
ev := s.SortedElements()
strv := make([]string, len(ev))
for i, v := range ev {
strv[i] = fmt.Sprintf("%v", v)
}
return "{" + strings.Join(strv, " ") + "}"
}
// Copyright (C) 2018-2021 Nexedi SA and Contributors.
// Kirill Smelkov <kirr@nexedi.com>
//
// This program is free software: you can Use, Study, Modify and Redistribute
// it under the terms of the GNU General Public License version 3, or (at your
// option) any later version, as published by the Free Software Foundation.
//
// You can also Link and Combine this program with other software covered by
// the terms of any of the Free Software licenses or any of the Open Source
// Initiative approved licenses and Convey the resulting work. Corresponding
// source of such a combination shall include the source code for all other
// software used.
//
// This program is distributed WITHOUT ANY WARRANTY; without even the implied
// warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
//
// See COPYING file for full licensing terms.
// See https://www.nexedi.com/licensing for rationale and options.
package xbtree
import (
"fmt"
)
func panicf(format string, argv ...interface{}) {
panic(fmt.Sprintf(format, argv...))
}
@@ -17,7 +17,7 @@
// See COPYING file for full licensing terms.
// See https://www.nexedi.com/licensing for rationale and options.
-package main
+package xbtree
// PP-connected subset of tree nodes.
import (
...
@@ -17,9 +17,8 @@
// See COPYING file for full licensing terms.
// See https://www.nexedi.com/licensing for rationale and options.
-package main
+package xbtree
// diff for BTrees
// XXX move -> btree? (needs generics)
// XXX doc
// FIXME the algorithm is different: recursion is implemented by expanding rangeSplit step by step.
@@ -71,45 +70,22 @@ package main
// . -k(blk) -> invalidate #blk
// . +k(blk) -> invalidate #blk (e.g. if blk was previously read as hole)
//go:generate ./gen-set main Oid Oid zset_oid.go
import (
"context"
"math"
"fmt"
"reflect"
"sort"
"lab.nexedi.com/kirr/go123/xerr"
"lab.nexedi.com/kirr/neo/go/zodb"
"lab.nexedi.com/kirr/neo/go/zodb/btree"
)
type Tree = btree.LOBTree
type Bucket = btree.LOBucket
type Node = btree.LONode
type TreeEntry = btree.LOEntry
type BucketEntry = btree.LOBucketEntry
type Key = int64
const KeyMax Key = math.MaxInt64
const KeyMin Key = math.MinInt64
// value is assumed to be persistent reference.
// deletion is represented as VDEL.
type Value = zodb.Oid
const VDEL = zodb.InvalidOid
// ΔValue represents change in value.
type ΔValue struct {
Old Value
New Value
}
type Oid = zodb.Oid
type SetKey = SetI64
// treeSetKey represents ordered set of keys.
// it can be point-queried and range-accessed.
...
// Package xbtree complements package lab.nexedi.com/kirr/neo/go/zodb/btree.
// XXX doc
package xbtree
import (
"math"
"lab.nexedi.com/kirr/neo/go/zodb"
"lab.nexedi.com/kirr/neo/go/zodb/btree"
"lab.nexedi.com/nexedi/wendelin.core/wcfs/internal/set"
)
// XXX instead of generics
type Tree = btree.LOBTree
type Bucket = btree.LOBucket
type Node = btree.LONode
type TreeEntry = btree.LOEntry
type BucketEntry = btree.LOBucketEntry
type Key = int64
const KeyMax Key = math.MaxInt64
const KeyMin Key = math.MinInt64
// value is assumed to be persistent reference.
// deletion is represented as VDEL.
type Value = zodb.Oid
const VDEL = zodb.InvalidOid
type SetKey = set.SetI64
type SetOid = set.SetOid
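A hedged sketch of how these aliases are meant to be used by client code (illustrative only; it assumes nothing beyond the declarations above):

package main

import (
	"fmt"

	"lab.nexedi.com/nexedi/wendelin.core/wcfs/internal/xbtree"
)

func main() {
	// values are ZODB object references; a deleted entry is modelled as VDEL.
	var v xbtree.Value = xbtree.VDEL
	if v == xbtree.VDEL {
		fmt.Println("entry deleted")
	}
	fmt.Println("key range:", xbtree.KeyMin, "..", xbtree.KeyMax)
}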
@@ -17,10 +17,8 @@
// See COPYING file for full licensing terms.
// See https://www.nexedi.com/licensing for rationale and options.
-package main
+package xbtree
-// XXX -> Package xbtree complements package lab.nexedi.com/kirr/neo/go/zodb/btree.
-// TODO move -> btree when ΔTail matures.
-// XXX doc
+// ΔBtail
import (
"context"
@@ -30,6 +28,8 @@ import (
"lab.nexedi.com/kirr/go123/xerr"
"lab.nexedi.com/kirr/neo/go/transaction"
"lab.nexedi.com/kirr/neo/go/zodb"
"lab.nexedi.com/nexedi/wendelin.core/wcfs/internal/xtail"
)
// ΔBtail represents tail of revisional changes to BTrees.
@@ -80,7 +80,7 @@ import (
// See also zodb.ΔTail
// XXX naming -> ΔBTail ?
type ΔBtail struct {
-// raw ZODB changes; Kept to rebuild δBtail/byRoot after new Track.
+// raw ZODB changes; Kept to rebuild .byRoot after new Track.
// includes all changed objects, not only tracked ones.
δZtail *zodb.ΔTail
@@ -544,7 +544,7 @@ func (δBtail *ΔBtail) GetAt(ctx context.Context, root *Tree, key Key, at zodb.
// XXX don't need
//func (δBtail *ΔBtail) SliceByRev(lo, hi zodb.Tid) /*readonly*/ []ΔB {
-// δassertSlice(δBtail, lo, hi)
+// xtail.AssertSlice(δBtail, lo, hi)
// panic("TODO")
//}
@@ -560,7 +560,7 @@ func (δBtail *ΔBtail) GetAt(ctx context.Context, root *Tree, key Key, at zodb.
//
// XXX root -> *Tree ?
func (δBtail *ΔBtail) SliceByRootRev(root zodb.Oid, lo, hi zodb.Tid) /*readonly*/[]ΔTree {
-δassertSlice(δBtail, lo, hi)
+xtail.AssertSlice(δBtail, lo, hi)
// XXX locking
δTtail, ok := δBtail.byRoot[root]
if !ok {
@@ -571,13 +571,9 @@ func (δBtail *ΔBtail) SliceByRootRev(root zodb.Oid, lo, hi zodb.Tid) /*readonl
}
-// ---- misc ----
-// δassertSlice asserts that δ.tail ≤ lo ≤ hi ≤ δ.head
-func δassertSlice(δ interface { Head() zodb.Tid; Tail() zodb.Tid }, lo, hi zodb.Tid) {
-tail := δ.Tail()
-head := δ.Head()
-if !(tail <= lo && lo <= hi && hi <= head) {
-panicf("invalid slice: (%s, %s]; (tail, head] = (%s, %s]", lo, hi, tail, head)
-}
+// ΔZtail returns raw ZODB changes accumulated in δBtail so far.
+//
+// the caller must not modify returned δZtail.
+func (δBtail *ΔBtail) ΔZtail() /*readonly*/*zodb.ΔTail {
+return δBtail.δZtail
}
@@ -17,7 +17,7 @@
// See COPYING file for full licensing terms.
// See https://www.nexedi.com/licensing for rationale and options.
-package main
+package xbtree
// tests for δbtail.go
// XXX doc (2 ways of testing: explicit + allstructs), treegen py helper
@@ -42,6 +42,9 @@ import (
"lab.nexedi.com/kirr/go123/xerr"
"lab.nexedi.com/kirr/neo/go/transaction"
"lab.nexedi.com/kirr/neo/go/zodb"
_ "lab.nexedi.com/kirr/neo/go/zodb/wks"
"lab.nexedi.com/nexedi/wendelin.core/wcfs/internal/xzodb"
)
const kInf Key = 10000 // inf key (TeX hack)
@@ -245,9 +248,9 @@ func (tg *AllStructsSrv) AllStructs(kv1, kv2 map[Key]string, maxdepth, maxsplit,
structv = append(structv, reply)
}
}
// RTree represents Tree node covering [lo, hi_] key range in its parent tree.
type RTree struct {
oid zodb.Oid
@@ -378,7 +381,7 @@ func XGetTree(db *zodb.DB, at zodb.Tid, root zodb.Oid) RBucketSet {
xztree, err := zconn.Get(ctx, root); X(err)
ztree, ok := xztree.(*Tree)
if !ok {
-exc.Raisef("expected %s, got %s", typeOf(ztree), typeOf(xztree))
+exc.Raisef("expected %s, got %s", xzodb.TypeOf(ztree), xzodb.TypeOf(xztree))
}
rbucketv := RBucketSet{}
@@ -402,28 +405,6 @@ func XGetTree(db *zodb.DB, at zodb.Tid, root zodb.Oid) RBucketSet {
return rbucketv
}
// ztreeGetBlk returns ztree[k] and tree path that lead to this block.
// XXX naming, place
// XXX +return blkRevMax and use it
func ztreeGetBlk(ctx context.Context, ztree *Tree, k Key) (zblk ZBlk, ok bool, path []Node, err error) {
path = []Node{}
xzblk, ok, err := ztree.VGet(ctx, k, func(node Node) {
path = append(path, node)
})
if err != nil {
return nil, false, nil, err
}
if ok {
zblk, ok = xzblk.(ZBlk)
if !ok {
return nil, false, nil, fmt.Errorf("expect ZBlk*; got %s", typeOf(xzblk)) // XXX errctx
}
}
return zblk, ok, path, nil
}
// xwalkDFS walks ztree in depth-first order emitting bvisit callback on visited bucket nodes.
func xwalkDFS(ctx context.Context, lo, hi_ Key, ztree *Tree, bvisit func(*RBucket)) {
_xwalkDFS(ctx, lo, hi_, ztree, /*rparent*/nil, bvisit)
@@ -461,13 +442,15 @@ func _xwalkDFS(ctx context.Context, lo, hi_ Key, ztree *Tree, rparent *RTree, bv
for _, __ := range bentryv {
k := __.Key()
xv := __.Value()
-zv, ok := xv.(ZBlk)
+pv, ok := xv.(zodb.IPersistent)
if !ok {
-exc.Raisef("[%d] -> %s; want ZBlk", k, typeOf(xv))
+exc.Raisef("[%d] -> %s; want IPersistent", k, xzodb.TypeOf(xv))
}
-data, _, err := zv.loadBlkData(ctx); X(err)
-bkv[k] = string(data)
+data, err := ZGetBlkData(ctx, pv.PJar(), pv.POid())
+if err != nil {
+exc.Raisef("[%d]: %s", k, err)
+}
+bkv[k] = data
}
@@ -497,31 +480,6 @@ func XGetδKV(db *zodb.DB, at1, at2 zodb.Tid, δkvOid map[Key]ΔValue) map[Key]
return δkv
}
// xzgetBlkData loads block data from ZBlk object specified by its oid.
func xzgetBlkData(ctx context.Context, zconn *zodb.Connection, zblkOid zodb.Oid) string {
X := exc.Raiseif
if zblkOid == VDEL {
return DEL
}
xblk, err := zconn.Get(ctx, zblkOid); X(err)
zblk := xblk.(ZBlk)
data, _, err := zblk.loadBlkData(ctx); X(err)
return string(data)
}
// xzgetBlkDataAt loads block data from ZBlk object specified by oid@at.
func xzgetBlkDataAt(db *zodb.DB, zblkOid zodb.Oid, at zodb.Tid) string {
X := exc.Raiseif
txn, ctx := transaction.New(context.Background())
defer txn.Abort()
zconn, err := db.Open(ctx, &zodb.ConnOptions{At: at}); X(err)
return xzgetBlkData(ctx, zconn, zblkOid)
}
// KAdjMatrix is adjacency matrix that describes how set of tracked keys
// changes (always grow) when tree topology is updated from A to B.
//
@@ -710,7 +668,7 @@ func xverifyΔBTail_Update1(t *testing.T, subj string, db *zodb.DB, treeRoot zod
kadjTracked = SetKey{} // kadj[Tracked] (all keys adjacent to tracked keys)
for k := range initialTrackedKeys {
-_, ok, path, err := ztreeGetBlk(ctx, ztree, k); X(err)
+_, ok, path, err := ZTreeGetBlkData(ctx, ztree, k); X(err)
err = δbtail.Track(k, ok, path); X(err)
kadjTracked.Update(kadj[k])
@@ -1024,7 +982,7 @@ func xverifyΔBTail_rebuild_TR(t *testing.T, db *zodb.DB, δbtail *ΔBtail, tj *
xtree, err := zconn.Get(ctx, treeRoot); X(err)
ztree := xtree.(*Tree)
for k := range keys {
-_, ok, path, err := ztreeGetBlk(ctx, ztree, k); X(err)
+_, ok, path, err := ZTreeGetBlkData(ctx, ztree, k); X(err)
err = δbtail.Track(k, ok, path); X(err)
}
@@ -1134,7 +1092,7 @@ func xverifyΔBTail_GetAt1(t *testing.T, db *zodb.DB, treeRoot zodb.Oid, vt []*t
ztree := xtree.(*Tree)
for k := range keys {
-_, ok, path, err := ztreeGetBlk(ctx, ztree, k); X(err)
+_, ok, path, err := ZTreeGetBlkData(ctx, ztree, k); X(err)
err = δbtail.Track(k, ok, path); X(err)
}
@@ -1802,6 +1760,7 @@ func allTestKeys(vt ...*tTreeCommit) SetKey {
return allKeys
}
/*
// eases debugging / makes error output stable from run to run.
func (ks SetKey) SortedElements() []Key {
keyv := ks.Elements()
@@ -1818,7 +1777,7 @@ func (ks SetKey) String() string {
return "{" + strings.Join(strv, " ") + "}"
}
-func (os SetOid) SortedElements() []Oid {
+func (os SetOid) SortedElements() []zodb.Oid {
oidv := os.Elements()
sort.Slice(oidv, func(i, j int) bool {
return oidv[i] < oidv[j]
@@ -1832,6 +1791,7 @@ func (os SetOid) String() string {
}
return "{" + strings.Join(strv, " ") + "}"
}
*/
func sortedKeys(kv map[Key]Δstring) []Key {
keyv := []Key{}
@@ -1863,3 +1823,35 @@ func (δv ΔValue) String() string {
}
return fmt.Sprintf("{%s %s}", old, new)
}
// ----------------------------------------
// ZBlk-related functions are imported at runtime by δbtail_x_test
var (
ZTreeGetBlkData func(context.Context, *Tree, Key) (string, bool, []Node, error)
ZGetBlkData func(context.Context, *zodb.Connection, zodb.Oid) (string, error)
)
// xzgetBlkData loads block data from ZBlk object specified by its oid.
func xzgetBlkData(ctx context.Context, zconn *zodb.Connection, zblkOid zodb.Oid) string {
X := exc.Raiseif
if zblkOid == VDEL {
return DEL
}
data, err := ZGetBlkData(ctx, zconn, zblkOid); X(err)
return string(data)
}
// xzgetBlkDataAt loads block data from ZBlk object specified by oid@at.
func xzgetBlkDataAt(db *zodb.DB, zblkOid zodb.Oid, at zodb.Tid) string {
X := exc.Raiseif
txn, ctx := transaction.New(context.Background())
defer txn.Abort()
zconn, err := db.Open(ctx, &zodb.ConnOptions{At: at}); X(err)
return xzgetBlkData(ctx, zconn, zblkOid)
}
// Copyright (C) 2020-2021 Nexedi SA and Contributors.
// Kirill Smelkov <kirr@nexedi.com>
//
// This program is free software: you can Use, Study, Modify and Redistribute
// it under the terms of the GNU General Public License version 3, or (at your
// option) any later version, as published by the Free Software Foundation.
//
// You can also Link and Combine this program with other software covered by
// the terms of any of the Free Software licenses or any of the Open Source
// Initiative approved licenses and Convey the resulting work. Corresponding
// source of such a combination shall include the source code for all other
// software used.
//
// This program is distributed WITHOUT ANY WARRANTY; without even the implied
// warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
//
// See COPYING file for full licensing terms.
// See https://www.nexedi.com/licensing for rationale and options.
package xbtree_test
// ZBlk-related part of δbtail_test
import (
"context"
"fmt"
"lab.nexedi.com/kirr/go123/xerr"
"lab.nexedi.com/kirr/neo/go/zodb"
"lab.nexedi.com/nexedi/wendelin.core/wcfs/internal/xbtree"
"lab.nexedi.com/nexedi/wendelin.core/wcfs/internal/xzodb"
"lab.nexedi.com/nexedi/wendelin.core/wcfs/internal/zdata"
)
type Tree = xbtree.Tree
type Node = xbtree.Node
type Key = xbtree.Key
type ZBlk = zdata.ZBlk
// ztreeGetBlk returns ztree[k] and the tree path that leads to this block.
// XXX +return blkRevMax and use it ?
func ztreeGetBlk(ctx context.Context, ztree *Tree, k Key) (zblk ZBlk, ok bool, path []Node, err error) {
path = []Node{}
xzblk, ok, err := ztree.VGet(ctx, k, func(node Node) {
path = append(path, node)
})
if err != nil {
return nil, false, nil, err
}
if ok {
zblk, ok = xzblk.(ZBlk)
if !ok {
return nil, false, nil, fmt.Errorf("expect ZBlk*; got %s", xzodb.TypeOf(xzblk)) // XXX errctx
}
}
return zblk, ok, path, nil
}
func init() {
xbtree.ZTreeGetBlkData = ZTreeGetBlkData
xbtree.ZGetBlkData = ZGetBlkData
}
// ZTreeGetBlkData returns block data from block pointed to by ztree[k].
func ZTreeGetBlkData(ctx context.Context, ztree *Tree, k Key) (data string, ok bool, path []Node, err error) {
defer xerr.Contextf(&err, "@%s: tree<%s>: get blkdata from [%d]", ztree.PJar().At(), ztree.POid(), k)
zblk, ok, path, err := ztreeGetBlk(ctx, ztree, k)
if err != nil || !ok {
return "", ok, path, err
}
bdata, _, err := zblk.LoadBlkData(ctx)
if err != nil {
return "", false, nil, err
}
return string(bdata), true, path, nil
}
// ZGetBlkData loads block data from ZBlk object specified by its oid.
func ZGetBlkData(ctx context.Context, zconn *zodb.Connection, zblkOid zodb.Oid) (data string, err error) {
defer xerr.Contextf(&err, "@%s: get blkdata from obj %s", zconn.At(), zblkOid)
xblk, err := zconn.Get(ctx, zblkOid)
if err != nil {
return "", err
}
zblk, ok := xblk.(ZBlk)
if !ok {
return "", fmt.Errorf("expect ZBlk*; got %s", xzodb.TypeOf(xblk))
}
bdata, _, err := zblk.LoadBlkData(ctx)
if err != nil {
return "", err
}
return string(bdata), nil
}
// Copyright (C) 2018-2021 Nexedi SA and Contributors.
// Kirill Smelkov <kirr@nexedi.com>
//
// This program is free software: you can Use, Study, Modify and Redistribute
// it under the terms of the GNU General Public License version 3, or (at your
// option) any later version, as published by the Free Software Foundation.
//
// You can also Link and Combine this program with other software covered by
// the terms of any of the Free Software licenses or any of the Open Source
// Initiative approved licenses and Convey the resulting work. Corresponding
// source of such a combination shall include the source code for all other
// software used.
//
// This program is distributed WITHOUT ANY WARRANTY; without even the implied
// warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
//
// See COPYING file for full licensing terms.
// See https://www.nexedi.com/licensing for rationale and options.
// Package xtail provides utilities that are common to ΔBtail and ΔFtail.
package xtail
import (
"fmt"
"lab.nexedi.com/kirr/neo/go/zodb"
)
// AssertSlice asserts that δ.tail ≤ lo ≤ hi ≤ δ.head
func AssertSlice(δ interface { Head() zodb.Tid; Tail() zodb.Tid }, lo, hi zodb.Tid) {
tail := δ.Tail()
head := δ.Head()
if !(tail <= lo && lo <= hi && hi <= head) {
panic(fmt.Sprintf("invalid slice: (%s, %s]; (tail, head] = (%s, %s]", lo, hi, tail, head))
}
}
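A hedged sketch of how AssertSlice is meant to be called by a revision-tail structure (the δExample type is illustrative, not from the commit; anything exposing Head/Tail works):

package xtail_test

import (
	"lab.nexedi.com/kirr/neo/go/zodb"

	"lab.nexedi.com/nexedi/wendelin.core/wcfs/internal/xtail"
)

// δExample stands in for ΔBtail/ΔFtail.
type δExample struct{ head, tail zodb.Tid }

func (δ *δExample) Head() zodb.Tid { return δ.head }
func (δ *δExample) Tail() zodb.Tid { return δ.tail }

// SliceByRev panics via AssertSlice if (lo, hi] is not inside (tail, head].
func (δ *δExample) SliceByRev(lo, hi zodb.Tid) {
	xtail.AssertSlice(δ, lo, hi)
	// ... return changes in (lo, hi] ...
}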
// Copyright (C) 2018-2021 Nexedi SA and Contributors.
// Kirill Smelkov <kirr@nexedi.com>
//
// This program is free software: you can Use, Study, Modify and Redistribute
// it under the terms of the GNU General Public License version 3, or (at your
// option) any later version, as published by the Free Software Foundation.
//
// You can also Link and Combine this program with other software covered by
// the terms of any of the Free Software licenses or any of the Open Source
// Initiative approved licenses and Convey the resulting work. Corresponding
// source of such a combination shall include the source code for all other
// software used.
//
// This program is distributed WITHOUT ANY WARRANTY; without even the implied
// warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
//
// See COPYING file for full licensing terms.
// See https://www.nexedi.com/licensing for rationale and options.
// Package xzodb complements package zodb.
package xzodb
import (
"context"
"fmt"
"lab.nexedi.com/kirr/go123/xcontext"
"lab.nexedi.com/kirr/neo/go/transaction"
"lab.nexedi.com/kirr/neo/go/zodb"
)
// TypeOf returns string for object's type.
//
// - for ZODB objects, it uses zodb.ClassOf, which in particular supports
// printing zodb.Broken with details properly.
//
// - for other objects, it uses %T.
func TypeOf(obj interface{}) string {
switch obj := obj.(type) {
case zodb.IPersistent:
return zodb.ClassOf(obj)
default:
return fmt.Sprintf("%T", obj)
}
}
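For illustration, a small example of what TypeOf reports (not part of the commit; the exact class string for a ZODB object depends on that object):

package main

import (
	"fmt"

	"lab.nexedi.com/nexedi/wendelin.core/wcfs/internal/xzodb"
)

func main() {
	// non-ZODB values go through %T
	fmt.Println(xzodb.TypeOf(42))    // int
	fmt.Println(xzodb.TypeOf("abc")) // string
	// a zodb.IPersistent object would instead be reported via zodb.ClassOf,
	// which in particular renders zodb.Broken objects with their ZODB class.
}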
// ZConn is zodb.Connection + associated read-only transaction under which
// objects of the connection are accessed.
type ZConn struct {
*zodb.Connection
// read-only transaction under which we access zodb.Connection data.
TxnCtx context.Context // XXX -> better directly store txn
}
// ZOpen opens new connection to ZODB database + associated read-only transaction.
func ZOpen(ctx context.Context, zdb *zodb.DB, zopt *zodb.ConnOptions) (_ *ZConn, err error) {
// create new read-only transaction
txn, txnCtx := transaction.New(context.Background())
defer func() {
if err != nil {
txn.Abort()
}
}()
// XXX better ctx = transaction.PutIntoContext(ctx, txn)
ctx, cancel := xcontext.Merge(ctx, txnCtx)
defer cancel()
zconn, err := zdb.Open(ctx, zopt)
if err != nil {
return nil, err
}
return &ZConn{
Connection: zconn,
TxnCtx: txnCtx,
}, nil
}
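A hedged usage sketch for ZOpen (error handling shortened; it assumes a *zodb.DB is already at hand and that transaction.Current can recover the transaction from TxnCtx, as the pre-move wcfs code did):

package example

import (
	"context"

	"lab.nexedi.com/kirr/neo/go/transaction"
	"lab.nexedi.com/kirr/neo/go/zodb"

	"lab.nexedi.com/nexedi/wendelin.core/wcfs/internal/xzodb"
)

func withZConnAt(ctx context.Context, zdb *zodb.DB, at zodb.Tid) error {
	zconn, err := xzodb.ZOpen(ctx, zdb, &zodb.ConnOptions{At: at})
	if err != nil {
		return err
	}
	// the read-only transaction lives in zconn.TxnCtx; finish it when done.
	defer transaction.Current(zconn.TxnCtx).Abort()

	// ... use zconn.Get(ctx, oid) to access objects as of @at ...
	return nil
}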
// Copyright (C) 2018-2021 Nexedi SA and Contributors.
// Kirill Smelkov <kirr@nexedi.com>
//
// This program is free software: you can Use, Study, Modify and Redistribute
// it under the terms of the GNU General Public License version 3, or (at your
// option) any later version, as published by the Free Software Foundation.
//
// You can also Link and Combine this program with other software covered by
// the terms of any of the Free Software licenses or any of the Open Source
// Initiative approved licenses and Convey the resulting work. Corresponding
// source of such a combination shall include the source code for all other
// software used.
//
// This program is distributed WITHOUT ANY WARRANTY; without even the implied
// warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
//
// See COPYING file for full licensing terms.
// See https://www.nexedi.com/licensing for rationale and options.
package zdata
import (
"fmt"
"lab.nexedi.com/kirr/neo/go/zodb"
)
// tidmax returns latest revision.
func tidmax(a, b zodb.Tid) zodb.Tid {
if a > b {
return a
} else {
return b
}
}
// tidmin returns earliest revision.
func tidmin(a, b zodb.Tid) zodb.Tid {
if a < b {
return a
} else {
return b
}
}
func panicf(format string, argv ...interface{}) {
panic(fmt.Sprintf(format, argv...))
}
@@ -71,7 +71,7 @@ def main2():
def emit(v):
print >>f, v
emit("// Code generated by %s; DO NOT EDIT." % __file__)
-emit("package main\n")
+emit("package zdata\n")
emit('import "lab.nexedi.com/kirr/neo/go/zodb"\n')
emit("const zf_blksize = %d" % zf.blksize)
...
@@ -17,8 +17,10 @@
// See COPYING file for full licensing terms.
// See https://www.nexedi.com/licensing for rationale and options.
-package main
-// ZBlk* + ZBigFile loading
+// Package zdata provides access for wendelin.core in-ZODB data.
+//
+// ZBlk* + ZBigFile + ΔFtail for ZBigFile-level ZODB history.
+package zdata
// module: "wendelin.bigfile.file_zodb"
//
@@ -52,6 +54,7 @@ import (
"lab.nexedi.com/kirr/neo/go/zodb/btree"
"lab.nexedi.com/nexedi/wendelin.core/wcfs/internal/pycompat"
"lab.nexedi.com/nexedi/wendelin.core/wcfs/internal/xzodb"
)
// ZBlk is the interface that every ZBlk* block implements.
@@ -59,13 +62,13 @@ type ZBlk interface {
zodb.IPersistent
_ZBlkInΔFtail
-// loadBlkData loads from database and returns data block stored by this ZBlk.
+// LoadBlkData loads from database and returns data block stored by this ZBlk.
//
// If returned data size is less than the block size of containing ZBigFile,
// the block trailing is assumed to be trailing \0.
//
// returns data and revision of ZBlk.
-loadBlkData(ctx context.Context) (data []byte, rev zodb.Tid, _ error)
+LoadBlkData(ctx context.Context) (data []byte, rev zodb.Tid, _ error)
}
var _ ZBlk = (*ZBlk0)(nil)
@@ -98,15 +101,15 @@ func (zb *zBlk0State) PyGetState() interface{} {
func (zb *zBlk0State) PySetState(pystate interface{}) error {
blkdata, ok := pystate.(string)
if !ok {
-return fmt.Errorf("expect str; got %s", typeOf(pystate))
+return fmt.Errorf("expect str; got %s", xzodb.TypeOf(pystate))
}
zb.blkdata = blkdata
return nil
}
-// loadBlkData implements ZBlk.
+// LoadBlkData implements ZBlk.
-func (zb *ZBlk0) loadBlkData(ctx context.Context) (_ []byte, _ zodb.Tid, err error) {
+func (zb *ZBlk0) LoadBlkData(ctx context.Context) (_ []byte, _ zodb.Tid, err error) {
defer xerr.Contextf(&err, "ZBlk0(%s): loadBlkData", zb.POid())
err = zb.PActivate(ctx)
@@ -144,7 +147,7 @@ func (zd *zDataState) PyGetState() interface{} {
func (zd *zDataState) PySetState(pystate interface{}) error {
data, ok := pystate.(string)
if !ok {
-return fmt.Errorf("expect str; got %s", typeOf(pystate))
+return fmt.Errorf("expect str; got %s", xzodb.TypeOf(pystate))
}
zd.data = data
@@ -175,15 +178,15 @@ func (zb *zBlk1State) PyGetState() interface{} {
func (zb *zBlk1State) PySetState(pystate interface{}) error {
chunktab, ok := pystate.(*btree.IOBTree)
if !ok {
-return fmt.Errorf("expect IOBTree; got %s", typeOf(pystate))
+return fmt.Errorf("expect IOBTree; got %s", xzodb.TypeOf(pystate))
}
zb.chunktab = chunktab
return nil
}
-// loadBlkData implements ZBlk.
+// LoadBlkData implements ZBlk.
-func (zb *ZBlk1) loadBlkData(ctx context.Context) (_ []byte, _ zodb.Tid, err error) {
+func (zb *ZBlk1) LoadBlkData(ctx context.Context) (_ []byte, _ zodb.Tid, err error) {
defer xerr.Contextf(&err, "ZBlk1(%s): loadBlkData", zb.POid())
err = zb.PActivate(ctx)
@@ -240,13 +243,13 @@ func (zb *ZBlk1) loadBlkData(ctx context.Context) (_ []byte, _ zodb.Tid, err err
defer b.PDeactivate()
// go through all bucket key/v -> chunktab
-defer xerr.Contextf(&err, "%s(%s)", typeOf(b), b.POid())
+defer xerr.Contextf(&err, "%s(%s)", xzodb.TypeOf(b), b.POid())
//fmt.Printf("\nbucket: %v\n\n", b.Entryv())
for i, e := range b.Entryv() {
zd, ok := e.Value().(*ZData)
if !ok {
-return fmt.Errorf("[%d]: !ZData (%s)", i, typeOf(e.Value()))
+return fmt.Errorf("[%d]: !ZData (%s)", i, xzodb.TypeOf(e.Value()))
}
offset := e.Key()
@@ -284,7 +287,7 @@ func (zb *ZBlk1) loadBlkData(ctx context.Context) (_ []byte, _ zodb.Tid, err err
})
default:
-panicf("IOBTree has %s child", typeOf(child))
+panicf("IOBTree has %s child", xzodb.TypeOf(child))
}
}
@@ -376,7 +379,7 @@ func (bf *zBigFileState) PyGetState() interface{} {
func (bf *zBigFileState) PySetState(pystate interface{}) (err error) {
t, ok := pystate.(pickle.Tuple)
if !ok {
-return fmt.Errorf("expect [2](); got %s", typeOf(pystate))
+return fmt.Errorf("expect [2](); got %s", xzodb.TypeOf(pystate))
}
if len(t) != 2 {
return fmt.Errorf("expect [2](); got [%d]()", len(t))
@@ -384,7 +387,7 @@ func (bf *zBigFileState) PySetState(pystate interface{}) (err error) {
blksize, ok := pycompat.Int64(t[0])
if !ok {
-return fmt.Errorf("blksize: expect integer; got %s", typeOf(t[0]))
+return fmt.Errorf("blksize: expect integer; got %s", xzodb.TypeOf(t[0]))
}
if blksize <= 0 {
return fmt.Errorf("blksize: must be > 0; got %d", blksize)
@@ -392,7 +395,7 @@ func (bf *zBigFileState) PySetState(pystate interface{}) (err error) {
blktab, ok := t[1].(*btree.LOBTree)
if !ok {
-return fmt.Errorf("blktab: expect LOBTree; got %s", typeOf(t[1]))
+return fmt.Errorf("blktab: expect LOBTree; got %s", xzodb.TypeOf(t[1]))
}
bf.blksize = blksize
@@ -400,6 +403,13 @@ func (bf *zBigFileState) PySetState(pystate interface{}) (err error) {
return nil
}
// BlkSize returns size of block used by this file.
//
// The file must be activated.
func (bf *ZBigFile) BlkSize() int64 {
return bf.blksize
}
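As a hedged sketch of the activation requirement noted above (illustrative only, not from the commit), a caller would typically bracket BlkSize with PActivate/PDeactivate:

// zfBlkSize returns the block size of zf; ctx and zf come from the caller.
func zfBlkSize(ctx context.Context, zf *ZBigFile) (int64, error) {
	if err := zf.PActivate(ctx); err != nil {
		return 0, err
	}
	defer zf.PDeactivate()
	return zf.BlkSize(), nil
}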
// LoadBlk loads data for file block #blk.
//
// it also returns:
@@ -432,10 +442,10 @@ func (bf *ZBigFile) LoadBlk(ctx context.Context, blk int64) (_ []byte, treePath
zblk, ok = xzblk.(ZBlk)
if !ok {
-return nil, nil, nil, 0, fmt.Errorf("expect ZBlk*; got %s", typeOf(xzblk))
+return nil, nil, nil, 0, fmt.Errorf("expect ZBlk*; got %s", xzodb.TypeOf(xzblk))
}
-blkdata, zblkrev, err := zblk.loadBlkData(ctx)
+blkdata, zblkrev, err := zblk.LoadBlkData(ctx)
if err != nil {
return nil, nil, nil, 0, err
}
...
@@ -17,7 +17,7 @@
// See COPYING file for full licensing terms.
// See https://www.nexedi.com/licensing for rationale and options.
-package main
+package zdata
//go:generate ./testdata/zblk_test_gen.py
@@ -83,12 +83,12 @@ func TestZBlk(t *testing.T) {
}
-z0Data, z0Rev, err := z0.loadBlkData(ctx); X(err)
+z0Data, z0Rev, err := z0.LoadBlkData(ctx); X(err)
z0DataOK := brange32(z0_len)
assert.Equal(z0Data, z0DataOK, "ZBlk0 data wrong")
assert.Equal(z0Rev, z0_rev, "ZBlk0 rev wrong")
-z1Data, z1Rev, err := z1.loadBlkData(ctx); X(err)
+z1Data, z1Rev, err := z1.LoadBlkData(ctx); X(err)
z1DataOK := make([]byte, zf_blksize) // zeros
copy(z1DataOK[0:], brange32(z1_htlen)) // head
copy(z1DataOK[len(z1DataOK)-z1_htlen:], breverse(brange32(z1_htlen))) // tail
...
-// Code generated by gen-set BigFile *BigFile; DO NOT EDIT.
+// Code generated by gen-set ZBigFile *ZBigFile; DO NOT EDIT.
// Copyright (C) 2015-2021 Nexedi SA and Contributors.
// Kirill Smelkov <kirr@nexedi.com>
@@ -19,38 +19,38 @@
// See COPYING file for full licensing terms.
// See https://www.nexedi.com/licensing for rationale and options.
-package main
+package zdata
-// SetBigFile is a set of *BigFile.
+// SetZBigFile is a set of *ZBigFile.
-type SetBigFile map[*BigFile]struct{}
+type SetZBigFile map[*ZBigFile]struct{}
// Add adds v to the set.
-func (s SetBigFile) Add(v *BigFile) {
+func (s SetZBigFile) Add(v *ZBigFile) {
s[v] = struct{}{}
}
// Del removes v from the set.
// it is noop if v was not in the set.
-func (s SetBigFile) Del(v *BigFile) {
+func (s SetZBigFile) Del(v *ZBigFile) {
delete(s, v)
}
// Has checks whether the set contains v.
-func (s SetBigFile) Has(v *BigFile) bool {
+func (s SetZBigFile) Has(v *ZBigFile) bool {
_, ok := s[v]
return ok
}
// Update adds t values to s.
-func (s SetBigFile) Update(t SetBigFile) {
+func (s SetZBigFile) Update(t SetZBigFile) {
for v := range t {
s.Add(v)
}
}
// Elements returns all elements of set as slice.
-func (s SetBigFile) Elements() []*BigFile {
+func (s SetZBigFile) Elements() []*ZBigFile {
-ev := make([]*BigFile, len(s))
+ev := make([]*ZBigFile, len(s))
i := 0
for e := range s {
ev[i] = e
@@ -60,14 +60,14 @@ func (s SetBigFile) Elements() []*BigFile {
}
// Union returns s ∪ t
-func (s SetBigFile) Union(t SetBigFile) SetBigFile {
+func (s SetZBigFile) Union(t SetZBigFile) SetZBigFile {
// l = max(len(s), len(t))
l := len(s)
if lt := len(t); lt > l {
l = lt
}
-u := make(SetBigFile, l)
+u := make(SetZBigFile, l)
for v := range s {
u.Add(v)
@@ -79,8 +79,8 @@ func (s SetBigFile) Union(t SetBigFile) SetBigFile {
}
// Intersection returns s ∩ t
-func (s SetBigFile) Intersection(t SetBigFile) SetBigFile {
+func (s SetZBigFile) Intersection(t SetZBigFile) SetZBigFile {
-i := SetBigFile{}
+i := SetZBigFile{}
for v := range s {
if t.Has(v) {
i.Add(v)
@@ -90,8 +90,8 @@ func (s SetBigFile) Intersection(t SetBigFile) SetBigFile {
}
// Difference returns s\t.
-func (s SetBigFile) Difference(t SetBigFile) SetBigFile {
+func (s SetZBigFile) Difference(t SetZBigFile) SetZBigFile {
-d := SetBigFile{}
+d := SetZBigFile{}
for v := range s {
if !t.Has(v) {
d.Add(v)
@@ -101,8 +101,8 @@ func (s SetBigFile) Difference(t SetBigFile) SetBigFile {
}
// SymmetricDifference returns s Δ t.
-func (s SetBigFile) SymmetricDifference(t SetBigFile) SetBigFile {
+func (s SetZBigFile) SymmetricDifference(t SetZBigFile) SetZBigFile {
-d := SetBigFile{}
+d := SetZBigFile{}
for v := range s {
if !t.Has(v) {
d.Add(v)
@@ -117,7 +117,7 @@ func (s SetBigFile) SymmetricDifference(t SetBigFile) SetBigFile {
}
// Equal returns whether a == b.
-func (a SetBigFile) Equal(b SetBigFile) bool {
+func (a SetZBigFile) Equal(b SetZBigFile) bool {
if len(a) != len(b) {
return false
}
...
// Code generated by ./testdata/zblk_test_gen.py; DO NOT EDIT.
-package main
+package zdata
import "lab.nexedi.com/kirr/neo/go/zodb"
...
@@ -17,10 +17,9 @@
// See COPYING file for full licensing terms.
// See https://www.nexedi.com/licensing for rationale and options.
-package main
+package zdata
-//go:generate ./gen-set main I64 int64 zset_i64.go
-//go:generate ./gen-set main BigFile *BigFile zset_bigfile.go
+//go:generate ../set/gen-set zdata ZBigFile *ZBigFile zset_bigfile.go
import (
"context"
@@ -31,8 +30,15 @@ import (
"lab.nexedi.com/kirr/go123/xerr"
"lab.nexedi.com/kirr/neo/go/zodb"
"lab.nexedi.com/kirr/neo/go/zodb/btree"
"lab.nexedi.com/nexedi/wendelin.core/wcfs/internal/set"
"lab.nexedi.com/nexedi/wendelin.core/wcfs/internal/xbtree"
"lab.nexedi.com/nexedi/wendelin.core/wcfs/internal/xtail"
"lab.nexedi.com/nexedi/wendelin.core/wcfs/internal/xzodb"
)
type SetI64 = set.SetI64
// ΔFtail represents tail of revisional changes to files.
//
// It semantically consists of
@@ -71,8 +77,8 @@ import (
// See also zodb.ΔTail
type ΔFtail struct {
// ΔFtail merges ΔBtail with history of ZBlk
-δBtail *ΔBtail
+δBtail *xbtree.ΔBtail
-fileIdx map[zodb.Oid]SetBigFile // tree-root -> {} BigFile as of @head
+fileIdx map[zodb.Oid]SetZBigFile // tree-root -> {} ZBigFile as of @head
// XXX don't need vδF - everything is reconstructed at runtime from .δBtail.vδT
// this way we also don't need to keep up updating vδF from vδT on its rebuild during.
@@ -82,13 +88,13 @@ type ΔFtail struct {
// tracked ZBlk that are not yet taken into account in current vδF.
// grows on new track requests; flushes on queries and update.
-trackNew map[*BigFile]map[zodb.Oid]*zblkInΔFtail // {} file -> {} oid -> zblk
+trackNew map[*ZBigFile]map[zodb.Oid]*zblkInΔFtail // {} file -> {} oid -> zblk
}
// ΔF represents a change in files space.
type ΔF struct {
Rev zodb.Tid
-ByFile map[*BigFile]*ΔFile // file -> δfile
+ByFile map[*ZBigFile]*ΔFile // file -> δfile
}
// ΔFile represents a change to one file.
@@ -111,7 +117,7 @@ type zblkInΔFtail struct {
// tree nodes and for tree_root->file)
// with which files/blocks this ZBlk is associated with as of @head state
-infile map[*BigFile]SetI64 // {} file -> set(#blk)
+infile map[*ZBigFile]SetI64 // {} file -> set(#blk)
}
type _ZBlkInΔFtail interface { inΔFtail() *zblkInΔFtail }
@@ -126,9 +132,9 @@ func (z *zblkInΔFtail) inΔFtail() *zblkInΔFtail { return z }
// ZODB when needed.
func NewΔFtail(at0 zodb.Tid, db *zodb.DB) *ΔFtail {
return &ΔFtail{
-δBtail: NewΔBtail(at0, db),
+δBtail: xbtree.NewΔBtail(at0, db),
-fileIdx: make(map[zodb.Oid]SetBigFile),
+fileIdx: make(map[zodb.Oid]SetZBigFile),
-trackNew: make(map[*BigFile]map[zodb.Oid]*zblkInΔFtail),
+trackNew: make(map[*ZBigFile]map[zodb.Oid]*zblkInΔFtail),
}
}
@@ -149,10 +155,10 @@ func (δFtail *ΔFtail) Tail() zodb.Tid { return δFtail.δBtail.Tail() }
// XXX objects in path and zblk must be with .PJar().At() == .head
//
// A root can be associated with several files (each provided on different Track call).
-func (δFtail *ΔFtail) Track(file *BigFile, blk int64, path []btree.LONode, zblk ZBlk) {
+func (δFtail *ΔFtail) Track(file *ZBigFile, blk int64, path []btree.LONode, zblk ZBlk) {
if blk == -1 {
// XXX blk = ∞ from beginning ?
-blk = KeyMax
+blk = xbtree.KeyMax
}
err := δFtail.δBtail.Track(blk, zblk != nil, path)
if err != nil {
@@ -161,7 +167,7 @@ func (δFtail *ΔFtail) Track(file *BigFile, blk int64, path []btree.LONode, zbl
root := path[0].(*btree.LOBTree)
files, ok := δFtail.fileIdx[root.POid()]
if !ok {
-files = SetBigFile{}
+files = SetZBigFile{}
δFtail.fileIdx[root.POid()] = files
}
files.Add(file)
@@ -174,7 +180,7 @@ func (δFtail *ΔFtail) Track(file *BigFile, blk int64, path []btree.LONode, zbl
if !ok {
blocks = make(SetI64, 1)
if z.infile == nil {
-z.infile = make(map[*BigFile]SetI64)
+z.infile = make(map[*ZBigFile]SetI64)
}
z.infile[file] = blocks
}
@@ -213,7 +219,7 @@ func (δFtail *ΔFtail) Track(file *BigFile, blk int64, path []btree.LONode, zbl
// Zhead must be active connection at δFtail.Head() database state.
// Objects in Zhead must not be modified.
// During call to Update zhead must not be otherwise used - even for reading.
-func (δFtail *ΔFtail) Update(δZ *zodb.EventCommit, zhead *ZConn) (_ ΔF, err error) {
+func (δFtail *ΔFtail) Update(δZ *zodb.EventCommit, zhead *xzodb.ZConn) (_ ΔF, err error) {
defer xerr.Contextf(&err, "ΔFtail update %s -> %s", δFtail.Head(), δZ.Tid)
// XXX δFtail.update() first?
@@ -224,7 +230,7 @@ func (δFtail *ΔFtail) Update(δZ *zodb.EventCommit, zhead *ZConn) (_ ΔF, err
return ΔF{}, err
}
-δF := ΔF{Rev: δB.Rev, ByFile: make(map[*BigFile]*ΔFile)}
+δF := ΔF{Rev: δB.Rev, ByFile: make(map[*ZBigFile]*ΔFile)}
// take btree changes into account
for root, δt := range δB.ByRoot {
@@ -301,7 +307,7 @@ func (δFtail *ΔFtail) Update(δZ *zodb.EventCommit, zhead *ZConn) (_ ΔF, err
//
// If file != nil only track requests related to file are processed.
// Otherwise all track requests are processed.
-func (δFtail *ΔFtail) update(file *BigFile) {
+func (δFtail *ΔFtail) update(file *ZBigFile) {
if file == nil {
panic("TODO")
}
@@ -318,7 +324,7 @@ func (δFtail *ΔFtail) update(file *BigFile) {
delete(δFtail.trackNew, file)
// XXX unlock here
-for i, δZ := range δFtail.δBtail.δZtail.Data() {
+for i, δZ := range δFtail.δBtail.ΔZtail().Data() {
δF := δFtail.vδF[i]
// XXX assert δF.Rev == δZ.Rev
@@ -348,7 +354,7 @@ func (δFtail *ΔFtail) ForgetPast(revCut zodb.Tid) {
// XXX don't need
func (δFtail *ΔFtail) SliceByRev(lo, hi zodb.Tid) /*readonly*/ []ΔF {
-δassertSlice(δFtail, lo, hi)
+xtail.AssertSlice(δFtail, lo, hi)
panic("TODO")
}
@@ -361,8 +367,8 @@ func (δFtail *ΔFtail) SliceByRev(lo, hi zodb.Tid) /*readonly*/ []ΔF {
// the caller must not modify returned slice.
//
// Note: contrary to regular go slicing, low is exclusive while high is inclusive.
-func (δFtail *ΔFtail) SliceByFileRev(file *BigFile, lo, hi zodb.Tid) /*readonly*/[]*ΔFile {
+func (δFtail *ΔFtail) SliceByFileRev(file *ZBigFile, lo, hi zodb.Tid) /*readonly*/[]*ΔFile {
-δassertSlice(δFtail, lo, hi)
+xtail.AssertSlice(δFtail, lo, hi)
// FIXME rework to just query .δBtail.SliceByRootRev(file.blktab, lo, hi) +
// merge δZBlk history with that.
@@ -419,7 +425,7 @@ func (δFtail *ΔFtail) SliceByFileRev(file *BigFile, lo, hi zodb.Tid) /*readonl
// δFile ────────o───────o──────x─────x────────────────────────
/*
-vδZ := δFtail.δBtail.δZtail.SliceByRev(lo, hi)
+vδZ := δFtail.δBtail.ΔZtail().SliceByRev(lo, hi)
// XXX stub that takes only ZBlk changes into account
// XXX dumb
@@ -467,19 +473,19 @@ func (δFtail *ΔFtail) SliceByFileRev(file *BigFile, lo, hi zodb.Tid) /*readonl
//
// if exact=False - what is returned is only an upper bound for last block revision.
//
-// f must be from head/
+// zf must be from @head
// at must ∈ (tail, head]
// blk must be tracked
//
// XXX +ctx, error rebuild []δF here
-func (δFtail *ΔFtail) LastBlkRev(ctx context.Context, f *BigFile, blk int64, at zodb.Tid) (_ zodb.Tid, exact bool) {
+func (δFtail *ΔFtail) LastBlkRev(ctx context.Context, zf *ZBigFile, blk int64, at zodb.Tid) (_ zodb.Tid, exact bool) {
//defer xerr.Contextf(&err, "") // XXX text
// XXX assert δFtail == f.head.bfdir.δFtail ?
// XXX tabRev -> treeRev ?
// XXX activate zfile?
-zblkOid, ok, tabRev, tabRevExact, err := δFtail.δBtail.GetAt(ctx, f.zfile.blktab, blk, at)
+zblkOid, ok, tabRev, tabRevExact, err := δFtail.δBtail.GetAt(ctx, zf.blktab, blk, at)
if err != nil {
panic(err) // XXX
}
@@ -494,7 +500,7 @@ func (δFtail *ΔFtail) LastBlkRev(ctx context.Context, f *BigFile, blk int64, a
// blk revision is max rev and when zblk changed last in (rev, at] range.
//
// XXX need to use full δZ, not only connected to tracked subset?
-zblkRev, zblkRevExact := δFtail.δBtail.δZtail.LastRevOf(zblkOid, at)
+zblkRev, zblkRevExact := δFtail.δBtail.ΔZtail().LastRevOf(zblkOid, at)
if zblkRev > tabRev {
return zblkRev, zblkRevExact
} else {
...
@@ -36,10 +36,8 @@ import (
"github.com/hanwen/go-fuse/v2/fuse/nodefs"
"github.com/pkg/errors"
"lab.nexedi.com/kirr/go123/xcontext"
"lab.nexedi.com/kirr/go123/xio"
"lab.nexedi.com/kirr/neo/go/transaction"
"lab.nexedi.com/kirr/neo/go/zodb"
)
@@ -430,76 +428,6 @@ func (f *skFile) Release() {
}
// ---- ZODB ---
// typeOf returns string for object's type.
//
// - for ZODB objects, it uses zodb.ClassOf, which in particular supports
// printing zodb.Broken with details properly.
//
// - for other objects, it uses %T.
func typeOf(obj interface{}) string {
switch obj := obj.(type) {
case zodb.IPersistent:
return zodb.ClassOf(obj)
default:
return fmt.Sprintf("%T", obj)
}
}
// ZConn is zodb.Connection + associated read-only transaction under which
// objects of the connection are accessed.
type ZConn struct {
*zodb.Connection
// read-only transaction under which we access zodb.Connection data.
txnCtx context.Context // XXX -> better directly store txn
}
// zopen opens new connection to ZODB database + associated read-only transaction.
func zopen(ctx context.Context, zdb *zodb.DB, zopt *zodb.ConnOptions) (_ *ZConn, err error) {
// create new read-only transaction
txn, txnCtx := transaction.New(context.Background())
defer func() {
if err != nil {
txn.Abort()
}
}()
// XXX better ctx = transaction.PutIntoContext(ctx, txn)
ctx, cancel := xcontext.Merge(ctx, txnCtx)
defer cancel()
zconn, err := zdb.Open(ctx, zopt)
if err != nil {
return nil, err
}
return &ZConn{
Connection: zconn,
txnCtx: txnCtx,
}, nil
}
// tidmax returns latest revision.
func tidmax(a, b zodb.Tid) zodb.Tid {
if a > b {
return a
} else {
return b
}
}
// tidmin returns earliest revision.
func tidmin(a, b zodb.Tid) zodb.Tid {
if a < b {
return a
} else {
return b
}
}
// ---- parsing ---- // ---- parsing ----
// parseWatchFrame parses line going through /head/watch into (stream, msg) // parseWatchFrame parses line going through /head/watch into (stream, msg)
......
...@@ -510,6 +510,22 @@ import ( ...@@ -510,6 +510,22 @@ import (
"github.com/hanwen/go-fuse/v2/fuse" "github.com/hanwen/go-fuse/v2/fuse"
"github.com/hanwen/go-fuse/v2/fuse/nodefs" "github.com/hanwen/go-fuse/v2/fuse/nodefs"
"github.com/pkg/errors" "github.com/pkg/errors"
"lab.nexedi.com/nexedi/wendelin.core/wcfs/internal/set"
"lab.nexedi.com/nexedi/wendelin.core/wcfs/internal/xzodb"
"lab.nexedi.com/nexedi/wendelin.core/wcfs/internal/zdata"
)
// ZBigFile-related types
// XXX place
type (
ZBlk = zdata.ZBlk
ZBlk0 = zdata.ZBlk0
ZBlk1 = zdata.ZBlk1
ZData = zdata.ZData
ZBigFile = zdata.ZBigFile
SetI64 = set.SetI64
) )
// Root represents root of wcfs filesystem. // Root represents root of wcfs filesystem.
...@@ -555,7 +571,7 @@ type Head struct { ...@@ -555,7 +571,7 @@ type Head struct {
// with additional locking protocol to avoid deadlocks (see below for // with additional locking protocol to avoid deadlocks (see below for
// pauseOSCacheUpload + ...). // pauseOSCacheUpload + ...).
zheadMu sync.RWMutex zheadMu sync.RWMutex
zconn *ZConn // for head/ zwatcher resyncs head.zconn; others only read zconn objects. zconn *xzodb.ZConn // for head/ zwatcher resyncs head.zconn; others only read zconn objects.
// zwatcher signals to uploadBlk to pause/continue uploads to OS cache to avoid deadlocks. // zwatcher signals to uploadBlk to pause/continue uploads to OS cache to avoid deadlocks.
// see notes.txt -> "Kernel locks page on read/cache store/..." for details. // see notes.txt -> "Kernel locks page on read/cache store/..." for details.
...@@ -585,7 +601,7 @@ type BigFileDir struct { ...@@ -585,7 +601,7 @@ type BigFileDir struct {
// δ tail of tracked BTree nodes of all BigFiles + -> which file // δ tail of tracked BTree nodes of all BigFiles + -> which file
// (used only for head/, not revX/) // (used only for head/, not revX/)
δFmu sync.RWMutex // zheadMu.W | zheadMu.R + δFmu.X δFmu sync.RWMutex // zheadMu.W | zheadMu.R + δFmu.X
δFtail *ΔFtail δFtail *zdata.ΔFtail
} }
// /(head|<rev>)/bigfile/<bigfileX> - served by BigFile. // /(head|<rev>)/bigfile/<bigfileX> - served by BigFile.
...@@ -867,7 +883,7 @@ retry: ...@@ -867,7 +883,7 @@ retry:
if !log.V(2) { if !log.V(2) {
// debug dump δF // debug dump δF
log.Infof("\n\nS: handleδZ: δF (#%d):\n", len(δF.ByFile)) log.Infof("\n\nS: handleδZ: δF (#%d):\n", len(δF.ByFile))
for file, δfile := range δF.ByFile { for zfile, δfile := range δF.ByFile {
blkv := δfile.Blocks.Elements() blkv := δfile.Blocks.Elements()
sort.Slice(blkv, func(i, j int) bool { sort.Slice(blkv, func(i, j int) bool {
return blkv[i] < blkv[j] return blkv[i] < blkv[j]
...@@ -876,18 +892,19 @@ retry: ...@@ -876,18 +892,19 @@ retry:
if δfile.Size { if δfile.Size {
size = "S" size = "S"
} }
log.Infof("S: \t- %s\t%s %v\n", file.zfile.POid(), size, blkv) log.Infof("S: \t- %s\t%s %v\n", zfile.POid(), size, blkv)
} }
log.Infof("\n\n") log.Infof("\n\n")
} }
wg := xsync.NewWorkGroup(ctx) wg := xsync.NewWorkGroup(ctx)
for file, δfile := range δF.ByFile { for zfile, δfile := range δF.ByFile {
// // XXX needed? // // XXX needed?
// // XXX even though δBtail is complete, not all ZBlk are present here // // XXX even though δBtail is complete, not all ZBlk are present here
// file.δtail.Append(δF.Rev, δfile.Blocks.Elements()) // file.δtail.Append(δF.Rev, δfile.Blocks.Elements())
file := file // zfile was requested to be tracked -> it must be present in fileTab
file := bfdir.fileTab[zfile.POid()]
for blk := range δfile.Blocks { for blk := range δfile.Blocks {
blk := blk blk := blk
wg.Go(func(ctx context.Context) error { wg.Go(func(ctx context.Context) error {
...@@ -905,11 +922,11 @@ retry: ...@@ -905,11 +922,11 @@ retry:
// //
// do it after completing data invalidations. // do it after completing data invalidations.
wg = xsync.NewWorkGroup(ctx) wg = xsync.NewWorkGroup(ctx)
for file, δfile := range δF.ByFile { for zfile, δfile := range δF.ByFile {
if !δfile.Size { if !δfile.Size {
continue continue
} }
file := file file := bfdir.fileTab[zfile.POid()] // must be present
wg.Go(func(ctx context.Context) error { wg.Go(func(ctx context.Context) error {
return file.invalidateAttr() // NOTE does not accept ctx return file.invalidateAttr() // NOTE does not accept ctx
}) })
...@@ -923,25 +940,26 @@ retry: ...@@ -923,25 +940,26 @@ retry:
// XXX -> Head.Resync() ? // XXX -> Head.Resync() ?
// 1. abort old and resync to new txn/at // 1. abort old and resync to new txn/at
transaction.Current(zhead.txnCtx).Abort() transaction.Current(zhead.TxnCtx).Abort()
_, ctx = transaction.New(context.Background()) // XXX bg ok? _, ctx = transaction.New(context.Background()) // XXX bg ok?
err = zhead.Resync(ctx, δZ.Tid) err = zhead.Resync(ctx, δZ.Tid)
if err != nil { if err != nil {
return err return err
} }
zhead.txnCtx = ctx zhead.TxnCtx = ctx
// 2. restat invalidated ZBigFile // 2. restat invalidated ZBigFile
// NOTE no lock needed since .blksize and .size are constant during lifetime of one txn. // NOTE no lock needed since .blksize and .size are constant during lifetime of one txn.
// XXX -> parallel // XXX -> parallel
for file := range δF.ByFile { for zfile := range δF.ByFile {
size, sizePath, err := file.zfile.Size(ctx) size, sizePath, err := zfile.Size(ctx)
if err != nil { if err != nil {
return err return err
} }
file := bfdir.fileTab[zfile.POid()] // must be present
file.size = size file.size = size
bfdir.δFtail.Track(file, -1, sizePath, nil) bfdir.δFtail.Track(zfile, -1, sizePath, nil)
// XXX we can miss a change to file if δblk is not yet tracked // XXX we can miss a change to file if δblk is not yet tracked
// -> need to update file.rev at read time -> locking=XXX // -> need to update file.rev at read time -> locking=XXX
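The "must be present" lookups above lean on an invariant: any zfile that shows up in δF.ByFile was previously Track'ed, and tracked files are registered in bfdir.fileTab. A hedged sketch of a helper (hypothetical, not part of this commit) that would make a broken invariant fail loudly instead of yielding a nil *BigFile:

	// fileForZ resolves a tracked ZBigFile to its in-memory BigFile, panicking if the invariant is violated.
	func (bfdir *BigFileDir) fileForZ(zfile *ZBigFile) *BigFile {
		f, ok := bfdir.fileTab[zfile.POid()]
		if !ok {
			panic(fmt.Sprintf("BUG: %s is reported by δFtail but missing from fileTab", zfile.POid()))
		}
		return f
	}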
...@@ -1060,7 +1078,7 @@ func (f *BigFile) invalidateBlk(ctx context.Context, blk int64) (err error) { ...@@ -1060,7 +1078,7 @@ func (f *BigFile) invalidateBlk(ctx context.Context, blk int64) (err error) {
func() { func() {
// store retrieved data back to OS cache for file @<rev>/file[blk] // store retrieved data back to OS cache for file @<rev>/file[blk]
δFtail := f.head.bfdir.δFtail δFtail := f.head.bfdir.δFtail
blkrev, _ := δFtail.LastBlkRev(ctx, f, blk, f.head.zconn.At()) blkrev, _ := δFtail.LastBlkRev(ctx, f.zfile, blk, f.head.zconn.At())
frev, funlock, err := groot.lockRevFile(blkrev, f.zfile.POid()) frev, funlock, err := groot.lockRevFile(blkrev, f.zfile.POid())
if err != nil { if err != nil {
log.Errorf("BUG: %s: invalidate blk #%d: %s (ignoring, but reading @revX/bigfile will be slow)", f.path(), blk, err) log.Errorf("BUG: %s: invalidate blk #%d: %s (ignoring, but reading @revX/bigfile will be slow)", f.path(), blk, err)
...@@ -1182,7 +1200,7 @@ func (f *BigFile) Read(_ nodefs.File, dest []byte, off int64, fctx *fuse.Context ...@@ -1182,7 +1200,7 @@ func (f *BigFile) Read(_ nodefs.File, dest []byte, off int64, fctx *fuse.Context
dest = make([]byte, aend - aoff) // ~> [aoff:aend) in file dest = make([]byte, aend - aoff) // ~> [aoff:aend) in file
// XXX better ctx = transaction.PutIntoContext(ctx, txn) // XXX better ctx = transaction.PutIntoContext(ctx, txn)
ctx, cancel := xcontext.Merge(fctx, f.head.zconn.txnCtx) ctx, cancel := xcontext.Merge(fctx, f.head.zconn.TxnCtx)
defer cancel() defer cancel()
// read/load all block(s) in parallel // read/load all block(s) in parallel
...@@ -1487,7 +1505,7 @@ func (f *BigFile) readPinWatchers(ctx context.Context, blk int64, treepath []btr ...@@ -1487,7 +1505,7 @@ func (f *BigFile) readPinWatchers(ctx context.Context, blk int64, treepath []btr
bfdir := f.head.bfdir bfdir := f.head.bfdir
δFtail := bfdir.δFtail δFtail := bfdir.δFtail
bfdir.δFmu.Lock() // XXX locking correct? XXX -> better push down? bfdir.δFmu.Lock() // XXX locking correct? XXX -> better push down?
δFtail.Track(f, blk, treepath, zblk) // XXX pass in zblk.rev here? δFtail.Track(f.zfile, blk, treepath, zblk) // XXX pass in zblk.rev here?
f.accessed.Add(blk) f.accessed.Add(blk)
bfdir.δFmu.Unlock() bfdir.δFmu.Unlock()
...@@ -1522,7 +1540,7 @@ func (f *BigFile) readPinWatchers(ctx context.Context, blk int64, treepath []btr ...@@ -1522,7 +1540,7 @@ func (f *BigFile) readPinWatchers(ctx context.Context, blk int64, treepath []btr
// we'll relock atMu again and recheck blkrev vs w.at after. // we'll relock atMu again and recheck blkrev vs w.at after.
w.atMu.RUnlock() w.atMu.RUnlock()
blkrev, _ = δFtail.LastBlkRev(ctx, f, blk, f.head.zconn.At()) blkrev, _ = δFtail.LastBlkRev(ctx, f.zfile, blk, f.head.zconn.At())
blkrevRough = false blkrevRough = false
w.atMu.RLock() w.atMu.RLock()
...@@ -1538,7 +1556,7 @@ func (f *BigFile) readPinWatchers(ctx context.Context, blk int64, treepath []btr ...@@ -1538,7 +1556,7 @@ func (f *BigFile) readPinWatchers(ctx context.Context, blk int64, treepath []btr
// and most of them would be on different w.at - cache of the file will // and most of them would be on different w.at - cache of the file will
// be lost. Via pinning to particular block revision, we make sure the // be lost. Via pinning to particular block revision, we make sure the
// revision to pin is the same on all clients, and so file cache is shared. // revision to pin is the same on all clients, and so file cache is shared.
pinrev, _ := δFtail.LastBlkRev(ctx, w.file, blk, w.at) // XXX move into go? pinrev, _ := δFtail.LastBlkRev(ctx, w.file.zfile, blk, w.at) // XXX move into go?
// XXX ^^^ w.file vs f ? // XXX ^^^ w.file vs f ?
//fmt.Printf("S: read #%d: watch @%s: pin -> @%s\n", blk, w.at, pinrev) //fmt.Printf("S: read #%d: watch @%s: pin -> @%s\n", blk, w.at, pinrev)
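A worked example of that cache-sharing argument (revisions hypothetical):

	// blk last changed at revision r; two watchers with r ≤ w1.at < w2.at
	//   pin by w.at        -> client1 pins @w1.at, client2 pins @w2.at   (two distinct @rev files, cache split)
	//   pin by LastBlkRev  -> both clients pin @r                        (one shared @r/bigfile cache entry)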
...@@ -1684,7 +1702,7 @@ func (wlink *WatchLink) setupWatch(ctx context.Context, foid zodb.Oid, at zodb.T ...@@ -1684,7 +1702,7 @@ func (wlink *WatchLink) setupWatch(ctx context.Context, foid zodb.Oid, at zodb.T
toPin := map[int64]zodb.Tid{} // blk -> @rev toPin := map[int64]zodb.Tid{} // blk -> @rev
δFtail := bfdir.δFtail δFtail := bfdir.δFtail
for _, δfile := range δFtail.SliceByFileRev(f, at, headAt) { // XXX locking δFtail for _, δfile := range δFtail.SliceByFileRev(f.zfile, at, headAt) { // XXX locking δFtail
for blk := range δfile.Blocks { for blk := range δfile.Blocks {
_, already := toPin[blk] _, already := toPin[blk]
if already { if already {
...@@ -1705,7 +1723,7 @@ func (wlink *WatchLink) setupWatch(ctx context.Context, foid zodb.Oid, at zodb.T ...@@ -1705,7 +1723,7 @@ func (wlink *WatchLink) setupWatch(ctx context.Context, foid zodb.Oid, at zodb.T
continue continue
} }
toPin[blk], _ = δFtail.LastBlkRev(ctx, f, blk, at) // XXX err toPin[blk], _ = δFtail.LastBlkRev(ctx, f.zfile, blk, at) // XXX err
} }
} }
...@@ -2079,7 +2097,7 @@ func (root *Root) lookup(name string, fctx *fuse.Context) (_ *Head, err error) { ...@@ -2079,7 +2097,7 @@ func (root *Root) lookup(name string, fctx *fuse.Context) (_ *Head, err error) {
// not there - without revMu lock proceed to open @rev view of ZODB // not there - without revMu lock proceed to open @rev view of ZODB
// zconnRev, err := root.zopenAt(fctx, rev) // zconnRev, err := root.zopenAt(fctx, rev)
zconnRev, err := zopen(fctx, root.zdb, &zodb.ConnOptions{At: rev}) zconnRev, err := xzodb.ZOpen(fctx, root.zdb, &zodb.ConnOptions{At: rev})
if err != nil { if err != nil {
return nil, err return nil, err
} }
...@@ -2092,7 +2110,7 @@ func (root *Root) lookup(name string, fctx *fuse.Context) (_ *Head, err error) { ...@@ -2092,7 +2110,7 @@ func (root *Root) lookup(name string, fctx *fuse.Context) (_ *Head, err error) {
if already { if already {
root.revMu.Unlock() root.revMu.Unlock()
// zconnRev.Release() // zconnRev.Release()
transaction.Current(zconnRev.txnCtx).Abort() transaction.Current(zconnRev.TxnCtx).Abort()
return revDir, nil return revDir, nil
} }
...@@ -2103,7 +2121,7 @@ func (root *Root) lookup(name string, fctx *fuse.Context) (_ *Head, err error) { ...@@ -2103,7 +2121,7 @@ func (root *Root) lookup(name string, fctx *fuse.Context) (_ *Head, err error) {
// notify invalidate dentry from inside fs // notify invalidate dentry from inside fs
fsNode: newFSNode(&fsOptions{Sticky: false}), // XXX + Head.OnForget() -> del root.revTab[] fsNode: newFSNode(&fsOptions{Sticky: false}), // XXX + Head.OnForget() -> del root.revTab[]
rev: rev, rev: rev,
zconn: zconnRev, // XXX + Head.OnForget() -> release zconn (= abort zconn.txnCtx) zconn: zconnRev, // XXX + Head.OnForget() -> release zconn (= abort zconn.TxnCtx)
} }
bfdir := &BigFileDir{ bfdir := &BigFileDir{
...@@ -2136,7 +2154,7 @@ func (head *Head) bigopen(ctx context.Context, oid zodb.Oid) (_ *BigFile, err er ...@@ -2136,7 +2154,7 @@ func (head *Head) bigopen(ctx context.Context, oid zodb.Oid) (_ *BigFile, err er
defer xerr.Contextf(&err, "bigopen %s @%s", oid, zconn.At()) defer xerr.Contextf(&err, "bigopen %s @%s", oid, zconn.At())
// XXX better ctx = transaction.PutIntoContext(ctx, txn) // XXX better ctx = transaction.PutIntoContext(ctx, txn)
ctx, cancel := xcontext.Merge(ctx, zconn.txnCtx) ctx, cancel := xcontext.Merge(ctx, zconn.TxnCtx)
defer cancel() defer cancel()
xzfile, err := zconn.Get(ctx, oid) xzfile, err := zconn.Get(ctx, oid)
...@@ -2153,7 +2171,7 @@ func (head *Head) bigopen(ctx context.Context, oid zodb.Oid) (_ *BigFile, err er ...@@ -2153,7 +2171,7 @@ func (head *Head) bigopen(ctx context.Context, oid zodb.Oid) (_ *BigFile, err er
zfile, ok := xzfile.(*ZBigFile) zfile, ok := xzfile.(*ZBigFile)
if !ok { if !ok {
return nil, eINVALf("%s is not a ZBigFile", typeOf(xzfile)) return nil, eINVALf("%s is not a ZBigFile", xzodb.TypeOf(xzfile))
} }
// extract blksize, size and initial approximation for file revision // extract blksize, size and initial approximation for file revision
...@@ -2161,7 +2179,7 @@ func (head *Head) bigopen(ctx context.Context, oid zodb.Oid) (_ *BigFile, err er ...@@ -2161,7 +2179,7 @@ func (head *Head) bigopen(ctx context.Context, oid zodb.Oid) (_ *BigFile, err er
if err != nil { if err != nil {
return nil, err return nil, err
} }
blksize := zfile.blksize blksize := zfile.BlkSize()
// XXX it should be revision of both ZBigFile and its data. But we // XXX it should be revision of both ZBigFile and its data. But we
// cannot get data revision without expensive scan of all ZBigFile's objects. // cannot get data revision without expensive scan of all ZBigFile's objects.
// -> approximate mtime initially with ZBigFile object mtime. // -> approximate mtime initially with ZBigFile object mtime.
...@@ -2188,7 +2206,7 @@ func (head *Head) bigopen(ctx context.Context, oid zodb.Oid) (_ *BigFile, err er ...@@ -2188,7 +2206,7 @@ func (head *Head) bigopen(ctx context.Context, oid zodb.Oid) (_ *BigFile, err er
// only head/ needs δFtail, f.δtail and watches. // only head/ needs δFtail, f.δtail and watches.
if head.rev == 0 { if head.rev == 0 {
head.bfdir.δFmu.Lock() // XXX locking ok? head.bfdir.δFmu.Lock() // XXX locking ok?
head.bfdir.δFtail.Track(f, -1, sizePath, nil) head.bfdir.δFtail.Track(f.zfile, -1, sizePath, nil)
head.bfdir.δFmu.Unlock() head.bfdir.δFmu.Unlock()
// FIXME: scan zfile.blktab - so that we can detect all btree changes // FIXME: scan zfile.blktab - so that we can detect all btree changes
...@@ -2371,7 +2389,7 @@ func _main() (err error) { ...@@ -2371,7 +2389,7 @@ func _main() (err error) {
// also need to traverse BigFile.blktab btree ) // also need to traverse BigFile.blktab btree )
zdb := zodb.NewDB(zstor, &zodb.DBOptions{}) zdb := zodb.NewDB(zstor, &zodb.DBOptions{})
defer xclose(zdb) defer xclose(zdb)
zhead, err := zopen(ctx, zdb, &zodb.ConnOptions{ zhead, err := xzodb.ZOpen(ctx, zdb, &zodb.ConnOptions{
At: at0, At: at0,
// we need zhead.cache to be maintained across several transactions. // we need zhead.cache to be maintained across several transactions.
...@@ -2403,7 +2421,7 @@ func _main() (err error) { ...@@ -2403,7 +2421,7 @@ func _main() (err error) {
fsNode: newFSNode(fSticky), fsNode: newFSNode(fSticky),
head: head, head: head,
fileTab: make(map[zodb.Oid]*BigFile), fileTab: make(map[zodb.Oid]*BigFile),
δFtail: NewΔFtail(zhead.At(), zdb), δFtail: zdata.NewΔFtail(zhead.At(), zdb),
} }
head.bfdir = bfdir head.bfdir = bfdir
......