Commit b74dda09 authored by Kirill Smelkov

X Start switching Track from Track(key) to Track(keycov)

Knowing which key coverage is being tracked will be needed to implement
ΔBtail concurrency.
parent 290d417c
......@@ -46,9 +46,10 @@ type Node = blib.Node
type TreeEntry = blib.TreeEntry
type BucketEntry = blib.BucketEntry
type Key = blib.Key
const KeyMax = blib.KeyMax
const KeyMin = blib.KeyMin
type Key = blib.Key
type KeyRange = blib.KeyRange
const KeyMax = blib.KeyMax
const KeyMin = blib.KeyMin
// value is assumed to be persistent reference.
// deletion is represented as VDEL.
......
......@@ -312,20 +312,20 @@ func (δBtail *ΔBtail) Tail() zodb.Tid { return δBtail.δZtail.Tail() }
// path[0] signifies tree root.
// All path elements must be Tree except last one which, for non-empty tree, must be Bucket.
//
// Besides key (which might point to value or hole), δBtail will also track all
// Besides key (which might point to value or hole), δBtail will also track all XXX
// keys covered by leaf node. In particular after request for KeyMax or KeyMin
// to be tracked, δBtail will keep on tracking changes to maximum or minimum
// keys correspondingly.
//
// Objects in path must be with .PJar().At() == .Head()
func (δBtail *ΔBtail) Track(key Key, nodePath []Node) {
// NOTE key not needed for anything besides tracing
func (δBtail *ΔBtail) Track(keycov KeyRange, nodePath []Node) {
// NOTE key not needed for anything besides tracing XXX kill comment
// (tracking set will be added with all keys, covered by leaf keyrange)
if traceΔBtail {
pathv := []string{}
for _, node := range nodePath { pathv = append(pathv, vnode(node)) }
tracefΔBtail("\nTrack [%v] %s\n", key, strings.Join(pathv, " -> "))
tracefΔBtail("\nTrack %s %s\n", keycov, strings.Join(pathv, " -> "))
tracefΔBtail("trackSet: %s\n", δBtail.trackSet) // XXX locking
}
......@@ -338,7 +338,7 @@ func (δBtail *ΔBtail) Track(key Key, nodePath []Node) {
}
path := nodePathToPath(nodePath)
δBtail.track(key, path)
δBtail.track(keycov, path)
}
// nodePathToPath converts path from []Node to []Oid.
......@@ -366,7 +366,7 @@ func nodePathToPath(nodePath []Node) (path []zodb.Oid) {
return path
}
func (δBtail *ΔBtail) track(key Key, path []zodb.Oid) {
func (δBtail *ΔBtail) track(keycov KeyRange, path []zodb.Oid) {
// XXX locking
// first normalize path: remove embedded bucket and check if it was an
......@@ -380,13 +380,13 @@ func (δBtail *ΔBtail) track(key Key, path []zodb.Oid) {
root := path[0]
// nothing to do if key is already tracked
// nothing to do if keycov is already tracked
leaf := path[len(path)-1]
if δBtail.trackSet.Has(leaf) {
tracefΔBtail("->T: nop\n")
path_ := δBtail.trackSet.Path(leaf)
if !pathEqual(path, path_) {
panicf("BUG: key %s is already tracked via path=%v\ntrack requests path=%v", kstr(key), path_, path)
panicf("BUG: keyrange %s is already tracked via path=%v\ntrack requests path=%v", keycov, path_, path)
}
return
}
......@@ -568,7 +568,6 @@ func vδTBuild(root zodb.Oid, trackNew blib.PPTreeSubSet, δZtail *zodb.ΔTail,
atPrev = δZtail.Tail()
}
// XXX δkv instead of δT ?
δkv, δtrackNew, δtkeycov_, err := vδTBuild1(atPrev, δZ, trackNewCur, db)
if err != nil {
return nil, nil, err
......@@ -682,7 +681,7 @@ func vδTBuild1(atPrev zodb.Tid, δZ zodb.ΔRevEntry, trackNew blib.PPTreeSubSet
// δrevSet indicates set of new revisions created in vδT.
// vδT is modified inplace.
func vδTMergeInplace(pvδT *[]ΔTree, vδTnew []ΔTree) (δrevSet setTid) {
// TODO optimize to go through vδT and vδTnew sequentially if needed
// TODO if needed: optimize to go through vδT and vδTnew sequentially
δrevSet = setTid{}
for _, δT := range vδTnew {
newRevEntry := vδTMerge1Inplace(pvδT, δT)
......
......@@ -1768,7 +1768,7 @@ func trackKeys(δbtail *ΔBtail, t *xbtreetest.Commit, keys setKey) {
// tracking set. By aligning initial state to the same as after
// T1->ø, we test what will happen on ø->T2.
b := t.Xkv.Get(k)
δbtail.track(k, b.Path())
δbtail.track(b.Keycov, b.Path())
}
}
......
......@@ -412,46 +412,48 @@ func (bf *ZBigFile) BlkSize() int64 {
// it also returns:
//
// - BTree path in .blktab to loaded block,
// - blocks covered by leaf node in the BTree path,
// - max(_.serial for _ in ZBlk(#blk), all BTree/Bucket that lead to ZBlk)
// which provides a rough upper-bound estimate for file[blk] revision.
//
// TODO load into user-provided buf.
func (bf *ZBigFile) LoadBlk(ctx context.Context, blk int64) (_ []byte, treePath []btree.LONode, zblk ZBlk, blkRevMax zodb.Tid, err error) {
// XXX blkCov -> treeLeafCov ?
func (bf *ZBigFile) LoadBlk(ctx context.Context, blk int64) (_ []byte, treePath []btree.LONode, blkCov btree.LKeyRange, zblk ZBlk, blkRevMax zodb.Tid, err error) {
defer xerr.Contextf(&err, "bigfile %s: loadblk %d", bf.POid(), blk)
err = bf.PActivate(ctx)
if err != nil {
return nil, nil, nil, 0, err
return nil, nil, btree.LKeyRange{}, nil, 0, err
}
defer bf.PDeactivate()
blkRevMax = 0
xzblk, ok, err := bf.blktab.VGet(ctx, blk, func(node btree.LONode, keycov btree.LKeyRange) {
// XXX use keycov
blkCov = keycov // will be set last for leaf
treePath = append(treePath, node)
blkRevMax = tidmax(blkRevMax, node.PSerial())
})
if err != nil {
return nil, nil, nil, 0, err
return nil, nil, btree.LKeyRange{}, nil, 0, err
}
if !ok {
return make([]byte, bf.blksize), treePath, nil, blkRevMax, nil
return make([]byte, bf.blksize), treePath, blkCov, nil, blkRevMax, nil
}
zblk, err = vZBlk(xzblk)
if err != nil {
return nil, nil, nil, 0, err
return nil, nil, btree.LKeyRange{}, nil, 0, err
}
blkdata, zblkrev, err := zblk.LoadBlkData(ctx)
if err != nil {
return nil, nil, nil, 0, err
return nil, nil, btree.LKeyRange{}, nil, 0, err
}
blkRevMax = tidmax(blkRevMax, zblkrev)
l := int64(len(blkdata))
if l > bf.blksize {
return nil, nil, nil, 0, fmt.Errorf("zblk %s: invalid blk: size = %d (> blksize = %d)", zblk.POid(), l, bf.blksize)
return nil, nil, btree.LKeyRange{}, nil, 0, fmt.Errorf("zblk %s: invalid blk: size = %d (> blksize = %d)", zblk.POid(), l, bf.blksize)
}
// append trailing \0 to data to reach .blksize
......@@ -461,38 +463,39 @@ func (bf *ZBigFile) LoadBlk(ctx context.Context, blk int64) (_ []byte, treePath
blkdata = d
}
return blkdata, treePath, zblk, blkRevMax, nil
return blkdata, treePath, blkCov, zblk, blkRevMax, nil
}
// Size returns whole file size.
//
// it also returns BTree path scanned to obtain the size.
func (bf *ZBigFile) Size(ctx context.Context) (_ int64, treePath []btree.LONode, err error) {
// XXX naming leafCov -> blkCov ?
func (bf *ZBigFile) Size(ctx context.Context) (_ int64, treePath []btree.LONode, leafCov btree.LKeyRange, err error) {
defer xerr.Contextf(&err, "bigfile %s: size", bf.POid())
err = bf.PActivate(ctx)
if err != nil {
return 0, nil, err
return 0, nil, btree.LKeyRange{}, err
}
defer bf.PDeactivate()
tailblk, ok, err := bf.blktab.VMaxKey(ctx, func(node btree.LONode, keycov btree.LKeyRange) {
// XXX use keycov
leafCov = keycov // will be set last for leaf
treePath = append(treePath, node)
})
if err != nil {
return 0, nil, err
return 0, nil, btree.LKeyRange{}, err
}
if !ok {
return 0, treePath, nil
return 0, treePath, leafCov, nil
}
size := (tailblk + 1) * bf.blksize
if size / bf.blksize != tailblk + 1 {
return 0, nil, syscall.EFBIG // overflow
return 0, nil, btree.LKeyRange{}, syscall.EFBIG // overflow
}
return size, treePath, nil
return size, treePath, leafCov, nil
}
// vZBlk checks and converts xzblk to a ZBlk object.
......
......@@ -110,17 +110,17 @@ func TestZBlk(t *testing.T) {
t.Fatalf("zf: [1] -> %#v; want z1", z1_)
}
size, _, err := zf.Size(ctx); X(err)
size, _, _, err := zf.Size(ctx); X(err)
assert.Equal(size, int64(zf_size), "ZBigFile size wrong")
// LoadBlk
z0Data, _, _, _, err = zf.LoadBlk(ctx, 1); X(err)
z0Data, _, _, _, _, err = zf.LoadBlk(ctx, 1); X(err)
assert.Equal(len(z0Data), int(zf.blksize))
z0Data = bytes.TrimRight(z0Data, "\x00")
assert.Equal(z0Data, z0DataOK)
z1Data, _, _, _, err = zf.LoadBlk(ctx, 3); X(err)
z1Data, _, _, _, _, err = zf.LoadBlk(ctx, 3); X(err)
assert.Equal(len(z1Data), int(zf.blksize))
z1Data = bytes.TrimRight(z1Data, "\x00")
assert.Equal(z1Data, z1DataOK)
......
......@@ -195,16 +195,16 @@ func (δFtail *ΔFtail) Tail() zodb.Tid { return δFtail.δBtail.Tail() }
// ---- Track/rebuild/Update/Forget ----
// Track associates file[blk]@head with tree path and zblk object.
// Track associates file[blkcov]@head with tree path and zblk object.
//
// Path root becomes associated with the file, and the path and zblk object become tracked.
// One root can be associated with several files (each provided on different Track calls).
//
// zblk can be nil, which represents a hole.
// blk=-1 should be used for tracking after ZBigFile.Size() query (no zblk is accessed at all).
// XXX kill: blk=-1 should be used for tracking after ZBigFile.Size() query (no zblk is accessed at all).
//
// Objects in path and zblk must be with .PJar().At() == .head
func (δFtail *ΔFtail) Track(file *ZBigFile, blk int64, path []btree.LONode, zblk ZBlk) {
func (δFtail *ΔFtail) Track(file *ZBigFile, blk int64, path []btree.LONode, blkcov btree.LKeyRange, zblk ZBlk) {
// XXX locking
head := δFtail.Head()
......@@ -222,10 +222,10 @@ func (δFtail *ΔFtail) Track(file *ZBigFile, blk int64, path []btree.LONode, zb
// path.at == head is verified by ΔBtail.Track
foid := file.POid()
if blk == -1 {
blk = xbtree.KeyMax
}
δFtail.δBtail.Track(blk, path)
// if blk == -1 {
// blk = xbtree.KeyMax
// }
δFtail.δBtail.Track(blkcov, path)
rootObj := path[0].(*btree.LOBTree)
root := rootObj.POid()
......
......@@ -266,8 +266,8 @@ func testΔFtail(t_ *testing.T, testq chan ΔFTestEntry) {
// ( later retrackAll should be called after new epoch to track zfile[-∞,∞) again )
retrackAll := func() {
for blk := range blkTab {
_, path, zblk, _, err := zfile.LoadBlk(ctx, blk); X(err)
δFtail.Track(zfile, blk, path, zblk)
_, path, blkcov, zblk, _, err := zfile.LoadBlk(ctx, blk); X(err)
δFtail.Track(zfile, blk, path, blkcov, zblk)
}
}
retrackAll()
......@@ -614,8 +614,8 @@ func TestΔFtailSliceUntrackedUniform(t_ *testing.T) {
zfile, _ := t.XLoadZFile(ctx, zconn)
xtrackBlk := func(blk int64) {
_, path, zblk, _, err := zfile.LoadBlk(ctx, blk); X(err)
δFtail.Track(zfile, blk, path, zblk)
_, path, blkcov, zblk, _, err := zfile.LoadBlk(ctx, blk); X(err)
δFtail.Track(zfile, blk, path, blkcov, zblk)
}
// track 0, but do not track 1 and 2.
......
......@@ -963,14 +963,14 @@ retry:
zfile := file.zfile
// XXX need to do only if δfile.Size changed
size, sizePath, err := zfile.Size(ctx)
size, sizePath, blkCov, err := zfile.Size(ctx)
if err != nil {
return err
}
file.size = size
// see "3) for */head/data the following invariant is maintained..."
bfdir.δFtail.Track(zfile, -1, sizePath, nil)
bfdir.δFtail.Track(zfile, -1, sizePath, blkCov, nil)
// XXX we can miss a change to file if δblk is not yet tracked
// -> need to update file.rev at read time -> locking=XXX
......@@ -1283,7 +1283,7 @@ func (f *BigFile) readBlk(ctx context.Context, blk int64, dest []byte) (err erro
}
// noone was loading - we became responsible to load this block
blkdata, treepath, zblk, blkrevMax, err := f.zfile.LoadBlk(ctx, blk)
blkdata, treepath, blkcov, zblk, blkrevMax, err := f.zfile.LoadBlk(ctx, blk)
loading.blkdata = blkdata
loading.err = err
......@@ -1298,7 +1298,7 @@ func (f *BigFile) readBlk(ctx context.Context, blk int64, dest []byte) (err erro
// we have the data - it can be used after watchers are updated
// XXX should we use ctx here? (see readPinWatchers comments)
f.readPinWatchers(ctx, blk, treepath, zblk, blkrevMax)
f.readPinWatchers(ctx, blk, treepath, blkcov, zblk, blkrevMax)
// data can be used now
close(loading.ready)
......@@ -1516,7 +1516,7 @@ func (w *Watch) _pin(ctx context.Context, blk int64, rev zodb.Tid) (err error) {
// XXX do we really need to use/propagate caller context here? ideally update
// watchers should be synchronous, and in practice we just use 30s timeout.
// Should a READ interrupt cause watch update failure? -> probably no
func (f *BigFile) readPinWatchers(ctx context.Context, blk int64, treepath []btree.LONode, zblk ZBlk, blkrevMax zodb.Tid) {
func (f *BigFile) readPinWatchers(ctx context.Context, blk int64, treepath []btree.LONode, blkcov btree.LKeyRange, zblk ZBlk, blkrevMax zodb.Tid) {
// only head/ is being watched for
if f.head.rev != 0 {
return
......@@ -1531,7 +1531,7 @@ func (f *BigFile) readPinWatchers(ctx context.Context, blk int64, treepath []btr
bfdir := f.head.bfdir
δFtail := bfdir.δFtail
bfdir.δFmu.Lock() // XXX locking correct? XXX -> better push down?
δFtail.Track(f.zfile, blk, treepath, zblk) // XXX pass in zblk.rev here?
δFtail.Track(f.zfile, blk, treepath, blkcov, zblk) // XXX pass in zblk.rev here?
f.accessed.Add(blk)
bfdir.δFmu.Unlock()
......@@ -2229,7 +2229,7 @@ func (head *Head) bigopen(ctx context.Context, oid zodb.Oid) (_ *BigFile, err er
rev := zfile.PSerial()
zfile.PDeactivate()
size, sizePath, err := zfile.Size(ctx)
size, sizePath, blkCov, err := zfile.Size(ctx)
if err != nil {
return nil, err
}
......@@ -2248,7 +2248,7 @@ func (head *Head) bigopen(ctx context.Context, oid zodb.Oid) (_ *BigFile, err er
if head.rev == 0 {
// see "3) for */head/data the following invariant is maintained..."
head.bfdir.δFmu.Lock() // XXX locking ok?
head.bfdir.δFtail.Track(f.zfile, -1, sizePath, nil)
head.bfdir.δFtail.Track(f.zfile, -1, sizePath, blkCov, nil)
head.bfdir.δFmu.Unlock()
// FIXME: scan zfile.blktab - so that we can detect all btree changes
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment