Commit e7b598c6 authored by Kirill Smelkov

X start of ΔFtail.SliceByFileRev rework to function via merging δB and δZ histories on the fly

parent 19c593ab
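The idea, in a nutshell: instead of maintaining a precomputed vδF, SliceByFileRev should query δBtail for blktab (tree) changes and ΔZtail for ZBlk data changes, and merge the two revision-ordered histories while answering the query. The standalone sketch below (not code from this commit) illustrates one way such a merge can work; Rev, δTree, δZBlk, δFile, mergeFileHistory and the zblkBlk index are hypothetical stand-ins for the real ΔBtail/ΔZtail types, and for simplicity it scans forward assuming the blk↔zblk mapping at the low edge of the range is known, whereas the real ΔFtail only knows the state as of @head.

package main

import "fmt"

// Rev is a stand-in for zodb.Tid.
type Rev uint64

// δTree is a stand-in for one blktab (tree) change:
// at revision Rev, block blk became backed by zblk ByBlk[blk] ("" = hole).
type δTree struct {
    Rev   Rev
    ByBlk map[int64]string
}

// δZBlk is a stand-in for one ZBlk data change: object Oid changed at Rev.
type δZBlk struct {
    Rev Rev
    Oid string
}

// δFile is the merged result: the set of file blocks that changed at Rev.
type δFile struct {
    Rev  Rev
    Blks map[int64]bool
}

// mergeFileHistory merges vδT and vδZ (both ascending by Rev) into file history.
//
// zblkBlk maps zblk oid -> set of blocks it backs as of the start of the scan,
// and is kept up to date while tree changes are replayed.
func mergeFileHistory(vδT []δTree, vδZ []δZBlk, zblkBlk map[string]map[int64]bool) []δFile {
    var vδf []δFile
    i, j := 0, 0
    for i < len(vδT) || j < len(vδZ) {
        blks := map[int64]bool{}
        var rev Rev

        // pick the next revision from whichever history comes first;
        // if both change at the same revision, take both.
        takeT := i < len(vδT) && (j >= len(vδZ) || vδT[i].Rev <= vδZ[j].Rev)
        takeZ := j < len(vδZ) && (i >= len(vδT) || vδZ[j].Rev <= vδT[i].Rev)

        if takeT {
            rev = vδT[i].Rev
            for blk, zoid := range vδT[i].ByBlk {
                blks[blk] = true // blk is now backed by another zblk (or became a hole)
                for _, set := range zblkBlk {
                    delete(set, blk) // drop stale zblk -> blk association
                }
                if zoid != "" {
                    if zblkBlk[zoid] == nil {
                        zblkBlk[zoid] = map[int64]bool{}
                    }
                    zblkBlk[zoid][blk] = true
                }
            }
            i++
        }
        if takeZ {
            rev = vδZ[j].Rev
            for blk := range zblkBlk[vδZ[j].Oid] {
                blks[blk] = true // data of the zblk backing blk changed
            }
            j++
        }

        if len(blks) != 0 {
            vδf = append(vδf, δFile{Rev: rev, Blks: blks})
        }
    }
    return vδf
}

func main() {
    // block 0 is backed by zblk "a" when the scan starts
    zblkBlk := map[string]map[int64]bool{"a": {0: true}}
    vδT := []δTree{{Rev: 10, ByBlk: map[int64]string{1: "b"}}} // blk 1 -> zblk "b" at rev 10
    vδZ := []δZBlk{{Rev: 12, Oid: "a"}, {Rev: 15, Oid: "b"}}   // "a" changes @12, "b" @15
    for _, δf := range mergeFileHistory(vδT, vδZ, zblkBlk) {
        fmt.Println(δf.Rev, δf.Blks)
    }
    // -> 10 map[1:true]   (blktab change)
    //    12 map[0:true]   (data of "a", which backs blk 0)
    //    15 map[1:true]   (data of "b", which backs blk 1)
}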
@@ -81,7 +81,7 @@ type ΔFtail struct {
 	trackSetZBlk map[zodb.Oid]*zblkTrack // zblk -> {} root -> {}blk as of @head
 	// XXX kill
-	///*
+	/*
 	// XXX don't need vδF - everything is reconstructed at runtime from .δBtail.vδT
 	// this way we also don't need to keep up updating vδF from vδT on its rebuild during.
 	// data with δF changes. Actual for part of tracked set that was taken
@@ -91,7 +91,7 @@ type ΔFtail struct {
 	// tracked ZBlk that are not yet taken into account in current vδF.
 	// grows on new track requests; flushes on queries and update.
 	trackNew map[zodb.Oid]map[zodb.Oid]*zblkTrack // {} foid -> {} zoid -> zblk
-	//*/
+	*/
 }
 // zblkTrack keeps information in which root/blocks ZBlk is present as of @head.
@@ -126,7 +126,7 @@ func NewΔFtail(at0 zodb.Tid, db *zodb.DB) *ΔFtail {
 		fileIdx:       map[zodb.Oid]setOid{},
 		trackSetZFile: setOid{},
 		trackSetZBlk:  map[zodb.Oid]*zblkTrack{},
-		trackNew:      map[zodb.Oid]map[zodb.Oid]*zblkTrack{},
+//		trackNew:      map[zodb.Oid]map[zodb.Oid]*zblkTrack{},
 	}
 }
@@ -190,6 +190,7 @@ func (δFtail *ΔFtail) Track(file *ZBigFile, blk int64, path []btree.LONode, zb
 	}
 	inblk.Add(blk)
+	/* XXX kill
 	if !ok {
 		// zblk was not associated with this file
 		ft := δFtail.trackNew[foid]
@@ -199,17 +200,8 @@ func (δFtail *ΔFtail) Track(file *ZBigFile, blk int64, path []btree.LONode, zb
 		}
 		ft[zoid] = zt
 	}
-	}
-	// XXX mark something dirty so that LastBlkRev and Slice* know what to rebuild?
-	// XXX debug
-	/*
-	leaf := path[len(path)-1].(*btree.LOBucket)
-	for _, e := range leaf.Entryv() { // XXX activate
-		δFtail.tracked.Add(e.Key())
-	}
 	*/
-}
 }
 // Update updates δFtail given raw ZODB changes.
@@ -225,7 +217,6 @@ func (δFtail *ΔFtail) Track(file *ZBigFile, blk int64, path []btree.LONode, zb
 //func (δFtail *ΔFtail) Update(δZ *zodb.EventCommit, zhead *xzodb.ZConn) (_ ΔF, err error) {
 func (δFtail *ΔFtail) Update(δZ *zodb.EventCommit) (_ ΔF, err error) {
 	defer xerr.Contextf(&err, "ΔFtail update %s -> %s", δFtail.Head(), δZ.Tid)
-	// XXX δFtail.update() first?
 	// XXX verify zhead.At() == δFtail.Head()
 	// XXX locking
@@ -328,10 +319,11 @@ func (δFtail *ΔFtail) Update(δZ *zodb.EventCommit) (_ ΔF, err error) {
 	// fmt.Printf("-> δF: %v\n", δF)
-	δFtail.vδF = append(δFtail.vδF, δF)
+//	δFtail.vδF = append(δFtail.vδF, δF)
 	return δF, nil
 }
+/*
 // XXX kill after vδF is gone
 // update processes new track requests and updates vδF.
 //
@@ -378,6 +370,7 @@ func (δFtail *ΔFtail) update(file *ZBigFile) {
 		}
 	}
 }
+*/
 // ForgetPast discards all δFtail entries with rev ≤ revCut.
 func (δFtail *ΔFtail) ForgetPast(revCut zodb.Tid) {
@@ -399,50 +392,11 @@ func (δFtail *ΔFtail) ForgetPast(revCut zodb.Tid) {
 // the caller must not modify returned slice.
 //
 // Note: contrary to regular go slicing, low is exclusive while high is inclusive.
-func (δFtail *ΔFtail) SliceByFileRev(file *ZBigFile, lo, hi zodb.Tid) /*readonly*/[]*ΔFile {
+func (δFtail *ΔFtail) SliceByFileRev(zfile *ZBigFile, lo, hi zodb.Tid) /*readonly*/[]*ΔFile {
 	xtail.AssertSlice(δFtail, lo, hi)
-	// FIXME rework to just query .δBtail.SliceByRootRev(file.blktab, lo, hi) +
+	// query .δBtail.SliceByRootRev(file.blktab, lo, hi) +
 	// merge δZBlk history with that.
-	// XXX locking?
-	δFtail.update(file)
-	// find vδF range corresponding to (lo, hi]
-	// XXX linear scan
-	vδF := δFtail.vδF
-	if len(vδF) == 0 {
-		return nil
-	}
-	// find max j : [j].rev ≤ hi	XXX linear scan -> binary search
-	j := len(vδF)-1
-	for ; j >= 0 && vδF[j].Rev > hi; j-- {}
-	if j < 0 {
-		return nil // ø
-	}
-	// find max i : [i].rev > low	XXX linear scan -> binary search
-	i := j
-	for ; i >= 0 && vδF[i].Rev > lo; i-- {}
-	i++
-	vδF = vδF[i:j+1]
-	// filter found changed to have only file-related bits
-	foid := file.POid()
-	var vδfile []*ΔFile
-	for _, δF := range vδF {
-		δfile, ok := δF.ByFile[foid]
-		if ok {
-			vδfile = append(vδfile, δfile)
-		}
-	}
-	// XXX merge into vδF zblk from not yet handled tracked part
-	return vδfile
 	// merging tree (δT) and Zblk (δZblk) histories into file history (δFile):
@@ -457,19 +411,10 @@ func (δFtail *ΔFtail) SliceByFileRev(file *ZBigFile, lo, hi zodb.Tid) /*readon
 	//
 	// δFile	────────o───────o──────x─────x────────────────────────
-	/*
-	vδZ := δFtail.δBtail.ΔZtail().SliceByRev(lo, hi)
-	// XXX stub that takes only ZBlk changes into account
-	// XXX dumb
-	for _, δZ := range vδZ {
-	}
-	*/
-	/*
-	// XXX activate zfile?
-	vδT := δFtail.δBtail.SliceByRootRev(file.zfile.blktab, lo, hi)
+	vδT := δFtail.δBtail.SliceByRootRev(zfile.blktab, lo, hi) // XXX needs activate zfile
+	vδZ := δFtail.δBtail.ΔZtail().SliceByRev(lo, hi)
+	var vδf []*ΔFile
 	// state of `{} blk -> zblk` as we are scanning ↓
 	δblktab := map[int64]struct {
@@ -497,8 +442,49 @@ func (δFtail *ΔFtail) SliceByFileRev(file *ZBigFile, lo, hi zodb.Tid) /*readon
 		...
 	}
 	}
-	*/
+	return vδf
+	/*
+	// XXX locking?
+	δFtail.update(file)
+	// find vδF range corresponding to (lo, hi]
+	// XXX linear scan
+	vδF := δFtail.vδF
+	if len(vδF) == 0 {
+		return nil
+	}
+	// find max j : [j].rev ≤ hi	XXX linear scan -> binary search
+	j := len(vδF)-1
+	for ; j >= 0 && vδF[j].Rev > hi; j-- {}
+	if j < 0 {
+		return nil // ø
+	}
+	// find max i : [i].rev > low	XXX linear scan -> binary search
+	i := j
+	for ; i >= 0 && vδF[i].Rev > lo; i-- {}
+	i++
+	vδF = vδF[i:j+1]
+	// filter found changed to have only file-related bits
+	foid := file.POid()
+	var vδfile []*ΔFile
+	for _, δF := range vδF {
+		δfile, ok := δF.ByFile[foid]
+		if ok {
+			vδfile = append(vδfile, δfile)
+		}
+	}
+	// XXX merge into vδF zblk from not yet handled tracked part
+	return vδfile
+	*/
 }
 // XXX rename -> BlkRevAt
...
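Aside on the "XXX linear scan -> binary search" notes in the (now commented-out) vδF slicing above: since the history is ordered by revision, the (lo, hi] window (low exclusive, high inclusive, as the doc comment states) can be located with sort.Search. A minimal standalone sketch with a hypothetical entry type, not code from this commit:

package main

import (
    "fmt"
    "sort"
)

type entry struct{ Rev uint64 }

// sliceByRev returns the subslice of v (ascending by Rev) with lo < Rev ≤ hi,
// i.e. low exclusive, high inclusive, matching SliceByFileRev's convention.
func sliceByRev(v []entry, lo, hi uint64) []entry {
    i := sort.Search(len(v), func(k int) bool { return v[k].Rev > lo }) // first Rev > lo
    j := sort.Search(len(v), func(k int) bool { return v[k].Rev > hi }) // first Rev > hi
    return v[i:j]
}

func main() {
    v := []entry{{1}, {3}, {5}, {8}}
    fmt.Println(sliceByRev(v, 1, 5)) // -> [{3} {5}]
}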