Commit 124688f9 authored by Kirill Smelkov

X ΔFtail fixes

* t2:
  X ΔFtail.SliceByFileRev: Fix untracked entries to be present uniformly in result
  .
  .
  .
  .
  .
  X test that shows problem of SliceByRootRev where untracked blocks are not added uniformly into whole history
  .
  .
  .
  .
  .
  .
  .
  .
  X Size no longer tracks [0,∞) since we start tracking when zfile is non-empty
  X ΔFtail: `go test -failfast -short -v -run Random -randseed=1626793016249041295` discovered problems
parents 0853cc9f c0b7e4c3
......@@ -66,7 +66,7 @@ type setOid = set.Oid
// .Update(δZ) -> δF - update files δ tail given raw ZODB changes
// .ForgetPast(revCut) - forget changes past revCut
// .SliceByRev(lo, hi) -> []δF - query for all files changes with rev ∈ (lo, hi]
// .SliceByFileRev(file, lo, hi) -> []δfile - query for changes of file with rev ∈ (lo, hi]
// .SliceByFileRev(file, lo, hi) -> []δfile - query for changes of a file with rev ∈ (lo, hi]
// .BlkRevAt(file, #blk, at) - query for what is last revision that changed
// file[#blk] as of @at database state.
//
......@@ -74,7 +74,7 @@ type setOid = set.Oid
//
// δfile:
// .rev↑
// {}blk
// {}blk | EPOCH
//
// XXX concurrent use
//
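
[Editorial note] For orientation, a minimal self-contained Go sketch of the query convention documented above. Tid, ΔFile and sliceByRev here are simplified stand-ins, not the real wcfs types or API; the sketch only illustrates that lo is exclusive, hi is inclusive, and entries are kept in rev↑ order.

package main

import "fmt"

type Tid uint64 // stand-in for zodb.Tid

// ΔFile here is a simplified stand-in: revision + set of changed blocks.
type ΔFile struct {
    Rev  Tid
    Blks map[int64]struct{} // {}blk changed in this revision
}

// sliceByRev returns all entries of vδf with Rev ∈ (lo, hi].
func sliceByRev(vδf []ΔFile, lo, hi Tid) []ΔFile {
    var out []ΔFile
    for _, δf := range vδf {
        if lo < δf.Rev && δf.Rev <= hi {
            out = append(out, δf)
        }
    }
    return out
}

func main() {
    vδf := []ΔFile{
        {Rev: 1, Blks: map[int64]struct{}{0: {}}},
        {Rev: 2, Blks: map[int64]struct{}{0: {}, 1: {}}},
        {Rev: 3, Blks: map[int64]struct{}{2: {}}},
    }
    // (1, 3] -> entries @2 and @3; the change @1 itself is excluded.
    for _, δf := range sliceByRev(vδf, 1, 3) {
        fmt.Println(δf.Rev, δf.Blks)
    }
}
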
......@@ -85,7 +85,7 @@ type ΔFtail struct {
fileIdx map[zodb.Oid]setOid // tree-root -> {} ZBigFile<oid> as of @head XXX -> root2file ?
byFile map[zodb.Oid]*_ΔFileTail // file -> vδf tail XXX
// set of files, which are newly tracked and for which vδE was not yet rebuilt
// set of files, which are newly tracked and for which byFile[foid].vδE was not yet rebuilt
trackNew setOid // {}foid
trackSetZBlk map[zodb.Oid]*zblkTrack // zblk -> {} root -> {}blk as of @head
......@@ -157,14 +157,26 @@ func (δFtail *ΔFtail) Tail() zodb.Tid { return δFtail.δBtail.Tail() }
//
// XXX Track adds tree path to tracked set and associates path root with file.
//
// XXX text
//
// XXX objects in path and zblk must be with .PJar().At() == .head
// Objects in path and zblk must be with .PJar().At() == .head
//
// A root can be associated with several files (each provided on different Track call).
func (δFtail *ΔFtail) Track(file *ZBigFile, blk int64, path []btree.LONode, zblk ZBlk) {
// XXX locking
head := δFtail.Head()
fileAt := file.PJar().At()
if fileAt != head {
panicf("file.at (@%s) != δFtail.head (@%s)", fileAt, head)
}
if zblk != nil {
zblkAt := zblk.PJar().At()
if zblkAt != head {
panicf("zblk.at (@%s) != δFtail.head (@%s)", zblkAt, head)
}
}
// path.at == head is verified by ΔBtail.Track
foid := file.POid()
if blk == -1 {
// XXX blk = ∞ from beginning ?
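
[Editorial note] A hedged, self-contained sketch of the view-consistency precondition that Track now verifies above. The object/tracker types are toys, not the real ZBigFile/ZBlk/ΔFtail: every live object handed to Track must be loaded at exactly the head the tail currently covers, with nil standing for a hole that needs no check.

package main

import "fmt"

type Tid uint64

// object is a stand-in for a ZODB persistent object pinned to some view.
type object struct {
    name string
    at   Tid // view (connection .At()) the object was loaded under
}

type tracker struct {
    head Tid
}

func (t *tracker) Track(objs ...*object) {
    for _, obj := range objs {
        if obj == nil {
            continue // e.g. zblk == nil means a hole - nothing to verify
        }
        if obj.at != t.head {
            panic(fmt.Sprintf("%s.at (@%d) != head (@%d)", obj.name, obj.at, t.head))
        }
    }
    // ... actual tracking would start here ...
}

func main() {
    t := &tracker{head: 10}
    t.Track(&object{"file", 10}, nil, &object{"zblk", 10}) // ok: all @head
    defer func() { fmt.Println("recovered:", recover()) }()
    t.Track(&object{"zblk", 9}) // rejected: loaded under a different view
}
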
......@@ -186,7 +198,7 @@ func (δFtail *ΔFtail) Track(file *ZBigFile, blk int64, path []btree.LONode, zb
δftail, ok := δFtail.byFile[foid]
if !ok {
δftail = &_ΔFileTail{root: roid, vδE: nil /*will need to be rebuilt till past*/}
δftail = &_ΔFileTail{root: roid, vδE: nil /*will need to be rebuilt to past till tail*/}
δFtail.byFile[foid] = δftail
δFtail.trackNew.Add(foid)
}
......@@ -195,7 +207,7 @@ func (δFtail *ΔFtail) Track(file *ZBigFile, blk int64, path []btree.LONode, zb
}
// associate zblk with file, if it was not hole
// associate zblk with root, if it was not hole
if zblk != nil {
zoid := zblk.POid()
zt, ok := δFtail.trackSetZBlk[zoid]
......@@ -221,11 +233,13 @@ func (δFtail *ΔFtail) rebuildAll() (err error) {
defer xerr.Contextf(&err, "ΔFtail rebuildAll")
// XXX locking
δBtail := δFtail.δBtail
δZtail := δBtail.ΔZtail()
db := δBtail.DB()
for foid := range δFtail.trackNew {
δFtail.trackNew.Del(foid)
δftail := δFtail.byFile[foid]
δBtail := δFtail.δBtail
err := δftail.rebuild1(foid, δBtail.ΔZtail(), δBtail.DB())
err := δftail.rebuild1(foid, δZtail, db)
if err != nil {
return err
}
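
[Editorial note] A toy sketch of the reworked loop shape above (names are stand-ins, not the real ΔFtail/ΔBtail): loop-invariant handles are fetched once before iterating, and each newly-tracked file is removed from trackNew and rebuilt on its own.

package main

import "fmt"

type oid string

type fileTail struct{ rebuilt bool }

type ftail struct {
    trackNew map[oid]struct{}
    byFile   map[oid]*fileTail
    history  []string // stand-in for δBtail.ΔZtail()/DB()
}

func (f *ftail) rebuildAll() error {
    history := f.history // fetched once; invariant across the loop
    for foid := range f.trackNew {
        delete(f.trackNew, foid)
        ft := f.byFile[foid]
        if err := ft.rebuild1(foid, history); err != nil {
            return err
        }
    }
    return nil
}

func (ft *fileTail) rebuild1(foid oid, history []string) error {
    ft.rebuilt = true // real code would replay history to rebuild vδE
    return nil
}

func main() {
    f := &ftail{
        trackNew: map[oid]struct{}{"f1": {}},
        byFile:   map[oid]*fileTail{"f1": {}},
    }
    fmt.Println(f.rebuildAll(), f.byFile["f1"].rebuilt)
}
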
......@@ -517,9 +531,11 @@ func (δftail *_ΔFileTail) forgetPast(revCut zodb.Tid) {
//
// the caller must not modify returned slice.
//
// XXX only tracked blocks are guaranteed to be present.
//
// Note: contrary to regular go slicing, low is exclusive while high is inclusive.
func (δFtail *ΔFtail) SliceByFileRev(zfile *ZBigFile, lo, hi zodb.Tid) /*readonly*/[]*ΔFile {
//fmt.Printf("\n")
//fmt.Printf("\nslice f<%s> (@%s,@%s]\n", zfile.POid(), lo, hi)
xtail.AssertSlice(δFtail, lo, hi)
// XXX locking
......@@ -552,7 +568,7 @@ func (δFtail *ΔFtail) SliceByFileRev(zfile *ZBigFile, lo, hi zodb.Tid) /*reado
if δfTail.Rev == tail {
return δfTail
}
if !(tail <= δfTail.Rev) {
if !(tail < δfTail.Rev) {
panic("tail not ↓")
}
}
......@@ -579,7 +595,7 @@ func (δFtail *ΔFtail) SliceByFileRev(zfile *ZBigFile, lo, hi zodb.Tid) /*reado
// vδE[ie] is next epoch
// vδE[ie-1] is epoch that covers hi
// loop through all epochs till lo
// loop through all epochs from hi till lo
for lastEpoch := false; !lastEpoch ; {
// current epoch
var epoch zodb.Tid
......@@ -611,7 +627,6 @@ func (δFtail *ΔFtail) SliceByFileRev(zfile *ZBigFile, lo, hi zodb.Tid) /*reado
Zinblk[zblk] = inblk.Clone()
}
}
// XXX ZinblkAt
} else {
δE := vδE[ie+1]
root = δE.oldRoot
......@@ -620,6 +635,7 @@ func (δFtail *ΔFtail) SliceByFileRev(zfile *ZBigFile, lo, hi zodb.Tid) /*reado
Zinblk[zblk] = inblk.Clone()
}
}
//fmt.Printf("Zinblk: %v\n", Zinblk)
// vδT for current epoch
vδT := δFtail.δBtail.SliceByRootRev(root, epoch, head) // NOTE @head, not hi
......@@ -630,11 +646,54 @@ func (δFtail *ΔFtail) SliceByFileRev(zfile *ZBigFile, lo, hi zodb.Tid) /*reado
ZinblkAt = epoch
}
// merge cumulative vδT(epoch,head] update to Zinblk, so that
// changes to blocks that were not explicitly requested to be
// tracked, are present in resulting slice uniformly.
//
// For example on
//
// at1 T/B0:a,1:b,2:c δDø δ{0,1,2}
// at2 δT{0:d,1:e} δD{c} δ{0,1,2}
// at3 δTø δD{c,d,e} δ{0,1,2}
// at4 δTø δD{c,e} δ{ 1,2}
//
// if tracked={0} for (at1,at4] query changes to 1 should be
// also all present @at2, @at3 and @at4 - because @at2 both 0
// and 1 are changed in the same tracked bucket. Note that
// changes to 2 should not be present at all.
ZinblkAdj := map[zodb.Oid]setI64{}
for _, δT := range vδT {
for blk, δzblk := range δT.ΔKV {
if δzblk.Old != xbtree.VDEL {
inblk, ok := ZinblkAdj[δzblk.Old]
if ok {
inblk.Del(blk)
}
}
if δzblk.New != xbtree.VDEL {
inblk, ok := ZinblkAdj[δzblk.New]
if !ok {
inblk = setI64{}
ZinblkAdj[δzblk.New] = inblk
}
inblk.Add(blk)
}
}
}
for zblk, inblkAdj := range ZinblkAdj {
inblk, ok := Zinblk[zblk]
if !ok {
Zinblk[zblk] = inblkAdj
} else {
inblk.Update(inblkAdj)
}
}
// merge vδZ and vδT of current epoch
for ((iz >= 0 && vδZ[iz].Rev >= epoch) || it >= 0) {
for ((iz >= 0 && vδZ[iz].Rev > epoch) || it >= 0) {
// δZ that is covered by current Zinblk
// -> update δf
if iz >= 0 {
if iz >= 0 && vδZ[iz].Rev > epoch {
δZ := vδZ[iz]
if ZinblkAt <= δZ.Rev {
//fmt.Printf("δZ @%s\n", δZ.Rev)
......@@ -653,7 +712,7 @@ func (δFtail *ΔFtail) SliceByFileRev(zfile *ZBigFile, lo, hi zodb.Tid) /*reado
// δT -> adjust Zinblk + update δf
if it >= 0 {
δT := vδT[it]
//fmt.Printf("δT @%s\n", δT.Rev)
//fmt.Printf("δT @%s %v\n", δT.Rev, δT.ΔKV)
for blk, δzblk := range δT.ΔKV {
// apply in reverse as we go ←
if δzblk.New != xbtree.VDEL {
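
[Editorial note] To make the ZinblkAdj adjustment above concrete, a self-contained toy with simplified types (VDEL, setI64 and δT are stand-ins for the wcfs/xbtree ones): from the cumulative tracked-bucket changes we derive ZinblkAdj, i.e. for every ZBlk value the set of blocks it ends up covering, and merge that into Zinblk, so a block that was never explicitly tracked but shares a bucket with a tracked one is reported uniformly.

package main

import "fmt"

const VDEL = "" // stand-in for xbtree.VDEL (no value)

type setI64 map[int64]struct{}

// δT models one bucket change: blk -> (old zblk, new zblk).
type δT map[int64][2]string

func main() {
    // tracked={0}; @at2 the bucket changes 0:a->d and 1:b->e together.
    vδT := []δT{
        {0: {"a", "d"}, 1: {"b", "e"}}, // @at2
    }

    Zinblk := map[string]setI64{"a": {0: {}}} // what tracking alone gave us

    // build ZinblkAdj from the cumulative δT changes
    ZinblkAdj := map[string]setI64{}
    for _, δt := range vδT {
        for blk, δzblk := range δt {
            old, new_ := δzblk[0], δzblk[1]
            if old != VDEL {
                if inblk, ok := ZinblkAdj[old]; ok {
                    delete(inblk, blk)
                }
            }
            if new_ != VDEL {
                inblk, ok := ZinblkAdj[new_]
                if !ok {
                    inblk = setI64{}
                    ZinblkAdj[new_] = inblk
                }
                inblk[blk] = struct{}{}
            }
        }
    }

    // merge: now Zinblk also knows that d covers blk 0 and e covers blk 1,
    // so changes to untracked blk 1 can be reported alongside tracked blk 0
    for zblk, inblkAdj := range ZinblkAdj {
        inblk, ok := Zinblk[zblk]
        if !ok {
            Zinblk[zblk] = inblkAdj
            continue
        }
        for blk := range inblkAdj {
            inblk[blk] = struct{}{}
        }
    }

    fmt.Println(Zinblk) // map[a:map[0:{}] d:map[0:{}] e:map[1:{}]]
}
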
......@@ -797,7 +856,7 @@ func (δFtail *ΔFtail) _BlkRevAt(ctx context.Context, zf *ZBigFile, blk int64,
}
// if δBtail does not have entry that covers root[blk] - get it
// through zconn that has any .at ∈ (tail, head].
// through any zconn with .at ∈ (tail, head].
if !zblkExact {
xblktab, err := zconn.Get(ctx, root)
if err != nil {
......
......@@ -632,6 +632,7 @@ type BigFile struct {
// blocks that were ever read-accessed (head/ only) XXX locking by bfdir.δFmu ?
// XXX = δFtail.Tracked(f) ?
// XXX goes away if δFtail query returns only tracked blocks
accessed setI64
// inflight loadings of ZBigFile from ZODB.
......