Commit 0853cc9f authored by Kirill Smelkov's avatar Kirill Smelkov

X ΔFtail + tests

- Reimplement ΔFtail queries via gluing ΔBtail and ΔZtail data on the fly.
  This helps to avoid implementing complex rebuild logic in ΔFtail.
  The only place that needs to have that complexity is now ΔBtail, and there it
  already works in draft form.

- Add ΔFtail tests.

- Add notion of epochs to ΔFtail. Epochs correspond to ZBigFile objects changes
  (creation and deletion). Unfortunately handling ZBigFile object changes
  turned out to be necessary to keep wcfs tests in passing state.

- Move common testing infrastructure - that is used by both ΔBtail and ΔFtail - to xbtreetest package.

- Add tests for ΔBtail.SliceByRootRev aliasing

- Lazy rebuild is now on

- ΔBtail.GetAt reworked

...

* t2: (112 commits)
  X wcfs: v↑ NEO/go (checkpoint)
  .
  .
  .
  .
  .
  .
  .
  .
  .
  .
  X ΔFtail: Rebuild vδE after first track
  .
  .
  .
  .
  .
  .
  .
  .
  ...
parents f91982af d13f11ca
......@@ -10,7 +10,7 @@ require (
github.com/pkg/errors v0.9.1
github.com/stretchr/testify v1.7.0
lab.nexedi.com/kirr/go123 v0.0.0-20210302025843-863c4602a230
lab.nexedi.com/kirr/neo/go v0.0.0-20210524152903-d02d65559752
lab.nexedi.com/kirr/neo/go v0.0.0-20210720105030-d99bf118d61a
)
// we use kirr/go-fuse@y/nodefs-cancel
......
......@@ -197,3 +197,5 @@ lab.nexedi.com/kirr/neo/go v0.0.0-20210503113049-7fba56df234c h1:+M4xtOKZqy7oC6L
lab.nexedi.com/kirr/neo/go v0.0.0-20210503113049-7fba56df234c/go.mod h1:llI3hcJJMACe+rYuXUfS5dljjwIrlBMfJ1ZeRcey96A=
lab.nexedi.com/kirr/neo/go v0.0.0-20210524152903-d02d65559752 h1:knRAqs0xLytZrxWHkCccg9xyAbAgzGFnyHE2rdg7onI=
lab.nexedi.com/kirr/neo/go v0.0.0-20210524152903-d02d65559752/go.mod h1:llI3hcJJMACe+rYuXUfS5dljjwIrlBMfJ1ZeRcey96A=
lab.nexedi.com/kirr/neo/go v0.0.0-20210720105030-d99bf118d61a h1:ex8P5oGhvDDp4y3HSIwGfWx++waqU9dKnrAkITMeWQs=
lab.nexedi.com/kirr/neo/go v0.0.0-20210720105030-d99bf118d61a/go.mod h1:llI3hcJJMACe+rYuXUfS5dljjwIrlBMfJ1ZeRcey96A=
......@@ -453,8 +453,8 @@ func diffX(ctx context.Context, a, b Node, δZTC setOid, trackSet blib.PPTreeSub
// a, b point to top of subtrees @old and @new revisions.
// δZTC is connected set of objects covering δZT (objects changed in this tree in old..new).
func diffT(ctx context.Context, A, B *Tree, δZTC setOid, trackSet blib.PPTreeSubSet) (δ map[Key]ΔValue, δtrack *blib.ΔPPTreeSubSet, δtkeycov *blib.RangedKeySet, err error) {
tracefDiff(" diffT %s %s\n", xidOf(A), xidOf(B))
defer xerr.Contextf(&err, "diffT %s %s", xidOf(A), xidOf(B))
tracefDiff(" diffT %s %s\n", xzodb.XidOf(A), xzodb.XidOf(B))
defer xerr.Contextf(&err, "diffT %s %s", xzodb.XidOf(A), xzodb.XidOf(B))
δ = map[Key]ΔValue{}
δtrack = blib.NewΔPPTreeSubSet()
......@@ -887,8 +887,8 @@ func δMerge(δ, δ2 map[Key]ΔValue) error {
// diffB computes difference in between two buckets.
// see diffX for details.
func diffB(ctx context.Context, a, b *Bucket) (δ map[Key]ΔValue, err error) {
tracefDiff(" diffB %s %s\n", xidOf(a), xidOf(b))
defer xerr.Contextf(&err, "diffB %s %s", xidOf(a), xidOf(b))
tracefDiff(" diffB %s %s\n", xzodb.XidOf(a), xzodb.XidOf(b))
defer xerr.Contextf(&err, "diffB %s %s", xzodb.XidOf(a), xzodb.XidOf(b))
// XXX oid can be InvalidOid for T/B... (i.e. B is part of T and is not yet committed separately)
var av []BucketEntry
......@@ -952,13 +952,10 @@ func diffB(ctx context.Context, a, b *Bucket) (δ map[Key]ΔValue, err error) {
// zgetNodeOrNil returns btree node corresponding to zconn.Get(oid) .
// if the node does not exist, (nil, ok) is returned.
func zgetNodeOrNil(ctx context.Context, zconn *zodb.Connection, oid zodb.Oid) (_ Node, err error) {
func zgetNodeOrNil(ctx context.Context, zconn *zodb.Connection, oid zodb.Oid) (node Node, err error) {
defer xerr.Contextf(&err, "getnode %s@%s", oid, zconn.At())
xnode, err := zconn.Get(ctx, oid)
if err != nil {
if xzodb.IsErrNoData(err) {
err = nil
}
xnode, err := xzodb.ZGetOrNil(ctx, zconn, oid)
if xnode == nil || err != nil {
return nil, err
}
......@@ -966,20 +963,6 @@ func zgetNodeOrNil(ctx context.Context, zconn *zodb.Connection, oid zodb.Oid) (_
if !ok {
return nil, fmt.Errorf("unexpected type: %s", zodb.ClassOf(xnode))
}
// activate the node to find out it really exists
// after removal on storage, the object might have stayed in Connection
// cache due to e.g. PCachePinObject, and it will be PActivate that
// will return "deleted" error.
err = node.PActivate(ctx)
if err != nil {
if xzodb.IsErrNoData(err) {
return nil, nil
}
return nil, err
}
node.PDeactivate()
return node, nil
}
......@@ -993,15 +976,6 @@ func vOid(xvalue interface{}) (zodb.Oid, error) {
return value.POid(), nil
}
// xidOf returns string representation of object xid ("ø" for nil object).
func xidOf(obj zodb.IPersistent) string {
	// nil interface means "no object"
	if obj == nil {
		return "ø"
	}
	// an interface wrapping a typed nil pointer also means "no object"
	if reflect.ValueOf(obj).IsNil() {
		return "ø"
	}
	xid := zodb.Xid{At: obj.PJar().At(), Oid: obj.POid()}
	return xid.String()
}
func (rn *nodeInRange) String() string {
done := " "; if rn.done { done = "*" }
return fmt.Sprintf("%s%s%s", done, rn.keycov, vnode(rn.node))
......
......@@ -26,13 +26,13 @@ import (
"strings"
)
// kvdiff returns difference in between kv1 and kv2.
// KVDiff returns difference in between kv1 and kv2.
const DEL = "ø" // DEL means deletion
type Δstring struct {
Old string
New string
}
func kvdiff(kv1, kv2 map[Key]string) map[Key]Δstring {
func KVDiff(kv1, kv2 map[Key]string) map[Key]Δstring {
delta := map[Key]Δstring{}
keys := setKey{}
for k := range kv1 { keys.Add(k) }
......@@ -51,8 +51,8 @@ func kvdiff(kv1, kv2 map[Key]string) map[Key]Δstring {
return delta
}
// kvtxt returns string representation of {} kv.
func kvtxt(kv map[Key]string) string {
// KVTxt returns string representation of {} kv.
func KVTxt(kv map[Key]string) string {
if len(kv) == 0 {
return "ø"
}
......
......@@ -27,7 +27,7 @@ import (
func TestKVDiff(t *testing.T) {
kv1 := map[Key]string{1:"a", 3:"c", 4:"d"}
kv2 := map[Key]string{1:"b", 4:"d", 5:"e"}
got := kvdiff(kv1, kv2)
got := KVDiff(kv1, kv2)
want := map[Key]Δstring{1:{"a","b"}, 3:{"c",DEL}, 5:{DEL,"e"}}
if !reflect.DeepEqual(got, want) {
t.Fatalf("error:\ngot: %v\nwant: %v", got, want)
......@@ -36,7 +36,7 @@ func TestKVDiff(t *testing.T) {
func TestKVTxt(t *testing.T) {
kv := map[Key]string{3:"hello", 1:"zzz", 4:"world"}
got := kvtxt(kv)
got := KVTxt(kv)
want := "1:zzz,3:hello,4:world"
if got != want {
t.Fatalf("error:\ngot: %q\nwant: %q", got, want)
......
......@@ -104,5 +104,5 @@ func (xkv RBucketSet) Flatten() map[Key]string {
}
func (b *RBucket) String() string {
return fmt.Sprintf("%sB%s{%s}", b.Keycov, b.Oid, kvtxt(b.KV))
return fmt.Sprintf("%sB%s{%s}", b.Keycov, b.Oid, KVTxt(b.KV))
}
// Copyright (C) 2020-2021 Nexedi SA and Contributors.
// Kirill Smelkov <kirr@nexedi.com>
//
// This program is free software: you can Use, Study, Modify and Redistribute
// it under the terms of the GNU General Public License version 3, or (at your
// option) any later version, as published by the Free Software Foundation.
//
// You can also Link and Combine this program with other software covered by
// the terms of any of the Free Software licenses or any of the Open Source
// Initiative approved licenses and Convey the resulting work. Corresponding
// source of such a combination shall include the source code for all other
// software used.
//
// This program is distributed WITHOUT ANY WARRANTY; without even the implied
// warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
//
// See COPYING file for full licensing terms.
// See https://www.nexedi.com/licensing for rationale and options.
package xbtreetest
// testing-related support
import (
"flag"
"math/rand"
"testing"
"time"
)
var (
	verylongFlag = flag.Bool("verylong", false, `switch tests to run in "very long" mode`)
	randseedFlag = flag.Int64("randseed", -1, `seed for random number generator`)
)

// N returns short, medium, or long depending on whether tests are run with
// -short, -verylong, or normally.
func N(short, medium, long int) int {
	switch {
	case testing.Short(): // -short
		return short
	case *verylongFlag: // -verylong
		return long
	default: // normal run
		return medium
	}
}
// NewRand returns a new random-number generator together with the seed that
// was used to initialize it.
//
// The seed can be controlled via the -randseed option; by default
// (-randseed=-1) it is derived from the current time.
func NewRand() (rng *rand.Rand, seed int64) {
	seed = *randseedFlag
	if seed == -1 {
		// no explicit seed requested - derive one from current time
		seed = time.Now().UnixNano()
	}
	return rand.New(rand.NewSource(seed)), seed
}
......@@ -35,7 +35,7 @@ import (
// T is tree-based testing environment.
//
// It combines TreeSrv and client side access to ZODB with committed trees.
// It should be created it via NewT().
// It should be created via NewT().
type T struct {
*testing.T
......@@ -50,14 +50,19 @@ type T struct {
// Commit represent test commit changing a tree.
type Commit struct {
Tree string // the tree in topology-encoding
Prev *Commit // previous commit
At zodb.Tid // commit revision
ΔZ *zodb.EventCommit // raw ZODB changes; δZ.tid == at
Xkv RBucketSet // full tree state as of @at
Δxkv map[Key]Δstring // full tree-diff against parent
zblkDataTab map[zodb.Oid]string // full snapshot of all ZBlk data @at
// δzblkData map[zodb.Oid]Δstring // full diff for zblkData against parent XXX ?
Tree string // the tree in topology-encoding
Prev *Commit // previous commit
At zodb.Tid // commit revision
ΔZ *zodb.EventCommit // raw ZODB changes; δZ.tid == at
Xkv RBucketSet // full tree state as of @at
Δxkv map[Key]Δstring // full tree-diff against parent
ZBlkTab map[zodb.Oid]ZBlkInfo // full snapshot of all ZBlk name/data @at
}
// ZBlkInfo describes one ZBlk object: its name and data payload.
type ZBlkInfo struct {
	Name string // this ZBlk comes under root['treegen/values'][Name]
	Data string // block data content of this ZBlk
}
// NewT creates new T.
......@@ -93,13 +98,13 @@ func NewT(t *testing.T) *T {
head := tt.treeSrv.head
t1 := &Commit{
Tree: "T/B:", // treegen.py creates the tree as initially empty
Prev: nil,
At: head,
Xkv: xGetTree(tt.DB, head, tt.Root()),
zblkDataTab: xGetBlkDataTab(tt.DB, head),
ΔZ: nil,
Δxkv: nil,
Tree: "T/B:", // treegen.py creates the tree as initially empty
Prev: nil,
At: head,
Xkv: xGetTree(tt.DB, head, tt.Root()),
ZBlkTab: xGetBlkTab(tt.DB, head),
ΔZ: nil,
Δxkv: nil,
}
tt.commitv = []*Commit{t1}
......@@ -178,30 +183,30 @@ func (t *T) CommitTree(tree string) *Commit {
}
ttree := &Commit{
Tree: tree,
At: δZ.Tid,
ΔZ: δZ,
Xkv: xkv,
zblkDataTab: xGetBlkDataTab(t.DB, δZ.Tid),
Tree: tree,
At: δZ.Tid,
ΔZ: δZ,
Xkv: xkv,
ZBlkTab: xGetBlkTab(t.DB, δZ.Tid),
}
tprev := t.Head()
ttree.Prev = tprev
ttree.Δxkv = kvdiff(tprev.Xkv.Flatten(), ttree.Xkv.Flatten())
ttree.Δxkv = KVDiff(tprev.Xkv.Flatten(), ttree.Xkv.Flatten())
t.commitv = append(t.commitv, ttree)
return ttree
}
// xGetBlkDataTab loads all ZBlk from db@at.
// xGetBlkTab loads all ZBlk from db@at.
//
// it returns {} oid -> blkdata.
func xGetBlkDataTab(db *zodb.DB, at zodb.Tid) map[zodb.Oid]string {
defer exc.Contextf("%s: @%s: get blkdatatab", db.Storage().URL(), at)
func xGetBlkTab(db *zodb.DB, at zodb.Tid) map[zodb.Oid]ZBlkInfo {
defer exc.Contextf("%s: @%s: get blktab", db.Storage().URL(), at)
X := exc.Raiseif
blkDataTab := map[zodb.Oid]string{}
blkTab := map[zodb.Oid]ZBlkInfo{}
txn, ctx := transaction.New(context.Background())
defer txn.Abort()
......@@ -228,18 +233,23 @@ func xGetBlkDataTab(db *zodb.DB, at zodb.Tid) map[zodb.Oid]string {
err = zblkdir.PActivate(ctx); X(err)
defer zblkdir.PDeactivate()
for k, xzblk := range zblkdir.Data {
for xname, xzblk := range zblkdir.Data {
name, ok := xname.(string)
if !ok {
exc.Raisef("root['treegen/values']: key [%q]: expected str, got %T", xname, xname)
}
zblk, ok := xzblk.(zodb.IPersistent)
if !ok {
exc.Raisef("root['treegen/values'][%q]: expected %s, got %s", k, xzodb.TypeOf(zblk), xzodb.TypeOf(xzblk))
exc.Raisef("root['treegen/values'][%q]: expected IPersistent, got %s", name, xzodb.TypeOf(xzblk))
}
oid := zblk.POid()
data := xzgetBlkData(ctx, zconn, oid)
blkDataTab[oid] = data
blkTab[oid] = ZBlkInfo{name, data}
}
return blkDataTab
return blkTab
}
// XGetBlkData loads blk data for ZBlk<oid> @t.at
......@@ -249,13 +259,23 @@ func (t *Commit) XGetBlkData(oid zodb.Oid) string {
if oid == VDEL {
return DEL
}
data, ok := t.zblkDataTab[oid]
zblki, ok := t.ZBlkTab[oid]
if !ok {
exc.Raisef("getBlkData ZBlk<%s> @%s: no such ZBlk", oid, t.At)
}
return data
return zblki.Data
}
// XGetBlkByName returns oid and ZBlkInfo of the ZBlk registered under name.
//
// It panics (via panicf) if no ZBlk with such a name is present in t.ZBlkTab.
func (t *Commit) XGetBlkByName(name string) (zodb.Oid, ZBlkInfo) {
	// linear scan: ZBlkTab is keyed by oid, not by name
	for oid, zblki := range t.ZBlkTab {
		if zblki.Name == name {
			return oid, zblki
		}
	}
	panicf("ZBlk<%q> not found", name)
	return zodb.InvalidOid, ZBlkInfo{} // XXX should be not needed
}
// xGetTree loads Tree from zurl@at->obj<root>.
//
......
......@@ -69,8 +69,8 @@ type AllStructsSrv struct {
*TreeGenSrv
}
// StartTreeGenSrv spawns `treegen ...` server.
func StartTreeGenSrv(argv ...string) (_ *TreeGenSrv, hello string, err error) {
// startTreeGenSrv spawns `treegen ...` server.
func startTreeGenSrv(argv ...string) (_ *TreeGenSrv, hello string, err error) {
defer xerr.Contextf(&err, "treesrv %v: start", argv)
// spawn `treegen ...`
......@@ -125,7 +125,7 @@ func (tg *TreeGenSrv) Close() (err error) {
// StartTreeSrv spawns `treegen trees` server.
func StartTreeSrv(zurl string) (_ *TreeSrv, err error) {
defer xerr.Contextf(&err, "tree.srv %s: start", zurl)
tgSrv, hello, err := StartTreeGenSrv("trees", zurl)
tgSrv, hello, err := startTreeGenSrv("trees", zurl)
if err != nil {
return nil, err
}
......@@ -160,7 +160,7 @@ func StartTreeSrv(zurl string) (_ *TreeSrv, err error) {
func StartAllStructsSrv() (_ *AllStructsSrv, err error) {
defer xerr.Context(&err, "allstructs.srv: start")
tgSrv, hello, err := StartTreeGenSrv("allstructs")
tgSrv, hello, err := startTreeGenSrv("allstructs")
if err != nil {
return nil, err
}
......@@ -209,7 +209,7 @@ func (tg *TreeSrv) Commit(tree string) (_ zodb.Tid, err error) {
// AllStructs returns response from `treegen allstructs`
func (tg *AllStructsSrv) AllStructs(kv map[Key]string, maxdepth, maxsplit, n int, seed int64) (_ []string, err error) {
req := fmt.Sprintf("%d %d %d/%d %s", maxdepth, maxsplit, n, seed, kvtxt(kv))
req := fmt.Sprintf("%d %d %d/%d %s", maxdepth, maxsplit, n, seed, KVTxt(kv))
defer xerr.Contextf(&err, "allstructs.srv: %s ", req)
_, err = io.WriteString(tg.pyin, req + "\n")
......
......@@ -20,7 +20,7 @@
# See https://www.nexedi.com/licensing for rationale and options.
"""Program treegen provides infrastructure to generate ZODB BTree states.
It is used as helper for ΔBtail tests.
It is used as helper for ΔBtail and ΔFtail tests.
The following subcommands are provided:
......@@ -39,7 +39,7 @@ trees
-----
`treegen trees <zurl>` transitions ZODB LOBTree through requested tree states.
Tree states are specified on stdin as topology-encoded strings, 1 state per 1 line.
Tree states are specified on stdin as topology-encoded strings(+), 1 state per 1 line.
For every request the tree is changed to have specified keys, values and
topology, and modifications are committed to database. For every made commit
corresponding transaction ID is printed to stdout.
......@@ -65,6 +65,8 @@ session example:
S: 03d85dd871718899
...
XXX describe ø command
allstructs
----------
......@@ -108,9 +110,17 @@ session example:
T3/T-T/B1:a,2:b-B3:c
# ----
ΔFtail support
--------------
XXX describe øf and `t... D...` commands.
--------
(*) 300-500ms, see https://github.com/pypa/setuptools/issues/510.
(+) see wcfs/internal/xbtree.py
"""
from __future__ import print_function, absolute_import
......@@ -127,7 +137,7 @@ import random
import six
from wendelin.wcfs.internal import xbtree, xbtree_test
from wendelin.bigfile.file_zodb import ZBlk
from wendelin.bigfile.file_zodb import ZBlk, ZBigFile
from zodbtools.util import storageFromURL, ashex
from persistent import CHANGED
......@@ -197,6 +207,9 @@ def TreesSrv(zstor, r):
defer(zctx.close)
ztree = zctx.root['treegen/tree'] = LOBTree()
zfile = zctx.root['treegen/file'] = ZBigFile(blksize=4) # for ΔFtail tests
zfile.blktab = ztree
zdummy = zctx.root['treegen/dummy'] = PersistentMapping() # anything for ._p_changed=True
head = commit('treegen/tree: init')
xprint("tree.srv start @%s root=%s" % (ashex(head), ashex(ztree._p_oid)))
treetxtPrev = zctx.ztreetxt(ztree)
......@@ -210,10 +223,57 @@ def TreesSrv(zstor, r):
xprint("%s" % ashex(head))
continue
# mark tree as changed if the same topology is requested twice.
# øf command to delete the file
if treetxt == "øf":
head = commitDelete(zfile, subj)
xprint("%s" % ashex(head))
continue
# make sure we continue with undeleted ztree/zfile
if deleted(ztree):
undelete(ztree)
if deleted(zfile):
undelete(zfile)
# t... D... commands to natively commit updates to tree and values
if treetxt.startswith('t'):
t, D = treetxt.split()
assert D.startswith('D')
kv = kvDecode(t[1:], zctx.vdecode)
zv = _kvDecode(D[1:], kdecode=lambda ktxt: ktxt, vdecode=lambda vtxt: vtxt)
patch(ztree, diff(ztree, kv), kv)
# ~ patch(valdict, diff(valdict,zv)) but sets zblk.value on change
valdict = zctx.root['treegen/values']
vkeys = set(valdict.keys())
vkeys.update(zv.keys())
for k in vkeys:
zblk = valdict.get(k)
v1 = None
if zblk is not None:
v1 = zblk.loadblkdata()
v2 = zv.get(k)
if v1 != v2:
if v1 is None:
zblk = ZBlk()
valdict[k] = zblk
if v2 is not None:
zblk.setblkdata(v2)
zblk._p_changed = True
elif v2 is None:
del valdict[k]
zdummy._p_changed = True # always make the commit non-empty
head = commit(subj)
xprint("%s" % ashex(head))
continue
# everything else is considered to be a tree topology
# mark something as changed if the same topology is requested twice.
# this ensures we can actually make a non-empty commit
if treetxt == treetxtPrev:
ztree._p_changed = True
zdummy._p_changed = True
treetxtPrev = treetxt
tree = zctx.TopoDecode(treetxt)
......@@ -342,12 +402,15 @@ def kvEncode(kvDict, vencode): # -> kvText
# kvDecode decodes key->value mapping from text.
# e.g. '1:a,2:b' -> {1:'a', 2:'b'}
def kvDecode(kvText, vdecode): # -> kvDict
if kvText == "":
return _kvDecode(kvText, int, vdecode)
def _kvDecode(kvText, kdecode, vdecode): # -> kvDict
if kvText in ("", "ø"):
return {}
kv = {}
for item in kvText.split(','):
ktxt, vtxt = item.split(':')
k = int(ktxt)
k = kdecode(ktxt)
v = vdecode(vtxt)
if k in kv:
raise ValueError("key %s present multiple times" % k)
......@@ -372,7 +435,7 @@ def diff(d1, d2): # -> [] of (k,v) to change; DEL means del[k]
# diff = [] of (k,v) to change; DEL means del[k]
def patch(d, diff, verify):
for (k,v) in diff:
if v is DEL:
if v == DEL:
del d[k]
else:
d[k] = v
......@@ -431,8 +494,18 @@ def commitDelete(obj, description): # -> tid
# reset transaction to a new one
transaction.begin()
obj._v_deleted = True
return tid
# deleted reports whether obj was deleted via commitDelete.
def deleted(obj): # -> bool
    # commitDelete marks deleted objects with ._v_deleted = True;
    # objects never deleted do not carry the attribute at all.
    try:
        return obj._v_deleted
    except AttributeError:
        return False
# undelete forces recreation for obj that was previously deleted via commitDelete.
def undelete(obj):
    # mark the object as changed so the next commit recreates it in the
    # database, and drop the deletion marker set by commitDelete.
    obj._p_changed = True
    delattr(obj, '_v_deleted')
# ztreetxt returns text representation of a ZODB tree.
@func(ZCtx)
......
......@@ -43,18 +43,12 @@ const debugΔBtail = false
// It semantically consists of
//
// []δB ; rev ∈ (tail, head]
// atTail XXX no need (see vvv)
//
// where δB represents a change in BTrees space
//
// δB:
// .rev↑
// {} root -> {}(key, δvalue) XXX was value
//
// and atTail keeps set of k/v @tail for keys changed in (tail, head]
//
// atTail: XXX no need for atTail as we have δvalue.Old
// {} root -> {}(key, value)
// {} root -> {}(key, δvalue)
//
// It covers only changes to keys from tracked subset of BTrees parts.
// In particular a key that was not explicitly requested to be tracked, even if
......@@ -62,7 +56,7 @@ const debugΔBtail = false
//
// ΔBtail provides the following operations:
//
// .Track(path) - start tracking tree nodes and keys; root=path[0], keys=path[-1].keys XXX keys not correct - e.g. track missing key
// .Track(path) - start tracking tree nodes and keys; root=path[0], keys=path[-1].(lo,hi]
//
// .Update(δZ) -> δB - update BTree δ tail given raw ZODB changes
// .ForgetPast(revCut) - forget changes past revCut
......@@ -80,11 +74,10 @@ const debugΔBtail = false
//
// XXX incremental; not full coverage
//
// ΔBtail is not safe for concurrent access.
// ΔBtail is not safe for concurrent access. XXX rework
// XXX -> multiple readers / single writer?
//
// See also zodb.ΔTail
// XXX naming -> ΔBTail ?
type ΔBtail struct {
// raw ZODB changes; Kept to rebuild .vδTbyRoot after new Track.
// includes all changed objects, not only tracked ones.
......@@ -118,9 +111,6 @@ type ΔTtail struct {
trackNew blib.PPTreeSubSet
// XXX + trackNewKeys RangedKeySet
// {}k/v @tail for keys that are changed in (tail, head].
KVAtTail map[Key]Value // XXX not needed since vδT has ΔValue ?
// index for LastRevOf queries
lastRevOf map[Key]zodb.Tid // {} key -> last
}
......@@ -169,7 +159,6 @@ func NewΔBtail(at0 zodb.Tid, db *zodb.DB) *ΔBtail {
func newΔTtail() *ΔTtail {
return &ΔTtail{
trackNew: blib.PPTreeSubSet{},
KVAtTail: make(map[Key]Value),
lastRevOf: make(map[Key]zodb.Tid),
}
}
......@@ -210,8 +199,19 @@ func (orig *ΔBtail) Clone() *ΔBtail {
// Clone returns copy of ΔTtail.
func (orig *ΔTtail) Clone() *ΔTtail {
klon := &ΔTtail{}
klon.vδT = make([]ΔTree, 0, len(orig.vδT))
for _, origδT := range orig.vδT {
klon.vδT = vδTClone(orig.vδT)
klon.trackNew = orig.trackNew.Clone()
klon.lastRevOf = make(map[Key]zodb.Tid, len(orig.lastRevOf))
for k, rev := range orig.lastRevOf {
klon.lastRevOf[k] = rev
}
return klon
}
// vδTClone returns deep copy of []ΔTree.
func vδTClone(orig []ΔTree) []ΔTree {
klon := make([]ΔTree, 0, len(orig))
for _, origδT := range orig {
klonδT := ΔTree{
Rev: origδT.Rev,
ΔKV: make(map[Key]ΔValue, len(origδT.ΔKV)),
......@@ -219,16 +219,7 @@ func (orig *ΔTtail) Clone() *ΔTtail {
for k, δv := range origδT.ΔKV {
klonδT.ΔKV[k] = δv
}
klon.vδT = append(klon.vδT, klonδT)
}
klon.trackNew = orig.trackNew.Clone()
klon.KVAtTail = make(map[Key]Value, len(orig.KVAtTail))
for k, v := range orig.KVAtTail {
klon.KVAtTail[k] = v
}
klon.lastRevOf = make(map[Key]zodb.Tid, len(orig.lastRevOf))
for k, rev := range orig.lastRevOf {
klon.lastRevOf[k] = rev
klon = append(klon, klonδT)
}
return klon
}
......@@ -308,24 +299,42 @@ func (δBtail *ΔBtail) rebuildAll() (err error) {
defer xerr.Context(&err, "ΔBtail rebuildAll")
// XXX locking
trackNewRoots := δBtail.trackNewRoots
tracefΔBtail("\nRebuildAll @%s..@%s trackNewRoots: %s\n", δBtail.Tail(), δBtail.Head(), δBtail.trackNewRoots)
tracefΔBtail("\nRebuildAll @%s..@%s trackNewRoots: %s\n", δBtail.Tail(), δBtail.Head(), trackNewRoots)
for root := range δBtail.trackNewRoots {
delete(δBtail.trackNewRoots, root)
δBtail.rebuild1(root)
}
for root := range trackNewRoots {
δTtail := δBtail.vδTbyRoot[root] // must be there
δtrackSet, δrevSet, err := δTtail.rebuild(root, δBtail.δZtail, δBtail.db)
if err != nil {
return err
}
δBtail.trackSet.UnionInplace(δtrackSet)
δBtail.vδBroots_Update(root, δrevSet)
return nil
}
// rebuild1IfNeeded rebuilds ΔBtail for single root if that root needs rebuilding.
func (δBtail *ΔBtail) rebuild1IfNeeded(root zodb.Oid) error {
// XXX locking
_, ok := δBtail.trackNewRoots[root]
if !ok {
return nil
}
δBtail.trackNewRoots = setOid{}
delete(δBtail.trackNewRoots, root)
return δBtail.rebuild1(root)
}
// rebuild1 rebuilds ΔBtail for a single root.
//
// It rebuilds the root's ΔTtail from δZtail data and merges the resulting
// track-set and revision-set deltas back into ΔBtail-level indices.
func (δBtail *ΔBtail) rebuild1(root zodb.Oid) error {
	// XXX locking
	δTtail := δBtail.vδTbyRoot[root] // must be there
	δtrackSet, δrevSet, err := δTtail.rebuild(root, δBtail.δZtail, δBtail.db)
	if err != nil {
		return err
	}
	// fold per-root rebuild results into ΔBtail-level state
	δBtail.trackSet.UnionInplace(δtrackSet)
	δBtail.vδBroots_Update(root, δrevSet)
	return nil
}
// rebuild rebuilds ΔTtail taking trackNew requests into account.
//
// It returns:
......@@ -336,7 +345,7 @@ func (δBtail *ΔBtail) rebuildAll() (err error) {
//
// XXX place
func (δTtail *ΔTtail) rebuild(root zodb.Oid, δZtail *zodb.ΔTail, db *zodb.DB) (δtrackSet blib.PPTreeSubSet, δrevSet setTid, err error) {
defer xerr.Context(&err, "ΔTtail rebuild")
defer xerr.Contextf(&err, "ΔTtail<%s> rebuild", root)
// XXX locking
tracefΔBtail("\nRebuild %s @%s .. @%s\n", root, δZtail.Tail(), δZtail.Head())
......@@ -351,6 +360,11 @@ func (δTtail *ΔTtail) rebuild(root zodb.Oid, δZtail *zodb.ΔTail, db *zodb.DB
δrevSet = setTid{}
// clone vδT before modifying it
// queries such as SliceByRootRev return slices of vδT and we do not
// want to change data that is already returned to user.
δTtail.vδT = vδTClone(δTtail.vδT)
// go backwards and merge vδT <- treediff(lo..hi/trackNew)
vδZ := δZtail.Data()
for {
......@@ -379,7 +393,7 @@ func (δTtail *ΔTtail) rebuild(root zodb.Oid, δZtail *zodb.ΔTail, db *zodb.DB
δrevSet.Add(δZ.Rev)
}
// XXX update .KVAtTail, .lastRevOf
// XXX update .lastRevOf
}
// an iteration closer to tail may turn out to add a key to the tracking set.
......@@ -536,7 +550,7 @@ func (δTtail *ΔTtail) rebuild1(atPrev zodb.Tid, δZ zodb.ΔRevEntry, trackNew
}
}
// XXX update .KVAtTail, .lastRevOf (here?)
// XXX update .lastRevOf (here?)
return δtrack, δtkeycov, newRevEntry, nil
}
......@@ -590,7 +604,6 @@ func (δBtail *ΔBtail) Update(δZ *zodb.EventCommit) (_ ΔB, err error) {
}
}
// XXX rebuild KVAtTail
// XXX rebuild lastRevOf
}
......@@ -666,6 +679,7 @@ func (δBtail *ΔBtail) _Update1(δZ *zodb.EventCommit) (δB1 _ΔBUpdate1, err e
tracefΔBtail("\n-> root<%s> δkv: %v δtrack: %v δtkeycov: %v\n", root, δT, δtrack, δtkeycov)
// XXX also needs vδT clone here?
δTtail := δBtail.vδTbyRoot[root] // must be there
if len(δT) > 0 { // an object might be resaved without change
δTtail.vδT = append(δTtail.vδT, ΔTree{Rev: δZ.Tid, ΔKV: δT})
......@@ -730,7 +744,8 @@ func (δBtail *ΔBtail) ForgetPast(revCut zodb.Tid) {
}
func (δTtail *ΔTtail) forgetPast(revCut zodb.Tid) {
// XXX KVAtTail, lastRevOf
// XXX locking
// XXX lastRevOf
icut := 0
for ; icut < len(δTtail.vδT); icut++ {
......@@ -740,78 +755,78 @@ func (δTtail *ΔTtail) forgetPast(revCut zodb.Tid) {
}
// vδT[:icut] should be forgotten
// NOTE clones vδT because queries return vδT aliases
δTtail.vδT = append([]ΔTree(nil), δTtail.vδT[icut:]...)
}
// Get returns root[key] as of @at database state plus revision that changed it.
// GetAt tries to retrieve root[key]@at from δBtail data.
//
// If δBtail has δB entry that covers root[key]@at, corresponding value
// (VDEL means deletion) and valueExact=true are returned. If δBtail data
// allows to determine revision of root[key]@at value, corresponding revision
// and revExact=true are returned. If revision of root[key]@at cannot be
// determined (rev=δBtail.Tail, revExact=false) are returned.
//
// If δBtail has no δB entry that covers root[key]@at, return is
//
// (value=VDEL, valueExact=false, rev=δBtail.Tail, revExact=false)
//
// .rev and exact=true are returned:
//
// if revExact=False - rev is upper estimate for the revision.
// (δB[root/key].δvalue.New, δB.rev, exact=true)
//
// If δBtail has no δB entry for root[key] with .rev ≤ @at, return is
//
// (VDEL, δBtail.Tail, exact=false)
//
// key must be tracked
// at must ∈ (tail, head]
//
// XXX root -> Oid ?
func (δBtail *ΔBtail) GetAt(ctx context.Context, root *Tree, key Key, at zodb.Tid) (value Value, ok bool, rev zodb.Tid, revExact bool, err error) {
defer xerr.Contextf(&err, "δBtail: root<%s>: get %d @%s", root.POid(), key, at)
func (δBtail *ΔBtail) GetAt(root zodb.Oid, key Key, at zodb.Tid) (value Value, rev zodb.Tid, valueExact, revExact bool, err error) {
defer xerr.Contextf(&err, "δBtail: root<%s>: get %d @%s", root, key, at)
// XXX key not tracked -> panic
// XXX at not ∈ (tail, head] -> panic
// XXX handle deletion
tail := δBtail.Tail()
head := δBtail.Head()
if !(tail < at && at <= head) {
panicf("at out of bounds: at: @%s, (tail, head] = (@%s, @%s]", at, tail, head)
}
// XXX locking
// FIXME stub -> that only ZBlk.rev is used
//return @head, rev=.Tail(), revExact=false
value = VDEL
valueExact = false
rev = tail
revExact = false
// XXX dirty -> rebuild
err = δBtail.rebuild1IfNeeded(root)
if err != nil {
return value, rev, valueExact, revExact, err
}
// XXX -> index lastXXXOf(key) | linear scan ↓ looking for change <= at
δTtail := δBtail.vδTbyRoot[root.POid()]
δTtail := δBtail.vδTbyRoot[root]
if δTtail == nil {
panicf("δBtail: root<%s> not tracked", root.POid())
panicf("δBtail: root<%s> not tracked", root)
}
// XXX -> index lastXXXOf(key) | linear scan ↓ looking for change <= at
for i := len(δTtail.vδT)-1; i >= 0; i-- {
δT := δTtail.vδT[i]
if at < δT.Rev {
continue
}
var δvalue ΔValue
δvalue, ok = δT.ΔKV[key]
if ok {
value = δvalue.New
rev = δT.Rev
revExact = true
break
δvalue, ok_ := δT.ΔKV[key]
if ok_ {
valueExact = true
if δT.Rev > at {
value = δvalue.Old
} else {
value = δvalue.New
rev = δT.Rev
revExact = true
break
}
}
}
// key was found in δT ∈ δTtail
if ok {
return
}
// key not in history tail.
// either use @tail[key], if it is present, or @head[key]
rev = δBtail.Tail()
revExact = false
value, ok = δTtail.KVAtTail[key] // XXX kill - just use δvalue.Old from next-to-at entry
if ok {
return
}
// @tail[key] is not present - key was not changing in (tail, head].
// since at ∈ (tail, head] we can use @head[key] as the result
xvalue, ok, err := root.Get(ctx, key)
if err != nil || !ok {
return
}
value, err = vOid(xvalue)
if err != nil {
ok = false
return
}
return
return value, rev, valueExact, revExact, nil
}
// XXX don't need
......@@ -834,13 +849,38 @@ func (δBtail *ΔBtail) GetAt(ctx context.Context, root *Tree, key Key, at zodb.
func (δBtail *ΔBtail) SliceByRootRev(root zodb.Oid, lo, hi zodb.Tid) /*readonly*/[]ΔTree {
xtail.AssertSlice(δBtail, lo, hi)
// XXX locking
// XXX rebuild
err := δBtail.rebuild1IfNeeded(root)
if err != nil {
panic(err) // XXX
}
δTtail, ok := δBtail.vδTbyRoot[root]
if !ok {
return []ΔTree{}
}
// XXX dup data - because they can be further rebuilt in parallel to caller using them
return δTtail.vδT // FIXME process lo, hi
// XXX no -> dup data in rebuild, not here
vδT := δTtail.vδT
l := len(vδT)
if l == 0 {
return nil
}
// find max j : [j].rev ≤ hi XXX linear scan -> binary search
j := l - 1
for ; j >= 0 && vδT[j].Rev > hi; j-- {}
if j < 0 {
return nil // ø
}
// find max i : [i].rev > lo XXX linear scan -> binary search
i := j
for ; i >= 0 && vδT[i].Rev > lo; i-- {}
i++
return vδT[i:j+1]
}
......@@ -851,6 +891,11 @@ func (δBtail *ΔBtail) ΔZtail() /*readonly*/*zodb.ΔTail {
return δBtail.δZtail
}
// DB returns the database handle that δBtail uses to access ZODB.
func (δBtail *ΔBtail) DB() *zodb.DB {
	return δBtail.db
}
func tracefΔBtail(format string, argv ...interface{}) {
if traceΔBtail {
......
......@@ -33,26 +33,18 @@ package xbtree
//
// a) transition a BTree in ZODB through particular tricky tree topologies
// and feed ΔBtail through created database transactions.
// b) transition a BTree in ZODB through random tree topologies and feed
// ΔBtail through created database transactions.
//
// TestΔBTail and TestΔBTailAllStructs implement approaches "a" and "b" correspondingly.
//
// testprog/treegen.py is used as helper to both: XXX moved to xbtreetest
// b) transition a BTree in ZODB through random tree topologies
// and feed ΔBtail through created database transactions.
//
// - commit a particular BTree topology into ZODB, and
// - to generate set of random tree topologies that all correspond to particular {k->v} dict.
// TestΔBTail and TestΔBTailRandom implement approaches "a" and "b" correspondingly.
import (
"flag"
"fmt"
"math"
"math/rand"
"reflect"
"sort"
"strings"
"testing"
"time"
"lab.nexedi.com/kirr/go123/exc"
"lab.nexedi.com/kirr/go123/xerr"
......@@ -64,49 +56,6 @@ import (
type Δstring = xbtreetest.Δstring
// trackSet returns what should be ΔBtail.trackSet coverage for specified tracked key set.
// XXX was method -> change name?
func trackSet(rbs xbtreetest.RBucketSet, tracked setKey) blib.PPTreeSubSet {
// nil = don't compute keyCover
// (trackSet is called from inside hot inner loop of rebuild test)
trackSet := _trackSetWithCov(rbs, tracked, nil)
return trackSet
}
// trackSetWithCov returns what should be ΔBtail.trackSet and its key coverage for specified tracked key set.
func trackSetWithCov(rbs xbtreetest.RBucketSet, tracked setKey) (trackSet blib.PPTreeSubSet, keyCover *blib.RangedKeySet) {
	cov := &blib.RangedKeySet{}
	tset := _trackSetWithCov(rbs, tracked, cov)
	return tset, cov
}
// _trackSetWithCov computes would-be ΔBtail.trackSet for tracked keys.
// If outKeyCover ≠ nil it is extended with key coverage of every visited bucket.
func _trackSetWithCov(rbs xbtreetest.RBucketSet, tracked setKey, outKeyCover *blib.RangedKeySet) (trackSet blib.PPTreeSubSet) {
	tset := blib.PPTreeSubSet{}
	for key := range tracked {
		leaf := rbs.Get(key)
		if outKeyCover != nil {
			outKeyCover.AddRange(leaf.Keycov)
		}
		tset.AddPath(leaf.Path())
	}
	return tset
}
// XGetδKV translates {k -> δ<oid>} to {k -> δ(ZBlk(oid).data)} according to t1..t2 db snapshots.
func XGetδKV(t1, t2 *xbtreetest.Commit, δkvOid map[Key]ΔValue) map[Key]Δstring {
	out := make(map[Key]Δstring, len(δkvOid))
	for key, δoid := range δkvOid {
		// old data is resolved against t1 snapshot, new data against t2
		vold := t1.XGetBlkData(δoid.Old)
		vnew := t2.XGetBlkData(δoid.New)
		out[key] = Δstring{Old: vold, New: vnew}
	}
	return out
}
// KAdjMatrix is adjacency matrix that describes how set of tracked keys
// changes (always grow) when tree topology is updated from A to B.
//
......@@ -134,162 +83,506 @@ func XGetδKV(t1, t2 *xbtreetest.Commit, δkvOid map[Key]ΔValue) map[Key]Δstri
//
// XXX fix definition for "and changed, or coverage changed"
//
// Use:
//
// - KAdj(A,B) to build adjacency matrix for A -> B transition.
// - kadj.Map(keys) to compute kadj·keys.
// - kadj1.Mul(kadj2) to compute kadj1·kadj2.
//
// Note: adjacency matrix is symmetric (KAdj verifies this at runtime):
//
// kadj(A,B) == kadj(B,A)
// KAdj(A,B) == KAdj(B,A)
type KAdjMatrix map[Key]setKey
// Map returns kadj·keys .
func (kadj KAdjMatrix) Map(keys setKey) setKey {
res := make(setKey, len(keys))
for k := range keys {
to, ok := kadj[k]
if !ok {
panicf("kadj.Map: %d ∉ kadj\n\nkadj: %v", k, kadj)
}
res.Update(to)
}
return res
// ΔBTestEntry represents one entry in ΔBTail tests.
type ΔBTestEntry struct {
tree string // next tree topology
kadjOK KAdjMatrix // adjacency matrix against previous case (optional)
flags ΔBTestFlags
}
// Mul returns kadjA·kadjB .
//
// (kadjA·kadjB).Map(keys) = kadjA.Map(kadjB.Map(keys))
func (kadjA KAdjMatrix) Mul(kadjB KAdjMatrix) KAdjMatrix {
// ~ assert kadjA.keys == kadjB.keys
// check only len here; the rest will be asserted by Map
if len(kadjA) != len(kadjB) {
panicf("kadj.Mul: different keys:\n\nkadjA: %v\nkadjB: %v", kadjA, kadjB)
}
type ΔBTestFlags int
const ΔBTest_SkipUpdate ΔBTestFlags = 1 // skip verifying Update for this test entry
const ΔBTest_SkipRebuild ΔBTestFlags = 2 // skip verifying rebuild for this test entry
kadj := make(KAdjMatrix, len(kadjB))
for k, tob := range kadjB {
kadj[k] = kadjA.Map(tob)
// ΔBTest converts xtest into ΔBTestEntry.
// xtest can be string|ΔBTestEntry.
func ΔBTest(xtest interface{}) ΔBTestEntry {
var test ΔBTestEntry
switch xtest := xtest.(type) {
case string:
test.tree = xtest
test.kadjOK = nil
test.flags = 0
case ΔBTestEntry:
test = xtest
default:
panicf("BUG: ΔBTest: bad type %T", xtest)
}
return kadj
return test
}
// KAdj computes adjacency matrix for t1 -> t2 transition.
//
// The set of keys for which kadj matrix is computed can be optionally provided.
// This set of keys defaults to allTestKeys(t1,t2).
//
// KAdj itself is verified by testΔBTail on entries with .kadjOK set.
func KAdj(t1, t2 *xbtreetest.Commit, keysv ...setKey) (kadj KAdjMatrix) {
// assert KAdj(A,B) == KAdj(B,A)
kadj12 := _KAdj(t1,t2, keysv...)
kadj21 := _KAdj(t2,t1, keysv...)
if !reflect.DeepEqual(kadj12, kadj21) {
panicf("KAdj not symmetric:\nt1: %s\nt2: %s\nkadj12: %v\nkadj21: %v",
t1.Tree, t2.Tree, kadj12, kadj21)
// TestΔBTail verifies ΔBTail for explicitly provided tree topologies.
func TestΔBTail(t *testing.T) {
// K is shorthand for setKey
K := func(keyv ...Key) setKey {
ks := setKey{}
for _, k := range keyv { ks.Add(k) }
return ks
}
return kadj12
}
const debugKAdj = false
func debugfKAdj(format string, argv ...interface{}) {
if debugKAdj {
fmt.Printf(format, argv...)
// oo is shorthand for KeyMax
const oo = KeyMax
// A is shorthand for KAdjMatrix
type A = KAdjMatrix
// Δ is shorthand for ΔBTestEntry
Δ := func(tree string, kadjOK A) (test ΔBTestEntry) {
test.tree = tree
test.kadjOK = kadjOK
return test
}
}
func _KAdj(t1, t2 *xbtreetest.Commit, keysv ...setKey) (kadj KAdjMatrix) {
var keys setKey
switch len(keysv) {
case 0:
keys = allTestKeys(t1, t2)
case 1:
keys = keysv[0]
default:
panic("multiple key sets on the call")
}
// test known cases going through tree1 -> tree2 -> ...
testv := []interface{} {
// start from non-empty tree to verify both ->empty and empty-> transitions
"T/B1:a,2:b",
debugfKAdj("\n\n_KAdj\n")
debugfKAdj("t1: %s\n", t1.Tree)
debugfKAdj("t2: %s\n", t2.Tree)
debugfKAdj("keys: %s\n", keys)
defer func() {
debugfKAdj("kadj -> %v\n", kadj)
}()
// empty
"T/B:",
// kadj = {} k -> adjacent keys.
// if k is tracked and covered by changed leaf -> changes to adjacents must be in Update(t1->t2).
kadj = KAdjMatrix{}
for k := range keys {
adj1 := setKey{}
adj2 := setKey{}
// +1
Δ("T/B1:a",
A{1: K(1,oo),
oo: K(1,oo)}),
q1 := &blib.RangedKeySet{}; q1.Add(k)
q2 := &blib.RangedKeySet{}; q2.Add(k)
done1 := &blib.RangedKeySet{}
done2 := &blib.RangedKeySet{}
// +2
Δ("T/B1:a,2:b",
A{1: K(1,2,oo),
2: K(1,2,oo),
oo: K(1,2,oo)}),
debugfKAdj("\nk%s\n", kstr(k))
for !q1.Empty() || !q2.Empty() {
debugfKAdj("q1: %s\tdone1: %s\n", q1, done1)
debugfKAdj("q2: %s\tdone2: %s\n", q2, done2)
for _, r1 := range q1.AllRanges() {
lo1 := r1.Lo
for {
b1 := t1.Xkv.Get(lo1)
debugfKAdj(" b1: %s\n", b1)
for k_ := range keys {
if b1.Keycov.Has(k_) {
adj1.Add(k_)
debugfKAdj(" adj1 += %s\t-> %s\n", kstr(k_), adj1)
}
}
done1.AddRange(b1.Keycov)
// q2 |= (b1.keyrange \ done2)
δq2 := &blib.RangedKeySet{}
δq2.AddRange(b1.Keycov)
δq2.DifferenceInplace(done2)
q2.UnionInplace(δq2)
debugfKAdj("q2 += %s\t-> %s\n", δq2, q2)
// -1
Δ("T/B2:b",
A{1: K(1,2,oo),
2: K(1,2,oo),
oo: K(1,2,oo)}),
// continue with next right bucket until r1 coverage is complete
if r1.Hi_ <= b1.Keycov.Hi_ {
break
}
lo1 = b1.Keycov.Hi_ + 1
}
}
q1.Clear()
// 2: b->c
Δ("T/B2:c",
A{2: K(2,oo),
oo: K(2,oo)}),
for _, r2 := range q2.AllRanges() {
lo2 := r2.Lo
for {
b2 := t2.Xkv.Get(lo2)
debugfKAdj(" b2: %s\n", b2)
for k_ := range keys {
if b2.Keycov.Has(k_) {
adj2.Add(k_)
debugfKAdj(" adj2 += %s\t-> %s\n", kstr(k_), adj2)
}
}
done2.AddRange(b2.Keycov)
// q1 |= (b2.keyrange \ done1)
δq1 := &blib.RangedKeySet{}
δq1.AddRange(b2.Keycov)
δq1.DifferenceInplace(done1)
q1.UnionInplace(δq1)
debugfKAdj("q1 += %s\t-> %s\n", δq1, q1)
// +1 in new bucket (to the left)
Δ("T2/B1:a-B2:c",
A{1: K(1,2,oo),
2: K(1,2,oo),
oo: K(1,2,oo)}),
// continue with next right bucket until r2 coverage is complete
if r2.Hi_ <= b2.Keycov.Hi_ {
break
}
lo2 = b2.Keycov.Hi_ + 1
}
}
q2.Clear()
}
// +3 in new bucket (to the right)
Δ("T2,3/B1:a-B2:c-B3:c",
A{1: K(1),
2: K(2,3,oo),
3: K(2,3,oo),
oo: K(2,3,oo)}),
adj := setKey{}; adj.Update(adj1); adj.Update(adj2)
kadj[k] = adj
}
// bucket split; +3 in new bucket
"T/B1:a,2:b",
Δ("T2/B1:a-B2:b,3:c",
A{1: K(1,2,3,oo),
2: K(1,2,3,oo),
3: K(1,2,3,oo),
oo: K(1,2,3,oo)}),
return kadj
// bucket split; +3 in new bucket; +4 +5 in another new bucket
// everything becomes tracked because original bucket had [-∞,∞) coverage
"T/B1:a,2:b",
Δ("T2,4/B1:a-B2:b,3:c-B4:d,5:e",
A{1: K(1,2,3,4,5,oo),
2: K(1,2,3,4,5,oo),
3: K(1,2,3,4,5,oo),
4: K(1,2,3,4,5,oo),
5: K(1,2,3,4,5,oo),
oo: K(1,2,3,4,5,oo)}),
// reflow of keys: even if tracked={1}, changes to all B nodes need to be rescanned:
// +B12 forces to look in -B23 which adds -3 into δ, which
// forces to look into +B34 and so on.
"T2,4,6/B1:a-B2:b,3:c-B4:d,5:e-B6:f,7:g",
Δ("T3,5,7/B1:g,2:f-B3:e,4:d-B5:c,6:b-B7:a",
A{1: K(1,2,3,4,5,6,7,oo),
2: K(1,2,3,4,5,6,7,oo),
3: K(1,2,3,4,5,6,7,oo),
4: K(1,2,3,4,5,6,7,oo),
5: K(1,2,3,4,5,6,7,oo),
6: K(1,2,3,4,5,6,7,oo),
7: K(1,2,3,4,5,6,7,oo),
oo: K(1,2,3,4,5,6,7,oo)}),
// reflow of keys for rebuild: even if tracked1={}, tracked2={1}, changes to
// all A/B/C nodes need to be rescanned. Contrary to the above case the reflow
// is not detectable at separate diff(A,B) and diff(B,C) runs.
"T3,5,7/B1:a,2:b-B3:c,4:d-B5:e,6:f-B7:g,8:h",
"T/B1:b",
"T2,4,6/B1:a-B2:b,3:c-B4:d,5:e-B6:f,7:g",
// similar situation where rebuild has to detect reflow in between non-neighbour trees
"T3,6/B1:a,2:b-B3:c,4:d-B6:f,7:g",
"T4,7/B1:b-B4:d,5:e-B7:g,8:h",
"T2,5,8/B1:a-B2:b,3:c-B5:e,6:f-B8:h,9:i",
// depth=2; bucket split; +3 in new bucket; left T remain
// _unchanged_ even though B under it is modified.
"T/T/B1:a,2:b",
Δ("T2/T-T/B1:a-B2:b,3:c",
A{1: K(1,2,3,oo),
2: K(1,2,3,oo),
3: K(1,2,3,oo),
oo: K(1,2,3,oo)}),
// depth=2; like prev. case, but additional right arm with +4 +5 is added.
"T/T/B1:a,2:b",
Δ("T2,4/T-T-T/B1:a-B2:b,3:c-B4:d,5:e",
A{1: K(1,2,3,4,5,oo),
2: K(1,2,3,4,5,oo),
3: K(1,2,3,4,5,oo),
4: K(1,2,3,4,5,oo),
5: K(1,2,3,4,5,oo),
oo: K(1,2,3,4,5,oo)}),
// depth=2; bucket split; +3 in new bucket; t0 and t1 split;
// +right arm (T7/B45-B89).
"T/T/B1:a,2:b",
Δ("T4/T2-T7/B1:a-B2:b,3:c-B4:d,5:e-B8:h,9:i",
A{1: K(1,2,3,4,5,8,9,oo),
2: K(1,2,3,4,5,8,9,oo),
3: K(1,2,3,4,5,8,9,oo),
4: K(1,2,3,4,5,8,9,oo),
5: K(1,2,3,4,5,8,9,oo),
8: K(1,2,3,4,5,8,9,oo),
9: K(1,2,3,4,5,8,9,oo),
oo: K(1,2,3,4,5,8,9,oo)}),
// 2 reflow to right B neighbour; 8 splits into new B; δ=ø
"T3/B1:a,2:b-B4:d,8:h",
"T2,5/B1:a-B2:b,4:d-B8:h",
// case where kadj does not grow too much as leafs coverage remains stable
"T4,8/B1:a,2:b-B5:d,6:e-B10:g,11:h",
Δ("T4,8/B2:b,3:c-B6:e,7:f-B11:h,12:i",
A{1: K(1,2,3),
2: K(1,2,3),
3: K(1,2,3),
5: K(5,6,7),
6: K(5,6,7),
7: K(5,6,7,),
10: K(10,11,12,oo),
11: K(10,11,12,oo),
12: K(10,11,12,oo),
oo: K(10,11,12,oo)}),
// tree deletion
// having ø in the middle of the test cases exercises all:
// * `ø -> Tree ...` (tree is created anew),
// * `... Tree -> ø` (tree is deleted), and
// * `Tree -> ø -> Tree` (tree is deleted and then recreated)
xbtreetest.DEL,
// tree rotation
"T3/B2:b-B3:c,4:d",
"T5/T3-T7/B2:a-B3:a,4:a-B6:a-B8:a",
// found by AllStructs ([1] is not changed, but because B1 is
// unlinked and 1 migrates to other bucket, changes in that
// other bucket must be included into δT)
"T1,2/B0:e-B1:d-B2:g,3:a",
"T1/B0:d-B1:d,2:d",
// ----//---- with depth=2
"T1,2/T-T-T/B0:a-B1:b-B2:c,3:d",
"T1/T-T/B0:e-B1:b,2:f",
// XXX depth=3 (to verify recursion and selecting which tree children to follow or not)
// degenerate topology from ZODB tests
// https://github.com/zopefoundation/ZODB/commit/6cd24e99f89b
// https://github.com/zopefoundation/BTrees/blob/4.7.2-1-g078ba60/BTrees/tests/testBTrees.py#L20-L57
"T4/T2-T/T-T-T6,10/B1:a-B3:b-T-T-T/T-B7:c-B11:d/B5:e",
"T/B1:e,5:d,7:c,8:b,11:a", // -3 +8
// was leading treegen to generate corrupt trees
"T/T1/T-T/B0:g-B1:e,2:d,3:h",
"T1/T-T3/B0:g-T-T/B1:e,2:d-B3:h",
// was leading to wrongly computed trackSet2 due to top not
// being tracked to tree root.
"T/T1/B0:a-B1:b",
"T/T1/T-T/B0:c-B1:d",
// was leading to wrongly computed trackSet2: leaf bucket not
// reparented to root.
"T/T/B0:a",
"T/B0:a",
// δtkeycov grows due to change in parent tree only
"T3/B1:a-B8:c",
"T7/B1:a-B8:c",
// ----//----
"T3/B1:a,2:b-B8:c,9:d",
"T7/B1:a,2:b-B8:c,9:d",
// ----//---- depth=2
"T3/T-T/B1:a,2:b-B8:c,9:d",
"T7/T-T/B1:a,2:b-B8:c,9:d",
// ----//---- found by AllStructs
"T1,3/B0:d-B1:a-B3:d,4:g",
"T1,4/B0:e-B1:a-B4:c",
// ----//---- found by AllStructs
"T2,4/T-T-T/T1-T-B4:f/T-T-B3:f/B0:h-B1:f",
"T4/T-T/B3:f-T/B4:a",
// ---- found by AllStructs ----
// trackSet2 wrongly computed due to top not being tracked to tree root
"T2/T1-T/B0:g-B1:b-T/B2:b,3:a",
"T2/T1-T/T-T-B2:a/B0:c-B1:g",
// unchanged node is reparented
"T1/B0:c-B1:f",
"T1/T-T/B0:c-T/B1:h",
// SIGSEGV in ApplyΔ
"T1/T-T2/T-B1:c-B2:c/B0:g",
"T1/T-T/B0:g-T/B1:e",
// trackSet corruption: oid is pointed by some .parent but is not present
"T1/T-T/B0:g-T2/B1:h-B2:g",
"T/T1/T-T2/B0:e-B1:f-B2:g",
// ApplyΔ -> xunion: node is reachable from multiple parents
// ( because xdifference did not remove common non-leaf node
// under which there were also other changed, but not initially
// tracked, node )
"T4/T1-T/T-T2-B4:c/T-T-T/B0:f-B1:h-B2:g,3:b",
"T1/T-T/T-T2/T-T-T/B0:f-B1:h-B2:f",
// ----//----
"T3/T1-T/T-T2-T/B0:b-T-T-B3:h/B1:e-B2:a",
"T1/T-T4/T-T2-T/T-T-T-T/B0:b-B1:e-B2:a,3:c-B4:e",
// ----//----
"T/T1,3/T-T2-T4/B0:b-T-T-B3:g-B4:c/B1:b-B2:e",
"T1,4/T-T-T/T-T2-B4:f/T-T-T/B0:h-B1:b-B2:h,3:a",
"T2/B1:a-B7:g",
"T2,8/B1:a-B7:g-B9:i",
"T2/B1:a-B2:b", "T/B1:a,2:b",
"T2,3/B1:a-B2:b-B3:c", "T/B1:a,2:b",
"T2,3/B1:a-B2:c-B3:c", "T/B1:a,2:b",
"T2/B1:a-B2:c", "T2,3/B1:a-B2:c-B3:c",
"T2/B1:a-B3:c",
Δ("T2/T-T4/B1:b-B3:d-B99:h",
A{1: K(1),
3: K(3,99,oo),
99: K(3,99,oo),
oo: K(3,99,oo)}),
}
// direct tree_i -> tree_{i+1} -> _{i+2} ... plus
// reverse ... tree_i <- _{i+1} <- _{i+2}
kadjOK := ΔBTest(testv[len(testv)-1]).kadjOK
for i := len(testv)-2; i >= 0; i-- {
test := ΔBTest(testv[i])
kadjOK, test.kadjOK = test.kadjOK, kadjOK
testv = append(testv, test)
}
testq := make(chan ΔBTestEntry)
go func() {
defer close(testq)
for _, test := range testv {
testq <- ΔBTest(test)
}
}()
testΔBTail(t, testq)
}
// TestΔBTailRandom verifies ΔBtail on random tree topologies generated by AllStructs.
//
// It generates three random {k->v} dicts, materializes for each the set of all
// random tree topologies that represent that dict, and feeds testΔBTail with
// tree sequences T1->T2->T3->T1->... so that all right-directed triplets are
// visited exactly once.
func TestΔBTailRandom(t *testing.T) {
	X := exc.Raiseif

	// considerations:
	// - maxdepth↑ better for testing (more tricky topologies)
	// - maxsplit↑ not so better for testing (leave s=1, max s=2)
	// - |kmin - kmax| affects N(variants) significantly
	//   -> keep key range small (dumb increase does not help testing)
	// - N(keys) affects N(variants) significantly
	//   -> keep Nkeys reasonably small/medium (dumb increase does not help testing)
	//
	// - spawning python subprocess is very slow (takes 300-500ms for
	//   imports; https://github.com/pypa/setuptools/issues/510)
	//   -> we spawn `treegen allstructs` once and use request/response approach.
	maxdepth := xbtreetest.N(2, 3, 4)
	maxsplit := xbtreetest.N(1, 2, 2)
	n := xbtreetest.N(10, 10, 100)
	nkeys := xbtreetest.N(3, 5, 10)

	// server to generate AllStructs(kv, ...)
	sg, err := xbtreetest.StartAllStructsSrv(); X(err)
	defer func() {
		err := sg.Close(); X(err)
	}()

	// random-number generator
	rng, seed := xbtreetest.NewRand()
	t.Logf("# maxdepth=%d maxsplit=%d nkeys=%d n=%d seed=%d", maxdepth, maxsplit, nkeys, n, seed)

	// generate (kv1, kv2, kv3) randomly
	// keysv1, keysv2 and keysv3 are random shuffle of IntSets
	var keysv1 [][]int
	var keysv2 [][]int
	var keysv3 [][]int
	for keys := range IntSets(nkeys) {
		keysv1 = append(keysv1, keys)
		keysv2 = append(keysv2, keys)
		keysv3 = append(keysv3, keys)
	}
	v := keysv1
	rng.Shuffle(len(v), func(i, j int) { v[i], v[j] = v[j], v[i] })
	v = keysv2
	rng.Shuffle(len(v), func(i, j int) { v[i], v[j] = v[j], v[i] })
	v = keysv3
	rng.Shuffle(len(v), func(i, j int) { v[i], v[j] = v[j], v[i] })

	// given random (kv1, kv2, kv3) generate corresponding set of random tree
	// topology sets (T1, T2, T3). Then iterate through T1->T2->T3->T1...
	// elements such that all right-directed triplets are visited and only once.
	// Test Update and rebuild on the generated tree sequences.
	vv := "abcdefghij"
	randv := func() string {
		i := rng.Intn(len(vv))
		return vv[i : i+1]
	}

	// the number of pairs is 3·n^2
	// the number of triplets is n^3
	//
	// limit n for emitted triplets, so that the amount of work for Update
	// and rebuild tests is approximately of the same order.
	nrebuild := int(math.Ceil(math.Pow(3*float64(n*n), 1./3)))
	// in non-short mode rebuild tests are exercising more keys variants, plus every test case
	// takes more time. Compensate for that as well.
	if !testing.Short() {
		nrebuild -= 3
	}

	testq := make(chan ΔBTestEntry)
	go func() {
		defer close(testq)
		for i := range keysv1 {
			keys1 := keysv1[i]
			keys2 := keysv2[i]
			keys3 := keysv3[i]

			kv1 := map[Key]string{}
			kv2 := map[Key]string{}
			kv3 := map[Key]string{}
			for _, k := range keys1 { kv1[Key(k)] = randv() }
			for _, k := range keys2 { kv2[Key(k)] = randv() }
			for _, k := range keys3 { kv3[Key(k)] = randv() }

			treev1, err1 := sg.AllStructs(kv1, maxdepth, maxsplit, n, rng.Int63())
			treev2, err2 := sg.AllStructs(kv2, maxdepth, maxsplit, n, rng.Int63())
			treev3, err3 := sg.AllStructs(kv3, maxdepth, maxsplit, n, rng.Int63())
			err := xerr.Merge(err1, err2, err3)
			if err != nil {
				// NOTE: not t.Fatal - Fatal/FailNow must be called only
				// from the goroutine running the test, not from here.
				t.Error(err)
				return
			}

			emit := func(tree string, flags ΔBTestFlags) {
				// skip emitting this entry if both Update and
				// Rebuild are requested to be skipped.
				if flags == (ΔBTest_SkipUpdate | ΔBTest_SkipRebuild) {
					return
				}
				testq <- ΔBTestEntry{tree, nil, flags}
			}

			// URSkipIf builds flags that skip Update/rebuild under the given conditions.
			URSkipIf := func(ucond, rcond bool) ΔBTestFlags {
				var flags ΔBTestFlags
				if ucond {
					flags |= ΔBTest_SkipUpdate
				}
				if rcond {
					flags |= ΔBTest_SkipRebuild
				}
				return flags
			}

			for j := range treev1 {
				for k := range treev2 {
					for l := range treev3 {
						// limit rebuild to subset of tree topologies,
						// because #(triplets) grow as n^3. See nrebuild
						// definition above for details.
						norebuild := (j >= nrebuild ||
							k >= nrebuild ||
							l >= nrebuild)

						// C_{l-1} -> Aj  (pair first seen on k=0)
						emit(treev1[j], URSkipIf(k != 0, norebuild))
						// Aj -> Bk  (pair first seen on l=0)
						emit(treev2[k], URSkipIf(l != 0, norebuild))
						// Bk -> Cl  (pair first seen on j=0)
						emit(treev3[l], URSkipIf(j != 0, norebuild))
					}
				}
			}
		}
	}()

	testΔBTail(t, testq)
}
// testΔBTail verifies ΔBTail on sequence of tree topologies coming from testq.
//
// For every entry it commits the new topology and, depending on entry flags,
// verifies KAdj, ΔBtail.Update and ΔBtail rebuild against the transition.
func testΔBTail(t_ *testing.T, testq chan ΔBTestEntry) {
	t := xbtreetest.NewT(t_)

	// t0 remembers pre-previous commit; rebuild needs a (t0,t1,t2) triplet.
	var t0 *xbtreetest.Commit
	for test := range testq {
		t1 := t.Head()
		t2 := t.CommitTree(test.tree)
		subj := fmt.Sprintf("%s -> %s", t1.Tree, t2.Tree)
		//t.Logf("\n\n\n**** %s ****\n\n", subj)

		// KAdj - verify adjacency matrix, if the test entry provides expected one
		if kadjOK := test.kadjOK; kadjOK != nil {
			t.Run(fmt.Sprintf("KAdj/%s→%s", t1.Tree, t2.Tree), func(t *testing.T) {
				kadj := KAdj(t1, t2)
				if !reflect.DeepEqual(kadj, kadjOK) {
					t.Fatalf("BUG: computed kadj is wrong:\nkadjOK: %v\nkadj : %v\n\n", kadjOK, kadj)
				}
			})
		}

		// ΔBTail.Update
		if test.flags & ΔBTest_SkipUpdate == 0 {
			xverifyΔBTail_Update(t.T, subj, t.DB, t.Root(), t1,t2)
		}

		// ΔBTail.rebuild
		if t0 != nil && (test.flags & ΔBTest_SkipRebuild == 0) {
			xverifyΔBTail_rebuild(t.T, t.DB, t.Root(), t0,t1,t2)
		}

		// NOTE: only t0 needs to be carried over; t1 is re-derived from
		// t.Head() on the next iteration (the previous `t0, t1 = t1, t2`
		// assignment to t1 was dead - staticcheck SA4006).
		t0 = t1
	}
}
......@@ -301,7 +594,6 @@ func _KAdj(t1, t2 *xbtreetest.Commit, keysv ...setKey) (kadj KAdjMatrix) {
// xverifyΔBTail_rebuild.
func xverifyΔBTail_Update(t *testing.T, subj string, db *zodb.DB, treeRoot zodb.Oid, t1, t2 *xbtreetest.Commit) {
// verify transition at1->at2 for all initial states of tracked {keys} from kv1 + kv2 + ∞
t.Run(fmt.Sprintf("Update/%s→%s", t1.Tree, t2.Tree), func(t *testing.T) {
allKeys := allTestKeys(t1, t2)
allKeyv := allKeys.SortedElements()
......@@ -474,8 +766,8 @@ func xverifyΔBTail_Update1(t *testing.T, subj string, db *zodb.DB, treeRoot zod
// δT <- δB
δToid := δB.ΔByRoot[treeRoot] // {} k -> δoid
δT = XGetδKV(t1,t2, δToid) // {} k -> δ(ZBlk(oid).data)
δToid := δB.ΔByRoot[treeRoot] // {} k -> δoid
δT = xgetδKV(t1,t2, δToid) // {} k -> δ(ZBlk(oid).data)
// δT must be subset of d12.
// changed keys, that are
......@@ -502,56 +794,12 @@ func xverifyΔBTail_Update1(t *testing.T, subj string, db *zodb.DB, treeRoot zod
if !inδT && inδTok {
badf("δT ∌ δTok[%v]", k)
}
if inδT {
if δT[k] != d12[k] {
badf("δT[%v] ≠ δTok[%v]", k, k)
}
}
}
}
// assertTrack verifies state of .trackSet and ΔTtail.trackNew.
// it assumes that only one tree root is being tracked.
// XXX place
func (δBtail *ΔBtail) assertTrack(t *testing.T, subj string, trackSetOK blib.PPTreeSubSet, trackNewOK blib.PPTreeSubSet) {
t.Helper()
if !δBtail.trackSet.Equal(trackSetOK) {
t.Errorf("%s: trackSet:\n\thave: %v\n\twant: %v", subj, δBtail.trackSet, trackSetOK)
}
roots := setOid{}
for root := range δBtail.vδTbyRoot {
roots.Add(root)
}
nrootsOK := 1
if trackSetOK.Empty() && trackNewOK.Empty() {
nrootsOK = 0
}
if len(roots) != nrootsOK {
t.Errorf("%s: len(vδTbyRoot) != %d ; roots=%v", subj, nrootsOK, roots)
return
}
if nrootsOK == 0 {
return
}
root := roots.Elements()[0]
δTtail := δBtail.vδTbyRoot[root]
trackNewRootsOK := setOid{}
if !trackNewOK.Empty() {
trackNewRootsOK.Add(root)
}
if !δBtail.trackNewRoots.Equal(trackNewRootsOK) {
t.Errorf("%s: trackNewRoots:\n\thave: %v\n\twant: %v", subj, δBtail.trackNewRoots, trackNewRootsOK)
}
if !δTtail.trackNew.Equal(trackNewOK) {
t.Errorf("%s: vδT.trackNew:\n\thave: %v\n\twant: %v", subj, δTtail.trackNew, trackNewOK)
if inδT {
if δT[k] != d12[k] {
badf("δT[%v] ≠ δTok[%v]", k, k)
}
}
}
}
......@@ -777,7 +1025,7 @@ func xverifyΔBTail_rebuild_U(t *testing.T, δbtail *ΔBtail, treeRoot zodb.Oid,
}
δToid, ok := δB.ΔByRoot[treeRoot]
if ok {
δT = XGetδKV(ti, tj, δToid)
δT = xgetδKV(ti, tj, δToid)
}
if δB.Rev != tj.At {
t.Errorf("%s: δB.Rev: have %s ; want %s", subj, δB.Rev, tj.At)
......@@ -812,106 +1060,6 @@ func xverifyΔBTail_rebuild_TR(t *testing.T, δbtail *ΔBtail, tj *xbtreetest.Co
assertΔTtail(t, subj, δbtail, tj, treeRoot, xat, vδTok...)
}
// assertΔTtail verifies state of ΔTtail that corresponds to treeRoot in δbtail.
// it also verifies that δbtail.vδBroots matches ΔTtail data.
//
// vδTok lists expected per-revision {k -> δ(data)} entries, oldest first,
// ending at tj. An empty map in vδTok means that revision must be absent
// from the recorded vδT.
func assertΔTtail(t *testing.T, subj string, δbtail *ΔBtail, tj *xbtreetest.Commit, treeRoot zodb.Oid, xat map[zodb.Tid]string, vδTok ...map[Key]Δstring) {
	t.Helper()
	// XXX +KVAtTail, +lastRevOf

	// walk tj and its predecessors backwards to build:
	// - vatOK:  expected revision vector (only for non-empty δTok entries),
	// - vδTok_: expected δT vector aligned with vatOK,
	// - at2t:   at -> Commit index used below to resolve blk data.
	l := len(vδTok)
	var vatOK []zodb.Tid
	var vδTok_ []map[Key]Δstring
	at2t := map[zodb.Tid]*xbtreetest.Commit{tj.At: tj}
	t0 := tj
	for i := 0; i<l; i++ {
		// empty vδTok entries means they should be absent in vδT
		if δTok := vδTok[l-i-1]; len(δTok) != 0 {
			// prepend, so that vatOK/vδTok_ end up oldest-first
			vatOK = append([]zodb.Tid{t0.At}, vatOK...)
			vδTok_ = append([]map[Key]Δstring{δTok}, vδTok_...)
		}
		t0 = t0.Prev
		at2t[t0.At] = t0
	}
	vδTok = vδTok_

	// extract recorded vδT for treeRoot (if any tracking state exists for it)
	δTtail, ok := δbtail.vδTbyRoot[treeRoot]
	var vδToid []ΔTree
	if ok {
		vδToid = δTtail.vδT
	}

	// translate recorded {k -> δoid} entries into {k -> δ(data)} so they can
	// be compared against vδTok. atPrev tracks the snapshot preceding each δT.
	l = len(vδToid)
	var vat []zodb.Tid
	var vδT []map[Key]Δstring
	atPrev := t0.At
	for _, δToid := range vδToid {
		vat = append(vat, δToid.Rev)
		δT := XGetδKV(at2t[atPrev], at2t[δToid.Rev], δToid.ΔKV) // {} k -> δ(ZBlk(oid).data)
		vδT = append(vδT, δT)
		atPrev = δToid.Rev
	}

	// collect revisions at which δbtail.vδBroots says treeRoot changed
	var vatδB []zodb.Tid // δbtail.vδBroots/treeRoot
	for _, δBroots := range δbtail.vδBroots {
		if δBroots.ΔRoots.Has(treeRoot) {
			vatδB = append(vatδB, δBroots.Rev)
		}
	}

	// tok: ΔTtail data matches expectation; bok: vδBroots matches it too
	tok := tidvEqual(vat, vatOK) && vδTEqual(vδT, vδTok)
	bok := tidvEqual(vatδB, vatOK)
	if !(tok && bok) {
		// build one aggregated error message describing have/want state
		emsg := fmt.Sprintf("%s: vδT:\n", subj)
		have := ""
		for i := 0; i<len(vδT); i++ {
			have += fmt.Sprintf("\n\t@%s: %v", xat[vat[i]], vδT[i])
		}
		emsg += fmt.Sprintf("have: %s\n", have)

		if !tok {
			want := ""
			for i := 0; i<len(vδTok); i++ {
				want += fmt.Sprintf("\n\t@%s: %v", xat[vatOK[i]], vδTok[i])
			}
			emsg += fmt.Sprintf("want: %s\n", want)
		}

		if !bok {
			vδb_root := ""
			for i := 0; i<len(vatδB); i++ {
				vδb_root += fmt.Sprintf("\n\t@%s", xat[vatδB[i]])
			}
			emsg += fmt.Sprintf("vδb/root: %s\n", vδb_root)
		}

		t.Error(emsg)
	}
}
// xtrackKeys issues δbtail.Track requests for tree[keys].
// XXX place
func xtrackKeys(δbtail *ΔBtail, t *xbtreetest.Commit, keys setKey) {
X := exc.Raiseif
head := δbtail.Head()
if head != t.At {
panicf("BUG: δbtail.head: %s ; t.at: %s", head, t.At)
}
for k := range keys {
// NOTE: if tree is deleted - the following adds it to tracked
// set with every key being a hole. This aligns with the
// following situation
//
// T1 -> ø -> T2
//
// where after T1->ø, even though the tree becomes deleted, its root
// continues to be tracked and all keys migrate to holes in the
// tracking set. By aligning initial state to the same as after
// T1->ø, we test what will happen on ø->T2.
b := t.Xkv.Get(k)
err := δbtail.track(k, b.Path()); X(err)
}
}
// xverifyΔBTail_GetAt verifies δBtail.Get on series of vt ZODB changes.
// XXX
// XXX kill
......@@ -956,635 +1104,626 @@ func xverifyΔBTail_GetAt1(t *testing.T, db *zodb.DB, treeRoot zodb.Oid, vt []*x
δbtail := NewΔBtail(vt[0].At, db)
for i := 1; i < len(vt); i++ {
_, err := δbtail.Update(vt[i].ΔZ); X(err)
}
// Track(keys)
txn, ctx := transaction.New(context.Background())
defer txn.Abort()
zconn, err := db.Open(ctx, &zodb.ConnOptions{At: vt[len(vt)-1].At}); X(err)
xtree, err := zconn.Get(ctx, treeRoot); X(err)
ztree := xtree.(*Tree)
for k := range keys {
_, _, path, err := ZTreeGetBlkData(ctx, ztree, k); X(err)
err = δbtail.Track(k, path); X(err)
}
// verify GetAt(k, @at) for all keys and @at
for i := 1; i < len(vt); i++ {
at := vt[i].At
for _, k := range keys.SortedElements() {
vOid, ok, rev, revExact, err := δbtail.GetAt(ctx, ztree, k, at); X(err)
v := xzgetBlkDataAt(db, vOid, rev)
v_, ok_ := vt[i].Xkv.Get(k).kv[k]
rev_, revExact_ := vt[i].At, false
for j := i-1; j >= 0; j-- {
v__ := vt[j].Xkv.Get(k).kv[k]
if v__ != v_ {
rev_ = vt[j+1].At
revExact_ = true
break
}
rev_ = vt[j].At
}
if v == "" { v = DEL }
if v_ == "" { v_ = DEL }
if !(v == v_ && ok == ok_ && rev == rev_ && revExact == revExact_) {
t.Errorf("Get(%d, @%s) ->\nhave: %s, %v, @%s, %v\nwant: %s, %v, @%s, %v",
k, xat[at],
v, ok, xat[rev], revExact,
v_, ok_, xat[rev_], revExact_)
}
}
}
}
*/
// ----------------------------------------
// ΔBTestEntry represents one entry in ΔBTail tests.
type ΔBTestEntry struct {
	tree   string      // next tree topology
	kadjOK KAdjMatrix  // adjacency matrix against previous case (optional)
	flags  ΔBTestFlags // which verifications to skip for this entry
}

// ΔBTestFlags is a bitmask controlling which verifications run for a test entry.
type ΔBTestFlags int
const ΔBTest_SkipUpdate  ΔBTestFlags = 1 // skip verifying Update for this test entry
const ΔBTest_SkipRebuild ΔBTestFlags = 2 // skip verifying rebuild for this test entry
// ΔBTest converts xtest into ΔBTestEntry.
// xtest can be string|ΔBTestEntry.
func ΔBTest(xtest interface{}) ΔBTestEntry {
	var entry ΔBTestEntry
	switch x := xtest.(type) {
	case string:
		// bare topology string: no expected kadj, no flags
		entry = ΔBTestEntry{tree: x}
	case ΔBTestEntry:
		entry = x
	default:
		panicf("BUG: ΔBTest: bad type %T", xtest)
	}
	return entry
}
// testΔBTail verifies ΔBTail on sequence of tree topologies coming from testq.
//
// For every entry it commits the new topology and, depending on entry flags,
// verifies KAdj, ΔBtail.Update and ΔBtail rebuild against the transition.
func testΔBTail(t_ *testing.T, testq chan ΔBTestEntry) {
	t := xbtreetest.NewT(t_)

	// t0 remembers pre-previous commit; rebuild needs a (t0,t1,t2) triplet.
	var t0 *xbtreetest.Commit
	for test := range testq {
		t1 := t.Head()
		t2 := t.CommitTree(test.tree)
		subj := fmt.Sprintf("%s -> %s", t1.Tree, t2.Tree)
		//t.Logf("\n\n\n**** %s ****\n\n", subj)

		// KAdj - verify adjacency matrix, if the test entry provides expected one
		if kadjOK := test.kadjOK; kadjOK != nil {
			t.Run(fmt.Sprintf("KAdj/%s→%s", t1.Tree, t2.Tree), func(t *testing.T) {
				kadj := KAdj(t1, t2)
				if !reflect.DeepEqual(kadj, kadjOK) {
					t.Fatalf("BUG: computed kadj is wrong:\nkadjOK: %v\nkadj : %v\n\n", kadjOK, kadj)
				}
			})
		}

		// ΔBTail.Update
		if test.flags & ΔBTest_SkipUpdate == 0 {
			xverifyΔBTail_Update(t.T, subj, t.DB, t.Root(), t1,t2)
		}

		// ΔBTail.rebuild
		if t0 != nil && (test.flags & ΔBTest_SkipRebuild == 0) {
			xverifyΔBTail_rebuild(t.T, t.DB, t.Root(), t0,t1,t2)
		}

		// NOTE: only t0 needs to be carried over; t1 is re-derived from
		// t.Head() on the next iteration (the previous `t0, t1 = t1, t2`
		// assignment to t1 was dead - staticcheck SA4006).
		t0 = t1
	}
}
// TestΔBTail verifies ΔBTail for explicitly provided tree topologies.
func TestΔBTail(t *testing.T) {
// K is shorthand for setKey
K := func(keyv ...Key) setKey {
ks := setKey{}
for _, k := range keyv { ks.Add(k) }
return ks
}
// oo is shorthand for KeyMax
const oo = KeyMax
// A is shorthand for KAdjMatrix
type A = KAdjMatrix
// Δ is shorthand for ΔBTestEntry
Δ := func(tree string, kadjOK A) (test ΔBTestEntry) {
test.tree = tree
test.kadjOK = kadjOK
return test
}
// test known cases going through tree1 -> tree2 -> ...
testv := []interface{} {
// start from non-empty tree to verify both ->empty and empty-> transitions
"T/B1:a,2:b",
// empty
"T/B:",
// +1
Δ("T/B1:a",
A{1: K(1,oo),
oo: K(1,oo)}),
// +2
Δ("T/B1:a,2:b",
A{1: K(1,2,oo),
2: K(1,2,oo),
oo: K(1,2,oo)}),
}
// -1
Δ("T/B2:b",
A{1: K(1,2,oo),
2: K(1,2,oo),
oo: K(1,2,oo)}),
// Track(keys)
txn, ctx := transaction.New(context.Background())
defer txn.Abort()
zconn, err := db.Open(ctx, &zodb.ConnOptions{At: vt[len(vt)-1].At}); X(err)
xtree, err := zconn.Get(ctx, treeRoot); X(err)
ztree := xtree.(*Tree)
// 2: b->c
Δ("T/B2:c",
A{2: K(2,oo),
oo: K(2,oo)}),
for k := range keys {
_, _, path, err := ZTreeGetBlkData(ctx, ztree, k); X(err)
err = δbtail.Track(k, path); X(err)
}
// +1 in new bucket (to the left)
Δ("T2/B1:a-B2:c",
A{1: K(1,2,oo),
2: K(1,2,oo),
oo: K(1,2,oo)}),
// verify GetAt(k, @at) for all keys and @at
for i := 1; i < len(vt); i++ {
at := vt[i].At
for _, k := range keys.SortedElements() {
vOid, ok, rev, revExact, err := δbtail.GetAt(ctx, ztree, k, at); X(err)
v := xzgetBlkDataAt(db, vOid, rev)
// +3 in new bucket (to the right)
Δ("T2,3/B1:a-B2:c-B3:c",
A{1: K(1),
2: K(2,3,oo),
3: K(2,3,oo),
oo: K(2,3,oo)}),
v_, ok_ := vt[i].Xkv.Get(k).kv[k]
rev_, revExact_ := vt[i].At, false
for j := i-1; j >= 0; j-- {
v__ := vt[j].Xkv.Get(k).kv[k]
if v__ != v_ {
rev_ = vt[j+1].At
revExact_ = true
break
}
rev_ = vt[j].At
}
// bucket split; +3 in new bucket
"T/B1:a,2:b",
Δ("T2/B1:a-B2:b,3:c",
A{1: K(1,2,3,oo),
2: K(1,2,3,oo),
3: K(1,2,3,oo),
oo: K(1,2,3,oo)}),
if v == "" { v = DEL }
if v_ == "" { v_ = DEL }
// bucket split; +3 in new bucket; +4 +5 in another new bucket
// everything becomes tracked because original bucket had [-∞,∞) coverage
"T/B1:a,2:b",
Δ("T2,4/B1:a-B2:b,3:c-B4:d,5:e",
A{1: K(1,2,3,4,5,oo),
2: K(1,2,3,4,5,oo),
3: K(1,2,3,4,5,oo),
4: K(1,2,3,4,5,oo),
5: K(1,2,3,4,5,oo),
oo: K(1,2,3,4,5,oo)}),
if !(v == v_ && ok == ok_ && rev == rev_ && revExact == revExact_) {
t.Errorf("Get(%d, @%s) ->\nhave: %s, %v, @%s, %v\nwant: %s, %v, @%s, %v",
k, xat[at],
v, ok, xat[rev], revExact,
v_, ok_, xat[rev_], revExact_)
}
}
}
}
*/
// reflow of keys: even if tracked={1}, changes to all B nodes need to be rescanned:
// +B12 forces to look in -B23 which adds -3 into δ, which
// forces to look into +B34 and so on.
"T2,4,6/B1:a-B2:b,3:c-B4:d,5:e-B6:f,7:g",
Δ("T3,5,7/B1:g,2:f-B3:e,4:d-B5:c,6:b-B7:a",
A{1: K(1,2,3,4,5,6,7,oo),
2: K(1,2,3,4,5,6,7,oo),
3: K(1,2,3,4,5,6,7,oo),
4: K(1,2,3,4,5,6,7,oo),
5: K(1,2,3,4,5,6,7,oo),
6: K(1,2,3,4,5,6,7,oo),
7: K(1,2,3,4,5,6,7,oo),
oo: K(1,2,3,4,5,6,7,oo)}),
// reflow of keys for rebuild: even if tracked1={}, tracked2={1}, changes to
// all A/B/C nodes need to be rescanned. Contrary to the above case the reflow
// is not detectable at separate diff(A,B) and diff(B,C) runs.
"T3,5,7/B1:a,2:b-B3:c,4:d-B5:e,6:f-B7:g,8:h",
"T/B1:b",
"T2,4,6/B1:a-B2:b,3:c-B4:d,5:e-B6:f,7:g",
// similar situation where rebuild has to detect reflow in between non-neighbour trees
"T3,6/B1:a,2:b-B3:c,4:d-B6:f,7:g",
"T4,7/B1:b-B4:d,5:e-B7:g,8:h",
"T2,5,8/B1:a-B2:b,3:c-B5:e,6:f-B8:h,9:i",
// ----------------------------------------
// depth=2; bucket split; +3 in new bucket; left T remain
// _unchanged_ even though B under it is modified.
"T/T/B1:a,2:b",
Δ("T2/T-T/B1:a-B2:b,3:c",
A{1: K(1,2,3,oo),
2: K(1,2,3,oo),
3: K(1,2,3,oo),
oo: K(1,2,3,oo)}),
func TestΔBtailForget(t_ *testing.T) {
t := xbtreetest.NewT(t_)
X := exc.Raiseif
// depth=2; like prev. case, but additional right arm with +4 +5 is added.
"T/T/B1:a,2:b",
Δ("T2,4/T-T-T/B1:a-B2:b,3:c-B4:d,5:e",
A{1: K(1,2,3,4,5,oo),
2: K(1,2,3,4,5,oo),
3: K(1,2,3,4,5,oo),
4: K(1,2,3,4,5,oo),
5: K(1,2,3,4,5,oo),
oo: K(1,2,3,4,5,oo)}),
t0 := t.CommitTree("T/B:")
t1 := t.CommitTree("T/B1:a")
t2 := t.CommitTree("T2/B1:a-B2:b")
t3 := t.CommitTree("T/B2:b")
// depth=2; bucket split; +3 in new bucket; t0 and t1 split;
// +right arm (T7/B45-B89).
"T/T/B1:a,2:b",
Δ("T4/T2-T7/B1:a-B2:b,3:c-B4:d,5:e-B8:h,9:i",
A{1: K(1,2,3,4,5,8,9,oo),
2: K(1,2,3,4,5,8,9,oo),
3: K(1,2,3,4,5,8,9,oo),
4: K(1,2,3,4,5,8,9,oo),
5: K(1,2,3,4,5,8,9,oo),
8: K(1,2,3,4,5,8,9,oo),
9: K(1,2,3,4,5,8,9,oo),
oo: K(1,2,3,4,5,8,9,oo)}),
δbtail := NewΔBtail(t0.At, t.DB)
_, err := δbtail.Update(t1.ΔZ); X(err)
_, err = δbtail.Update(t2.ΔZ); X(err)
// start tracking. everything becomes tracked because t1's T/B1:a has [-∞,∞) coverage
// By starting tracking after t2 we verify vδBroots update in both Update and rebuild
_0 := setKey{}; _0.Add(0)
xtrackKeys(δbtail, t2, _0)
// 2 reflow to right B neighbour; 8 splits into new B; δ=ø
"T3/B1:a,2:b-B4:d,8:h",
"T2,5/B1:a-B2:b,4:d-B8:h",
_, err = δbtail.Update(t3.ΔZ); X(err)
// case where kadj does not grow too much as leafs coverage remains stable
"T4,8/B1:a,2:b-B5:d,6:e-B10:g,11:h",
Δ("T4,8/B2:b,3:c-B6:e,7:f-B11:h,12:i",
A{1: K(1,2,3),
2: K(1,2,3),
3: K(1,2,3),
5: K(5,6,7),
6: K(5,6,7),
7: K(5,6,7,),
10: K(10,11,12,oo),
11: K(10,11,12,oo),
12: K(10,11,12,oo),
oo: K(10,11,12,oo)}),
xat := map[zodb.Tid]string{
t0.At: "at0",
t1.At: "at1",
t2.At: "at2",
t3.At: "at3",
}
assertΔTtail(t.T, "init", δbtail, t3, t.Root(), xat, t1.Δxkv, t2.Δxkv, t3.Δxkv)
δbtail.ForgetPast(t0.At)
assertΔTtail(t.T, "forget ≤ at0", δbtail, t3, t.Root(), xat, t1.Δxkv, t2.Δxkv, t3.Δxkv)
δbtail.ForgetPast(t1.At)
assertΔTtail(t.T, "forget ≤ at1", δbtail, t3, t.Root(), xat, t2.Δxkv, t3.Δxkv)
δbtail.ForgetPast(t3.At)
assertΔTtail(t.T, "forget ≤ at3", δbtail, t3, t.Root(), xat, )
// XXX verify no aliasing
}
// tree deletion
// having ø in the middle of the test cases exercises all:
// * `ø -> Tree ...` (tree is created anew),
// * `... Tree -> ø` (tree is deleted), and
// * `Tree -> ø -> Tree` (tree is deleted and then recreated)
xbtreetest.DEL,
func TestΔBtailSliceByRootRev(t_ *testing.T) {
// SliceByRootRev is thin wrapper to return ΔTtail.vδT slice.
// Recomputing ΔTtail.vδT itself is exercised in depth by xverifyΔBTail_rebuild.
// Here we verify only properties of the wrapper.
t := xbtreetest.NewT(t_)
X := exc.Raiseif
// tree rotation
"T3/B2:b-B3:c,4:d",
"T5/T3-T7/B2:a-B3:a,4:a-B6:a-B8:a",
// ΔT is similar to ΔTree but uses Δstring instead of ΔValue for ΔKV
type ΔT struct {
Rev zodb.Tid
ΔKV map[Key]Δstring
}
// δ is shorthand for ΔKV
type δ = map[Key]Δstring
// found by AllStructs ([1] is not changed, but because B1 is
// unlinked and 1 migrates to other bucket, changes in that
// other bucket must be included into δT)
"T1,2/B0:e-B1:d-B2:g,3:a",
"T1/B0:d-B1:d,2:d",
// ----//---- with depth=2
"T1,2/T-T-T/B0:a-B1:b-B2:c,3:d",
"T1/T-T/B0:e-B1:b,2:f",
t0 := t.CommitTree("T2/B1:a-B2:f")
t1 := t.CommitTree("T2/B1:b-B2:g")
t2 := t.CommitTree("T2/B1:c-B2:h")
// XXX depth=3 (to verify recursion and selecting which tree children to follow or not)
const a, b, c = "a", "b", "c"
const f, g, h = "f", "g", "h"
xat := map[zodb.Tid]string{
t0.At: "at0",
t1.At: "at1",
t2.At: "at2",
}
at2t := map[zodb.Tid]*xbtreetest.Commit{ // XXX -> move to treeenv ?
t0.At: t0,
t1.At: t1,
t2.At: t2,
}
// degenerate topology from ZODB tests
// https://github.com/zopefoundation/ZODB/commit/6cd24e99f89b
// https://github.com/zopefoundation/BTrees/blob/4.7.2-1-g078ba60/BTrees/tests/testBTrees.py#L20-L57
"T4/T2-T/T-T-T6,10/B1:a-B3:b-T-T-T/T-B7:c-B11:d/B5:e",
"T/B1:e,5:d,7:c,8:b,11:a", // -3 +8
δbtail := NewΔBtail(t0.At, t.DB)
_, err := δbtail.Update(t1.ΔZ); X(err)
_, err = δbtail.Update(t2.ΔZ); X(err)
// was leading treegen to generate corrupt trees
"T/T1/T-T/B0:g-B1:e,2:d,3:h",
"T1/T-T3/B0:g-T-T/B1:e,2:d-B3:h",
// track 2 + rebuild.
_2 := setKey{}; _2.Add(2)
xtrackKeys(δbtail, t2, _2)
err = δbtail.rebuildAll(); X(err)
δttail := δbtail.vδTbyRoot[t.Root()]
// assertvδT asserts that vδT matches vδTok
assertvδT := func(subj string, vδT []ΔTree, vδTok ...ΔT) {
t.Helper()
// convert vδT from ΔTree to ΔT
var vδT_ []ΔT
for _, δT := range vδT {
tj := at2t[δT.Rev]
δt := ΔT{δT.Rev, xgetδKV(tj.Prev, tj, δT.ΔKV)}
vδT_ = append(vδT_, δt)
}
// was leading to wrongly computed trackSet2 due to top not
// being tracked to tree root.
"T/T1/B0:a-B1:b",
"T/T1/T-T/B0:c-B1:d",
if reflect.DeepEqual(vδT_, vδTok) {
return
}
have := []string{}
for _, δT := range vδT_ {
have = append(have, fmt.Sprintf("@%s·%v", xat[δT.Rev], δT.ΔKV))
}
want := []string{}
for _, δT := range vδTok {
want = append(want, fmt.Sprintf("@%s·%v", xat[δT.Rev], δT.ΔKV))
}
t.Errorf("%s:\nhave: %s\nwant: %s", subj, have, want)
}
s00 := δbtail.SliceByRootRev(t.Root(), t0.At, t0.At)
s01 := δbtail.SliceByRootRev(t.Root(), t0.At, t1.At)
s02 := δbtail.SliceByRootRev(t.Root(), t0.At, t2.At)
s12 := δbtail.SliceByRootRev(t.Root(), t1.At, t2.At)
s22 := δbtail.SliceByRootRev(t.Root(), t2.At, t2.At)
vδT := δttail.vδT
assertvδT("t2.vδT", vδT, ΔT{t1.At, δ{2:{f,g}}}, ΔT{t2.At, δ{2:{g,h}}})
assertvδT("t2.s00", s00)
assertvδT("t2.s01", s01, ΔT{t1.At, δ{2:{f,g}}})
assertvδT("t2.s02", s02, ΔT{t1.At, δ{2:{f,g}}}, ΔT{t2.At, δ{2:{g,h}}})
assertvδT("t2.s12", s12, ΔT{t2.At, δ{2:{g,h}}})
assertvδT("t2.s22", s22)
// sXX should be all aliased to vδT
gg, _ := t0.XGetBlkByName("g")
hh, _ := t0.XGetBlkByName("h")
vδT[0].Rev = t0.At; δkv0 := vδT[0].ΔKV; vδT[0].ΔKV = map[Key]ΔValue{11:{gg,gg}}
vδT[1].Rev = t0.At; δkv1 := vδT[1].ΔKV; vδT[1].ΔKV = map[Key]ΔValue{12:{hh,hh}}
assertvδT("t2.vδT*", vδT, ΔT{t0.At, δ{11:{g,g}}}, ΔT{t0.At, δ{12:{h,h}}})
assertvδT("t2.s00*", s00)
assertvδT("t2.s01*", s01, ΔT{t0.At, δ{11:{g,g}}})
assertvδT("t2.s02*", s02, ΔT{t0.At, δ{11:{g,g}}}, ΔT{t0.At, δ{12:{h,h}}})
assertvδT("t2.s12*", s12, ΔT{t0.At, δ{12:{h,h}}})
assertvδT("2.s22*", s22)
vδT[0].Rev = t1.At; vδT[0].ΔKV = δkv0
vδT[1].Rev = t2.At; vδT[1].ΔKV = δkv1
assertvδT("t2.vδT+", vδT, ΔT{t1.At, δ{2:{f,g}}}, ΔT{t2.At, δ{2:{g,h}}})
assertvδT("t2.s00+", s00)
assertvδT("t2.s01+", s01, ΔT{t1.At, δ{2:{f,g}}})
assertvδT("t2.s02+", s02, ΔT{t1.At, δ{2:{f,g}}}, ΔT{t2.At, δ{2:{g,h}}})
assertvδT("t2.s12+", s12, ΔT{t2.At, δ{2:{g,h}}})
assertvδT("t2.s22+", s22)
// after track 1 + rebuild old slices remain unchanged, but new queries return updated data
_1 := setKey{}; _1.Add(1)
xtrackKeys(δbtail, t2, _1)
err = δbtail.rebuildAll(); X(err)
s00_ := δbtail.SliceByRootRev(t.Root(), t0.At, t0.At)
s01_ := δbtail.SliceByRootRev(t.Root(), t0.At, t1.At)
s02_ := δbtail.SliceByRootRev(t.Root(), t0.At, t2.At)
s12_ := δbtail.SliceByRootRev(t.Root(), t1.At, t2.At)
s22_ := δbtail.SliceByRootRev(t.Root(), t2.At, t2.At)
vδT = δttail.vδT
assertvδT("t12.vδT", vδT, ΔT{t1.At, δ{1:{a,b},2:{f,g}}}, ΔT{t2.At, δ{1:{b,c},2:{g,h}}})
assertvδT("t12.s00", s00)
assertvδT("t12.s00_", s00_)
assertvδT("t12.s01", s01, ΔT{t1.At, δ{ 2:{f,g}}})
assertvδT("t12.s01_", s01_, ΔT{t1.At, δ{1:{a,b},2:{f,g}}})
assertvδT("t12.s02", s02, ΔT{t1.At, δ{ 2:{f,g}}}, ΔT{t2.At, δ{ 2:{g,h}}})
assertvδT("t12.s02_", s02_, ΔT{t1.At, δ{1:{a,b},2:{f,g}}}, ΔT{t2.At, δ{1:{b,c},2:{g,h}}})
assertvδT("t12.s12", s12, ΔT{t2.At, δ{ 2:{g,h}}})
assertvδT("t12.s12_", s12_, ΔT{t2.At, δ{1:{b,c},2:{g,h}}})
assertvδT("t12.s22", s22)
assertvδT("t12.s22_", s22_)
// sXX_ should be all aliased to vδT, but not sXX
bb, _ := t0.XGetBlkByName("b")
cc, _ := t0.XGetBlkByName("c")
vδT[0].Rev = t0.At; δkv0 = vδT[0].ΔKV; vδT[0].ΔKV = map[Key]ΔValue{111:{bb,bb}}
vδT[1].Rev = t0.At; δkv1 = vδT[1].ΔKV; vδT[1].ΔKV = map[Key]ΔValue{112:{cc,cc}}
assertvδT("t12.vδT*", vδT, ΔT{t0.At, δ{111:{b,b}}}, ΔT{t0.At, δ{112:{c,c}}})
assertvδT("t12.s00*", s00)
assertvδT("t12.s00_*", s00_)
assertvδT("t12.s01*", s01, ΔT{t1.At, δ{ 2:{f,g}}})
assertvδT("t12.s01_*", s01_, ΔT{t0.At, δ{111:{b,b} }})
assertvδT("t12.s02*", s02, ΔT{t1.At, δ{ 2:{f,g}}}, ΔT{t2.At, δ{ 2:{g,h}}})
assertvδT("t12.s02_*", s02_, ΔT{t0.At, δ{111:{b,b} }}, ΔT{t0.At, δ{112:{c,c} }})
assertvδT("t12.s12*", s12, ΔT{t2.At, δ{ 2:{g,h}}})
assertvδT("t12.s12_*", s12_, ΔT{t0.At, δ{112:{c,c} }})
assertvδT("t12.s22*", s22)
assertvδT("t12.s22_*", s22_)
vδT[0].Rev = t1.At; vδT[0].ΔKV = δkv0
vδT[1].Rev = t2.At; vδT[1].ΔKV = δkv1
assertvδT("t12.vδT+", vδT, ΔT{t1.At, δ{1:{a,b},2:{f,g}}}, ΔT{t2.At, δ{1:{b,c},2:{g,h}}})
assertvδT("t12.s00+", s00)
assertvδT("t12.s00_+", s00_)
assertvδT("t12.s01+", s01, ΔT{t1.At, δ{ 2:{f,g}}})
assertvδT("t12.s01_+", s01_, ΔT{t1.At, δ{1:{a,b},2:{f,g}}})
assertvδT("t12.s02+", s02, ΔT{t1.At, δ{ 2:{f,g}}}, ΔT{t2.At, δ{ 2:{g,h}}})
assertvδT("t12.s02_+", s02_, ΔT{t1.At, δ{1:{a,b},2:{f,g}}}, ΔT{t2.At, δ{1:{b,c},2:{g,h}}})
assertvδT("t12.s12+", s12, ΔT{t2.At, δ{ 2:{g,h}}})
assertvδT("t12.s12_+", s12_, ΔT{t2.At, δ{1:{b,c},2:{g,h}}})
assertvδT("t12.s22+", s22)
assertvδT("t12.s22_+", s22_)
}
// was leading to wrongly computed trackSet2: leaf bucket not
// reparented to root.
"T/T/B0:a",
"T/B0:a",
// δtkeycov grows due to change in parent tree only
"T3/B1:a-B8:c",
"T7/B1:a-B8:c",
// ----//----
"T3/B1:a,2:b-B8:c,9:d",
"T7/B1:a,2:b-B8:c,9:d",
// ----//---- depth=2
"T3/T-T/B1:a,2:b-B8:c,9:d",
"T7/T-T/B1:a,2:b-B8:c,9:d",
// ----//---- found by AllStructs
"T1,3/B0:d-B1:a-B3:d,4:g",
"T1,4/B0:e-B1:a-B4:c",
// ----//---- found by AllStructs
"T2,4/T-T-T/T1-T-B4:f/T-T-B3:f/B0:h-B1:f",
"T4/T-T/B3:f-T/B4:a",
func TestΔBtailClone(t_ *testing.T) {
// ΔBtail.Clone had bug that aliased klon data to orig
t := xbtreetest.NewT(t_)
X := exc.Raiseif
t0 := t.CommitTree("T2/B1:a-B2:b")
t1 := t.CommitTree("T2/B1:c-B2:d")
δbtail := NewΔBtail(t0.At, t.DB)
_, err := δbtail.Update(t1.ΔZ); X(err)
_2 := setKey{}; _2.Add(2)
xtrackKeys(δbtail, t1, _2)
err = δbtail.rebuildAll(); X(err)
xat := map[zodb.Tid]string{
t0.At: "at0",
t1.At: "at1",
}
// ---- found by AllStructs ----
δkv1_1 := map[Key]Δstring{2:{"b","d"}}
assertΔTtail(t.T, "orig @at1", δbtail, t1, t.Root(), xat, δkv1_1)
δbklon := δbtail.Clone()
assertΔTtail(t.T, "klon @at1", δbklon, t1, t.Root(), xat, δkv1_1)
// trackSet2 wrongly computed due to top not being tracked to tree root
"T2/T1-T/B0:g-B1:b-T/B2:b,3:a",
"T2/T1-T/T-T-B2:a/B0:c-B1:g",
t2 := t.CommitTree("T/B1:b,2:a")
_, err = δbtail.Update(t2.ΔZ); X(err)
xat[t2.At] = "at2"
// unchanged node is reparented
"T1/B0:c-B1:f",
"T1/T-T/B0:c-T/B1:h",
δkv1_2 := map[Key]Δstring{1:{"a","c"}, 2:{"b","d"}}
δkv2_2 := map[Key]Δstring{1:{"c","b"}, 2:{"d","a"}}
assertΔTtail(t.T, "orig @at2", δbtail, t2, t.Root(), xat, δkv1_2, δkv2_2)
assertΔTtail(t.T, "klon @at1 after orig @at->@at2", δbklon, t1, t.Root(), xat, δkv1_1)
}
// SIGSEGV in ApplyΔ
"T1/T-T2/T-B1:c-B2:c/B0:g",
"T1/T-T/B0:g-T/B1:e",
// trackSet corruption: oid is pointed by some .parent but is not present
"T1/T-T/B0:g-T2/B1:h-B2:g",
"T/T1/T-T2/B0:e-B1:f-B2:g",
// -------- KAdj --------
// ApplyΔ -> xunion: node is reachable from multiple parents
// ( because xdifference did not remove common non-leaf node
// under which there were also other changed, but not initially
// tracked, node )
"T4/T1-T/T-T2-B4:c/T-T-T/B0:f-B1:h-B2:g,3:b",
"T1/T-T/T-T2/T-T-T/B0:f-B1:h-B2:f",
// ----//----
"T3/T1-T/T-T2-T/B0:b-T-T-B3:h/B1:e-B2:a",
"T1/T-T4/T-T2-T/T-T-T-T/B0:b-B1:e-B2:a,3:c-B4:e",
// ----//----
"T/T1,3/T-T2-T4/B0:b-T-T-B3:g-B4:c/B1:b-B2:e",
"T1,4/T-T-T/T-T2-B4:f/T-T-T/B0:h-B1:b-B2:h,3:a",
// Map returns kadj·keys.
//
// I.e. the union of adjacency sets kadj[k] over every k ∈ keys.
// It panics if some key from keys is not present in kadj.
func (kadj KAdjMatrix) Map(keys setKey) setKey {
	out := make(setKey, len(keys))
	for k := range keys {
		adj, have := kadj[k]
		if !have {
			panicf("kadj.Map: %d ∉ kadj\n\nkadj: %v", k, kadj)
		}
		out.Update(adj)
	}
	return out
}
"T2/B1:a-B7:g",
"T2,8/B1:a-B7:g-B9:i",
// Mul returns kadjA·kadjB.
//
// (kadjA·kadjB).Map(keys) = kadjA.Map(kadjB.Map(keys))
func (kadjA KAdjMatrix) Mul(kadjB KAdjMatrix) KAdjMatrix {
// ~ assert kadjA.keys == kadjB.keys
// check only len here; the rest will be asserted by Map
if len(kadjA) != len(kadjB) {
panicf("kadj.Mul: different keys:\n\nkadjA: %v\nkadjB: %v", kadjA, kadjB)
}
"T2/B1:a-B2:b", "T/B1:a,2:b",
"T2,3/B1:a-B2:b-B3:c", "T/B1:a,2:b",
"T2,3/B1:a-B2:c-B3:c", "T/B1:a,2:b",
kadj := make(KAdjMatrix, len(kadjB))
for k, tob := range kadjB {
kadj[k] = kadjA.Map(tob)
}
return kadj
}
"T2/B1:a-B2:c", "T2,3/B1:a-B2:c-B3:c",
// KAdj computes adjacency matrix for t1 -> t2 transition.
//
// The set of keys for which kadj matrix is computed can be optionally provided.
// This set of keys defaults to allTestKeys(t1,t2).
//
// KAdj itself is verified by testΔBTail on entries with .kadjOK set.
func KAdj(t1, t2 *xbtreetest.Commit, keysv ...setKey) (kadj KAdjMatrix) {
	// the matrix must be symmetric: KAdj(A,B) == KAdj(B,A);
	// compute both directions and verify they agree before returning.
	kadjAB := _KAdj(t1, t2, keysv...)
	kadjBA := _KAdj(t2, t1, keysv...)
	if !reflect.DeepEqual(kadjAB, kadjBA) {
		panicf("KAdj not symmetric:\nt1: %s\nt2: %s\nkadj12: %v\nkadj21: %v",
			t1.Tree, t2.Tree, kadjAB, kadjBA)
	}
	return kadjAB
}
"T2/B1:a-B3:c",
Δ("T2/T-T4/B1:b-B3:d-B99:h",
A{1: K(1),
3: K(3,99,oo),
99: K(3,99,oo),
oo: K(3,99,oo)}),
const debugKAdj = false
func debugfKAdj(format string, argv ...interface{}) {
if debugKAdj {
fmt.Printf(format, argv...)
}
// direct tree_i -> tree_{i+1} -> _{i+2} ... plus
// reverse ... tree_i <- _{i+1} <- _{i+2}
kadjOK := ΔBTest(testv[len(testv)-1]).kadjOK
for i := len(testv)-2; i >= 0; i-- {
test := ΔBTest(testv[i])
kadjOK, test.kadjOK = test.kadjOK, kadjOK
testv = append(testv, test)
}
func _KAdj(t1, t2 *xbtreetest.Commit, keysv ...setKey) (kadj KAdjMatrix) {
var keys setKey
switch len(keysv) {
case 0:
keys = allTestKeys(t1, t2)
case 1:
keys = keysv[0]
default:
panic("multiple key sets on the call")
}
testq := make(chan ΔBTestEntry)
go func() {
defer close(testq)
for _, test := range testv {
testq <- ΔBTest(test)
}
debugfKAdj("\n\n_KAdj\n")
debugfKAdj("t1: %s\n", t1.Tree)
debugfKAdj("t2: %s\n", t2.Tree)
debugfKAdj("keys: %s\n", keys)
defer func() {
debugfKAdj("kadj -> %v\n", kadj)
}()
testΔBTail(t, testq)
}
// kadj = {} k -> adjacent keys.
// if k is tracked and covered by changed leaf -> changes to adjacents must be in Update(t1->t2).
kadj = KAdjMatrix{}
for k := range keys {
adj1 := setKey{}
adj2 := setKey{}
// TestΔBTailAllStructs verifies ΔBtail on tree topologies generated by AllStructs.
var (
verylongFlag = flag.Bool("verylong", false, `switch tests to run in "very long" mode`)
randseedFlag = flag.Int64("randseed", -1, `seed for random number generator`)
)
func TestΔBTailAllStructs(t *testing.T) {
X := exc.Raiseif
q1 := &blib.RangedKeySet{}; q1.Add(k)
q2 := &blib.RangedKeySet{}; q2.Add(k)
done1 := &blib.RangedKeySet{}
done2 := &blib.RangedKeySet{}
// considerations:
// - maxdepth↑ better for testing (more tricky topologies)
// - maxsplit↑ not so better for testing (leave s=1, max s=2)
// - |kmin - kmax| affects N(variants) significantly
// -> keep key range small (dumb increase does not help testing)
// - N(keys) affects N(variants) significantly
// -> keep Nkeys reasonably small/medium (dumb increase does not help testing)
//
// - spawning python subprocess is very slow (takes 300-500ms for
// imports; https://github.com/pypa/setuptools/issues/510)
// -> we spawn `treegen allstructs` once and use request/response approach.
debugfKAdj("\nk%s\n", kstr(k))
for !q1.Empty() || !q2.Empty() {
debugfKAdj("q1: %s\tdone1: %s\n", q1, done1)
debugfKAdj("q2: %s\tdone2: %s\n", q2, done2)
for _, r1 := range q1.AllRanges() {
lo1 := r1.Lo
for {
b1 := t1.Xkv.Get(lo1)
debugfKAdj(" b1: %s\n", b1)
for k_ := range keys {
if b1.Keycov.Has(k_) {
adj1.Add(k_)
debugfKAdj(" adj1 += %s\t-> %s\n", kstr(k_), adj1)
}
}
done1.AddRange(b1.Keycov)
// q2 |= (b1.keyrange \ done2)
δq2 := &blib.RangedKeySet{}
δq2.AddRange(b1.Keycov)
δq2.DifferenceInplace(done2)
q2.UnionInplace(δq2)
debugfKAdj("q2 += %s\t-> %s\n", δq2, q2)
N := func(short, medium, long int) int {
// -short
if testing.Short() {
return short
}
// -verylong
if *verylongFlag {
return long
// continue with next right bucket until r1 coverage is complete
if r1.Hi_ <= b1.Keycov.Hi_ {
break
}
lo1 = b1.Keycov.Hi_ + 1
}
}
q1.Clear()
for _, r2 := range q2.AllRanges() {
lo2 := r2.Lo
for {
b2 := t2.Xkv.Get(lo2)
debugfKAdj(" b2: %s\n", b2)
for k_ := range keys {
if b2.Keycov.Has(k_) {
adj2.Add(k_)
debugfKAdj(" adj2 += %s\t-> %s\n", kstr(k_), adj2)
}
}
done2.AddRange(b2.Keycov)
// q1 |= (b2.keyrange \ done1)
δq1 := &blib.RangedKeySet{}
δq1.AddRange(b2.Keycov)
δq1.DifferenceInplace(done1)
q1.UnionInplace(δq1)
debugfKAdj("q1 += %s\t-> %s\n", δq1, q1)
// continue with next right bucket until r2 coverage is complete
if r2.Hi_ <= b2.Keycov.Hi_ {
break
}
lo2 = b2.Keycov.Hi_ + 1
}
}
q2.Clear()
}
// default
return medium
adj := setKey{}; adj.Update(adj1); adj.Update(adj2)
kadj[k] = adj
}
maxdepth := N(2, 3, 4)
maxsplit := N(1, 2, 2)
n := N(10,10,100)
nkeys := N(3, 5, 10)
// server to generate AllStructs(kv, ...)
sg, err := xbtreetest.StartAllStructsSrv(); X(err)
defer func() {
err := sg.Close(); X(err)
}()
return kadj
}
// random seed
seed := *randseedFlag
if seed == -1 {
seed = time.Now().UnixNano()
}
rng := rand.New(rand.NewSource(seed))
t.Logf("# maxdepth=%d maxsplit=%d nkeys=%d n=%d seed=%d", maxdepth, maxsplit, nkeys, n, seed)
// generate (kv1, kv2, kv3) randomly
// ----------------------------------------
// keysv1, keysv2 and keysv3 are random shuffle of IntSets
var keysv1 [][]int
var keysv2 [][]int
var keysv3 [][]int
for keys := range IntSets(nkeys) {
keysv1 = append(keysv1, keys)
keysv2 = append(keysv2, keys)
keysv3 = append(keysv3, keys)
// assertΔTtail verifies state of ΔTtail that corresponds to treeRoot in δbtail.
// it also verifies that δbtail.vδBroots matches ΔTtail data.
func assertΔTtail(t *testing.T, subj string, δbtail *ΔBtail, tj *xbtreetest.Commit, treeRoot zodb.Oid, xat map[zodb.Tid]string, vδTok ...map[Key]Δstring) {
t.Helper()
// XXX +lastRevOf
l := len(vδTok)
var vatOK []zodb.Tid
var vδTok_ []map[Key]Δstring
at2t := map[zodb.Tid]*xbtreetest.Commit{tj.At: tj}
t0 := tj
for i := 0; i<l; i++ {
// empty vδTok entries means they should be absent in vδT
if δTok := vδTok[l-i-1]; len(δTok) != 0 {
vatOK = append([]zodb.Tid{t0.At}, vatOK...)
vδTok_ = append([]map[Key]Δstring{δTok}, vδTok_...)
}
t0 = t0.Prev
at2t[t0.At] = t0
}
vδTok = vδTok_
δTtail, ok := δbtail.vδTbyRoot[treeRoot]
var vδToid []ΔTree
if ok {
vδToid = δTtail.vδT
}
v := keysv1
rng.Shuffle(len(v), func(i,j int) { v[i], v[j] = v[j], v[i] })
v = keysv2
rng.Shuffle(len(v), func(i,j int) { v[i], v[j] = v[j], v[i] })
v = keysv3
rng.Shuffle(len(v), func(i,j int) { v[i], v[j] = v[j], v[i] })
// given random (kv1, kv2, kv3) generate corresponding set of random tree
// topology sets (T1, T2, T3). Then iterate through T1->T2->T3->T1...
// elements such that all right-directed triplets are visited and only once.
// Test Update and rebuild on the generated tree sequences.
vv := "abcdefgh"
randv := func() string {
i := rng.Intn(len(vv))
return vv[i:i+1]
l = len(vδToid)
var vat []zodb.Tid
var vδT []map[Key]Δstring
atPrev := t0.At
for _, δToid := range vδToid {
vat = append(vat, δToid.Rev)
δT := xgetδKV(at2t[atPrev], at2t[δToid.Rev], δToid.ΔKV) // {} k -> δ(ZBlk(oid).data)
vδT = append(vδT, δT)
atPrev = δToid.Rev
}
// the number of pairs is 3·n^2
// the number of triplets is n^3
//
// limit n for emitted triplets, so that the amount of work for Update
// and rebuild tests is approximately of the same order.
nrebuild := int(math.Ceil(math.Pow(3*float64(n*n), 1./3)))
// in non-short mode rebuild tests are exercising more keys variants, plus every test case
// takes more time. Compensate for that as well.
if !testing.Short() {
nrebuild -= 3
var vatδB []zodb.Tid // δbtail.vδBroots/treeRoot
for _, δBroots := range δbtail.vδBroots {
if δBroots.ΔRoots.Has(treeRoot) {
vatδB = append(vatδB, δBroots.Rev)
}
}
testq := make(chan ΔBTestEntry)
go func() {
defer close(testq)
for i := range keysv1 {
keys1 := keysv1[i]
keys2 := keysv2[i]
keys3 := keysv3[i]
kv1 := map[Key]string{}
kv2 := map[Key]string{}
kv3 := map[Key]string{}
for _, k := range keys1 { kv1[Key(k)] = randv() }
for _, k := range keys2 { kv2[Key(k)] = randv() }
for _, k := range keys3 { kv3[Key(k)] = randv() }
treev1, err1 := sg.AllStructs(kv1, maxdepth, maxsplit, n, rng.Int63())
treev2, err2 := sg.AllStructs(kv2, maxdepth, maxsplit, n, rng.Int63())
treev3, err3 := sg.AllStructs(kv3, maxdepth, maxsplit, n, rng.Int63())
err := xerr.Merge(err1, err2, err3)
if err != nil {
t.Fatal(err)
}
tok := tidvEqual(vat, vatOK) && vδTEqual(vδT, vδTok)
bok := tidvEqual(vatδB, vatOK)
if !(tok && bok) {
emsg := fmt.Sprintf("%s: vδT:\n", subj)
have := ""
for i := 0; i<len(vδT); i++ {
have += fmt.Sprintf("\n\t@%s: %v", xat[vat[i]], vδT[i])
}
emsg += fmt.Sprintf("have: %s\n", have)
emit := func(tree string, flags ΔBTestFlags) {
// skip emitting this entry if both Update and
// Rebuild are requested to be skipped.
if flags == (ΔBTest_SkipUpdate | ΔBTest_SkipRebuild) {
return
}
testq <- ΔBTestEntry{tree, nil, flags}
if !tok {
want := ""
for i := 0; i<len(vδTok); i++ {
want += fmt.Sprintf("\n\t@%s: %v", xat[vatOK[i]], vδTok[i])
}
emsg += fmt.Sprintf("want: %s\n", want)
}
URSkipIf := func(ucond, rcond bool) ΔBTestFlags {
var flags ΔBTestFlags
if ucond {
flags |= ΔBTest_SkipUpdate
}
if rcond {
flags |= ΔBTest_SkipRebuild
}
return flags
if !bok {
vδb_root := ""
for i := 0; i<len(vatδB); i++ {
vδb_root += fmt.Sprintf("\n\t@%s", xat[vatδB[i]])
}
emsg += fmt.Sprintf("vδb/root: %s\n", vδb_root)
}
for j := range treev1 {
for k := range treev2 {
for l := range treev3 {
// limit rebuild to subset of tree topologies,
// because #(triplets) grow as n^3. See nrebuild
// definition above for details.
norebuild := (j >= nrebuild ||
k >= nrebuild ||
l >= nrebuild)
t.Error(emsg)
}
}
// C_{l-1} -> Aj (pair first seen on k=0)
emit(treev1[j], URSkipIf(k != 0, norebuild))
// assertTrack verifies state of .trackSet and ΔTtail.trackNew.
// it assumes that only one tree root is being tracked.
func (δBtail *ΔBtail) assertTrack(t *testing.T, subj string, trackSetOK blib.PPTreeSubSet, trackNewOK blib.PPTreeSubSet) {
t.Helper()
if !δBtail.trackSet.Equal(trackSetOK) {
t.Errorf("%s: trackSet:\n\thave: %v\n\twant: %v", subj, δBtail.trackSet, trackSetOK)
}
// Aj -> Bk (pair first seen on l=0)
emit(treev2[k], URSkipIf(l != 0, norebuild))
roots := setOid{}
for root := range δBtail.vδTbyRoot {
roots.Add(root)
}
// Bk -> Cl (pair first seen on j=0)
emit(treev3[l], URSkipIf(j != 0, norebuild))
}
}
}
}
}()
nrootsOK := 1
if trackSetOK.Empty() && trackNewOK.Empty() {
nrootsOK = 0
}
if len(roots) != nrootsOK {
t.Errorf("%s: len(vδTbyRoot) != %d ; roots=%v", subj, nrootsOK, roots)
return
}
if nrootsOK == 0 {
return
}
testΔBTail(t, testq)
}
root := roots.Elements()[0]
δTtail := δBtail.vδTbyRoot[root]
func TestΔBtailForget(t_ *testing.T) {
t := xbtreetest.NewT(t_)
X := exc.Raiseif
trackNewRootsOK := setOid{}
if !trackNewOK.Empty() {
trackNewRootsOK.Add(root)
}
t0 := t.CommitTree("T/B:")
t1 := t.CommitTree("T/B1:a")
t2 := t.CommitTree("T2/B1:a-B2:b")
t3 := t.CommitTree("T/B2:b")
if !δBtail.trackNewRoots.Equal(trackNewRootsOK) {
t.Errorf("%s: trackNewRoots:\n\thave: %v\n\twant: %v", subj, δBtail.trackNewRoots, trackNewRootsOK)
}
δbtail := NewΔBtail(t0.At, t.DB)
_, err := δbtail.Update(t1.ΔZ); X(err)
_, err = δbtail.Update(t2.ΔZ); X(err)
if !δTtail.trackNew.Equal(trackNewOK) {
t.Errorf("%s: vδT.trackNew:\n\thave: %v\n\twant: %v", subj, δTtail.trackNew, trackNewOK)
}
}
// start tracking. everything becomes tracked because t1's T/B1:a has [-∞,∞) coverage
// By starting tracking after t2 we verify vδBroots update in both Update and rebuild
_0 := setKey{}; _0.Add(0)
xtrackKeys(δbtail, t2, _0)
// trackSet returns what should be ΔBtail.trackSet coverage for specified tracked key set.
//
// It is a thin wrapper over _trackSetWithCov that skips computing key coverage.
func trackSet(rbs xbtreetest.RBucketSet, tracked setKey) blib.PPTreeSubSet {
	// nil = don't compute keyCover
	// (trackSet is called from inside hot inner loop of rebuild test,
	//  and building a RangedKeySet there would be wasted work)
	return _trackSetWithCov(rbs, tracked, nil)
}
_, err = δbtail.Update(t3.ΔZ); X(err)
// trackSetWithCov returns what should be ΔBtail.trackSet and its key coverage for specified tracked key set.
func trackSetWithCov(rbs xbtreetest.RBucketSet, tracked setKey) (trackSet blib.PPTreeSubSet, keyCover *blib.RangedKeySet) {
	// delegate to _trackSetWithCov with an output coverage accumulator
	cov := &blib.RangedKeySet{}
	tset := _trackSetWithCov(rbs, tracked, cov)
	return tset, cov
}
xat := map[zodb.Tid]string{
t0.At: "at0",
t1.At: "at1",
t2.At: "at2",
t3.At: "at3",
func _trackSetWithCov(rbs xbtreetest.RBucketSet, tracked setKey, outKeyCover *blib.RangedKeySet) (trackSet blib.PPTreeSubSet) {
trackSet = blib.PPTreeSubSet{}
for k := range tracked {
kb := rbs.Get(k)
if outKeyCover != nil {
outKeyCover.AddRange(kb.Keycov)
}
trackSet.AddPath(kb.Path())
}
assertΔTtail(t.T, "init", δbtail, t3, t.Root(), xat, t1.Δxkv, t2.Δxkv, t3.Δxkv)
δbtail.ForgetPast(t0.At)
assertΔTtail(t.T, "forget ≤ at0", δbtail, t3, t.Root(), xat, t1.Δxkv, t2.Δxkv, t3.Δxkv)
δbtail.ForgetPast(t1.At)
assertΔTtail(t.T, "forget ≤ at1", δbtail, t3, t.Root(), xat, t2.Δxkv, t3.Δxkv)
δbtail.ForgetPast(t3.At)
assertΔTtail(t.T, "forget ≤ at3", δbtail, t3, t.Root(), xat, )
return trackSet
}
func TestΔBtailClone(t_ *testing.T) {
// ΔBtail.Clone had bug that aliased klon data to orig
t := xbtreetest.NewT(t_)
// xtrackKeys issues δbtail.Track requests for tree[keys].
func xtrackKeys(δbtail *ΔBtail, t *xbtreetest.Commit, keys setKey) {
X := exc.Raiseif
t0 := t.CommitTree("T2/B1:a-B2:b")
t1 := t.CommitTree("T2/B1:c-B2:d")
δbtail := NewΔBtail(t0.At, t.DB)
_, err := δbtail.Update(t1.ΔZ); X(err)
_2 := setKey{}; _2.Add(2)
xtrackKeys(δbtail, t1, _2)
err = δbtail.rebuildAll(); X(err)
xat := map[zodb.Tid]string{
t0.At: "at0",
t1.At: "at1",
head := δbtail.Head()
if head != t.At {
panicf("BUG: δbtail.head: %s ; t.at: %s", head, t.At)
}
δkv1_1 := map[Key]Δstring{2:{"b","d"}}
assertΔTtail(t.T, "orig @at1", δbtail, t1, t.Root(), xat, δkv1_1)
δbklon := δbtail.Clone()
assertΔTtail(t.T, "klon @at1", δbklon, t1, t.Root(), xat, δkv1_1)
for k := range keys {
// NOTE: if tree is deleted - the following adds it to tracked
// set with every key being a hole. This aligns with the
// following situation
//
// T1 -> ø -> T2
//
// where after T1->ø, even though the tree becomes deleted, its root
// continues to be tracked and all keys migrate to holes in the
// tracking set. By aligning initial state to the same as after
// T1->ø, we test what will happen on ø->T2.
b := t.Xkv.Get(k)
err := δbtail.track(k, b.Path()); X(err)
}
}
t2 := t.CommitTree("T/B1:b,2:a")
_, err = δbtail.Update(t2.ΔZ); X(err)
xat[t2.At] = "at2"
δkv1_2 := map[Key]Δstring{1:{"a","c"}, 2:{"b","d"}}
δkv2_2 := map[Key]Δstring{1:{"c","b"}, 2:{"d","a"}}
assertΔTtail(t.T, "orig @at2", δbtail, t2, t.Root(), xat, δkv1_2, δkv2_2)
assertΔTtail(t.T, "klon @at1 after orig @at->@at2", δbklon, t1, t.Root(), xat, δkv1_1)
// xgetδKV translates {k -> δ<oid>} to {k -> δ(ZBlk(oid).data)} according to t1..t2 db snapshots.
func xgetδKV(t1, t2 *xbtreetest.Commit, δkvOid map[Key]ΔValue) map[Key]Δstring {
	δkv := make(map[Key]Δstring, len(δkvOid))
	for k, δv := range δkvOid {
		// resolve old value via t1 snapshot and new value via t2 snapshot
		vOld := t1.XGetBlkData(δv.Old)
		vNew := t2.XGetBlkData(δv.New)
		δkv[k] = Δstring{Old: vOld, New: vNew}
	}
	return δkv
}
......
......@@ -24,8 +24,10 @@ import (
"context"
"errors"
"fmt"
"reflect"
"lab.nexedi.com/kirr/go123/xcontext"
"lab.nexedi.com/kirr/go123/xerr"
"lab.nexedi.com/kirr/neo/go/transaction"
"lab.nexedi.com/kirr/neo/go/zodb"
......@@ -82,6 +84,33 @@ func ZOpen(ctx context.Context, zdb *zodb.DB, zopt *zodb.ConnOptions) (_ *ZConn,
}, nil
}
// ZGetOrNil returns zconn.Get(oid), or (nil, nil) if the object does not exist.
func ZGetOrNil(ctx context.Context, zconn *zodb.Connection, oid zodb.Oid) (_ zodb.IPersistent, err error) {
	defer xerr.Contextf(&err, "zget %s@%s", oid, zconn.At())

	obj, err := zconn.Get(ctx, oid)
	if err != nil {
		// "no data / no object" means absence, not failure
		if IsErrNoData(err) {
			err = nil
		}
		return nil, err
	}

	// Activation is what actually verifies existence: after removal on
	// storage the object might have stayed in the Connection cache (e.g.
	// due to PCachePinObject), and then it is PActivate that returns the
	// "deleted" error.
	if err = obj.PActivate(ctx); err != nil {
		if IsErrNoData(err) {
			err = nil
		}
		return nil, err
	}
	obj.PDeactivate()

	return obj, nil
}
// IsErrNoData returns whether err is due to NoDataError or NoObjectError.
func IsErrNoData(err error) bool {
var eNoData *zodb.NoDataError
......@@ -96,3 +125,12 @@ func IsErrNoData(err error) bool {
return false
}
}
// XidOf returns string representation of object xid, or "ø" for no object.
func XidOf(obj zodb.IPersistent) string {
	if obj == nil {
		return "ø"
	}
	// a non-nil interface may still wrap a typed nil pointer —
	// treat that the same as "no object"
	if reflect.ValueOf(obj).IsNil() {
		return "ø"
	}
	return zodb.Xid{At: obj.PJar().At(), Oid: obj.POid()}.String()
}
......@@ -390,9 +390,9 @@ func (bf *zBigFileState) PySetState(pystate interface{}) (err error) {
return fmt.Errorf("blksize: must be > 0; got %d", blksize)
}
blktab, ok := t[1].(*btree.LOBTree)
if !ok {
return fmt.Errorf("blktab: expect LOBTree; got %s", xzodb.TypeOf(t[1]))
blktab, err := vBlktab(t[1])
if err != nil {
return err
}
bf.blksize = blksize
......@@ -437,9 +437,9 @@ func (bf *ZBigFile) LoadBlk(ctx context.Context, blk int64) (_ []byte, treePath
return make([]byte, bf.blksize), treePath, nil, blkRevMax, nil
}
zblk, ok = xzblk.(ZBlk)
if !ok {
return nil, nil, nil, 0, fmt.Errorf("expect ZBlk*; got %s", xzodb.TypeOf(xzblk))
zblk, err = vZBlk(xzblk)
if err != nil {
return nil, nil, nil, 0, err
}
blkdata, zblkrev, err := zblk.LoadBlkData(ctx)
......@@ -493,6 +493,23 @@ func (bf *ZBigFile) Size(ctx context.Context) (_ int64, treePath []btree.LONode,
return size, treePath, nil
}
// vZBlk checks and converts xzblk to a ZBlk object.
//
// It returns an error if xzblk does not implement ZBlk.
func vZBlk(xzblk interface{}) (ZBlk, error) {
	if zblk, ok := xzblk.(ZBlk); ok {
		return zblk, nil
	}
	return nil, fmt.Errorf("expect ZBlk*; got %s", xzodb.TypeOf(xzblk))
}
// vBlktab checks and converts xblktab to LOBTree object.
//
// It returns an error if xblktab is not a *btree.LOBTree.
func vBlktab(xblktab interface{}) (*btree.LOBTree, error) {
	if blktab, ok := xblktab.(*btree.LOBTree); ok {
		return blktab, nil
	}
	return nil, fmt.Errorf("blktab: expect LOBTree; got %s", xzodb.TypeOf(xblktab))
}
// ----------------------------------------
......
......@@ -19,11 +19,16 @@
package zdata
// XXX note about ΔFtail organization: query results are built on the fly to
// avoid the complexity of recomputing vδF on tracking set change.
import (
"context"
"fmt"
"sort"
"lab.nexedi.com/kirr/go123/xerr"
"lab.nexedi.com/kirr/neo/go/transaction"
"lab.nexedi.com/kirr/neo/go/zodb"
"lab.nexedi.com/kirr/neo/go/zodb/btree"
......@@ -46,12 +51,14 @@ type setOid = set.Oid
//
// δF:
// .rev↑
// {} file -> {}blk
// {} file -> {}blk | EPOCH
//
// Only files and blocks explicitly requested to be tracked are guaranteed to
// be present. In particular a block that was not explicitly requested to be
// tracked, even if it was changed in δZ, is not guaranteed to be present in δF.
//
// XXX after an epoch, previous track requests have no effect
//
// ΔFtail provides the following operations:
//
// .Track(file, blk, path, zblk) - add file and block reached via BTree path to tracked set.
......@@ -60,7 +67,7 @@ type setOid = set.Oid
// .ForgetPast(revCut) - forget changes past revCut
// .SliceByRev(lo, hi) -> []δF - query for all files changes with rev ∈ (lo, hi]
// .SliceByFileRev(file, lo, hi) -> []δfile - query for changes of file with rev ∈ (lo, hi]
// .LastBlkRev(file, #blk, at) - query for what is last revision that changed
// .BlkRevAt(file, #blk, at) - query for what is last revision that changed
// file[#blk] as of @at database state.
//
// where δfile represents a change to one file
......@@ -75,28 +82,36 @@ type setOid = set.Oid
type ΔFtail struct {
// ΔFtail merges ΔBtail with history of ZBlk
δBtail *xbtree.ΔBtail
fileIdx map[zodb.Oid]setOid // tree-root -> {} ZBigFile<oid> as of @head
fileIdx map[zodb.Oid]setOid // tree-root -> {} ZBigFile<oid> as of @head XXX -> root2file ?
byFile map[zodb.Oid]*_ΔFileTail // file -> vδf tail XXX
// set of files, which are newly tracked and for which vδE was not yet rebuilt
trackNew setOid // {}foid
trackSetZFile setOid // set of tracked ZBigFiles as of @head
trackSetZBlk map[zodb.Oid]*zblkTrack // zblk -> {} root -> {}blk as of @head
// XXX kill
///*
// XXX don't need vδF - everything is reconstructed at runtime from .δBtail.vδT
// this way we also don't need to keep up updating vδF from vδT on its rebuild during.
// data with δF changes. Actual for part of tracked set that was taken
// into account.
vδF []ΔF
// tracked ZBlk that are not yet taken into account in current vδF.
// grows on new track requests; flushes on queries and update.
trackNew map[zodb.Oid]map[zodb.Oid]*zblkTrack // {} foid -> {} zoid -> zblk
//*/
}
// _ΔFileTail represents tail of revisional changes to one file.
type _ΔFileTail struct {
	root zodb.Oid // .blktab as of @head
	vδE []_ΔFileEpoch // changes to ZBigFile object itself ; nil if not yet rebuilt
}
// _ΔFileEpoch represents change to ZBigFile object itself (creation,
// deletion, or .blktab/.blksize being switched) — i.e. an "epoch" boundary
// after which the file is considered changed completely.
type _ΔFileEpoch struct {
	Rev zodb.Tid // revision at which the epoch happened
	oldRoot zodb.Oid // .blktab was pointing to oldRoot before ; VDEL if ZBigFile deleted
	newRoot zodb.Oid // .blktab was changed to point to newRoot ; ----//----
	newBlkSize int64 // .blksize was changed to newBlkSize ; -1 if ZBigFile deleted
	// XXX +oldBlkSize ?

	// snapshot of trackSetZBlk for this file right before this epoch
	oldTrackSetZBlk map[zodb.Oid]setI64 // {} zblk -> {}blk
}
// zblkTrack keeps information in which root/blocks ZBlk is present as of @head.
type zblkTrack struct {
	// inroot map[zodb.Oid]setI64 // {} root -> {}blk XXX later switch to this
	infile map[zodb.Oid]setI64 // {} foid -> {}blk ; which blocks of which files reference this ZBlk
	inroot map[zodb.Oid]setI64 // {} root -> {}blk ; same, but keyed by blktab root
}
// ΔF represents a change in files space.
......@@ -108,11 +123,11 @@ type ΔF struct {
// ΔFile represents a change to one file.
type ΔFile struct {
	Rev zodb.Tid // revision of the change
	Epoch bool // whether file changed completely (ZBigFile object created/deleted/switched)
	Blocks setI64 // changed blocks XXX -> ΔBlocks ?
	Size bool // whether file size changed XXX -> ΔSize?
}
// NewΔFtail creates new ΔFtail object.
//
// Initial tracked set is empty.
......@@ -124,9 +139,9 @@ func NewΔFtail(at0 zodb.Tid, db *zodb.DB) *ΔFtail {
return &ΔFtail{
δBtail: xbtree.NewΔBtail(at0, db),
fileIdx: map[zodb.Oid]setOid{},
trackSetZFile: setOid{},
byFile: map[zodb.Oid]*_ΔFileTail{},
trackNew: setOid{},
trackSetZBlk: map[zodb.Oid]*zblkTrack{},
trackNew: map[zodb.Oid]map[zodb.Oid]*zblkTrack{},
}
}
......@@ -161,14 +176,24 @@ func (δFtail *ΔFtail) Track(file *ZBigFile, blk int64, path []btree.LONode, zb
}
root := path[0].(*btree.LOBTree)
files, ok := δFtail.fileIdx[root.POid()]
roid := root.POid()
files, ok := δFtail.fileIdx[roid]
if !ok {
files = setOid{}
δFtail.fileIdx[root.POid()] = files
δFtail.fileIdx[roid] = files
}
files.Add(foid)
δFtail.trackSetZFile.Add(foid)
δftail, ok := δFtail.byFile[foid]
if !ok {
δftail = &_ΔFileTail{root: roid, vδE: nil /*will need to be rebuilt till past*/}
δFtail.byFile[foid] = δftail
δFtail.trackNew.Add(foid)
}
if δftail.root != roid {
panicf("zfile<%s> changed root from %s -> %s", foid, δftail.root, roid)
}
// associate zblk with file, if it was not hole
if zblk != nil {
......@@ -179,36 +204,101 @@ func (δFtail *ΔFtail) Track(file *ZBigFile, blk int64, path []btree.LONode, zb
δFtail.trackSetZBlk[zoid] = zt
}
blocks, ok := zt.infile[foid]
inblk, ok := zt.inroot[roid]
if !ok {
blocks = make(setI64, 1)
if zt.infile == nil {
zt.infile = make(map[zodb.Oid]setI64)
inblk = make(setI64, 1)
if zt.inroot == nil {
zt.inroot = make(map[zodb.Oid]setI64)
}
zt.infile[foid] = blocks
zt.inroot[roid] = inblk
}
blocks.Add(blk)
inblk.Add(blk)
}
}
if !ok {
// zblk was not associated with this file
ft := δFtail.trackNew[foid]
if ft == nil {
ft = make(map[zodb.Oid]*zblkTrack, 1)
δFtail.trackNew[foid] = ft
}
ft[zoid] = zt
// rebuildAll rebuilds vδE for every file that was requested to be tracked but
// whose epoch history was not yet built (the trackNew set).
//
// Processed entries are removed from trackNew.
func (δFtail *ΔFtail) rebuildAll() (err error) {
	defer xerr.Contextf(&err, "ΔFtail rebuildAll")
	// XXX locking

	// NOTE deleting from a map while ranging over it is well-defined in Go:
	// entries removed during iteration are simply not produced again.
	for foid := range δFtail.trackNew {
		δFtail.trackNew.Del(foid)
		δBtail := δFtail.δBtail
		if err := δFtail.byFile[foid].rebuild1(foid, δBtail.ΔZtail(), δBtail.DB()); err != nil {
			return err
		}
	}

	// XXX mark something dirty so that LastBlkRev and Slice* know what to rebuild?
	return nil
}
// rebuildIfNeeded rebuilds vδE if there is such need.
//
// it returns corresponding δftail for convenience.
// the only case when vδE actually needs to be rebuilt is when the file just started to be tracked.
func (δFtail *ΔFtail) rebuildIfNeeded(foid zodb.Oid) (_ *_ΔFileTail, err error) {
// XXX locking
// XXX debug
/*
leaf := path[len(path)-1].(*btree.LOBucket)
for _, e := range leaf.Entryv() { // XXX activate
δFtail.tracked.Add(e.Key())
δftail := δFtail.byFile[foid]
if δftail.vδE != nil {
err = nil
} else {
δFtail.trackNew.Del(foid)
δBtail := δFtail.δBtail
err = δftail.rebuild1(foid, δBtail.ΔZtail(), δBtail.DB())
}
*/
return δftail, err
}
// rebuild1 rebuilds vδE of one file by scanning δZtail history.
//
// For every revision in δZtail that touched the ZBigFile object itself, a
// direct diff of the object is computed via zfilediff, and an epoch entry is
// appended if the object actually changed.
//
// It must be called only once per file - when vδE is still nil.
func (δftail *_ΔFileTail) rebuild1(foid zodb.Oid, δZtail *zodb.ΔTail, db *zodb.DB) (err error) {
	defer xerr.Contextf(&err, "file<%s>: rebuild", foid)

	// XXX locking

	if δftail.vδE != nil {
		panic("rebuild: vδE != nil")
	}

	vδE := []_ΔFileEpoch{}
	prev := δZtail.Tail() // database state the next diff starts from
	for _, δZ := range δZtail.Data() {
		// skip revisions that did not touch the ZBigFile object itself
		changed := false
		for _, oid := range δZ.Changev {
			if oid == foid {
				changed = true
				break
			}
		}
		if !changed {
			continue
		}

		δ, err := zfilediff(db, foid, prev, δZ.Rev)
		if err != nil {
			return err
		}
		if δ != nil {
			vδE = append(vδE, _ΔFileEpoch{
				Rev:             δZ.Rev,
				oldRoot:         δ.blktabOld,
				newRoot:         δ.blktabNew,
				newBlkSize:      δ.blksizeNew,
				oldTrackSetZBlk: nil, // nothing was tracked
			})
		}
		prev = δZ.Rev
	}

	δftail.vδE = vδE
	return nil
}
// Update updates δFtail given raw ZODB changes.
......@@ -217,16 +307,25 @@ func (δFtail *ΔFtail) Track(file *ZBigFile, blk int64, path []btree.LONode, zb
//
// δZ should include all objects changed by ZODB transaction.
//
// Zhead must be active connection at δFtail.Head() database state.
// Objects in Zhead must not be modified.
// During call to Update zhead must not be otherwise used - even for reading.
func (δFtail *ΔFtail) Update(δZ *zodb.EventCommit, zhead *xzodb.ZConn) (_ ΔF, err error) {
// XXX readd zhead?
// // Zhead must be active connection at δFtail.Head() database state.
// // Objects in Zhead must not be modified.
// // During call to Update zhead must not be otherwise used - even for reading.
//func (δFtail *ΔFtail) Update(δZ *zodb.EventCommit, zhead *xzodb.ZConn) (_ ΔF, err error) {
func (δFtail *ΔFtail) Update(δZ *zodb.EventCommit) (_ ΔF, err error) {
defer xerr.Contextf(&err, "ΔFtail update %s -> %s", δFtail.Head(), δZ.Tid)
// XXX δFtail.update() first?
// XXX verify zhead.At() == δFtail.Head()
// XXX locking
// rebuild vδE for newly tracked files
err = δFtail.rebuildAll()
if err != nil {
return ΔF{}, err
}
headOld := δFtail.Head()
δB, err := δFtail.δBtail.Update(δZ)
if err != nil {
return ΔF{}, err
......@@ -234,8 +333,47 @@ func (δFtail *ΔFtail) Update(δZ *zodb.EventCommit, zhead *xzodb.ZConn) (_ ΔF
δF := ΔF{Rev: δB.Rev, ByFile: make(map[zodb.Oid]*ΔFile)}
// take ZBigFile changes into account
δzfile := map[zodb.Oid]*_ΔZBigFile{} // which tracked ZBigFiles are changed
for _, oid := range δZ.Changev {
δftail, ok := δFtail.byFile[oid]
if !ok {
continue // not ZBigFile or file is not tracked
}
δ, err := zfilediff(δFtail.δBtail.DB(), oid, headOld, δZ.Tid)
if err != nil {
return ΔF{}, err
}
//fmt.Printf("zfile<%s> diff %s..%s -> δ: %v\n", oid, headOld, δZ.Tid, δ)
if δ != nil {
// XXX rebuild first
δzfile[oid] = δ
δE := _ΔFileEpoch{
Rev: δZ.Tid,
oldRoot: δ.blktabOld,
newRoot: δ.blktabNew,
newBlkSize: δ.blksizeNew,
oldTrackSetZBlk: map[zodb.Oid]setI64{},
}
for oid, zt := range δFtail.trackSetZBlk {
inblk, ok := zt.inroot[δftail.root]
if ok {
δE.oldTrackSetZBlk[oid] = inblk
delete(zt.inroot, δftail.root)
}
}
δftail.root = δE.newRoot
δftail.vδE = append(δftail.vδE, δE)
}
}
// take btree changes into account
// fmt.Printf("δB.ΔByRoot: %v\n", δB.ΔByRoot)
for root, δt := range δB.ΔByRoot {
// fmt.Printf("root: %v δt: %v\n", root, δt)
files := δFtail.fileIdx[root]
if len(files) == 0 {
panicf("BUG: ΔFtail: root<%s> -> ø files", root)
......@@ -246,8 +384,7 @@ func (δFtail *ΔFtail) Update(δZ *zodb.EventCommit, zhead *xzodb.ZConn) (_ ΔF
δfile = &ΔFile{Rev: δF.Rev, Blocks: make(setI64)}
δF.ByFile[file] = δfile
}
for blk /*, zblk*/ := range δt {
// FIXME stub - need to take both keys and zblk changes into account
for blk /*, δzblk*/ := range δt {
// XXX document, and in particular how to include atTail
δfile.Blocks.Add(blk)
}
......@@ -258,88 +395,112 @@ func (δFtail *ΔFtail) Update(δZ *zodb.EventCommit, zhead *xzodb.ZConn) (_ ΔF
// XXX currently we invalidate size on any topology change.
δfile.Size = true
}
}
// take zblk changes into account
for _, oid := range δZ.Changev {
if δFtail.trackSetZFile.Has(oid) {
// TODO check that .blksize and .blktab (it is only
// persistent reference) do not change.
// update trackSetZBlk according to btree changes
for blk, δzblk := range δt {
if δzblk.Old != xbtree.VDEL {
ztOld, ok := δFtail.trackSetZBlk[δzblk.Old]
if ok {
inblk, ok := ztOld.inroot[root]
if ok {
inblk.Del(blk)
}
}
}
return ΔF{}, fmt.Errorf("ZBigFile<%s> changed @%s", oid, δZ.Tid)
if δzblk.New != xbtree.VDEL {
ztNew, ok := δFtail.trackSetZBlk[δzblk.New]
if !ok {
ztNew = &zblkTrack{}
δFtail.trackSetZBlk[δzblk.New] = ztNew
}
inblk, ok := ztNew.inroot[root]
if !ok {
inblk = make(setI64, 1)
if ztNew.inroot == nil {
ztNew.inroot = make(map[zodb.Oid]setI64)
}
ztNew.inroot[root] = inblk
}
inblk.Add(blk)
}
}
}
// take zblk changes into account
for _, oid := range δZ.Changev {
zt, ok := δFtail.trackSetZBlk[oid]
if !ok {
continue // not tracked
}
for foid, blocks := range zt.infile {
δfile, ok := δF.ByFile[foid]
if !ok {
δfile = &ΔFile{Rev: δF.Rev, Blocks: make(setI64)}
δF.ByFile[foid] = δfile
for root, inblk := range zt.inroot {
if len(inblk) == 0 {
continue
}
// fmt.Printf("root: %v inblk: %v\n", root, inblk)
files := δFtail.fileIdx[root]
for file := range files {
δfile, ok := δF.ByFile[file]
if !ok {
δfile = &ΔFile{Rev: δF.Rev, Blocks: make(setI64)}
δF.ByFile[file] = δfile
}
δfile.Blocks.Update(blocks)
δfile.Blocks.Update(inblk)
}
}
// XXX update zt.infile according to btree changes
}
δFtail.vδF = append(δFtail.vδF, δF)
return δF, nil
}
// if ZBigFile object is changed - it starts new epoch for that file
for foid, δ := range δzfile {
δfile, ok := δF.ByFile[foid]
if !ok {
δfile = &ΔFile{Rev: δF.Rev}
δF.ByFile[foid] = δfile
}
δfile.Epoch = true
δfile.Blocks = nil
δfile.Size = false
// XXX kill after vδF is gone
// update processes new track requests and updates vδF.
//
// If file != nil only track requests related to file are processed.
// Otherwise all track requests are processed.
func (δFtail *ΔFtail) update(file *ZBigFile) {
if file == nil {
panic("TODO")
}
// XXX + rebuild XXX not here - in track(new file)
// let's see if we need to rebuild .vδF due to not-yet processed track requests
foid := file.POid()
_ = δ
//fmt.Printf("δZBigFile: %v\n", δ)
// XXX locking
// XXX dumb
zt, dirty := δFtail.trackNew[foid]
if !dirty {
return
// XXX update .fileIdx
}
delete(δFtail.trackNew, foid)
// XXX unlock here
// fmt.Printf("-> δF: %v\n", δF)
return δF, nil
}
for i, δZ := range δFtail.δBtail.ΔZtail().Data() {
δF := δFtail.vδF[i]
// XXX assert δF.Rev == δZ.Rev
for _, oid := range δZ.Changev {
z, ok := zt[oid]
if !ok {
continue
}
// ForgetPast discards all δFtail entries with rev ≤ revCut.
func (δFtail *ΔFtail) ForgetPast(revCut zodb.Tid) {
δFtail.δBtail.ForgetPast(revCut)
// XXX locking
// XXX -> func δF.δfile(foid) ?
δfile, ok := δF.ByFile[foid]
if !ok {
δfile = &ΔFile{Rev: δF.Rev, Blocks: make(setI64)}
δF.ByFile[foid] = δfile
}
// XXX locking
// XXX keep index which file changed epoch where (similarly to ΔBtail),
// and, instead of scanning all files, trim vδE only on files that is really necessary.
for _, δftail := range δFtail.byFile {
δftail.forgetPast(revCut)
}
}
δfile.Blocks.Update(z.infile[foid])
func (δftail *_ΔFileTail) forgetPast(revCut zodb.Tid) {
// XXX locking
icut := 0
for ; icut < len(δftail.vδE); icut++ {
if δftail.vδE[icut].Rev > revCut {
break
}
}
}
// ForgetPast discards all δFtail entries with rev ≤ revCut.
func (δFtail *ΔFtail) ForgetPast(revCut zodb.Tid) {
δFtail.δBtail.ForgetPast(revCut)
// vδE[:icut] should be forgotten
if icut > 0 { // XXX workarond for ΔFtail.ForgetPast calling forgetPast on all files
δftail.vδE = append([]_ΔFileEpoch{}, δftail.vδE[icut:]...)
}
}
// XXX don't need
......@@ -357,49 +518,15 @@ func (δFtail *ΔFtail) ForgetPast(revCut zodb.Tid) {
// the caller must not modify returned slice.
//
// Note: contrary to regular go slicing, low is exclusive while high is inclusive.
func (δFtail *ΔFtail) SliceByFileRev(file *ZBigFile, lo, hi zodb.Tid) /*readonly*/[]*ΔFile {
func (δFtail *ΔFtail) SliceByFileRev(zfile *ZBigFile, lo, hi zodb.Tid) /*readonly*/[]*ΔFile {
//fmt.Printf("\n")
xtail.AssertSlice(δFtail, lo, hi)
// FIXME rework to just query .δBtail.SliceByRootRev(file.blktab, lo, hi) +
// merge δZBlk history with that.
// XXX locking?
δFtail.update(file)
// find vδF range corresponding to (lo, hi]
// XXX linear scan
vδF := δFtail.vδF
if len(vδF) == 0 {
return nil
}
// find max j : [j].rev ≤ hi XXX linear scan -> binary search
j := len(vδF)-1
for ; j >= 0 && vδF[j].Rev > hi; j-- {}
if j < 0 {
return nil // ø
}
// find max i : [i].rev > low XXX linear scan -> binary search
i := j
for ; i >= 0 && vδF[i].Rev > lo; i-- {}
i++
vδF = vδF[i:j+1]
// filter found changed to have only file-related bits
foid := file.POid()
var vδfile []*ΔFile
for _, δF := range vδF {
δfile, ok := δF.ByFile[foid]
if ok {
vδfile = append(vδfile, δfile)
}
}
// XXX merge into vδF zblk from not yet handled tracked part
// XXX locking
// XXX rebuild
return vδfile
// query .δBtail.SliceByRootRev(file.blktab, lo, hi) +
// merge δZBlk history with that.
// merging tree (δT) and Zblk (δZblk) histories into file history (δFile):
......@@ -415,86 +542,379 @@ func (δFtail *ΔFtail) SliceByFileRev(file *ZBigFile, lo, hi zodb.Tid) /*readon
//
// δFile ────────o───────o──────x─────x────────────────────────
/*
vδZ := δFtail.δBtail.ΔZtail().SliceByRev(lo, hi)
// XXX stub that takes only ZBlk changes into account
// XXX dumb
for _, δZ := range vδZ {
var vδf []*ΔFile
// vδfTail returns or creates vδf entry for revision tail
// tail must be <= all vδf revisions
vδfTail := func(tail zodb.Tid) *ΔFile {
if l := len(vδf); l > 0 {
δfTail := vδf[l-1]
if δfTail.Rev == tail {
return δfTail
}
if !(tail <= δfTail.Rev) {
panic("tail not ↓")
}
}
δfTail := &ΔFile{Rev: tail, Blocks: setI64{}}
vδf = append(vδf, δfTail)
return δfTail
}
*/
δftail, err := δFtail.rebuildIfNeeded(zfile.POid())
if err != nil {
panic(err) // XXX
}
vδZ := δFtail.δBtail.ΔZtail().SliceByRev(lo, hi)
iz := len(vδZ) - 1
/*
// XXX activate zfile?
vδT := δFtail.δBtail.SliceByRootRev(file.zfile.blktab, lo, hi)
// find epoch that covers hi
vδE := δftail.vδE
le := len(vδE)
ie := sort.Search(le, func(i int) bool {
return hi < vδE[i].Rev
})
// vδE[ie] is next epoch
// vδE[ie-1] is epoch that covers hi
// loop through all epochs till lo
for lastEpoch := false; !lastEpoch ; {
// current epoch
var epoch zodb.Tid
ie--
if ie < 0 {
epoch = δFtail.Tail()
} else {
epoch = vδE[ie].Rev
}
// state of `{} blk -> zblk` as we are scanning ↓
δblktab := map[int64]struct {
zblk zodb.Oid // blk points to this zblk
lo, hi zodb.Tid // blk points to zblk during [lo, hi)
}{}
if epoch <= lo {
epoch = lo
lastEpoch = true
}
iz := len(vδZ) - 1
it := len(vδT) - 1
for (iz >= 0 && it >= 0) { // XXX -> ||
δZ := vδZ[iz]
δT := vδT[it]
if δZ.Rev >= δT.Rev {
for _, oid := range δZ.Changev {
// XXX oid -> tracked ZBlk?
// ZBlk -> bound to {}blk @head
for blk := range boundToAtHead {
if !δblktab.Has(blk) {
δblktab[blk] = oid
var root zodb.Oid // root of blktab in current epoch
var head zodb.Tid // head] of current epoch coverage
// state of Zinblk as we are scanning ← current epoch
// initially corresponds to head of the epoch (= @head for latest epoch)
Zinblk := map[zodb.Oid]setI64{} // zblk -> which #blk refers to it
var ZinblkAt zodb.Tid // Zinblk covers [ZinblkAt,<next δT>)
if ie+1 == le {
// head
root = δftail.root
head = δFtail.Head()
for zblk, zt := range δFtail.trackSetZBlk {
inblk, ok := zt.inroot[root]
if ok {
Zinblk[zblk] = inblk.Clone()
}
}
// XXX ZinblkAt
} else {
δE := vδE[ie+1]
root = δE.oldRoot
head = δE.Rev - 1 // XXX ok?
for zblk, inblk := range δE.oldTrackSetZBlk {
Zinblk[zblk] = inblk.Clone()
}
}
// vδT for current epoch
vδT := δFtail.δBtail.SliceByRootRev(root, epoch, head) // NOTE @head, not hi
it := len(vδT) - 1
if it >= 0 {
ZinblkAt = vδT[it].Rev
} else {
ZinblkAt = epoch
}
// merge vδZ and vδT of current epoch
for ((iz >= 0 && vδZ[iz].Rev >= epoch) || it >= 0) {
// δZ that is covered by current Zinblk
// -> update δf
if iz >= 0 {
δZ := vδZ[iz]
if ZinblkAt <= δZ.Rev {
//fmt.Printf("δZ @%s\n", δZ.Rev)
for _, oid := range δZ.Changev {
inblk, ok := Zinblk[oid]
if ok && len(inblk) != 0 {
δf := vδfTail(δZ.Rev)
δf.Blocks.Update(inblk)
}
}
iz--
continue
}
}
// δT -> adjust Zinblk + update δf
if it >= 0 {
δT := vδT[it]
//fmt.Printf("δT @%s\n", δT.Rev)
for blk, δzblk := range δT.ΔKV {
// apply in reverse as we go ←
if δzblk.New != xbtree.VDEL {
inblk, ok := Zinblk[δzblk.New]
if ok {
inblk.Del(blk)
}
}
if δzblk.Old != xbtree.VDEL {
inblk, ok := Zinblk[δzblk.Old]
if !ok {
inblk = setI64{}
Zinblk[δzblk.Old] = inblk
}
inblk.Add(blk)
}
if δT.Rev <= hi {
δf := vδfTail(δT.Rev)
δf.Blocks.Add(blk)
δf.Size = true // see Update
}
}
it--
if it >= 0 {
ZinblkAt = vδT[it].Rev
} else {
ZinblkAt = epoch
}
}
}
if δT.Rev >= δZ.Rev {
...
// emit epoch δf
if ie >= 0 {
epoch := vδE[ie].Rev
if epoch > lo { // it could be <=
δf := vδfTail(epoch)
δf.Epoch = true
δf.Blocks = nil // XXX must be already nil
δf.Size = false // XXX must be already false
}
}
}
*/
// vδf was built in reverse order
// invert the order before returning
for i,j := 0, len(vδf)-1; i<j; i,j = i+1,j-1 {
vδf[i], vδf[j] = vδf[j], vδf[i]
}
return vδf
}
// XXX rename -> BlkRevAt
// LastBlkRev returns last revision that changed file[blk] as of @at database state.
// BlkRevAt returns last revision that changed file[blk] as of @at database state.
//
// if exact=False - what is returned is only an upper bound for last block revision.
//
// zf must be from @head
// zf must be any checkout from (tail, head]
// at must ∈ (tail, head]
// blk must be tracked
//
// XXX +ctx, error rebuild []δF here
func (δFtail *ΔFtail) LastBlkRev(ctx context.Context, zf *ZBigFile, blk int64, at zodb.Tid) (_ zodb.Tid, exact bool) {
//defer xerr.Contextf(&err, "") // XXX text
// BlkRevAt returns last revision that changed file[blk] as of @at database state.
//
// It is a thin wrapper over _BlkRevAt that turns errors into panics.
func (δFtail *ΔFtail) BlkRevAt(ctx context.Context, zf *ZBigFile, blk int64, at zodb.Tid) (_ zodb.Tid, exact bool) {
	rev, exact, err := δFtail._BlkRevAt(ctx, zf, blk, at)
	if err == nil {
		return rev, exact
	}
	panic(err) // XXX errors are not yet propagated to callers
}
func (δFtail *ΔFtail) _BlkRevAt(ctx context.Context, zf *ZBigFile, blk int64, at zodb.Tid) (_ zodb.Tid, exact bool, err error) {
defer xerr.Contextf(&err, "blkrev f<%s> #%d @%s", zf.POid(), blk, at)
//fmt.Printf("\nblkrev #%d @%s\n", blk, at)
// XXX assert δFtail == f.head.bfdir.δFtail ?
// assert at ∈ (tail, head]
tail := δFtail.Tail()
head := δFtail.Head()
if !(tail < at && at <= head) {
panicf("at out of bounds: at: @%s, (tail, head] = (@%s, @%s]", at, tail, head)
}
// assert zf.at ∈ (tail, head]
zconn := zf.PJar()
zconnAt := zconn.At()
if !(tail < zconnAt && zconnAt <= head) {
panicf("zconn.at out of bounds: zconn.at: @%s, (tail, head] = (@%s, @%s]", zconnAt, tail, head)
}
// XXX tabRev -> treeRev ?
// XXX activate zfile?
zblkOid, ok, tabRev, tabRevExact, err := δFtail.δBtail.GetAt(ctx, zf.blktab, blk, at)
// XXX locking
δftail, err := δFtail.rebuildIfNeeded(zf.POid())
if err != nil {
panic(err) // XXX
return zodb.InvalidTid, false, err
}
// find epoch that covers at and associated blktab root/object
vδE := δftail.vδE
//fmt.Printf(" vδE: %v\n", vδE)
l := len(vδE)
i := sort.Search(l, func(i int) bool {
return at < vδE[i].Rev
})
// vδE[i] is next epoch
// vδE[i-1] is epoch that covers at
// root
var root zodb.Oid
if i == l {
root = δftail.root
} else {
root = vδE[i].oldRoot
}
// epoch
var epoch zodb.Tid
i--
if i < 0 {
// i<0 - first epoch (no explicit start) - use δFtail.tail as lo
epoch = δFtail.Tail()
} else {
epoch = vδE[i].Rev
}
//fmt.Printf(" epoch: @%s root: %s\n", epoch, root)
if root == xbtree.VDEL {
return epoch, true, nil
}
zblk, tabRev, zblkExact, tabRevExact, err := δFtail.δBtail.GetAt(root, blk, at)
//fmt.Printf(" GetAt #%d @%s -> %s(%v), @%s(%v)\n", blk, at, zblk, zblkExact, tabRev, tabRevExact)
if err != nil {
return zodb.InvalidTid, false, err
}
if tabRev < epoch {
tabRev = epoch
tabRevExact = true
}
// if δBtail does not have entry that covers root[blk] - get it
// through zconn that has any .at ∈ (tail, head].
if !zblkExact {
xblktab, err := zconn.Get(ctx, root)
if err != nil {
return zodb.InvalidTid, false, err
}
blktab, err := vBlktab(xblktab)
if err != nil {
return zodb.InvalidTid, false, err
}
xzblkObj, ok, err := blktab.Get(ctx, blk)
if err != nil {
return zodb.InvalidTid, false, err
}
if !ok {
zblk = xbtree.VDEL
} else {
zblkObj, err := vZBlk(xzblkObj)
if err != nil {
return zodb.InvalidTid, false, fmt.Errorf("blktab<%s>[#%d]: %s", root, blk, err)
}
zblk = zblkObj.POid()
}
}
// block was removed
// XXX or not in tracked set?
if !ok {
return tabRev, tabRevExact
if zblk == xbtree.VDEL {
return tabRev, tabRevExact, nil
}
// blktab[blk] was changed to point to a zblk @rev.
// blktab[blk] was changed to point to a zblk @tabRev.
// blk revision is max rev and when zblk changed last in (rev, at] range.
//
// XXX need to use full δZ, not only connected to tracked subset?
zblkRev, zblkRevExact := δFtail.δBtail.ΔZtail().LastRevOf(zblkOid, at)
zblkRev, zblkRevExact := δFtail.δBtail.ΔZtail().LastRevOf(zblk, at)
//fmt.Printf(" ZRevOf %s @%s -> @%s, %v\n", zblk, at, zblkRev, zblkRevExact)
if zblkRev > tabRev {
return zblkRev, zblkRevExact
return zblkRev, zblkRevExact, nil
} else {
return tabRev, tabRevExact, nil
}
}
// ----------------------------------------
// zfilediff returns direct difference for ZBigFile<foid> old..new .
// _ΔZBigFile represents direct difference of a ZBigFile object itself in
// between two revisions: how .blksize and the .blktab reference changed.
type _ΔZBigFile struct {
	blksizeOld, blksizeNew int64 // -1 if ZBigFile did not exist on that side (see diffF)
	blktabOld, blktabNew zodb.Oid // xbtree.VDEL if ZBigFile did not exist on that side
}
// zfilediff computes direct difference of ZBigFile<foid> in between old..new
// database states.
//
// δ=nil is returned if the object is the same at both states (see diffF).
func zfilediff(db *zodb.DB, foid zodb.Oid, old, new zodb.Tid) (δ *_ΔZBigFile, err error) {
	txn, ctx := transaction.New(context.TODO()) // XXX - merge in ctx arg?
	defer txn.Abort()

	connOld, err := db.Open(ctx, &zodb.ConnOptions{At: old})
	if err != nil {
		return nil, err
	}
	connNew, err := db.Open(ctx, &zodb.ConnOptions{At: new})
	if err != nil {
		return nil, err
	}

	// load the file at both states; either checkout may legitimately be nil
	fOld, errOld := zgetFileOrNil(ctx, connOld, foid)
	fNew, errNew := zgetFileOrNil(ctx, connNew, foid)
	if err = xerr.Merge(errOld, errNew); err != nil {
		return nil, err
	}

	return diffF(ctx, fOld, fNew)
}
// diffF returns direct difference in between two ZBigFile objects.
//
// a and b are checkouts of the same ZBigFile from "old" and "new" database
// states correspondingly; either may be nil meaning the file does not exist
// at that state.
//
// δ=nil is returned if there is no change in between a and b (same .blksize
// and same .blktab reference).
func diffF(ctx context.Context, a, b *ZBigFile) (δ *_ΔZBigFile, err error) {
	defer xerr.Contextf(&err, "diffF %s %s", xzodb.XidOf(a), xzodb.XidOf(b))

	δ = &_ΔZBigFile{}
	if a == nil {
		// file did not exist at old state
		δ.blksizeOld = -1
		δ.blktabOld = xbtree.VDEL
	} else {
		// NOTE removed stray `return tabRev, tabRevExact` merge leftover that
		// referenced undefined identifiers and broke compilation here.
		err = a.PActivate(ctx); if err != nil { return nil, err }
		defer a.PDeactivate()
		δ.blksizeOld = a.blksize
		δ.blktabOld = a.blktab.POid()
	}
	if b == nil {
		// file does not exist at new state
		δ.blksizeNew = -1
		δ.blktabNew = xbtree.VDEL
	} else {
		err = b.PActivate(ctx); if err != nil { return nil, err }
		defer b.PDeactivate()
		δ.blksizeNew = b.blksize
		δ.blktabNew = b.blktab.POid()
	}

	// return δ=nil if no change
	if δ.blksizeOld == δ.blksizeNew && δ.blktabOld == δ.blktabNew {
		δ = nil
	}
	return δ, nil
}
// zgetFileOrNil returns ZBigFile corresponding to zconn.Get(oid).
//
// If the object does not exist, (nil, nil) is returned.
// An error is returned if the object exists but is not a ZBigFile.
func zgetFileOrNil(ctx context.Context, zconn *zodb.Connection, oid zodb.Oid) (zfile *ZBigFile, err error) {
	defer xerr.Contextf(&err, "getfile %s@%s", oid, zconn.At())

	xfile, err := xzodb.ZGetOrNil(ctx, zconn, oid)
	if err != nil || xfile == nil {
		return nil, err
	}

	zfile, ok := xfile.(*ZBigFile)
	if !ok {
		return nil, fmt.Errorf("unexpected type: %s", zodb.ClassOf(xfile))
	}
	return zfile, nil
}
......@@ -18,28 +18,61 @@
// See https://www.nexedi.com/licensing for rationale and options.
package zdata
// tests for δftail.go
//
// This are the main tests for ΔFtail functionality. The primary testing
// concern is to verify how ΔFtail merges ΔBtail and ΔZtail histories on Update
// and queries.
//
// We assume that ΔBtail works correctly (this is covered by ΔBtail tests)
// -> no need to exercise many different topologies and tracking sets.
//
// Since ΔFtail does not recompute anything by itself when tracking set
// changes, and only merges δBtail and δZtail histories on queries, there is no
// need to exercise many different tracking sets. Once again we assume that
// ΔBtail works correctly and verify δFtail only with track=[-∞,∞).
//
// There are 2 testing approaches:
//
// a) transition a ZBigFile in ZODB through particular .blktab and ZBlk
// states and feed ΔFtail through created database transactions.
// b) transition a ZBigFile in ZODB through random .blktab and ZBlk
// states and feed ΔFtail through created database transactions.
//
// TestΔFtail and TestΔFtailRandom implement approaches "a" and "b" correspondingly.
import (
"context"
"fmt"
"reflect"
"sort"
"strings"
"testing"
"lab.nexedi.com/kirr/go123/exc"
"lab.nexedi.com/kirr/neo/go/transaction"
"lab.nexedi.com/kirr/neo/go/zodb"
"lab.nexedi.com/nexedi/wendelin.core/wcfs/internal/set"
"lab.nexedi.com/nexedi/wendelin.core/wcfs/internal/xbtree/xbtreetest"
)
type setStr = set.Str
const ø = "ø"
// ΔFTestEntry represents one entry in ΔFtail tests.
//
// A {nil, nil} entry is used as the command to delete the file (see testΔFtail).
type ΔFTestEntry struct {
	// NOTE removed duplicated δblkTab/δblkData field pair - leftover of the
	// δblkData -> δdataTab rename; the test code uses δdataTab.
	δblkTab  map[int64]string // changes in tree part {} #blk -> ZBlk<name>
	δdataTab setStr           // changes to ZBlk objects
}
// TestΔFtail runs ΔFtail tests on set of concrete prepared testcases.
func TestΔFtail(t *testing.T) {
// δT is shorthand to create δblkTab.
type δT = map[int64]string
// δD is shorthand to create δblkData.
// δD is shorthand to create δdataTab.
δD := func(zblkv ...string) setStr {
δ := setStr{}
for _, zblk := range zblkv {
......@@ -48,59 +81,529 @@ func TestΔFtail(t *testing.T) {
return δ
}
const a, b, c, ø = "a", "b", "c", "
const a,b,c,d,e,f,g,h,i,j = "a","b","c","d","e","f","g","h","i","j"
testv := []ΔFTestEntry{
{δT{1:a,2:b,3:ø}, δD(a)},
{δT{}, δD(c)},
{δT{2:c}, δD(a,b)},
// clear the tree
{δT{1:ø,2:ø}, δD()},
// i is first associated with file, but later unlinked from it
// then i is changed -> the file should no be in δF
{δT{5:i}, δD()},
{δT{5:e}, δD()},
{δT{}, δD(i)},
// XXX text
{nil, nil},
// ---- found by TestΔFtailRandom ----
{δT{1:a,6:i,7:d,8:e}, δD(a,c,e,f,g,h,i,j)},
// was including <= lo entries in SliceByFileRev
{δT{0:b,2:j,3:i,5:f,6:b,7:i,8:d}, δD(a,b,c,d,e,g,i,j)},
{δT{0:e,2:h,4:d,9:b}, δD(a,h,i)},
{δT{0:j,1:i,3:g,5:a,6:e,7:j,8:f,9:d}, δD()},
{δT{0:b,1:f,2:h,4:b,8:b}, δD(b,d,i)},
{δT{1:a,3:d,6:j}, δD(b,c,d,f,g,h,i,j)},
{δT{0:i,1:f,4:e,5:e,7:d,8:h}, δD(d,j)},
{δT{}, δD(a,b,c,e,f,g,h,i,j)},
}
vδf := []ΔFile{} // (rev↑, {}blk) XXX +.Size?
blkTab := map[int64]string{} // #blk -> ZBlk<oid>
Zinblk := map[string]setI64{} // ZBlk<oid> -> which #blk refer to it
for _, test := range testv {
δf := setI64{}
testq := make(chan ΔFTestEntry)
go func() {
defer close(testq)
for _, test := range testv {
testq <- test
}
}()
testΔFtail(t, testq)
}
// TestΔFtailRandom runs ΔFtail tests on randomly-generated file changes.
func TestΔFtailRandom(t *testing.T) {
n := xbtreetest.N(1E3, 1E4, 1E5)
nblk := xbtreetest.N(1E1, 2E1, 1E2) // keeps failures detail small on -short
for blk, zblk := range test.δblkTab {
// random-number generator
rng, seed := xbtreetest.NewRand()
t.Logf("# n=%d seed=%d", n, seed)
vv := "abcdefghij"
randv := func() string {
i := rng.Intn(len(vv))
return vv[i:i+1]
}
testq := make(chan ΔFTestEntry)
go func() {
defer close(testq)
for i := 0; i < n; i++ {
nδblkTab := rng.Intn(nblk)
nδdataTab := rng.Intn(len(vv))
δblkTab := map[int64]string{}
δdataTab := setStr{}
blkv := rng.Perm(nblk)
for j := 0; j < nδblkTab; j++ {
blk := blkv[j]
zblk := randv()
δblkTab[int64(blk)] = zblk
}
vv_ := rng.Perm(len(vv))
for j := 0; j < nδdataTab; j++ {
k := vv_[j]
v := vv[k:k+1]
δdataTab.Add(v)
}
testq <- ΔFTestEntry{δblkTab, δdataTab}
}
}()
testΔFtail(t, testq)
}
// testΔFtail verifies ΔFtail on sequence on testcases coming from testq.
func testΔFtail(t_ *testing.T, testq chan ΔFTestEntry) {
t := xbtreetest.NewT(t_)
X := exc.Raiseif
xat := map[zodb.Tid]string{} // tid > "at<i>"
// start δFtail when zfile does not yet exists
// this way we'll verify how ΔFtail rebuilds vδE for started-to-be-tracked file
t0 := t.CommitTree("øf")
xat[t0.At] = "at0"
t.Logf("# @at0 (%s)", t0.At)
δFtail := NewΔFtail(t.Head().At, t.DB)
// load dataTab
dataTab := map[string]string{} // ZBlk<name> -> data
for /*oid*/_, zblki := range t.Head().ZBlkTab {
dataTab[zblki.Name] = zblki.Data
}
// create zfile, but do not track it yet
t1 := t.CommitTree(fmt.Sprintf("t0:a D%s", dataTabTxt(dataTab)))
xat[t1.At] = "at1"
t.Logf("# → @at1 (%s) %s\t; not-yet-tracked", t1.At, t1.Tree)
δF, err := δFtail.Update(t1.ΔZ); X(err)
if !(δF.Rev == t1.At && len(δF.ByFile) == 0) {
t.Errorf("wrong δF:\nhave {%s, %v}\nwant: {%s, ø}", δF.Rev, δF.ByFile, t1.At)
}
// load zfile via root['treegen/file']
txn, ctx := transaction.New(context.Background())
zconn, err := t.DB.Open(ctx, &zodb.ConnOptions{At: t.Head().At, NoPool: true}); X(err)
xzroot, err := zconn.Get(ctx, 0); X(err)
zroot := xzroot.(*zodb.Map)
err = zroot.PActivate(ctx); X(err)
zfile := zroot.Data["treegen/file"].(*ZBigFile)
zroot.PDeactivate()
foid := zfile.POid()
err = zfile.PActivate(ctx); X(err)
blksize := zfile.blksize
blktabOid := zfile.blktab.POid()
if blktabOid != t.Root() {
t.Fatalf("BUG: zfile.blktab (%s) != treeroot (%s)", blktabOid, t.Root())
}
zfile.PDeactivate()
// start track zfile[0,∞) from the beginning
// this should make ΔFtail to see all zfile changes
size, path, err := zfile.Size(ctx); X(err)
δFtail.Track(zfile, /*blk*/-1, path, /*zblk*/nil)
if sizeOK := 1*blksize; size != sizeOK { // NOTE maches t1 commit
t.Fatalf("BUG: zfile size: have %d ; want %d", size, sizeOK)
}
// data built via applying changes from testv
vδf := []*ΔFile{ // (rev↑, {}blk)
{Rev: t1.At, Epoch: true},
}
vδE := []_ΔFileEpoch{ // (rev↑, EPOCH)
{
Rev: t1.At,
oldRoot: zodb.InvalidOid,
newRoot: blktabOid,
newBlkSize: blksize,
oldTrackSetZBlk: nil,
},
}
blkTab := map[int64]string{0:"a"} // #blk -> ZBlk<name>
Zinblk := map[string]setI64{} // ZBlk<name> -> which #blk refer to it
blkRevAt := map[zodb.Tid]map[int64]zodb.Tid{} // {} at -> {} #blk -> rev
// retrack should be called after new epoch to track zfile[-∞,∞) again
retrack := func() {
for blk := range blkTab {
_, path, zblk, _, err := zfile.LoadBlk(ctx, blk); X(err)
δFtail.Track(zfile, blk, path, zblk)
}
}
epochv := []zodb.Tid{t0.At, t1.At}
// δfstr/vδfstr converts δf/vδf to string taking xat into account
δfstr := func(δf *ΔFile) string {
s := fmt.Sprintf("@%s·%s", xat[δf.Rev], δf.Blocks)
if δf.Epoch {
s += "E"
}
if δf.Size {
s += "S"
}
return s
}
vδfstr := func(vδf []*ΔFile) string {
var s []string
for _, δf := range vδf {
s = append(s, δfstr(δf))
}
return fmt.Sprintf("%s", s)
}
i := 1 // matches t1
delfilePrev := false
for test := range testq {
i++
δblk := setI64{}
δtree := false
delfile := false
// command to delete zfile
if test.δblkTab == nil && test.δdataTab == nil {
delfile = true
}
// new epoch starts when file is deleted or recreated
newEpoch := delfile || (!delfile && delfile != delfilePrev)
delfilePrev = delfile
ZinblkPrev := map[string]setI64{}
for zblk, inblk := range Zinblk {
ZinblkPrev[zblk] = inblk.Clone()
}
// newEpoch -> reset
if newEpoch {
blkTab = map[int64]string{}
Zinblk = map[string]setI64{}
δblk = nil
} else {
// rebuild blkTab/Zinblk
zprev, ok := blkTab[blk]
if ok {
delete(Zinblk[zprev], blk)
for blk, zblk := range test.δblkTab {
zprev, ok := blkTab[blk]
if ok {
delete(Zinblk[zprev], blk)
} else {
zprev = ø
}
if zblk != ø {
blkTab[blk] = zblk
inblk, ok := Zinblk[zblk]
if !ok {
inblk = setI64{}
Zinblk[zblk] = inblk
}
inblk.Add(blk)
} else {
delete(blkTab, blk)
}
// update δblk due to change in blkTab
if zblk != zprev {
δblk.Add(blk)
δtree = true
}
}
// rebuild dataTab
for zblk := range test.δdataTab {
data, ok := dataTab[zblk] // e.g. a -> a2
if !ok {
t.Fatalf("BUG: blk %s not in dataTab\ndataTab: %v", zblk, dataTab)
}
data = fmt.Sprintf("%s%d", data[:1], i) // e.g. a4
dataTab[zblk] = data
// update δblk due to change in ZBlk data
for blk := range Zinblk[zblk] {
δblk.Add(blk)
}
}
}
// commit updated zfile / blkTab + dataTab
var req string
if delfile {
req = "øf"
} else {
tTxt := "t" + xbtreetest.KVTxt(blkTab)
dTxt := "D" + dataTabTxt(dataTab)
req = tTxt + " " + dTxt
}
commit := t.CommitTree(req)
if newEpoch {
epochv = append(epochv, commit.At)
}
xat[commit.At] = fmt.Sprintf("at%d", i)
flags := ""
if newEpoch {
flags += "\tEPOCH"
}
t.Logf("# → @%s (%s) δT%s δD%s\t; %s\tδ%s%s", xat[commit.At], commit.At, xbtreetest.KVTxt(test.δblkTab), test.δdataTab, commit.Tree, δblk, flags)
//t.Logf("# vδf: %s", vδfstr(vδf))
// update blkRevAt
var blkRevPrev map[int64]zodb.Tid
if i != 0 {
blkRevPrev = blkRevAt[δFtail.Head()]
}
blkRev := map[int64]zodb.Tid{}
for blk, rev := range blkRevPrev {
if newEpoch {
blkRev[blk] = commit.At
} else {
blkRev[blk] = rev
}
}
for blk := range δblk {
blkRev[blk] = commit.At
}
blkRevAt[commit.At] = blkRev
if false {
fmt.Printf("blkRevAt[@%s]:\n", xat[commit.At])
blkv := []int64{}
for blk := range blkRev {
blkv = append(blkv, blk)
}
sort.Slice(blkv, func(i, j int) bool {
return blkv[i] < blkv[j]
})
for _, blk := range blkv {
fmt.Printf(" #%d: %v\n", blk, blkRev[blk])
}
}
// update zfile
txn.Abort()
txn, ctx = transaction.New(context.Background())
err = zconn.Resync(ctx, commit.At); X(err)
var δfok *ΔFile
if newEpoch || len(δblk) != 0 {
δfok = &ΔFile{
Rev: commit.At,
Epoch: newEpoch,
Blocks: δblk,
Size: δtree, // not strictly ok, but matches current ΔFtail code
}
vδf = append(vδf, δfok)
}
if newEpoch {
δE := _ΔFileEpoch{Rev: commit.At}
if delfile {
δE.oldRoot = blktabOid
δE.newRoot = zodb.InvalidOid
δE.newBlkSize = -1
// XXX oldBlkSize ?
} else {
zprev = ø
δE.oldRoot = zodb.InvalidOid
δE.newRoot = blktabOid
δE.newBlkSize = blksize
// XXX oldBlkSize ?
}
oldTrackSetZBlk := map[zodb.Oid]setI64{}
for zblk, inblk := range ZinblkPrev {
oid, _ := commit.XGetBlkByName(zblk)
oldTrackSetZBlk[oid] = inblk
}
δE.oldTrackSetZBlk = oldTrackSetZBlk
vδE = append(vδE, δE)
}
//fmt.Printf("Zinblk: %v\n", Zinblk)
// update δFtail
δF, err := δFtail.Update(commit.ΔZ); X(err)
// assert δF points to the file if δfok != ø
if δF.Rev != commit.At {
t.Errorf("wrong δF.Rev: have %s ; want %s", δF.Rev, commit.At)
}
δfiles := setOid{}
for δfile := range δF.ByFile {
δfiles.Add(δfile)
}
δfilesOK := setOid{}
if δfok != nil {
δfilesOK.Add(foid)
}
if !δfiles.Equal(δfilesOK) {
t.Errorf("wrong δF.ByFile:\nhave keys: %s\nwant keys: %s", δfiles, δfilesOK)
continue
}
// verify δf
δf := δF.ByFile[foid]
if !reflect.DeepEqual(δf, δfok) {
t.Errorf("δf:\nhave: %v\nwant: %v", δf, δfok)
}
// track whole zfile again if new epoch was started
if newEpoch {
retrack()
}
if zblk != ø {
blkTab[blk] = zblk
inblk, ok := Zinblk[zblk]
// verify δFtail.trackSetZBlk
trackZinblk := map[string]setI64{}
for oid, zt := range δFtail.trackSetZBlk {
zblki := commit.ZBlkTab[oid]
for root, blocks := range zt.inroot {
if root != blktabOid {
t.Errorf(".trackSetZBlk: zblk %s points to unexpected blktab %s", zblki.Name, blktabOid)
continue
}
inblk, ok := trackZinblk[zblki.Name]
if !ok {
inblk = setI64{}
Zinblk[zblk] = inblk
trackZinblk[zblki.Name] = inblk
}
inblk.Add(blk)
inblk.Update(blocks)
}
}
if !reflect.DeepEqual(trackZinblk, Zinblk) {
t.Errorf(".trackSetZBlk:\n~have: %v\n want: %v", trackZinblk, Zinblk)
}
// update δf due to change in blkTab
if zblk != zprev {
δf.Add(blk)
// ForgetPast configured threshold
const ncut = 5
if len(vδf) >= ncut {
revcut := vδf[0].Rev
t.Logf("# forget ≤ @%s", xat[revcut])
δFtail.ForgetPast(revcut)
vδf = vδf[1:]
//t.Logf("# vδf: %s", vδfstr(vδf))
//t.Logf("# vδt: %s", vδfstr(δFtail.SliceByFileRev(zfile, δFtail.Tail(), δFtail.Head())))
icut := 0;
for ; icut < len(vδE); icut++ {
if vδE[icut].Rev > revcut {
break
}
}
vδE = vδE[icut:]
}
// update δf due to change in ZBlk data
for zblk := range test.δblkData {
for blk := range Zinblk[zblk] {
δf.Add(blk)
// verify δftail.root
δftail := δFtail.byFile[foid]
rootOK := blktabOid
if delfile {
rootOK = zodb.InvalidOid
}
if δftail.root != rootOK {
t.Errorf(".root: have %s ; want %s", δftail.root, rootOK)
}
// verify vδE
if !reflect.DeepEqual(δftail.vδE, vδE) {
t.Errorf("vδE:\nhave: %v\nwant: %v", δftail.vδE, vδE)
}
// SliceByFileRev
for j := 0; j < len(vδf); j++ {
for k := j; k < len(vδf); k++ {
var lo zodb.Tid
if j == 0 {
lo = vδf[0].Rev - 1
} else {
lo = vδf[j-1].Rev
}
hi := vδf[k].Rev
vδf_ok := vδf[j:k+1] // [j,k]
vδf_ := δFtail.SliceByFileRev(zfile, lo, hi)
if !reflect.DeepEqual(vδf_, vδf_ok) {
t.Errorf("slice (@%s,@%s]:\nhave: %v\nwant: %v", xat[lo], xat[hi], vδfstr(vδf_), vδfstr(vδf_ok))
}
}
}
vδf = append(vδf, ΔFile{
Rev: zodb.InvalidTid, // XXX will be set after treegen commit
Blocks: δf,
Size: false/*XXX*/,
// BlkRevAt
blkv := []int64{} // all blocks
if l := len(vδf); l > 0 {
for blk := range blkRevAt[vδf[l-1].Rev] {
blkv = append(blkv, blk)
}
}
blkv = append(blkv, 1E4/*this block is always hole*/)
sort.Slice(blkv, func(i, j int) bool {
return blkv[i] < blkv[j]
})
for j := 0; j < len(vδf); j++ {
at := vδf[j].Rev
blkRev := blkRevAt[at]
for _, blk := range blkv {
rev, exact := δFtail.BlkRevAt(ctx, zfile, blk, at)
revOK, ok := blkRev[blk]
if !ok {
k := len(epochv) - 1
for ; k >= 0; k-- {
if epochv[k] <= at {
break
}
}
revOK = epochv[k]
}
exactOK := true
if revOK <= δFtail.Tail() {
revOK, exactOK = δFtail.Tail(), false
}
if !(rev == revOK && exact == exactOK) {
t.Errorf("blkrev #%d @%s:\nhave: @%s, %v\nwant: @%s, %v", blk, xat[at], xat[rev], exact, xat[revOK], exactOK)
}
}
}
}
}
// XXX TestΔFtailRandom(t *testing.T) {
//}
// dataTabTxt returns string representation of {} dataTab.
func dataTabTxt(dataTab map[string]string) string {
	// XXX dup wrt xbtreetest.KVTxt but uses string instead of Key for keys.
	if len(dataTab) == 0 {
		return "ø"
	}

	// emit entries ordered by key for deterministic output
	keyv := make([]string, 0, len(dataTab))
	for k := range dataTab {
		keyv = append(keyv, k)
	}
	sort.Strings(keyv)

	var b strings.Builder
	for i, k := range keyv {
		v := dataTab[k]
		// separator characters inside a value would make the text ambiguous
		if strings.ContainsAny(v, " \n\t,:") {
			panicf("[%v]=%q: invalid value", k, v)
		}
		if i > 0 {
			b.WriteByte(',')
		}
		fmt.Fprintf(&b, "%v:%s", k, v)
	}
	return b.String()
}
// Copyright (C) 2021 Nexedi SA and Contributors.
// Kirill Smelkov <kirr@nexedi.com>
//
// This program is free software: you can Use, Study, Modify and Redistribute
// it under the terms of the GNU General Public License version 3, or (at your
// option) any later version, as published by the Free Software Foundation.
//
// You can also Link and Combine this program with other software covered by
// the terms of any of the Free Software licenses or any of the Open Source
// Initiative approved licenses and Convey the resulting work. Corresponding
// source of such a combination shall include the source code for all other
// software used.
//
// This program is distributed WITHOUT ANY WARRANTY; without even the implied
// warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
//
// See COPYING file for full licensing terms.
// See https://www.nexedi.com/licensing for rationale and options.
package zdata_test
import (
_ "lab.nexedi.com/nexedi/wendelin.core/wcfs/internal/xbtree/xbtreetest/init"
)
......@@ -368,7 +368,7 @@ package main
// rev(blk) ≤ rev'(blk) rev'(blk) = min(^^^)
//
//
// XXX we delay recomputing δFtail.LastBlkRev(file, #blk, head) because
// XXX we delay recomputing δFtail.BlkRevAt(file, #blk, head) because
// using just cheap revmax estimate can frequently result in all watches
// being skipped.
//
......@@ -868,7 +868,8 @@ retry:
// invalidate kernel cache for data in changed files
// NOTE no δFmu lock needed because zhead is WLocked
δF, err := bfdir.δFtail.Update(δZ, zhead) // δF <- δZ |tracked
// δF, err := bfdir.δFtail.Update(δZ, zhead) // δF <- δZ |tracked
δF, err := bfdir.δFtail.Update(δZ) // δF <- δZ |tracked
if err != nil {
return err
}
......@@ -881,28 +882,40 @@ retry:
sort.Slice(blkv, func(i, j int) bool {
return blkv[i] < blkv[j]
})
size := " "
flags := ""
if δfile.Size {
size = "S"
flags += "S"
}
log.Infof("S: \t- %s\t%s %v\n", foid, size, blkv)
if δfile.Epoch {
flags += "E"
}
log.Infof("S: \t- %s\t%2s %v\n", foid, flags, blkv)
}
log.Infof("\n\n")
}
// invalidate kernel cache for file data
wg := xsync.NewWorkGroup(ctx)
for foid, δfile := range δF.ByFile {
// // XXX needed?
// // XXX even though δBtail is complete, not all ZBlk are present here
// file.δtail.Append(δF.Rev, δfile.Blocks.Elements())
// file was requested to be tracked -> it must be present in fileTab
file := bfdir.fileTab[foid]
for blk := range δfile.Blocks {
blk := blk
wg.Go(func(ctx context.Context) error {
return file.invalidateBlk(ctx, blk)
})
if δfile.Epoch {
// XXX while invalidating whole file at epoch is easy,
// it becomes not so easy to handle isolation if epochs
// could be present. For this reason we forbid changes
// to ZBigFile objects for now.
return fmt.Errorf("ZBigFile<%s> changed @%s", foid, δF.Rev)
// wg.Go(func(ctx context.Context) error {
// return file.invalidateAll() // NOTE does not accept ctx
// })
} else {
for blk := range δfile.Blocks {
blk := blk
wg.Go(func(ctx context.Context) error {
return file.invalidateBlk(ctx, blk)
})
}
}
}
err = wg.Wait()
......@@ -948,6 +961,7 @@ retry:
file := bfdir.fileTab[foid] // must be present
zfile := file.zfile
// XXX need to do only if δfile.Size changed
size, sizePath, err := zfile.Size(ctx)
if err != nil {
return err
......@@ -973,7 +987,6 @@ retry:
}
// XXX δFtail.ForgetPast(...)
// XXX for f in δF: f.δtail.ForgetPast(...)
// notify zhead.At waiters
for hw := range head.hwait {
......@@ -1074,7 +1087,7 @@ func (f *BigFile) invalidateBlk(ctx context.Context, blk int64) (err error) {
func() {
// store retrieved data back to OS cache for file @<rev>/file[blk]
δFtail := f.head.bfdir.δFtail
blkrev, _ := δFtail.LastBlkRev(ctx, f.zfile, blk, f.head.zconn.At())
blkrev, _ := δFtail.BlkRevAt(ctx, f.zfile, blk, f.head.zconn.At())
frev, funlock, err := groot.lockRevFile(blkrev, f.zfile.POid())
if err != nil {
log.Errorf("BUG: %s: invalidate blk #%d: %s (ignoring, but reading @revX/bigfile will be slow)", f.path(), blk, err)
......@@ -1112,6 +1125,21 @@ func (f *BigFile) invalidateAttr() (err error) {
return nil
}
// invalidateAll invalidates file attributes and all file data in kernel cache.
//
// complements invalidateAttr and invalidateBlk and is used to completely reset
// kernel file cache on ΔFtail epoch.
// called with zheadMu wlocked.
func (f *BigFile) invalidateAll() (err error) {
	defer xerr.Contextf(&err, "%s: invalidate all", f.path())

	// offset=0, length=-1 requests the kernel to drop metadata + all data pages
	if st := gfsconn.FileNotify(f.Inode(), 0, -1); st != fuse.OK {
		return syscall.Errno(st)
	}
	return nil
}
// lockRevFile makes sure inode ID of /@<rev>/bigfile/<fid> is known to kernel
// and won't change until unlock.
......@@ -1291,7 +1319,7 @@ func (f *BigFile) readBlk(ctx context.Context, blk int64, dest []byte) (err erro
// and thus would trigger DB access again.
//
// TODO if direct-io: don't touch pagecache
// TODO upload parts only not covered by currrent read (not to e.g. wait for page lock)
// TODO upload parts only not covered by current read (not to e.g. wait for page lock)
// TODO skip upload completely if read is wide to cover whole blksize
go f.uploadBlk(blk, loading)
......@@ -1537,7 +1565,7 @@ func (f *BigFile) readPinWatchers(ctx context.Context, blk int64, treepath []btr
// we'll relock atMu again and recheck blkrev vs w.at after.
w.atMu.RUnlock()
blkrev, _ = δFtail.LastBlkRev(ctx, f.zfile, blk, f.head.zconn.At())
blkrev, _ = δFtail.BlkRevAt(ctx, f.zfile, blk, f.head.zconn.At())
blkrevRough = false
w.atMu.RLock()
......@@ -1553,7 +1581,7 @@ func (f *BigFile) readPinWatchers(ctx context.Context, blk int64, treepath []btr
// and most of them would be on different w.at - cache of the file will
// be lost. Via pinning to particular block revision, we make sure the
// revision to pin is the same on all clients, and so file cache is shared.
pinrev, _ := δFtail.LastBlkRev(ctx, w.file.zfile, blk, w.at) // XXX move into go?
pinrev, _ := δFtail.BlkRevAt(ctx, w.file.zfile, blk, w.at) // XXX move into go?
// XXX ^^^ w.file vs f ?
//fmt.Printf("S: read #%d: watch @%s: pin -> @%s\n", blk, w.at, pinrev)
......@@ -1681,7 +1709,7 @@ func (wlink *WatchLink) setupWatch(ctx context.Context, foid zodb.Oid, at zodb.T
// rlocked during pin setup.
//
// δ δ
// ----x----.------------]----x----
// ────x────.────────────]────x────
// ↑ ↑
// w.at head
//
......@@ -1700,6 +1728,21 @@ func (wlink *WatchLink) setupWatch(ctx context.Context, foid zodb.Oid, at zodb.T
δFtail := bfdir.δFtail
for _, δfile := range δFtail.SliceByFileRev(f.zfile, at, headAt) { // XXX locking δFtail
if δfile.Epoch {
// file epochs are currently forbidden (see watcher), so the only
// case when we could see an epoch here is creation of
// the file if w.at is before that time:
//
// create file
// ────.────────x────────]────
// ↑ ↑
// w.at head
//
// but then the file should not be normally accessed in that case.
//
// -> reject such watches with an error
return fmt.Errorf("file epoch detected @%s in between (at,head=@%s]", δfile.Rev, headAt)
}
for blk := range δfile.Blocks {
_, already := toPin[blk]
if already {
......@@ -1714,13 +1757,13 @@ func (wlink *WatchLink) setupWatch(ctx context.Context, foid zodb.Oid, at zodb.T
// XXX adjust wcfs tests to not require only accessed
// blocks to be in setup pins? But that would mean that
// potentially more blocks would be potentially
// _unneccessarily_ pinned if they are not going to be
// _unnecessarily_ pinned if they are not going to be
// accessed at all.
if !f.accessed.Has(blk) {
continue
}
toPin[blk], _ = δFtail.LastBlkRev(ctx, f.zfile, blk, at) // XXX err
toPin[blk], _ = δFtail.BlkRevAt(ctx, f.zfile, blk, at) // XXX err
}
}
......@@ -2088,7 +2131,7 @@ func (root *Root) lookup(name string, fctx *fuse.Context) (_ *Head, err error) {
root.revMu.Unlock()
if already {
// XXX race wrt simlutaneous "FORGET @<rev>" ?
// XXX race wrt simultaneous "FORGET @<rev>" ?
return revDir, nil
}
......@@ -2533,7 +2576,7 @@ func _main() (err error) {
}
// wait for unmount
// XXX the kernel does not sentd FORGETs on unmount - release left node resources ourselves?
// XXX the kernel does not send FORGETs on unmount - release left node resources ourselves?
<-serveCtx.Done()
log.Infof("stop %q %q", mntpt, zurl)
return nil // XXX serveErr | zwatchErr ?
......
......@@ -1132,7 +1132,8 @@ def _expectPin(twlink, ctx, zf, expect): # -> []SrvReq
# _blkDataAt returns expected zf[blk] data and its revision as of @at database state.
#
# If the block is hole - (b'', at0) is returned. XXX -> @z64?
# XXX ret for when the file did not existed at all? blk was after file size?
# XXX ret for when the file did not exist at all?
# XXX ret ----//---- blk was after file size?
@func(tDB)
def _blkDataAt(t, zf, blk, at): # -> (data, rev)
if at is None:
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment