Commit f91982af authored by Kirill Smelkov

X First round of preparation steps for ΔFtail tests

* t2: (29 commits)
  .
  .
  .
  .
  .
  .
  .
  .
  .
  .
  .
  .
  .
  .
  Revert "."
  .
  X Unexport ΔBroots
  .
  X Unexport SetXXX from packages API
  .
  ...
parents d0fe680a c7f1e3c9
- doc: notes on how things are organized in wendelin.core 2
wcfs:
- SIGSEGV is used only to track writes
#!/bin/bash -e
# δtail.go.in -> specialized with concrete types
# gen-δtail KIND ID out
# Copyright (C) 2018-2021 Nexedi SA and Contributors.
# Kirill Smelkov <kirr@nexedi.com>
#
# This program is free software: you can Use, Study, Modify and Redistribute
# it under the terms of the GNU General Public License version 3, or (at your
# option) any later version, as published by the Free Software Foundation.
#
# You can also Link and Combine this program with other software covered by
# the terms of any of the Free Software licenses or any of the Open Source
# Initiative approved licenses and Convey the resulting work. Corresponding
# source of such a combination shall include the source code for all other
# software used.
#
# This program is distributed WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# See COPYING file for full licensing terms.
# See https://www.nexedi.com/licensing for rationale and options.
# TODO kill this after finishing ΔFtail (zodb/δtail.go generic is not used here)
KIND=$1
ID=$2
out=$3
zodb=lab.nexedi.com/kirr/neo/go/zodb
zdir=`go list -f '{{.Dir}}' $zodb`
zrev=`git -C $zdir describe --always`
echo "// Code generated by gen-δtail $KIND $ID; DO NOT EDIT." >$out
echo "// (from $zodb @ $zrev)" >>$out
echo >>$out
$zdir/δtail.go.cat-generic | sed \
-e "s/PACKAGE/main/g" \
-e "s/ID/$ID/g" \
-e "s/ΔTail/ΔTail${KIND}/g" \
-e "s/δRevEntry/δRevEntry${KIND}/g" \
>>$out
#!/bin/bash -e
# tries to minimize reproducer for https://github.com/golang/go/issues/41303
export GOTRACEBACK=crash
ulimit -c unlimited
go test -c
cwd=$(pwd)
# runtest1
function runtest1() {
for i in `seq 1000`; do
echo -e "\n>>> #$i"
#GOGC=1 $cwd/wcfs.test -test.v -test.run 'TestZBlk|TestΔBTail|TestΔBTreeAllStructs' || break
#GOGC=1 $cwd/wcfs.test -test.v -test.run 'TestZBlk|TestΔBTail' || break
#GOGC=0 $cwd/wcfs.test -test.v -test.run 'TestZBlk|TestΔBTail' || break
GOGC=0 $cwd/wcfs.test -test.v -test.count=100 -test.run 'TestZBlk|TestΔBTail' || break
done
}
nwork=2
rm -rf BUG
for n in `seq $nwork`; do
workdir=BUG/$n
mkdir -p $workdir
ln -s $cwd/testdata $workdir
ln -s $cwd/testprog $workdir
(cd $workdir && runtest1 >>log 2>&1) &
done
wait -n
echo "done, crashes:"
find BUG -name "*core*"
echo
kill `jobs -p`
wait
......@@ -21,10 +21,9 @@
# See COPYING file for full licensing terms.
# See https://www.nexedi.com/licensing for rationale and options.
PACKAGE=$1
KIND=$2
VALUE=$3
out=$4
KIND=$1
VALUE=$2
out=$3
input=$(dirname $0)/set.go.in
......@@ -32,7 +31,6 @@ echo "// Code generated by gen-set $KIND $VALUE; DO NOT EDIT." >$out
echo >>$out
sed \
-e "s/PACKAGE/$PACKAGE/g" \
-e "s/VALUE/$VALUE/g" \
-e "s/Set/Set${KIND}/g" \
-e "s/Set/${KIND}/g" \
$input >>$out
......@@ -19,9 +19,10 @@
package set
//go:generate ./gen-set set I64 int64 zset_i64.go
//go:generate ./gen-set set Oid _Oid zset_oid.go
//go:generate ./gen-set set Tid _Tid zset_tid.go
//go:generate ./gen-set I64 int64 zset_i64.go
//go:generate ./gen-set Str string zset_str.go
//go:generate ./gen-set Oid _Oid zset_oid.go
//go:generate ./gen-set Tid _Tid zset_tid.go
import (
"lab.nexedi.com/kirr/neo/go/zodb"
......
......@@ -17,7 +17,7 @@
// See COPYING file for full licensing terms.
// See https://www.nexedi.com/licensing for rationale and options.
package PACKAGE
package set
import (
"fmt"
......
......@@ -27,35 +27,35 @@ import (
"strings"
)
// SetI64 is a set of int64.
type SetI64 map[int64]struct{}
// I64 is a set of int64.
type I64 map[int64]struct{}
// Add adds v to the set.
func (s SetI64) Add(v int64) {
func (s I64) Add(v int64) {
s[v] = struct{}{}
}
// Del removes v from the set.
// it is noop if v was not in the set.
func (s SetI64) Del(v int64) {
func (s I64) Del(v int64) {
delete(s, v)
}
// Has checks whether the set contains v.
func (s SetI64) Has(v int64) bool {
func (s I64) Has(v int64) bool {
_, ok := s[v]
return ok
}
// Update adds t values to s.
func (s SetI64) Update(t SetI64) {
func (s I64) Update(t I64) {
for v := range t {
s.Add(v)
}
}
// Elements returns all elements of set as slice.
func (s SetI64) Elements() []int64 {
func (s I64) Elements() []int64 {
ev := make([]int64, len(s))
i := 0
for e := range s {
......@@ -66,14 +66,14 @@ func (s SetI64) Elements() []int64 {
}
// Union returns s ∪ t
func (s SetI64) Union(t SetI64) SetI64 {
func (s I64) Union(t I64) I64 {
// l = max(len(s), len(t))
l := len(s)
if lt := len(t); lt > l {
l = lt
}
u := make(SetI64, l)
u := make(I64, l)
for v := range s {
u.Add(v)
......@@ -85,8 +85,8 @@ func (s SetI64) Union(t SetI64) SetI64 {
}
// Intersection returns s ∩ t
func (s SetI64) Intersection(t SetI64) SetI64 {
i := SetI64{}
func (s I64) Intersection(t I64) I64 {
i := I64{}
for v := range s {
if t.Has(v) {
i.Add(v)
......@@ -96,8 +96,8 @@ func (s SetI64) Intersection(t SetI64) SetI64 {
}
// Difference returns s\t.
func (s SetI64) Difference(t SetI64) SetI64 {
d := SetI64{}
func (s I64) Difference(t I64) I64 {
d := I64{}
for v := range s {
if !t.Has(v) {
d.Add(v)
......@@ -107,8 +107,8 @@ func (s SetI64) Difference(t SetI64) SetI64 {
}
// SymmetricDifference returns s Δ t.
func (s SetI64) SymmetricDifference(t SetI64) SetI64 {
d := SetI64{}
func (s I64) SymmetricDifference(t I64) I64 {
d := I64{}
for v := range s {
if !t.Has(v) {
d.Add(v)
......@@ -123,7 +123,7 @@ func (s SetI64) SymmetricDifference(t SetI64) SetI64 {
}
// Equal returns whether a == b.
func (a SetI64) Equal(b SetI64) bool {
func (a I64) Equal(b I64) bool {
if len(a) != len(b) {
return false
}
......@@ -139,8 +139,8 @@ func (a SetI64) Equal(b SetI64) bool {
}
// Clone returns copy of the set.
func (orig SetI64) Clone() SetI64 {
klon := make(SetI64, len(orig))
func (orig I64) Clone() I64 {
klon := make(I64, len(orig))
for v := range orig {
klon.Add(v)
}
......@@ -149,7 +149,7 @@ func (orig SetI64) Clone() SetI64 {
// --------
func (s SetI64) SortedElements() []int64 {
func (s I64) SortedElements() []int64 {
ev := s.Elements()
sort.Slice(ev, func(i, j int) bool {
return ev[i] < ev[j]
......@@ -157,7 +157,7 @@ func (s SetI64) SortedElements() []int64 {
return ev
}
func (s SetI64) String() string {
func (s I64) String() string {
ev := s.SortedElements()
strv := make([]string, len(ev))
for i, v := range ev {
......
......@@ -27,35 +27,35 @@ import (
"strings"
)
// SetOid is a set of _Oid.
type SetOid map[_Oid]struct{}
// Oid is a set of _Oid.
type Oid map[_Oid]struct{}
// Add adds v to the set.
func (s SetOid) Add(v _Oid) {
func (s Oid) Add(v _Oid) {
s[v] = struct{}{}
}
// Del removes v from the set.
// it is noop if v was not in the set.
func (s SetOid) Del(v _Oid) {
func (s Oid) Del(v _Oid) {
delete(s, v)
}
// Has checks whether the set contains v.
func (s SetOid) Has(v _Oid) bool {
func (s Oid) Has(v _Oid) bool {
_, ok := s[v]
return ok
}
// Update adds t values to s.
func (s SetOid) Update(t SetOid) {
func (s Oid) Update(t Oid) {
for v := range t {
s.Add(v)
}
}
// Elements returns all elements of set as slice.
func (s SetOid) Elements() []_Oid {
func (s Oid) Elements() []_Oid {
ev := make([]_Oid, len(s))
i := 0
for e := range s {
......@@ -66,14 +66,14 @@ func (s SetOid) Elements() []_Oid {
}
// Union returns s ∪ t
func (s SetOid) Union(t SetOid) SetOid {
func (s Oid) Union(t Oid) Oid {
// l = max(len(s), len(t))
l := len(s)
if lt := len(t); lt > l {
l = lt
}
u := make(SetOid, l)
u := make(Oid, l)
for v := range s {
u.Add(v)
......@@ -85,8 +85,8 @@ func (s SetOid) Union(t SetOid) SetOid {
}
// Intersection returns s ∩ t
func (s SetOid) Intersection(t SetOid) SetOid {
i := SetOid{}
func (s Oid) Intersection(t Oid) Oid {
i := Oid{}
for v := range s {
if t.Has(v) {
i.Add(v)
......@@ -96,8 +96,8 @@ func (s SetOid) Intersection(t SetOid) SetOid {
}
// Difference returns s\t.
func (s SetOid) Difference(t SetOid) SetOid {
d := SetOid{}
func (s Oid) Difference(t Oid) Oid {
d := Oid{}
for v := range s {
if !t.Has(v) {
d.Add(v)
......@@ -107,8 +107,8 @@ func (s SetOid) Difference(t SetOid) SetOid {
}
// SymmetricDifference returns s Δ t.
func (s SetOid) SymmetricDifference(t SetOid) SetOid {
d := SetOid{}
func (s Oid) SymmetricDifference(t Oid) Oid {
d := Oid{}
for v := range s {
if !t.Has(v) {
d.Add(v)
......@@ -123,7 +123,7 @@ func (s SetOid) SymmetricDifference(t SetOid) SetOid {
}
// Equal returns whether a == b.
func (a SetOid) Equal(b SetOid) bool {
func (a Oid) Equal(b Oid) bool {
if len(a) != len(b) {
return false
}
......@@ -139,8 +139,8 @@ func (a SetOid) Equal(b SetOid) bool {
}
// Clone returns copy of the set.
func (orig SetOid) Clone() SetOid {
klon := make(SetOid, len(orig))
func (orig Oid) Clone() Oid {
klon := make(Oid, len(orig))
for v := range orig {
klon.Add(v)
}
......@@ -149,7 +149,7 @@ func (orig SetOid) Clone() SetOid {
// --------
func (s SetOid) SortedElements() []_Oid {
func (s Oid) SortedElements() []_Oid {
ev := s.Elements()
sort.Slice(ev, func(i, j int) bool {
return ev[i] < ev[j]
......@@ -157,7 +157,7 @@ func (s SetOid) SortedElements() []_Oid {
return ev
}
func (s SetOid) String() string {
func (s Oid) String() string {
ev := s.SortedElements()
strv := make([]string, len(ev))
for i, v := range ev {
......
// Code generated by gen-set ZBigFile *ZBigFile; DO NOT EDIT.
// Code generated by gen-set Str string; DO NOT EDIT.
// Copyright (C) 2015-2021 Nexedi SA and Contributors.
// Kirill Smelkov <kirr@nexedi.com>
......@@ -19,38 +19,44 @@
// See COPYING file for full licensing terms.
// See https://www.nexedi.com/licensing for rationale and options.
package zdata
package set
// SetZBigFile is a set of *ZBigFile.
type SetZBigFile map[*ZBigFile]struct{}
import (
"fmt"
"sort"
"strings"
)
// Str is a set of string.
type Str map[string]struct{}
// Add adds v to the set.
func (s SetZBigFile) Add(v *ZBigFile) {
func (s Str) Add(v string) {
s[v] = struct{}{}
}
// Del removes v from the set.
// it is noop if v was not in the set.
func (s SetZBigFile) Del(v *ZBigFile) {
func (s Str) Del(v string) {
delete(s, v)
}
// Has checks whether the set contains v.
func (s SetZBigFile) Has(v *ZBigFile) bool {
func (s Str) Has(v string) bool {
_, ok := s[v]
return ok
}
// Update adds t values to s.
func (s SetZBigFile) Update(t SetZBigFile) {
func (s Str) Update(t Str) {
for v := range t {
s.Add(v)
}
}
// Elements returns all elements of set as slice.
func (s SetZBigFile) Elements() []*ZBigFile {
ev := make([]*ZBigFile, len(s))
func (s Str) Elements() []string {
ev := make([]string, len(s))
i := 0
for e := range s {
ev[i] = e
......@@ -60,14 +66,14 @@ func (s SetZBigFile) Elements() []*ZBigFile {
}
// Union returns s ∪ t
func (s SetZBigFile) Union(t SetZBigFile) SetZBigFile {
func (s Str) Union(t Str) Str {
// l = max(len(s), len(t))
l := len(s)
if lt := len(t); lt > l {
l = lt
}
u := make(SetZBigFile, l)
u := make(Str, l)
for v := range s {
u.Add(v)
......@@ -79,8 +85,8 @@ func (s SetZBigFile) Union(t SetZBigFile) SetZBigFile {
}
// Intersection returns s ∩ t
func (s SetZBigFile) Intersection(t SetZBigFile) SetZBigFile {
i := SetZBigFile{}
func (s Str) Intersection(t Str) Str {
i := Str{}
for v := range s {
if t.Has(v) {
i.Add(v)
......@@ -90,8 +96,8 @@ func (s SetZBigFile) Intersection(t SetZBigFile) SetZBigFile {
}
// Difference returns s\t.
func (s SetZBigFile) Difference(t SetZBigFile) SetZBigFile {
d := SetZBigFile{}
func (s Str) Difference(t Str) Str {
d := Str{}
for v := range s {
if !t.Has(v) {
d.Add(v)
......@@ -101,8 +107,8 @@ func (s SetZBigFile) Difference(t SetZBigFile) SetZBigFile {
}
// SymmetricDifference returns s Δ t.
func (s SetZBigFile) SymmetricDifference(t SetZBigFile) SetZBigFile {
d := SetZBigFile{}
func (s Str) SymmetricDifference(t Str) Str {
d := Str{}
for v := range s {
if !t.Has(v) {
d.Add(v)
......@@ -117,7 +123,7 @@ func (s SetZBigFile) SymmetricDifference(t SetZBigFile) SetZBigFile {
}
// Equal returns whether a == b.
func (a SetZBigFile) Equal(b SetZBigFile) bool {
func (a Str) Equal(b Str) bool {
if len(a) != len(b) {
return false
}
......@@ -131,3 +137,31 @@ func (a SetZBigFile) Equal(b SetZBigFile) bool {
return true
}
// Clone returns copy of the set.
func (orig Str) Clone() Str {
klon := make(Str, len(orig))
for v := range orig {
klon.Add(v)
}
return klon
}
// --------
func (s Str) SortedElements() []string {
ev := s.Elements()
sort.Slice(ev, func(i, j int) bool {
return ev[i] < ev[j]
})
return ev
}
func (s Str) String() string {
ev := s.SortedElements()
strv := make([]string, len(ev))
for i, v := range ev {
strv[i] = fmt.Sprintf("%v", v)
}
return "{" + strings.Join(strv, " ") + "}"
}
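For orientation, here is a minimal usage sketch of the renamed set API (set.Str etc. instead of the former SetXXX names). The program is illustrative only and not part of this commit; it assumes the internal/set import path used elsewhere in this diff.

package main

import (
	"fmt"

	"lab.nexedi.com/nexedi/wendelin.core/wcfs/internal/set"
)

func main() {
	a := set.Str{}
	a.Add("alpha")
	a.Add("beta")

	b := set.Str{}
	b.Add("beta")
	b.Add("gamma")

	fmt.Println(a.Has("alpha"))           // true
	fmt.Println(a.Union(b))               // {alpha beta gamma}
	fmt.Println(a.Intersection(b))        // {beta}
	fmt.Println(a.SymmetricDifference(b)) // {alpha gamma}
}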
......@@ -27,35 +27,35 @@ import (
"strings"
)
// SetTid is a set of _Tid.
type SetTid map[_Tid]struct{}
// Tid is a set of _Tid.
type Tid map[_Tid]struct{}
// Add adds v to the set.
func (s SetTid) Add(v _Tid) {
func (s Tid) Add(v _Tid) {
s[v] = struct{}{}
}
// Del removes v from the set.
// it is noop if v was not in the set.
func (s SetTid) Del(v _Tid) {
func (s Tid) Del(v _Tid) {
delete(s, v)
}
// Has checks whether the set contains v.
func (s SetTid) Has(v _Tid) bool {
func (s Tid) Has(v _Tid) bool {
_, ok := s[v]
return ok
}
// Update adds t values to s.
func (s SetTid) Update(t SetTid) {
func (s Tid) Update(t Tid) {
for v := range t {
s.Add(v)
}
}
// Elements returns all elements of set as slice.
func (s SetTid) Elements() []_Tid {
func (s Tid) Elements() []_Tid {
ev := make([]_Tid, len(s))
i := 0
for e := range s {
......@@ -66,14 +66,14 @@ func (s SetTid) Elements() []_Tid {
}
// Union returns s ∪ t
func (s SetTid) Union(t SetTid) SetTid {
func (s Tid) Union(t Tid) Tid {
// l = max(len(s), len(t))
l := len(s)
if lt := len(t); lt > l {
l = lt
}
u := make(SetTid, l)
u := make(Tid, l)
for v := range s {
u.Add(v)
......@@ -85,8 +85,8 @@ func (s SetTid) Union(t SetTid) SetTid {
}
// Intersection returns s ∩ t
func (s SetTid) Intersection(t SetTid) SetTid {
i := SetTid{}
func (s Tid) Intersection(t Tid) Tid {
i := Tid{}
for v := range s {
if t.Has(v) {
i.Add(v)
......@@ -96,8 +96,8 @@ func (s SetTid) Intersection(t SetTid) SetTid {
}
// Difference returns s\t.
func (s SetTid) Difference(t SetTid) SetTid {
d := SetTid{}
func (s Tid) Difference(t Tid) Tid {
d := Tid{}
for v := range s {
if !t.Has(v) {
d.Add(v)
......@@ -107,8 +107,8 @@ func (s SetTid) Difference(t SetTid) SetTid {
}
// SymmetricDifference returns s Δ t.
func (s SetTid) SymmetricDifference(t SetTid) SetTid {
d := SetTid{}
func (s Tid) SymmetricDifference(t Tid) Tid {
d := Tid{}
for v := range s {
if !t.Has(v) {
d.Add(v)
......@@ -123,7 +123,7 @@ func (s SetTid) SymmetricDifference(t SetTid) SetTid {
}
// Equal returns whether a == b.
func (a SetTid) Equal(b SetTid) bool {
func (a Tid) Equal(b Tid) bool {
if len(a) != len(b) {
return false
}
......@@ -139,8 +139,8 @@ func (a SetTid) Equal(b SetTid) bool {
}
// Clone returns copy of the set.
func (orig SetTid) Clone() SetTid {
klon := make(SetTid, len(orig))
func (orig Tid) Clone() Tid {
klon := make(Tid, len(orig))
for v := range orig {
klon.Add(v)
}
......@@ -149,7 +149,7 @@ func (orig SetTid) Clone() SetTid {
// --------
func (s SetTid) SortedElements() []_Tid {
func (s Tid) SortedElements() []_Tid {
ev := s.Elements()
sort.Slice(ev, func(i, j int) bool {
return ev[i] < ev[j]
......@@ -157,7 +157,7 @@ func (s SetTid) SortedElements() []_Tid {
return ev
}
func (s SetTid) String() string {
func (s Tid) String() string {
ev := s.SortedElements()
strv := make([]string, len(ev))
for i, v := range ev {
......
// Copyright (C) 2021 Nexedi SA and Contributors.
// Kirill Smelkov <kirr@nexedi.com>
//
// This program is free software: you can Use, Study, Modify and Redistribute
// it under the terms of the GNU General Public License version 3, or (at your
// option) any later version, as published by the Free Software Foundation.
//
// You can also Link and Combine this program with other software covered by
// the terms of any of the Free Software licenses or any of the Open Source
// Initiative approved licenses and Convey the resulting work. Corresponding
// source of such a combination shall include the source code for all other
// software used.
//
// This program is distributed WITHOUT ANY WARRANTY; without even the implied
// warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
//
// See COPYING file for full licensing terms.
// See https://www.nexedi.com/licensing for rationale and options.
// Package blib provides utilities related to BTrees.
package blib
import (
"fmt"
"math"
"lab.nexedi.com/kirr/neo/go/zodb/btree"
"lab.nexedi.com/nexedi/wendelin.core/wcfs/internal/set"
)
// XXX instead of generics
type Tree = btree.LOBTree
type Bucket = btree.LOBucket
type Node = btree.LONode
type TreeEntry = btree.LOEntry
type BucketEntry = btree.LOBucketEntry
type Key = int64
const KeyMax Key = math.MaxInt64
const KeyMin Key = math.MinInt64
type setOid = set.Oid
// kstr formats key as string.
func kstr(k Key) string {
if k == KeyMin {
return "-∞"
}
if k == KeyMax {
return "∞"
}
return fmt.Sprintf("%d", k)
}
func panicf(format string, argv ...interface{}) {
panic(fmt.Sprintf(format, argv...))
}
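kstr is unexported, so the following is a hypothetical in-package test sketch (not part of this commit) illustrating the ±∞ formatting of keys:

package blib

import "testing"

func TestKstrSketch(t *testing.T) {
	cases := []struct {
		k    Key
		want string
	}{
		{KeyMin, "-∞"},
		{KeyMax, "∞"},
		{42, "42"},
	}
	for _, c := range cases {
		// kstr formats KeyMin/KeyMax as -∞/∞ and everything else as a plain integer
		if got := kstr(c.k); got != c.want {
			t.Errorf("kstr(%v) = %q; want %q", c.k, got, c.want)
		}
	}
}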
......@@ -17,7 +17,7 @@
// See COPYING file for full licensing terms.
// See https://www.nexedi.com/licensing for rationale and options.
package xbtree
package blib
// PP-connected subset of tree nodes.
import (
......@@ -61,6 +61,16 @@ type nodeInTree struct {
nchild int // number of direct children in PPTreeSubSet referring to this node
}
// Parent returns parent of this node.
func (n *nodeInTree) Parent() zodb.Oid {
return n.parent
}
// NChild returns number of children of this node in the tree subset.
func (n *nodeInTree) NChild() int {
return n.nchild
}
// Has returns whether node is in the set.
func (S PPTreeSubSet) Has(oid zodb.Oid) bool {
_, ok := S[oid]
......@@ -102,7 +112,7 @@ func (S PPTreeSubSet) AddPath(path []zodb.Oid) {
// normalize path: remove embedded bucket and check whether it was an
// artificial empty tree.
path = normPath(path)
path = NormPath(path)
// go through path and add nodes to the set
parent := zodb.InvalidOid
......@@ -132,11 +142,11 @@ func (S PPTreeSubSet) AddPath(path []zodb.Oid) {
}
}
// normPath normalizes path.
// NormPath normalizes path.
//
// It removes embedded buckets and artificial empty trees.
// Returned slice is subslice of path and aliases its memory.
func normPath(path []zodb.Oid) []zodb.Oid {
func NormPath(path []zodb.Oid) []zodb.Oid {
l := len(path)
// don't keep track of artificial empty tree
......@@ -363,12 +373,12 @@ func (S PPTreeSubSet) verify() {
}()
// recompute {} oid -> children and verify .nchild against it
children := make(map[zodb.Oid]SetOid, len(S))
children := make(map[zodb.Oid]setOid, len(S))
for oid, t := range S {
if t.parent != zodb.InvalidOid {
cc, ok := children[t.parent]
if !ok {
cc = make(SetOid, 1)
cc = make(setOid, 1)
children[t.parent] = cc
}
cc.Add(oid)
......@@ -464,20 +474,20 @@ func (t nodeInTree) String() string {
// The second component with "-22" builds from leaf, but the first
// component with "-43" builds from non-leaf node.
//
// δnchildNonLeafs = {43: +1}
// ΔnchildNonLeafs = {43: +1}
//
// Only complete result of applying all
//
// - xfixup(-1, δnchildNonLeafs)
// - xfixup(-1, ΔnchildNonLeafs)
// - δ.Del,
// - δ.Add, and
// - xfixup(+1, δnchildNonLeafs)
// - xfixup(+1, ΔnchildNonLeafs)
//
// produces correctly PP-connected set.
type ΔPPTreeSubSet struct {
Del PPTreeSubSet
Add PPTreeSubSet
δnchildNonLeafs map[zodb.Oid]int
ΔnchildNonLeafs map[zodb.Oid]int
}
// NewΔPPTreeSubSet creates new empty ΔPPTreeSubSet.
......@@ -485,7 +495,7 @@ func NewΔPPTreeSubSet() *ΔPPTreeSubSet {
return &ΔPPTreeSubSet{
Del: PPTreeSubSet{},
Add: PPTreeSubSet{},
δnchildNonLeafs: map[zodb.Oid]int{},
ΔnchildNonLeafs: map[zodb.Oid]int{},
}
}
......@@ -493,15 +503,15 @@ func NewΔPPTreeSubSet() *ΔPPTreeSubSet {
func (δ *ΔPPTreeSubSet) Update(δ2 *ΔPPTreeSubSet) {
δ.Del.UnionInplace(δ2.Del)
δ.Add.UnionInplace(δ2.Add)
for oid, δnc := range δ2.δnchildNonLeafs {
δ.δnchildNonLeafs[oid] += δnc
for oid, δnc := range δ2.ΔnchildNonLeafs {
δ.ΔnchildNonLeafs[oid] += δnc
}
}
// Reverse changes δ=diff(A->B) to δ'=diff(A<-B).
func (δ *ΔPPTreeSubSet) Reverse() {
δ.Del, δ.Add = δ.Add, δ.Del
// δnchildNonLeafs stays the same
// ΔnchildNonLeafs stays the same
}
......@@ -514,7 +524,7 @@ func (S PPTreeSubSet) ApplyΔ(δ *ΔPPTreeSubSet) {
fmt.Printf(" A: %s\n", S)
fmt.Printf(" -: %s\n", δ.Del)
fmt.Printf(" +: %s\n", δ.Add)
fmt.Printf(" x: %v\n", δ.δnchildNonLeafs)
fmt.Printf(" x: %v\n", δ.ΔnchildNonLeafs)
defer fmt.Printf("\n->B: %s\n", S)
}
......@@ -523,8 +533,8 @@ func (S PPTreeSubSet) ApplyΔ(δ *ΔPPTreeSubSet) {
δ.Add.verify()
defer S.verify()
S.xfixup(-1, δ.δnchildNonLeafs)
S.xfixup(-1, δ.ΔnchildNonLeafs)
S.xDifferenceInplace(δ.Del)
S.xUnionInplace(δ.Add)
S.xfixup(+1, δ.δnchildNonLeafs)
S.xfixup(+1, δ.ΔnchildNonLeafs)
}
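A rough sketch of how a ΔPPTreeSubSet produced for one replaced bucket could be applied and then reverted. The oids are made up and the program is illustrative only - it mirrors the Del.AddPath/Add.AddPath pattern that treediff uses, but is not part of this commit:

package main

import (
	"fmt"

	"lab.nexedi.com/kirr/neo/go/zodb"

	"lab.nexedi.com/nexedi/wendelin.core/wcfs/internal/xbtree/blib"
)

func main() {
	root, bOld, bNew := zodb.Oid(1), zodb.Oid(2), zodb.Oid(3) // made-up oids

	// track the path root -> bOld
	T := blib.PPTreeSubSet{}
	T.AddPath([]zodb.Oid{root, bOld})

	// δ: bucket bOld was replaced by bNew under the same root
	δ := blib.NewΔPPTreeSubSet()
	δ.Del.AddPath([]zodb.Oid{root, bOld})
	δ.Add.AddPath([]zodb.Oid{root, bNew})

	T.ApplyΔ(δ)
	fmt.Println(T.Has(bOld), T.Has(bNew)) // false true

	// reversing δ and applying it again should restore the original tracking set
	δ.Reverse()
	T.ApplyΔ(δ)
	fmt.Println(T.Has(bOld), T.Has(bNew)) // true false
}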
......@@ -17,7 +17,7 @@
// See COPYING file for full licensing terms.
// See https://www.nexedi.com/licensing for rationale and options.
package xbtree
package blib
import (
"strings"
......
......@@ -17,7 +17,7 @@
// See COPYING file for full licensing terms.
// See https://www.nexedi.com/licensing for rationale and options.
package xbtree
package blib
// set of [lo,hi) Key ranges.
import (
......@@ -30,8 +30,8 @@ const debugRangeSet = false
// KeyRange represents [lo,hi) Key range.
type KeyRange struct {
lo Key
hi_ Key // NOTE _not_ hi) to avoid overflow at ∞; hi = hi_ + 1
Lo Key
Hi_ Key // NOTE _not_ hi) to avoid overflow at ∞; hi = hi_ + 1
}
// RangedKeySet is set of Keys with adjacent keys coalesced into Ranges.
......@@ -47,23 +47,23 @@ type RangedKeySet struct {
// Has returns whether key k belongs to the range.
func (r *KeyRange) Has(k Key) bool {
return (r.lo <= k && k <= r.hi_)
return (r.Lo <= k && k <= r.Hi_)
}
// Add adds key k to the set.
func (S *RangedKeySet) Add(k Key) {
S.AddRange(KeyRange{lo: k, hi_: k})
S.AddRange(KeyRange{Lo: k, Hi_: k})
}
// Del removes key k from the set.
func (S *RangedKeySet) Del(k Key) {
S.DelRange(KeyRange{lo: k, hi_: k})
S.DelRange(KeyRange{Lo: k, Hi_: k})
}
// Has returns whether key k belongs to the set.
func (S *RangedKeySet) Has(k Key) bool {
return S.HasRange(KeyRange{lo: k, hi_: k})
return S.HasRange(KeyRange{Lo: k, Hi_: k})
}
......@@ -79,10 +79,10 @@ func (S *RangedKeySet) AddRange(r KeyRange) {
S.verify()
defer S.verify()
// find first ilo: r.lo < [ilo].hi
// find first ilo: r.Lo < [ilo].hi
l := len(S.rangev)
ilo := sort.Search(l, func(i int) bool {
return r.lo <= S.rangev[i].hi_
return r.Lo <= S.rangev[i].Hi_
})
debugfRSet("\tilo: %d\n", ilo)
......@@ -92,58 +92,58 @@ func (S *RangedKeySet) AddRange(r KeyRange) {
debugfRSet("\tappend %s\t-> %s\n", r, S)
}
// find last jhi: [jhi].lo < r.hi
// find last jhi: [jhi].Lo < r.hi
jhi := ilo
for ;; jhi++ {
if jhi == l {
break
}
if S.rangev[jhi].lo <= r.hi_ {
if S.rangev[jhi].Lo <= r.Hi_ {
continue
}
break
}
debugfRSet("\tjhi: %d\n", jhi)
// entries in [ilo:jhi) ∈ [r.lo,r.hi) and should be merged into one
// entries in [ilo:jhi) ∈ [r.Lo,r.hi) and should be merged into one
if (jhi - ilo) > 1 {
lo := S.rangev[ilo].lo
hi_ := S.rangev[jhi-1].hi_
lo := S.rangev[ilo].Lo
hi_ := S.rangev[jhi-1].Hi_
vReplaceSlice(&S.rangev, ilo,jhi, KeyRange{lo,hi_})
debugfRSet("\tmerge S[%d:%d]\t-> %s\n", ilo, jhi, S)
}
jhi = -1 // no longer valid
// if [r.lo,r.hi) was outside of any entry - create new entry
if r.hi_ < S.rangev[ilo].lo {
if r.Hi_ < S.rangev[ilo].Lo {
vInsert(&S.rangev, ilo, r)
debugfRSet("\tinsert %s\t-> %s\n", r, S)
}
// now we have covered entries merged as needed into [ilo]
// extend this entry if r coverage is wider
if r.lo < S.rangev[ilo].lo {
S.rangev[ilo].lo = r.lo
if r.Lo < S.rangev[ilo].Lo {
S.rangev[ilo].Lo = r.Lo
debugfRSet("\textend left\t-> %s\n", S)
}
if r.hi_ > S.rangev[ilo].hi_ {
S.rangev[ilo].hi_ = r.hi_
if r.Hi_ > S.rangev[ilo].Hi_ {
S.rangev[ilo].Hi_ = r.Hi_
debugfRSet("\textend right\t-> %s\n", S)
}
// and check if we should merge it with right/left neighbours
if ilo+1 < len(S.rangev) { // right
if S.rangev[ilo].hi_+1 == S.rangev[ilo+1].lo {
if S.rangev[ilo].Hi_+1 == S.rangev[ilo+1].Lo {
vReplaceSlice(&S.rangev, ilo,ilo+2,
KeyRange{S.rangev[ilo].lo, S.rangev[ilo+1].hi_})
KeyRange{S.rangev[ilo].Lo, S.rangev[ilo+1].Hi_})
debugfRSet("\tmerge right\t-> %s\n", S)
}
}
if ilo > 0 { // left
if S.rangev[ilo-1].hi_+1 == S.rangev[ilo].lo {
if S.rangev[ilo-1].Hi_+1 == S.rangev[ilo].Lo {
vReplaceSlice(&S.rangev, ilo-1,ilo+1,
KeyRange{S.rangev[ilo-1].lo, S.rangev[ilo].hi_})
KeyRange{S.rangev[ilo-1].Lo, S.rangev[ilo].Hi_})
debugfRSet("\tmerge left\t-> %s\n", S)
}
}
......@@ -163,10 +163,10 @@ func (S *RangedKeySet) DelRange(r KeyRange) {
S.verify()
defer S.verify()
// find first ilo: r.lo < [ilo].hi
// find first ilo: r.Lo < [ilo].hi
l := len(S.rangev)
ilo := sort.Search(l, func(i int) bool {
return r.lo <= S.rangev[i].hi_
return r.Lo <= S.rangev[i].Hi_
})
debugfRSet("\tilo: %d\n", ilo)
......@@ -175,13 +175,13 @@ func (S *RangedKeySet) DelRange(r KeyRange) {
return
}
// find last jhi: [jhi].lo < r.hi
// find last jhi: [jhi].Lo < r.hi
jhi := ilo
for ;; jhi++ {
if jhi == l {
break
}
if S.rangev[jhi].lo <= r.hi_ {
if S.rangev[jhi].Lo <= r.Hi_ {
continue
}
break
......@@ -196,19 +196,19 @@ func (S *RangedKeySet) DelRange(r KeyRange) {
// [ilo+1:jhi-1] should be deleted
// [ilo] and [jhi-1] overlap with [r.lo,r.hi) - they should be deleted, or shrunk,
// or split+shrunk if ilo==jhi-1 and r is inside [ilo]
if jhi-ilo == 1 && S.rangev[ilo].lo < r.lo && r.hi_ < S.rangev[ilo].hi_ {
if jhi-ilo == 1 && S.rangev[ilo].Lo < r.Lo && r.Hi_ < S.rangev[ilo].Hi_ {
x := S.rangev[ilo]
vInsert(&S.rangev, ilo, x)
jhi++
debugfRSet("\tpresplit copy %s\t-> %s\n", x, S)
}
if S.rangev[ilo].lo < r.lo { // shrink left
S.rangev[ilo] = KeyRange{S.rangev[ilo].lo, r.lo-1}
if S.rangev[ilo].Lo < r.Lo { // shrink left
S.rangev[ilo] = KeyRange{S.rangev[ilo].Lo, r.Lo-1}
ilo++
debugfRSet("\tshrink [%d] left\t-> %s\n", ilo, S)
}
if r.hi_ < S.rangev[jhi-1].hi_ { // shrink right
S.rangev[jhi-1] = KeyRange{r.hi_+1, S.rangev[jhi-1].hi_}
if r.Hi_ < S.rangev[jhi-1].Hi_ { // shrink right
S.rangev[jhi-1] = KeyRange{r.Hi_+1, S.rangev[jhi-1].Hi_}
jhi--
debugfRSet("\tshrink [%d] right\t-> %s\n", jhi-1, S)
}
......@@ -237,7 +237,7 @@ func (S *RangedKeySet) HasRange(r KeyRange) (yes bool) {
// find first ilo: r.lo < [ilo].hi
l := len(S.rangev)
ilo := sort.Search(l, func(i int) bool {
return r.lo <= S.rangev[i].hi_
return r.Lo <= S.rangev[i].Hi_
})
debugfRSet("\tilo: %d\n", ilo)
......@@ -246,7 +246,7 @@ func (S *RangedKeySet) HasRange(r KeyRange) (yes bool) {
}
// all keys from r are in S if r ∈ [ilo]
return (S.rangev[ilo].lo <= r.lo && r.hi_ <= S.rangev[ilo].hi_)
return (S.rangev[ilo].Lo <= r.Lo && r.Hi_ <= S.rangev[ilo].Hi_)
}
......@@ -317,13 +317,13 @@ func (S *RangedKeySet) verify() {
hi_Prev := KeyMin
for i, r := range S.rangev {
hiPrev := hi_Prev + 1
if i > 0 && !(hiPrev < r.lo) { // NOTE not ≤ - adjacent ranges must be merged
if i > 0 && !(hiPrev < r.Lo) { // NOTE not ≤ - adjacent ranges must be merged
badf("[%d]: !(hiPrev < r.lo)", i)
}
if !(r.lo <= r.hi_) {
if !(r.Lo <= r.Hi_) {
badf("[%d]: !(r.lo <= r.hi_)", i)
}
hi_Prev = r.hi_
hi_Prev = r.Hi_
}
}
......@@ -378,12 +378,12 @@ func (S RangedKeySet) String() string {
func (r KeyRange) String() string {
var shi string
if r.hi_ == KeyMax {
shi = kstr(r.hi_) // ∞
if r.Hi_ == KeyMax {
shi = kstr(r.Hi_) // ∞
} else {
shi = fmt.Sprintf("%d", r.hi_+1)
shi = fmt.Sprintf("%d", r.Hi_+1)
}
return fmt.Sprintf("[%s,%s)", kstr(r.lo), shi)
return fmt.Sprintf("[%s,%s)", kstr(r.Lo), shi)
}
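A small illustrative sketch (not part of this commit) of the now-exported KeyRange fields and of RangedKeySet coalescing/splitting behaviour; note that Hi_ is the inclusive upper bound, so {Lo: 1, Hi_: 5} means [1,6):

package main

import (
	"fmt"

	"lab.nexedi.com/nexedi/wendelin.core/wcfs/internal/xbtree/blib"
)

func main() {
	S := &blib.RangedKeySet{}
	S.AddRange(blib.KeyRange{Lo: 1, Hi_: 5}) // keys 1..5, i.e. [1,6)
	S.Add(6)                                 // adjacent key -> coalesced into [1,7)
	S.DelRange(blib.KeyRange{Lo: 3, Hi_: 3}) // split into [1,3) and [4,7)

	fmt.Println(S)                                        // expected to print something like {[1,3) [4,7)}
	fmt.Println(S.Has(2), S.Has(3))                       // true false
	fmt.Println(S.HasRange(blib.KeyRange{Lo: 4, Hi_: 6})) // true
}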
......
......@@ -17,7 +17,7 @@
// See COPYING file for full licensing terms.
// See https://www.nexedi.com/licensing for rationale and options.
package xbtree
package blib
import (
"testing"
......
......@@ -80,6 +80,7 @@ import (
"lab.nexedi.com/kirr/neo/go/zodb"
"lab.nexedi.com/nexedi/wendelin.core/wcfs/internal/xzodb"
"lab.nexedi.com/nexedi/wendelin.core/wcfs/internal/xbtree/blib"
)
const traceDiff = false
......@@ -113,10 +114,10 @@ func (δv ΔValue) String() string {
// for example, for t₀->t₁->b₂, if δZ/T={t₀ b₂} -> δZ/TC=δZ/T+{t₁}
//
// δtopsByRoot = {} root -> {top changed nodes in that tree}
func δZConnectTracked(δZv []zodb.Oid, T PPTreeSubSet) (δZTC SetOid, δtopsByRoot map[zodb.Oid]SetOid) {
δZ := SetOid{}; for _, δ := range δZv { δZ.Add(δ) }
δZTC = SetOid{}
δtopsByRoot = map[zodb.Oid]SetOid{}
func δZConnectTracked(δZv []zodb.Oid, T blib.PPTreeSubSet) (δZTC setOid, δtopsByRoot map[zodb.Oid]setOid) {
δZ := setOid{}; for _, δ := range δZv { δZ.Add(δ) }
δZTC = setOid{}
δtopsByRoot = map[zodb.Oid]setOid{}
for δ := range δZ {
track, ok := T[δ]
......@@ -131,14 +132,14 @@ func δZConnectTracked(δZv []zodb.Oid, T PPTreeSubSet) (δZTC SetOid, δtopsByR
// if !root -> δZTC += path through which we reached another node (forming connection)
path := []zodb.Oid{}
node := δ
parent := track.parent
parent := track.Parent()
for {
// reached root
if parent == zodb.InvalidOid {
root := node
δtops, ok := δtopsByRoot[root]
if !ok {
δtops = SetOid{}
δtops = setOid{}
δtopsByRoot[root] = δtops
}
δtops.Add(δ)
......@@ -159,7 +160,7 @@ func δZConnectTracked(δZv []zodb.Oid, T PPTreeSubSet) (δZTC SetOid, δtopsByR
panicf("BUG: .p%s -> %s, but %s is not tracked", node, parent, parent)
}
node = parent
parent = trackUp.parent
parent = trackUp.Parent()
}
}
......@@ -171,7 +172,7 @@ func δZConnectTracked(δZv []zodb.Oid, T PPTreeSubSet) (δZTC SetOid, δtopsByR
// nodeInRange represents a Node coming under [lo, hi_] key range in its tree.
type nodeInRange struct {
prefix []zodb.Oid // path to this node goes via this objects
lo, hi_ Key // [lo, hi_] NOTE _not_ hi) not to overflow at ∞ XXX -> keycov KeyRange?
keycov blib.KeyRange
node Node
done bool // whether this node was already taken into account while computing diff
}
......@@ -209,14 +210,14 @@ func (rs rangeSplit) Get(k Key) *nodeInRange {
// Get_ returns node covering key k.
func (rs rangeSplit) Get_(k Key) (rnode *nodeInRange, ok bool) {
i := sort.Search(len(rs), func(i int) bool {
return k <= rs[i].hi_
return k <= rs[i].keycov.Hi_
})
if i == len(rs) {
return nil, false // key not covered
}
rn := rs[i]
if !(rn.lo <= k && k <= rn.hi_) {
if !rn.keycov.Has(k) {
panicf("BUG: get(%v) -> %s; coverage: %s", k, rn, rs)
}
......@@ -233,7 +234,7 @@ func (rs rangeSplit) Get_(k Key) (rnode *nodeInRange, ok bool) {
func (prs *rangeSplit) Expand(rnode *nodeInRange) (children rangeSplit) {
rs := *prs
i := sort.Search(len(rs), func(i int) bool {
return rnode.hi_ <= rs[i].hi_
return rnode.keycov.Hi_ <= rs[i].keycov.Hi_
})
if i == len(rs) || rs[i] != rnode {
panicf("%s not in rangeSplit; coverage: %s", rnode, rs)
......@@ -247,19 +248,18 @@ func (prs *rangeSplit) Expand(rnode *nodeInRange) (children rangeSplit) {
treev := tree.Entryv()
children = make(rangeSplit, 0, len(treev)+1)
for i := range treev {
lo := rnode.lo
lo := rnode.keycov.Lo
if i > 0 {
lo = treev[i].Key()
}
hi_ := rnode.hi_
hi_ := rnode.keycov.Hi_
if i < len(treev)-1 {
hi_ = treev[i+1].Key()-1 // NOTE -1 because it is hi_] not hi)
}
children = append(children, &nodeInRange{
prefix: rnode.Path(),
lo: lo,
hi_: hi_,
keycov: blib.KeyRange{lo, hi_},
node: treev[i].Child(),
})
}
......@@ -338,18 +338,18 @@ func (rs rangeSplit) String() string {
// δtops is set of top nodes for changed subtrees.
// δZTC is connected(δZ/T) - connected closure for subset of δZ(old..new) that
// touches tracked nodes of T.
func treediff(ctx context.Context, root zodb.Oid, δtops SetOid, δZTC SetOid, trackSet PPTreeSubSet, zconnOld, zconnNew *zodb.Connection) (δT map[Key]ΔValue, δtrack *ΔPPTreeSubSet, δtkeycov *RangedKeySet, err error) {
func treediff(ctx context.Context, root zodb.Oid, δtops setOid, δZTC setOid, trackSet blib.PPTreeSubSet, zconnOld, zconnNew *zodb.Connection) (δT map[Key]ΔValue, δtrack *blib.ΔPPTreeSubSet, δtkeycov *blib.RangedKeySet, err error) {
defer xerr.Contextf(&err, "treediff %s..%s %s", zconnOld.At(), zconnNew.At(), root)
δT = map[Key]ΔValue{}
δtrack = NewΔPPTreeSubSet()
δtkeycov = &RangedKeySet{}
δtrack = blib.NewΔPPTreeSubSet()
δtkeycov = &blib.RangedKeySet{}
tracefDiff("\ntreediff %s δtops: %v δZTC: %v\n", root, δtops, δZTC)
tracefDiff(" trackSet: %v\n", trackSet)
defer tracefDiff("\n-> δT: %v\nδtrack: %v\nδtkeycov: %v\n", δT, δtrack, δtkeycov)
δtrackv := []*ΔPPTreeSubSet{}
δtrackv := []*blib.ΔPPTreeSubSet{}
for top := range δtops { // XXX -> sorted?
a, err1 := zgetNodeOrNil(ctx, zconnOld, top)
......@@ -401,7 +401,7 @@ func treediff(ctx context.Context, root zodb.Oid, δtops SetOid, δZTC SetOid, t
// consistent with b (= a + δ).
//
// δtkeycov represents how δtrack grows (always grows) tracking set key coverage.
func diffX(ctx context.Context, a, b Node, δZTC SetOid, trackSet PPTreeSubSet) (δ map[Key]ΔValue, δtrack *ΔPPTreeSubSet, δtkeycov *RangedKeySet, err error) {
func diffX(ctx context.Context, a, b Node, δZTC setOid, trackSet blib.PPTreeSubSet) (δ map[Key]ΔValue, δtrack *blib.ΔPPTreeSubSet, δtkeycov *blib.RangedKeySet, err error) {
if a==nil && b==nil {
panic("BUG: both a & b == nil") // XXX -> not a bug e.g. for `ø ø T` sequence?
}
......@@ -438,11 +438,11 @@ func diffX(ctx context.Context, a, b Node, δZTC SetOid, trackSet PPTreeSubSet)
if isT {
return diffT(ctx, aT, bT, δZTC, trackSet)
} else {
var δtrack *ΔPPTreeSubSet
var δtrack *blib.ΔPPTreeSubSet
δ, err := diffB(ctx, aB, bB)
if δ != nil {
δtrack = NewΔPPTreeSubSet()
δtkeycov = &RangedKeySet{}
δtrack = blib.NewΔPPTreeSubSet()
δtkeycov = &blib.RangedKeySet{}
}
return δ, δtrack, δtkeycov, err
}
......@@ -452,13 +452,13 @@ func diffX(ctx context.Context, a, b Node, δZTC SetOid, trackSet PPTreeSubSet)
//
// a, b point to top of subtrees @old and @new revisions.
// δZTC is connected set of objects covering δZT (objects changed in this tree in old..new).
func diffT(ctx context.Context, A, B *Tree, δZTC SetOid, trackSet PPTreeSubSet) (δ map[Key]ΔValue, δtrack *ΔPPTreeSubSet, δtkeycov *RangedKeySet, err error) {
func diffT(ctx context.Context, A, B *Tree, δZTC setOid, trackSet blib.PPTreeSubSet) (δ map[Key]ΔValue, δtrack *blib.ΔPPTreeSubSet, δtkeycov *blib.RangedKeySet, err error) {
tracefDiff(" diffT %s %s\n", xidOf(A), xidOf(B))
defer xerr.Contextf(&err, "diffT %s %s", xidOf(A), xidOf(B))
δ = map[Key]ΔValue{}
δtrack = NewΔPPTreeSubSet()
δtkeycov = &RangedKeySet{}
δtrack = blib.NewΔPPTreeSubSet()
δtkeycov = &blib.RangedKeySet{}
defer func() {
tracefDiff(" -> δ: %v\n", δ)
tracefDiff(" -> δtrack: %v\n", δtrack)
......@@ -557,29 +557,30 @@ ABcov:
}
// initial split ranges for A and B
ABcov := blib.KeyRange{ABlo, ABhi_}
prefix := ABpath[:len(ABpath)-1]
atop := &nodeInRange{prefix: prefix, lo: ABlo, hi_: ABhi_, node: A} // [-∞, ∞)
btop := &nodeInRange{prefix: prefix, lo: ABlo, hi_: ABhi_, node: B} // [-∞, ∞)
atop := &nodeInRange{prefix: prefix, keycov: ABcov, node: A}
btop := &nodeInRange{prefix: prefix, keycov: ABcov, node: B}
Av := rangeSplit{atop} // nodes expanded from A
Bv := rangeSplit{btop} // nodes expanded from B
// for phase 2:
Akqueue := &RangedKeySet{} // queue for keys in A to be processed for δ-
Bkqueue := &RangedKeySet{} // ----//---- in B for δ+
Akdone := &RangedKeySet{} // already processed keys in A
Bkdone := &RangedKeySet{} // ----//---- in B
Aktodo := func(r KeyRange) {
Akqueue := &blib.RangedKeySet{} // queue for keys in A to be processed for δ-
Bkqueue := &blib.RangedKeySet{} // ----//---- in B for δ+
Akdone := &blib.RangedKeySet{} // already processed keys in A
Bkdone := &blib.RangedKeySet{} // ----//---- in B
Aktodo := func(r blib.KeyRange) {
if !Akdone.HasRange(r) {
δtodo := &RangedKeySet{}
δtodo := &blib.RangedKeySet{}
δtodo.AddRange(r)
δtodo.DifferenceInplace(Akdone)
debugfDiff(" Akq <- %s\n", δtodo)
Akqueue.UnionInplace(δtodo)
}
}
Bktodo := func(r KeyRange) {
Bktodo := func(r blib.KeyRange) {
if !Bkdone.HasRange(r) {
δtodo := &RangedKeySet{}
δtodo := &blib.RangedKeySet{}
δtodo.AddRange(r)
δtodo.DifferenceInplace(Bkdone)
debugfDiff(" Bkq <- %s\n", δtodo)
......@@ -595,8 +596,8 @@ ABcov:
}
// δtkeycov will be = BAdd \ ADel
δtkeycovADel := &RangedKeySet{}
δtkeycovBAdd := &RangedKeySet{}
δtkeycovADel := &blib.RangedKeySet{}
δtkeycovBAdd := &blib.RangedKeySet{}
// phase 1: expand A top->down driven by δZTC.
// by default a node contributes to δ-
......@@ -620,23 +621,21 @@ ABcov:
// a is bucket -> δ-
δA, err := diffB(ctx, a, nil); /*X*/if err != nil { return nil,nil,nil, err }
err = δMerge(δ, δA); /*X*/if err != nil { return nil,nil,nil, err }
ar := KeyRange{ra.lo, ra.hi_}
δtrack.Del.AddPath(ra.Path())
δtkeycovADel.AddRange(ar)
debugfDiff(" δtrack - %s %v\n", ar, ra.Path())
δtkeycovADel.AddRange(ra.keycov)
debugfDiff(" δtrack - %s %v\n", ra.keycov, ra.Path())
// Bkqueue <- ra.range
Bktodo(ar)
Bktodo(ra.keycov)
ra.done = true
case *Tree:
// empty tree - queue holes covered by it
if len(a.Entryv()) == 0 {
ar := KeyRange{ra.lo, ra.hi_}
δtrack.Del.AddPath(ra.Path())
δtkeycovADel.AddRange(ar)
debugfDiff(" δtrack - %s %v\n", ar, ra.Path())
Bktodo(ar)
δtkeycovADel.AddRange(ra.keycov)
debugfDiff(" δtrack - %s %v\n", ra.keycov, ra.Path())
Bktodo(ra.keycov)
continue
}
......@@ -659,8 +658,8 @@ ABcov:
bc, found := BnodeIdx[acOid]
if !found {
for {
blo := Bv.Get(ac.lo)
bhi_ := Bv.Get(ac.hi_)
blo := Bv.Get(ac.keycov.Lo)
bhi_ := Bv.Get(ac.keycov.Hi_)
if blo != bhi_ {
break
}
......@@ -691,17 +690,15 @@ ABcov:
}
if found {
// ac can be skipped if key coverage stays the same
ar := KeyRange{ac.lo, ac.hi_}
br := KeyRange{bc.lo, bc.hi_}
if ar == br {
if ac.keycov == bc.keycov {
// adjust trackSet since path to the node could have changed
apath := ac.Path()
bpath := bc.Path()
if !pathEqual(apath, bpath) {
δtrack.Del.AddPath(apath)
δtrack.Add.AddPath(bpath)
if nc := at.nchild; nc != 0 {
δtrack.δnchildNonLeafs[acOid] = nc
if nc := at.NChild(); nc != 0 {
δtrack.ΔnchildNonLeafs[acOid] = nc
}
}
......@@ -744,7 +741,7 @@ ABcov:
}
for _, r := range Bkqueue.AllRanges() {
lo := r.lo
lo := r.Lo
for {
b, err := Bv.GetToLeaf(ctx, lo); /*X*/if err != nil { return nil,nil,nil, err }
debugfDiff(" B k%d -> %s\n", lo, b)
......@@ -758,23 +755,22 @@ ABcov:
// δ <- δB
err = δMerge(δ, δB); /*X*/if err != nil { return nil,nil,nil, err }
br := KeyRange{b.lo, b.hi_}
δtrack.Add.AddPath(b.Path())
δtkeycovBAdd.AddRange(br)
debugfDiff(" δtrack + %s %v\n", br, b.Path())
δtkeycovBAdd.AddRange(b.keycov)
debugfDiff(" δtrack + %s %v\n", b.keycov, b.Path())
// Akqueue <- δB
Bkdone.AddRange(br)
Aktodo(br)
Bkdone.AddRange(b.keycov)
Aktodo(b.keycov)
b.done = true
}
// continue with next right bucket until r coverage is complete
if r.hi_ <= b.hi_ {
if r.Hi_ <= b.keycov.Hi_ {
break
}
lo = b.hi_ + 1
lo = b.keycov.Hi_ + 1
}
}
Bkqueue.Clear()
......@@ -782,7 +778,7 @@ ABcov:
debugfDiff("\n")
debugfDiff(" Akq: %s\n", Akqueue)
for _, r := range Akqueue.AllRanges() {
lo := r.lo
lo := r.Lo
for {
a, err := Av.GetToLeaf(ctx, lo); /*X*/if err != nil { return nil,nil,nil, err }
debugfDiff(" A k%d -> %s\n", lo, a)
......@@ -798,27 +794,26 @@ ABcov:
err = δMerge(δ, δA); /*X*/if err != nil { return nil,nil,nil, err }
δtrack.Del.AddPath(a.Path())
// NOTE adjust δtkeycovADel only if a was originally tracked
ar := KeyRange{a.lo, a.hi_}
_, tracked := trackSet[a.node.POid()]
if tracked {
δtkeycovADel.AddRange(ar)
debugfDiff(" δtrack - %s %v\n", ar, a.Path())
δtkeycovADel.AddRange(a.keycov)
debugfDiff(" δtrack - %s %v\n", a.keycov, a.Path())
} else {
debugfDiff(" δtrack - [) %v\n", a.Path())
}
// Bkqueue <- a.range
Akdone.AddRange(ar)
Bktodo(ar)
Akdone.AddRange(a.keycov)
Bktodo(a.keycov)
a.done = true
}
// continue with next right bucket until r coverage is complete
if r.hi_ <= a.hi_ {
if r.Hi_ <= a.keycov.Hi_ {
break
}
lo = a.hi_ + 1
lo = a.keycov.Hi_ + 1
}
}
Akqueue.Clear()
......@@ -1009,7 +1004,7 @@ func xidOf(obj zodb.IPersistent) string {
func (rn *nodeInRange) String() string {
done := " "; if rn.done { done = "*" }
return fmt.Sprintf("%s%s%s", done, KeyRange{rn.lo, rn.hi_}, vnode(rn.node))
return fmt.Sprintf("%s%s%s", done, rn.keycov, vnode(rn.node))
}
// push pushes element to node stack.
......
......@@ -5,34 +5,33 @@ package xbtree
import (
"fmt"
"math"
"lab.nexedi.com/kirr/neo/go/zodb"
"lab.nexedi.com/kirr/neo/go/zodb/btree"
"lab.nexedi.com/nexedi/wendelin.core/wcfs/internal/set"
"lab.nexedi.com/nexedi/wendelin.core/wcfs/internal/xbtree/blib"
)
// XXX instead of generics
type Tree = btree.LOBTree
type Bucket = btree.LOBucket
type Node = btree.LONode
type TreeEntry = btree.LOEntry
type BucketEntry = btree.LOBucketEntry
type Tree = blib.Tree
type Bucket = blib.Bucket
type Node = blib.Node
type TreeEntry = blib.TreeEntry
type BucketEntry = blib.BucketEntry
type Key = int64
const KeyMax Key = math.MaxInt64
const KeyMin Key = math.MinInt64
type Key = blib.Key
const KeyMax = blib.KeyMax
const KeyMin = blib.KeyMin
// value is assumed to be persistent reference.
// deletion is represented as VDEL.
type Value = zodb.Oid
const VDEL = zodb.InvalidOid
type SetKey = set.SetI64
type SetOid = set.SetOid
type SetTid = set.SetTid
type setKey = set.I64
type setOid = set.Oid
type setTid = set.Tid
......@@ -74,3 +73,7 @@ func kstr(k Key) string {
}
return fmt.Sprintf("%d", k)
}
func panicf(format string, argv ...interface{}) {
panic(fmt.Sprintf(format, argv...))
}
// Copyright (C) 2020-2021 Nexedi SA and Contributors.
// Kirill Smelkov <kirr@nexedi.com>
//
// This program is free software: you can Use, Study, Modify and Redistribute
// it under the terms of the GNU General Public License version 3, or (at your
// option) any later version, as published by the Free Software Foundation.
//
// You can also Link and Combine this program with other software covered by
// the terms of any of the Free Software licenses or any of the Open Source
// Initiative approved licenses and Convey the resulting work. Corresponding
// source of such a combination shall include the source code for all other
// software used.
//
// This program is distributed WITHOUT ANY WARRANTY; without even the implied
// warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
//
// See COPYING file for full licensing terms.
// See https://www.nexedi.com/licensing for rationale and options.
// Package xbtreetest/init (ex imported from package A) should be imported in
// addition to xbtreetest (from package A_test) to initialize xbtreetest at runtime.
package init
// ZBlk-related part of δbtail_test
import (
"context"
"fmt"
"lab.nexedi.com/kirr/go123/xerr"
"lab.nexedi.com/kirr/neo/go/zodb"
"lab.nexedi.com/nexedi/wendelin.core/wcfs/internal/xbtree/xbtreetest"
"lab.nexedi.com/nexedi/wendelin.core/wcfs/internal/xzodb"
"lab.nexedi.com/nexedi/wendelin.core/wcfs/internal/zdata"
)
type Tree = xbtreetest.Tree
type Node = xbtreetest.Node
type Key = xbtreetest.Key
type ZBlk = zdata.ZBlk
// ztreeGetBlk returns ztree[k] and tree path that lead to this block.
// XXX +return blkRevMax and use it ?
func ztreeGetBlk(ctx context.Context, ztree *Tree, k Key) (zblk ZBlk, ok bool, path []Node, err error) {
path = []Node{}
xzblk, ok, err := ztree.VGet(ctx, k, func(node Node) {
path = append(path, node)
})
if err != nil {
return nil, false, nil, err
}
if ok {
zblk, ok = xzblk.(ZBlk)
if !ok {
return nil, false, nil, fmt.Errorf("expect ZBlk*; got %s", xzodb.TypeOf(xzblk)) // XXX errctx
}
}
return zblk, ok, path, nil
}
func init() {
xbtreetest.ZTreeGetBlkData = _ZTreeGetBlkData
xbtreetest.ZGetBlkData = _ZGetBlkData
}
// _ZTreeGetBlkData returns block data from block pointed to by ztree[k].
func _ZTreeGetBlkData(ctx context.Context, ztree *Tree, k Key) (data string, ok bool, path []Node, err error) {
defer xerr.Contextf(&err, "@%s: tree<%s>: get blkdata from [%d]", ztree.PJar().At(), ztree.POid(), k)
zblk, ok, path, err := ztreeGetBlk(ctx, ztree, k)
if err != nil || !ok {
return "", ok, path, err
}
bdata, _, err := zblk.LoadBlkData(ctx)
if err != nil {
return "", false, nil, err
}
return string(bdata), true, path, nil
}
// _ZGetBlkData loads block data from ZBlk object specified by its oid.
func _ZGetBlkData(ctx context.Context, zconn *zodb.Connection, zblkOid zodb.Oid) (data string, err error) {
defer xerr.Contextf(&err, "@%s: get blkdata from obj %s", zconn.At(), zblkOid)
xblk, err := zconn.Get(ctx, zblkOid)
if err != nil {
return "", err
}
zblk, ok := xblk.(ZBlk)
if !ok {
return "", fmt.Errorf("expect ZBlk*; got %s", xzodb.TypeOf(xblk))
}
bdata, _, err := zblk.LoadBlkData(ctx)
if err != nil {
return "", err
}
return string(bdata), nil
}
// Copyright (C) 2020-2021 Nexedi SA and Contributors.
// Kirill Smelkov <kirr@nexedi.com>
//
// This program is free software: you can Use, Study, Modify and Redistribute
// it under the terms of the GNU General Public License version 3, or (at your
// option) any later version, as published by the Free Software Foundation.
//
// You can also Link and Combine this program with other software covered by
// the terms of any of the Free Software licenses or any of the Open Source
// Initiative approved licenses and Convey the resulting work. Corresponding
// source of such a combination shall include the source code for all other
// software used.
//
// This program is distributed WITHOUT ANY WARRANTY; without even the implied
// warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
//
// See COPYING file for full licensing terms.
// See https://www.nexedi.com/licensing for rationale and options.
package xbtreetest
// kvdiff + friends
import (
"fmt"
"sort"
"strings"
)
// kvdiff returns the difference between kv1 and kv2.
const DEL = "ø" // DEL means deletion
type Δstring struct {
Old string
New string
}
func kvdiff(kv1, kv2 map[Key]string) map[Key]Δstring {
delta := map[Key]Δstring{}
keys := setKey{}
for k := range kv1 { keys.Add(k) }
for k := range kv2 { keys.Add(k) }
for k := range keys {
v1, ok := kv1[k]
if !ok { v1 = DEL }
v2, ok := kv2[k]
if !ok { v2 = DEL }
if v1 != v2 {
delta[k] = Δstring{v1,v2}
}
}
return delta
}
// kvtxt returns string representation of {} kv.
func kvtxt(kv map[Key]string) string {
if len(kv) == 0 {
return "ø"
}
keyv := []Key{}
for k := range kv { keyv = append(keyv, k) }
sort.Slice(keyv, func(i,j int) bool { return keyv[i] < keyv[j] })
sv := []string{}
for _, k := range keyv {
v := kv[k]
if strings.ContainsAny(v, " \n\t,:") {
panicf("[%v]=%q: invalid value", k, v)
}
sv = append(sv, fmt.Sprintf("%v:%s", k, v))
}
return strings.Join(sv, ",")
}
// Copyright (C) 2018-2021 Nexedi SA and Contributors.
// Copyright (C) 2020-2021 Nexedi SA and Contributors.
// Kirill Smelkov <kirr@nexedi.com>
//
// This program is free software: you can Use, Study, Modify and Redistribute
......@@ -17,12 +17,28 @@
// See COPYING file for full licensing terms.
// See https://www.nexedi.com/licensing for rationale and options.
package xbtree
package xbtreetest
import (
"fmt"
"reflect"
"testing"
)
func panicf(format string, argv ...interface{}) {
panic(fmt.Sprintf(format, argv...))
func TestKVDiff(t *testing.T) {
kv1 := map[Key]string{1:"a", 3:"c", 4:"d"}
kv2 := map[Key]string{1:"b", 4:"d", 5:"e"}
got := kvdiff(kv1, kv2)
want := map[Key]Δstring{1:{"a","b"}, 3:{"c",DEL}, 5:{DEL,"e"}}
if !reflect.DeepEqual(got, want) {
t.Fatalf("error:\ngot: %v\nwant: %v", got, want)
}
}
func TestKVTxt(t *testing.T) {
kv := map[Key]string{3:"hello", 1:"zzz", 4:"world"}
got := kvtxt(kv)
want := "1:zzz,3:hello,4:world"
if got != want {
t.Fatalf("error:\ngot: %q\nwant: %q", got, want)
}
}
// Copyright (C) 2020-2021 Nexedi SA and Contributors.
// Kirill Smelkov <kirr@nexedi.com>
//
// This program is free software: you can Use, Study, Modify and Redistribute
// it under the terms of the GNU General Public License version 3, or (at your
// option) any later version, as published by the Free Software Foundation.
//
// You can also Link and Combine this program with other software covered by
// the terms of any of the Free Software licenses or any of the Open Source
// Initiative approved licenses and Convey the resulting work. Corresponding
// source of such a combination shall include the source code for all other
// software used.
//
// This program is distributed WITHOUT ANY WARRANTY; without even the implied
// warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
//
// See COPYING file for full licensing terms.
// See https://www.nexedi.com/licensing for rationale and options.
package xbtreetest
import (
"fmt"
"sort"
"lab.nexedi.com/kirr/neo/go/zodb"
"lab.nexedi.com/nexedi/wendelin.core/wcfs/internal/xbtree/blib"
)
// RTree represents Tree node covering [lo, hi_] key range in its parent tree.
// XXX actually no coverage here -> kill? -> change to just `path []zodb.Oid` in RBucket?
type RTree struct {
Oid zodb.Oid
Parent *RTree
// XXX +children?
}
// RBucket represents Bucket node covering [lo, hi_] key range in its Tree.
// NOTE it is not [lo,hi) but [lo,hi_] instead to avoid overflow at KeyMax.
type RBucket struct {
Oid zodb.Oid
Parent *RTree
Keycov blib.KeyRange
KV map[Key]string // bucket's k->v; values were ZBlk objects whose data is loaded instead.
}
// Path returns path to this bucket from tree root.
func (rb *RBucket) Path() []zodb.Oid {
path := []zodb.Oid{rb.Oid}
p := rb.Parent
for p != nil {
path = append([]zodb.Oid{p.Oid}, path...)
p = p.Parent
}
return path
}
// RBucketSet represents set of buckets covering whole [-∞,∞) range.
type RBucketSet []*RBucket // k↑
// Get returns RBucket which covers key k.
func (rbs RBucketSet) Get(k Key) *RBucket {
i := sort.Search(len(rbs), func(i int) bool {
return k <= rbs[i].Keycov.Hi_
})
if i == len(rbs) {
panicf("BUG: key %v not covered; coverage: %s", k, rbs.coverage())
}
rb := rbs[i]
if !rb.Keycov.Has(k) {
panicf("BUG: get(%v) -> %s; coverage: %s", k, rb.Keycov, rbs.coverage())
}
return rb
}
// coverage returns string representation of rbs coverage structure.
func (rbs RBucketSet) coverage() string {
if len(rbs) == 0 {
return "ø"
}
s := ""
for _, rb := range rbs {
if s != "" {
s += " "
}
s += fmt.Sprintf("%s", rb.Keycov)
}
return s
}
// Flatten converts xkv with bucket structure into regular dict.
func (xkv RBucketSet) Flatten() map[Key]string {
kv := make(map[Key]string)
for _, b := range xkv {
for k,v := range b.KV {
kv[k] = v
}
}
return kv
}
func (b *RBucket) String() string {
return fmt.Sprintf("%sB%s{%s}", b.Keycov, b.Oid, kvtxt(b.KV))
}
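For orientation, an illustrative hand-built RBucketSet (real ones come from xGetTree); oids, keys and values here are made up and the program is not part of this commit:

package main

import (
	"fmt"

	"lab.nexedi.com/nexedi/wendelin.core/wcfs/internal/xbtree/blib"
	"lab.nexedi.com/nexedi/wendelin.core/wcfs/internal/xbtree/xbtreetest"
)

func main() {
	// two buckets covering the whole key range, sorted by key
	rbs := xbtreetest.RBucketSet{
		&xbtreetest.RBucket{
			Keycov: blib.KeyRange{Lo: blib.KeyMin, Hi_: 9},
			KV:     map[xbtreetest.Key]string{5: "e"},
		},
		&xbtreetest.RBucket{
			Keycov: blib.KeyRange{Lo: 10, Hi_: blib.KeyMax},
			KV:     map[xbtreetest.Key]string{12: "l"},
		},
	}

	fmt.Println(rbs.Get(5).KV[5]) // e
	fmt.Println(rbs.Get(12).KV)   // map[12:l]
	fmt.Println(rbs.Flatten())    // map[5:e 12:l]
}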
// Copyright (C) 2020-2021 Nexedi SA and Contributors.
// Kirill Smelkov <kirr@nexedi.com>
//
// This program is free software: you can Use, Study, Modify and Redistribute
// it under the terms of the GNU General Public License version 3, or (at your
// option) any later version, as published by the Free Software Foundation.
//
// You can also Link and Combine this program with other software covered by
// the terms of any of the Free Software licenses or any of the Open Source
// Initiative approved licenses and Convey the resulting work. Corresponding
// source of such a combination shall include the source code for all other
// software used.
//
// This program is distributed WITHOUT ANY WARRANTY; without even the implied
// warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
//
// See COPYING file for full licensing terms.
// See https://www.nexedi.com/licensing for rationale and options.
package xbtreetest
// T + friends
import (
"context"
"testing"
"lab.nexedi.com/kirr/go123/exc"
"lab.nexedi.com/kirr/neo/go/transaction"
"lab.nexedi.com/kirr/neo/go/zodb"
"lab.nexedi.com/nexedi/wendelin.core/wcfs/internal/xbtree/blib"
"lab.nexedi.com/nexedi/wendelin.core/wcfs/internal/xzodb"
)
// T is tree-based testing environment.
//
// It combines TreeSrv and client side access to ZODB with committed trees.
// It should be created via NewT().
type T struct {
*testing.T
work string // working directory
treeSrv *TreeSrv
zstor zodb.IStorage
DB *zodb.DB
// all committed trees
commitv []*Commit
}
// Commit represents a test commit changing a tree.
type Commit struct {
Tree string // the tree in topology-encoding
Prev *Commit // previous commit
At zodb.Tid // commit revision
ΔZ *zodb.EventCommit // raw ZODB changes; δZ.tid == at
Xkv RBucketSet // full tree state as of @at
Δxkv map[Key]Δstring // full tree-diff against parent
zblkDataTab map[zodb.Oid]string // full snapshot of all ZBlk data @at
// δzblkData map[zodb.Oid]Δstring // full diff for zblkData against parent XXX ?
}
// NewT creates new T.
func NewT(t *testing.T) *T {
X := exc.Raiseif
t.Helper()
tt := &T{T: t}
var err error
work := t.TempDir()
tt.treeSrv, err = StartTreeSrv(work + "/1.fs"); X(err)
t.Cleanup(func() {
err := tt.treeSrv.Close(); X(err)
})
tt.zstor, err = zodb.Open(context.Background(), tt.treeSrv.zurl, &zodb.OpenOptions{
ReadOnly: true,
}); X(err)
t.Cleanup(func() {
err := tt.zstor.Close(); X(err)
})
tt.DB = zodb.NewDB(tt.zstor, &zodb.DBOptions{
// We need objects to be cached, because otherwise it is too
// slow to run the test for many testcases, especially
// xverifyΔBTail_rebuild.
CacheControl: &tZODBCacheEverything{},
})
t.Cleanup(func() {
err := tt.DB.Close(); X(err)
})
head := tt.treeSrv.head
t1 := &Commit{
Tree: "T/B:", // treegen.py creates the tree as initially empty
Prev: nil,
At: head,
Xkv: xGetTree(tt.DB, head, tt.Root()),
zblkDataTab: xGetBlkDataTab(tt.DB, head),
ΔZ: nil,
Δxkv: nil,
}
tt.commitv = []*Commit{t1}
return tt
}
// tZODBCacheEverything is a workaround for ZODB/go not implementing a real
// live cache for now: Objects get dropped on PDeactivate if cache
// control does not say we need the object to stay in the cache.
// XXX place
type tZODBCacheEverything struct{}
func (_ *tZODBCacheEverything) PCacheClassify(_ zodb.IPersistent) zodb.PCachePolicy {
return zodb.PCachePinObject | zodb.PCacheKeepState
}
// Root returns OID of root tree node.
func (t *T) Root() zodb.Oid {
return t.treeSrv.treeRoot
}
// Head returns most-recently committed tree.
func (t *T) Head() *Commit {
return t.commitv[len(t.commitv)-1]
}
// CommitTree calls t.treeSrv.Commit and returns Commit corresponding to committed transaction.
// XXX naming -> Commit ?
func (t *T) CommitTree(tree string) *Commit {
// TODO X = FatalIf
X := exc.Raiseif
defer exc.Contextf("commit %s", tree)
watchq := make(chan zodb.Event)
at0 := t.zstor.AddWatch(watchq)
defer t.zstor.DelWatch(watchq)
tid, err := t.treeSrv.Commit(tree); X(err)
if !(tid > at0) {
exc.Raisef("treegen -> %s ; want > %s", tid, at0)
}
zevent := <-watchq
δZ := zevent.(*zodb.EventCommit)
if δZ.Tid != tid {
exc.Raisef("treegen -> %s ; watchq -> %s", tid, δZ)
}
// load tree structure from the db
// if the tree does not exist yet - report its structure as empty
var xkv RBucketSet
if tree != DEL {
xkv = xGetTree(t.DB, δZ.Tid, t.Root())
} else {
// empty tree with real treeRoot as oid even though the tree is
// deleted. Having real oid in the root tests that after deletion,
// root of the tree stays in the tracking set. We need root to stay
// in trackSet because e.g. in
//
// T1 -> ø -> T2
//
// where the tree is first deleted, then recreated, without root
// staying in trackSet after ->ø, treediff will notice nothing when
// it comes to ->T2.
xkv = RBucketSet{
&RBucket{
Oid: zodb.InvalidOid,
Parent: &RTree{
Oid: t.Root(), // NOTE oid is not InvalidOid
Parent: nil,
},
Keycov: blib.KeyRange{KeyMin, KeyMax},
KV: map[Key]string{},
},
}
}
ttree := &Commit{
Tree: tree,
At: δZ.Tid,
ΔZ: δZ,
Xkv: xkv,
zblkDataTab: xGetBlkDataTab(t.DB, δZ.Tid),
}
tprev := t.Head()
ttree.Prev = tprev
ttree.Δxkv = kvdiff(tprev.Xkv.Flatten(), ttree.Xkv.Flatten())
t.commitv = append(t.commitv, ttree)
return ttree
}
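A hypothetical test sketch (not part of this commit) showing how this environment is intended to be driven from a test. The xbtreetest/init import path is presumed from the package comment above, and the topology string passed to CommitTree is a placeholder only - the real encoding is defined by treegen:

package xbtreetest_test

import (
	"testing"

	"lab.nexedi.com/nexedi/wendelin.core/wcfs/internal/xbtree/xbtreetest"
	_ "lab.nexedi.com/nexedi/wendelin.core/wcfs/internal/xbtree/xbtreetest/init" // presumed path; initializes xbtreetest at runtime
)

func TestSketch(t *testing.T) {
	tt := xbtreetest.NewT(t)

	t0 := tt.Head()               // initial empty tree committed by treegen
	t1 := tt.CommitTree("T/B1:a") // hypothetical topology string
	if t1.Prev != t0 {
		t.Fatalf("Prev is not linked to the previous commit")
	}
	_ = t1.Δxkv // kv-diff of t1 against t0
}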
// xGetBlkDataTab loads all ZBlk from db@at.
//
// it returns {} oid -> blkdata.
func xGetBlkDataTab(db *zodb.DB, at zodb.Tid) map[zodb.Oid]string {
defer exc.Contextf("%s: @%s: get blkdatatab", db.Storage().URL(), at)
X := exc.Raiseif
blkDataTab := map[zodb.Oid]string{}
txn, ctx := transaction.New(context.Background())
defer txn.Abort()
zconn, err := db.Open(ctx, &zodb.ConnOptions{At: at}); X(err)
xzroot, err := zconn.Get(ctx, 0); X(err)
zroot, ok := xzroot.(*zodb.Map)
if !ok {
exc.Raisef("root: expected %s, got %s", xzodb.TypeOf(zroot), xzodb.TypeOf(xzroot))
}
err = zroot.PActivate(ctx); X(err)
defer zroot.PDeactivate()
xzblkdir, ok := zroot.Data["treegen/values"]
if !ok {
exc.Raisef("root['treegen/values'] missing")
}
zblkdir, ok := xzblkdir.(*zodb.Map)
if !ok {
exc.Raisef("root['treegen/values']: expected %s, got %s", xzodb.TypeOf(zblkdir), xzodb.TypeOf(xzblkdir))
}
err = zblkdir.PActivate(ctx); X(err)
defer zblkdir.PDeactivate()
for k, xzblk := range zblkdir.Data {
zblk, ok := xzblk.(zodb.IPersistent)
if !ok {
exc.Raisef("root['treegen/values'][%q]: expected %s, got %s", k, xzodb.TypeOf(zblk), xzodb.TypeOf(xzblk))
}
oid := zblk.POid()
data := xzgetBlkData(ctx, zconn, oid)
blkDataTab[oid] = data
}
return blkDataTab
}
// XGetBlkData loads blk data for ZBlk<oid> @t.at
//
// For speed the load is done via preloaded t.blkDataTab instead of access to the DB.
func (t *Commit) XGetBlkData(oid zodb.Oid) string {
if oid == VDEL {
return DEL
}
data, ok := t.zblkDataTab[oid]
if !ok {
exc.Raisef("getBlkData ZBlk<%s> @%s: no such ZBlk", oid, t.At)
}
return data
}
// xGetTree loads Tree from zurl@at->obj<root>.
//
// Tree values must be ZBlk objects; their data is returned instead of references to the ZBlk objects.
// The tree is returned structured by buckets as
//
// [] [lo,hi){k->v} k↑
func xGetTree(db *zodb.DB, at zodb.Tid, root zodb.Oid) RBucketSet {
defer exc.Contextf("%s: @%s: get tree %s", db.Storage().URL(), at, root)
X := exc.Raiseif
txn, ctx := transaction.New(context.Background())
defer txn.Abort()
zconn, err := db.Open(ctx, &zodb.ConnOptions{At: at}); X(err)
xztree, err := zconn.Get(ctx, root); X(err)
ztree, ok := xztree.(*Tree)
if !ok {
exc.Raisef("expected %s, got %s", xzodb.TypeOf(ztree), xzodb.TypeOf(xztree))
}
rbucketv := RBucketSet{}
xwalkDFS(ctx, KeyMin, KeyMax, ztree, func(rb *RBucket) {
rbucketv = append(rbucketv, rb)
})
if len(rbucketv) == 0 { // empty tree -> [-∞,∞){}
etree := &RTree{
Oid: root,
Parent: nil,
}
ebucket := &RBucket{
Oid: zodb.InvalidOid,
Parent: etree,
Keycov: blib.KeyRange{KeyMin, KeyMax},
KV: map[Key]string{},
}
rbucketv = RBucketSet{ebucket}
}
return rbucketv
}
// xwalkDFS walks ztree in depth-first order, calling the bvisit callback on each visited bucket node.
func xwalkDFS(ctx context.Context, lo, hi_ Key, ztree *Tree, bvisit func(*RBucket)) {
_xwalkDFS(ctx, lo, hi_, ztree, /*rparent*/nil, bvisit)
}
func _xwalkDFS(ctx context.Context, lo, hi_ Key, ztree *Tree, rparent *RTree, bvisit func(*RBucket)) {
X := exc.Raiseif
err := ztree.PActivate(ctx); X(err)
defer ztree.PDeactivate()
rtree := &RTree{Oid: ztree.POid(), Parent: rparent}
// [i].Key ≤ [i].Child.*.Key < [i+1].Key i ∈ [0, len([]))
//
// [0].Key = -∞ ; always returned so
// [len(ev)].Key = +∞ ; should be assumed so
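// Illustration (assuming treegen.py's topology encoding): for "T2/B1:a-B3:c"
// the root has entries with keys (-∞, 2), so the walk below emits buckets
// [-∞,1]{1:a} and [2,∞]{3:c}, i.e. entry i covers
// [ev[i].Key(), ev[i+1].Key()-1] with the boundary keys taken as ∓∞.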
ev := ztree.Entryv()
for i := range ev {
xlo := lo; if i > 0 { xlo = ev[i].Key() }
xhi_ := hi_; if i+1 < len(ev) { xhi_ = ev[i+1].Key() - 1 }
tchild, ok := ev[i].Child().(*Tree)
if ok {
_xwalkDFS(ctx, xlo, xhi_, tchild, rtree, bvisit)
continue
}
zbucket := ev[i].Child().(*Bucket)
err = zbucket.PActivate(ctx); X(err)
defer zbucket.PDeactivate()
bkv := make(map[Key]string)
bentryv := zbucket.Entryv()
for _, __ := range bentryv {
k := __.Key()
xv := __.Value()
pv, ok := xv.(zodb.IPersistent)
if !ok {
exc.Raisef("[%d] -> %s; want IPersistent", k, xzodb.TypeOf(xv))
}
data, err := ZGetBlkData(ctx, pv.PJar(), pv.POid())
if err != nil {
exc.Raisef("[%d]: %s", k, err)
}
bkv[k] = data
}
b := &RBucket{
Oid: zbucket.POid(),
Parent: rtree,
Keycov: blib.KeyRange{xlo, xhi_},
KV: bkv,
}
bvisit(b)
}
}
// Copyright (C) 2020-2021 Nexedi SA and Contributors.
// Kirill Smelkov <kirr@nexedi.com>
//
// This program is free software: you can Use, Study, Modify and Redistribute
// it under the terms of the GNU General Public License version 3, or (at your
// option) any later version, as published by the Free Software Foundation.
//
// You can also Link and Combine this program with other software covered by
// the terms of any of the Free Software licenses or any of the Open Source
// Initiative approved licenses and Convey the resulting work. Corresponding
// source of such a combination shall include the source code for all other
// software used.
//
// This program is distributed WITHOUT ANY WARRANTY; without even the implied
// warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
//
// See COPYING file for full licensing terms.
// See https://www.nexedi.com/licensing for rationale and options.
package xbtreetest
// treegen.go provides functionality:
//
// - to commit a particular BTree topology into ZODB, and
// - to generate a set of random tree topologies that all correspond to a particular {k->v} dict.
//
// treegen.py is used as a helper for both tasks.
import (
"bufio"
"fmt"
"io"
"os"
"os/exec"
"path/filepath"
"regexp"
"strings"
"lab.nexedi.com/kirr/go123/my"
"lab.nexedi.com/kirr/go123/xerr"
"lab.nexedi.com/kirr/neo/go/zodb"
)
// TreeGenSrv represents a connection to a running `treegen ...` server.
type TreeGenSrv struct {
argv []string
pysrv *exec.Cmd // spawned `treegen ...`
pyin io.WriteCloser // input to pysrv
pyoutRaw io.ReadCloser // output from pysrv
pyout *bufio.Reader // buffered ^^^
}
// TreeSrv represents a connection to a running `treegen trees` server.
//
// Create it with StartTreeSrv(zurl).
// - Commit(treeTopology) -> tid
type TreeSrv struct {
*TreeGenSrv
zurl string
treeRoot zodb.Oid // oid of the tree treegen works on
head zodb.Tid // last made commit
}
// AllStructsSrv represents a connection to a running `treegen allstructs` server.
//
// Create it with StartAllStructsSrv().
// - AllStructs(kv, maxdepth, maxsplit, n, seed) -> []treeTopology
type AllStructsSrv struct {
*TreeGenSrv
}
// StartTreeGenSrv spawns `treegen ...` server.
func StartTreeGenSrv(argv ...string) (_ *TreeGenSrv, hello string, err error) {
defer xerr.Contextf(&err, "treesrv %v: start", argv)
// spawn `treegen ...`
tg := &TreeGenSrv{argv: argv}
tg.pysrv = exec.Command(filepath.Dir(my.File())+"/treegen.py", argv...)
tg.pyin, err = tg.pysrv.StdinPipe()
if err != nil {
return nil, "", err
}
tg.pyoutRaw, err = tg.pysrv.StdoutPipe()
if err != nil {
return nil, "", err
}
tg.pyout = bufio.NewReader(tg.pyoutRaw)
tg.pysrv.Stderr = os.Stderr // no redirection
err = tg.pysrv.Start()
if err != nil {
return nil, "", err
}
// wait for hello message and return it
defer func() {
if err != nil {
tg.Close() // ignore error
}
}()
defer xerr.Context(&err, "handshake")
hello, err = tg.pyout.ReadString('\n')
if err != nil {
if err == io.EOF {
err = io.ErrUnexpectedEOF
}
return nil, "", err
}
hello = strings.TrimSuffix(hello, "\n")
return tg, hello, nil
}
// Close shuts down the treegen server.
func (tg *TreeGenSrv) Close() (err error) {
defer xerr.Contextf(&err, "treegen %v: close", tg.argv)
err1 := tg.pyin.Close()
err2 := tg.pyoutRaw.Close()
err3 := tg.pysrv.Wait()
return xerr.Merge(err1, err2, err3)
}
// StartTreeSrv spawns `treegen trees` server.
func StartTreeSrv(zurl string) (_ *TreeSrv, err error) {
defer xerr.Contextf(&err, "tree.srv %s: start", zurl)
tgSrv, hello, err := StartTreeGenSrv("trees", zurl)
if err != nil {
return nil, err
}
tg := &TreeSrv{TreeGenSrv: tgSrv, zurl: zurl}
defer func() {
if err != nil {
tgSrv.Close() // ignore error
}
}()
// tree.srv start @<at> root=<root>
defer xerr.Contextf(&err, "invalid hello %q", hello)
startRe := regexp.MustCompile(`^tree.srv start @([^ ]+) root=([^ ]+)$`)
m := startRe.FindStringSubmatch(hello)
if m == nil {
return nil, fmt.Errorf("unexpected format")
}
tg.head, err = zodb.ParseTid(m[1]) // <at>
if err != nil {
return nil, fmt.Errorf("tid: %s", err)
}
tg.treeRoot, err = zodb.ParseOid(m[2]) // <root>
if err != nil {
return nil, fmt.Errorf("root: %s", err)
}
return tg, nil
}
// StartAllStructsSrv spawns `treegen allstructs` server.
func StartAllStructsSrv() (_ *AllStructsSrv, err error) {
defer xerr.Context(&err, "allstructs.srv: start")
tgSrv, hello, err := StartTreeGenSrv("allstructs")
if err != nil {
return nil, err
}
sg := &AllStructsSrv{TreeGenSrv: tgSrv}
defer func() {
if err != nil {
tgSrv.Close() // ignore error
}
}()
defer xerr.Contextf(&err, "invalid hello %q", hello)
if hello != "# allstructs.srv start" {
return nil, fmt.Errorf("unexpected format")
}
return sg, nil
}
// Commit creates a new commit with the underlying tree changed to the specified tree topology.
func (tg *TreeSrv) Commit(tree string) (_ zodb.Tid, err error) {
defer xerr.Contextf(&err, "tree.srv %s: commit %s", tg.zurl, tree)
_, err = io.WriteString(tg.pyin, tree + "\n")
if err != nil {
return zodb.InvalidTid, err
}
reply, err := tg.pyout.ReadString('\n')
if err != nil {
if err == io.EOF {
err = io.ErrUnexpectedEOF
}
return zodb.InvalidTid, err
}
reply = strings.TrimSuffix(reply, "\n")
tid, err := zodb.ParseTid(reply)
if err != nil {
return zodb.InvalidTid, fmt.Errorf("invalid reply: %s", err)
}
tg.head = tid
return tid, nil
}
// AllStructs returns the response from `treegen allstructs`.
func (tg *AllStructsSrv) AllStructs(kv map[Key]string, maxdepth, maxsplit, n int, seed int64) (_ []string, err error) {
req := fmt.Sprintf("%d %d %d/%d %s", maxdepth, maxsplit, n, seed, kvtxt(kv))
defer xerr.Contextf(&err, "allstructs.srv: %s ", req)
_, err = io.WriteString(tg.pyin, req + "\n")
if err != nil {
return nil, err
}
structv := []string{}
for {
reply, err := tg.pyout.ReadString('\n')
if err != nil {
if err == io.EOF {
err = io.ErrUnexpectedEOF
}
return nil, err
}
reply = strings.TrimSuffix(reply, "\n")
if reply == "# ----" {
return structv, nil // end of response
}
if strings.HasPrefix(reply, "#") {
continue // comment
}
structv = append(structv, reply)
}
}
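// Example usage (illustrative sketch; error handling elided; zurl names an
// assumed storage URL and the topology string is only an example in
// treegen.py's encoding):
//
//	tsrv, _ := StartTreeSrv(zurl)               // `treegen trees` on zurl
//	defer tsrv.Close()
//	tid, _ := tsrv.Commit("T2/B1:a-B3:c")       // -> tid of the new commit
//	fmt.Println("committed @", tid)
//
//	ssrv, _ := StartAllStructsSrv()             // `treegen allstructs`
//	defer ssrv.Close()
//	// 10 random topologies (maxdepth=2, maxsplit=1, seed=4) for {1:a, 3:c}
//	structv, _ := ssrv.AllStructs(map[Key]string{1: "a", 3: "c"}, 2, 1, 10, 4)
//	fmt.Println(structv)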
// Copyright (C) 2020-2021 Nexedi SA and Contributors.
// Kirill Smelkov <kirr@nexedi.com>
//
// This program is free software: you can Use, Study, Modify and Redistribute
// it under the terms of the GNU General Public License version 3, or (at your
// option) any later version, as published by the Free Software Foundation.
//
// You can also Link and Combine this program with other software covered by
// the terms of any of the Free Software licenses or any of the Open Source
// Initiative approved licenses and Convey the resulting work. Corresponding
// source of such a combination shall include the source code for all other
// software used.
//
// This program is distributed WITHOUT ANY WARRANTY; without even the implied
// warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
//
// See COPYING file for full licensing terms.
// See https://www.nexedi.com/licensing for rationale and options.
// Package xbtreetest provides infrastructure for testing LOBTree with ZBlk values.
// XXX -> treetest?
package xbtreetest
import (
"fmt"
"lab.nexedi.com/kirr/neo/go/zodb"
"lab.nexedi.com/nexedi/wendelin.core/wcfs/internal/set"
"lab.nexedi.com/nexedi/wendelin.core/wcfs/internal/xbtree/blib"
)
// XXX instead of generics
type Tree = blib.Tree
type Bucket = blib.Bucket
type Node = blib.Node
type TreeEntry = blib.TreeEntry
type BucketEntry = blib.BucketEntry
type Key = blib.Key
const KeyMax = blib.KeyMax
const KeyMin = blib.KeyMin
type setKey = set.I64
// XXX dup from xbtree (to avoid import cycle)
const VDEL = zodb.InvalidOid
func panicf(format string, argv ...interface{}) {
panic(fmt.Sprintf(format, argv...))
}
// Copyright (C) 2020-2021 Nexedi SA and Contributors.
// Kirill Smelkov <kirr@nexedi.com>
//
// This program is free software: you can Use, Study, Modify and Redistribute
// it under the terms of the GNU General Public License version 3, or (at your
// option) any later version, as published by the Free Software Foundation.
//
// You can also Link and Combine this program with other software covered by
// the terms of any of the Free Software licenses or any of the Open Source
// Initiative approved licenses and Convey the resulting work. Corresponding
// source of such a combination shall include the source code for all other
// software used.
//
// This program is distributed WITHOUT ANY WARRANTY; without even the implied
// warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
//
// See COPYING file for full licensing terms.
// See https://www.nexedi.com/licensing for rationale and options.
package xbtreetest
// access to ZBlk data
import (
"context"
"lab.nexedi.com/kirr/go123/exc"
"lab.nexedi.com/kirr/neo/go/transaction"
"lab.nexedi.com/kirr/neo/go/zodb"
_ "lab.nexedi.com/kirr/neo/go/zodb/wks"
)
// ZBlk-related functions are imported at runtime by package xbtreetest/init
var (
ZTreeGetBlkData func(context.Context, *Tree, Key) (string, bool, []Node, error)
ZGetBlkData func(context.Context, *zodb.Connection, zodb.Oid) (string, error)
)
func zassertInitDone() {
if ZTreeGetBlkData == nil {
panic("xbtreetest/zdata not initialized -> import xbtreetest/init to fix")
}
}
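// The expected wiring pattern (hypothetical sketch; the actual code lives in
// package xbtreetest/init and may differ) is a plain assignment from an init
// function of that package:
//
//	func init() {
//		xbtreetest.ZGetBlkData     = <real ZBlk data loader>
//		xbtreetest.ZTreeGetBlkData = <real Tree+ZBlk data loader>
//	}
//
// so importing xbtreetest/init for side effects makes both function variables
// non-nil before zassertInitDone is reached.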
// xzgetBlkData loads block data from ZBlk object specified by its oid.
func xzgetBlkData(ctx context.Context, zconn *zodb.Connection, zblkOid zodb.Oid) string {
zassertInitDone()
X := exc.Raiseif
if zblkOid == VDEL {
return DEL
}
data, err := ZGetBlkData(ctx, zconn, zblkOid); X(err)
return string(data)
}
// xzgetBlkDataAt loads block data from ZBlk object specified by oid@at.
func xzgetBlkDataAt(db *zodb.DB, zblkOid zodb.Oid, at zodb.Tid) string {
zassertInitDone()
X := exc.Raiseif
txn, ctx := transaction.New(context.Background())
defer txn.Abort()
zconn, err := db.Open(ctx, &zodb.ConnOptions{At: at}); X(err)
return xzgetBlkData(ctx, zconn, zblkOid)
}
......@@ -30,6 +30,7 @@ import (
"lab.nexedi.com/kirr/neo/go/transaction"
"lab.nexedi.com/kirr/neo/go/zodb"
"lab.nexedi.com/nexedi/wendelin.core/wcfs/internal/xbtree/blib"
"lab.nexedi.com/nexedi/wendelin.core/wcfs/internal/xtail"
)
......@@ -89,17 +90,17 @@ type ΔBtail struct {
// includes all changed objects, not only tracked ones.
δZtail *zodb.ΔTail
vδBroots []ΔBroots // [] (rev↑, roots changed in this rev)
vδBroots []_ΔBroots // [] (rev↑, roots changed in this rev)
vδTbyRoot map[zodb.Oid]*ΔTtail // {} root -> [] k/v change history; only for keys ∈ tracked subset XXX -> byRoot?
// set of tracked nodes as of @head state.
// For this set all vδT are fully computed.
// The set of nodes that were requested to be tracked, but were not yet
// taken into account, is kept in ΔTtail.trackNew & co.
trackSet PPTreeSubSet
trackSet blib.PPTreeSubSet
// set of trees for which .trackNew is non-empty
trackNewRoots SetOid
trackNewRoots setOid
// handle to make connections to access database.
// TODO allow client to optionally provide zconnOld/zconnNew on e.g. Update()
......@@ -114,7 +115,7 @@ type ΔTtail struct {
// set of nodes that were requested to be tracked in this tree, but for
// which vδT was not yet rebuilt
trackNew PPTreeSubSet
trackNew blib.PPTreeSubSet
// XXX + trackNewKeys RangedKeySet
// {}k/v @tail for keys that are changed in (tail, head].
......@@ -124,12 +125,12 @@ type ΔTtail struct {
lastRevOf map[Key]zodb.Tid // {} key -> last
}
// ΔBroots represents roots-only part of ΔB.
// _ΔBroots represents roots-only part of ΔB.
//
// It describes which trees were changed, but does not provide δkv details for changed trees.
type ΔBroots struct {
type _ΔBroots struct {
Rev zodb.Tid
ΔRoots SetOid // which roots changed in this revision
ΔRoots setOid // which roots changed in this revision
}
// ΔB represents a change in BTrees space.
......@@ -158,8 +159,8 @@ func NewΔBtail(at0 zodb.Tid, db *zodb.DB) *ΔBtail {
δZtail: zodb.NewΔTail(at0),
vδBroots: nil,
vδTbyRoot: map[zodb.Oid]*ΔTtail{},
trackSet: PPTreeSubSet{},
trackNewRoots: SetOid{},
trackSet: blib.PPTreeSubSet{},
trackNewRoots: setOid{},
db: db,
}
}
......@@ -167,7 +168,7 @@ func NewΔBtail(at0 zodb.Tid, db *zodb.DB) *ΔBtail {
// newΔTtail creates new empty ΔTtail object.
func newΔTtail() *ΔTtail {
return &ΔTtail{
trackNew: PPTreeSubSet{},
trackNew: blib.PPTreeSubSet{},
KVAtTail: make(map[Key]Value),
lastRevOf: make(map[Key]zodb.Tid),
}
......@@ -184,9 +185,9 @@ func (orig *ΔBtail) Clone() *ΔBtail {
}
// vδBroots
klon.vδBroots = make([]ΔBroots, 0, len(orig.vδBroots))
klon.vδBroots = make([]_ΔBroots, 0, len(orig.vδBroots))
for _, origδBroots := range orig.vδBroots {
klonδBroots := ΔBroots{
klonδBroots := _ΔBroots{
Rev: origδBroots.Rev,
ΔRoots: origδBroots.ΔRoots.Clone(),
}
......@@ -236,8 +237,6 @@ func (orig *ΔTtail) Clone() *ΔTtail {
func (δBtail *ΔBtail) Head() zodb.Tid { return δBtail.δZtail.Head() }
func (δBtail *ΔBtail) Tail() zodb.Tid { return δBtail.δZtail.Tail() }
// XXX SliceByRev?
// Track adds tree path to tracked set.
//
// path[0] signifies tree root.
......@@ -253,6 +252,7 @@ func (δBtail *ΔBtail) Tail() zodb.Tid { return δBtail.δZtail.Tail() }
//
// XXX catch cycles on add?
// XXX no need to pass in key? (-> all keys, covered by leaf keyrange, will be added to tracking set of keys)
// XXX no need to return error?
func (δBtail *ΔBtail) Track(key Key, nodePath []Node) error { // XXX Tree|Bucket; path[0] = root
path := nodePathToPath(nodePath)
......@@ -272,7 +272,7 @@ func (δBtail *ΔBtail) track(key Key, path []zodb.Oid) error {
// empty artificial tree. We need to do the normalization because we
// later check whether leaf path[-1] ∈ trackSet and without
// normalization path[-1] can be InvalidOid.
path = normPath(path)
path = blib.NormPath(path)
if len(path) == 0 {
return nil // empty tree
}
......@@ -322,7 +322,7 @@ func (δBtail *ΔBtail) rebuildAll() (err error) {
δBtail.vδBroots_Update(root, δrevSet)
}
δBtail.trackNewRoots = SetOid{}
δBtail.trackNewRoots = setOid{}
return nil
}
......@@ -335,7 +335,7 @@ func (δBtail *ΔBtail) rebuildAll() (err error) {
// - set of revisions for which new entries in .vδT have been created.
//
// XXX place
func (δTtail *ΔTtail) rebuild(root zodb.Oid, δZtail *zodb.ΔTail, db *zodb.DB) (δtrackSet PPTreeSubSet, δrevSet SetTid, err error) {
func (δTtail *ΔTtail) rebuild(root zodb.Oid, δZtail *zodb.ΔTail, db *zodb.DB) (δtrackSet blib.PPTreeSubSet, δrevSet setTid, err error) {
defer xerr.Context(&err, "ΔTtail rebuild")
// XXX locking
......@@ -343,18 +343,18 @@ func (δTtail *ΔTtail) rebuild(root zodb.Oid, δZtail *zodb.ΔTail, db *zodb.DB
tracefΔBtail("trackNew: %v\n", δTtail.trackNew)
trackNew := δTtail.trackNew
δTtail.trackNew = PPTreeSubSet{}
δTtail.trackNew = blib.PPTreeSubSet{}
if len(trackNew) == 0 {
return nil, nil, nil
}
δrevSet = SetTid{}
δrevSet = setTid{}
// go backwards and merge vδT <- treediff(lo..hi/trackNew)
vδZ := δZtail.Data()
for {
δtkeycov := &RangedKeySet{} // all keys coming into tracking set during this lo<-hi scan
δtkeycov := &blib.RangedKeySet{} // all keys coming into tracking set during this lo<-hi scan
trackNewCur := trackNew.Clone() // trackNew adjusted as of when going to i<- entry
for i := len(vδZ)-1; i>=0; i-- {
δZ := vδZ[i]
......@@ -412,7 +412,7 @@ func (δTtail *ΔTtail) rebuild(root zodb.Oid, δZtail *zodb.ΔTail, db *zodb.DB
// widenTrackNew widens trackNew to cover δtkeycov.
// XXX -> widenTrackSet?
func widenTrackNew(trackNew PPTreeSubSet, δtkeycov *RangedKeySet, root zodb.Oid, at zodb.Tid, db *zodb.DB) (err error) {
func widenTrackNew(trackNew blib.PPTreeSubSet, δtkeycov *blib.RangedKeySet, root zodb.Oid, at zodb.Tid, db *zodb.DB) (err error) {
// XXX errctx, debug
defer xerr.Contextf(&err, "widenTrackNew tree<%s> @%s +%s", root, at, δtkeycov)
......@@ -428,19 +428,19 @@ func widenTrackNew(trackNew PPTreeSubSet, δtkeycov *RangedKeySet, root zodb.Oid
}
tree := xtree.(*Tree) // must succeed XXX better explicit panic?
top := &nodeInRange{prefix: nil, lo: KeyMin, hi_: KeyMax, node: tree}
top := &nodeInRange{prefix: nil, keycov: blib.KeyRange{KeyMin, KeyMax}, node: tree}
V := rangeSplit{top}
for _, r := range δtkeycov.AllRanges() {
lo := r.lo
lo := r.Lo
for {
b, err := V.GetToLeaf(ctx, lo); /*X*/ if err != nil { return err }
trackNew.AddPath(b.Path())
// continue with next right bucket until r coverage is complete
if r.hi_ <= b.hi_ {
if r.Hi_ <= b.keycov.Hi_ {
break
}
lo = b.hi_ + 1
lo = b.keycov.Hi_ + 1
}
}
return nil
......@@ -451,7 +451,7 @@ func widenTrackNew(trackNew PPTreeSubSet, δtkeycov *RangedKeySet, root zodb.Oid
//
// δtrackNew/δtkeycov represents how trackNew changes when going through `atPrev <- δZ.Rev` .
// newRevEntry indicates whether δZ.Rev was not there before in .vδT and new corresponding δT entry was created.
func (δTtail *ΔTtail) rebuild1(atPrev zodb.Tid, δZ zodb.ΔRevEntry, trackNew PPTreeSubSet, db *zodb.DB) (δtrackNew *ΔPPTreeSubSet, δtkeycov *RangedKeySet, newRevEntry bool, err error) {
func (δTtail *ΔTtail) rebuild1(atPrev zodb.Tid, δZ zodb.ΔRevEntry, trackNew blib.PPTreeSubSet, db *zodb.DB) (δtrackNew *blib.ΔPPTreeSubSet, δtkeycov *blib.RangedKeySet, newRevEntry bool, err error) {
defer xerr.Contextf(&err, "rebuild1 %s<-%s", atPrev, δZ.Rev)
debugfΔBtail("\n rebuild1 @%s <- @%s\n", atPrev, δZ.Rev)
......@@ -468,14 +468,14 @@ func (δTtail *ΔTtail) rebuild1(atPrev zodb.Tid, δZ zodb.ΔRevEntry, trackNew
// skip opening DB connections if there is no change to this tree
if len(δtopsByRoot) == 0 {
return NewΔPPTreeSubSet(), &RangedKeySet{}, false, nil
return blib.NewΔPPTreeSubSet(), &blib.RangedKeySet{}, false, nil
}
if len(δtopsByRoot) != 1 {
panicf("BUG: δtopsByRoot has > 1 entries: %v\ntrackNew: %v\nδZ: %v", δtopsByRoot, trackNew, δZ)
}
var root zodb.Oid
var δtops SetOid
var δtops setOid
for root_, δtops_ := range δtopsByRoot {
root = root_
δtops = δtops_
......@@ -561,7 +561,7 @@ func (δBtail *ΔBtail) Update(δZ *zodb.EventCommit) (_ ΔB, err error) {
// δtkeycov1 != ø -> rebuild δTtail with trackNew ~= δtkeycov1
if !δT1.δtkeycov1.Empty() && δBtail.δZtail.Len() > 1 {
trackNew := PPTreeSubSet{}
trackNew := blib.PPTreeSubSet{}
err := widenTrackNew(trackNew, δT1.δtkeycov1, root, δBtail.Head(), δBtail.db)
if err != nil {
return ΔB{}, err
......@@ -595,11 +595,11 @@ func (δBtail *ΔBtail) Update(δZ *zodb.EventCommit) (_ ΔB, err error) {
}
// vδBroots += δB
δroots := SetOid{}
δroots := setOid{}
for root := range δB.ΔByRoot {
δroots.Add(root)
}
δBtail.vδBroots = append(δBtail.vδBroots, ΔBroots{Rev: δB.Rev, ΔRoots: δroots})
δBtail.vδBroots = append(δBtail.vδBroots, _ΔBroots{Rev: δB.Rev, ΔRoots: δroots})
return δB, err
}
......@@ -612,8 +612,8 @@ type _ΔBUpdate1 struct {
ByRoot map[zodb.Oid]*_ΔTUpdate1
}
type _ΔTUpdate1 struct {
δtkeycov1 *RangedKeySet // {} root -> δtrackedKeys after first treediff (always grow)
δtrack *ΔPPTreeSubSet // XXX kill (not used)
δtkeycov1 *blib.RangedKeySet // {} root -> δtrackedKeys after first treediff (always grow)
δtrack *blib.ΔPPTreeSubSet // XXX kill (not used)
}
func (δBtail *ΔBtail) _Update1(δZ *zodb.EventCommit) (δB1 _ΔBUpdate1, err error) {
headOld := δBtail.Head()
......@@ -682,7 +682,7 @@ func (δBtail *ΔBtail) _Update1(δZ *zodb.EventCommit) (δB1 _ΔBUpdate1, err e
// changed entries with δrevSet revisions.
//
// XXX place TODO δrevSet -> []rev↑
func (δBtail *ΔBtail) vδBroots_Update(root zodb.Oid, δrevSet SetTid) {
func (δBtail *ΔBtail) vδBroots_Update(root zodb.Oid, δrevSet setTid) {
// XXX locking
for rev := range δrevSet {
l := len(δBtail.vδBroots)
......@@ -690,10 +690,10 @@ func (δBtail *ΔBtail) vδBroots_Update(root zodb.Oid, δrevSet SetTid) {
return rev <= δBtail.vδBroots[k].Rev
})
if j == l || δBtail.vδBroots[j].Rev != rev {
δBroots := ΔBroots{Rev: rev, ΔRoots: SetOid{}}
δBroots := _ΔBroots{Rev: rev, ΔRoots: setOid{}}
// insert(@j, δBroots)
δBtail.vδBroots = append(δBtail.vδBroots[:j],
append([]ΔBroots{δBroots},
append([]_ΔBroots{δBroots},
δBtail.vδBroots[j:]...)...)
}
δBroots := δBtail.vδBroots[j]
......@@ -709,7 +709,7 @@ func (δBtail *ΔBtail) ForgetPast(revCut zodb.Tid) {
// go through vδBroots till revcut -> find which trees to trim -> trim ΔTtails.
totrim := SetOid{} // roots whose ΔTtail has changes ≤ revCut
totrim := setOid{} // roots whose ΔTtail has changes ≤ revCut
icut := 0
for ; icut < len(δBtail.vδBroots); icut++ {
δBroots := δBtail.vδBroots[icut]
......@@ -720,7 +720,7 @@ func (δBtail *ΔBtail) ForgetPast(revCut zodb.Tid) {
}
// vδBroots[:icut] should be forgotten
δBtail.vδBroots = append([]ΔBroots(nil), δBtail.vδBroots[icut:]...)
δBtail.vδBroots = append([]_ΔBroots(nil), δBtail.vδBroots[icut:]...)
// trim roots
for root := range totrim {
......@@ -839,7 +839,7 @@ func (δBtail *ΔBtail) SliceByRootRev(root zodb.Oid, lo, hi zodb.Tid) /*readonl
if !ok {
return []ΔTree{}
}
// XXX dup data - because they can be further rebuild in parallel to caller using them
// XXX dup data - because they can be further rebuilt in parallel to caller using them
return δTtail.vδT // FIXME process lo, hi
}
......
......@@ -38,23 +38,17 @@ package xbtree
//
// TestΔBTail and TestΔBTailAllStructs implement approaches "a" and "b" correspondingly.
//
// testprog/treegen.py is used as helper to both:
// testprog/treegen.py is used as helper to both: XXX moved to xbtreetest
//
// - commit a particular BTree topology into ZODB, and
// - to generate set of random tree topologies that all correspond to particular {k->v} dict.
import (
"bufio"
"context"
"flag"
"fmt"
"io"
"math"
"math/rand"
"os"
"os/exec"
"reflect"
"regexp"
"sort"
"strings"
"testing"
......@@ -62,445 +56,52 @@ import (
"lab.nexedi.com/kirr/go123/exc"
"lab.nexedi.com/kirr/go123/xerr"
"lab.nexedi.com/kirr/neo/go/transaction"
"lab.nexedi.com/kirr/neo/go/zodb"
_ "lab.nexedi.com/kirr/neo/go/zodb/wks"
"lab.nexedi.com/nexedi/wendelin.core/wcfs/internal/xzodb"
"lab.nexedi.com/nexedi/wendelin.core/wcfs/internal/xbtree/blib"
"lab.nexedi.com/nexedi/wendelin.core/wcfs/internal/xbtree/xbtreetest"
)
// TreeGenSrv represents connection to running `treegen ...` server.
type TreeGenSrv struct {
argv []string
pysrv *exec.Cmd // spawned `treegen ...`
pyin io.WriteCloser // input to pysrv
pyoutRaw io.ReadCloser // output from pysrv
pyout *bufio.Reader // buffered ^^^
}
// TreeSrv represents connection to running `treegen trees` server.
//
// Create it with StartTreeSrv(zurl).
// - Commit(treeTopology) -> tid
type TreeSrv struct {
*TreeGenSrv
zurl string
treeRoot zodb.Oid // oid of the tree treegen works on
head zodb.Tid // last made commit
}
// AllStructsSrv represents connection to running `treegen allstructs` server.
//
// Create it with StartAllStructsSrv().
// - AllStructs(maxdepth, maxsplit, n, seed, kv1, kv2)
type AllStructsSrv struct {
*TreeGenSrv
}
// StartTreeGenSrv spawns `treegen ...` server.
func StartTreeGenSrv(argv ...string) (_ *TreeGenSrv, hello string, err error) {
defer xerr.Contextf(&err, "treesrv %v: start", argv)
// spawn `treegen ...`
tg := &TreeGenSrv{argv: argv}
tg.pysrv = exec.Command("./testprog/treegen.py", argv...)
tg.pyin, err = tg.pysrv.StdinPipe()
if err != nil {
return nil, "", err
}
tg.pyoutRaw, err = tg.pysrv.StdoutPipe()
if err != nil {
return nil, "", err
}
tg.pyout = bufio.NewReader(tg.pyoutRaw)
tg.pysrv.Stderr = os.Stderr // no redirection
err = tg.pysrv.Start()
if err != nil {
return nil, "", err
}
// wait for hello message and return it
defer func() {
if err != nil {
tg.Close() // ignore error
}
}()
defer xerr.Context(&err, "handshake")
hello, err = tg.pyout.ReadString('\n')
if err != nil {
if err == io.EOF {
err = io.ErrUnexpectedEOF
}
return nil, "", err
}
hello = strings.TrimSuffix(hello, "\n")
return tg, hello, nil
}
// Close shutdowns treegen server.
func (tg *TreeGenSrv) Close() (err error) {
defer xerr.Contextf(&err, "treegen %v: close", tg.argv)
err1 := tg.pyin.Close()
err2 := tg.pyoutRaw.Close()
err3 := tg.pysrv.Wait()
return xerr.Merge(err1, err2, err3)
}
// StartTreeSrv spawns `treegen trees` server.
func StartTreeSrv(zurl string) (_ *TreeSrv, err error) {
defer xerr.Contextf(&err, "tree.srv %s: start", zurl)
tgSrv, hello, err := StartTreeGenSrv("trees", zurl)
if err != nil {
return nil, err
}
tg := &TreeSrv{TreeGenSrv: tgSrv, zurl: zurl}
defer func() {
if err != nil {
tgSrv.Close() // ignore error
}
}()
// tree.srv start @<at> tree=<root>
defer xerr.Contextf(&err, "invalid hello %q", hello)
startRe := regexp.MustCompile(`^tree.srv start @([^ ]+) root=([^ ]+)$`)
m := startRe.FindStringSubmatch(hello)
if m == nil {
return nil, fmt.Errorf("unexpected format")
}
tg.head, err = zodb.ParseTid(m[1]) // <at>
if err != nil {
return nil, fmt.Errorf("tid: %s", err)
}
tg.treeRoot, err = zodb.ParseOid(m[2]) // <root>
if err != nil {
return nil, fmt.Errorf("root: %s", err)
}
return tg, nil
}
// StartAllStructsSrv spawns `treegen allstructs` server.
func StartAllStructsSrv() (_ *AllStructsSrv, err error) {
defer xerr.Context(&err, "allstructs.srv: start")
tgSrv, hello, err := StartTreeGenSrv("allstructs")
if err != nil {
return nil, err
}
sg := &AllStructsSrv{TreeGenSrv: tgSrv}
defer func() {
if err != nil {
tgSrv.Close() // ignore error
}
}()
defer xerr.Contextf(&err, "invalid hello %q", hello)
if hello != "# allstructs.srv start" {
return nil, fmt.Errorf("unexpected format")
}
return sg, nil
}
// Commit creates new commit with underlying tree changed to specified tree topology.
func (tg *TreeSrv) Commit(tree string) (_ zodb.Tid, err error) {
defer xerr.Contextf(&err, "tree.srv %s: commit %s", tg.zurl, tree)
type Δstring = xbtreetest.Δstring
_, err = io.WriteString(tg.pyin, tree + "\n")
if err != nil {
return zodb.InvalidTid, err
}
reply, err := tg.pyout.ReadString('\n')
if err != nil {
if err == io.EOF {
err = io.ErrUnexpectedEOF
}
return zodb.InvalidTid, err
}
reply = strings.TrimSuffix(reply, "\n")
tid, err := zodb.ParseTid(reply)
if err != nil {
return zodb.InvalidTid, fmt.Errorf("invalid reply: %s", err)
}
tg.head = tid
return tid, nil
}
// AllStructs returns response from `treegen allstructs`
func (tg *AllStructsSrv) AllStructs(kv map[Key]string, maxdepth, maxsplit, n int, seed int64) (_ []string, err error) {
req := fmt.Sprintf("%d %d %d/%d %s", maxdepth, maxsplit, n, seed, kvtxt(kv))
defer xerr.Contextf(&err, "allstructs.srv: %s ", req)
_, err = io.WriteString(tg.pyin, req + "\n")
if err != nil {
return nil, err
}
structv := []string{}
for {
reply, err := tg.pyout.ReadString('\n')
if err != nil {
if err == io.EOF {
err = io.ErrUnexpectedEOF
}
return nil, err
}
reply = strings.TrimSuffix(reply, "\n")
if reply == "# ----" {
return structv, nil // end of response
}
if strings.HasPrefix(reply, "#") {
continue // comment
}
structv = append(structv, reply)
}
}
// RTree represents Tree node covering [lo, hi_] key range in its parent tree.
// XXX actually no coverage here -> kill? -> change to just `path []zodb.Oid` in RBucket?
type RTree struct {
oid zodb.Oid
parent *RTree
// XXX +children?
}
// RBucket represents Bucket node covering [lo, hi_] key range in its Tree.
// NOTE it is not [lo,hi) but [lo,hi_] instead to avoid overflow at KeyMax.
type RBucket struct {
oid zodb.Oid
parent *RTree
lo, hi_ Key // XXX -> KeyRange ?
kv map[Key]string // bucket's k->v; values were ZBlk objects whose data is loaded instead.
}
// Path returns path to this bucket from tree root.
func (rb *RBucket) Path() []zodb.Oid {
path := []zodb.Oid{rb.oid}
p := rb.parent
for p != nil {
path = append([]zodb.Oid{p.oid}, path...)
p = p.parent
}
return path
}
// RBucketSet represents set of buckets covering whole [-∞,∞) range.
type RBucketSet []*RBucket // k↑
// Get returns RBucket which covers key k.
func (rbs RBucketSet) Get(k Key) *RBucket {
i := sort.Search(len(rbs), func(i int) bool {
return k <= rbs[i].hi_
})
if i == len(rbs) {
panicf("BUG: key %v not covered; coverage: %s", k, rbs.coverage())
}
rb := rbs[i]
if !(rb.lo <= k && k <= rb.hi_) {
panicf("BUG: get(%v) -> [%v, %v]; coverage: %s", k, rb.lo, rb.hi_, rbs.coverage())
}
return rb
}
// coverage returns string representation of rbs coverage structure.
func (rbs RBucketSet) coverage() string {
if len(rbs) == 0 {
return "ø"
}
s := ""
for _, rb := range rbs {
if s != "" {
s += " "
}
s += fmt.Sprintf("[%v, %v]", rb.lo, rb.hi_)
}
return s
}
// trackSet returns what should be ΔBtail.trackSet coverage for specified tracked key set.
func (rbs RBucketSet) trackSet(tracked SetKey) PPTreeSubSet {
// XXX was method -> change name?
func trackSet(rbs xbtreetest.RBucketSet, tracked setKey) blib.PPTreeSubSet {
// nil = don't compute keyCover
// (trackSet is called from inside hot inner loop of rebuild test)
trackSet := rbs._trackSetWithCov(tracked, nil)
trackSet := _trackSetWithCov(rbs, tracked, nil)
return trackSet
}
// trackSetWithCov returns what should be ΔBtail.trackSet and its key coverage for specified tracked key set.
func (rbs RBucketSet) trackSetWithCov(tracked SetKey) (trackSet PPTreeSubSet, keyCover *RangedKeySet) {
keyCover = &RangedKeySet{}
trackSet = rbs._trackSetWithCov(tracked, keyCover)
func trackSetWithCov(rbs xbtreetest.RBucketSet, tracked setKey) (trackSet blib.PPTreeSubSet, keyCover *blib.RangedKeySet) {
keyCover = &blib.RangedKeySet{}
trackSet = _trackSetWithCov(rbs, tracked, keyCover)
return trackSet, keyCover
}
func (rbs RBucketSet) _trackSetWithCov(tracked SetKey, outKeyCover *RangedKeySet) (trackSet PPTreeSubSet) {
trackSet = PPTreeSubSet{}
func _trackSetWithCov(rbs xbtreetest.RBucketSet, tracked setKey, outKeyCover *blib.RangedKeySet) (trackSet blib.PPTreeSubSet) {
trackSet = blib.PPTreeSubSet{}
for k := range tracked {
kb := rbs.Get(k)
if outKeyCover != nil {
outKeyCover.AddRange(KeyRange{kb.lo, kb.hi_})
}
// trackSet explicitly records only regular buckets.
// embedded buckets all have oid=zodb.InvalidOid and would lead to z
newNode := false
if kb.oid != zodb.InvalidOid {
track, already := trackSet[kb.oid]
if !already {
track = &nodeInTree{parent: kb.parent.oid, nchild: 0}
trackSet[kb.oid] = track
newNode = true
}
if track.parent != kb.parent.oid {
panicf("BUG: %s: B%s -> multiple parents: %s %s", rbs.coverage(), kb.oid, track.parent, kb.parent.oid)
}
}
p := kb.parent
for p != nil {
ppoid := zodb.InvalidOid // oid of p.parent
if p.parent != nil {
ppoid = p.parent.oid
}
newParent := false
pt, already := trackSet[p.oid]
if !already {
pt = &nodeInTree{parent: ppoid, nchild: 0}
trackSet[p.oid] = pt
newParent = true
}
if pt.parent != ppoid {
panicf("BUG: %s: T%s -> multiple parents: %s %s", rbs.coverage(), p.oid, pt.parent, ppoid)
}
if newNode {
pt.nchild++
}
newNode = newParent
p = p.parent
outKeyCover.AddRange(kb.Keycov)
}
trackSet.AddPath(kb.Path())
}
return trackSet
}
// XGetTree loads Tree from zurl@at->obj<root>.
//
// Tree values must be ZBlk whose data is returned instead of references to ZBlk objects.
// The tree is returned structured by buckets as
//
// [] [lo,hi){k->v} k↑
func XGetTree(db *zodb.DB, at zodb.Tid, root zodb.Oid) RBucketSet {
defer exc.Contextf("%s: @%s: get tree %s", db.Storage().URL(), at, root)
X := exc.Raiseif
txn, ctx := transaction.New(context.Background())
defer txn.Abort()
zconn, err := db.Open(ctx, &zodb.ConnOptions{At: at}); X(err)
xztree, err := zconn.Get(ctx, root); X(err)
ztree, ok := xztree.(*Tree)
if !ok {
exc.Raisef("expected %s, got %s", xzodb.TypeOf(ztree), xzodb.TypeOf(xztree))
}
rbucketv := RBucketSet{}
xwalkDFS(ctx, KeyMin, KeyMax, ztree, func(rb *RBucket) {
rbucketv = append(rbucketv, rb)
})
if len(rbucketv) == 0 { // empty tree -> [-∞,∞){}
etree := &RTree{
oid: root,
parent: nil,
}
ebucket := &RBucket{
oid: zodb.InvalidOid,
parent: etree,
lo: KeyMin,
hi_: KeyMax,
kv: map[Key]string{},
}
rbucketv = RBucketSet{ebucket}
}
return rbucketv
}
// xwalkDFS walks ztree in depth-first order emitting bvisit callback on visited bucket nodes.
func xwalkDFS(ctx context.Context, lo, hi_ Key, ztree *Tree, bvisit func(*RBucket)) {
_xwalkDFS(ctx, lo, hi_, ztree, /*rparent*/nil, bvisit)
}
func _xwalkDFS(ctx context.Context, lo, hi_ Key, ztree *Tree, rparent *RTree, bvisit func(*RBucket)) {
X := exc.Raiseif
err := ztree.PActivate(ctx); X(err)
defer ztree.PDeactivate()
rtree := &RTree{oid: ztree.POid(), parent: rparent}
// [i].Key ≤ [i].Child.*.Key < [i+1].Key i ∈ [0, len([]))
//
// [0].Key = -∞ ; always returned so
// [len(ev)].Key = +∞ ; should be assumed so
ev := ztree.Entryv()
for i := range ev {
xlo := lo; if i > 0 { xlo = ev[i].Key() }
xhi_ := hi_; if i+1 < len(ev) { xhi_ = ev[i+1].Key() - 1 }
tchild, ok := ev[i].Child().(*Tree)
if ok {
_xwalkDFS(ctx, xlo, xhi_, tchild, rtree, bvisit)
continue
}
zbucket := ev[i].Child().(*Bucket)
err = zbucket.PActivate(ctx); X(err)
defer zbucket.PDeactivate()
bkv := make(map[Key]string)
bentryv := zbucket.Entryv()
for _, __ := range bentryv {
k := __.Key()
xv := __.Value()
pv, ok := xv.(zodb.IPersistent)
if !ok {
exc.Raisef("[%d] -> %s; want IPersistent", k, xzodb.TypeOf(xv))
}
data, err := ZGetBlkData(ctx, pv.PJar(), pv.POid())
if err != nil {
exc.Raisef("[%d]: %s", k, err)
}
bkv[k] = data
}
b := &RBucket{oid: zbucket.POid(), parent: rtree, lo: xlo, hi_: xhi_, kv: bkv}
bvisit(b)
}
}
// XGetδKV translates {k -> δ<oid>} to {k -> δ(ZBlk(oid).data)} according to t1..t2 db snapshots.
func XGetδKV(t1, t2 *tTreeCommit, δkvOid map[Key]ΔValue) map[Key]Δstring {
func XGetδKV(t1, t2 *xbtreetest.Commit, δkvOid map[Key]ΔValue) map[Key]Δstring {
δkv := make(map[Key]Δstring, len(δkvOid))
for k, δvOid := range δkvOid {
δkv[k] = Δstring{
Old: t1.xgetBlkData(δvOid.Old),
New: t2.xgetBlkData(δvOid.New),
Old: t1.XGetBlkData(δvOid.Old),
New: t2.XGetBlkData(δvOid.New),
}
}
return δkv
......@@ -536,11 +137,11 @@ func XGetδKV(t1, t2 *tTreeCommit, δkvOid map[Key]ΔValue) map[Key]Δstring {
// Note: adjacency matrix is symmetric (KAdj verifies this at runtime):
//
// kadj(A,B) == kadj(B,A)
type KAdjMatrix map[Key]SetKey
type KAdjMatrix map[Key]setKey
// Map returns kadj·keys .
func (kadj KAdjMatrix) Map(keys SetKey) SetKey {
res := make(SetKey, len(keys))
func (kadj KAdjMatrix) Map(keys setKey) setKey {
res := make(setKey, len(keys))
for k := range keys {
to, ok := kadj[k]
if !ok {
......@@ -574,13 +175,13 @@ func (kadjA KAdjMatrix) Mul(kadjB KAdjMatrix) KAdjMatrix {
// This set of keys defaults to allTestKeys(t1,t2).
//
// KAdj itself is verified by testΔBTail on entries with .kadjOK set.
func KAdj(t1, t2 *tTreeCommit, keysv ...SetKey) (kadj KAdjMatrix) {
func KAdj(t1, t2 *xbtreetest.Commit, keysv ...setKey) (kadj KAdjMatrix) {
// assert KAdj(A,B) == KAdj(B,A)
kadj12 := _KAdj(t1,t2, keysv...)
kadj21 := _KAdj(t2,t1, keysv...)
if !reflect.DeepEqual(kadj12, kadj21) {
panicf("KAdj not symmetric:\nt1: %s\nt2: %s\nkadj12: %v\nkadj21: %v",
t1.tree, t2.tree, kadj12, kadj21)
t1.Tree, t2.Tree, kadj12, kadj21)
}
return kadj12
}
......@@ -592,8 +193,8 @@ func debugfKAdj(format string, argv ...interface{}) {
}
}
func _KAdj(t1, t2 *tTreeCommit, keysv ...SetKey) (kadj KAdjMatrix) {
var keys SetKey
func _KAdj(t1, t2 *xbtreetest.Commit, keysv ...setKey) (kadj KAdjMatrix) {
var keys setKey
switch len(keysv) {
case 0:
keys = allTestKeys(t1, t2)
......@@ -604,8 +205,8 @@ func _KAdj(t1, t2 *tTreeCommit, keysv ...SetKey) (kadj KAdjMatrix) {
}
debugfKAdj("\n\n_KAdj\n")
debugfKAdj("t1: %s\n", t1.tree)
debugfKAdj("t2: %s\n", t2.tree)
debugfKAdj("t1: %s\n", t1.Tree)
debugfKAdj("t2: %s\n", t2.Tree)
debugfKAdj("keys: %s\n", keys)
defer func() {
debugfKAdj("kadj -> %v\n", kadj)
......@@ -615,78 +216,76 @@ func _KAdj(t1, t2 *tTreeCommit, keysv ...SetKey) (kadj KAdjMatrix) {
// if k is tracked and covered by changed leaf -> changes to adjacents must be in Update(t1->t2).
kadj = KAdjMatrix{}
for k := range keys {
adj1 := SetKey{}
adj2 := SetKey{}
adj1 := setKey{}
adj2 := setKey{}
q1 := &RangedKeySet{}; q1.Add(k)
q2 := &RangedKeySet{}; q2.Add(k)
done1 := &RangedKeySet{}
done2 := &RangedKeySet{}
q1 := &blib.RangedKeySet{}; q1.Add(k)
q2 := &blib.RangedKeySet{}; q2.Add(k)
done1 := &blib.RangedKeySet{}
done2 := &blib.RangedKeySet{}
debugfKAdj("\nk%s\n", kstr(k))
for !q1.Empty() || !q2.Empty() {
debugfKAdj("q1: %s\tdone1: %s\n", q1, done1)
debugfKAdj("q2: %s\tdone2: %s\n", q2, done2)
for _, r1 := range q1.AllRanges() {
lo1 := r1.lo
lo1 := r1.Lo
for {
b1 := t1.xkv.Get(lo1)
b1 := t1.Xkv.Get(lo1)
debugfKAdj(" b1: %s\n", b1)
for k_ := range keys {
if b1.lo <= k_ && k_ <= b1.hi_ {
if b1.Keycov.Has(k_) {
adj1.Add(k_)
debugfKAdj(" adj1 += %s\t-> %s\n", kstr(k_), adj1)
}
}
b1r := KeyRange{b1.lo, b1.hi_}
done1.AddRange(b1r)
done1.AddRange(b1.Keycov)
// q2 |= (b1.keyrange \ done2)
δq2 := &RangedKeySet{}
δq2.AddRange(b1r)
δq2 := &blib.RangedKeySet{}
δq2.AddRange(b1.Keycov)
δq2.DifferenceInplace(done2)
q2.UnionInplace(δq2)
debugfKAdj("q2 += %s\t-> %s\n", δq2, q2)
// continue with next right bucket until r1 coverage is complete
if r1.hi_ <= b1.hi_ {
if r1.Hi_ <= b1.Keycov.Hi_ {
break
}
lo1 = b1.hi_ + 1
lo1 = b1.Keycov.Hi_ + 1
}
}
q1.Clear()
for _, r2 := range q2.AllRanges() {
lo2 := r2.lo
lo2 := r2.Lo
for {
b2 := t2.xkv.Get(lo2)
b2 := t2.Xkv.Get(lo2)
debugfKAdj(" b2: %s\n", b2)
for k_ := range keys {
if b2.lo <= k_ && k_ <= b2.hi_ {
if b2.Keycov.Has(k_) {
adj2.Add(k_)
debugfKAdj(" adj2 += %s\t-> %s\n", kstr(k_), adj2)
}
}
b2r := KeyRange{b2.lo, b2.hi_}
done2.AddRange(b2r)
done2.AddRange(b2.Keycov)
// q1 |= (b2.keyrange \ done1)
δq1 := &RangedKeySet{}
δq1.AddRange(b2r)
δq1 := &blib.RangedKeySet{}
δq1.AddRange(b2.Keycov)
δq1.DifferenceInplace(done1)
q1.UnionInplace(δq1)
debugfKAdj("q1 += %s\t-> %s\n", δq1, q1)
// continue with next right bucket until r2 coverage is complete
if r2.hi_ <= b2.hi_ {
if r2.Hi_ <= b2.Keycov.Hi_ {
break
}
lo2 = b2.hi_ + 1
lo2 = b2.Keycov.Hi_ + 1
}
}
q2.Clear()
}
adj := SetKey{}; adj.Update(adj1); adj.Update(adj2)
adj := setKey{}; adj.Update(adj1); adj.Update(adj2)
kadj[k] = adj
}
......@@ -700,10 +299,10 @@ func _KAdj(t1, t2 *tTreeCommit, keysv ...SetKey) (kadj KAdjMatrix) {
// the cycling phase of update, that is responsible to recompute older
// entries when key coverage grows, is exercised by
// xverifyΔBTail_rebuild.
func xverifyΔBTail_Update(t *testing.T, subj string, db *zodb.DB, treeRoot zodb.Oid, t1, t2 *tTreeCommit) {
func xverifyΔBTail_Update(t *testing.T, subj string, db *zodb.DB, treeRoot zodb.Oid, t1, t2 *xbtreetest.Commit) {
// verify transition at1->at2 for all initial states of tracked {keys} from kv1 + kv2 + ∞
t.Run(fmt.Sprintf("Update/%s→%s", t1.tree, t2.tree), func(t *testing.T) {
t.Run(fmt.Sprintf("Update/%s→%s", t1.Tree, t2.Tree), func(t *testing.T) {
allKeys := allTestKeys(t1, t2)
allKeyv := allKeys.SortedElements()
......@@ -711,7 +310,7 @@ func xverifyΔBTail_Update(t *testing.T, subj string, db *zodb.DB, treeRoot zodb
// verify at1->at2 for all combination of initial tracked keys.
for kidx := range IntSets(len(allKeyv)) {
keys := SetKey{}
keys := setKey{}
for _, idx := range kidx {
keys.Add(allKeyv[idx])
}
......@@ -727,17 +326,17 @@ func xverifyΔBTail_Update(t *testing.T, subj string, db *zodb.DB, treeRoot zodb
// xverifyΔBTail_Update1 verifies how ΔBTail handles ZODB update at1->at2 from initial
// tracked state defined by initialTrackedKeys.
func xverifyΔBTail_Update1(t *testing.T, subj string, db *zodb.DB, treeRoot zodb.Oid, t1,t2 *tTreeCommit, initialTrackedKeys SetKey, kadj KAdjMatrix) {
func xverifyΔBTail_Update1(t *testing.T, subj string, db *zodb.DB, treeRoot zodb.Oid, t1,t2 *xbtreetest.Commit, initialTrackedKeys setKey, kadj KAdjMatrix) {
X := exc.Raiseif
//t.Logf("\n>>> Track=%s\n", initialTrackedKeys)
δZ := t2.δZ
d12 := t2.δxkv
δZ := t2.ΔZ
d12 := t2.Δxkv
var TrackedδZ SetKey = nil
var kadjTrackedδZ SetKey = nil
var TrackedδZ setKey = nil
var kadjTrackedδZ setKey = nil
var δT, δTok map[Key]Δstring = nil, nil
δZset := SetOid{}
δZset := setOid{}
for _, oid := range δZ.Changev {
δZset.Add(oid)
}
......@@ -764,28 +363,28 @@ func xverifyΔBTail_Update1(t *testing.T, subj string, db *zodb.DB, treeRoot zod
// δbtail @at1 with initial tracked set
δbtail := NewΔBtail(t1.at, db)
δbtail := NewΔBtail(t1.At, db)
xtrackKeys(δbtail, t1, initialTrackedKeys)
// TrackedδZ = Tracked ^ δZ (i.e. a tracked node has changed, or its coverage was changed)
TrackedδZ = SetKey{}
TrackedδZ = setKey{}
for k := range initialTrackedKeys {
leaf1 := t1.xkv.Get(k)
oid1 := leaf1.oid
leaf1 := t1.Xkv.Get(k)
oid1 := leaf1.Oid
if oid1 == zodb.InvalidOid { // embedded bucket
oid1 = leaf1.parent.oid
oid1 = leaf1.Parent.Oid
}
leaf2 := t2.xkv.Get(k)
oid2 := leaf2.oid
leaf2 := t2.Xkv.Get(k)
oid2 := leaf2.Oid
if oid2 == zodb.InvalidOid { // embedded bucket
oid2 = leaf2.parent.oid
oid2 = leaf2.Parent.Oid
}
if δZset.Has(oid1) || δZset.Has(oid2) || (KeyRange{leaf1.lo,leaf1.hi_} != KeyRange{leaf2.lo,leaf2.hi_}) {
if δZset.Has(oid1) || δZset.Has(oid2) || (leaf1.Keycov != leaf2.Keycov) {
TrackedδZ.Add(k)
}
}
kadjTrackedδZ = SetKey{} // kadj[Tracked^δZ] (all keys adjacent to tracked^δZ)
kadjTrackedδZ = setKey{} // kadj[Tracked^δZ] (all keys adjacent to tracked^δZ)
for k := range TrackedδZ {
kadjTrackedδZ.Update(kadj[k])
}
......@@ -809,12 +408,12 @@ func xverifyΔBTail_Update1(t *testing.T, subj string, db *zodb.DB, treeRoot zod
}
}
ø := PPTreeSubSet{}
ø := blib.PPTreeSubSet{}
// trackSet1 = xkv1[tracked1]
// trackSet2 = xkv2[tracked2] ( = xkv2[kadj[tracked1]]
trackSet1, tkeyCov1 := t1.xkv.trackSetWithCov(initialTrackedKeys)
trackSet2, tkeyCov2 := t2.xkv.trackSetWithCov(initialTrackedKeys.Union(kadjTrackedδZ))
trackSet1, tkeyCov1 := trackSetWithCov(t1.Xkv, initialTrackedKeys)
trackSet2, tkeyCov2 := trackSetWithCov(t2.Xkv, initialTrackedKeys.Union(kadjTrackedδZ))
// verify δbtail.trackSet against @at1
δbtail.assertTrack(t, "1", ø, trackSet1)
......@@ -834,7 +433,7 @@ func xverifyΔBTail_Update1(t *testing.T, subj string, db *zodb.DB, treeRoot zod
// assert δtkeycov == δ(tkeyCov1, tkeyCov2)
δtkeycovOK := tkeyCov2.Difference(tkeyCov1)
δtkeycov := &RangedKeySet{}
δtkeycov := &blib.RangedKeySet{}
if __, ok := δB1.ByRoot[treeRoot]; ok {
δtkeycov = __.δtkeycov1
}
......@@ -857,11 +456,11 @@ func xverifyΔBTail_Update1(t *testing.T, subj string, db *zodb.DB, treeRoot zod
// assert δB.ByRoot == {treeRoot -> ...} if δTok != ø
// == ø if δTok == ø
rootsOK := SetOid{}
rootsOK := setOid{}
if len(δTok) > 0 {
rootsOK.Add(treeRoot)
}
roots := SetOid{}
roots := setOid{}
for root := range δB.ΔByRoot {
roots.Add(root)
}
......@@ -915,13 +514,13 @@ func xverifyΔBTail_Update1(t *testing.T, subj string, db *zodb.DB, treeRoot zod
// assertTrack verifies state of .trackSet and ΔTtail.trackNew.
// it assumes that only one tree root is being tracked.
// XXX place
func (δBtail *ΔBtail) assertTrack(t *testing.T, subj string, trackSetOK PPTreeSubSet, trackNewOK PPTreeSubSet) {
func (δBtail *ΔBtail) assertTrack(t *testing.T, subj string, trackSetOK blib.PPTreeSubSet, trackNewOK blib.PPTreeSubSet) {
t.Helper()
if !δBtail.trackSet.Equal(trackSetOK) {
t.Errorf("%s: trackSet:\n\thave: %v\n\twant: %v", subj, δBtail.trackSet, trackSetOK)
}
roots := SetOid{}
roots := setOid{}
for root := range δBtail.vδTbyRoot {
roots.Add(root)
}
......@@ -942,7 +541,7 @@ func (δBtail *ΔBtail) assertTrack(t *testing.T, subj string, trackSetOK PPTree
δTtail := δBtail.vδTbyRoot[root]
trackNewRootsOK := SetOid{}
trackNewRootsOK := setOid{}
if !trackNewOK.Empty() {
trackNewRootsOK.Add(root)
}
......@@ -962,21 +561,21 @@ func (δBtail *ΔBtail) assertTrack(t *testing.T, subj string, trackSetOK PPTree
// t1->t2 further exercises incremental rebuild.
//
// It also exercises rebuild phase of ΔBtail.Update.
func xverifyΔBTail_rebuild(t *testing.T, db *zodb.DB, treeRoot zodb.Oid, t0, t1, t2 *tTreeCommit) {
t.Run(fmt.Sprintf("rebuild/%s→%s", t0.tree, t1.tree), func(t *testing.T) {
func xverifyΔBTail_rebuild(t *testing.T, db *zodb.DB, treeRoot zodb.Oid, t0, t1, t2 *xbtreetest.Commit) {
t.Run(fmt.Sprintf("rebuild/%s→%s", t0.Tree, t1.Tree), func(t *testing.T) {
tAllKeys := allTestKeys(t0, t1, t2)
tAllKeyv := tAllKeys.SortedElements()
// tid -> "at_i"
xat := map[zodb.Tid]string{
t0.at: "at0",
t1.at: "at1",
t2.at: "at2",
t0.At: "at0",
t1.At: "at1",
t2.At: "at2",
}
//fmt.Printf("@%s: %v\n", xat[t0.at], t0.xkv.Flatten())
//fmt.Printf("@%s: %v\n", xat[t1.at], t1.xkv.Flatten())
//fmt.Printf("@%s: %v\n", xat[t2.at], t2.xkv.Flatten())
//fmt.Printf("@%s: %v\n", xat[t0.At], t0.Xkv.Flatten())
//fmt.Printf("@%s: %v\n", xat[t1.At], t1.Xkv.Flatten())
//fmt.Printf("@%s: %v\n", xat[t2.At], t2.Xkv.Flatten())
kadj10 := KAdj(t1,t0, allTestKeys(t0,t1,t2))
kadj21 := KAdj(t2,t1, allTestKeys(t0,t1,t2))
......@@ -985,12 +584,12 @@ func xverifyΔBTail_rebuild(t *testing.T, db *zodb.DB, treeRoot zodb.Oid, t0, t1
// kadj210 = kadj10·kadj21
kadj210 := kadj10.Mul(kadj21)
ø := PPTreeSubSet{}
ø := blib.PPTreeSubSet{}
// verify t0 -> t1 Track(keys1) Rebuild -> t2 Track(keys2) Rebuild
// for all combinations of keys1 and keys2
for k1idx := range IntSets(len(tAllKeyv)) {
keys1 := SetKey{}
keys1 := setKey{}
for _, idx1 := range k1idx {
keys1.Add(tAllKeyv[idx1])
}
......@@ -999,17 +598,17 @@ func xverifyΔBTail_rebuild(t *testing.T, db *zodb.DB, treeRoot zodb.Oid, t0, t1
keys1_0 := kadj10.Map(keys1)
δkv1_1 := map[Key]Δstring{}
for k := range keys1_0 {
δv, ok := t1.δxkv[k]
δv, ok := t1.Δxkv[k]
if ok {
δkv1_1[k] = δv
}
}
Tkeys1 := t1.xkv.trackSet(keys1)
Tkeys1_0 := t1.xkv.trackSet(keys1_0)
Tkeys1 := trackSet(t1.Xkv, keys1)
Tkeys1_0 := trackSet(t1.Xkv, keys1_0)
t.Run(fmt.Sprintf(" T%s;R", keys1), func(t *testing.T) {
δbtail := NewΔBtail(t0.at, db)
δbtail := NewΔBtail(t0.At, db)
// assert trackSet=ø, trackNew=ø, vδB=[]
δbtail.assertTrack(t, "@at0", ø, ø)
......@@ -1029,7 +628,7 @@ func xverifyΔBTail_rebuild(t *testing.T, db *zodb.DB, treeRoot zodb.Oid, t0, t1
/*trackSet=*/ Tkeys1_0,
/*vδT=*/ δkv1_1)
t.Run((" →" + t2.tree), func(t *testing.T) {
t.Run((" →" + t2.Tree), func(t *testing.T) {
// keys1R2 is full set of keys that should become tracked after
// Update() (which includes rebuild)
keys1R2 := kadj12.Map(keys1)
......@@ -1045,17 +644,17 @@ func xverifyΔBTail_rebuild(t *testing.T, db *zodb.DB, treeRoot zodb.Oid, t0, t1
δkv1_k1R2 := map[Key]Δstring{}
δkv2_k1R2 := map[Key]Δstring{}
for k := range keys1R2 {
δv1, ok := t1.δxkv[k]
δv1, ok := t1.Δxkv[k]
if ok {
δkv1_k1R2[k] = δv1
}
δv2, ok := t2.δxkv[k]
δv2, ok := t2.Δxkv[k]
if ok {
δkv2_k1R2[k] = δv2
}
}
Tkeys1R2 := t2.xkv.trackSet(keys1R2)
Tkeys1R2 := trackSet(t2.Xkv, keys1R2)
xverifyΔBTail_rebuild_U(t, δbtail, treeRoot, t1, t2, xat,
/*trackSet=*/ Tkeys1R2,
......@@ -1065,7 +664,7 @@ func xverifyΔBTail_rebuild(t *testing.T, db *zodb.DB, treeRoot zodb.Oid, t0, t1
// reduce that to = tAllKeys - keys1R2 in short mode
// ( if key from keys2 already became tracked after Track(keys1) + Update,
// adding Track(that-key), is not adding much testing coverage to recompute paths )
var tRestKeys2 SetKey
var tRestKeys2 setKey
if testing.Short() {
tRestKeys2 = tAllKeys.Difference(keys1R2)
} else {
......@@ -1074,7 +673,7 @@ func xverifyΔBTail_rebuild(t *testing.T, db *zodb.DB, treeRoot zodb.Oid, t0, t1
tRestKeyv2 := tRestKeys2.SortedElements()
for k2idx := range IntSets(len(tRestKeyv2)) {
keys2 := SetKey{}
keys2 := setKey{}
for _, idx2 := range k2idx {
keys2.Add(tRestKeyv2[idx2])
}
......@@ -1090,16 +689,16 @@ func xverifyΔBTail_rebuild(t *testing.T, db *zodb.DB, treeRoot zodb.Oid, t0, t1
keys12R2 = keys12R2_
}
Tkeys2 := t2.xkv.trackSet(keys2)
Tkeys12R2 := t2.xkv.trackSet(keys12R2)
Tkeys2 := trackSet(t2.Xkv, keys2)
Tkeys12R2 := trackSet(t2.Xkv, keys12R2)
/*
fmt.Printf("\n\n\nKKK\nkeys1=%s keys2=%s\n", keys1, keys2)
fmt.Printf("keys1R2: %s\n", keys1R2)
fmt.Printf("keys12R2: %s\n", keys12R2)
fmt.Printf("t0.xkv: %v\n", t0.xkv)
fmt.Printf("t1.xkv: %v\n", t1.xkv)
fmt.Printf("t2.xkv: %v\n", t2.xkv)
fmt.Printf("t0.Xkv: %v\n", t0.Xkv)
fmt.Printf("t1.Xkv: %v\n", t1.Xkv)
fmt.Printf("t2.Xkv: %v\n", t2.Xkv)
fmt.Printf("kadj21: %v\n", kadj21)
fmt.Printf("kadj12: %v\n", kadj12)
fmt.Printf("Tkeys2 -> %s\n", Tkeys2)
......@@ -1110,14 +709,14 @@ func xverifyΔBTail_rebuild(t *testing.T, db *zodb.DB, treeRoot zodb.Oid, t0, t1
// δkvX_k12R2 = tX.δxkv / keys12R2
δkv1_k12R2 := make(map[Key]Δstring, len(t1.δxkv))
δkv2_k12R2 := make(map[Key]Δstring, len(t2.δxkv))
δkv1_k12R2 := make(map[Key]Δstring, len(t1.Δxkv))
δkv2_k12R2 := make(map[Key]Δstring, len(t2.Δxkv))
for k := range keys12R2 {
δv1, ok := t1.δxkv[k]
δv1, ok := t1.Δxkv[k]
if ok {
δkv1_k12R2[k] = δv1
}
δv2, ok := t2.δxkv[k]
δv2, ok := t2.Δxkv[k]
if ok {
δkv2_k12R2[k] = δv2
}
......@@ -1147,15 +746,15 @@ func xverifyΔBTail_rebuild(t *testing.T, db *zodb.DB, treeRoot zodb.Oid, t0, t1
}
// xverifyΔBTail_rebuild_U verifies ΔBtail state after Update(ti->tj).
func xverifyΔBTail_rebuild_U(t *testing.T, δbtail *ΔBtail, treeRoot zodb.Oid, ti, tj *tTreeCommit, xat map[zodb.Tid]string, trackSet PPTreeSubSet, vδTok ...map[Key]Δstring) {
func xverifyΔBTail_rebuild_U(t *testing.T, δbtail *ΔBtail, treeRoot zodb.Oid, ti, tj *xbtreetest.Commit, xat map[zodb.Tid]string, trackSet blib.PPTreeSubSet, vδTok ...map[Key]Δstring) {
t.Helper()
X := exc.Raiseif
ø := PPTreeSubSet{}
ø := blib.PPTreeSubSet{}
subj := fmt.Sprintf("after Update(@%s→@%s)", xat[ti.at], xat[tj.at])
subj := fmt.Sprintf("after Update(@%s→@%s)", xat[ti.At], xat[tj.At])
// Update ati -> atj
δB, err := δbtail.Update(tj.δZ); X(err)
δB, err := δbtail.Update(tj.ΔZ); X(err)
δbtail.assertTrack(t, subj, trackSet, ø)
assertΔTtail(t, subj, δbtail, tj, treeRoot, xat, vδTok...)
......@@ -1172,7 +771,7 @@ func xverifyΔBTail_rebuild_U(t *testing.T, δbtail *ΔBtail, treeRoot zodb.Oid,
δrootsOK = 0
}
δroots := SetOid{}
δroots := setOid{}
for root := range δbtail.vδTbyRoot {
δroots.Add(root)
}
......@@ -1180,8 +779,8 @@ func xverifyΔBTail_rebuild_U(t *testing.T, δbtail *ΔBtail, treeRoot zodb.Oid,
if ok {
δT = XGetδKV(ti, tj, δToid)
}
if δB.Rev != tj.at {
t.Errorf("%s: δB.Rev: have %s ; want %s", subj, δB.Rev, tj.at)
if δB.Rev != tj.At {
t.Errorf("%s: δB.Rev: have %s ; want %s", subj, δB.Rev, tj.At)
}
if len(δB.ΔByRoot) != δrootsOK {
t.Errorf("%s: len(δB.ΔByRoot) != %d ; δroots=%v", subj, δrootsOK, δroots)
......@@ -1192,14 +791,14 @@ func xverifyΔBTail_rebuild_U(t *testing.T, δbtail *ΔBtail, treeRoot zodb.Oid,
}
// xverifyΔBTail_rebuild_TR verifies ΔBtail state after Track(keys) + rebuild.
func xverifyΔBTail_rebuild_TR(t *testing.T, δbtail *ΔBtail, tj *tTreeCommit, treeRoot zodb.Oid, xat map[zodb.Tid]string, keys SetKey, trackSet PPTreeSubSet, trackNew, trackSetAfterRebuild PPTreeSubSet, vδTok ...map[Key]Δstring) {
func xverifyΔBTail_rebuild_TR(t *testing.T, δbtail *ΔBtail, tj *xbtreetest.Commit, treeRoot zodb.Oid, xat map[zodb.Tid]string, keys setKey, trackSet blib.PPTreeSubSet, trackNew, trackSetAfterRebuild blib.PPTreeSubSet, vδTok ...map[Key]Δstring) {
t.Helper()
ø := PPTreeSubSet{}
ø := blib.PPTreeSubSet{}
// Track(keys)
xtrackKeys(δbtail, tj, keys)
subj := fmt.Sprintf("@%s: after Track%v", xat[tj.at], keys)
subj := fmt.Sprintf("@%s: after Track%v", xat[tj.At], keys)
δbtail.assertTrack(t, subj, trackSet, trackNew)
δbtail.rebuildAll()
......@@ -1215,23 +814,23 @@ func xverifyΔBTail_rebuild_TR(t *testing.T, δbtail *ΔBtail, tj *tTreeCommit,
// assertΔTtail verifies state of ΔTtail that corresponds to treeRoot in δbtail.
// it also verifies that δbtail.vδBroots matches ΔTtail data.
func assertΔTtail(t *testing.T, subj string, δbtail *ΔBtail, tj *tTreeCommit, treeRoot zodb.Oid, xat map[zodb.Tid]string, vδTok ...map[Key]Δstring) {
func assertΔTtail(t *testing.T, subj string, δbtail *ΔBtail, tj *xbtreetest.Commit, treeRoot zodb.Oid, xat map[zodb.Tid]string, vδTok ...map[Key]Δstring) {
t.Helper()
// XXX +KVAtTail, +lastRevOf
l := len(vδTok)
var vatOK []zodb.Tid
var vδTok_ []map[Key]Δstring
at2t := map[zodb.Tid]*tTreeCommit{tj.at: tj}
at2t := map[zodb.Tid]*xbtreetest.Commit{tj.At: tj}
t0 := tj
for i := 0; i<l; i++ {
// empty vδTok entries means they should be absent in vδT
if δTok := vδTok[l-i-1]; len(δTok) != 0 {
vatOK = append([]zodb.Tid{t0.at}, vatOK...)
vatOK = append([]zodb.Tid{t0.At}, vatOK...)
vδTok_ = append([]map[Key]Δstring{δTok}, vδTok_...)
}
t0 = t0.prev
at2t[t0.at] = t0
t0 = t0.Prev
at2t[t0.At] = t0
}
vδTok = vδTok_
δTtail, ok := δbtail.vδTbyRoot[treeRoot]
......@@ -1243,7 +842,7 @@ func assertΔTtail(t *testing.T, subj string, δbtail *ΔBtail, tj *tTreeCommit,
l = len(vδToid)
var vat []zodb.Tid
var vδT []map[Key]Δstring
atPrev := t0.at
atPrev := t0.At
for _, δToid := range vδToid {
vat = append(vat, δToid.Rev)
δT := XGetδKV(at2t[atPrev], at2t[δToid.Rev], δToid.ΔKV) // {} k -> δ(ZBlk(oid).data)
......@@ -1277,11 +876,11 @@ func assertΔTtail(t *testing.T, subj string, δbtail *ΔBtail, tj *tTreeCommit,
}
if !bok {
δbroots := ""
vδb_root := ""
for i := 0; i<len(vatδB); i++ {
δbroots += fmt.Sprintf("\n\t@%s", xat[vatδB[i]])
vδb_root += fmt.Sprintf("\n\t@%s", xat[vatδB[i]])
}
emsg += fmt.Sprintf("δbroots: %s\n", δbroots)
emsg += fmt.Sprintf("vδb/root: %s\n", vδb_root)
}
t.Error(emsg)
......@@ -1290,11 +889,11 @@ func assertΔTtail(t *testing.T, subj string, δbtail *ΔBtail, tj *tTreeCommit,
// xtrackKeys issues δbtail.Track requests for tree[keys].
// XXX place
func xtrackKeys(δbtail *ΔBtail, t *tTreeCommit, keys SetKey) {
func xtrackKeys(δbtail *ΔBtail, t *xbtreetest.Commit, keys setKey) {
X := exc.Raiseif
head := δbtail.Head()
if head != t.at {
panicf("BUG: δbtail.head: %s ; t.at: %s", head, t.at)
if head != t.At {
panicf("BUG: δbtail.head: %s ; t.at: %s", head, t.At)
}
for k := range keys {
......@@ -1308,7 +907,7 @@ func xtrackKeys(δbtail *ΔBtail, t *tTreeCommit, keys SetKey) {
// continues to be tracked and all keys migrate to holes in the
// tracking set. By aligning initial state to the same as after
// T1->ø, we test what will happen on ø->T2.
b := t.xkv.Get(k)
b := t.Xkv.Get(k)
err := δbtail.track(k, b.Path()); X(err)
}
}
......@@ -1317,19 +916,19 @@ func xtrackKeys(δbtail *ΔBtail, t *tTreeCommit, keys SetKey) {
// XXX
// XXX kill
/*
func ___xverifyΔBTail_GetAt(t *testing.T, db *zodb.DB, treeRoot zodb.Oid, vt ...*tTreeCommit) {
subj := vt[0].tree
func ___xverifyΔBTail_GetAt(t *testing.T, db *zodb.DB, treeRoot zodb.Oid, vt ...*xbtreetest.Commit) {
subj := vt[0].Tree
for _, t := range vt[1:] {
subj += "→" + t.tree
subj += "→" + t.Tree
}
t.Run(fmt.Sprintf("Get/%s", subj), func(t *testing.T) {
// tid -> "at_i"
xat := map[zodb.Tid]string{}
for i := range vt {
xat[vt[i].at] = fmt.Sprintf("at%d", i)
xat[vt[i].At] = fmt.Sprintf("at%d", i)
fmt.Printf("@%s: %v\n", xat[vt[i].at], vt[i].xkv.Flatten())
fmt.Printf("@%s: %v\n", xat[vt[i].At], vt[i].Xkv.Flatten())
}
tkeys := allTestKeys(vt...)
......@@ -1338,7 +937,7 @@ func ___xverifyΔBTail_GetAt(t *testing.T, db *zodb.DB, treeRoot zodb.Oid, vt ..
// verify t1->t2-> ... ->tn Track(keys) Get(keys, @at)
// for all combinations of tracked keys and at
for kidx := range IntSets(len(tkeyv)) {
keys := SetKey{}
keys := setKey{}
for _, idx := range kidx {
keys.Add(tkeyv[idx])
}
......@@ -1350,19 +949,19 @@ func ___xverifyΔBTail_GetAt(t *testing.T, db *zodb.DB, treeRoot zodb.Oid, vt ..
})
}
func xverifyΔBTail_GetAt1(t *testing.T, db *zodb.DB, treeRoot zodb.Oid, vt []*tTreeCommit, xat map[zodb.Tid]string, keys SetKey) {
func xverifyΔBTail_GetAt1(t *testing.T, db *zodb.DB, treeRoot zodb.Oid, vt []*xbtreetest.Commit, xat map[zodb.Tid]string, keys setKey) {
X := exc.Raiseif
// t1->t2-> ... -> tn
δbtail := NewΔBtail(vt[0].at, db)
δbtail := NewΔBtail(vt[0].At, db)
for i := 1; i < len(vt); i++ {
_, err := δbtail.Update(vt[i].δZ); X(err)
_, err := δbtail.Update(vt[i].ΔZ); X(err)
}
// Track(keys)
txn, ctx := transaction.New(context.Background())
defer txn.Abort()
zconn, err := db.Open(ctx, &zodb.ConnOptions{At: vt[len(vt)-1].at}); X(err)
zconn, err := db.Open(ctx, &zodb.ConnOptions{At: vt[len(vt)-1].At}); X(err)
xtree, err := zconn.Get(ctx, treeRoot); X(err)
ztree := xtree.(*Tree)
......@@ -1373,21 +972,21 @@ func xverifyΔBTail_GetAt1(t *testing.T, db *zodb.DB, treeRoot zodb.Oid, vt []*t
// verify GetAt(k, @at) for all keys and @at
for i := 1; i < len(vt); i++ {
at := vt[i].at
at := vt[i].At
for _, k := range keys.SortedElements() {
vOid, ok, rev, revExact, err := δbtail.GetAt(ctx, ztree, k, at); X(err)
v := xzgetBlkDataAt(db, vOid, rev)
v_, ok_ := vt[i].xkv.Get(k).kv[k]
rev_, revExact_ := vt[i].at, false
v_, ok_ := vt[i].Xkv.Get(k).kv[k]
rev_, revExact_ := vt[i].At, false
for j := i-1; j >= 0; j-- {
v__ := vt[j].xkv.Get(k).kv[k]
v__ := vt[j].Xkv.Get(k).kv[k]
if v__ != v_ {
rev_ = vt[j+1].at
rev_ = vt[j+1].At
revExact_ = true
break
}
rev_ = vt[j].at
rev_ = vt[j].At
}
if v == "" { v = DEL }
......@@ -1436,244 +1035,21 @@ func ΔBTest(xtest interface{}) ΔBTestEntry {
}
// tTreeEnv is tree-based testing environment.
//
// It combines TreeSrv and client side access to ZODB with committed trees.
// It should be created via tNewTreeEnv().
type tTreeEnv struct {
*testing.T
work string // working directory
treeSrv *TreeSrv
zstor zodb.IStorage
db *zodb.DB
// all committed trees
commitv []*tTreeCommit
}
// tTreeCommit represents a test commit changing a tree.
type tTreeCommit struct {
tree string // the tree in topology-encoding
prev *tTreeCommit // previous commit
at zodb.Tid // commit revision
δZ *zodb.EventCommit // raw ZODB changes; δZ.tid == at
xkv RBucketSet // full tree state as of @at
δxkv map[Key]Δstring // full tree-diff against parent
blkDataTab map[zodb.Oid]string // full snapshot of all ZBlk data @at
}
// tNewTreeEnv creates new tTreeEnv.
func tNewTreeEnv(t *testing.T) *tTreeEnv {
X := exc.Raiseif
t.Helper()
tt := &tTreeEnv{T: t}
var err error
work := t.TempDir()
tt.treeSrv, err = StartTreeSrv(work + "/1.fs"); X(err)
t.Cleanup(func() {
err := tt.treeSrv.Close(); X(err)
})
tt.zstor, err = zodb.Open(context.Background(), tt.treeSrv.zurl, &zodb.OpenOptions{
ReadOnly: true,
}); X(err)
t.Cleanup(func() {
err := tt.zstor.Close(); X(err)
})
tt.db = zodb.NewDB(tt.zstor, &zodb.DBOptions{
// We need objects to be cached, because otherwise it is too
// slow to run the test for many testcases, especially
// xverifyΔBTail_rebuild.
CacheControl: &tZODBCacheEverything{},
})
t.Cleanup(func() {
err := tt.db.Close(); X(err)
})
head := tt.treeSrv.head
t1 := &tTreeCommit{
tree: "T/B:", // treegen.py creates the tree as initially empty
prev: nil,
at: head,
xkv: XGetTree(tt.db, head, tt.Root()),
blkDataTab: xGetBlkDataTab(tt.db, head),
δZ: nil,
δxkv: nil,
}
tt.commitv = []*tTreeCommit{t1}
return tt
}
// tZODBCacheEverything is a workaround for ZODB/go not implementing a real
// live cache for now: objects get dropped on PDeactivate if cache
// control does not say we need the object to stay in the cache.
// XXX place
type tZODBCacheEverything struct{}
func (_ *tZODBCacheEverything) PCacheClassify(_ zodb.IPersistent) zodb.PCachePolicy {
return zodb.PCachePinObject | zodb.PCacheKeepState
}
// Root returns OID of root tree node.
func (t *tTreeEnv) Root() zodb.Oid {
return t.treeSrv.treeRoot
}
// Head returns most-recently committed tree.
func (t *tTreeEnv) Head() *tTreeCommit {
return t.commitv[len(t.commitv)-1]
}
// CommitTree calls t.treeSrv.Commit and returns the tTreeCommit corresponding to the committed transaction.
func (t *tTreeEnv) CommitTree(tree string) *tTreeCommit {
// TODO X = FatalIf
X := exc.Raiseif
defer exc.Contextf("commit %s", tree)
watchq := make(chan zodb.Event)
at0 := t.zstor.AddWatch(watchq)
defer t.zstor.DelWatch(watchq)
tid, err := t.treeSrv.Commit(tree); X(err)
if !(tid > at0) {
exc.Raisef("treegen -> %s ; want > %s", tid, at0)
}
zevent := <-watchq
δZ := zevent.(*zodb.EventCommit)
if δZ.Tid != tid {
exc.Raisef("treegen -> %s ; watchq -> %s", tid, δZ)
}
// load tree structure from the db
// if the tree does not exist yet - report its structure as empty
var xkv RBucketSet
if tree != DEL {
xkv = XGetTree(t.db, δZ.Tid, t.Root())
} else {
// empty tree with real treeRoot as oid even though the tree is
// deleted. Having real oid in the root tests that after deletion,
// root of the tree stays in the tracking set. We need root to stay
// in trackSet because e.g. in
//
// T1 -> ø -> T2
//
// where the tree is first deleted, then recreated, without root
// staying in trackSet after ->ø, treediff will notice nothing when
// it comes to ->T2.
xkv = RBucketSet{
&RBucket{
oid: zodb.InvalidOid,
parent: &RTree{
oid: t.Root(), // NOTE oid is not InvalidOid
parent: nil,
},
lo: KeyMin,
hi_: KeyMax,
kv: map[Key]string{},
},
}
}
ttree := &tTreeCommit{
tree: tree,
at: δZ.Tid,
δZ: δZ,
xkv: xkv,
blkDataTab: xGetBlkDataTab(t.db, δZ.Tid),
}
tprev := t.Head()
ttree.prev = tprev
ttree.δxkv = kvdiff(tprev.xkv.Flatten(), ttree.xkv.Flatten())
t.commitv = append(t.commitv, ttree)
return ttree
}
// xGetBlkDataTab loads all ZBlk from db@at.
//
// it returns {} oid -> blkdata.
func xGetBlkDataTab(db *zodb.DB, at zodb.Tid) map[zodb.Oid]string {
defer exc.Contextf("%s: @%s: get blkdatatab", db.Storage().URL(), at)
X := exc.Raiseif
blkDataTab := map[zodb.Oid]string{}
txn, ctx := transaction.New(context.Background())
defer txn.Abort()
zconn, err := db.Open(ctx, &zodb.ConnOptions{At: at}); X(err)
xzroot, err := zconn.Get(ctx, 0); X(err)
zroot, ok := xzroot.(*zodb.Map)
if !ok {
exc.Raisef("root: expected %s, got %s", xzodb.TypeOf(zroot), xzodb.TypeOf(xzroot))
}
err = zroot.PActivate(ctx); X(err)
defer zroot.PDeactivate()
xzblkdir, ok := zroot.Data["treegen/values"]
if !ok {
exc.Raisef("root['treegen/values'] missing")
}
zblkdir, ok := xzblkdir.(*zodb.Map)
if !ok {
exc.Raisef("root['treegen/values']: expected %s, got %s", xzodb.TypeOf(zblkdir), xzodb.TypeOf(xzblkdir))
}
err = zblkdir.PActivate(ctx); X(err)
defer zblkdir.PDeactivate()
for k, xzblk := range zblkdir.Data {
zblk, ok := xzblk.(zodb.IPersistent)
if !ok {
exc.Raisef("root['treegen/values'][%q]: expected %s, got %s", k, xzodb.TypeOf(zblk), xzodb.TypeOf(xzblk))
}
oid := zblk.POid()
data := xzgetBlkData(ctx, zconn, oid)
blkDataTab[oid] = data
}
return blkDataTab
}
// xgetBlkData loads blk data for ZBlk<oid> @t.at
//
// For speed the load is done via preloaded t.blkDataTab instead of access to the DB.
func (t *tTreeCommit) xgetBlkData(oid zodb.Oid) string {
if oid == VDEL {
return DEL
}
data, ok := t.blkDataTab[oid]
if !ok {
exc.Raisef("getBlkData ZBlk<%s> @%s: no such ZBlk", oid, t.at)
}
return data
}
// testΔBTail verifies ΔBTail on a sequence of tree topologies coming from testq.
func testΔBTail(t_ *testing.T, testq chan ΔBTestEntry) {
t := tNewTreeEnv(t_)
t := xbtreetest.NewT(t_)
var t0 *tTreeCommit
var t0 *xbtreetest.Commit
for test := range testq {
t1 := t.Head()
t2 := t.CommitTree(test.tree)
subj := fmt.Sprintf("%s -> %s", t1.tree, t2.tree)
subj := fmt.Sprintf("%s -> %s", t1.Tree, t2.Tree)
//t.Logf("\n\n\n**** %s ****\n\n", subj)
// KAdj
if kadjOK := test.kadjOK; kadjOK != nil {
t.Run(fmt.Sprintf("KAdj/%s→%s", t1.tree, t2.tree), func(t *testing.T) {
t.Run(fmt.Sprintf("KAdj/%s→%s", t1.Tree, t2.Tree), func(t *testing.T) {
kadj := KAdj(t1, t2)
if !reflect.DeepEqual(kadj, kadjOK) {
t.Fatalf("BUG: computed kadj is wrong:\nkadjOK: %v\nkadj : %v\n\n", kadjOK, kadj)
......@@ -1683,12 +1059,12 @@ func testΔBTail(t_ *testing.T, testq chan ΔBTestEntry) {
// ΔBTail.Update
if test.flags & ΔBTest_SkipUpdate == 0 {
xverifyΔBTail_Update(t.T, subj, t.db, t.Root(), t1,t2)
xverifyΔBTail_Update(t.T, subj, t.DB, t.Root(), t1,t2)
}
// ΔBTail.rebuild
if t0 != nil && (test.flags & ΔBTest_SkipRebuild == 0) {
xverifyΔBTail_rebuild(t.T, t.db, t.Root(), t0,t1,t2)
xverifyΔBTail_rebuild(t.T, t.DB, t.Root(), t0,t1,t2)
}
t0, t1 = t1, t2
......@@ -1697,9 +1073,9 @@ func testΔBTail(t_ *testing.T, testq chan ΔBTestEntry) {
// TestΔBTail verifies ΔBTail for explicitly provided tree topologies.
func TestΔBTail(t *testing.T) {
// K is shorthand for SetKey
K := func(keyv ...Key) SetKey {
ks := SetKey{}
// K is shorthand for setKey
K := func(keyv ...Key) setKey {
ks := setKey{}
for _, k := range keyv { ks.Add(k) }
return ks
}
......@@ -1858,7 +1234,7 @@ func TestΔBTail(t *testing.T) {
// * `ø -> Tree ...` (tree is created anew),
// * `... Tree -> ø` (tree is deleted), and
// * `Tree -> ø -> Tree` (tree is deleted and then recreated)
DEL,
xbtreetest.DEL,
// tree rotation
"T3/B2:b-B3:c,4:d",
......@@ -2018,7 +1394,7 @@ func TestΔBTailAllStructs(t *testing.T) {
nkeys := N(3, 5, 10)
// server to generate AllStructs(kv, ...)
sg, err := StartAllStructsSrv(); X(err)
sg, err := xbtreetest.StartAllStructsSrv(); X(err)
defer func() {
err := sg.Close(); X(err)
}()
......@@ -2143,7 +1519,7 @@ func TestΔBTailAllStructs(t *testing.T) {
func TestΔBtailForget(t_ *testing.T) {
t := tNewTreeEnv(t_)
t := xbtreetest.NewT(t_)
X := exc.Raiseif
t0 := t.CommitTree("T/B:")
......@@ -2151,51 +1527,49 @@ func TestΔBtailForget(t_ *testing.T) {
t2 := t.CommitTree("T2/B1:a-B2:b")
t3 := t.CommitTree("T/B2:b")
δbtail := NewΔBtail(t0.at, t.db)
_, err := δbtail.Update(t1.δZ); X(err)
_, err = δbtail.Update(t2.δZ); X(err)
δbtail := NewΔBtail(t0.At, t.DB)
_, err := δbtail.Update(t1.ΔZ); X(err)
_, err = δbtail.Update(t2.ΔZ); X(err)
// start tracking. everything becomes tracked because t1's T/B1:a has [-∞,∞) coverage
// By starting tracking after t2 we verify vδBroots update in both Update and rebuild
_0 := SetKey{}; _0.Add(0)
_0 := setKey{}; _0.Add(0)
xtrackKeys(δbtail, t2, _0)
_, err = δbtail.Update(t3.δZ); X(err)
_, err = δbtail.Update(t3.ΔZ); X(err)
xat := map[zodb.Tid]string{
t0.at: "at0",
t1.at: "at1",
t2.at: "at2",
t3.at: "at3",
}
assertΔTtail(t.T, "init", δbtail, t3, t.Root(), xat, t1.δxkv, t2.δxkv, t3.δxkv)
δbtail.ForgetPast(t0.at)
assertΔTtail(t.T, "forget ≤ at0", δbtail, t3, t.Root(), xat, t1.δxkv, t2.δxkv, t3.δxkv)
δbtail.ForgetPast(t1.at)
assertΔTtail(t.T, "forget ≤ at1", δbtail, t3, t.Root(), xat, t2.δxkv, t3.δxkv)
δbtail.ForgetPast(t3.at)
t0.At: "at0",
t1.At: "at1",
t2.At: "at2",
t3.At: "at3",
}
assertΔTtail(t.T, "init", δbtail, t3, t.Root(), xat, t1.Δxkv, t2.Δxkv, t3.Δxkv)
δbtail.ForgetPast(t0.At)
assertΔTtail(t.T, "forget ≤ at0", δbtail, t3, t.Root(), xat, t1.Δxkv, t2.Δxkv, t3.Δxkv)
δbtail.ForgetPast(t1.At)
assertΔTtail(t.T, "forget ≤ at1", δbtail, t3, t.Root(), xat, t2.Δxkv, t3.Δxkv)
δbtail.ForgetPast(t3.At)
assertΔTtail(t.T, "forget ≤ at3", δbtail, t3, t.Root(), xat, )
}
// ---- misc ----
func TestΔBtailClone(t_ *testing.T) {
// ΔBtail.Clone had bug that aliased klon data to orig
t := tNewTreeEnv(t_)
t := xbtreetest.NewT(t_)
X := exc.Raiseif
t0 := t.CommitTree("T2/B1:a-B2:b")
t1 := t.CommitTree("T2/B1:c-B2:d")
δbtail := NewΔBtail(t0.at, t.db)
_, err := δbtail.Update(t1.δZ); X(err)
_2 := SetKey{}; _2.Add(2)
δbtail := NewΔBtail(t0.At, t.DB)
_, err := δbtail.Update(t1.ΔZ); X(err)
_2 := setKey{}; _2.Add(2)
xtrackKeys(δbtail, t1, _2)
err = δbtail.rebuildAll(); X(err)
xat := map[zodb.Tid]string{
t0.at: "at0",
t1.at: "at1",
t0.At: "at0",
t1.At: "at1",
}
δkv1_1 := map[Key]Δstring{2:{"b","d"}}
......@@ -2204,8 +1578,8 @@ func TestΔBtailClone(t_ *testing.T) {
assertΔTtail(t.T, "klon @at1", δbklon, t1, t.Root(), xat, δkv1_1)
t2 := t.CommitTree("T/B1:b,2:a")
_, err = δbtail.Update(t2.δZ); X(err)
xat[t2.at] = "at2"
_, err = δbtail.Update(t2.ΔZ); X(err)
xat[t2.At] = "at2"
δkv1_2 := map[Key]Δstring{1:{"a","c"}, 2:{"b","d"}}
δkv2_2 := map[Key]Δstring{1:{"c","b"}, 2:{"d","a"}}
......@@ -2214,6 +1588,9 @@ func TestΔBtailClone(t_ *testing.T) {
}
// -------- misc --------
// IntSets generates all sets of integers in range [0,N)
func IntSets(N int) chan []int {
ch := make(chan []int)
......@@ -2257,89 +1634,12 @@ func TestIntSets(t *testing.T) {
}
// kvdiff returns the difference between kv1 and kv2.
var DEL = "ø" // DEL means deletion
type Δstring struct {
Old string
New string
}
func kvdiff(kv1, kv2 map[Key]string) map[Key]Δstring {
delta := map[Key]Δstring{}
keys := SetKey{}
for k := range kv1 { keys.Add(k) }
for k := range kv2 { keys.Add(k) }
for k := range keys {
v1, ok := kv1[k]
if !ok { v1 = DEL }
v2, ok := kv2[k]
if !ok { v2 = DEL }
if v1 != v2 {
delta[k] = Δstring{v1,v2}
}
}
return delta
}
func TestKVDiff(t *testing.T) {
kv1 := map[Key]string{1:"a", 3:"c", 4:"d"}
kv2 := map[Key]string{1:"b", 4:"d", 5:"e"}
got := kvdiff(kv1, kv2)
want := map[Key]Δstring{1:{"a","b"}, 3:{"c",DEL}, 5:{DEL,"e"}}
if !reflect.DeepEqual(got, want) {
t.Fatalf("error:\ngot: %v\nwant: %v", got, want)
}
}
// kvtxt returns string representation of {} kv.
func kvtxt(kv map[Key]string) string {
if len(kv) == 0 {
return "ø"
}
keyv := []Key{}
for k := range kv { keyv = append(keyv, k) }
sort.Slice(keyv, func(i,j int) bool { return keyv[i] < keyv[j] })
sv := []string{}
for _, k := range keyv {
v := kv[k]
if strings.ContainsAny(v, " \n\t,:") {
panicf("[%v]=%q: invalid value", k, v)
}
sv = append(sv, fmt.Sprintf("%v:%s", k, v))
}
return strings.Join(sv, ",")
}
func TestKVTxt(t *testing.T) {
kv := map[Key]string{3:"hello", 1:"zzz", 4:"world"}
got := kvtxt(kv)
want := "1:zzz,3:hello,4:world"
if got != want {
t.Fatalf("error:\ngot: %q\nwant: %q", got, want)
}
}
// Flatten converts xkv with bucket structure into regular dict.
func (xkv RBucketSet) Flatten() map[Key]string {
kv := make(map[Key]string)
for _, b := range xkv {
for k,v := range b.kv {
kv[k] = v
}
}
return kv
}
// allTestKeys returns all keys from vt + ∞.
func allTestKeys(vt ...*tTreeCommit) SetKey {
allKeys := SetKey{}; allKeys.Add(KeyMax) // ∞ simulating ZBigFile.Size() query
func allTestKeys(vt ...*xbtreetest.Commit) setKey {
allKeys := setKey{}; allKeys.Add(KeyMax) // ∞ simulating ZBigFile.Size() query
for _, t := range vt {
for _, b := range t.xkv {
for k := range b.kv {
for _, b := range t.Xkv {
for k := range b.KV {
allKeys.Add(k)
}
}
......@@ -2358,11 +1658,6 @@ func sortedKeys(kv map[Key]Δstring) []Key {
return keyv
}
func (b *RBucket) String() string {
return fmt.Sprintf("%sB%s{%s}", KeyRange{b.lo, b.hi_}, b.oid, kvtxt(b.kv))
}
// XXX place
func tidvEqual(av, bv []zodb.Tid) bool {
if len(av) != len(bv) {
......@@ -2400,34 +1695,3 @@ func δTEqual(δa, δb map[Key]Δstring) bool {
}
return true
}
// ----------------------------------------
// ZBlk-related functions are imported at runtime by δbtail_x_test
var (
ZTreeGetBlkData func(context.Context, *Tree, Key) (string, bool, []Node, error)
ZGetBlkData func(context.Context, *zodb.Connection, zodb.Oid) (string, error)
)
// xzgetBlkData loads block data from ZBlk object specified by its oid.
func xzgetBlkData(ctx context.Context, zconn *zodb.Connection, zblkOid zodb.Oid) string {
X := exc.Raiseif
if zblkOid == VDEL {
return DEL
}
data, err := ZGetBlkData(ctx, zconn, zblkOid); X(err)
return string(data)
}
// xzgetBlkDataAt loads block data from ZBlk object specified by oid@at.
func xzgetBlkDataAt(db *zodb.DB, zblkOid zodb.Oid, at zodb.Tid) string {
X := exc.Raiseif
txn, ctx := transaction.New(context.Background())
defer txn.Abort()
zconn, err := db.Open(ctx, &zodb.ConnOptions{At: at}); X(err)
return xzgetBlkData(ctx, zconn, zblkOid)
}
......@@ -18,85 +18,7 @@
// See https://www.nexedi.com/licensing for rationale and options.
package xbtree_test
// ZBlk-related part of δbtail_test
import (
"context"
"fmt"
"lab.nexedi.com/kirr/go123/xerr"
"lab.nexedi.com/kirr/neo/go/zodb"
"lab.nexedi.com/nexedi/wendelin.core/wcfs/internal/xbtree"
"lab.nexedi.com/nexedi/wendelin.core/wcfs/internal/xzodb"
"lab.nexedi.com/nexedi/wendelin.core/wcfs/internal/zdata"
_ "lab.nexedi.com/nexedi/wendelin.core/wcfs/internal/xbtree/xbtreetest/init"
)
type Tree = xbtree.Tree
type Node = xbtree.Node
type Key = xbtree.Key
type ZBlk = zdata.ZBlk
// ztreeGetBlk returns ztree[k] and the tree path that leads to this block.
// XXX +return blkRevMax and use it ?
func ztreeGetBlk(ctx context.Context, ztree *Tree, k Key) (zblk ZBlk, ok bool, path []Node, err error) {
path = []Node{}
xzblk, ok, err := ztree.VGet(ctx, k, func(node Node) {
path = append(path, node)
})
if err != nil {
return nil, false, nil, err
}
if ok {
zblk, ok = xzblk.(ZBlk)
if !ok {
return nil, false, nil, fmt.Errorf("expect ZBlk*; got %s", xzodb.TypeOf(xzblk)) // XXX errctx
}
}
return zblk, ok, path, nil
}
func init() {
xbtree.ZTreeGetBlkData = ZTreeGetBlkData
xbtree.ZGetBlkData = ZGetBlkData
}
// ZTreeGetBlkData returns block data from block pointed to by ztree[k].
func ZTreeGetBlkData(ctx context.Context, ztree *Tree, k Key) (data string, ok bool, path []Node, err error) {
defer xerr.Contextf(&err, "@%s: tree<%s>: get blkdata from [%d]", ztree.PJar().At(), ztree.POid(), k)
zblk, ok, path, err := ztreeGetBlk(ctx, ztree, k)
if err != nil || !ok {
return "", ok, path, err
}
bdata, _, err := zblk.LoadBlkData(ctx)
if err != nil {
return "", false, nil, err
}
return string(bdata), true, path, nil
}
// ZGetBlkData loads block data from ZBlk object specified by its oid.
func ZGetBlkData(ctx context.Context, zconn *zodb.Connection, zblkOid zodb.Oid) (data string, err error) {
defer xerr.Contextf(&err, "@%s: get blkdata from obj %s", zconn.At(), zblkOid)
xblk, err := zconn.Get(ctx, zblkOid)
if err != nil {
return "", err
}
zblk, ok := xblk.(ZBlk)
if !ok {
return "", fmt.Errorf("expect ZBlk*; got %s", xzodb.TypeOf(xblk))
}
bdata, _, err := zblk.LoadBlkData(ctx)
if err != nil {
return "", err
}
return string(bdata), nil
}
......@@ -60,7 +60,6 @@ import (
// ZBlk is the interface that every ZBlk* block implements.
type ZBlk interface {
zodb.IPersistent
_ZBlkInΔFtail
// LoadBlkData loads from the database and returns the data block stored by this ZBlk.
//
......@@ -79,7 +78,6 @@ var _ ZBlk = (*ZBlk1)(nil)
// ZBlk0 mimics ZBlk0 from python.
type ZBlk0 struct {
zodb.Persistent
zblkInΔFtail
// NOTE py source uses bytes(buf) but on python2 it still results in str
blkdata string
......@@ -157,7 +155,6 @@ func (zd *zDataState) PySetState(pystate interface{}) error {
// ZBlk1 mimics ZBlk1 from python.
type ZBlk1 struct {
zodb.Persistent
zblkInΔFtail
chunktab *btree.IOBTree // {} offset -> ZData(chunk)
}
......@@ -468,7 +465,7 @@ func (bf *ZBigFile) LoadBlk(ctx context.Context, blk int64) (_ []byte, treePath
// Size returns whole file size.
//
// it also returns BTree path scaned to obtain the size.
// it also returns BTree path scanned to obtain the size.
func (bf *ZBigFile) Size(ctx context.Context) (_ int64, treePath []btree.LONode, err error) {
defer xerr.Contextf(&err, "bigfile %s: size", bf.POid())
......
......@@ -35,8 +35,6 @@ import (
"github.com/stretchr/testify/require"
)
const K = 1024
// TestZBlk verifies that ZBlk* and ZBigFile saved by Python can be read correctly by Go.
// TODO also test with data saved by Python3.
func TestZBlk(t *testing.T) {
......
......@@ -19,13 +19,9 @@
package zdata
//go:generate ../set/gen-set zdata ZBigFile *ZBigFile zset_bigfile.go
import (
"context"
"fmt"
"runtime"
"sync"
"lab.nexedi.com/kirr/go123/xerr"
"lab.nexedi.com/kirr/neo/go/zodb"
......@@ -37,7 +33,8 @@ import (
"lab.nexedi.com/nexedi/wendelin.core/wcfs/internal/xzodb"
)
type SetI64 = set.SetI64
type setI64 = set.I64
type setOid = set.Oid
// ΔFtail represents tail of revisional changes to files.
//
......@@ -78,8 +75,12 @@ type SetI64 = set.SetI64
type ΔFtail struct {
// ΔFtail merges ΔBtail with history of ZBlk
δBtail *xbtree.ΔBtail
fileIdx map[zodb.Oid]SetZBigFile // tree-root -> {} ZBigFile as of @head
fileIdx map[zodb.Oid]setOid // tree-root -> {} ZBigFile<oid> as of @head
trackSetZFile setOid // set of tracked ZBigFiles as of @head
trackSetZBlk map[zodb.Oid]*zblkTrack // zblk -> {} root -> {}blk as of @head
// XXX kill
///*
// XXX don't need vδF - everything is reconstructed at runtime from .δBtail.vδT
// this way we also don't need to keep up updating vδF from vδT on its rebuild during.
// data with δF changes. Actual for part of tracked set that was taken
......@@ -88,40 +89,29 @@ type ΔFtail struct {
// tracked ZBlk that are not yet taken into account in current vδF.
// grows on new track requests; flushes on queries and update.
trackNew map[*ZBigFile]map[zodb.Oid]*zblkInΔFtail // {} file -> {} oid -> zblk
trackNew map[zodb.Oid]map[zodb.Oid]*zblkTrack // {} foid -> {} zoid -> zblk
//*/
}
// zblkTrack keeps information about which root/blocks a ZBlk is present in as of @head.
type zblkTrack struct {
// inroot map[zodb.Oid]setI64 // {} root -> {}blk XXX later switch to this
infile map[zodb.Oid]setI64 // {} foid -> {}blk
}
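// Illustration (hypothetical oids f, g): after Track(f, 2, ..., zblk) and
// Track(g, 7, ..., zblk) the zblkTrack entry for zblk would be
//
//	zblkTrack{infile: {f.POid(): {2}, g.POid(): {7}}}
//
// i.e. one ZBlk can be referenced from several blocks of several files.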
// ΔF represents a change in files space.
type ΔF struct {
Rev zodb.Tid
ByFile map[*ZBigFile]*ΔFile // file -> δfile
ByFile map[zodb.Oid]*ΔFile // foid -> δfile
}
// ΔFile represents a change to one file.
type ΔFile struct {
Rev zodb.Tid
Blocks SetI64 // changed blocks
Size bool // whether file size changed
Blocks setI64 // changed blocks XXX -> ΔBlocks ?
Size bool // whether file size changed XXX -> ΔSize?
}
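// Rough sketch of how a caller might consume ΔF returned by Update (fileTab
// and invalidateBlk are hypothetical caller-side helpers; wcfs does the
// equivalent in its δZ handler):
//
//	δF, err := δFtail.Update(δZ, zhead)
//	if err != nil {
//		return err
//	}
//	for foid, δfile := range δF.ByFile {
//		f := fileTab[foid]              // caller-side index {} foid -> file
//		for blk := range δfile.Blocks { // invalidate changed blocks
//			invalidateBlk(f, blk)
//		}
//		if δfile.Size {
//			// file size changed -> restat f
//		}
//	}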
// zblkInΔFtail is ΔFtail-related volatile data embedded into ZBlk*.
//
// The data is preserved even when ZBlk comes to ghost state, but is lost if
// ZBlk is garbage collected. The data is transient - it is _not_ included into
// persistent state.
type zblkInΔFtail struct {
mu sync.Mutex // to support multiple concurrent loaders
// XXX change vvv to intree_parent? {} Bucket -> set(#blk)
// (this is uniform with building in-RAM reverse child->parents relation for
// tree nodes and for tree_root->file)
// with which files/blocks this ZBlk is associated with as of @head state
infile map[*ZBigFile]SetI64 // {} file -> set(#blk)
}
type _ZBlkInΔFtail interface { inΔFtail() *zblkInΔFtail }
func (z *zblkInΔFtail) inΔFtail() *zblkInΔFtail { return z }
// NewΔFtail creates new ΔFtail object.
//
......@@ -132,9 +122,11 @@ func (z *zblkInΔFtail) inΔFtail() *zblkInΔFtail { return z }
// ZODB when needed.
func NewΔFtail(at0 zodb.Tid, db *zodb.DB) *ΔFtail {
return &ΔFtail{
δBtail: xbtree.NewΔBtail(at0, db),
fileIdx: make(map[zodb.Oid]SetZBigFile),
trackNew: make(map[*ZBigFile]map[zodb.Oid]*zblkInΔFtail),
δBtail: xbtree.NewΔBtail(at0, db),
fileIdx: map[zodb.Oid]setOid{},
trackSetZFile: setOid{},
trackSetZBlk: map[zodb.Oid]*zblkTrack{},
trackNew: map[zodb.Oid]map[zodb.Oid]*zblkTrack{},
}
}
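// Typical usage (informal sketch; file/blk/path/zblk/δZ/zhead come from the
// caller, names are illustrative only):
//
//	δFtail := NewΔFtail(at0, db)
//
//	// on every block access - remember which tree path and ZBlk serve the block
//	δFtail.Track(file, blk, path, zblk)
//
//	// on every ZODB commit - merge the commit into the tail
//	δF, err := δFtail.Update(δZ, zhead)
//
//	// on queries - get per-file history in (lo, hi]
//	vδfile := δFtail.SliceByFileRev(file, lo, hi)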
......@@ -156,6 +148,9 @@ func (δFtail *ΔFtail) Tail() zodb.Tid { return δFtail.δBtail.Tail() }
//
// A root can be associated with several files (each provided on a different Track call).
func (δFtail *ΔFtail) Track(file *ZBigFile, blk int64, path []btree.LONode, zblk ZBlk) {
// XXX locking
foid := file.POid()
if blk == -1 {
// XXX blk = ∞ from beginning ?
blk = xbtree.KeyMax
......@@ -164,38 +159,44 @@ func (δFtail *ΔFtail) Track(file *ZBigFile, blk int64, path []btree.LONode, zb
if err != nil {
panic(err) // XXX -> error? errctx
}
root := path[0].(*btree.LOBTree)
files, ok := δFtail.fileIdx[root.POid()]
if !ok {
files = SetZBigFile{}
files = setOid{}
δFtail.fileIdx[root.POid()] = files
}
files.Add(file)
files.Add(foid)
δFtail.trackSetZFile.Add(foid)
// associate zblk with file, if it was not hole
if zblk != nil {
z := zblk.inΔFtail()
z.mu.Lock()
blocks, ok := z.infile[file]
zoid := zblk.POid()
zt, ok := δFtail.trackSetZBlk[zoid]
if !ok {
zt = &zblkTrack{}
δFtail.trackSetZBlk[zoid] = zt
}
blocks, ok := zt.infile[foid]
if !ok {
blocks = make(SetI64, 1)
if z.infile == nil {
z.infile = make(map[*ZBigFile]SetI64)
blocks = make(setI64, 1)
if zt.infile == nil {
zt.infile = make(map[zodb.Oid]setI64)
}
z.infile[file] = blocks
zt.infile[foid] = blocks
}
blocks.Add(blk)
z.mu.Unlock()
// XXX locking
if !ok {
// zblk was not associated with this file
zt := δFtail.trackNew[file]
if zt == nil {
zt = make(map[zodb.Oid]*zblkInΔFtail, 1)
δFtail.trackNew[file] = zt
ft := δFtail.trackNew[foid]
if ft == nil {
ft = make(map[zodb.Oid]*zblkTrack, 1)
δFtail.trackNew[foid] = ft
}
zt[zblk.POid()] = z
ft[zoid] = zt
}
}
......@@ -224,13 +225,14 @@ func (δFtail *ΔFtail) Update(δZ *zodb.EventCommit, zhead *xzodb.ZConn) (_ ΔF
// XXX δFtail.update() first?
// XXX verify zhead.At() == δFtail.Head()
// XXX locking
δB, err := δFtail.δBtail.Update(δZ)
if err != nil {
return ΔF{}, err
}
δF := ΔF{Rev: δB.Rev, ByFile: make(map[*ZBigFile]*ΔFile)}
δF := ΔF{Rev: δB.Rev, ByFile: make(map[zodb.Oid]*ΔFile)}
// take btree changes into account
for root, δt := range δB.ΔByRoot {
......@@ -241,7 +243,7 @@ func (δFtail *ΔFtail) Update(δZ *zodb.EventCommit, zhead *xzodb.ZConn) (_ ΔF
for file := range files {
δfile, ok := δF.ByFile[file]
if !ok {
δfile = &ΔFile{Rev: δF.Rev, Blocks: make(SetI64)}
δfile = &ΔFile{Rev: δF.Rev, Blocks: make(setI64)}
δF.ByFile[file] = δfile
}
for blk /*, zblk*/ := range δt {
......@@ -260,42 +262,29 @@ func (δFtail *ΔFtail) Update(δZ *zodb.EventCommit, zhead *xzodb.ZConn) (_ ΔF
// take zblk changes into account
for _, oid := range δZ.Changev {
// XXX cache lock/unlock
obj := zhead.Cache().Get(oid)
if obj == nil {
//fmt.Printf("%s: not in cache\n", oid)
continue // nothing to do - see invariant
if δFtail.trackSetZFile.Has(oid) {
// TODO check that .blksize and .blktab (it is only
// persistent reference) do not change.
return ΔF{}, fmt.Errorf("ZBigFile<%s> changed @%s", oid, δZ.Tid)
}
//fmt.Printf("%s: in cache (%s)\n", oid, typeOf(obj))
switch obj := obj.(type) {
case ZBlk: // ZBlk*
// z.infile locking: since we write-locked head.zheadMu
// - no other fuse reads are running, and thus no one
// is mutating z.infile. XXX recheck
z := obj.inΔFtail()
for file, blocks := range z.infile {
δfile, ok := δF.ByFile[file]
if !ok {
δfile = &ΔFile{Rev: δF.Rev, Blocks: make(SetI64)}
δF.ByFile[file] = δfile
}
zt, ok := δFtail.trackSetZBlk[oid]
if !ok {
continue // not tracked
}
δfile.Blocks.Update(blocks)
for foid, blocks := range zt.infile {
δfile, ok := δF.ByFile[foid]
if !ok {
δfile = &ΔFile{Rev: δF.Rev, Blocks: make(setI64)}
δF.ByFile[foid] = δfile
}
// XXX update z.infile according to btree changes
case *ZBigFile:
// TODO check that .blksize and .blktab (it is only
// persistent reference) do not change.
return ΔF{}, fmt.Errorf("ZBigFile<%s> changed @%s", oid, δZ.Tid)
δfile.Blocks.Update(blocks)
}
// make sure obj won't be garbage-collected until we finish handling it.
runtime.KeepAlive(obj)
// XXX update zt.infile according to btree changes
}
δFtail.vδF = append(δFtail.vδF, δF)
......@@ -313,15 +302,16 @@ func (δFtail *ΔFtail) update(file *ZBigFile) {
}
// let's see if we need to rebuild .vδF due to not-yet processed track requests
foid := file.POid()
// XXX locking
// XXX dumb
zt, dirty := δFtail.trackNew[file]
zt, dirty := δFtail.trackNew[foid]
if !dirty {
return
}
delete(δFtail.trackNew, file)
delete(δFtail.trackNew, foid)
// XXX unlock here
for i, δZ := range δFtail.δBtail.ΔZtail().Data() {
......@@ -335,14 +325,14 @@ func (δFtail *ΔFtail) update(file *ZBigFile) {
}
// XXX locking
// XXX -> func δF.δfile(file) ?
δfile, ok := δF.ByFile[file]
// XXX -> func δF.δfile(foid) ?
δfile, ok := δF.ByFile[foid]
if !ok {
δfile = &ΔFile{Rev: δF.Rev, Blocks: make(SetI64)}
δF.ByFile[file] = δfile
δfile = &ΔFile{Rev: δF.Rev, Blocks: make(setI64)}
δF.ByFile[foid] = δfile
}
δfile.Blocks.Update(z.infile[file])
δfile.Blocks.Update(z.infile[foid])
}
}
}
......@@ -353,10 +343,10 @@ func (δFtail *ΔFtail) ForgetPast(revCut zodb.Tid) {
}
// XXX don't need
func (δFtail *ΔFtail) SliceByRev(lo, hi zodb.Tid) /*readonly*/ []ΔF {
xtail.AssertSlice(δFtail, lo, hi)
panic("TODO")
}
//func (δFtail *ΔFtail) SliceByRev(lo, hi zodb.Tid) /*readonly*/ []ΔF {
// xtail.AssertSlice(δFtail, lo, hi)
// panic("TODO")
//}
// SliceByFileRev returns history of file changes in (lo, hi] range.
//
......@@ -398,9 +388,10 @@ func (δFtail *ΔFtail) SliceByFileRev(file *ZBigFile, lo, hi zodb.Tid) /*readon
vδF = vδF[i:j+1]
// filter found changes to have only file-related bits
foid := file.POid()
var vδfile []*ΔFile
for _, δF := range vδF {
δfile, ok := δF.ByFile[file]
δfile, ok := δF.ByFile[foid]
if ok {
vδfile = append(vδfile, δfile)
}
......
// Copyright (C) 2019-2021 Nexedi SA and Contributors.
// Kirill Smelkov <kirr@nexedi.com>
//
// This program is free software: you can Use, Study, Modify and Redistribute
// it under the terms of the GNU General Public License version 3, or (at your
// option) any later version, as published by the Free Software Foundation.
//
// You can also Link and Combine this program with other software covered by
// the terms of any of the Free Software licenses or any of the Open Source
// Initiative approved licenses and Convey the resulting work. Corresponding
// source of such a combination shall include the source code for all other
// software used.
//
// This program is distributed WITHOUT ANY WARRANTY; without even the implied
// warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
//
// See COPYING file for full licensing terms.
// See https://www.nexedi.com/licensing for rationale and options.
package zdata
import (
"testing"
"lab.nexedi.com/kirr/neo/go/zodb"
"lab.nexedi.com/nexedi/wendelin.core/wcfs/internal/set"
)
type setStr = set.Str
// ΔFTestEntry represents one entry in ΔFtail tests.
type ΔFTestEntry struct {
δblkTab map[int64]string // change in tree part {} #blk -> ZBlk<oid>
δblkData setStr // change to ZBlk objects
}
func TestΔFtail(t *testing.T) {
// δT is shorthand to create δblkTab.
type δT = map[int64]string
// δD is shorthand to create δblkData.
δD := func(zblkv ...string) setStr {
δ := setStr{}
for _, zblk := range zblkv {
δ.Add(zblk)
}
return δ
}
const a, b, c, ø = "a", "b", "c", "ø"
testv := []ΔFTestEntry{
{δT{1:a,2:b,3:ø}, δD(a)},
{δT{}, δD(c)},
{δT{2:c}, δD(a,b)},
}
vδf := []ΔFile{} // (rev↑, {}blk) XXX +.Size?
blkTab := map[int64]string{} // #blk -> ZBlk<oid>
Zinblk := map[string]setI64{} // ZBlk<oid> -> which #blk refer to it
for _, test := range testv {
δf := setI64{}
for blk, zblk := range test.δblkTab {
// rebuild blkTab/Zinblk
zprev, ok := blkTab[blk]
if ok {
delete(Zinblk[zprev], blk)
} else {
zprev = ø
}
if zblk != ø {
blkTab[blk] = zblk
inblk, ok := Zinblk[zblk]
if !ok {
inblk = setI64{}
Zinblk[zblk] = inblk
}
inblk.Add(blk)
}
// update δf due to change in blkTab
if zblk != zprev {
δf.Add(blk)
}
}
// update δf due to change in ZBlk data
for zblk := range test.δblkData {
for blk := range Zinblk[zblk] {
δf.Add(blk)
}
}
vδf = append(vδf, ΔFile{
Rev: zodb.InvalidTid, // XXX will be set after treegen commit
Blocks: δf,
Size: false/*XXX*/,
})
}
}
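// How the model above evolves for the testv entries (informal trace):
//
//	{δT{1:a,2:b,3:ø}, δD(a)}:  blkTab={1:a,2:b}  Zinblk={a:{1},b:{2}}  δf={1,2}
//	                           (blk 3 goes ø -> ø and is not counted)
//	{δT{},           δD(c)}:   blkTab/Zinblk unchanged; no block refers to c; δf={}
//	{δT{2:c},        δD(a,b)}: blkTab={1:a,2:c}  Zinblk={a:{1},c:{2}}  δf={1,2}
//	                           (2 - via δblkTab; 1 - via change to a; b no longer referenced)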
// XXX TestΔFtailRandom(t *testing.T) {
//}
......@@ -257,21 +257,19 @@ package main
// 2) head/bigfile/* of all bigfiles represent state as of zhead.At .
// 3) for head/bigfile/* the following invariant is maintained:
//
// #blk ∈ OS file cache => ZBlk(#blk) + all BTree/Bucket that lead to it ∈ zhead live cache(%)
// (ZBlk* in ghost state)
// #blk ∈ OS file cache => all BTree/Bucket/ZBlk that lead to blk are tracked(%)
//
// => all BTree/Bucket that lead to blk are tracked (XXX)
// The invariant helps on invalidation: when δFtail (see below) sees a
// changed oid, it is guaranteed that if the change affects a block that was
// ever provided to OS, δFtail will detect that this block has changed. XXX review
// And if oid relates to a file block but is not in δFtail's tracking set -
// we know that block is not cached and will trigger ZODB load on a future
// file read.
//
// The invariant helps on invalidation: if we see a changed oid, and
// zhead.cache.lookup(oid) = ø -> we know we don't have to invalidate OS
// cache for any part of any file (even if oid relates to a file block - that
// block is not cached and will trigger ZODB load on file read).
//
// XXX explain why tracked
//
// Currently we maintain this invariant by simply never evicting ZBlk/LOBTree/LOBucket
// objects from ZODB Connection cache. In the future we may want to try to
// synchronize to kernel freeing its pagecache pages.
// Currently we maintain this invariant by adding ZBlk/LOBTree/LOBucket
// objects to δFtail on every access, and never shrinking that tracking set.
// In the future we may want to try to synchronize to kernel freeing its
// pagecache pages.
//
// 4) when we receive an invalidation message from ZODB - we process it and
// propagate invalidations to OS file cache of head/bigfile/*:
......@@ -301,6 +299,8 @@ package main
// Eager invalidation would require full scan - Trees _and_
// Buckets, which makes it prohibitively expensive - see (+).
//
// FIXME all ^^^ is outdated -> XXX δFtail
//
// 4.4) for all file/blk to invalidate we do:
//
// - try to retrieve head/bigfile/file[blk] from OS file cache(*);
......@@ -525,7 +525,7 @@ type (
ZData = zdata.ZData
ZBigFile = zdata.ZBigFile
SetI64 = set.SetI64
setI64 = set.I64
)
// Root represents root of wcfs filesystem.
......@@ -628,12 +628,11 @@ type BigFile struct {
// //
// // XXX computationally expensive to start - see "Invalidations to wcfs
// // clients are delayed ..." in notes.txt
// //go:generate ./gen-δtail I64 int64 zδtail_i64.go
// δtail *ΔTailI64 // [](rev↑, []#blk)
// blocks that were ever read-accessed (head/ only) XXX locking by bfdir.δFmu ?
// XXX = δFtail.Tracked(f) ?
accessed SetI64
accessed setI64
// inflight loadings of ZBigFile from ZODB.
// successful load results are kept here until blkdata is put into OS pagecache.
......@@ -719,7 +718,7 @@ type blkPinState struct {
err error
}
// -------- 3) Cache invariant --------
// -------- ZODB cache control --------
// zodbCacheControl implements zodb.LiveCacheControl to tune ZODB to never evict
// LOBTree/LOBucket from live cache. We want to keep LOBTree/LOBucket always alive
......@@ -727,34 +726,28 @@ type blkPinState struct {
//
// For the data itself - we put it to kernel pagecache and always deactivate
// from ZODB right after that.
//
// See "3) for */head/data the following invariant is maintained..."
type zodbCacheControl struct {}
func (_ *zodbCacheControl) PCacheClassify(obj zodb.IPersistent) zodb.PCachePolicy {
switch obj.(type) {
// ZBlk* should be in cache but without data
// don't let ZBlk*/ZData to pollute the cache
case *ZBlk0:
return zodb.PCachePinObject | zodb.PCacheDropState
return zodb.PCacheDropObject | zodb.PCacheDropState
case *ZBlk1:
return zodb.PCachePinObject | zodb.PCacheDropState
// ZBigFile btree index should be in cache with data
case *btree.LOBTree:
return zodb.PCachePinObject | zodb.PCacheKeepState
case *btree.LOBucket:
return zodb.PCachePinObject | zodb.PCacheKeepState
// don't let ZData to pollute the cache
return zodb.PCacheDropObject | zodb.PCacheDropState
case *ZData:
return zodb.PCacheDropObject | zodb.PCacheDropState
// for performance reason we also keep ZBigFile in cache.
// keep ZBigFile and its btree index in cache to speed up file data access.
//
// ZBigFile is a top-level object that is used on every block load, and
// it would be a waste to evict ZBigFile from cache.
case *ZBigFile:
return zodb.PCachePinObject | zodb.PCacheKeepState
case *btree.LOBTree:
return zodb.PCachePinObject | zodb.PCacheKeepState
case *btree.LOBucket:
return zodb.PCachePinObject | zodb.PCacheKeepState
}
return 0
......@@ -883,7 +876,7 @@ retry:
if log.V(2) {
// debug dump δF
log.Infof("\n\nS: handleδZ: δF (#%d):\n", len(δF.ByFile))
for zfile, δfile := range δF.ByFile {
for foid, δfile := range δF.ByFile {
blkv := δfile.Blocks.Elements()
sort.Slice(blkv, func(i, j int) bool {
return blkv[i] < blkv[j]
......@@ -892,19 +885,19 @@ retry:
if δfile.Size {
size = "S"
}
log.Infof("S: \t- %s\t%s %v\n", zfile.POid(), size, blkv)
log.Infof("S: \t- %s\t%s %v\n", foid, size, blkv)
}
log.Infof("\n\n")
}
wg := xsync.NewWorkGroup(ctx)
for zfile, δfile := range δF.ByFile {
for foid, δfile := range δF.ByFile {
// // XXX needed?
// // XXX even though δBtail is complete, not all ZBlk are present here
// file.δtail.Append(δF.Rev, δfile.Blocks.Elements())
// zfile was requested to be tracked -> it must be present in fileTab
file := bfdir.fileTab[zfile.POid()]
// file was requested to be tracked -> it must be present in fileTab
file := bfdir.fileTab[foid]
for blk := range δfile.Blocks {
blk := blk
wg.Go(func(ctx context.Context) error {
......@@ -922,11 +915,11 @@ retry:
//
// do it after completing data invalidations.
wg = xsync.NewWorkGroup(ctx)
for zfile, δfile := range δF.ByFile {
for foid, δfile := range δF.ByFile {
if !δfile.Size {
continue
}
file := bfdir.fileTab[zfile.POid()] // must be present
file := bfdir.fileTab[foid] // must be present
wg.Go(func(ctx context.Context) error {
return file.invalidateAttr() // NOTE does not accept ctx
})
......@@ -951,14 +944,17 @@ retry:
// 2. restat invalidated ZBigFile
// NOTE no lock needed since .blksize and .size are constant during lifetime of one txn.
// XXX -> parallel
for zfile := range δF.ByFile {
for foid := range δF.ByFile {
file := bfdir.fileTab[foid] // must be present
zfile := file.zfile
size, sizePath, err := zfile.Size(ctx)
if err != nil {
return err
}
file := bfdir.fileTab[zfile.POid()] // must be present
file.size = size
// see "3) for */head/data the following invariant is maintained..."
bfdir.δFtail.Track(zfile, -1, sizePath, nil)
// XXX we can miss a change to file if δblk is not yet tracked
......@@ -1502,6 +1498,7 @@ func (f *BigFile) readPinWatchers(ctx context.Context, blk int64, treepath []btr
// update δFtail index XXX -> move upper into readBlk ?
// (δFtail is just for δZ -> δF invalidation handling and is needed without isolation protocol)
// XXX ^^^ no - also need to query to send pins
// see "3) for */head/data the following invariant is maintained..."
bfdir := f.head.bfdir
δFtail := bfdir.δFtail
bfdir.δFmu.Lock() // XXX locking correct? XXX -> better push down?
......@@ -2205,6 +2202,7 @@ func (head *Head) bigopen(ctx context.Context, oid zodb.Oid) (_ *BigFile, err er
// only head/ needs δFtail, f.δtail and watches.
if head.rev == 0 {
// see "3) for */head/data the following invariant is maintained..."
head.bfdir.δFmu.Lock() // XXX locking ok?
head.bfdir.δFtail.Track(f.zfile, -1, sizePath, nil)
head.bfdir.δFmu.Unlock()
......@@ -2212,7 +2210,7 @@ func (head *Head) bigopen(ctx context.Context, oid zodb.Oid) (_ *BigFile, err er
// FIXME: scan zfile.blktab - so that we can detect all btree changes
// see "XXX building δFtail lazily ..." in notes.txt
f.accessed = make(SetI64)
f.accessed = make(setI64)
f.watchTab = make(map[*Watch]struct{})
}
......@@ -2392,8 +2390,8 @@ func _main() (err error) {
zhead, err := xzodb.ZOpen(ctx, zdb, &zodb.ConnOptions{
At: at0,
// we need zhead.cache to be maintained across several transactions.
// see "3) for head/bigfile/* the following invariant is maintained ..."
// preserve zhead.cache across several transactions.
// see "ZODB cache control"
NoPool: true,
})
if err != nil {
......