Commit 7602c1f4 authored by Kirill Smelkov

ΔBtail concurrency

See changes in δbtail.go for overview.

* t2+RebuildJob: (52 commits)
  .
  .
  .
  .
  .
  X Track should be nop if keycov/path is already in krebuildJobs
  .
  .
  .
  X xbtree/blib: RangedMap, RangedSet += IntersectsRange, Intersection
  X xbtree: tests: Also verify state of ΔTtail.ktrackNew
  .
  .
  .
  .
  .
  .
  .
  .
  .
  ...
parents 57be0126 c4e16704
......@@ -11,7 +11,7 @@ require (
github.com/stretchr/objx v0.3.0 // indirect
github.com/stretchr/testify v1.7.0
lab.nexedi.com/kirr/go123 v0.0.0-20210906140734-c9eb28d9e408
lab.nexedi.com/kirr/neo/go v0.0.0-20210908100526-87199da2b163
lab.nexedi.com/kirr/neo/go v0.0.0-20211004111643-c74a5a3cd0d0
)
// we use kirr/go-fuse@y/nodefs-cancel
......
......@@ -214,3 +214,5 @@ lab.nexedi.com/kirr/neo/go v0.0.0-20210720105030-d99bf118d61a h1:ex8P5oGhvDDp4y3
lab.nexedi.com/kirr/neo/go v0.0.0-20210720105030-d99bf118d61a/go.mod h1:llI3hcJJMACe+rYuXUfS5dljjwIrlBMfJ1ZeRcey96A=
lab.nexedi.com/kirr/neo/go v0.0.0-20210908100526-87199da2b163 h1:0HTNfLHL2ZNmfETtlF0iFPpWfuAAjzfIkxL5r6x2ALE=
lab.nexedi.com/kirr/neo/go v0.0.0-20210908100526-87199da2b163/go.mod h1:llI3hcJJMACe+rYuXUfS5dljjwIrlBMfJ1ZeRcey96A=
lab.nexedi.com/kirr/neo/go v0.0.0-20211004111643-c74a5a3cd0d0 h1:rmfVDj/IaTiMUFAXTKyW993f1G5IxKcZ1vtcrrqscpk=
lab.nexedi.com/kirr/neo/go v0.0.0-20211004111643-c74a5a3cd0d0/go.mod h1:llI3hcJJMACe+rYuXUfS5dljjwIrlBMfJ1ZeRcey96A=
......@@ -36,7 +36,8 @@ type Node = btree.LONode
type TreeEntry = btree.LOEntry
type BucketEntry = btree.LOBucketEntry
type Key = int64
type Key = int64
type KeyRange = btree.LKeyRange
const KeyMax Key = math.MaxInt64
const KeyMin Key = math.MinInt64
......
......@@ -26,10 +26,27 @@ VALUE=$2
out=$3
input=$(dirname $0)/rangemap.go.in
blib=$(cd $(dirname $0) && go list) # fullpath for blib package
curr=$(go list) # ----//---- current package
pkgname=$(go list -f {{.Name}}) # name of current package
echo "// Code generated by gen-rangemap $TYPE $VALUE; DO NOT EDIT." >$out
echo >>$out
# fiximports adjusts rangemap.go code to work outside of the blib package.
fiximports() {
if [ "$curr" == "$blib" ]; then
cat
return
fi
sed \
-e "/package blib/a \\\\nimport \"$blib\"\\n" \
-e "s/package blib/package $pkgname/g" \
-e 's/\([^\w.]\)KeyRange\b/\1blib.KeyRange/g' \
-e 's/\bKStr\b/blib.KStr/g'
}
sed \
-e "s/VALUE/$VALUE/g" \
-e "s/\bRangedMap\b/${TYPE}/g" \
......@@ -40,4 +57,4 @@ sed \
-e "s/\btraceRangeMap\b/trace${TYPE}/g" \
-e "s/\bdebugRangeMap\b/debug${TYPE}/g" \
-e "s/\bdebugfRMap\b/debugf${TYPE}/g" \
$input >>$out
$input |fiximports >>$out
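Aside, not part of this commit: the two trickiest sed rules in fiximports qualify KeyRange and KStr with the blib package prefix. Below is a rough stand-alone approximation of just that qualification step, written in Go for illustration; the sample input line is made up, and the real script additionally rewrites the package clause and inserts the blib import.

package main

import (
	"fmt"
	"regexp"
)

func main() {
	// hypothetical excerpt as it could appear in rangemap.go.in
	in := `func (M *RangedMap) Get_(k Key) (v VALUE, r KeyRange, ok bool) { _ = KStr(k) }`

	// approximates: -e 's/\([^\w.]\)KeyRange\b/\1blib.KeyRange/g'
	out := regexp.MustCompile(`([^\w.])KeyRange\b`).ReplaceAllString(in, `${1}blib.KeyRange`)
	// approximates: -e 's/\bKStr\b/blib.KStr/g'
	out = regexp.MustCompile(`\bKStr\b`).ReplaceAllString(out, `blib.KStr`)

	fmt.Println(out) // ... r blib.KeyRange ... blib.KStr(k) ...
}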
// Copyright (C) 2021 Nexedi SA and Contributors.
// Kirill Smelkov <kirr@nexedi.com>
//
// This program is free software: you can Use, Study, Modify and Redistribute
// it under the terms of the GNU General Public License version 3, or (at your
// option) any later version, as published by the Free Software Foundation.
//
// You can also Link and Combine this program with other software covered by
// the terms of any of the Free Software licenses or any of the Open Source
// Initiative approved licenses and Convey the resulting work. Corresponding
// source of such a combination shall include the source code for all other
// software used.
//
// This program is distributed WITHOUT ANY WARRANTY; without even the implied
// warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
//
// See COPYING file for full licensing terms.
// See https://www.nexedi.com/licensing for rationale and options.
package blib
// range of keys.
import (
"fmt"
)
// KeyRange represents [lo,hi) Key range.
type KeyRange struct {
Lo Key
Hi_ Key // NOTE _not_ hi) to avoid overflow at ∞; hi = hi_ + 1
}
// Has returns whether key k belongs to the range.
func (r *KeyRange) Has(k Key) bool {
return (r.Lo <= k && k <= r.Hi_)
}
// Empty returns whether key range is empty.
func (r *KeyRange) Empty() bool {
hi := r.Hi_
if hi == KeyMax {
// [x,∞] cannot be empty because max x is ∞ and [∞,∞] has one element: ∞
return false
}
hi++ // no overflow
return r.Lo >= hi
}
func (r KeyRange) String() string {
var shi string
if r.Hi_ == KeyMax {
shi = KStr(r.Hi_) // ∞
} else {
shi = fmt.Sprintf("%d", r.Hi_+1)
}
return fmt.Sprintf("[%s,%s)", KStr(r.Lo), shi)
}
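As a reading aid, not part of the diff: a minimal sketch of how this [lo,hi_] representation behaves at the boundaries, written as a hypothetical in-package test. It assumes only the KeyRange, KeyMin and KeyMax definitions shown above.

package blib

import "testing"

// TestKeyRangeSketch illustrates that Hi_ is inclusive, so the full
// [-∞,∞) range is representable without overflowing Key.
func TestKeyRangeSketch(t *testing.T) {
	all := KeyRange{Lo: KeyMin, Hi_: KeyMax}
	if !all.Has(KeyMax) || all.Empty() {
		t.Fatal("[-∞,∞) must contain ∞ and be non-empty")
	}

	empty := KeyRange{Lo: 0, Hi_: -1} // the explicit empty range used e.g. by Get_
	if empty.Has(0) || !empty.Empty() {
		t.Fatal("{Lo:0, Hi_:-1} must be empty")
	}

	r := KeyRange{Lo: 1, Hi_: 2} // denotes [1,3)
	if !(r.Has(1) && r.Has(2) && !r.Has(3)) {
		t.Fatal("[1,3) must contain 1 and 2, but not 3")
	}
}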
......@@ -46,9 +46,11 @@ type RangedMapEntry struct {
// Get returns value associated with key k.
func (M *RangedMap) Get(k Key) VALUE {
v, _ := M.Get_(k)
return v
//
// KeyRange indicates all keys adjacent to k that are also mapped to the same value.
func (M *RangedMap) Get(k Key) (VALUE, KeyRange) {
v, r, _ := M.Get_(k)
return v, r
}
// Set changes M to map key k to value v.
......@@ -63,20 +65,21 @@ func (M *RangedMap) Del(k Key) {
// Has returns whether key k is present in the map.
func (M *RangedMap) Has(k Key) bool {
_, ok := M.Get_(k)
_, _, ok := M.Get_(k)
return ok
}
// Get_ is comma-ok version of Get.
func (M *RangedMap) Get_(k Key) (v VALUE, ok bool) {
func (M *RangedMap) Get_(k Key) (v VALUE, r KeyRange, ok bool) {
r = KeyRange{0,-1} // empty range (the KeyRange zero value would represent non-empty [0,1))
if traceRangeMap {
fmt.Printf("\n\nGet_:\n")
fmt.Printf(" M: %s\n", M)
fmt.Printf(" k: %s\n", KStr(k))
defer func() {
fmt.Printf("->·: %v, %t\n", v, ok)
fmt.Printf("->·: %v%s, %t\n", v, r, ok)
}()
}
......@@ -99,7 +102,7 @@ func (M *RangedMap) Get_(k Key) (v VALUE, ok bool) {
}
// found
return e.Value, true
return e.Value, e.KeyRange, true
}
// SetRange changes M to map key range r to value v.
......@@ -359,6 +362,40 @@ func (M *RangedMap) HasRange(r KeyRange) (yes bool) {
}
}
// IntersectsRange returns whether some keys from range r belong to the map.
func (M *RangedMap) IntersectsRange(r KeyRange) (yes bool) {
if traceRangeMap {
fmt.Printf("\n\nIntersectsRange:\n")
fmt.Printf(" M: %s\n", M)
fmt.Printf(" r: %s\n", r)
defer func() {
fmt.Printf("->·: %v\n", yes)
}()
}
M.verify()
if r.Empty() {
return false
}
// find first ilo: r.lo < [ilo].hi
l := len(M.entryv)
ilo := sort.Search(l, func(i int) bool {
return r.Lo <= M.entryv[i].Hi_
})
debugfRMap("\tilo: %d\n", ilo)
if ilo == l { // not found
return false
}
// [ilo].hi may be either inside r (≤ r.hi), or > r.hi
// - if it is inside -> overlap is there,
// - if it is > r.hi -> overlap is there if [ilo].lo < r.hi
// => in any case overlap is there if [ilo].lo < r.hi
return M.entryv[ilo].Lo <= r.Hi_
}
// --------
......
......@@ -38,14 +38,15 @@ const (
func TestRangedMap(t *testing.T) {
type testEntry struct {
M *RangedMap
X RangedMapEntry
Set *RangedMap // M.SetRange(X.keycov, X.value)
Del *RangedMap // M.DelRange(X.keycov)
Has bool // M.HasRange(X.keycov)
M *RangedMap
X RangedMapEntry
Set *RangedMap // M.SetRange(X.keycov, X.value)
Del *RangedMap // M.DelRange(X.keycov)
Has bool // M.HasRange(X.keycov)
Intersects bool // M.IntersectsRange(X.keycov)
}
E := func(M *RangedMap, X RangedMapEntry, S, D *RangedMap, H bool) testEntry {
return testEntry{M, X, S, D, H}
E := func(M *RangedMap, X RangedMapEntry, S, D *RangedMap, H, I bool) testEntry {
return testEntry{M, X, S, D, H, I}
}
// M is shorthand to create RangedMap, e.g. M(1,2,a, 3,4,b) will return {[1,2):a [3,4):b}.
......@@ -101,7 +102,8 @@ func TestRangedMap(t *testing.T) {
X(0,0,x), // X
M(), // Set
M(), // Del
y), // Has
y, // Has
n), // Intersects
// empty vs !empty
E(
......@@ -109,7 +111,8 @@ func TestRangedMap(t *testing.T) {
X(1,2,x), // X
M(1,2,x), // Set
M(), // Del
n), // Has
n, // Has
n), // Intersects
// !empty vs empty
E(
......@@ -117,7 +120,8 @@ func TestRangedMap(t *testing.T) {
X(0,0,x), // X
M(1,2,a), // Set
M(1,2,a), // Del
y), // Has
y, // Has
n), // Intersects
// basic change
E(
......@@ -125,7 +129,8 @@ func TestRangedMap(t *testing.T) {
X(1,2,x), // X
M(1,2,x), // Set
M(), // Del
y), // Has
y, // Has
y), // Intersects
// adjacent [1,3) [3,5)
E(
......@@ -133,7 +138,8 @@ func TestRangedMap(t *testing.T) {
X(3,5,x), // X
M(1,3,a, 3,5,x), // Set
M(1,3,a), // Del
n), // Has
n, // Has
n), // Intersects
// overlapping [1,3) [2,4)
E(
......@@ -141,7 +147,8 @@ func TestRangedMap(t *testing.T) {
X(2,4,x), // X
M(1,2,a, 2,4,x), // Set
M(1,2,a), // Del
n), // Has
n, // Has
y), // Intersects
// [1,7) vs [3,5) -> split
E(
......@@ -149,7 +156,8 @@ func TestRangedMap(t *testing.T) {
X(3,5,x), // X
M(1,3,a, 3,5,x, 5,7,a), // Set
M(1,3,a, 5,7,a), // Del
y), // Has
y, // Has
y), // Intersects
// several ranges vs [-∞,∞)
E(
......@@ -157,14 +165,16 @@ func TestRangedMap(t *testing.T) {
X(noo,oo,x), // X
M(noo,oo,x), // Set
M(), // Del
n), // Has
n, // Has
y), // Intersects
E(
M(1,2,a, 2,3,b), // M
X(1,3,x), // X
M(1,3,x), // Set
M(), // Del
y), // Has
y, // Has
y), // Intersects
// coalesce (same value, no overlap)
E(
......@@ -172,7 +182,8 @@ func TestRangedMap(t *testing.T) {
X(2,4,a), // X
M(1,5,a), // Set
M(1,2,a, 4,5,a), // Del
n), // Has
n, // Has
n), // Intersects
// coalesce (same value, overlap)
E(
......@@ -180,7 +191,8 @@ func TestRangedMap(t *testing.T) {
X(2,6,a), // X
M(1,8,a), // Set
M(1,2,a, 6,8,a), // Del
n), // Has
n, // Has
y), // Intersects
// - shrink left/right (value !same) + new entry
E(
......@@ -188,19 +200,22 @@ func TestRangedMap(t *testing.T) {
X(2,6,x), // X
M(1,2,a, 2,6,x), // Set
M(1,2,a), // Del
n), // Has
n, // Has
y), // Intersects
E(
M(5,8,b), // M
X(2,6,x), // X
M(2,6,x, 6,8,b), // Set
M( 6,8,b), // Del
n), // Has
n, // Has
y), // Intersects
E(
M(1,4,a, 5,8,b), // M
X(2,6,x), // X
M(1,2,a, 2,6,x, 6,8,b), // Set
M(1,2,a, 6,8,b), // Del
n), // Has
M(1,2,a, 6,8,b), // Del
n, // Has
y), // Intersects
}
for _, tt := range testv {
......@@ -210,6 +225,7 @@ func TestRangedMap(t *testing.T) {
v := X.Value
assertMapHasRange(t, M, r, tt.Has)
assertMapIntersectsRange(t, M, r, tt.Intersects)
Mset := M.Clone()
Mdel := M.Clone()
Mset.SetRange(r, v)
......@@ -226,11 +242,9 @@ func TestRangedMap(t *testing.T) {
}
assertMapHasRange(t, Mset, r, true)
rInMdel := false
if r.Empty() {
rInMdel = true
}
assertMapHasRange(t, Mdel, r, rInMdel)
assertMapHasRange(t, Mdel, r, r.Empty())
assertMapIntersectsRange(t, Mset, r, !r.Empty())
assertMapIntersectsRange(t, Mdel, r, false)
verifyGet(t, M)
verifyGet(t, Mset)
......@@ -238,7 +252,7 @@ func TestRangedMap(t *testing.T) {
}
}
// assertMapHasRange asserts that RangeMap M.HasRange(r) == hasOK.
// assertMapHasRange asserts that RangedMap M.HasRange(r) == hasOK.
func assertMapHasRange(t *testing.T, M *RangedMap, r KeyRange, hasOK bool) {
t.Helper()
has := M.HasRange(r)
......@@ -247,6 +261,15 @@ func assertMapHasRange(t *testing.T, M *RangedMap, r KeyRange, hasOK bool) {
}
}
// assertMapIntersectsRange asserts that RangedMap M.IntersectsRange(r) == intersectsOK.
func assertMapIntersectsRange(t *testing.T, M *RangedMap, r KeyRange, intersectsOK bool) {
t.Helper()
intersects := M.IntersectsRange(r)
if !(intersects == intersectsOK) {
t.Errorf("IntersectsRange:\n M: %s\n r: %s\n ->·: %t\n ok·: %t\n", M, r, intersects, intersectsOK)
}
}
// verifyGet verifies RangedMap.Get .
func verifyGet(t *testing.T, M *RangedMap) {
t.Helper()
......@@ -260,10 +283,10 @@ func verifyGet(t *testing.T, M *RangedMap) {
lo := kmax(e.Lo, Z.Lo)
hi_ := kmin(e.Hi_, Z.Hi_)
for k := lo; k <= hi_; k++ {
v, ok := M.Get_(k)
if !(v == e.Value && ok) {
t.Errorf("%s\tGet(%s):\nhave: %q, %t\nwant: %q, true",
M, KStr(k), v, ok, e.Value)
v, r, ok := M.Get_(k)
if !(v == e.Value && r == e.KeyRange && ok) {
t.Errorf("%s\tGet(%s):\nhave: %q%s, %t\nwant: %q%s, true",
M, KStr(k), v, r, ok, e.Value, e.KeyRange)
}
}
}
......@@ -280,10 +303,10 @@ func verifyGet(t *testing.T, M *RangedMap) {
lo := kmax(r.Lo, Z.Lo)
hi_ := kmin(r.Hi_, Z.Hi_)
for k := lo; k <= hi_; k++ {
v, ok := M.Get_(k)
if !(v == "" && !ok) {
t.Errorf("%s\tGet(%s):\nhave: %q, %t\nwant: %q, false",
M, KStr(k), v, ok, "")
v, r_, ok := M.Get_(k)
if !(v == "" && r_.Empty() && !ok) {
t.Errorf("%s\tGet(%s):\nhave: %q%s, %t\nwant: %q[), false",
M, KStr(k), v, r_, ok, "")
}
}
}
......
......@@ -63,10 +63,15 @@ func (S *RangedKeySet) DelRange(r KeyRange) {
}
// HasRange returns whether all keys from range r belong to the set.
func (S *RangedKeySet) HasRange(r KeyRange) (yes bool) {
func (S *RangedKeySet) HasRange(r KeyRange) bool {
return S.m.HasRange(r)
}
// IntersectsRange returns whether some keys from range r belong to the set.
func (S *RangedKeySet) IntersectsRange(r KeyRange) bool {
return S.m.IntersectsRange(r)
}
// Union returns RangedKeySet(A.keys | B.keys).
func (A *RangedKeySet) Union(B *RangedKeySet) *RangedKeySet {
......@@ -82,7 +87,12 @@ func (A *RangedKeySet) Difference(B *RangedKeySet) *RangedKeySet {
return D
}
// TODO Intersection
// Intersection returns RangedKeySet(A.keys ^ B.keys).
func (A *RangedKeySet) Intersection(B *RangedKeySet) *RangedKeySet {
I := A.Clone()
I.IntersectionInplace(B)
return I
}
func (A *RangedKeySet) UnionInplace(B *RangedKeySet) {
A.verify()
......@@ -109,6 +119,21 @@ func (A *RangedKeySet) DifferenceInplace(B *RangedKeySet) {
}
}
func (A *RangedKeySet) IntersectionInplace(B *RangedKeySet) {
A.verify()
B.verify()
defer A.verify()
// XXX very dumb
// A^B = (A∪B) \ (A\B ∪ B\A)
AdB := A.Difference(B)
BdA := B.Difference(A)
ddd := AdB
ddd.UnionInplace(BdA)
A.UnionInplace(B)
A.DifferenceInplace(ddd)
}
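Aside, not from this commit: IntersectionInplace leans on the set identity A∩B = (A∪B) \ ((A\B) ∪ (B\A)). A tiny self-contained check of that identity with plain Go map-sets (deliberately not using the RangedKeySet API):

package main

import (
	"fmt"
	"reflect"
)

type set map[int]bool

func union(a, b set) set {
	r := set{}
	for k := range a {
		r[k] = true
	}
	for k := range b {
		r[k] = true
	}
	return r
}

func diff(a, b set) set {
	r := set{}
	for k := range a {
		if !b[k] {
			r[k] = true
		}
	}
	return r
}

func intersect(a, b set) set {
	r := set{}
	for k := range a {
		if b[k] {
			r[k] = true
		}
	}
	return r
}

func main() {
	A := set{1: true, 2: true, 5: true, 6: true}
	B := set{2: true, 3: true, 6: true}

	direct := intersect(A, B)                                       // A∩B
	viaIdentity := diff(union(A, B), union(diff(A, B), diff(B, A))) // (A∪B) \ (A\B ∪ B\A)

	fmt.Println(reflect.DeepEqual(direct, viaIdentity)) // true; both are {2, 6}
}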
// --------
......@@ -152,6 +177,6 @@ func (S *RangedKeySet) AllRanges() /*readonly*/[]KeyRange {
}
func (S RangedKeySet) String() string {
// RangeMap<void> supports formatting for set out of the box
// RangedMap<void> supports formatting for set out of the box
return S.m.String()
}
......@@ -44,12 +44,13 @@ func TestRangedKeySetTypes(t *testing.T) {
func TestRangedKeySet(t *testing.T) {
type testEntry struct {
A, B *RangedKeySet
Union *RangedKeySet
Difference *RangedKeySet
A, B *RangedKeySet
Union *RangedKeySet
Difference *RangedKeySet
Intersection *RangedKeySet
}
E := func(A, B, U, D *RangedKeySet) testEntry {
return testEntry{A, B, U, D}
E := func(A, B, U, D, I *RangedKeySet) testEntry {
return testEntry{A, B, U, D, I}
}
// S is shorthand to create RangedKeySet, e.g. S(1,2, 4,5) will return {[1,2) [4,5)}
......@@ -81,68 +82,78 @@ func TestRangedKeySet(t *testing.T) {
S(), // A
S(), // B
S(), // U
S()), // D
S(), // D
S()), // I
E(
S(), // A
S(1,2), // B
S(1,2), // U
S()), // D
S(), // D
S()), // I
E(
S(1,2), // A
S(), // B
S(1,2), // U
S(1,2)),// D
S(1,2), // D
S()), // I
E(
S(1,2), // A
S(1,2), // B
S(1,2), // U
S()), // D
S(), // D
S(1,2)),// I
// adjacent [1,3) [3,5)
E(
S(1,3), // A
S(3,5), // B
S(1,5), // U
S(1,3)), // D
S(1,5), // U
S(1,3), // D
S()), // I
// overlapping [1,3) [2,4)
E(
S(1,3), // A
S(2,4), // B
S(1,4), // U
S(1,2)), // D
S(1,4), // U
S(1,2), // D
S(2,3)),// I
// [1,7) \ [3,5) -> [1,3) [5,7)
E(
S(1,7), // A
S(3,5), // B
S(1,7),
S(1,3, 5,7)),
S(1,7), // U
S(1,3, 5,7), // D
S(3,5)), // I
// several ranges \ [-∞,∞) -> ø
E(
S(1,3, 5,7, 11,100), // A
S(noo, oo), // B
S(noo, oo), // U
S()), // D
S(noo, oo), // U
S(), // D
S(1,3, 5,7, 11,100)), // I
// [1,3) [5,7) + insert [3,5) -> [1,7)
E(
S(1,3, 5,7), // A
S(3,5), // B
S(1,7), // U
S(1,3, 5,7)), // D
S(1,3, 5,7), // D
S()), // I
// delete covering several ranges
// [-1,0) [1,3) [5,7) [9,11) [15,20) [100,200) \ [2,17)
E(
S(-1,0, 1,3, 5,7, 9,11, 15,20, 100,200), // A
S(2,17), // B
S(-1,0, 1,3, 5,7, 9,11, 15,20, 100,200),// A
S(2,17), // B
S(-1,0, 1,20, 100,200), // U
S(-1,0, 1,2, 17,20, 100,200)), // D
S(-1,0, 1,2, 17,20, 100,200), // D
S(2,3, 5,7, 9,11, 15,17)), // I
}
for _, tt := range testv {
......@@ -150,6 +161,7 @@ func TestRangedKeySet(t *testing.T) {
B := tt.B
U := A.Union(B)
D := A.Difference(B)
I := A.Intersection(B)
if !U.Equal(tt.Union) {
t.Errorf("Union:\n A: %s\n B: %s\n ->u: %s\n okU: %s\n", A, B, U, tt.Union)
......@@ -157,12 +169,18 @@ func TestRangedKeySet(t *testing.T) {
if !D.Equal(tt.Difference) {
t.Errorf("Difference:\n A: %s\n B: %s\n ->d: %s\n okD: %s\n", A, B, D, tt.Difference)
}
if !I.Equal(tt.Intersection) {
t.Errorf("Intersection:\n A: %s\n B: %s\n ->i: %s\n okI: %s\n", A, B, I, tt.Intersection)
}
// HasRange
assertSetHasRanges(t, A, A.AllRanges(), true)
assertSetHasRanges(t, B, B.AllRanges(), true)
assertSetHasRanges(t, U, A.AllRanges(), true)
assertSetHasRanges(t, U, B.AllRanges(), true)
assertSetHasRanges(t, A, I.AllRanges(), true)
assertSetHasRanges(t, B, I.AllRanges(), true)
assertSetHasRanges(t, U, I.AllRanges(), true)
Dab := D
Dba := B.Difference(A)
......@@ -170,6 +188,20 @@ func TestRangedKeySet(t *testing.T) {
assertSetHasRanges(t, B, Dab.AllRanges(), false)
assertSetHasRanges(t, B, Dba.AllRanges(), true)
assertSetHasRanges(t, A, Dba.AllRanges(), false)
assertSetHasRanges(t, Dab, I.AllRanges(), false)
assertSetHasRanges(t, Dba, I.AllRanges(), false)
assertSetHasRanges(t, I, Dab.AllRanges(), false)
assertSetHasRanges(t, I, Dba.AllRanges(), false)
// IntersectsRange (= (A^B)!=ø)
assertSetIntersectsRanges(t, A, I.AllRanges(), !I.Empty())
assertSetIntersectsRanges(t, B, I.AllRanges(), !I.Empty())
assertSetIntersectsRanges(t, Dab, B.AllRanges(), false)
assertSetIntersectsRanges(t, Dba, A.AllRanges(), false)
assertSetIntersectsRanges(t, Dab, I.AllRanges(), false)
assertSetIntersectsRanges(t, Dba, I.AllRanges(), false)
assertSetIntersectsRanges(t, I, Dab.AllRanges(), false)
assertSetIntersectsRanges(t, I, Dba.AllRanges(), false)
}
}
......@@ -183,3 +215,14 @@ func assertSetHasRanges(t *testing.T, S *RangedKeySet, rangev []KeyRange, hasOK
}
}
}
// assertSetIntersectsRanges asserts for all ranges from rangev that RangedSet S.IntersectsRange(r) == intersectsOK.
func assertSetIntersectsRanges(t *testing.T, S *RangedKeySet, rangev []KeyRange, intersectsOK bool) {
t.Helper()
for _, r := range rangev {
intersects := S.IntersectsRange(r)
if intersects != intersectsOK {
t.Errorf("IntersectsRange:\n S: %s\n r: %s\n ->: %v\n ok: %v\n", S, r, intersects, intersectsOK)
}
}
}
......@@ -48,9 +48,11 @@ type _RangedMap_strEntry struct {
// Get returns value associated with key k.
func (M *_RangedMap_str) Get(k Key) string {
v, _ := M.Get_(k)
return v
//
// KeyRange indicates all keys adjacent to k that are also mapped to the same value.
func (M *_RangedMap_str) Get(k Key) (string, KeyRange) {
v, r, _ := M.Get_(k)
return v, r
}
// Set changes M to map key k to value v.
......@@ -65,20 +67,21 @@ func (M *_RangedMap_str) Del(k Key) {
// Has returns whether key k is present in the map.
func (M *_RangedMap_str) Has(k Key) bool {
_, ok := M.Get_(k)
_, _, ok := M.Get_(k)
return ok
}
// Get_ is comma-ok version of Get.
func (M *_RangedMap_str) Get_(k Key) (v string, ok bool) {
func (M *_RangedMap_str) Get_(k Key) (v string, r KeyRange, ok bool) {
r = KeyRange{0,-1} // empty range (the KeyRange zero value would represent non-empty [0,1))
if trace_RangedMap_str {
fmt.Printf("\n\nGet_:\n")
fmt.Printf(" M: %s\n", M)
fmt.Printf(" k: %s\n", KStr(k))
defer func() {
fmt.Printf("->·: %v, %t\n", v, ok)
fmt.Printf("->·: %v%s, %t\n", v, r, ok)
}()
}
......@@ -101,7 +104,7 @@ func (M *_RangedMap_str) Get_(k Key) (v string, ok bool) {
}
// found
return e.Value, true
return e.Value, e.KeyRange, true
}
// SetRange changes M to map key range r to value v.
......@@ -361,6 +364,40 @@ func (M *_RangedMap_str) HasRange(r KeyRange) (yes bool) {
}
}
// IntersectsRange returns whether some keys from range r belong to the map.
func (M *_RangedMap_str) IntersectsRange(r KeyRange) (yes bool) {
if trace_RangedMap_str {
fmt.Printf("\n\nIntersectsRange:\n")
fmt.Printf(" M: %s\n", M)
fmt.Printf(" r: %s\n", r)
defer func() {
fmt.Printf("->·: %v\n", yes)
}()
}
M.verify()
if r.Empty() {
return false
}
// find first ilo: r.lo < [ilo].hi
l := len(M.entryv)
ilo := sort.Search(l, func(i int) bool {
return r.Lo <= M.entryv[i].Hi_
})
debugf_RangedMap_str("\tilo: %d\n", ilo)
if ilo == l { // not found
return false
}
// [ilo].hi may be either inside r (≤ r.hi), or > r.hi
// - if it is inside -> overlap is there,
// - if it is > r.hi -> overlap is there if [ilo].lo < r.hi
// => in any case overlap is there if [ilo].lo < r.hi
return M.entryv[ilo].Lo <= r.Hi_
}
// --------
......
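For illustration only, not part of the diff: a hedged sketch, as it might appear in a hypothetical in-package test, of the two API changes above: Get_ now also returns the covering KeyRange, and IntersectsRange answers "some keys present" where HasRange answers "all keys present". It uses the string-specialized _RangedMap_str and assumes its zero value is a valid empty map.

package blib

import "testing"

func TestRangedMapStrSketch(t *testing.T) {
	M := &_RangedMap_str{}
	M.SetRange(KeyRange{Lo: 1, Hi_: 2}, "a") // M = {[1,3):a}

	// Get_ reports the value and the whole adjacent range mapped to it.
	v, r, ok := M.Get_(2)
	if !(ok && v == "a" && r == (KeyRange{Lo: 1, Hi_: 2})) {
		t.Fatalf("Get_(2) -> %q %s %v", v, r, ok)
	}

	q := KeyRange{Lo: 2, Hi_: 4} // [2,5): only [2,3) of it is in M
	if M.HasRange(q) {
		t.Fatal("HasRange([2,5)) must be false: not all keys are present")
	}
	if !M.IntersectsRange(q) {
		t.Fatal("IntersectsRange([2,5)) must be true: [2,3) overlaps")
	}
}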
......@@ -48,9 +48,11 @@ type _RangedMap_voidEntry struct {
// Get returns value associated with key k.
func (M *_RangedMap_void) Get(k Key) void {
v, _ := M.Get_(k)
return v
//
// KeyRange indicates all keys adjacent to k that are also mapped to the same value.
func (M *_RangedMap_void) Get(k Key) (void, KeyRange) {
v, r, _ := M.Get_(k)
return v, r
}
// Set changes M to map key k to value v.
......@@ -65,20 +67,21 @@ func (M *_RangedMap_void) Del(k Key) {
// Has returns whether key k is present in the map.
func (M *_RangedMap_void) Has(k Key) bool {
_, ok := M.Get_(k)
_, _, ok := M.Get_(k)
return ok
}
// Get_ is comma-ok version of Get.
func (M *_RangedMap_void) Get_(k Key) (v void, ok bool) {
func (M *_RangedMap_void) Get_(k Key) (v void, r KeyRange, ok bool) {
r = KeyRange{0,-1} // empty range (the KeyRange zero value would represent non-empty [0,1))
if trace_RangedMap_void {
fmt.Printf("\n\nGet_:\n")
fmt.Printf(" M: %s\n", M)
fmt.Printf(" k: %s\n", KStr(k))
defer func() {
fmt.Printf("->·: %v, %t\n", v, ok)
fmt.Printf("->·: %v%s, %t\n", v, r, ok)
}()
}
......@@ -101,7 +104,7 @@ func (M *_RangedMap_void) Get_(k Key) (v void, ok bool) {
}
// found
return e.Value, true
return e.Value, e.KeyRange, true
}
// SetRange changes M to map key range r to value v.
......@@ -361,6 +364,40 @@ func (M *_RangedMap_void) HasRange(r KeyRange) (yes bool) {
}
}
// IntersectsRange returns whether some keys from range r belong to the map.
func (M *_RangedMap_void) IntersectsRange(r KeyRange) (yes bool) {
if trace_RangedMap_void {
fmt.Printf("\n\nIntersectsRange:\n")
fmt.Printf(" M: %s\n", M)
fmt.Printf(" r: %s\n", r)
defer func() {
fmt.Printf("->·: %v\n", yes)
}()
}
M.verify()
if r.Empty() {
return false
}
// find first ilo: r.lo < [ilo].hi
l := len(M.entryv)
ilo := sort.Search(l, func(i int) bool {
return r.Lo <= M.entryv[i].Hi_
})
debugf_RangedMap_void("\tilo: %d\n", ilo)
if ilo == l { // not found
return false
}
// [ilo].hi may be either inside r (≤ r.hi), or > r.hi
// - if it is inside -> overlap is there,
// - if it is > r.hi -> overlap is there if [ilo].lo < r.hi
// => in any case overlap is there if [ilo].lo < r.hi
return M.entryv[ilo].Lo <= r.Hi_
}
// --------
......
......@@ -46,9 +46,10 @@ type Node = blib.Node
type TreeEntry = blib.TreeEntry
type BucketEntry = blib.BucketEntry
type Key = blib.Key
const KeyMax = blib.KeyMax
const KeyMin = blib.KeyMin
type Key = blib.Key
type KeyRange = blib.KeyRange
const KeyMax = blib.KeyMax
const KeyMin = blib.KeyMin
// value is assumed to be persistent reference.
// deletion is represented as VDEL.
......
......@@ -36,55 +36,18 @@ import (
"lab.nexedi.com/nexedi/wendelin.core/wcfs/internal/zdata"
)
type Tree = xbtreetest.Tree
type Node = xbtreetest.Node
type Key = xbtreetest.Key
type Tree = xbtreetest.Tree
type Node = xbtreetest.Node
type Key = xbtreetest.Key
type KeyRange = xbtreetest.KeyRange
type ZBlk = zdata.ZBlk
// ztreeGetBlk returns ztree[k] and tree path that lead to this block.
// XXX +return blkRevMax and use it ?
func ztreeGetBlk(ctx context.Context, ztree *Tree, k Key) (zblk ZBlk, ok bool, path []Node, err error) {
path = []Node{}
xzblk, ok, err := ztree.VGet(ctx, k, func(node Node) {
path = append(path, node)
})
if err != nil {
return nil, false, nil, err
}
if ok {
zblk, ok = xzblk.(ZBlk)
if !ok {
return nil, false, nil, fmt.Errorf("expect ZBlk*; got %s", xzodb.TypeOf(xzblk)) // XXX errctx
}
}
return zblk, ok, path, nil
}
func init() {
xbtreetest.ZTreeGetBlkData = _ZTreeGetBlkData
xbtreetest.ZGetBlkData = _ZGetBlkData
xbtreetest.ZGetBlkData = _ZGetBlkData
}
// _ZTreeGetBlkData returns block data from block pointed to by ztree[k].
func _ZTreeGetBlkData(ctx context.Context, ztree *Tree, k Key) (data string, ok bool, path []Node, err error) {
defer xerr.Contextf(&err, "@%s: tree<%s>: get blkdata from [%d]", ztree.PJar().At(), ztree.POid(), k)
zblk, ok, path, err := ztreeGetBlk(ctx, ztree, k)
if err != nil || !ok {
return "", ok, path, err
}
bdata, _, err := zblk.LoadBlkData(ctx)
if err != nil {
return "", false, nil, err
}
return string(bdata), true, path, nil
}
// _ZGetBlkData loads block data from ZBlk object specified by its oid.
func _ZGetBlkData(ctx context.Context, zconn *zodb.Connection, zblkOid zodb.Oid) (data string, err error) {
......
......@@ -36,9 +36,10 @@ type Node = blib.Node
type TreeEntry = blib.TreeEntry
type BucketEntry = blib.BucketEntry
type Key = blib.Key
const KeyMax = blib.KeyMax
const KeyMin = blib.KeyMin
type Key = blib.Key
type KeyRange = blib.KeyRange
const KeyMax = blib.KeyMax
const KeyMin = blib.KeyMin
type setKey = set.I64
......
......@@ -25,19 +25,17 @@ import (
"lab.nexedi.com/kirr/go123/exc"
"lab.nexedi.com/kirr/neo/go/transaction"
"lab.nexedi.com/kirr/neo/go/zodb"
_ "lab.nexedi.com/kirr/neo/go/zodb/wks"
)
// ZBlk-related functions are imported at runtime by package xbtreetest/init
var (
ZTreeGetBlkData func(context.Context, *Tree, Key) (string, bool, []Node, error)
ZGetBlkData func(context.Context, *zodb.Connection, zodb.Oid) (string, error)
ZGetBlkData func(context.Context, *zodb.Connection, zodb.Oid) (string, error)
)
func zassertInitDone() {
if ZTreeGetBlkData == nil {
if ZGetBlkData == nil {
panic("xbtreetest/zdata not initialized -> import xbtreetest/init to fix")
}
}
......@@ -54,15 +52,3 @@ func xzgetBlkData(ctx context.Context, zconn *zodb.Connection, zblkOid zodb.Oid)
data, err := ZGetBlkData(ctx, zconn, zblkOid); X(err)
return string(data)
}
// xzgetBlkDataAt loads block data from ZBlk object specified by oid@at.
func xzgetBlkDataAt(db *zodb.DB, zblkOid zodb.Oid, at zodb.Tid) string {
zassertInitDone()
X := exc.Raiseif
txn, ctx := transaction.New(context.Background())
defer txn.Abort()
zconn, err := db.Open(ctx, &zodb.ConnOptions{At: at}); X(err)
return xzgetBlkData(ctx, zconn, zblkOid)
}
......@@ -412,45 +412,48 @@ func (bf *ZBigFile) BlkSize() int64 {
// it also returns:
//
// - BTree path in .blktab to loaded block,
// - blocks covered by leaf node in the BTree path,
// - max(_.serial for _ in ZBlk(#blk), all BTree/Bucket that lead to ZBlk)
// which provides a rough upper-bound estimate for file[blk] revision.
//
// TODO load into user-provided buf.
func (bf *ZBigFile) LoadBlk(ctx context.Context, blk int64) (_ []byte, treePath []btree.LONode, zblk ZBlk, blkRevMax zodb.Tid, err error) {
func (bf *ZBigFile) LoadBlk(ctx context.Context, blk int64) (_ []byte, treePath []btree.LONode, blkCov btree.LKeyRange, zblk ZBlk, blkRevMax zodb.Tid, err error) {
defer xerr.Contextf(&err, "bigfile %s: loadblk %d", bf.POid(), blk)
ø := btree.LKeyRange{Lo: 0, Hi_: -1} // empty KeyRange
err = bf.PActivate(ctx)
if err != nil {
return nil, nil, nil, 0, err
return nil, nil, ø, nil, 0, err
}
defer bf.PDeactivate()
blkRevMax = 0
xzblk, ok, err := bf.blktab.VGet(ctx, blk, func(node btree.LONode) {
xzblk, ok, err := bf.blktab.VGet(ctx, blk, func(node btree.LONode, keycov btree.LKeyRange) {
treePath = append(treePath, node)
blkCov = keycov // will be set last for leaf
blkRevMax = tidmax(blkRevMax, node.PSerial())
})
if err != nil {
return nil, nil, nil, 0, err
return nil, nil, ø, nil, 0, err
}
if !ok {
return make([]byte, bf.blksize), treePath, nil, blkRevMax, nil
return make([]byte, bf.blksize), treePath, blkCov, nil, blkRevMax, nil
}
zblk, err = vZBlk(xzblk)
if err != nil {
return nil, nil, nil, 0, err
return nil, nil, ø, nil, 0, err
}
blkdata, zblkrev, err := zblk.LoadBlkData(ctx)
if err != nil {
return nil, nil, nil, 0, err
return nil, nil, ø, nil, 0, err
}
blkRevMax = tidmax(blkRevMax, zblkrev)
l := int64(len(blkdata))
if l > bf.blksize {
return nil, nil, nil, 0, fmt.Errorf("zblk %s: invalid blk: size = %d (> blksize = %d)", zblk.POid(), l, bf.blksize)
return nil, nil, ø, nil, 0, fmt.Errorf("zblk %s: invalid blk: size = %d (> blksize = %d)", zblk.POid(), l, bf.blksize)
}
// append trailing \0 to data to reach .blksize
......@@ -460,37 +463,39 @@ func (bf *ZBigFile) LoadBlk(ctx context.Context, blk int64) (_ []byte, treePath
blkdata = d
}
return blkdata, treePath, zblk, blkRevMax, nil
return blkdata, treePath, blkCov, zblk, blkRevMax, nil
}
// Size returns whole file size.
//
// it also returns BTree path scanned to obtain the size.
func (bf *ZBigFile) Size(ctx context.Context) (_ int64, treePath []btree.LONode, err error) {
func (bf *ZBigFile) Size(ctx context.Context) (_ int64, treePath []btree.LONode, blkCov btree.LKeyRange, err error) {
defer xerr.Contextf(&err, "bigfile %s: size", bf.POid())
ø := btree.LKeyRange{Lo: 0, Hi_: -1} // empty KeyRange
err = bf.PActivate(ctx)
if err != nil {
return 0, nil, err
return 0, nil, ø, err
}
defer bf.PDeactivate()
tailblk, ok, err := bf.blktab.VMaxKey(ctx, func(node btree.LONode) {
tailblk, ok, err := bf.blktab.VMaxKey(ctx, func(node btree.LONode, keycov btree.LKeyRange) {
treePath = append(treePath, node)
blkCov = keycov // will be set last for leaf
})
if err != nil {
return 0, nil, err
return 0, nil, ø, err
}
if !ok {
return 0, treePath, nil
return 0, treePath, blkCov, nil
}
size := (tailblk + 1) * bf.blksize
if size / bf.blksize != tailblk + 1 {
return 0, nil, syscall.EFBIG // overflow
return 0, nil, ø, syscall.EFBIG // overflow
}
return size, treePath, nil
return size, treePath, blkCov, nil
}
// vZBlk checks and converts xzblk to a ZBlk object.
......
......@@ -110,17 +110,17 @@ func TestZBlk(t *testing.T) {
t.Fatalf("zf: [1] -> %#v; want z1", z1_)
}
size, _, err := zf.Size(ctx); X(err)
size, _, _, err := zf.Size(ctx); X(err)
assert.Equal(size, int64(zf_size), "ZBigFile size wrong")
// LoadBlk
z0Data, _, _, _, err = zf.LoadBlk(ctx, 1); X(err)
z0Data, _, _, _, _, err = zf.LoadBlk(ctx, 1); X(err)
assert.Equal(len(z0Data), int(zf.blksize))
z0Data = bytes.TrimRight(z0Data, "\x00")
assert.Equal(z0Data, z0DataOK)
z1Data, _, _, _, err = zf.LoadBlk(ctx, 3); X(err)
z1Data, _, _, _, _, err = zf.LoadBlk(ctx, 3); X(err)
assert.Equal(len(z1Data), int(zf.blksize))
z1Data = bytes.TrimRight(z1Data, "\x00")
assert.Equal(z1Data, z1DataOK)
......
......@@ -195,16 +195,16 @@ func (δFtail *ΔFtail) Tail() zodb.Tid { return δFtail.δBtail.Tail() }
// ---- Track/rebuild/Update/Forget ----
// Track associates file[blk]@head with tree path and zblk object.
// Track associates file[blk]@head with zblk object and file[blkcov]@head with tree path.
//
// Path root becomes associated with the file, and the path and zblk object become tracked.
// One root can be associated with several files (each provided on different Track calls).
//
// zblk can be nil, which represents a hole.
// blk=-1 should be used for tracking after ZBigFile.Size() query (no zblk is accessed at all).
// if zblk is nil -> blk is ignored and can be arbitrary.
//
// Objects in path and zblk must be with .PJar().At() == .head
func (δFtail *ΔFtail) Track(file *ZBigFile, blk int64, path []btree.LONode, zblk ZBlk) {
func (δFtail *ΔFtail) Track(file *ZBigFile, blk int64, path []btree.LONode, blkcov btree.LKeyRange, zblk ZBlk) {
// XXX locking
head := δFtail.Head()
......@@ -222,10 +222,7 @@ func (δFtail *ΔFtail) Track(file *ZBigFile, blk int64, path []btree.LONode, zb
// path.at == head is verified by ΔBtail.Track
foid := file.POid()
if blk == -1 {
blk = xbtree.KeyMax
}
δFtail.δBtail.Track(blk, path)
δFtail.δBtail.Track(path, blkcov)
rootObj := path[0].(*btree.LOBTree)
root := rootObj.POid()
......@@ -684,7 +681,10 @@ func (δFtail *ΔFtail) SliceByFileRev(zfile *ZBigFile, lo, hi zodb.Tid) /*reado
//fmt.Printf("Zinblk: %v\n", Zinblk)
// vδT for current epoch
vδT := δFtail.δBtail.SliceByRootRev(root, epoch, head) // NOTE @head, not hi
var vδT []xbtree.ΔTree
if root != xbtree.VDEL {
vδT = δFtail.δBtail.SliceByRootRev(root, epoch, head) // NOTE @head, not hi
}
it := len(vδT) - 1
if it >= 0 {
ZinblkAt = vδT[it].Rev
......
......@@ -266,8 +266,8 @@ func testΔFtail(t_ *testing.T, testq chan ΔFTestEntry) {
// ( later retrackAll should be called after new epoch to track zfile[-∞,∞) again )
retrackAll := func() {
for blk := range blkTab {
_, path, zblk, _, err := zfile.LoadBlk(ctx, blk); X(err)
δFtail.Track(zfile, blk, path, zblk)
_, path, blkcov, zblk, _, err := zfile.LoadBlk(ctx, blk); X(err)
δFtail.Track(zfile, blk, path, blkcov, zblk)
}
}
retrackAll()
......@@ -614,8 +614,8 @@ func TestΔFtailSliceUntrackedUniform(t_ *testing.T) {
zfile, _ := t.XLoadZFile(ctx, zconn)
xtrackBlk := func(blk int64) {
_, path, zblk, _, err := zfile.LoadBlk(ctx, blk); X(err)
δFtail.Track(zfile, blk, path, zblk)
_, path, blkcov, zblk, _, err := zfile.LoadBlk(ctx, blk); X(err)
δFtail.Track(zfile, blk, path, blkcov, zblk)
}
// track 0, but do not track 1 and 2.
......
......@@ -963,14 +963,14 @@ retry:
zfile := file.zfile
// XXX need to do only if δfile.Size changed
size, sizePath, err := zfile.Size(ctx)
size, sizePath, blkCov, err := zfile.Size(ctx)
if err != nil {
return err
}
file.size = size
// see "3) for */head/data the following invariant is maintained..."
bfdir.δFtail.Track(zfile, -1, sizePath, nil)
bfdir.δFtail.Track(zfile, -1, sizePath, blkCov, nil)
// XXX we can miss a change to file if δblk is not yet tracked
// -> need to update file.rev at read time -> locking=XXX
......@@ -1283,7 +1283,7 @@ func (f *BigFile) readBlk(ctx context.Context, blk int64, dest []byte) (err erro
}
// noone was loading - we became responsible to load this block
blkdata, treepath, zblk, blkrevMax, err := f.zfile.LoadBlk(ctx, blk)
blkdata, treepath, blkcov, zblk, blkrevMax, err := f.zfile.LoadBlk(ctx, blk)
loading.blkdata = blkdata
loading.err = err
......@@ -1298,7 +1298,7 @@ func (f *BigFile) readBlk(ctx context.Context, blk int64, dest []byte) (err erro
// we have the data - it can be used after watchers are updated
// XXX should we use ctx here? (see readPinWatchers comments)
f.readPinWatchers(ctx, blk, treepath, zblk, blkrevMax)
f.readPinWatchers(ctx, blk, treepath, blkcov, zblk, blkrevMax)
// data can be used now
close(loading.ready)
......@@ -1516,7 +1516,7 @@ func (w *Watch) _pin(ctx context.Context, blk int64, rev zodb.Tid) (err error) {
// XXX do we really need to use/propagate caller context here? ideally update
// watchers should be synchronous, and in practice we just use 30s timeout.
// Should a READ interrupt cause watch update failure? -> probably no
func (f *BigFile) readPinWatchers(ctx context.Context, blk int64, treepath []btree.LONode, zblk ZBlk, blkrevMax zodb.Tid) {
func (f *BigFile) readPinWatchers(ctx context.Context, blk int64, treepath []btree.LONode, blkcov btree.LKeyRange, zblk ZBlk, blkrevMax zodb.Tid) {
// only head/ is being watched for
if f.head.rev != 0 {
return
......@@ -1531,7 +1531,7 @@ func (f *BigFile) readPinWatchers(ctx context.Context, blk int64, treepath []btr
bfdir := f.head.bfdir
δFtail := bfdir.δFtail
bfdir.δFmu.Lock() // XXX locking correct? XXX -> better push down?
δFtail.Track(f.zfile, blk, treepath, zblk) // XXX pass in zblk.rev here?
δFtail.Track(f.zfile, blk, treepath, blkcov, zblk) // XXX pass in zblk.rev here?
f.accessed.Add(blk)
bfdir.δFmu.Unlock()
......@@ -2229,7 +2229,7 @@ func (head *Head) bigopen(ctx context.Context, oid zodb.Oid) (_ *BigFile, err er
rev := zfile.PSerial()
zfile.PDeactivate()
size, sizePath, err := zfile.Size(ctx)
size, sizePath, blkCov, err := zfile.Size(ctx)
if err != nil {
return nil, err
}
......@@ -2248,7 +2248,7 @@ func (head *Head) bigopen(ctx context.Context, oid zodb.Oid) (_ *BigFile, err er
if head.rev == 0 {
// see "3) for */head/data the following invariant is maintained..."
head.bfdir.δFmu.Lock() // XXX locking ok?
head.bfdir.δFtail.Track(f.zfile, -1, sizePath, nil)
head.bfdir.δFtail.Track(f.zfile, -1, sizePath, blkCov, nil)
head.bfdir.δFmu.Unlock()
// FIXME: scan zfile.blktab - so that we can detect all btree changes
......