Commit a98c9d24 authored by Kirill Smelkov

.

parent 3398c0af
@@ -25,6 +25,7 @@ import (
 	"compress/zlib"
 	"context"
 	"crypto/sha1"
+	"fmt"
 	"io"
 	"math/rand"
 	"net/url"
@@ -77,7 +78,7 @@ func (c *Client) Close() error {
 	// return err
 }

-func (c *Client) LastTid() (zodb.Tid, error) {
+func (c *Client) LastTid(ctx context.Context) (zodb.Tid, error) {
 	panic("TODO")
 	/*
 	c.Mlink // XXX check we are connected
@@ -98,17 +99,17 @@ func (c *Client) LastTid() (zodb.Tid, error) {
 	*/
 }

-func (c *Client) LastOid() (zodb.Oid, error) {
+func (c *Client) LastOid(ctx context.Context) (zodb.Oid, error) {
 	// XXX there is no LastOid in NEO/py
 	panic("TODO")
 }

 // decompress decompresses data according to zlib encoding.
 //
-// out buffer, if there is enough capacity, is used for decompression destionation.
+// out buffer, if there is enough capacity, is used for decompression destination.
 // if out has not not enough capacity a new buffer is allocated and used.
 //
-// return: destination buffer with full decompressed data.
+// return: destination buffer with full decompressed data or error.
 func decompress(in []byte, out []byte) ([]byte, error) {
 	bin := bytes.NewReader(in)
 	zr, err := zlib.NewReader(bin)
@@ -126,23 +127,33 @@ func decompress(in []byte, out []byte) ([]byte, error) {
 	return bout.Bytes(), nil
 }

-func (c *Client) Load(xid zodb.Xid) (data []byte, tid zodb.Tid, err error) {
+func (c *Client) Load(ctx context.Context, xid zodb.Xid) (data []byte, serial zodb.Tid, err error) {
 	// XXX check pt is operational first? -> no if there is no data - we'll
 	// just won't find ready cell
+	//
+	// XXX or better still check first M told us ok to go? (ClusterState=RUNNING)
+	//if c.node.ClusterState != ClusterRunning {
+	//	return nil, 0, &Error{NOT_READY, "cluster not operational"}
+	//}

 	cellv := c.node.PartTab.Get(xid.Oid)
 	// XXX cellv = filter(cellv, UP_TO_DATE)
+	if len(cellv) == 0 {
+		return nil, 0, fmt.Errorf("no storages alive for oid %v", xid.Oid) // XXX err ctx
+	}
+
 	cell := cellv[rand.Intn(len(cellv))]
 	stor := c.node.NodeTab.Get(cell.NodeUUID)
 	if stor == nil {
-		panic(0) // XXX
+		return nil, 0, fmt.Errorf("storage %v not yet known", cell.NodeUUID) // XXX err ctx
 	}
-	// XXX check stor.State == RUNNING
-	Sconn, err := stor.Conn()
+	// XXX check stor.State == RUNNING -> in link
+	Sconn := stor.Conn // XXX temp stub
+	//Sconn, err := stor.Conn()
 	if err != nil {
-		panic(0) // XXX
+		return nil, 0, err // XXX err ctx
 	}
-	defer lclose(Sconn)
+	defer lclose(ctx, Sconn)

 	req := neo.GetObject{Oid: xid.Oid}
 	if xid.TidBefore {
@@ -160,12 +171,12 @@ func (c *Client) Load(xid zodb.Xid) (data []byte, tid zodb.Tid, err error) {
 	}

 	checksum := sha1.Sum(data)
-	if checksum != reply.Checksum {
+	if checksum != resp.Checksum {
 		// XXX data corrupt
 	}

-	data := resp.Data
-	if reply.Compression {
+	data = resp.Data
+	if resp.Compression {
 		data, err = decompress(resp.Data, make([]byte, 0, len(resp.Data)))
 		if err != nil {
 			// XXX data corrupt
...
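The decompress contract spelled out in the doc comment above (reuse the caller's out buffer when it has enough capacity, otherwise let a new one be allocated) can be sketched standalone roughly as follows; only the standard library is assumed, and zlibDecompress plus the package name are illustrative, not the commit's exact code.

package example

import (
	"bytes"
	"compress/zlib"
	"io"
)

// zlibDecompress decompresses in, appending into out's backing array while
// its capacity lasts and letting the buffer grow otherwise.
func zlibDecompress(in, out []byte) ([]byte, error) {
	zr, err := zlib.NewReader(bytes.NewReader(in))
	if err != nil {
		return nil, err
	}
	defer zr.Close()

	bout := bytes.NewBuffer(out[:0]) // reuse out when possible
	if _, err := io.Copy(bout, zr); err != nil {
		return nil, err
	}
	return bout.Bytes(), nil
}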
+package client
+
+import (
+	"context"
+	"io"
+
+	"lab.nexedi.com/kirr/neo/go/xcommon/log"
+)
+
+// lclose closes c and logs closing error if there was any.
+// the error is otherwise ignored
+// XXX dup in neo,server
+func lclose(ctx context.Context, c io.Closer) {
+	err := c.Close()
+	if err != nil {
+		log.Error(ctx, err)
+	}
+}
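For illustration, the intended calling pattern for this helper is a deferred call, so the close error is logged on every return path while the function's own error handling stays focused on its main work. readFile and the file-reading details below are hypothetical; only lclose itself comes from the diff.

package client

import (
	"context"
	"io/ioutil"
	"os"
)

// readFile reads path fully; the close error, if any, is logged by lclose
// rather than being returned to the caller.
func readFile(ctx context.Context, path string) ([]byte, error) {
	f, err := os.Open(path)
	if err != nil {
		return nil, err
	}
	defer lclose(ctx, f)

	return ioutil.ReadAll(f)
}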
@@ -82,9 +82,6 @@ type NodeTable struct {
 }

-// // special error indicating dial is currently in progress
-// var errDialInprogress = errors.New("dialing...")
-
 // even if dialing a peer failed, we'll attempt redial after this timeout
 const δtRedial = 3 * time.Second
...
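A rough standalone sketch of the redial-after-δt behaviour the comment above describes, assuming only plain net.Dial and the standard library; dialRetry and the "tcp" address are stand-ins for the real peer-dialing logic.

package example

import (
	"context"
	"net"
	"time"
)

// dialRetry keeps trying to reach addr, waiting δt between failed attempts,
// until it succeeds or ctx is cancelled.
func dialRetry(ctx context.Context, addr string, δt time.Duration) (net.Conn, error) {
	for {
		conn, err := net.Dial("tcp", addr)
		if err == nil {
			return conn, nil
		}
		select {
		case <-time.After(δt):
			// redial after the timeout
		case <-ctx.Done():
			return nil, ctx.Err()
		}
	}
}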
@@ -334,8 +334,8 @@ func (stor *Storage) m1initialize(ctx context.Context, Mconn *neo.Conn) (err err
 		// TODO AskUnfinishedTransactions

 	case *neo.LastIDs:
-		lastTid, zerr1 := stor.zstor.LastTid()
-		lastOid, zerr2 := stor.zstor.LastOid()
+		lastTid, zerr1 := stor.zstor.LastTid(ctx)
+		lastOid, zerr2 := stor.zstor.LastOid(ctx)
 		if zerr := xerr.First(zerr1, zerr2); zerr != nil {
 			return zerr // XXX send the error to M
 		}
@@ -488,10 +488,11 @@ func (stor *Storage) serveClient(ctx context.Context, conn *neo.Conn) {
 	for {
 		err := stor.serveClient1(ctx, conn)
 		if err != nil {
-			return err
+			log.Infof(ctx, "%v: %v", conn, err)
+			return
 		}

-		lclose(conn)
+		lclose(ctx, conn)

 		// keep on going in the same goroutine to avoid goroutine creation overhead
 		// TODO Accept += timeout, go away if inactive
@@ -545,7 +546,7 @@ func (stor *Storage) serveClient(ctx context.Context, conn *neo.Conn) {
 }

 // serveClient1 serves 1 request from a client
-func (stor *Storage) serveClient1(conn *neo.Conn) error {
+func (stor *Storage) serveClient1(ctx context.Context, conn *neo.Conn) error {
 	req, err := conn.Recv()
 	if err != nil {
 		return err // XXX log / err / send error before closing
...
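The "keep on going in the same goroutine" remark above describes serving successive requests of one client without spawning a goroutine per request, and logging the first error instead of propagating it. A generic sketch of that shape, using standard net/bufio types as stand-ins for the NEO link/Conn machinery:

package example

import (
	"bufio"
	"context"
	"log"
	"net"
)

// serveConn handles successive requests from one connection in the current
// goroutine; on the first error it logs the reason and returns.
func serveConn(ctx context.Context, conn net.Conn) {
	defer conn.Close()
	r := bufio.NewReader(conn)
	for {
		if ctx.Err() != nil {
			return
		}
		line, err := r.ReadString('\n')
		if err != nil {
			log.Printf("%v: %v", conn.RemoteAddr(), err)
			return
		}
		if _, err := conn.Write([]byte(line)); err != nil { // echo back
			log.Printf("%v: %v", conn.RemoteAddr(), err)
			return
		}
	}
}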
@@ -30,7 +30,7 @@ import (

 // lclose closes c and logs closing error if there was any.
 // the error is otherwise ignored
-// XXX dup in neo
+// XXX dup in neo, client
 func lclose(ctx context.Context, c io.Closer) {
 	err := c.Close()
 	if err != nil {
...
@@ -9,7 +9,7 @@ import (

 // lclose closes c and logs closing error if there was any.
 // the error is otherwise ignored
-// XXX dup in server
+// XXX dup in server, client
 func lclose(ctx context.Context, c io.Closer) {
 	err := c.Close()
 	if err != nil {
...
@@ -24,6 +24,7 @@ package storage
 //go:generate sh -c "go run ../../xcommon/tracing/cmd/gotrace/{gotrace,util}.go ."

 import (
+	"context"
 	"fmt"
 	"sort"
 	"sync"
@@ -107,7 +108,7 @@ type revCacheEntry struct {
 // StorLoader represents loading part of a storage.
 // XXX -> zodb?
 type StorLoader interface {
-	Load(xid zodb.Xid) (data []byte, serial zodb.Tid, err error)
+	Load(ctx context.Context, xid zodb.Xid) (data []byte, serial zodb.Tid, err error)
 }

 // lock order: Cache.mu > oidCacheEntry
@@ -148,7 +149,7 @@ func (c *Cache) SetSizeMax(sizeMax int) {
 // Load loads data from database via cache.
 //
 // If data is already in cache - cached content is returned.
-func (c *Cache) Load(xid zodb.Xid) (data []byte, serial zodb.Tid, err error) {
+func (c *Cache) Load(ctx context.Context, xid zodb.Xid) (data []byte, serial zodb.Tid, err error) {
 	rce, rceNew := c.lookupRCE(xid)

 	// rce is already in cache - use it
@@ -162,7 +163,7 @@ func (c *Cache) Load(xid zodb.Xid) (data []byte, serial zodb.Tid, err error) {
 	} else {
 		// XXX use connection poll
 		// XXX or it should be cared by loader?
-		c.loadRCE(rce, xid.Oid)
+		c.loadRCE(ctx, rce, xid.Oid)
 	}

 	if rce.err != nil {
@@ -184,7 +185,7 @@ func (c *Cache) Load(xid zodb.Xid) (data []byte, serial zodb.Tid, err error) {
 // If data is not yet in cache loading for it is started in the background.
 // Prefetch is not blocking operation and does not wait for loading, if any was
 // started, to complete.
-func (c *Cache) Prefetch(xid zodb.Xid) {
+func (c *Cache) Prefetch(ctx context.Context, xid zodb.Xid) {
 	rce, rceNew := c.lookupRCE(xid)

 	// !rceNew -> no need to adjust LRU - it will be adjusted by further actual data Load
@@ -193,7 +194,7 @@ func (c *Cache) Prefetch(xid zodb.Xid) {
 	// spawn loading in the background if rce was not yet loaded
 	if rceNew {
 		// XXX use connection poll
-		go c.loadRCE(rce, xid.Oid)
+		go c.loadRCE(ctx, rce, xid.Oid)
 	}
 }
@@ -290,15 +291,16 @@ func (c *Cache) lookupRCE(xid zodb.Xid) (rce *revCacheEntry, rceNew bool) {
 //
 // rce must be new just created by lookupRCE() with returned rceNew=true.
 // loading completion is signalled by closing rce.ready.
-func (c *Cache) loadRCE(rce *revCacheEntry, oid zodb.Oid) {
+func (c *Cache) loadRCE(ctx context.Context, rce *revCacheEntry, oid zodb.Oid) {
 	oce := rce.parent
-	data, serial, err := c.loader.Load(zodb.Xid{
+	data, serial, err := c.loader.Load(ctx, zodb.Xid{
 		Oid: oid,
 		XTid: zodb.XTid{Tid: rce.before, TidBefore: true},
 	})

 	// normalize data/serial if it was error
 	if err != nil {
+		// XXX err == canceled? -> ?
 		data = nil
 		serial = 0
 	}
...
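The comments above describe the cache's completion protocol: loading fills an entry and signals waiters by closing its ready channel. A minimal self-contained sketch of that idea; entry, load and wait are stand-ins, not the cache's real types, and the ctx-cancellation branch is only one possible answer to the "caching vs ctx cancel" question the diff still leaves open.

package example

import "context"

// entry is a stand-in for a cache entry whose loading completion is
// signalled by closing the ready channel.
type entry struct {
	ready chan struct{} // closed when data/err become valid
	data  []byte
	err   error
}

func newEntry() *entry {
	return &entry{ready: make(chan struct{})}
}

// load fills the entry and wakes every waiter by closing ready.
func load(ctx context.Context, e *entry, loader func(context.Context) ([]byte, error)) {
	e.data, e.err = loader(ctx)
	close(e.ready)
}

// wait blocks until the entry is loaded or ctx is cancelled.
func wait(ctx context.Context, e *entry) ([]byte, error) {
	select {
	case <-e.ready:
		return e.data, e.err
	case <-ctx.Done():
		return nil, ctx.Err()
	}
}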
@@ -21,6 +21,7 @@ package storage

 import (
 	"bytes"
+	"context"
 	"errors"
 	"fmt"
 	"reflect"
@@ -49,7 +50,7 @@ type tOidData struct {
 	err error // e.g. io error
 }

-func (stor *tStorage) Load(xid zodb.Xid) (data []byte, serial zodb.Tid, err error) {
+func (stor *tStorage) Load(_ context.Context, xid zodb.Xid) (data []byte, serial zodb.Tid, err error) {
 	//fmt.Printf("> load(%v)\n", xid)
 	//defer func() { fmt.Printf("< %v, %v, %v\n", data, serial, err) }()
 	tid := xid.Tid
@@ -142,11 +143,12 @@ func TestCache(t *testing.T) {
 	}

 	c := NewCache(tstor, 100 /* > Σ all data */)
+	ctx := context.Background()

 	checkLoad := func(xid zodb.Xid, data []byte, serial zodb.Tid, err error) {
 		t.Helper()
 		bad := &bytes.Buffer{}
-		d, s, e := c.Load(xid)
+		d, s, e := c.Load(ctx, xid)
 		if !reflect.DeepEqual(data, d) {
 			fmt.Fprintf(bad, "data:\n%s\n", pretty.Compare(data, d))
 		}
@@ -335,7 +337,7 @@ func TestCache(t *testing.T) {
 	// (<14 also becomes ready and takes oce lock first, merging <12 and <14 into <16.
 	// <16 did not yet took oce lock so c.size is temporarily reduced and
 	// <16 is not yet on LRU list)
-	c.loadRCE(rce1_b14, 1)
+	c.loadRCE(ctx, rce1_b14, 1)
 	checkRCE(rce1_b14, 14, 10, world, nil)
 	checkRCE(rce1_b16, 16, 10, world, nil)
 	checkRCE(rce1_b12, 12, 10, world, nil)
@@ -344,7 +346,7 @@ func TestCache(t *testing.T) {

 	// (<16 takes oce lock and updates c.size and LRU list)
 	rce1_b16.ready = make(chan struct{}) // so loadRCE could run
-	c.loadRCE(rce1_b16, 1)
+	c.loadRCE(ctx, rce1_b16, 1)
 	checkOCE(1, rce1_b4, rce1_b7, rce1_b8, rce1_b10, rce1_b16)
 	checkMRU(12, rce1_b16, rce1_b10, rce1_b8, rce1_b7, rce1_b4)
@@ -364,7 +366,7 @@ func TestCache(t *testing.T) {
 	checkMRU(12, rce1_b16, rce1_b10, rce1_b8, rce1_b7, rce1_b4) // no <17 and <18 yet

 	// (<18 loads and takes oce lock first - merge <17 with <18)
-	c.loadRCE(rce1_b18, 1)
+	c.loadRCE(ctx, rce1_b18, 1)
 	checkRCE(rce1_b18, 18, 16, zz, nil)
 	checkRCE(rce1_b17, 17, 16, zz, nil)
 	checkOCE(1, rce1_b4, rce1_b7, rce1_b8, rce1_b10, rce1_b16, rce1_b18)
@@ -437,7 +439,7 @@ func TestCache(t *testing.T) {
 	// <9 must be separate from <8 and <10 because it is IO error there
 	rce1_b9, new9 := c.lookupRCE(xidlt(1,9))
 	ok1(new9)
-	c.loadRCE(rce1_b9, 1)
+	c.loadRCE(ctx, rce1_b9, 1)
 	checkRCE(rce1_b9, 9, 0, nil, ioerr)
 	checkOCE(1, rce1_b4, rce1_b7, rce1_b8, rce1_b9, rce1_b10, rce1_b16, rce1_b20, rce1_b22)
 	checkMRU(17, rce1_b9, rce1_b22, rce1_b20, rce1_b16, rce1_b10, rce1_b8, rce1_b7, rce1_b4)
@@ -507,7 +509,7 @@ func TestCache(t *testing.T) {
 	checkMRU(15, rce1_b16, rce1_b7, rce1_b9, rce1_b22)

 	// reload <20 -> <22 should be evicted
-	go c.Load(xidlt(1,20))
+	go c.Load(ctx, xidlt(1,20))
 	tc.Expect(gcstart, gcfinish)

 	// - evicted <22 (lru.1, www, size=3)
@@ -520,7 +522,7 @@ func TestCache(t *testing.T) {
 	checkMRU(14, rce1_b20_2, rce1_b16, rce1_b7, rce1_b9)

 	// load big <78 -> several rce must be evicted
-	go c.Load(xidlt(1,78))
+	go c.Load(ctx, xidlt(1,78))
 	tc.Expect(gcstart, gcfinish)

 	// - evicted <9 (lru.1, ioerr, size=0)
@@ -548,7 +550,7 @@ func TestCache(t *testing.T) {
 	checkMRU(0)

+	// XXX verify caching vs ctx cancel
 	// XXX verify db inconsistency checks
 	// XXX verify loading with before > cache.before
 }
...
@@ -198,14 +198,14 @@ func (fs *FileStorage) Close() error {
 }

-func (fs *FileStorage) LastTid() (zodb.Tid, error) {
+func (fs *FileStorage) LastTid(_ context.Context) (zodb.Tid, error) {
 	// XXX check we have transactions at all
 	// XXX what to return if not?
 	// XXX must be under lock
 	return fs.txnhMax.Tid, nil // XXX error always nil ?
 }

-func (fs *FileStorage) LastOid() (zodb.Oid, error) {
+func (fs *FileStorage) LastOid(_ context.Context) (zodb.Oid, error) {
 	// XXX check we have objects at all?
 	// XXX what to return if not?
 	// XXX must be under lock
@@ -224,7 +224,7 @@ func (e *ErrXidLoad) Error() string {
 	return fmt.Sprintf("loading %v: %v", e.Xid, e.Err)
 }

-func (fs *FileStorage) Load(xid zodb.Xid) (data []byte, tid zodb.Tid, err error) {
+func (fs *FileStorage) Load(_ context.Context, xid zodb.Xid) (data []byte, tid zodb.Tid, err error) {
 	// lookup in index position of oid data record within latest transaction who changed this oid
 	dataPos, ok := fs.index.Get(xid.Oid)
 	if !ok {
...
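Since FileStorage is purely local, it can satisfy the new ctx-aware signatures while ignoring the context, which is what the `_ context.Context` parameters above express. A toy illustration of the same pattern; memStorage and uint64-as-Tid are stand-ins, not the real zodb types.

package example

import "context"

// memStorage keeps its state in memory, so LastTid can accept a context to
// satisfy a ctx-aware interface and simply never consult it.
type memStorage struct {
	lastTid uint64
}

func (m *memStorage) LastTid(_ context.Context) (uint64, error) {
	return m.lastTid, nil // purely in-memory: nothing to cancel
}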
@@ -75,7 +75,7 @@ type oidLoadedOk struct {

 // checkLoad verifies that fs.Load(xid) returns expected result
 func checkLoad(t *testing.T, fs *FileStorage, xid zodb.Xid, expect oidLoadedOk) {
-	data, tid, err := fs.Load(xid)
+	data, tid, err := fs.Load(context.Background(), xid)
 	if err != nil {
 		t.Errorf("load %v: %v", xid, err)
 	}
...
@@ -23,6 +23,7 @@
 package zodb

 import (
+	"context"
 	"fmt"
 )

@@ -142,12 +143,12 @@ type IStorage interface {
 	// LastTid returns the id of the last committed transaction.
 	// if no transactions have been committed yet, LastTid returns Tid zero value
-	LastTid() (Tid, error)
+	LastTid(ctx context.Context) (Tid, error)

 	// LastOid returns highest object id of objects committed to storage.
 	// if there is no data committed yet, LastOid returns Oid zero value
 	// XXX ZODB/py does not define this in IStorage
-	LastOid() (Oid, error)
+	LastOid(ctx context.Context) (Oid, error)

 	// LoadSerial and LoadBefore generalized into 1 Load (see Xid for details)
 	//
@@ -156,9 +157,9 @@ type IStorage interface {
 	// XXX currently deleted data is returned as data=nil -- is it ok?
 	// TODO specify error when data not found -> ErrOidMissing | ErrXidMissing
 	// TODO data []byte -> something allocated from slab ?
-	Load(xid Xid) (data []byte, serial Tid, err error) // XXX -> StorageRecordInformation ?
+	Load(ctx context.Context, xid Xid) (data []byte, serial Tid, err error) // XXX -> StorageRecordInformation ?

-	// Prefetch(xid Xid) (no error)
+	// Prefetch(ctx, xid Xid) (no error)

 	// Store(oid Oid, serial Tid, data []byte, txn ITransaction) error
 	// XXX Restore ?
@@ -171,7 +172,7 @@ type IStorage interface {
 	// XXX allow iteration both ways (forward & backward)
 	// XXX text
-	Iterate(tidMin, tidMax Tid) IStorageIterator // XXX , error ?
+	Iterate(tidMin, tidMax Tid) IStorageIterator // XXX ctx , error ?
 }

 type IStorageIterator interface {
@@ -180,12 +181,12 @@ type IStorageIterator interface {
 	// 2. iterator over transaction data records.
 	// transaction metadata stays valid until next call to NextTxn().
 	// end of iteration is indicated with io.EOF
-	NextTxn() (*TxnInfo, IStorageRecordIterator, error)
+	NextTxn() (*TxnInfo, IStorageRecordIterator, error) // XXX ctx
 }

 type IStorageRecordIterator interface { // XXX naming -> IRecordIterator
 	// NextData yields information about next storage data record.
 	// returned data stays valid until next call to NextData().
 	// end of iteration is indicated with io.EOF
-	NextData() (*StorageRecordInformation, error)
+	NextData() (*StorageRecordInformation, error) // XXX ctx
 }
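With ctx threaded through the interface as above, callers can bound or cancel storage requests. An illustrative sketch of such a caller, using a stand-in interface with uint64 in place of Tid rather than the real zodb types:

package example

import (
	"context"
	"fmt"
	"time"
)

// storage is a stand-in for the ctx-aware part of the interface shown above.
type storage interface {
	LastTid(ctx context.Context) (uint64, error)
}

// printLastTid bounds the request with a deadline; for a remote storage
// (e.g. the NEO client) the call can now be cancelled via ctx.
func printLastTid(stor storage) error {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	tid, err := stor.LastTid(ctx)
	if err != nil {
		return err
	}
	fmt.Printf("last_tid: %016x\n", tid)
	return nil
}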
@@ -33,8 +33,8 @@ import (

 // Catobj dumps content of one ZODB object
 // The object is printed in raw form without any headers (see Dumpobj)
-func Catobj(w io.Writer, stor zodb.IStorage, xid zodb.Xid) error {
-	data, _, err := stor.Load(xid)
+func Catobj(ctx context.Context, w io.Writer, stor zodb.IStorage, xid zodb.Xid) error {
+	data, _, err := stor.Load(ctx, xid)
 	if err != nil {
 		return err
 	}
@@ -44,10 +44,10 @@ func Catobj(w io.Writer, stor zodb.IStorage, xid zodb.Xid) error {
 }

 // Dumpobj dumps content of one ZODB object with zodbdump-like header
-func Dumpobj(w io.Writer, stor zodb.IStorage, xid zodb.Xid, hashOnly bool) error {
+func Dumpobj(ctx context.Context, w io.Writer, stor zodb.IStorage, xid zodb.Xid, hashOnly bool) error {
 	var objInfo zodb.StorageRecordInformation

-	data, tid, err := stor.Load(xid)
+	data, tid, err := stor.Load(ctx, xid)
 	if err != nil {
 		return err
 	}
@@ -118,7 +118,9 @@ func catobjMain(argv []string) {
 		Fatal("only 1 object allowed with -raw")
 	}

-	stor, err := zodb.OpenStorageURL(context.Background(), storUrl) // TODO read-only
+	ctx := context.Background()
+
+	stor, err := zodb.OpenStorageURL(ctx, storUrl) // TODO read-only
 	if err != nil {
 		Fatal(err)
 	}
@@ -126,9 +128,9 @@ func catobjMain(argv []string) {
 	catobj := func(xid zodb.Xid) error {
 		if raw {
-			return Catobj(os.Stdout, stor, xid)
+			return Catobj(ctx, os.Stdout, stor, xid)
 		} else {
-			return Dumpobj(os.Stdout, stor, xid, hashOnly)
+			return Dumpobj(ctx, os.Stdout, stor, xid, hashOnly)
 		}
 	}
...
@@ -32,14 +32,19 @@ import (
 )

 // paramFunc is a function to retrieve 1 storage parameter
-type paramFunc func(stor zodb.IStorage) (string, error)
+type paramFunc func(ctx context.Context, stor zodb.IStorage) (string, error)

 var infov = []struct {name string; getParam paramFunc} {
 	// XXX e.g. stor.LastTid() should return err itself
-	{"name", func(stor zodb.IStorage) (string, error) { return stor.StorageName(), nil }},
+	{"name", func(ctx context.Context, stor zodb.IStorage) (string, error) {
+		return stor.StorageName(), nil
+	}},
 	// TODO reenable size
 	// {"size", func(stor zodb.IStorage) (string, error) { return stor.StorageSize(), nil }},
-	{"last_tid", func(stor zodb.IStorage) (string, error) {tid, err := stor.LastTid(); return tid.String(), err }},
+	{"last_tid", func(ctx context.Context, stor zodb.IStorage) (string, error) {
+		tid, err := stor.LastTid(ctx)
+		return tid.String(), err
+	}},
 }

 // {} parameter_name -> get_parameter(stor)
@@ -52,7 +57,7 @@ func init() {
 }

 // Info prints general information about a ZODB storage
-func Info(w io.Writer, stor zodb.IStorage, parameterv []string) error {
+func Info(ctx context.Context, w io.Writer, stor zodb.IStorage, parameterv []string) error {
 	wantnames := false
 	if len(parameterv) == 0 {
 		for _, info := range infov {
@@ -71,7 +76,7 @@ func Info(w io.Writer, stor zodb.IStorage, parameterv []string) error {
 		if wantnames {
 			out += parameter + "="
 		}
-		value, err := getParam(stor)
+		value, err := getParam(ctx, stor)
 		if err != nil {
 			return fmt.Errorf("getting %s: %v", parameter, err)
 		}
@@ -115,12 +120,14 @@ func infoMain(argv []string) {
 	}
 	storUrl := argv[0]

-	stor, err := zodb.OpenStorageURL(context.Background(), storUrl) // TODO read-only
+	ctx := context.Background()
+
+	stor, err := zodb.OpenStorageURL(ctx, storUrl) // TODO read-only
 	if err != nil {
 		Fatal(err)
 	}

-	err = Info(os.Stdout, stor, argv[1:])
+	err = Info(ctx, os.Stdout, stor, argv[1:])
 	if err != nil {
 		Fatal(err)
 	}
...
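After this change the info command's table of parameter getters passes ctx into every getter, so getters that actually query the storage can be cancelled. A self-contained sketch of the same table-of-closures pattern, with a stand-in storage interface instead of zodb.IStorage:

package example

import (
	"context"
	"fmt"
	"io"
)

// storage is a stand-in for the storage methods the info command uses.
type storage interface {
	StorageName() string
	LastTid(ctx context.Context) (string, error)
}

// every parameter getter receives ctx, even the ones that do not need it.
var params = []struct {
	name string
	get  func(ctx context.Context, s storage) (string, error)
}{
	{"name", func(ctx context.Context, s storage) (string, error) {
		return s.StorageName(), nil
	}},
	{"last_tid", func(ctx context.Context, s storage) (string, error) {
		return s.LastTid(ctx)
	}},
}

// printInfo prints every parameter as name=value, stopping on the first
// getter error.
func printInfo(ctx context.Context, w io.Writer, s storage) error {
	for _, p := range params {
		v, err := p.get(ctx, s)
		if err != nil {
			return fmt.Errorf("getting %s: %v", p.name, err)
		}
		fmt.Fprintf(w, "%s=%s\n", p.name, v)
	}
	return nil
}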