Commit f71eebf7 authored by Kirill Smelkov's avatar Kirill Smelkov

.

parent 5dd3d1ab
......@@ -34,6 +34,7 @@ import (
"lab.nexedi.com/kirr/neo/go/xcommon/xcontainer/list"
)
// XXX managing LRU under 1 big gcMu might be bad for scalability.
// TODO maintain nhit / nmiss + way to read cache stats
// Cache adds a RAM caching layer over a storage.
......@@ -67,10 +68,6 @@ type oidCacheEntry struct {
//
// NOTE ^^^ .serial = 0 while loading is in progress
// NOTE ^^^ .serial = 0 if .err != nil
//
// XXX or?
// cached revisions in descending order
// .before > .serial >= next.before > next.serial ?
rcev []*revCacheEntry
}
......@@ -105,7 +102,7 @@ type revCacheEntry struct {
}
// StorLoader represents loading part of a storage.
// XXX -> zodb?
// XXX -> zodb.IStorageLoader (or zodb.Loader ?) ?
type StorLoader interface {
Load(ctx context.Context, xid zodb.Xid) (buf *zodb.Buf, serial zodb.Tid, err error)
}
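As an illustration of the StorLoader contract (and of the nhit/nmiss TODO above), a hypothetical wrapper that forwards to another loader and counts calls could look like the sketch below; statLoader and its fields are illustration only, written as if inside the same package as StorLoader.

import (
	"context"
	"sync/atomic"

	"lab.nexedi.com/kirr/neo/go/zodb"
)

// statLoader is a hypothetical StorLoader wrapper: it forwards Load to a base
// loader and atomically counts how many loads went through, in the spirit of
// the nhit/nmiss TODO above. Sketch only, not part of this commit.
type statLoader struct {
	base  StorLoader
	nload int64
}

func (l *statLoader) Load(ctx context.Context, xid zodb.Xid) (*zodb.Buf, zodb.Tid, error) {
	atomic.AddInt64(&l.nload, 1)
	return l.base.Load(ctx, xid)
}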
......@@ -208,7 +205,7 @@ func (c *Cache) Prefetch(ctx context.Context, xid zodb.Xid) {
// rce will become ready.
//
// rceNew indicates whether rce is new and so loading on it has not been
// initiated yet. If so rce should be loaded with loadRCE.
// initiated yet. If so, the caller should proceed to loading rce via loadRCE.
func (c *Cache) lookupRCE(xid zodb.Xid) (rce *revCacheEntry, rceNew bool) {
// loadSerial(serial) -> loadBefore(serial+1)
before := xid.Tid
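The comment above maps a loadSerial(serial) request onto loadBefore(serial+1); a standalone sketch of that conversion, assuming Xid exposes Tid and the TidBefore flag used in the tests further below:

// beforeOf is a hypothetical helper showing the loadSerial -> loadBefore mapping:
// a loadBefore request already carries the wanted "before" value in xid.Tid,
// while loadSerial(tid) is equivalent to loadBefore(tid+1).
func beforeOf(xid zodb.Xid) zodb.Tid {
	before := xid.Tid
	if !xid.TidBefore {
		before++ // loadSerial(tid) -> loadBefore(tid+1); overflow not handled in this sketch
	}
	return before
}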
......@@ -463,7 +460,7 @@ func (c *Cache) gcsignal() {
default:
// also ok - .gcCh is created with size 1 so if we could not
// put something to it - there is already 1 element in there
// and so gc will get signal to run
// and so gc will get a signal to run.
}
}
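The size-1 channel idiom described in the comment generalizes beyond gc: any number of producers can request one pending run without ever blocking. A minimal runnable sketch of the idiom (names are illustrative only):

package main

import "fmt"

func main() {
	// buffered channel of size 1: holds at most one pending wakeup
	gcCh := make(chan struct{}, 1)

	signal := func() {
		select {
		case gcCh <- struct{}{}:
			// queued a wakeup
		default:
			// a wakeup is already pending - the consumer will run anyway
		}
	}

	signal()
	signal() // coalesced with the first one - does not block

	<-gcCh
	fmt.Println("one coalesced signal received; pending:", len(gcCh))
}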
......
......@@ -37,8 +37,6 @@ import (
// tStorage implements read-only storage for cache testing
type tStorage struct {
//txnv []tTxnRecord // transactions; .tid↑
// oid -> [](.serial↑, .data)
dataMap map[zodb.Oid][]tOidData
}
......@@ -139,7 +137,6 @@ func TestCache(t *testing.T) {
__ := Checker{t}
ok1 := func(v bool) { t.Helper(); __.ok1(v) }
//eq := func(a, b interface{}) { t.Helper(); __.assertEq(a, b) }
hello := []byte("hello")
world := []byte("world!!")
......@@ -592,29 +589,3 @@ func (c *Checker) assertEq(a, b interface{}) {
c.t.Fatal("!eq:\n", pretty.Compare(a, b))
}
}
/*
type tTxnRecord struct {
tid zodb.Tid
// data records for oid changed in transaction
// .oid↑
datav []tDataRecord
}
type tDataRecord struct {
oid zodb.Oid
data []byte
}
if xid.TidBefore {
// find max txn with .tid < xid.Tid
n := len(s.txnv)
i := n - 1 - sort.Search(n, func(i int) bool {
return s.txnv[n - 1 - i].tid < xid.Tid
})
if i == -1 {
// XXX xid.Tid < all .tid - no such transaction
}
}
*/
......@@ -17,5 +17,5 @@
// See COPYING file for full licensing terms.
// See https://www.nexedi.com/licensing for rationale and options.
// Package storage provides common bits related to ZODB storages. XXX text
// Package storage provides common infrastructure related to ZODB storages.
package storage
......@@ -51,7 +51,7 @@
// https://github.com/zopefoundation/ZODB/blob/a89485c1/src/ZODB/fsIndex.py
// https://github.com/zopefoundation/ZODB/commit/1bb14faf
//
// Unless one is doing something FileStorage-specific, it is advices not to use
// Unless one is doing something FileStorage-specific, it is advised not to use
// fs1 package directly, and instead link-in lab.nexedi.com/kirr/neo/go/zodb/wks,
// open storage by zodb.OpenStorage and use it by way of zodb.IStorage interface.
//
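A sketch of the generic route recommended above; the exact zodb.OpenStorage signature, the wks import path and Close on the returned storage are assumptions derived from this comment, not verified API:

package main

import (
	"context"
	"log"

	"lab.nexedi.com/kirr/neo/go/zodb"
	_ "lab.nexedi.com/kirr/neo/go/zodb/wks" // link-in well-known storages (import path assumed)
)

func main() {
	ctx := context.Background()

	// signature assumed from the package comment above
	stor, err := zodb.OpenStorage(ctx, "data.fs") // placeholder path
	if err != nil {
		log.Fatal(err)
	}
	defer stor.Close() // Close assumed to be part of zodb.IStorage

	// ... use stor via the zodb.IStorage interface ...
}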
......@@ -89,10 +89,6 @@ type FileStorage struct {
// XXX keep loaded with LoadNoStrings ?
txnhMin TxnHeader
txnhMax TxnHeader
// XXX topPos = txnhMax.Pos + txnhMax.Len
//topPos int64 // position pointing just past last committed transaction
// // (= size(.file) when no commit is in progress)
}
// IStorage
......@@ -103,12 +99,14 @@ func (fs *FileStorage) StorageName() string {
}
// open opens FileStorage without loading index
//
// TODO read-write support
func open(path string) (*FileStorage, error) {
fs := &FileStorage{}
f, err := os.Open(path) // XXX opens in O_RDONLY
f, err := os.Open(path)
if err != nil {
return nil, err // XXX err more context ?
return nil, err
}
fs.file = f
......@@ -119,22 +117,12 @@ func open(path string) (*FileStorage, error) {
return nil, err
}
/*
// TODO recreate index if missing / not sane (cancel this job on ctx.Done)
// TODO verify index sane / topPos matches
topPos, index, err := LoadIndexFile(path + ".index")
if err != nil {
panic(err) // XXX err
}
fs.index = index
*/
// determine topPos from file size
// if it is invalid (e.g. a transaction committed only half-way) we'll catch it
// while loading/recreating index XXX recheck this logic
fi, err := f.Stat()
if err != nil {
return nil, err // XXX err ctx
return nil, err
}
topPos := fi.Size()
......@@ -142,19 +130,19 @@ func open(path string) (*FileStorage, error) {
// FIXME support empty file case -> then both txnhMin and txnhMax stays invalid
err = fs.txnhMin.Load(f, txnValidFrom, LoadAll) // XXX txnValidFrom here -> ?
if err != nil {
return nil, err // XXX +context
return nil, err
}
err = fs.txnhMax.Load(f, topPos, LoadAll)
// expect EOF but .LenPrev must be good
// FIXME ^^^ it will be no EOF if a txn was committed only partially
if err != io.EOF {
if err == nil {
err = fmt.Errorf("no EOF after topPos") // XXX err context
err = fmt.Errorf("%s: no EOF after topPos", f.Name())
}
return nil, err // XXX +context
return nil, fmt.Errorf("%s: %s", f.Name(), err)
}
if fs.txnhMax.LenPrev <= 0 {
panic("could not read LenPrev @topPos") // XXX err
return nil, fmt.Errorf("%s: could not read LenPrev @%d (last transaction)", f.Name(), fs.txnhMax.Pos)
}
err = fs.txnhMax.LoadPrev(f, LoadAll)
......@@ -166,35 +154,39 @@ func open(path string) (*FileStorage, error) {
return fs, nil
}
// Open opens FileStorage XXX text
// Open opens FileStorage @path.
//
// TODO read-write support
func Open(ctx context.Context, path string) (*FileStorage, error) {
// open data file
fs, err := open(path)
if err != nil {
return nil, err
}
// load/rebuild index
err = fs.loadIndex()
if err != nil {
log.Print(err)
log.Printf("%s: index recompute...", path)
fs.index, err = fs.computeIndex(ctx) // XXX better .reindex() which saves it?
// XXX if !ro -> .reindex() which saves it
fs.index, err = fs.computeIndex(ctx)
if err != nil {
fs.file.Close() // XXX lclose
return nil, err
}
}
// TODO verify index sane / topPos matches
// XXX place
// TODO verify index is sane / topPos matches
if fs.index.TopPos != fs.txnhMax.Pos + fs.txnhMax.Len {
panic("inconsistent index topPos") // XXX
panic("TODO: inconsistent index topPos") // XXX
}
return fs, nil
}
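Putting Open and Close together, read-only usage of the fs1 package could look like the following sketch (import path assumed from the repository layout; the data file path is a placeholder):

package main

import (
	"context"
	"log"

	"lab.nexedi.com/kirr/neo/go/zodb/storage/fs1" // import path assumed
)

func main() {
	ctx := context.Background()

	// read-only for now - see the read-write TODO on Open above
	fs, err := fs1.Open(ctx, "testdata/1.fs") // placeholder path
	if err != nil {
		log.Fatal(err)
	}
	defer fs.Close()

	// ... access the data via FileStorage / zodb.IStorage methods ...
}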
func (fs *FileStorage) Close() error {
// TODO dump index
// TODO dump index (if !ro ?)
err := fs.file.Close()
if err != nil {
return err
......@@ -530,8 +522,8 @@ func (fs *FileStorage) computeIndex(ctx context.Context) (index *Index, err erro
// loadIndex loads on-disk index to RAM
func (fs *FileStorage) loadIndex() (err error) {
// XXX LoadIndexFile already contains "%s: index load"
// XXX lock?
// XXX LoadIndexFile already contains "%s: index load"
defer xerr.Contextf(&err, "%s", fs.file.Name())
index, err := LoadIndexFile(fs.file.Name() + ".index")
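The deferred xerr.Contextf call above is the error-context idiom used throughout: if the function returns a non-nil error, the formatted prefix is prepended to it. A minimal sketch of the pattern (loadSomething is a hypothetical function; xerr import path assumed):

import (
	"os"

	"lab.nexedi.com/kirr/go123/xerr"
)

// loadSomething is a hypothetical example of the defer xerr.Contextf idiom:
// on error the returned message becomes "<path>: load: <original error>".
func loadSomething(path string) (err error) {
	defer xerr.Contextf(&err, "%s: load", path)

	_, err = os.Stat(path) // placeholder operation
	return err
}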
......@@ -563,7 +555,8 @@ func (fs *FileStorage) saveIndex() (err error) {
return nil
}
// IndexCorruptError is the error returned when index verification fails
// indexCorruptError is the error returned when index verification fails.
//
// XXX but io errors during verification return not this
type indexCorruptError struct {
index *Index
......@@ -576,6 +569,7 @@ func (e *indexCorruptError) Error() string {
}
// VerifyIndex verifies that index is correct
//
// XXX -> not exported @ fs1
func (fs *FileStorage) verifyIndex(ctx context.Context) error {
// XXX lock appends?
......@@ -596,6 +590,7 @@ func (fs *FileStorage) verifyIndex(ctx context.Context) error {
// Reindex rebuilds the index
//
// XXX -> not exported @ fs1
func (fs *FileStorage) reindex(ctx context.Context) error {
// XXX lock appends?
......
......@@ -94,38 +94,38 @@ const (
lenIterStart int64 = -0x1111111111111112 // = 0xeeeeeeeeeeeeeeee if unsigned
)
// ErrTxnRecord is returned on transaction record read / decode errors
type ErrTxnRecord struct {
// TxnError is returned on transaction record read / decode errors
type TxnError struct {
Pos int64 // position of transaction record
Subj string // about what .Err is
Err error // actual error
}
func (e *ErrTxnRecord) Error() string {
func (e *TxnError) Error() string {
return fmt.Sprintf("transaction record @%v: %v: %v", e.Pos, e.Subj, e.Err)
}
// err creates ErrTxnRecord for transaction located at txnh.Pos
// err creates TxnError for transaction located at txnh.Pos
func (txnh *TxnHeader) err(subj string, err error) error {
return &ErrTxnRecord{txnh.Pos, subj, err}
return &TxnError{txnh.Pos, subj, err}
}
// ErrDataRecord is returned on data record read / decode errors
type ErrDataRecord struct {
// DataError is returned on data record read / decode errors
type DataError struct {
Pos int64 // position of data record
Subj string // about what .Err is
Err error // actual error
}
func (e *ErrDataRecord) Error() string {
func (e *DataError) Error() string {
return fmt.Sprintf("data record @%v: %v: %v", e.Pos, e.Subj, e.Err)
}
// err creates ErrDataRecord for data record located at dh.Pos
// err creates DataError for data record located at dh.Pos
// XXX add link to containing txn? (check whether we can do it on data access) ?
func (dh *DataHeader) err(subj string, err error) error {
return &ErrDataRecord{dh.Pos, subj, err}
return &DataError{dh.Pos, subj, err}
}
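A sketch of how code using the fs1 package might recover the file position carried by these errors with a plain type switch (recordPos is a hypothetical helper, written as if inside package fs1):

// recordPos extracts the record position carried by TxnError / DataError.
// Hypothetical helper, not part of this commit.
func recordPos(err error) (pos int64, ok bool) {
	switch e := err.(type) {
	case *TxnError:
		return e.Pos, true
	case *DataError:
		return e.Pos, true
	}
	return 0, false
}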
......@@ -159,12 +159,10 @@ func (fh *FileHeader) Load(r io.ReaderAt) error {
_, err := r.ReadAt(fh.Magic[:], 0)
err = okEOF(err)
if err != nil {
//return fh.err("read", err)
return err // XXX err more context
return err
}
if string(fh.Magic[:]) != Magic {
return fmt.Errorf("%s: invalid magic %q", xio.Name(r), fh.Magic) // XXX -> decode err
//return decodeErr(fh, "invalid magic %q", fh.Magic)
return fmt.Errorf("%s: invalid magic %q", xio.Name(r), fh.Magic)
}
return nil
......@@ -385,7 +383,10 @@ func (txnh *TxnHeader) LoadPrev(r io.ReaderAt, flags TxnLoadFlags) error {
err := txnh.Load(r, txnh.Pos - lenPrev, flags)
if err != nil {
// EOF forward is unexpected here
return noEOF(err)
if err == io.EOF {
err = txnh.err("read", io.ErrUnexpectedEOF)
}
return err
}
if txnh.Len != lenPrev {
......@@ -527,7 +528,7 @@ func (dh *DataHeader) LoadPrevRev(r io.ReaderAt) error {
err := dh.loadPrevRev(r)
if err != nil {
// data record @...: loading prev rev: data record @...: ...
err = &ErrDataRecord{posCur, "loading prev rev", err}
err = &DataError{posCur, "loading prev rev", err}
}
return err
}
......@@ -542,12 +543,12 @@ func (dh *DataHeader) loadPrevRev(r io.ReaderAt) error {
}
if dh.Oid != oid {
// XXX vvv valid only if ErrDataRecord prints oid
// XXX vvv valid only if DataError prints oid
return decodeErr(dh, "oid mismatch")
}
if dh.Tid >= tid {
// XXX vvv valid only if ErrDataRecord prints tid
// XXX vvv valid only if DataError prints tid
return decodeErr(dh, "tid mismatch")
}
......@@ -614,7 +615,7 @@ func (dh *DataHeader) LoadBack(r io.ReaderAt) error {
}()
if err != nil {
err = &ErrDataRecord{posCur, "loading back rev", err}
err = &DataError{posCur, "loading back rev", err}
}
return err
......@@ -644,7 +645,7 @@ func (dh *DataHeader) loadNext(r io.ReaderAt, txnh *TxnHeader) error {
}
if nextPos + DataHeaderSize > txnTailPos {
return &ErrDataRecord{nextPos, "decode", fmt.Errorf("data record header overlaps txn boundary")} // XXX
return &DataError{nextPos, "decode", fmt.Errorf("data record header overlaps txn boundary")} // XXX
}
err := dh.Load(r, nextPos)
......
......@@ -58,7 +58,7 @@ type Dumper interface {
// To do so it reads the file header and then iterates over all transactions in the file.
// The logic to actually output information and, if needed, read/process data is implemented by Dumper d.
func Dump(w io.Writer, path string, dir fs1.IterDir, d Dumper) (err error) {
defer xerr.Contextf(&err, "%s: %s", path, d.DumperName()) // XXX ok?
defer xerr.Contextf(&err, "%s: %s", d.DumperName(), path)
it, f, err := fs1.IterateFile(path, dir)
if err != nil {
......@@ -359,9 +359,10 @@ func (d *DumperFsTail) DumpTxn(buf *xfmt.Buffer, it *fs1.Iter) error {
d.data = xbytes.Realloc64(d.data, dataLen)
_, err := it.R.ReadAt(d.data, txnh.DataPos())
if err != nil {
// XXX -> txnh.Err(...) ?
// XXX err = noEOF(err)
return &fs1.ErrTxnRecord{txnh.Pos, "read data payload", err}
if err == io.EOF {
err = io.ErrUnexpectedEOF // XXX -> noEOF(err)
}
return &fs1.TxnError{txnh.Pos, "read data payload", err}
}
// print information about read txn record
......
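noEOF, referenced in the XXX comments above, presumably does what both call sites now spell inline: turn io.EOF into io.ErrUnexpectedEOF where EOF is not a valid outcome. A minimal version consistent with that usage:

import "io"

// noEOF converts io.EOF to io.ErrUnexpectedEOF, leaving other errors (and nil)
// untouched - a sketch matching the inline handling in TxnHeader.LoadPrev and
// DumperFsTail.DumpTxn above.
func noEOF(err error) error {
	if err == io.EOF {
		err = io.ErrUnexpectedEOF
	}
	return err
}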
......@@ -18,6 +18,7 @@
// See https://www.nexedi.com/licensing for rationale and options.
// Package fsb specializes cznic/b.Tree for FileStorage index needs.
//
// See gen-fsbtree for details.
package fsb
......
#!/bin/bash -e
# generate b.Tree with compile-time KEY=zodb.Oid, VALUE=int64, tuned kd and direct oidCmp calls
# Copyright (C) 2017 Nexedi SA and Contributors.
# Kirill Smelkov <kirr@nexedi.com>
#
# This program is free software: you can Use, Study, Modify and Redistribute
# it under the terms of the GNU General Public License version 3, or (at your
# option) any later version, as published by the Free Software Foundation.
#
# You can also Link and Combine this program with other software covered by
# the terms of any of the Free Software licenses or any of the Open Source
# Initiative approved licenses and Convey the resulting work. Corresponding
# source of such a combination shall include the source code for all other
# software used.
#
# This program is distributed WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# See COPYING file for full licensing terms.
# See https://www.nexedi.com/licensing for rationale and options.
KEY=zodb.Oid
VALUE=int64
......
......@@ -18,7 +18,7 @@
#
# See COPYING file for full licensing terms.
# See https://www.nexedi.com/licensing for rationale and options.
"""generate reference database and index for tests"""
"""generate reference fs1 database and index for tests"""
from ZODB.FileStorage import FileStorage
from zodbtools.test.gen_testdata import gen_testdb
......
......@@ -49,7 +49,7 @@ func loadZdumpPy(t *testing.T, path string) string {
// bugs. Here we want to compare output ideally bit-to-bit but those
// \v vs \x0b glitches prevent that from being done directly. So here we
// are with this ugly hack:
var pyNoBackLetter = []struct {backNoLetterRe, backLetter string} {
var pyNoBackLetter = []struct{ backNoLetterRe, backLetter string }{
{`\\x07`, `\a`},
{`\\x08`, `\b`},
{`\\x0b`, `\v`},
......
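The table above pairs a regexp source (backNoLetterRe) with its letter-escape replacement (backLetter); applying it to the python output before comparison could look like this sketch (normPyRepr is a hypothetical helper):

import "regexp"

// normPyRepr rewrites the \xNN escapes python emits for \a, \b, \v, ... into
// their letter forms so the python and Go dumps can be compared directly.
// Hypothetical helper illustrating how pyNoBackLetter would be applied.
func normPyRepr(s string) string {
	for _, r := range pyNoBackLetter {
		s = regexp.MustCompile(r.backNoLetterRe).ReplaceAllString(s, r.backLetter)
	}
	return s
}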