Commit 54192026 (wendelin.core)
authored Oct 02, 2018 by Kirill Smelkov
commit message: "."
parent c673f1b1

Showing 4 changed files with 69 additions and 19 deletions (+69 -19)
wcfs/wcfs.go        +10 -10
wcfs/wcfs_test.py    +1  -1
wcfs/zblk.go        +54  -6
wcfs/zblk_test.go    +4  -2
wcfs/wcfs.go

@@ -446,6 +446,8 @@ func (bfroot *BigFileRoot) Mkdir(name string, mode uint32, fctx *fuse.Context) (
 // /bigfile/<bigfileX>/head/data -> Getattr serves stat.
 func (bfdata *BigFileData) GetAttr(out *fuse.Attr, _ nodefs.File, fctx *fuse.Context) fuse.Status {
+	// XXX locking
 	out.Mode = fuse.S_IFREG | 0444
 	out.Size = 0	// FIXME
 	// .Blocks
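The FIXME above matters in practice: the kernel bounds reads by the size that Getattr reports, so a file whose attributes say Size = 0 normally reads back as empty. Below is a minimal sketch of the same go-fuse v1 nodefs pattern for a read-only virtual file that reports its real size; smallFile and its data callback are hypothetical names, not wcfs code, and the go-fuse import paths are assumed to be the v1 ones.

// Sketch only: a read-only virtual file whose size is derived from its current content.
package example

import (
	"github.com/hanwen/go-fuse/fuse"
	"github.com/hanwen/go-fuse/fuse/nodefs"
)

// smallFile serves content that is recomputed on every access.
type smallFile struct {
	nodefs.Node
	data func() []byte // returns the current content
}

// GetAttr reports a regular read-only file with the actual content size;
// with Size left at 0, as in the FIXME above, the kernel would typically
// clamp reads to 0 bytes.
func (f *smallFile) GetAttr(out *fuse.Attr, _ nodefs.File, _ *fuse.Context) fuse.Status {
	out.Mode = fuse.S_IFREG | 0444
	out.Size = uint64(len(f.data()))
	return fuse.OK
}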
@@ -461,20 +463,18 @@ func (bfdata *BigFileData) GetAttr(out *fuse.Attr, _ nodefs.File, fctx *fuse.Con
 }
 
-// Read implements reading from /bigfile/<bigfileX>/head/data.
-// XXX and from /bigfile/<bigfileX>/@<tidX>/data.
-/*
-func (bf *BigFileData) Read(_ nodefs.File, dest []byte, off int64, _ fuse.Context) (fuse.ReadResult, fuse.Status) {
-	.at
-	.topoid
-}
-*/
+// /bigfile/<bigfileX>/head/data -> Read serves read.
+func (bf *BigFileData) Read(_ nodefs.File, dest []byte, off int64, _ *fuse.Context) (fuse.ReadResult, fuse.Status) {
+	// XXX locking
+	panic("TODO")
+	//bf.zbf.blksize
+	// XXX
+}
 
-// /bigfile/<bigfileX>/head/at -> served by readAt.
+// /bigfile/<bigfileX>/head/at -> readAt serves read.
 func (bf *BigFile) readAt() []byte {
 	// XXX locking
 	// XXX zconn -> zbf.PJar() ?
 	return []byte(bf.zconn.At().String())
 }
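For context, readAt above produces the bytes served under /bigfile/<bigfileX>/head/at, while the FUSE Read handler for head/data is still a stub. The sketch below is hypothetical and only assumes the go-fuse v1 nodefs Read signature already used in wcfs.go; it shows one common way such dynamically produced bytes are served while honouring the offset the kernel passes in. smallFileData and its read callback are made-up names.

// Sketch only: serving small, dynamically generated content from a Read handler.
package example

import (
	"github.com/hanwen/go-fuse/fuse"
	"github.com/hanwen/go-fuse/fuse/nodefs"
)

type smallFileData struct {
	nodefs.Node
	read func() []byte // e.g. something like readAt(): the current "at" as text
}

// Read returns the slice of the current content selected by off and len(dest).
func (f *smallFileData) Read(_ nodefs.File, dest []byte, off int64, _ *fuse.Context) (fuse.ReadResult, fuse.Status) {
	data := f.read()
	if off >= int64(len(data)) {
		return fuse.ReadResultData(nil), fuse.OK
	}
	end := off + int64(len(dest))
	if end > int64(len(data)) {
		end = int64(len(data))
	}
	return fuse.ReadResultData(data[off:end]), fuse.OK
}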
wcfs/wcfs_test.py

@@ -156,7 +156,7 @@ def test_bigfile_empty():
     # head/at = last txn of whole db
     assert readfile(fpath + "/head/at") == tid2.encode('hex')
+    # TODO check head/at syncs to later non-bigfile commits
 
     wc.close()
wcfs/zblk.go

@@ -20,6 +20,7 @@
 package main
 // ZBlk* + ZBigFile loading
 // module: "wendelin.bigfile.file_zodb"
 //
 // ZBigFile
@@ -35,8 +36,6 @@ package main
 //	ZData
 //	    str (chunk)
 
 import (
 	"context"
 	"fmt"
@@ -47,6 +46,7 @@ import (
 	"golang.org/x/sync/errgroup"
 
 	"lab.nexedi.com/kirr/go123/mem"
+	"lab.nexedi.com/kirr/go123/xerr"
 	"lab.nexedi.com/kirr/neo/go/zodb"
 	"lab.nexedi.com/kirr/neo/go/zodb/btree"
 	pickle "github.com/kisielk/og-rek"
@@ -54,6 +54,15 @@ import (
 	"./internal/pycompat"
 )
 
+// zBlkLoader is the interface that every ZBlk* block implements internally for
+// loading its data.
+type zBlkLoader interface {
+	// loadBlkData loads from database and returns data block stored by this ZBlk.
+	//
+	// XXX trailing \0 can be stripped.
+	loadBlkData(ctx context.Context) ([]byte, error)
+}
+
 // module of Wendelin ZODB py objects
 const zwendelin = "wendelin.bigfile.file_zodb"
@@ -63,6 +72,7 @@ const zwendelin = "wendelin.bigfile.file_zodb"
 type ZBlk0 struct {
 	zodb.Persistent
+	// XXX py source uses bytes(buf) but on python2 it still results in str
 	blkdata string
 }
@@ -82,7 +92,7 @@ func (zb *zBlk0State) PySetState(pystate interface{}) error {
 	return nil
 }
 
-func (zb *ZBlk0) LoadBlkData(ctx context.Context) ([]byte, error) {
+func (zb *ZBlk0) loadBlkData(ctx context.Context) ([]byte, error) {
 	// XXX err ctx
 	err := zb.PActivate(ctx)
@@ -100,6 +110,7 @@ func (zb *ZBlk0) LoadBlkData(ctx context.Context) ([]byte, error) {
 type ZData struct {
 	zodb.Persistent
+	// XXX py source uses bytes(buf) but on python2 it still results in str
 	data string
 }
@@ -142,7 +153,7 @@ func (zb *zBlk1State) PySetState(pystate interface{}) error {
 	return nil
 }
 
-func (zb *ZBlk1) LoadBlkData(ctx context.Context) ([]byte, error) {
+func (zb *ZBlk1) loadBlkData(ctx context.Context) ([]byte, error) {
 	// XXX errctx
 	err := zb.PActivate(ctx)
@@ -178,7 +189,6 @@ func (zb *ZBlk1) LoadBlkData(ctx context.Context) ([]byte, error) {
 		// no PDeactivate, zd remains live
 		//fmt.Printf("@%d -> zdata #%s (%d)\n", offset, zd.POid(), len(zd.data))
 		mu.Lock()
 		defer mu.Unlock()
@@ -200,6 +210,7 @@ func (zb *ZBlk1) LoadBlkData(ctx context.Context) ([]byte, error) {
 	// XXX off + len > blksize  !ok
 	//fmt.Printf("\nbucket: %v\n\n", b.Entryv())
 	for _, e := range b.Entryv() {
 		zd, ok := e.Value().(*ZData)
 		if !ok {
@@ -215,7 +226,6 @@ func (zb *ZBlk1) LoadBlkData(ctx context.Context) ([]byte, error) {
 	return nil
 }
 
 // loadBTree spawns loading of all BTree children.
 var loadBTree func(t *btree.IOBTree) error
 loadBTree = func(t *btree.IOBTree) error {
@@ -351,6 +361,44 @@ func (bf *zBigFileState) PySetState(pystate interface{}) (err error) {
 	return nil
 }
 
+// LoadBlk loads data for file block #blk.
+//
+// XXX better load into user-provided buf?
+func (bf *ZBigFile) LoadBlk(ctx context.Context, blk int64) (_ []byte, err error) {
+	defer xerr.Contextf(&err, "bigfile %s: loadblk %d", bf.POid(), blk)
+
+	err = bf.PActivate(ctx)
+	if err != nil {
+		return nil, err
+	}
+	defer bf.PDeactivate()
+
+	xzblk, ok, err := bf.blktab.Get(ctx, blk)
+	if err != nil {
+		return nil, err
+	}
+
+	if !ok {
+		return make([]byte, bf.blksize), nil
+	}
+
+	zblk, ok := xzblk.(zBlkLoader)
+	if !ok {
+		return nil, fmt.Errorf("expect ZBlk*; got %s", typeOf(xzblk))
+	}
+
+	blkdata, err := zblk.loadBlkData(ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	if l := int64(len(blkdata)); l > bf.blksize {
+		return nil, fmt.Errorf("invalid blk: size = %d (> blksize = %d)", l, bf.blksize)
+	}
+
+	// XXX append trailing \0 to reach .blksize ?
+	return blkdata, nil
+}
+
 // ----------------------------------------
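LoadBlk leaves open whether short block data should be padded with trailing zeros up to blksize (the "XXX append trailing \0" comment). A small hypothetical helper, not part of this commit, shows what such padding on the caller side could look like, given that ZBlk encoders are allowed to strip trailing zeros.

// Sketch only: padBlk is a made-up helper, not wcfs code.
package example

// padBlk returns blkdata extended with zero bytes so that its length reaches blksize.
func padBlk(blkdata []byte, blksize int64) []byte {
	if int64(len(blkdata)) >= blksize {
		return blkdata
	}
	full := make([]byte, blksize)
	copy(full, blkdata)
	return full
}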
wcfs/zblk_test.go

@@ -86,10 +86,10 @@ func TestZBlk(t *testing.T) {
 		binary.BigEndian.PutUint32(data[i*4:], i)
 	}
 
-	z0Data, err := z0.LoadBlkData(ctx); X(err)
+	z0Data, err := z0.loadBlkData(ctx); X(err)
 	assert.Equal(z0Data, data, "ZBlk0 data wrong")
 
-	z1Data, err := z1.LoadBlkData(ctx); X(err)
+	z1Data, err := z1.loadBlkData(ctx); X(err)
 	if false {
 		fmt.Printf("%#v\n", z1Data)
 	}
@@ -113,4 +113,6 @@ func TestZBlk(t *testing.T) {
 	}
 
 	// XXX check zf.blktab.MaxKey ?
+
+	// XXX check zf.LoadBlk()
 }
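The new "XXX check zf.LoadBlk()" marker hints at a follow-up test. Exercising the real ZBigFile needs the ZODB fixtures already set up in TestZBlk, so the following is only a self-contained toy illustrating the key property such a check would cover: a block missing from blktab reads back as blksize zero bytes. loadBlkOrZero and TestLoadBlkOrZero are hypothetical names.

// Sketch only: hole handling in the spirit of ZBigFile.LoadBlk, over an in-memory table.
package example

import (
	"bytes"
	"testing"
)

// loadBlkOrZero returns the stored block, or blksize zero bytes for a hole.
func loadBlkOrZero(blktab map[int64][]byte, blk, blksize int64) []byte {
	data, ok := blktab[blk]
	if !ok {
		return make([]byte, blksize)
	}
	return data
}

func TestLoadBlkOrZero(t *testing.T) {
	blktab := map[int64][]byte{0: []byte("hello")}
	if got := loadBlkOrZero(blktab, 0, 8); !bytes.Equal(got, []byte("hello")) {
		t.Fatalf("blk 0: got %q", got)
	}
	if got := loadBlkOrZero(blktab, 1, 8); !bytes.Equal(got, make([]byte, 8)) {
		t.Fatalf("blk 1 (hole): got %q", got)
	}
}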