nexedi / neoppod

Commit 7087d5f4
authored Apr 25, 2018 by Kirill Smelkov
Commit message: .
parent de6e50fc
Showing 4 changed files with 90 additions and 177 deletions (+90 -177)
go/neo/neo_test.go        +28 -164
go/neo/t_cluster_test.go  +36 -13
go/neo/t_events_test.go   +20 -0
go/neo/t_tracego_test.go  +6 -0
go/neo/neo_test.go @ 7087d5f4
@@ -49,160 +49,42 @@ import (
)

/*
func TestMasterStorage0(t0 *testing.T) {
// M drives cluster with 1 S & C through recovery -> verification -> service -> shutdown
func TestMasterStorage(t0 *testing.T) {
    t := NewTestCluster(t0, "abc1")
    defer t.Stop()

    M := t.NewMaster("m")
    //zstor := xfs1stor("../zodb/storage/fs1/testdata/1.fs")
    zstor := xfs1stor("../zodb/storage/fs1/testdata/1.fs")
    zback := xfs1back("../zodb/storage/fs1/testdata/1.fs")
    S := t.NewStorage("s", "m:1", zback)  // XXX do we need to provide Mlist here?
    C := t.NewClient("c")
    C := t.NewClient("c", "m:1")

    // start nodes  XXX move starting to TestCluster?
    gwg, gctx := errgroup.WithContext(bg)
    //defer xwait(gwg)  XXX not yet correctly stopped on context cancel
    gwg.Go(func() error {
        return M.Run(gctx)
    })
    gwg.Go(func() error {
        return S.Run(gctx)
    })
    gwg.Go(func() error {
        return C.run(gctx)
    })

    tM := t.Checker("m")
    tS := t.Checker("s")
    tC := t.Checker("c")
    tMS := t.Checker("m-s")
    tSM := t.Checker("s-m")

    // M starts listening
    tM.Expect(netlisten("m:1"))
    tM.Expect(δnode("m", "m:1", proto.MASTER, 1, proto.RUNNING, proto.IdTimeNone))
    tM.Expect(clusterState("m", proto.ClusterRecovering))

    // TODO create C; C tries connect to master - rejected ("not yet operational")

    // S starts listening
    tS.Expect(netlisten("s:1"))

    // S connects M
    tSM.Expect(netconnect("s:2", "m:2", "m:1"))
    tSM.Expect(conntx("s:2", "m:2", 1, &proto.RequestIdentification{
        NodeType:    proto.STORAGE,
        UUID:        0,
        Address:     xnaddr("s:1"),
        ClusterName: "abc1",
        IdTime:      proto.IdTimeNone,
    }))

    tM.Expect(δnode("m", "s:1", proto.STORAGE, 1, proto.PENDING, 0.01))

    tSM.Expect(conntx("m:2", "s:2", 1, &proto.AcceptIdentification{
        NodeType:      proto.MASTER,
        MyUUID:        proto.UUID(proto.MASTER, 1),
        NumPartitions: 1,
        NumReplicas:   0,
        YourUUID:      proto.UUID(proto.STORAGE, 1),
    }))

    // TODO test ID rejects (uuid already registered, ...)

    // M starts recovery on S
    tMS.Expect(conntx("m:2", "s:2", 0, &proto.Recovery{}))
    tMS.Expect(conntx("s:2", "m:2", 0, &proto.AnswerRecovery{
        // empty new node
        PTid:        0,
        BackupTid:   proto.INVALID_TID,
        TruncateTid: proto.INVALID_TID,
    }))

    tMS.Expect(conntx("m:2", "s:2", 2, &proto.AskPartitionTable{}))
    tMS.Expect(conntx("s:2", "m:2", 2, &proto.AnswerPartitionTable{
        PTid:    0,
        RowList: []proto.RowInfo{},
    }))

    // M ready to start: new cluster, no in-progress S recovery
    tM.Expect(masterStartReady("m", true))
}
*/
// M drives cluster with 1 S & C through recovery -> verification -> service -> shutdown
func TestMasterStorage(t *testing.T) {
    rt := NewEventRouter()
    dispatch := tracetest.NewEventDispatcher(rt)
    tracer := NewTraceCollector(dispatch)

    net := pipenet.New("testnet")  // test network

    tracer.Attach()
    defer tracer.Detach()

    // XXX -> M = testenv.NewMaster("m")  (mkhost, chan, register to tracer ...)
    // XXX ----//---- S, C
    Mhost := xnet.NetTrace(net.Host("m"), tracer)
    Shost := xnet.NetTrace(net.Host("s"), tracer)
    Chost := xnet.NetTrace(net.Host("c"), tracer)

    cM  := tracetest.NewSyncChan("m.main")  // trace of events local to M
    cS  := tracetest.NewSyncChan("s.main")  // trace of events local to S  XXX with cause root also on S
    cC  := tracetest.NewSyncChan("c.main")
    cMS := tracetest.NewSyncChan("m-s")  // trace of events with cause root being m -> s send
    cSM := tracetest.NewSyncChan("s-m")  // trace of events with cause root being s -> m send
    cMC := tracetest.NewSyncChan("m-c")  // ----//---- m -> c
    cCM := tracetest.NewSyncChan("c-m")  // ----//---- c -> m
    cCS := tracetest.NewSyncChan("c-s")  // ----//---- c -> s
    tM  := tracetest.NewEventChecker(t, dispatch, cM)
    tS  := tracetest.NewEventChecker(t, dispatch, cS)
    tC  := tracetest.NewEventChecker(t, dispatch, cC)
    tMS := tracetest.NewEventChecker(t, dispatch, cMS)
    tSM := tracetest.NewEventChecker(t, dispatch, cSM)
    tMC := tracetest.NewEventChecker(t, dispatch, cMC)
    tCM := tracetest.NewEventChecker(t, dispatch, cCM)
    tCS := tracetest.NewEventChecker(t, dispatch, cCS)

    rt.BranchNode("m", cM)
    rt.BranchNode("s", cS)
    rt.BranchLink("s-m", cSM, cMS)
    rt.BranchLink("c-m", cCM, cMC)
    rt.BranchLink("c-s", cCS, rt.defaultq /* S never pushes to C */)
    // rt.BranchState("s", cMS)  // state on S is controlled by M
    // rt.BranchState("c", cMC)  // state on C is controlled by M
    rt.BranchNode("c", cC)
    // cluster nodes
    M := tNewMaster("abc1", ":1", Mhost)
    zstor := xfs1stor("../zodb/storage/fs1/testdata/1.fs")
    zback := xfs1back("../zodb/storage/fs1/testdata/1.fs")
    S := tNewStorage("abc1", "m:1", ":1", Shost, zback)
    C := newClient("abc1", "m:1", Chost)

    // let tracer know how to map state addresses to node names
    tracer.RegisterNode(M.node, "m")  // XXX better Mhost.Name() ?
    tracer.RegisterNode(S.node, "s")
    tracer.RegisterNode(C.node, "c")

    gwg := &errgroup.Group{}

    tCM := t.Checker("c-m")
    tMC := t.Checker("m-c")
    tCS := t.Checker("c-s")
    // ----------------------------------------
    // start master
    Mclock := &vclock{}
    M.monotime = Mclock.monotime
    Mctx, Mcancel := context.WithCancel(bg)
    gox(gwg, func() {
        err := M.Run(Mctx)
        fmt.Println("M err: ", err)
        exc.Raiseif(err)
    })

    // start storage
    Sctx, Scancel := context.WithCancel(bg)
    gox(gwg, func() {
        err := S.Run(Sctx)
        fmt.Println("S err: ", err)
        exc.Raiseif(err)
    })

    // trace

    // M starts listening
    tM.Expect(netlisten("m:1"))
    tM.Expect(δnode("m", "m:1", proto.MASTER, 1, proto.RUNNING, proto.IdTimeNone))
@@ -213,7 +95,9 @@ func TestMasterStorage(t *testing.T) {
    // S starts listening
    tS.Expect(netlisten("s:1"))

    // S connects M
    tSM.Expect(netdial("s", "m:1"))
    tSM.Expect(netconnect("s:2", "m:2", "m:1"))
    tSM.Expect(conntx("s:2", "m:2", 1, &proto.RequestIdentification{
        NodeType: proto.STORAGE,
@@ -223,6 +107,7 @@ func TestMasterStorage(t *testing.T) {
        IdTime:   proto.IdTimeNone,
    }))

    tM.Expect(δnode("m", "s:1", proto.STORAGE, 1, proto.PENDING, 0.01))

    tSM.Expect(conntx("m:2", "s:2", 1, &proto.AcceptIdentification{
@@ -253,7 +138,6 @@ func TestMasterStorage(t *testing.T) {
    // M ready to start: new cluster, no in-progress S recovery
    tM.Expect(masterStartReady("m", true))

    // ----------------------------------------
    // M <- start cmd
@@ -312,22 +196,12 @@ func TestMasterStorage(t *testing.T) {
    // TODO S join while service
    // TODO M.Stop while service

    // ----------------------------------------
    // XXX try starting client from the beginning
    // start client
    Cctx, Ccancel := context.WithCancel(bg)
    gox(gwg, func() {
        err := C.run(Cctx)
        fmt.Println("C err: ", err)
        exc.Raiseif(err)
    })

    // trace
    // trace of client start

    // C connects M
    tCM.Expect(netdial("c", "m:1"))
    tCM.Expect(netconnect("c:1", "m:3", "m:1"))
    tCM.Expect(conntx("c:1", "m:3", 1, &proto.RequestIdentification{
        NodeType: proto.CLIENT,
@@ -372,7 +246,6 @@ func TestMasterStorage(t *testing.T) {
    tC.Expect(δnode("c", "s:1", proto.STORAGE, 1, proto.RUNNING, 0.01))
    tC.Expect(δnode("c", "", proto.CLIENT, 1, proto.RUNNING, 0.02))

    // ----------------------------------------
    // C asks M about last tid  XXX better master sends it itself on new client connected
@@ -414,6 +287,7 @@ func TestMasterStorage(t *testing.T) {
    // trace
    // ... -> connects to S
    tCS.Expect(netdial("c", "s:1"))
    tCS.Expect(netconnect("c:2", "s:3", "s:1"))
    tCS.Expect(conntx("c:2", "s:3", 1, &proto.RequestIdentification{
        NodeType: proto.CLIENT,
@@ -450,7 +324,6 @@ func TestMasterStorage(t *testing.T) {
    xwait(wg)

    // ----------------------------------------
    // verify NextSerial is properly returned in AnswerObject via trace-loading prev. revision of obj1
@@ -494,7 +367,7 @@ func TestMasterStorage(t *testing.T) {
    // XXX hack: disable tracing early so that C.Load() calls do not deadlock
    // TODO refactor cluster creation into func
    // TODO move client all loading tests into separate test where tracing will be off
    tracer.Detach()
    t.gotracer.Detach()

    for {
        _, dataIter, err := ziter.NextTxn(bg)
@@ -542,9 +415,6 @@ func TestMasterStorage(t *testing.T) {
    }

    // TODO S.Stop() or Scancel()
    // expect:
    // M.nodeTab -= S
@@ -557,12 +427,6 @@ func TestMasterStorage(t *testing.T) {
    // (M needs to resend to all storages recovery messages just from start)
    time.Sleep(100 * time.Millisecond)  // XXX temp so net tx'ers could actually tx
    return

    Mcancel()  // FIXME ctx cancel not fully handled
    Scancel()  // ---- // ----
    Ccancel()  // ---- // ----

    xwait(gwg)
}
go/neo/t_cluster_test.go @ 7087d5f4
@@ -32,6 +32,7 @@ import (
    "lab.nexedi.com/kirr/neo/go/xcommon/xtracing/tracetest"
    "lab.nexedi.com/kirr/neo/go/neo/storage"
    "lab.nexedi.com/kirr/neo/go/zodb"
)
@@ -50,7 +51,7 @@ type TestCluster struct {
    nodeTab  map[string /*node*/]*tNode
    checkTab map[string /*node*/]*tracetest.EventChecker

    ttest testing.TB  // original testing env this cluster was created at
    testing.TB        // original testing env this cluster was created at
}

// tNode represents information about a test node ... XXX
@@ -59,9 +60,20 @@ type tNode struct {
}

// XXX stub
type ITestMaster interface {}
type ITestStorage interface {}
type ITestClient interface {}
type ITestMaster interface {
    Run(ctx context.Context) error
    Start() error
}

type ITestStorage interface {
    Run(ctx context.Context) error
}

type ITestClient interface {
    run(ctx context.Context) error
    zodb.IStorageDriver
}

// NewTestCluster creates new NEO test cluster.
//
@@ -77,7 +89,7 @@ func NewTestCluster(ttest testing.TB, name string) *TestCluster {
        checkTab: make(map[string]*tracetest.EventChecker),

        //... XXX
        ttest: ttest,
        TB:    ttest,
    }

    t.erouter = NewEventRouter()
@@ -134,7 +146,7 @@ func (t *TestCluster) registerNewNode(name string) *tNode {
    // tracechecker for events on node
    c1 := tracetest.NewSyncChan(name)  // trace of events local to node
    t.erouter.BranchNode(name, c1)
    t.checkTab[name] = tracetest.NewEventChecker(t.ttest, t.edispatch, c1)
    t.checkTab[name] = tracetest.NewEventChecker(t.TB, t.edispatch, c1)

    // tracecheckers for events on links of all node1-node2 pairs
    for name2 := range t.nodeTab {
@@ -143,8 +155,8 @@ func (t *TestCluster) registerNewNode(name string) *tNode {
        // ----//---- node2 -> node1 send
        c21 := tracetest.NewSyncChan(name2 + "-" + name)

        t12 := tracetest.NewEventChecker(t.ttest, t.edispatch, c12)
        t21 := tracetest.NewEventChecker(t.ttest, t.edispatch, c21)
        t12 := tracetest.NewEventChecker(t.TB, t.edispatch, c12)
        t21 := tracetest.NewEventChecker(t.TB, t.edispatch, c21)

        t.erouter.BranchLink(name+"-"+name2, c12, c21)
        t.checkTab[name+"-"+name2] = t12
@@ -167,22 +179,33 @@ func (t *TestCluster) registerNewNode(name string) *tNode {
// XXX error of creating py process?
func (t *TestCluster) NewMaster(name string) ITestMaster {
    node := t.registerNewNode(name)
    return tNewMaster(t.name, ":1", node.net)
    m := tNewMaster(t.name, ":1", node.net)

    // let tracer know how to map state addresses to node names
    t.gotracer.RegisterNode(m.node, name)

    return m
}

func (t *TestCluster) NewStorage(name, masterAddr string, back storage.Backend) ITestStorage {
    node := t.registerNewNode(name)
    return tNewStorage(t.name, masterAddr, ":1", node.net, back)
    s := tNewStorage(t.name, masterAddr, ":1", node.net, back)
    t.gotracer.RegisterNode(s.node, name)
    return s
}

func (t *TestCluster) NewClient(name, masterAddr string) ITestClient {
    node := t.registerNewNode(name)
    return newClient(t.name, masterAddr, node.net)
    c := newClient(t.name, masterAddr, node.net)
    t.gotracer.RegisterNode(c.node, name)
    return c
}

// test-wrapper around Storage - to automatically listen by address, not provided listener.
// tStorage is test-wrapper around Storage.
//
// - to automatically listen by address, not provided listener.
type tStorage struct {
    *Storage

    serveAddr string
@@ -204,7 +227,7 @@ func (s *tStorage) Run(ctx context.Context) error {
    return s.Storage.Run(ctx, l)
}

// test-wrapper around Master
// tMaster is test-wrapper around Master.
//
// - automatically listens by address, not provided listener.
// - uses virtual clock.
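For orientation, here is a minimal sketch of how a test could drive the values returned by the updated TestCluster constructors through the Run methods now required by ITestMaster/ITestStorage/ITestClient. It mirrors the commented-out draft in neo_test.go above, is not part of this commit, and assumes the same package and the errgroup-based start used there (errgroup and context imports as in neo_test.go).

// startTestNodes is a hypothetical helper: run every node under one errgroup,
// so that a failure in any node cancels the shared context and stops the others.
func startTestNodes(ctx context.Context, M ITestMaster, S ITestStorage, C ITestClient) *errgroup.Group {
    wg, gctx := errgroup.WithContext(ctx)
    wg.Go(func() error { return M.Run(gctx) })
    wg.Go(func() error { return S.Run(gctx) })
    wg.Go(func() error { return C.run(gctx) })
    return wg
}

The caller would then wait on the returned group (or cancel ctx) at the end of the test, much as the draft intends to do with gwg.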
go/neo/t_events_test.go @ 7087d5f4
@@ -35,6 +35,12 @@ import (
// NOTE to ease testing we use strings only to represent addresses or where
// event happened - not e.g. net.Addr or *NodeTab.

// xnet.TraceDial
// event: network dial starts
type eventNetDial struct {
    Dialer, Addr string
}

// xnet.TraceConnect
// event: network connection was made
type eventNetConnect struct {
@@ -92,6 +98,10 @@ func masterStartReady(where string, ready bool) *eventMStartReady {
// ---- shortcuts ----

func netdial(dialer, addr string) *eventNetDial {
    return &eventNetDial{Dialer: dialer, Addr: addr}
}

// shortcut for net connect event
func netconnect(src, dst, dialed string) *eventNetConnect {
    return &eventNetConnect{Src: src, Dst: dst, Dialed: dialed}
@@ -229,10 +239,20 @@ func (r *EventRouter) Route(event interface{}) (dst *tracetest.SyncChan) {
    defer r.mu.Unlock()

    switch ev := event.(type) {
    default:
        panic(fmt.Sprintf("event router: unexpected event %T", ev))

    // networking
    case *eventNetListen:
        dst = r.byNode[host(ev.Laddr)]

    case *eventNetDial:
        link := ev.Dialer + "-" + host(ev.Addr)
        ldst := r.byLink[link]
        if ldst != nil {
            dst = ldst.a
        }

    case *eventNetConnect:
        link := host(ev.Src) + "-" + host(ev.Dst)
        ldst := r.byLink[link]
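To illustrate the new routing rule for dial events: the router keys links by "<dialer>-<host(addr)>" and, if such a link was branched, delivers the event to its a side. The following sketch is not part of the commit and assumes the a side of a BranchLink'ed pair is its first channel argument, as the ldst.a access above suggests.

// routeNetDialIllustration is a hypothetical snippet showing where a dial
// from "c" to "s:1" is expected to land: link "c-s", its a-side channel cCS.
func routeNetDialIllustration() *tracetest.SyncChan {
    rt := NewEventRouter()
    cCS := tracetest.NewSyncChan("c-s")  // events with cause root being c -> s send
    cSC := tracetest.NewSyncChan("s-c")
    rt.BranchLink("c-s", cCS, cSC)
    return rt.Route(&eventNetDial{Dialer: "c", Addr: "s:1"})  // expected: cCS
}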
go/neo/t_tracego_test.go @ 7087d5f4
@@ -86,6 +86,12 @@ func (t *TraceCollector) RegisterNode(node *NodeApp, name string) {
    t.clusterState2Owner[&node.ClusterState] = name
}

func (t *TraceCollector) TraceNetDial(ev *xnet.TraceDial) {
    t.d.Dispatch(&eventNetDial{
        Dialer: ev.Dialer,
        Addr:   ev.Addr,
    })
}

func (t *TraceCollector) TraceNetConnect(ev *xnet.TraceConnect) {
    t.d.Dispatch(&eventNetConnect{
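Taken together with the netdial shortcut and the EventRouter case above, TraceNetDial closes the path from the network tracer to the per-link checkers. A rough sketch of that path follows; it is not part of the commit and assumes that an xnet.NetTrace-wrapped host reports its Dial calls to the attached collector in the same way it already reports connects.

// traceNetDialFlow sketches how one dial event is expected to travel
// (hypothetical helper, written against the setup used in neo_test.go).
func traceNetDialFlow(tracer *TraceCollector) {
    net := pipenet.New("testnet")
    Chost := xnet.NetTrace(net.Host("c"), tracer)  // tracer attached as in neo_test.go
    // Chost.Dial(ctx, "s:1")
    //   -> xnet.TraceDial{Dialer: "c", Addr: "s:1"}
    //   -> TraceCollector.TraceNetDial -> eventNetDial -> EventRouter -> "c-s" checker
    //   -> matched by tCS.Expect(netdial("c", "s:1"))
    _ = Chost
}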