Commit dcdb9592 authored by Kirill Smelkov

.

parent 6e1926c0
@@ -71,7 +71,7 @@ func tracetestMasterStorage(t0 *tracetest.T) {
 	// ----------------------------------------
-	// XXX convert prologue to tStartSimpleNEOGoSrv (XXX naming)
+	// XXX convert prologue to tNewCluster_MS
 	// M starts listening
 	tM.Expect(netlisten("m:1"))
@@ -408,6 +408,7 @@ func tracetestMasterStorage(t0 *tracetest.T) {
 }

+/*
 // dispatch1 dispatched directly to single output channel
 //
 // XXX hack - better we don't need it.
@@ -419,12 +420,16 @@ type tdispatch1 struct {
 func (d tdispatch1) Dispatch(event interface{}) {
 	d.outch.Send(event)
 }
+*/

 func benchmarkGetObject(b *testing.B, Mnet, Snet, Cnet xnet.Networker, benchit func(xcload1 func())) {
-	// create test cluster	<- XXX factor to utility func
+	X := exc.Raiseif
+
+	// create test cluster
 	zback := xfs1back("../zodb/storage/fs1/testdata/1.fs")
-	t := tStartSimpleNEOGoSrv("abc1", zback)
+	var t0 *tracetest.T // XXX stub
+	t := tNewCluster_MS(t0, "abc1", zback)
 	defer t.Stop()

 	ctx, cancel := context.WithCancel(context.Background())
@@ -482,7 +487,8 @@ func benchmarkGetObject(b *testing.B, Mnet, Snet, Cnet xnet.Networker, benchit f
 	// tracer.Detach()
 	t.TraceOff()

-	err := M.Start(); t.FatalIf(err)
+	var M ITestMaster // XXX stub -> M = t.Master("m")
+	err := M.Start(); X(err)

 	C := t.NewClient("c", "m:1")	// XXX better use M.Addr() ?
...
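For reference, a minimal self-contained sketch of the X := exc.Raiseif shorthand that the benchmark switches to above, assuming exc.Raiseif's usual func(error) shape from go123/exc; the os.Open call and printed output are purely illustrative and not part of this commit:

    package main

    import (
        "fmt"
        "os"

        "lab.nexedi.com/kirr/go123/exc"
    )

    func main() {
        X := exc.Raiseif // X(err) raises (panics with an exception) if err != nil

        f, err := os.Open("/etc/hostname") // illustrative call, not from the commit
        X(err)                             // replaces the t.FatalIf(err) pattern
        defer f.Close()

        fmt.Println("opened:", f.Name())
    }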
@@ -27,6 +27,7 @@ import (
 	"lab.nexedi.com/kirr/go123/xnet"
 	"lab.nexedi.com/kirr/go123/xnet/pipenet"
+	"lab.nexedi.com/kirr/go123/xnet/virtnet"
 	"lab.nexedi.com/kirr/go123/xsync"

 	"lab.nexedi.com/kirr/neo/go/internal/xcontext"
@@ -43,12 +44,14 @@ import (
 // Create it with tNewCluster.
 // Create nodes with .NewMaster, .NewStorage and .NewClient.
 //
+// NOTE about network addresses being predictable due to using pipenet/lonet for inter-networking.
+//
 // XXX text about events tracing
 type tCluster struct {
 	*tracetest.T // original testing env this cluster was created at

 	name     string              // name of the cluster
-	network  *pipenet.Network    // nodes interoperate via netowrk	XXX -> lo
+	network  *virtnet.SubNetwork // nodes interoperate via netowrk
 	gotracer *TraceCollector     // for tracing go nodes	XXX -> GoTracer
 	//tpy    *PyTracer           // for tracing py nodes
@@ -92,11 +95,14 @@ func tNewCluster(ttest *tracetest.T, name string) *tCluster {
 		T:       ttest,
 		name:    name,
-		network: pipenet.New("testnet"), // test network
 		nodeTab: make(map[string]*tNode),
 	}

+	// test network
+	// XXX allow to use lonet
+	t.network = pipenet.AsVirtNet(pipenet.New("testnet"))
+
 	t.erouter = NewEventRouter()
 	t.gotracer = NewTraceCollector(ttest)

 	ttest.SetEventRouter(t.erouter.RouteEvent)
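For context, a hedged sketch of the type change in these hunks: tCluster now stores the generic *virtnet.SubNetwork, and tNewCluster plugs pipenet in through pipenet.AsVirtNet, so that (per the XXX note) lonet could later be substituted without touching the struct. Only calls visible in the hunks above are used:

    // sketch: build the test network as a virtnet subnetwork backed by pipenet;
    // swapping in lonet later would only change this one expression.
    var network *virtnet.SubNetwork = pipenet.AsVirtNet(pipenet.New("testnet"))
    _ = network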
@@ -298,22 +304,20 @@ func (m *tMaster) Run(ctx context.Context) error {
 // --------

-// tStartSimpleNEOGoSrv starts simple NEO/go server with 1 master and 1 storage.
-// The cluster is returned in ready-to-start.
-//
-// XXX naming -> tStartSimpleClusterMS ?
-func tStartSimpleNEOGoSrv(t0 *tracetest.T, name string, Sback storage.Backend) *tCluster {
+// tNewCluster_MS starts simple NEO cluster with 1 master and 1 storage nodes.
+// The cluster is returned in ready-to-start state.
+func tNewCluster_MS(t0 *tracetest.T, name string, Sback storage.Backend) *tCluster {
 	t := tNewCluster(t0, name)
-	M := t.NewMaster("m")
+	t.NewMaster("m")

-	// XXX if we would always use lonet or pipenet - all network addresses would be predictable
+	// NOTE all network addresses are predictable

 	// M starts listening
 	t.Expect("m", netlisten("m:1"))
 	t.Expect("m", δnode("m", "m:1", proto.MASTER, 1, proto.RUNNING, proto.IdTimeNone))
 	t.Expect("m", clusterState("m", proto.ClusterRecovering))

-	S := t.NewStorage("s", "m:1", Sback)
+	t.NewStorage("s", "m:1", Sback)

 	// S starts listening
 	t.Expect("s", netlisten("s:1"))
...
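And a hedged sketch of how a tracetest-driven test could consume tNewCluster_MS after this commit; only identifiers that appear in the hunks above are used, and obtaining t0 from the tracetest driver is assumed rather than shown by the diff. The wrapper name tracetestSketch is hypothetical:

    // sketch, not part of the commit: start the 1 master + 1 storage cluster
    // and attach a client at the predictable master address "m:1".
    func tracetestSketch(t0 *tracetest.T) {
        zback := xfs1back("../zodb/storage/fs1/testdata/1.fs")
        t := tNewCluster_MS(t0, "abc1", zback)
        defer t.Stop()

        C := t.NewClient("c", "m:1") // addresses are predictable with pipenet/lonet
        _ = C
    }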