Commit 0430db7a authored by Kirill Smelkov

.

parent 339da2c1
@@ -168,6 +168,8 @@ func nodeLinkPipe() (nl1, nl2 *NodeLink) {
func TestNodeLink(t *testing.T) {
// TODO catch exception -> add proper location from it -> t.Fatal (see git-backup)
println("000")
// Close vs recvPkt
nl1, nl2 := _nodeLinkPipe(linkNoRecvSend, linkNoRecvSend)
wg := &xsync.WorkGroup{}
@@ -182,6 +184,8 @@ func TestNodeLink(t *testing.T) {
xwait(wg)
xclose(nl2)
println("222")
// Close vs sendPkt
nl1, nl2 = _nodeLinkPipe(linkNoRecvSend, linkNoRecvSend)
wg = &xsync.WorkGroup{}
@@ -204,18 +208,23 @@ func TestNodeLink(t *testing.T) {
tdelay()
xclose(nl2)
})
println("222 + 1")
c, err := nl2.Accept()
if !(c == nil && xlinkError(err) == ErrLinkClosed) {
t.Fatalf("NodeLink.Accept() after close: conn = %v, err = %v", c, err)
}
println("222 + 2")
// nl1 is not accepting connections because it has the LinkClient role;
// check Accept behaviour.
c, err = nl1.Accept()
if !(c == nil && xlinkError(err) == ErrLinkNoListen) {
t.Fatalf("NodeLink.Accept() on non-listening node link: conn = %v, err = %v", c, err)
}
println("222 + 3")
xclose(nl1)
println("333")
// Close vs recvPkt on another side
nl1, nl2 = _nodeLinkPipe(linkNoRecvSend, linkNoRecvSend)
wg = &xsync.WorkGroup{}
......
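These hunks add println markers around tests that race NodeLink.Close against a blocked recvPkt/sendPkt, on the closed link itself and on its peer. For illustration, a minimal self-contained sketch of that test shape, using net.Pipe from the standard library in place of the package's node-link pipe and a fixed sleep in place of tdelay:

package nodelink_test

import (
	"net"
	"testing"
	"time"
)

// TestCloseVsRead sketches the close-vs-recv race instrumented above:
// one goroutine closes the connection while another is blocked reading.
func TestCloseVsRead(t *testing.T) {
	c1, c2 := net.Pipe()
	defer c2.Close()

	go func() {
		time.Sleep(10 * time.Millisecond) // let Read block first
		c1.Close()
	}()

	buf := make([]byte, 1)
	if _, err := c1.Read(buf); err == nil {
		t.Fatal("Read after Close: expected an error, got nil")
	}
}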
@@ -160,6 +160,8 @@ type Listener interface {
// On success the returned values are:
// - primary link connection which carried identification
// - requested identification packet
//
// XXX Conn, RequestIdentification -> Request
Accept(ctx context.Context) (*Conn, *RequestIdentification, error)
}
......
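With that signature a caller receives the connection and the identification request it carried in one step. A hypothetical accept loop over this interface could look as follows (serve and handleIdentify are invented names used only for illustration):

// serve sketches a loop over the Listener interface above;
// handleIdentify is a hypothetical handler, not part of the package.
func serve(ctx context.Context, l Listener) error {
	for {
		conn, idReq, err := l.Accept(ctx)
		if err != nil {
			return err // e.g. context canceled or link closed
		}
		// handle each identification request concurrently
		go handleIdentify(ctx, conn, idReq)
	}
}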
@@ -368,6 +368,20 @@ func TestMasterStorage(t *testing.T) {
YourUUID: neo.UUID(neo.CLIENT, 1),
}))
// XXX C <- M NotifyNodeInformation C1,M1,S1
// C asks M about PT
tc.Expect(conntx("c:1", "m:3", 3, &neo.AskPartitionTable{}))
tc.Expect(conntx("m:3", "c:1", 3, &neo.AnswerPartitionTable{
PTid: 1,
RowList: []neo.RowInfo{
{0, []neo.CellInfo{{neo.UUID(neo.STORAGE, 1), neo.UP_TO_DATE}}},
},
}))
_ = C
......
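The two conntx expectations encode one request/response round trip on connection 3 between c:1 and m:3: AskPartitionTable up, and AnswerPartitionTable back with PTid 1 and a single row whose only cell is storage 1 in the UP_TO_DATE state. As a rough sketch, an expectation of this kind boils down to matching a traced-transmit event record (field names here are assumptions, not the harness's real API):

import "reflect"

// eventConnTx sketches the traced-transmit event conntx appears to
// describe; actual names in the test harness may differ.
type eventConnTx struct {
	Src, Dst string      // link endpoints, e.g. "c:1" -> "m:3"
	ConnID   uint32      // connection id the packet travels on
	Msg      interface{} // decoded protocol message
}

// matches reports whether traced event e equals expectation want.
func (e eventConnTx) matches(want eventConnTx) bool {
	return e.Src == want.Src && e.Dst == want.Dst &&
		e.ConnID == want.ConnID && reflect.DeepEqual(e.Msg, want.Msg)
}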
@@ -51,10 +51,12 @@ type Master struct {
// master manages node and partition tables and broadcasts their updates
// to all nodes in the cluster
// XXX dup from .node - kill here
///*
stateMu sync.RWMutex // XXX recheck: needed ?
nodeTab *neo.NodeTable
partTab *neo.PartitionTable // XXX ^ is also in node
partTab *neo.PartitionTable
clusterState neo.ClusterState
//*/
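Since stateMu guards nodeTab, partTab and clusterState, any reader outside the main loop would take the read lock first. A hypothetical accessor sketching that pattern (the method is invented, not in the commit):

// ClusterState sketches read-locked access to the state guarded by
// stateMu above; this accessor is hypothetical.
func (m *Master) ClusterState() neo.ClusterState {
	m.stateMu.RLock()
	defer m.stateMu.RUnlock()
	return m.clusterState
}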
@@ -196,6 +198,20 @@ func (m *Master) Run(ctx context.Context) (err error) {
continue
}
// for storages the only incoming connection is for RequestIdentification,
// and from then on the master alone drives it. So close accept, as no one
// will be listening for further incoming connections on our side anymore.
switch idReq.NodeType {
case neo.CLIENT:
// ok
case neo.STORAGE:
fallthrough
default:
l.CloseAccept()
}
// handover to main driver
select {
case m.nodeCome <- nodeCome{conn, idReq}:
// ok
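The switch splits links by role: client links keep their accept queue open, while storage links (and anything else) are driven solely by the master after identification, so nothing would read further incoming connections on them. Because STORAGE merely falls through to default, the dispatch is equivalent to a single condition:

// equivalent of the dispatch above: only client links keep accepting,
// every other node type gets its accept queue closed.
if idReq.NodeType != neo.CLIENT {
	l.CloseAccept()
}

The switch form, though, leaves an explicit slot for treating more node types specially later.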
@@ -318,7 +334,6 @@ loop:
// new connection comes in
case n := <-m.nodeCome:
node, resp := m.identify(ctx, n, /* XXX only accept storages -> PENDING */)
// XXX set node.State = PENDING
if node == nil {
goreject(ctx, wg, n.conn, resp)
......