Levin Zimmermann / neoppod / Commits

Commit 146534b0
authored Sep 01, 2017 by Kirill Smelkov
.
parent 915cb69a
Showing 2 changed files with 63 additions and 65 deletions
go/neo/server/server.go	+1 -1
go/neo/server/storage.go	+62 -64
go/neo/server/server.go

@@ -139,7 +139,7 @@ type nodeLeave struct {
 // reject sends rejective identification response and closes associated link
 func reject(ctx context.Context, req *neo.Request, resp neo.Msg) {
 	// XXX cancel on ctx?
-	// XXX log ?
+	// log.Info(ctx, "identification rejected")	?
 	err1 := req.Reply(resp)
 	err2 := req.Link().Close()
 	err := xerr.Merge(err1, err2)
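Note on the reject helper above: it has two independent failure sources, the reply and the link close, and xerr.Merge keeps both instead of letting the second overwrite the first. A minimal standalone sketch of that shape, with illustrative stand-in types rather than the neo API (errors.Join plays the role of xerr.Merge and needs Go 1.20+):

package main

import (
	"errors"
	"fmt"
)

// link and request are illustrative stand-ins for neo.NodeLink / neo.Request,
// just enough to show the reply-then-close-then-merge shape of reject.
type link struct{}

func (l *link) Close() error { return errors.New("close: use of closed link") }

type request struct{ l *link }

func (r *request) Reply(resp string) error { return nil }
func (r *request) Link() *link             { return r.l }

// rejectPeer sends the rejection reply, closes the link, and merges both
// errors so that neither failure is silently dropped.
func rejectPeer(req *request, resp string) error {
	err1 := req.Reply(resp)
	err2 := req.Link().Close()
	return errors.Join(err1, err2) // stand-in for xerr.Merge(err1, err2)
}

func main() {
	req := &request{l: &link{}}
	fmt.Println(rejectPeer(req, "identification rejected"))
}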
go/neo/server/storage.go

@@ -43,7 +43,7 @@ type Storage struct {
 	// context for providing operational service
 	// it is renewed every time master tells us StartOpertion, so users
-	// must read it initially only once under opMu via opCtxRead
+	// must read it initially only once under opMu via withWhileOperational.
 	opMu  sync.Mutex
 	opCtx context.Context
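Note on this hunk: opCtx is replaced wholesale every time the master sends StartOperation, so readers are expected to take one snapshot of it under opMu (through withWhileOperational) rather than re-reading the field. A self-contained sketch of that arrangement, with illustrative names in place of the real Storage type:

package main

import (
	"context"
	"fmt"
	"sync"
)

// node sketches the opMu/opCtx arrangement described above: a context
// renewed on every StartOperation, cancelled on StopOperation, and
// snapshotted by readers exactly once under the lock.
type node struct {
	opMu     sync.Mutex
	opCtx    context.Context
	opCancel context.CancelFunc
}

func newNode() *node {
	ctx, cancel := context.WithCancel(context.Background())
	cancel() // not operational until the master says StartOperation
	return &node{opCtx: ctx, opCancel: cancel}
}

func (n *node) startOperation() {
	n.opMu.Lock()
	defer n.opMu.Unlock()
	n.opCancel() // end any previous operational period
	n.opCtx, n.opCancel = context.WithCancel(context.Background())
}

func (n *node) stopOperation() {
	n.opMu.Lock()
	defer n.opMu.Unlock()
	n.opCancel()
}

// opContext takes the one snapshot of opCtx under opMu that the updated
// comment asks for.
func (n *node) opContext() context.Context {
	n.opMu.Lock()
	defer n.opMu.Unlock()
	return n.opCtx
}

func main() {
	n := newNode()
	n.startOperation()
	fmt.Println("operational:", n.opContext().Err() == nil) // true
	n.stopOperation()
	fmt.Println("operational:", n.opContext().Err() == nil) // false
}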
@@ -94,7 +94,7 @@ func (stor *Storage) Run(ctx context.Context) error {
 	wg.Add(1)
 	go func(ctx context.Context) (err error) {
 		defer wg.Done()
-		defer task.Running(&ctx, "serve")(&err)	// XXX or "accept" ?
+		defer task.Running(&ctx, "accept")(&err)
 		// XXX dup from master
 		for {
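Note on the task.Running idiom above: the defer task.Running(&ctx, "accept")(&err) shape lets a single statement both derive a task-scoped context and decorate the named error return on the way out; the commit only settles the task name on "accept". A hypothetical stand-in for that idiom (running below is illustrative, not the go123 task API):

package main

import (
	"context"
	"errors"
	"fmt"
)

type taskKey struct{}

// running is an illustrative stand-in for the task.Running idiom: the
// immediate call tags the context with the task name, and the returned
// function, when deferred with the named error return, prefixes any
// error with that name on exit.
func running(ctxp *context.Context, name string) func(*error) {
	*ctxp = context.WithValue(*ctxp, taskKey{}, name)
	return func(errp *error) {
		if *errp != nil {
			*errp = fmt.Errorf("%s: %w", name, *errp)
		}
	}
}

func accept(ctx context.Context) (err error) {
	defer running(&ctx, "accept")(&err)
	return errors.New("listener closed")
}

func main() {
	fmt.Println(accept(context.Background())) // accept: listener closed
}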
@@ -108,28 +108,23 @@ func (stor *Storage) Run(ctx context.Context) error {
 				continue
 			}
 
+			resp, ok := stor.identify(idReq)
+			if !ok {
+				goreject(ctx, &wg, req, resp)
+				continue
+			}
+
 			wg.Add(1)
 			go func() {
 				defer wg.Done()
+				stor.serveLink(ctx, req, idReq)	// XXX ignore err?
 			}()
 
-			// handover to main driver
-			select {
-			//case stor.nodeCome <- nodeCome{req, idReq}:
-			//	// ok
-
-			case <-ctx.Done():
-				// shutdown
-				lclose(ctx, req.Link())
-				continue
-			}
+//			// handover to main driver
+//			select {
+//			//case stor.nodeCome <- nodeCome{req, idReq}:
+//			//	// ok
+//
+//			case <-ctx.Done():
+//				// shutdown
+//				lclose(ctx, req.Link())
+//				continue
+//			}
 		}
 	}(serveCtx)
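Note on the reworked accept loop: identification now happens inline, rejections are pushed onto their own goroutine via goreject so one slow peer cannot stall the listener, accepted peers are served by serveLink on goroutines tracked by the outer WaitGroup, and the old hand-over to the main driver is left commented out. A reduced sketch of that shape with placeholder types (not the neo API):

package main

import (
	"context"
	"fmt"
	"sync"
)

// peer is a placeholder for an incoming, not yet identified connection.
type peer struct {
	name string
	ok   bool // outcome of identification in real code
}

// acceptLoop mirrors the shape of the reworked Run loop: identify first,
// hand rejections to their own goroutine (the role goreject plays), and
// serve accepted peers on goroutines tracked by a WaitGroup.
func acceptLoop(ctx context.Context, incoming <-chan peer) {
	var wg sync.WaitGroup
	defer wg.Wait()

	for {
		select {
		case <-ctx.Done():
			return
		case p, open := <-incoming:
			if !open {
				return
			}
			if !p.ok {
				wg.Add(1)
				go func(p peer) {
					defer wg.Done()
					fmt.Println("rejected:", p.name) // reply + close link here
				}(p)
				continue
			}
			wg.Add(1)
			go func(p peer) {
				defer wg.Done()
				fmt.Println("serving:", p.name) // serveLink equivalent
			}(p)
		}
	}
}

func main() {
	in := make(chan peer, 2)
	in <- peer{"client-1", true}
	in <- peer{"stranger", false}
	close(in)
	acceptLoop(context.Background(), in)
}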
@@ -221,8 +216,8 @@ func (stor *Storage) talkMaster1(ctx context.Context) (err error) {
 // cancelled or some other error.
 //
 // return error indicates:
-// - nil: initialization was ok and a command came from master to start operation
+// - nil: initialization was ok and a command came from master to start operation.
-// - !nil: initialization was cancelled or failed somehow
+// - !nil: initialization was cancelled or failed somehow.
 func (stor *Storage) m1initialize(ctx context.Context, mlink *neo.NodeLink) (reqStart *neo.Request, err error) {
 	defer task.Runningf(&ctx, "init %v", mlink)(&err)
@@ -292,9 +287,9 @@ func (stor *Storage) m1initialize(ctx context.Context, mlink *neo.NodeLink) (req
 	}
 }
 
-// m1serve drives storage by master messages during service phase
+// m1serve drives storage by master messages during service phase.
 //
-// Service is regular phase serving requests from clients to load/save object,
+// Service is regular phase serving requests from clients to load/save objects,
 // handling transaction commit (with master) and syncing data with other
 // storage nodes (XXX correct?).
 //
@@ -362,7 +357,14 @@ func (stor *Storage) identify(idReq *neo.RequestIdentification) (neo.Msg, bool)
 		return &neo.Error{neo.PROTOCOL_ERROR, "cluster name mismatch"}, false
 	}
 
-	// XXX check operational?
+	// check operational
+	stor.opMu.Lock()
+	operational := (stor.opCtx.Err() == nil)
+	stor.opMu.Unlock()
+
+	if !operational {
+		return &neo.Error{neo.NOT_READY, "cluster not operational"}, false
+	}
+
 	return &neo.AcceptIdentification{
 		NodeType: stor.node.MyInfo.Type,
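Note on the added check: opCtx.Err() is nil exactly while the operational period is running, so identification can answer NOT_READY without keeping a separate flag. Reusing the illustrative node type from the sketch after the first storage.go hunk (names are hypothetical, not the neo API):

// identify mirrors the added gate above: same cluster name first,
// then reject with "not ready" while the node is not operational.
func (n *node) identify(clusterName, wantName string) (resp string, ok bool) {
	if clusterName != wantName {
		return "protocol error: cluster name mismatch", false
	}

	n.opMu.Lock()
	operational := n.opCtx.Err() == nil
	n.opMu.Unlock()

	if !operational {
		return "not ready: cluster not operational", false
	}
	return "accept", true
}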
@@ -374,58 +376,62 @@ func (stor *Storage) identify(idReq *neo.RequestIdentification) (neo.Msg, bool)
 }
 
-// withWhileOperational derives new context from ctx which will be cancelled, when either
-// - ctx is cancelled, or
-// - master tells us to stop operational service
-func (stor *Storage) withWhileOperational(ctx context.Context) (context.Context, context.CancelFunc) {
-	stor.opMu.Lock()
-	opCtx := stor.opCtx
-	stor.opMu.Unlock()
-
-	return xcontext.Merge(ctx, opCtx)
-}
-
-// ServeLink serves incoming node-node link connection
-func (stor *Storage) ServeLink(ctx context.Context, link *neo.NodeLink) (err error) {
+// serveLink serves incoming node-node link connection
+func (stor *Storage) serveLink(ctx context.Context, req *neo.Request, idReq *neo.RequestIdentification) (err error) {
+	link := req.Link()
 	defer task.Runningf(&ctx, "serve %s", link)(&err)
 	defer xio.CloseWhenDone(ctx, link)()
 
-	// XXX only accept clients
-	// XXX only accept when operational (?)
-	nodeInfo, err := IdentifyPeer(ctx, link, neo.STORAGE)
+	// handle identification
+	idResp, ok := stor.identify(idReq)
+	if !ok {
+		reject(ctx, req, idResp)	// XXX log?
+		return nil
+	}
+	err = accept(ctx, req, idResp)
 	if err != nil {
-		log.Error(ctx, err)
-		return
+		return err
 	}
 
-	var serveReq func(context.Context, neo.Request)
-	switch nodeInfo.NodeType {
-	case neo.CLIENT:
-		serveReq = stor.serveClient
-
-	default:
-		// XXX vvv should be reply to peer
-		log.Errorf(ctx, "%v: unexpected peer type: %v", link, nodeInfo.NodeType)
-		return
-	}
-
-	// identification passed, now serve other requests
+	// client passed identification, now serve other requests
+	log.Info(ctx, "identification accepted")	// FIXME must be in identify?
+
+	// rederive ctx to be also cancelled if M tells us StopOperation
+	ctx, cancel := stor.withWhileOperational(ctx)
+	defer cancel()
+
+	wg := sync.WaitGroup{}	// XXX -> errgroup?
 	for {
 		req, err := link.Recv1()
 		if err != nil {
 			log.Error(ctx, err)
-			break
+			return err
 		}
 
-		go serveReq(ctx, req)
+		wg.Add(1)
+		go func() {
+			defer wg.Done()
+			stor.serveClient(ctx, req)
+		}()
 	}
 
-	// TODO wait all spawned serveConn
+	wg.Wait()
 	return nil
 }
 
+// withWhileOperational derives new context from ctx which will be cancelled, when either
+// - ctx is cancelled, or
+// - master tells us to stop operational service
+func (stor *Storage) withWhileOperational(ctx context.Context) (context.Context, context.CancelFunc) {
+	stor.opMu.Lock()
+	opCtx := stor.opCtx
+	stor.opMu.Unlock()
+
+	return xcontext.Merge(ctx, opCtx)
+}
+
 // serveClient serves incoming client request.
 //
 // XXX +error return?
@@ -433,21 +439,13 @@ func (stor *Storage) withWhileOperational(ctx context.Context) (context.Context,
 // XXX version that reuses goroutine to serve next client requests
 // XXX for py compatibility (py has no way to tell us Conn is closed)
 func (stor *Storage) serveClient(ctx context.Context, req neo.Request) {
-	// XXX vvv move level-up
-	//log.Infof(ctx, "%s: serving new client conn", conn)	// XXX -> running?
-
-	// rederive ctx to be also cancelled if M tells us StopOperation
-	// XXX level up
-	ctx, cancel := stor.withWhileOperational(ctx)
-	defer cancel()
-
 	link := req.Link()
 	for {
 		resp := stor.serveClient1(ctx, req.Msg)
 
 		err := req.Reply(resp)
 		if err != nil {
-			log.Info(ctx, err)
+			log.Error(ctx, err)
 			return
 		}
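Note on withWhileOperational, which this commit moves next to serveClient: xcontext.Merge hands back a context that is cancelled as soon as either the per-link ctx or the master-controlled operational context is, which is why serveLink and serveClient can watch a single ctx and still react to StopOperation. A rough standalone equivalent of that property using only the standard library (an illustration of the idea, not the xcontext implementation; context.AfterFunc needs Go 1.21+):

package main

import (
	"context"
	"fmt"
	"time"
)

// mergedCtx derives a context that is cancelled as soon as either parent
// is cancelled, which is the property withWhileOperational relies on.
func mergedCtx(a, b context.Context) (context.Context, context.CancelFunc) {
	ctx, cancel := context.WithCancel(a)
	stop := context.AfterFunc(b, cancel) // propagate b's cancellation into ctx
	return ctx, func() {
		stop()
		cancel()
	}
}

func main() {
	linkCtx := context.Background()                                   // per-link context
	opCtx, stopOperation := context.WithCancel(context.Background()) // operational period

	ctx, cancel := mergedCtx(linkCtx, opCtx)
	defer cancel()

	go func() {
		time.Sleep(10 * time.Millisecond)
		stopOperation() // master said StopOperation
	}()

	<-ctx.Done()
	fmt.Println("serving stopped:", ctx.Err())
}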