Kirill Smelkov / neo / Commits

Commit 4cec19a6 authored Aug 31, 2017 by Kirill Smelkov

.

parent e993689f
Changes: 3 changed files with 18 additions and 25 deletions (+18 / -25)

    go/neo/client/client.go    +12 -16
    go/neo/neo.go              +3  -0
    go/neo/server/master.go    +3  -9
go/neo/client/client.go

@@ -53,7 +53,7 @@ type Client struct {
 	mlink      *neo.NodeLink
 	mlinkReady chan struct{} // reinitialized at each new talk cycle
 
-	// operational state - maintained by recvMaster.
+	// operational state in node is maintained by recvMaster.
 	// users retrieve it via withOperational.
 	//
 	// NOTE being operational means:
@@ -62,11 +62,7 @@ type Client struct {
 	// - .ClusterState = RUNNING	<- XXX needed?
 	//
 	// however master link is accessed separately (see ^^^ and masterLink)
-	opMu        sync.RWMutex
-	// node.NodeTab
-	// node.PartTab
-	// XXX + node.ClusterState
-	operational bool
+	operational bool // XXX <- somehow move to NodeCommon?
 	opReady     chan struct{} // reinitialized each time state becomes non-operational
 }
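For orientation, the net effect of the two hunks above on the struct: the guarding RWMutex is gone from Client, while the flag pair stays behind and is now protected by the mutex in the shared node data. A sketch of the resulting fields, assuming Client keeps the common node data in a `node neo.NodeCommon` field, as the c.node.* accesses in the following hunks suggest (all other fields elided):

	// Sketch only: the operational-state fields of Client after this commit.
	type Client struct {
		node neo.NodeCommon // StateMu, NodeTab, PartTab, ClusterState live here now

		operational bool          // guarded by node.StateMu; XXX <- somehow move to NodeCommon?
		opReady     chan struct{} // reinitialized each time state becomes non-operational
	}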
@@ -139,19 +135,19 @@ func (c *Client) masterLink(ctx context.Context) (*neo.NodeLink, error) {
 
 // withOperational waits for cluster state to be operational.
 //
-// If successful it returns with operational state RLocked (c.opMu) and
+// If successful it returns with operational state RLocked (c.node.StateMu) and
 // unlocked otherwise.
 //
 // The only error possible is if provided ctx cancel.
 func (c *Client) withOperational(ctx context.Context) error {
 	for {
-		c.opMu.RLock()
+		c.node.StateMu.RLock()
 		if c.operational {
 			return nil
 		}
 
 		ready := c.opReady
-		c.opMu.RUnlock()
+		c.node.StateMu.RUnlock()
 
 		select {
 		case <-ctx.Done():
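withOperational implements a condition-wait on top of the RWMutex plus a channel that is closed on the non-operational -> operational transition: snapshot c.opReady under RLock, unlock, then block on the snapshot or on ctx. A minimal self-contained sketch of that pattern (opState, wait and all names below are illustrative, not from the NEO code; note that the real withOperational returns with the RLock still held on success):

	package opstate

	import (
		"context"
		"sync"
	)

	// opState models the pattern: a flag plus a "ready" channel that is closed
	// when the flag becomes true and re-made when it goes false again.
	type opState struct {
		mu          sync.RWMutex
		operational bool
		opReady     chan struct{}
	}

	// wait blocks until the state is operational or ctx is cancelled.
	// (Unlike the real withOperational, it releases the read lock on success.)
	func (s *opState) wait(ctx context.Context) error {
		for {
			s.mu.RLock()
			if s.operational {
				s.mu.RUnlock()
				return nil
			}
			ready := s.opReady // snapshot the channel before unlocking
			s.mu.RUnlock()

			select {
			case <-ctx.Done():
				return ctx.Err()
			case <-ready:
				// state may have changed; recheck the flag from the top
			}
		}
	}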
@@ -255,11 +251,11 @@ func (c *Client) recvMaster(ctx context.Context, mlink *neo.NodeLink) error {
 			return err
 		}
 
-		c.opMu.Lock()
+		c.node.StateMu.Lock()
 
 		switch msg := req.Msg.(type) {
 		default:
-			c.opMu.Unlock()
+			c.node.StateMu.Unlock()
 			return fmt.Errorf("unexpected message: %T", msg)
 
 		// M sends whole PT
@@ -289,13 +285,13 @@ func (c *Client) recvMaster(ctx context.Context, mlink *neo.NodeLink) error {
 		if operational != c.operational {
 			c.operational = operational
 			if operational {
-				opready = c.opReady // don't close from under opMu
+				opready = c.opReady // don't close from under StateMu
 			} else {
 				c.opReady = make(chan struct{})
 			}
 		}
 
-		c.opMu.Unlock()
+		c.node.StateMu.Unlock()
 
 		if opready != nil {
 			close(opready)
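The hunk above is the writer side of the same pattern, and it keeps one invariant worth spelling out: the ready channel is closed only after node.StateMu is released ("don't close from under StateMu"), and a fresh channel is made on the operational -> non-operational transition so later waiters get a new cycle. Continuing the illustrative sketch from above:

	// setOperational is the writer side: flip the flag under the write lock,
	// but close the old ready channel only after unlocking, exactly as
	// recvMaster does ("don't close from under StateMu").
	func (s *opState) setOperational(operational bool) {
		var opready chan struct{}

		s.mu.Lock()
		if operational != s.operational {
			s.operational = operational
			if operational {
				opready = s.opReady // close it after Unlock, not here
			} else {
				s.opReady = make(chan struct{}) // new cycle for future waiters
			}
		}
		s.mu.Unlock()

		if opready != nil {
			close(opready)
		}
	}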
@@ -312,9 +308,9 @@ func (c *Client) initFromMaster(ctx context.Context, Mlink *neo.NodeLink) error
 	}
 
 	pt := neo.PartTabFromDump(rpt.PTid, rpt.RowList)
-	c.opMu.Lock()
+	c.node.StateMu.Lock()
 	c.node.PartTab = pt
-	c.opMu.Unlock()
+	c.node.StateMu.Unlock()
 
 /*
 	XXX don't need this in init?
@@ -383,7 +379,7 @@ func (c *Client) Load(ctx context.Context, xid zodb.Xid) (data []byte, serial zo
 			}
 		}
 	}
-	c.opMu.RUnlock()
+	c.node.StateMu.RUnlock()
 
 	if len(storv) == 0 {
 		// XXX recheck it adds traceback to log
go/neo/neo.go

@@ -31,6 +31,7 @@ import (
 	"context"
 	"fmt"
 	"net"
+	"sync"
 
 	"lab.nexedi.com/kirr/go123/xerr"
@@ -50,6 +51,7 @@ const (
 // NodeCommon is common data in all NEO nodes: Master, Storage & Client	XXX text
 // XXX naming -> Node ?
+// XXX -> internal?
 type NodeCommon struct {
 	MyInfo      NodeInfo
 	ClusterName string
@@ -57,6 +59,7 @@ type NodeCommon struct {
 	Net        xnet.Networker // network AP we are sending/receiving on
 	MasterAddr string         // address of master	XXX -> Address ?
 
+	StateMu      sync.RWMutex    // <- XXX just embed?
 	NodeTab      *NodeTable      // information about nodes in the cluster
 	PartTab      *PartitionTable // information about data distribution in the cluster
 	ClusterState ClusterState    // master idea about cluster state
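With StateMu in NodeCommon every node type (master, storage, client) now shares one locking convention: RLock to read NodeTab / PartTab / ClusterState, Lock to mutate them. As an illustration only (readPTid is not part of the commit; it assumes a PTid type for the partition-table id, inferred from the PartTab.PTid field used in the diff, and would live inside package neo):

	// readPTid is an illustrative read-side helper: any consumer of
	// NodeCommon reads the shared tables under StateMu.RLock.
	func readPTid(n *NodeCommon) PTid {
		n.StateMu.RLock()
		defer n.StateMu.RUnlock()
		return n.PartTab.PTid
	}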
go/neo/server/master.go

@@ -53,14 +53,6 @@ type Master struct {
 	// master manages node and partition tables and broadcast their updates
 	// to all nodes in cluster
-	// XXX dup from .node - kill here
-/*
-	stateMu      sync.RWMutex // XXX recheck: needed ?
-	nodeTab      *neo.NodeTable
-	partTab      *neo.PartitionTable
-	clusterState neo.ClusterState
-*/
-
 
 	// channels controlling main driver
 	ctlStart chan chan error    // request to start cluster
 	ctlStop  chan chan struct{} // request to stop cluster
@@ -242,6 +234,7 @@ func (m *Master) runMain(ctx context.Context) (err error) {
 	defer task.Running(&ctx, "main")(&err)
 
 	// NOTE Run's goroutine is the only mutator of nodeTab, partTab and other cluster state
+	// XXX however since clients request state reading we should use node.StateMu
 
 	for ctx.Err() == nil {
 		// recover partition table from storages and wait till enough
@@ -943,11 +936,12 @@ func (m *Master) serveClient(ctx context.Context, cli *neo.Node) (err error) {
 
 func (m *Master) serveClient1(ctx context.Context, req neo.Msg) (resp neo.Msg) {
 	switch req := req.(type) {
 	case *neo.AskPartitionTable:
-		// XXX lock
+		m.node.StateMu.RLock()
 		rpt := &neo.AnswerPartitionTable{
 			PTid:    m.node.PartTab.PTid,
 			RowList: m.node.PartTab.Dump(),
 		}
+		m.node.StateMu.RUnlock()
 
 		return rpt
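The last hunk replaces the `// XXX lock` placeholder with a real read lock, using a snapshot-then-release shape: copy what the answer needs (PTid and the dumped rows) under node.StateMu.RLock, release, then send, so the reply does not hold up writers. Shown as a standalone sketch against the NodeCommon struct from go/neo/neo.go (answerPartTab is an illustrative name, not from the commit; it would sit inside package neo):

	// answerPartTab illustrates the snapshot-then-release shape of
	// serveClient1: build the reply under the read lock, release before
	// the (possibly slow) send so writers are not held up.
	func answerPartTab(n *NodeCommon) *AnswerPartitionTable {
		n.StateMu.RLock()
		rpt := &AnswerPartitionTable{
			PTid:    n.PartTab.PTid,
			RowList: n.PartTab.Dump(),
		}
		n.StateMu.RUnlock()
		return rpt
	}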