Commit f39babe5 (neo)
authored Apr 25, 2017 by Julien Muchembled

Remove UNKNOWN node state

parent 23b6a66a

Showing 14 changed files with 29 additions and 42 deletions
neo/lib/node.py                         +1  -1
neo/lib/protocol.py                     +0  -2
neo/master/handlers/__init__.py         +9  -15
neo/master/handlers/administration.py   +3  -3
neo/master/handlers/master.py           +1  -1
neo/neoctl/neoctl.py                    +1  -1
neo/storage/handlers/__init__.py        +1  -2
neo/tests/functional/__init__.py        +0  -5
neo/tests/functional/testCluster.py     +3  -3
neo/tests/functional/testMaster.py      +1  -1
neo/tests/functional/testStorage.py     +1  -1
neo/tests/testNodes.py                  +5  -5
neo/tests/testPT.py                     +1  -1
neo/tests/threaded/test.py              +2  -1
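
Taken together, the hunks below retire NodeStates.UNKNOWN: the value disappears from the NodeStates enum and from its logging prefix table in neo/lib/protocol.py, every remaining use is replaced by TEMPORARILY_DOWN (for nodes that may come back) or DOWN (for nodes dropped for good), and TEMPORARILY_DOWN becomes the default state of a freshly constructed Node. A minimal sketch of the surviving state set, using the stdlib enum module (Python 3) rather than NEO's own @Enum helper from protocol.py:

    # Sketch only: NEO builds NodeStates with its own @Enum decorator;
    # a stdlib enum is used here just to mirror the surviving values.
    from enum import Enum

    class NodeStates(Enum):
        RUNNING = 'RUNNING'
        TEMPORARILY_DOWN = 'TEMPORARILY_DOWN'  # now also the default Node state
        DOWN = 'DOWN'
        PENDING = 'PENDING'
        # UNKNOWN is gone: its former uses become TEMPORARILY_DOWN or DOWN.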
neo/lib/node.py

@@ -32,7 +32,7 @@ class Node(object):
     id_timestamp = None
 
     def __init__(self, manager, address=None, uuid=None,
-            state=NodeStates.UNKNOWN):
+            state=NodeStates.TEMPORARILY_DOWN):
         self._state = state
         self._address = address
         self._uuid = uuid
neo/lib/protocol.py

@@ -123,7 +123,6 @@ def NodeStates():
     TEMPORARILY_DOWN
     DOWN
     PENDING
-    UNKNOWN
 
 @Enum
 def CellStates():
@@ -150,7 +149,6 @@ node_state_prefix_dict = {
     NodeStates.TEMPORARILY_DOWN: 'T',
     NodeStates.DOWN: 'D',
     NodeStates.PENDING: 'P',
-    NodeStates.UNKNOWN: 'U',
 }
 
 # used for logging
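
The second hunk above also drops the 'U' entry from node_state_prefix_dict, the table that, per the comment beneath it, maps each node state to a one-letter tag for logging. A hedged sketch of the kind of lookup such a table serves (the helper and its output format are illustrative, not NEO's actual logging code):

    # Illustrative helper only; NEO's real log formatting lives elsewhere.
    node_state_prefix_dict = {
        'TEMPORARILY_DOWN': 'T',
        'DOWN': 'D',
        'PENDING': 'P',
        # 'UNKNOWN': 'U' is removed along with the state itself.
    }

    def state_tag(state):
        # One-letter tag for a node state, e.g. 'T' for TEMPORARILY_DOWN.
        return node_state_prefix_dict.get(state, '?')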
neo/master/handlers/__init__.py

@@ -18,9 +18,7 @@ from ..app import monotonic_time
 from neo.lib import logging
 from neo.lib.exception import StoppedOperation
 from neo.lib.handler import EventHandler
-from neo.lib.protocol import (
-    uuid_str, NodeTypes, NodeStates, Packets, ProtocolError,
-)
+from neo.lib.protocol import Packets
 
 class MasterHandler(EventHandler):
     """This class implements a generic part of the event handlers."""
@@ -66,10 +64,6 @@ class MasterHandler(EventHandler):
         conn.answer(Packets.AnswerPartitionTable(pt.getID(),
                                                  pt.getRowList()))
 
-DISCONNECTED_STATE_DICT = {
-    NodeTypes.STORAGE: NodeStates.TEMPORARILY_DOWN,
-}
-
 class BaseServiceHandler(MasterHandler):
     """This class deals with events for a service phase."""
@@ -84,17 +78,17 @@ class BaseServiceHandler(MasterHandler):
             return # for example, when a storage is removed by an admin
         assert node.isStorage(), node
         logging.info('storage node lost')
-        new_state = DISCONNECTED_STATE_DICT.get(node.getType(), NodeStates.DOWN)
-        assert node.getState() not in (NodeStates.TEMPORARILY_DOWN,
-            NodeStates.DOWN), (uuid_str(self.app.uuid),
-            node.whoSetState(), new_state)
-        was_pending = node.isPending()
-        node.setState(new_state)
-        if was_pending:
+        if node.isPending():
             # was in pending state, so drop it from the node manager to forget
             # it and do not set in running state when it comes back
             logging.info('drop a pending node from the node manager')
-            app.nm.remove(node)
+            node.setDown()
+        elif node.isTemporarilyDown():
+            # Already put in TEMPORARILY_DOWN state
+            # by AdministrationHandler.setNodeState
+            return
+        else:
+            node.setTemporarilyDown()
         app.broadcastNodesInformation([node])
         if app.truncate_tid:
             raise StoppedOperation
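
This is the behavioral heart of the commit. On loss of a storage connection during the service phase, the handler previously computed a new_state through DISCONNECTED_STATE_DICT and asserted that the node was not already down; it now simply branches on the node's current state. A standalone sketch of the new control flow, where `app` and `node` are stand-ins assumed to expose the predicates and setters used in the hunk:

    # Sketch of the new BaseServiceHandler branch, isolated from the class.
    def on_storage_connection_lost(app, node):
        if node.isPending():
            # Forget a pending node so it is not put back in RUNNING
            # state when it comes back.
            node.setDown()
        elif node.isTemporarilyDown():
            # Already marked by AdministrationHandler.setNodeState;
            # nothing more to broadcast.
            return
        else:
            node.setTemporarilyDown()
        app.broadcastNodesInformation([node])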
neo/master/handlers/administration.py

@@ -34,8 +34,8 @@ CLUSTER_STATE_WORKFLOW = {
                               ClusterStates.STARTING_BACKUP),
 }
 NODE_STATE_WORKFLOW = {
-    NodeTypes.MASTER: (NodeStates.UNKNOWN,),
-    NodeTypes.STORAGE: (NodeStates.UNKNOWN, NodeStates.DOWN),
+    NodeTypes.MASTER: (NodeStates.TEMPORARILY_DOWN,),
+    NodeTypes.STORAGE: (NodeStates.TEMPORARILY_DOWN, NodeStates.DOWN),
 }
 
 class AdministrationHandler(MasterHandler):
@@ -95,7 +95,7 @@ class AdministrationHandler(MasterHandler):
         message = ('state changed' if state_changed else
                    'node already in %s state' % state)
         if node.isStorage():
-            keep = state == NodeStates.UNKNOWN
+            keep = state == NodeStates.TEMPORARILY_DOWN
             try:
                 cell_list = app.pt.dropNodeList([node], keep)
             except PartitionTableException, e:
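
NODE_STATE_WORKFLOW whitelists the states an administrator may request per node type; both entries now name TEMPORARILY_DOWN where they named UNKNOWN. For a storage node, judging by the flag's name, `keep` then decides whether dropNodeList keeps the node's partition-table cells (TEMPORARILY_DOWN: the node is expected back) or discards them (DOWN). A sketch of the validation such a table enables (the helper is hypothetical, not NEO code):

    # Hypothetical validation helper built on the whitelist above.
    NODE_STATE_WORKFLOW = {
        'MASTER': ('TEMPORARILY_DOWN',),
        'STORAGE': ('TEMPORARILY_DOWN', 'DOWN'),
    }

    def check_requested_state(node_type, state):
        if state not in NODE_STATE_WORKFLOW.get(node_type, ()):
            raise ValueError('%r is not an allowed target state for %s'
                             % (state, node_type))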
neo/master/handlers/master.py

@@ -91,5 +91,5 @@ class PrimaryHandler(ElectionHandler):
             conn, timestamp, node_list)
         for node_type, _, uuid, state, _ in node_list:
             assert node_type == NodeTypes.MASTER, node_type
-            if uuid == self.app.uuid and state == NodeStates.UNKNOWN:
+            if uuid == self.app.uuid and state == NodeStates.TEMPORARILY_DOWN:
                 sys.exit()
neo/neoctl/neoctl.py

@@ -157,7 +157,7 @@ class NeoCTL(BaseApplication):
         return self.setClusterState(ClusterStates.VERIFYING)
 
     def killNode(self, node):
-        return self._setNodeState(node, NodeStates.UNKNOWN)
+        return self._setNodeState(node, NodeStates.TEMPORARILY_DOWN)
 
     def dropNode(self, node):
         return self._setNodeState(node, NodeStates.DOWN)
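
After this change the two administrative verbs differ only in the state they request: killNode asks for TEMPORARILY_DOWN (the node may return and, per the administration.py hunk above, a storage's cells are kept), while dropNode asks for DOWN (the node is removed for good). A usage sketch, assuming `ctl` is a NeoCTL instance connected to the admin node and `node` identifies the target:

    # Usage sketch; `ctl` and `node` are assumed, not defined here.
    ctl.killNode(node)   # _setNodeState(node, NodeStates.TEMPORARILY_DOWN)
    ctl.dropNode(node)   # _setNodeState(node, NodeStates.DOWN)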
neo/storage/handlers/__init__.py

@@ -56,8 +56,7 @@ class BaseMasterHandler(BaseHandler):
             if uuid == self.app.uuid:
                 # This is me, do what the master tell me
                 logging.info("I was told I'm %s", state)
-                if state in (NodeStates.DOWN, NodeStates.TEMPORARILY_DOWN,
-                             NodeStates.UNKNOWN):
+                if state in (NodeStates.DOWN, NodeStates.TEMPORARILY_DOWN):
                     erase = state == NodeStates.DOWN
                     self.app.shutdown(erase=erase)
             elif node_type == NodeTypes.CLIENT and state != NodeStates.RUNNING:
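
On the storage side, the master's state notification now only ever names two terminal states for the node itself: the storage shuts down on either, and erases its data only when told it is DOWN for good. The decision in isolation, as a sketch:

    # Sketch of BaseMasterHandler's reaction to a state notification
    # about this very node; `app` stands in for the storage application.
    def on_own_state(app, state):
        if state in ('DOWN', 'TEMPORARILY_DOWN'):
            app.shutdown(erase=(state == 'DOWN'))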
neo/tests/functional/__init__.py

@@ -609,10 +609,6 @@ class NEOCluster(object):
         self.expectStorageState(process.getUUID(), NodeStates.PENDING,
                                 *args, **kw)
 
-    def expectUnknown(self, process, *args, **kw):
-        self.expectStorageState(process.getUUID(), NodeStates.UNKNOWN,
-                                *args, **kw)
-
     def expectUnavailable(self, process, *args, **kw):
         self.expectStorageState(process.getUUID(),
                 NodeStates.TEMPORARILY_DOWN, *args, **kw)
@@ -679,7 +675,6 @@ class NEOCluster(object):
         self.expectCondition(callback, *args, **kw)
 
     def expectStorageNotKnown(self, process, *args, **kw):
-        # /!\ Not Known != Unknown
         process_uuid = process.getUUID()
         def expected_storage_not_known(last_try):
             for storage in self.getStorageList():
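
With expectUnknown gone, the functional-test hunks below all wait for TEMPORARILY_DOWN through the surviving expectUnavailable helper. Note also the removed "/!\ Not Known != Unknown" comment: once the UNKNOWN state is deleted, "not known" can only mean absent from the node manager, so the warning is moot. The surviving helper, restated as a sketch of the same shape:

    # Poll until the storage behind `process` reports TEMPORARILY_DOWN.
    def expectUnavailable(self, process, *args, **kw):
        self.expectStorageState(process.getUUID(),
                                NodeStates.TEMPORARILY_DOWN, *args, **kw)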
neo/tests/functional/testCluster.py

@@ -48,7 +48,7 @@ class ClusterTests(NEOFunctionalTest):
         neo.stop()
         neo.run(except_storages=(s2,))
         neo.expectPending(s1)
-        neo.expectUnknown(s2)
+        neo.expectUnavailable(s2)
         neo.expectClusterRecovering()
         # Starting missing storage allows cluster to exit Recovery without
         # neoctl action.
@@ -61,11 +61,11 @@ class ClusterTests(NEOFunctionalTest):
         neo.stop()
         neo.run(except_storages=(s2,))
         neo.expectPending(s1)
-        neo.expectUnknown(s2)
+        neo.expectUnavailable(s2)
         neo.expectClusterRecovering()
         neo.startCluster()
         neo.expectRunning(s1)
-        neo.expectUnknown(s2)
+        neo.expectUnavailable(s2)
         neo.expectClusterRunning()
 
     def testClusterBreaks(self):
neo/tests/functional/testMaster.py

@@ -59,7 +59,7 @@ class MasterTests(NEOFunctionalTest):
         self.assertEqual(len(killed_uuid_list), 1)
         uuid = killed_uuid_list[0]
         # Check the state of the primary we just killed
-        self.neo.expectMasterState(uuid, (None, NodeStates.UNKNOWN))
+        self.neo.expectMasterState(uuid, (None, NodeStates.TEMPORARILY_DOWN))
         # BUG: The following check expects neoctl to reconnect before
         #      the election finishes.
         self.assertEqual(self.neo.getPrimary(), None)
neo/tests/functional/testStorage.py

@@ -409,7 +409,7 @@ class StorageTests(NEOFunctionalTest):
         # restart the cluster with the first storage killed
         self.neo.run(except_storages=[started[1]])
         self.neo.expectPending(started[0])
-        self.neo.expectUnknown(started[1])
+        self.neo.expectUnavailable(started[1])
         self.neo.expectClusterRecovering()
         # Cluster doesn't know there are outdated cells
         self.neo.expectOudatedCells(number=0)
neo/tests/testNodes.py

@@ -35,7 +35,7 @@ class NodesTests(NeoUnitTestBase):
         address = ('127.0.0.1', 10000)
         uuid = self.getNewUUID(None)
         node = Node(self.nm, address=address, uuid=uuid)
-        self.assertEqual(node.getState(), NodeStates.UNKNOWN)
+        self.assertEqual(node.getState(), NodeStates.TEMPORARILY_DOWN)
         self.assertEqual(node.getAddress(), address)
         self.assertEqual(node.getUUID(), uuid)
         self.assertTrue(time() - 1 < node.getLastStateChange() < time())
@@ -43,7 +43,7 @@ class NodesTests(NeoUnitTestBase):
     def testState(self):
         """ Check if the last changed time is updated when state is changed """
         node = Node(self.nm)
-        self.assertEqual(node.getState(), NodeStates.UNKNOWN)
+        self.assertEqual(node.getState(), NodeStates.TEMPORARILY_DOWN)
         self.assertTrue(time() - 1 < node.getLastStateChange() < time())
         previous_time = node.getLastStateChange()
         node.setState(NodeStates.RUNNING)
@@ -161,7 +161,7 @@ class NodeManagerTests(NeoUnitTestBase):
             (NodeTypes.STORAGE, self.storage.getAddress(), new_uuid,
                 NodeStates.RUNNING, None),
             (NodeTypes.ADMIN, self.admin.getAddress(), self.admin.getUUID(),
-                NodeStates.UNKNOWN, None),
+                NodeStates.TEMPORARILY_DOWN, None),
         )
         app = Mock()
         app.pt = Mock({'dropNode': True})
@@ -180,9 +180,9 @@ class NodeManagerTests(NeoUnitTestBase):
         new_storage = storage_list[0]
         self.assertNotEqual(new_storage.getUUID(), old_uuid)
         self.assertEqual(new_storage.getState(), NodeStates.RUNNING)
-        # admin is still here but in UNKNOWN state
+        # admin is still here but in TEMPORARILY_DOWN state
         self.checkNodes([self.master, self.admin, new_storage])
-        self.assertEqual(self.admin.getState(), NodeStates.UNKNOWN)
+        self.assertEqual(self.admin.getState(), NodeStates.TEMPORARILY_DOWN)
 
 class MasterDBTests(NeoUnitTestBase):
neo/tests/testPT.py

@@ -34,7 +34,7 @@ class PartitionTableTests(NeoUnitTestBase):
         # check getter
         self.assertEqual(cell.getNode(), sn)
         self.assertEqual(cell.getState(), CellStates.OUT_OF_DATE)
-        self.assertEqual(cell.getNodeState(), NodeStates.UNKNOWN)
+        self.assertEqual(cell.getNodeState(), NodeStates.TEMPORARILY_DOWN)
         self.assertEqual(cell.getUUID(), uuid)
         self.assertEqual(cell.getAddress(), server)
         # check state setter
neo/tests/threaded/test.py

@@ -552,7 +552,8 @@ class Test(NEOThreadedTest):
         # restart it with one storage only
         if 1:
             cluster.start(storage_list=(s1,))
-            self.assertEqual(NodeStates.UNKNOWN, cluster.getNodeState(s2))
+            self.assertEqual(NodeStates.TEMPORARILY_DOWN,
+                             cluster.getNodeState(s2))
 
     @with_cluster(storage_count=2, partitions=2, replicas=1)
     def testRestartStoragesWithReplicas(self, cluster):