Project: neoppod
Commit b27db46f, authored Apr 25, 2017 by Julien Muchembled

Rename node states: DOWN -> UNKNOWN, TEMPORARILY_DOWN -> DOWN

Parent: f39babe5
Showing 24 changed files, with 85 additions and 92 deletions (+85 -92).
TODO                                     +0  -3
neo/client/handlers/master.py            +1  -1
neo/lib/handler.py                       +1  -1
neo/lib/node.py                          +6  -7
neo/lib/protocol.py                      +3  -3
neo/lib/pt.py                            +1  -1
neo/master/app.py                        +3  -3
neo/master/handlers/__init__.py          +4  -5
neo/master/handlers/administration.py    +3  -3
neo/master/handlers/client.py            +2  -2
neo/master/handlers/master.py            +2  -2
neo/master/recovery.py                   +6  -6
neo/neoctl/neoctl.py                     +2  -2
neo/storage/handlers/__init__.py         +2  -2
neo/storage/handlers/storage.py          +2  -3
neo/tests/functional/__init__.py         +3  -4
neo/tests/functional/testCluster.py      +9  -9
neo/tests/functional/testMaster.py       +3  -3
neo/tests/functional/testStorage.py      +18 -18
neo/tests/master/testMasterPT.py         +3  -3
neo/tests/master/testRecovery.py         +1  -1
neo/tests/testNodes.py                   +6  -6
neo/tests/testPT.py                      +3  -3
neo/tests/threaded/test.py               +1  -1
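Taken together, the hunks below apply one mechanical mapping across the tree
(old name on the left, new name on the right), plus the matching renames of
the Node helper methods and of the functional-test expectations:

    NodeStates.DOWN              -> NodeStates.UNKNOWN
    NodeStates.TEMPORARILY_DOWN  -> NodeStates.DOWN

    node.isDown()             -> node.isUnknown()
    node.setDown()            -> node.setUnknown()
    node.isTemporarilyDown()  -> node.isDown()
    node.setTemporarilyDown() -> node.setDown()

    NEOCluster.expectUnavailable(...)     -> NEOCluster.expectDown(...)
    NEOCluster.expectStorageNotKnown(...) -> NEOCluster.expectStorageUnknown(...)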
TODO  (+0 -3)

 Documentation
-- Clarify node state signification, and consider renaming them in the code.
-  Ideas:
-  TEMPORARILY_DOWN becomes UNAVAILABLE
 - Clarify the use of each error codes:
   - NOT_READY removed (connection kept opened until ready)
   - Split PROTOCOL_ERROR (BAD IDENTIFICATION, ...)
neo/client/handlers/master.py  (+1 -1)

@@ -142,7 +142,7 @@ class PrimaryNotificationsHandler(MTEventHandler):
     def notifyNodeInformation(self, conn, timestamp, node_list):
         super(PrimaryNotificationsHandler, self).notifyNodeInformation(
             conn, timestamp, node_list)
-        # XXX: 'update' automatically closes DOWN nodes. Do we really want
+        # XXX: 'update' automatically closes UNKNOWN nodes. Do we really want
         #      to do the same thing for nodes in other non-running states ?
         getByUUID = self.app.nm.getByUUID
         for node in node_list:
neo/lib/handler.py  (+1 -1)

@@ -138,7 +138,7 @@ class EventHandler(object):
     def connectionClosed(self, conn):
         """Called when a connection is closed by the peer."""
         logging.debug('connection closed for %r', conn)
-        self.connectionLost(conn, NodeStates.TEMPORARILY_DOWN)
+        self.connectionLost(conn, NodeStates.DOWN)

     def connectionLost(self, conn, new_state):
         """ this is a method to override in sub-handlers when there is no need
neo/lib/node.py  (+6 -7)

@@ -31,8 +31,7 @@ class Node(object):
     _identified = False
     id_timestamp = None

-    def __init__(self, manager, address=None, uuid=None,
-            state=NodeStates.TEMPORARILY_DOWN):
+    def __init__(self, manager, address=None, uuid=None, state=NodeStates.DOWN):
         self._state = state
         self._address = address
         self._uuid = uuid
@@ -64,7 +63,7 @@ class Node(object):
     def setState(self, new_state):
         if self._state == new_state:
             return
-        if new_state == NodeStates.DOWN:
+        if new_state == NodeStates.UNKNOWN:
             self._manager.remove(self)
             self._state = new_state
         else:
@@ -271,7 +270,7 @@ class NodeManager(EventQueue):
         if node in self._node_set:
             logging.warning('adding a known node %r, ignoring', node)
             return
-        assert not node.isDown(), node
+        assert not node.isUnknown(), node
         self._node_set.add(node)
         self._updateAddress(node, None)
         self._updateUUID(node, None)
@@ -321,7 +320,7 @@ class NodeManager(EventQueue):
         set_dict.setdefault(new_key, set()).add(node)

     def _updateState(self, node, old_state):
-        assert not node.isDown(), node
+        assert not node.isUnknown(), node
         self.__updateSet(self._state_dict, old_state, node.getState(), node)

     def getList(self, node_filter=None):
@@ -427,7 +426,7 @@ class NodeManager(EventQueue):
         log_args = node_type, uuid_str(uuid), addr, state, id_timestamp
         if node is None:
-            assert state != NodeStates.DOWN, (self._node_set,) + log_args
+            assert state != NodeStates.UNKNOWN, (self._node_set,) + log_args
             node = self._createNode(klass, address=addr, uuid=uuid,
                 state=state)
             logging.debug('creating node %r', node)
@@ -439,7 +438,7 @@ class NodeManager(EventQueue):
                     'Discrepancy between node_by_uuid (%r) and ' \
                     'node_by_addr (%r)' % (node_by_uuid, node_by_addr)
                 node_by_uuid.setUUID(None)
-            if state == NodeStates.DOWN:
+            if state == NodeStates.UNKNOWN:
                 logging.debug('dropping node %r (%r), found with %s '
                     '%s %s %s %s', node, node.isConnected(), *log_args)
                 if node.isConnected():
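The setState hunk keeps the old special case under its new name: the state
that means "the cluster forgot this node" (formerly DOWN, now UNKNOWN) removes
the node from its NodeManager, while any other state just records the
transition. A minimal sketch of the resulting behaviour; the `node` argument
is assumed to be a registered neo.lib.node.Node and is not constructed here:

    # Sketch only, not part of this commit.
    from neo.lib.protocol import NodeStates

    def forget(node):
        # DOWN keeps the node in its manager; only the state is recorded.
        node.setState(NodeStates.DOWN)
        # UNKNOWN is the one special case: setState() removes the node from
        # its NodeManager, which is why add() and _updateState() can assert
        # that a managed node is never isUnknown().
        node.setState(NodeStates.UNKNOWN)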
neo/lib/protocol.py  (+3 -3)

@@ -119,9 +119,9 @@ def NodeTypes():
 @Enum
 def NodeStates():
-    RUNNING
-    TEMPORARILY_DOWN
-    DOWN
+    UNKNOWN
+    DOWN
+    RUNNING
     PENDING

 @Enum
@@ -146,8 +146,8 @@ def CellStates():
 # used for logging
 node_state_prefix_dict = {
     NodeStates.RUNNING: 'R',
-    NodeStates.TEMPORARILY_DOWN: 'T',
-    NodeStates.DOWN: 'D',
+    NodeStates.DOWN: 'D',
+    NodeStates.UNKNOWN: 'U',
     NodeStates.PENDING: 'P',
 }
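With the enum renamed as above, the one-letter logging prefixes move too:
'D' now abbreviates the renamed DOWN (formerly TEMPORARILY_DOWN: 'T'), and
'U' abbreviates UNKNOWN (formerly DOWN: 'D'). A self-contained restatement
of the new mapping, assuming only the module-level dict shown in the hunk:

    from neo.lib.protocol import NodeStates, node_state_prefix_dict

    assert node_state_prefix_dict[NodeStates.RUNNING] == 'R'
    assert node_state_prefix_dict[NodeStates.DOWN] == 'D'     # was TEMPORARILY_DOWN: 'T'
    assert node_state_prefix_dict[NodeStates.UNKNOWN] == 'U'  # was DOWN: 'D'
    assert node_state_prefix_dict[NodeStates.PENDING] == 'P'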
neo/lib/pt.py  (+1 -1)

@@ -168,7 +168,7 @@ class PartitionTable(object):
     def _setCell(self, offset, node, state):
         if state == CellStates.DISCARDED:
             return self.removeCell(offset, node)
-        if node.isDown():
+        if node.isUnknown():
             raise PartitionTableException('Invalid node state')

         self.count_dict.setdefault(node, 0)
neo/master/app.py  (+3 -3)

@@ -238,7 +238,7 @@ class Application(BaseApplication):
         # If I know any storage node, make sure that they are not in the
         # running state, because they are not connected at this stage.
         for node in self.nm.getStorageList():
-            assert node.isTemporarilyDown(), node
+            assert node.isDown(), node

         if self.uuid is None:
             self.uuid = self.getNewUUID(None, self.server, NodeTypes.MASTER)
@@ -340,7 +340,7 @@ class Application(BaseApplication):
         try:
             if master_conn is None:
                 for node in self.nm.getMasterList():
-                    node.setTemporarilyDown()
+                    node.setDown()
                 node = self.primary_master
                 failed.add(node.getAddress())
                 if not node.isConnected(True):
@@ -487,7 +487,7 @@ class Application(BaseApplication):
             if node.isStorage():
                 conn.send(Packets.NotifyNodeInformation(monotonic_time(), ((
                     node.getType(), node.getAddress(), node.getUUID(),
-                    NodeStates.TEMPORARILY_DOWN, None),)))
+                    NodeStates.DOWN, None),)))
             if conn.pending():
                 conn.abort()
                 continue
neo/master/handlers/__init__.py  (+4 -5)

@@ -82,13 +82,12 @@ class BaseServiceHandler(MasterHandler):
             # was in pending state, so drop it from the node manager to forget
             # it and do not set in running state when it comes back
             logging.info('drop a pending node from the node manager')
-            node.setDown()
-        elif node.isTemporarilyDown():
-            # Already put in TEMPORARILY_DOWN state
-            # by AdministrationHandler.setNodeState
+            node.setUnknown()
+        elif node.isDown():
+            # Already put in DOWN state by AdministrationHandler.setNodeState
             return
         else:
-            node.setTemporarilyDown()
+            node.setDown()
         app.broadcastNodesInformation([node])
         if app.truncate_tid:
             raise StoppedOperation
neo/master/handlers/administration.py  (+3 -3)

@@ -34,8 +34,8 @@ CLUSTER_STATE_WORKFLOW = {
         ClusterStates.STARTING_BACKUP),
 }
 NODE_STATE_WORKFLOW = {
-    NodeTypes.MASTER: (NodeStates.TEMPORARILY_DOWN,),
-    NodeTypes.STORAGE: (NodeStates.TEMPORARILY_DOWN, NodeStates.DOWN),
+    NodeTypes.MASTER: (NodeStates.DOWN,),
+    NodeTypes.STORAGE: (NodeStates.DOWN, NodeStates.UNKNOWN),
 }

 class AdministrationHandler(MasterHandler):
@@ -95,7 +95,7 @@ class AdministrationHandler(MasterHandler):
         message = ('state changed' if state_changed else
             'node already in %s state' % state)
         if node.isStorage():
-            keep = state == NodeStates.TEMPORARILY_DOWN
+            keep = state == NodeStates.DOWN
             try:
                 cell_list = app.pt.dropNodeList([node], keep)
             except PartitionTableException, e:
neo/master/handlers/client.py  (+2 -2)

@@ -15,7 +15,7 @@
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.

 from neo.lib.handler import DelayEvent
-from neo.lib.protocol import NodeStates, Packets, ProtocolError, MAX_TID, Errors
+from neo.lib.protocol import Packets, ProtocolError, MAX_TID, Errors
 from ..app import monotonic_time
 from . import MasterHandler
@@ -28,7 +28,7 @@ class ClientServiceHandler(MasterHandler):
         node = app.nm.getByUUID(conn.getUUID())
         assert node is not None, conn
         app.tm.clientLost(node)
-        node.setState(NodeStates.DOWN)
+        node.setUnknown()
         app.broadcastNodesInformation([node])
         app.nm.remove(node)
neo/master/handlers/master.py  (+2 -2)

@@ -26,7 +26,7 @@ class SecondaryHandler(MasterHandler):
     def _connectionLost(self, conn):
         app = self.app
         node = app.nm.getByUUID(conn.getUUID())
-        node.setTemporarilyDown()
+        node.setDown()
         app.broadcastNodesInformation([node])
@@ -91,5 +91,5 @@ class PrimaryHandler(ElectionHandler):
             conn, timestamp, node_list)
         for node_type, _, uuid, state, _ in node_list:
             assert node_type == NodeTypes.MASTER, node_type
-            if uuid == self.app.uuid and state == NodeStates.TEMPORARILY_DOWN:
+            if uuid == self.app.uuid and state == NodeStates.DOWN:
                 sys.exit()
neo/master/recovery.py  (+6 -6)

@@ -71,11 +71,11 @@ class RecoveryManager(MasterHandler):
             for node in app.nm.getMasterList():
                 if not (node is app._node or node.isConnected(True)):
                     # During recovery, master nodes are not put back in
-                    # TEMPORARILY_DOWN state by handlers. This is done
+                    # DOWN state by handlers. This is done
                     # entirely in this method (here and after this poll
                     # loop), to minimize the notification packets.
-                    if not node.isTemporarilyDown():
-                        node.setTemporarilyDown()
+                    if not node.isDown():
+                        node.setDown()
                         node_list.append(node)
                     ClientConnection(app, app.election_handler, node)
             if node_list:
@@ -128,10 +128,10 @@ class RecoveryManager(MasterHandler):
             if not (node is app._node or node.isIdentified()):
                 if node.isConnected(True):
                     node.getConnection().close()
-                    assert node.isTemporarilyDown(), node
-                elif not node.isTemporarilyDown():
+                    assert node.isDown(), node
+                elif not node.isDown():
                     assert self.try_secondary, node
-                    node.setTemporarilyDown()
+                    node.setDown()
                     node_list.append(node)
         app.broadcastNodesInformation(node_list)
neo/neoctl/neoctl.py  (+2 -2)

@@ -157,10 +157,10 @@ class NeoCTL(BaseApplication):
         return self.setClusterState(ClusterStates.VERIFYING)

     def killNode(self, node):
-        return self._setNodeState(node, NodeStates.TEMPORARILY_DOWN)
+        return self._setNodeState(node, NodeStates.DOWN)

     def dropNode(self, node):
-        return self._setNodeState(node, NodeStates.DOWN)
+        return self._setNodeState(node, NodeStates.UNKNOWN)

     def getPrimary(self):
         """
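Under the new names the two neoctl operations read naturally: killNode puts a
node in DOWN (stopped, but still known to the cluster), while dropNode puts it
in UNKNOWN (forgotten, see the setState change in neo/lib/node.py). A hedged
usage sketch; the wrapper function, its arguments, and the way NeoCTL is
constructed here are assumptions for illustration, not part of this diff:

    # Hypothetical wrapper around the two calls shown above.
    from neo.neoctl.neoctl import NeoCTL

    def retire_node(admin_address, node_uuid, forget=False):
        neoctl = NeoCTL(admin_address)  # constructor usage is an assumption
        if forget:
            # -> NodeStates.UNKNOWN: drop the node from the cluster entirely
            return neoctl.dropNode(node_uuid)
        # -> NodeStates.DOWN: stop the node but keep it in the node table
        return neoctl.killNode(node_uuid)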
neo/storage/handlers/__init__.py  (+2 -2)

@@ -56,8 +56,8 @@ class BaseMasterHandler(BaseHandler):
         if uuid == self.app.uuid:
             # This is me, do what the master tell me
             logging.info("I was told I'm %s", state)
-            if state in (NodeStates.DOWN, NodeStates.TEMPORARILY_DOWN):
-                erase = state == NodeStates.DOWN
+            if state in (NodeStates.UNKNOWN, NodeStates.DOWN):
+                erase = state == NodeStates.UNKNOWN
                 self.app.shutdown(erase=erase)
         elif node_type == NodeTypes.CLIENT and state != NodeStates.RUNNING:
             logging.info('Notified of non-running client, abort (%s)',
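The storage side keeps its old behaviour under the new names: being told
either non-running state shuts the storage down, but only UNKNOWN (formerly
DOWN) also erases its database, since a node the cluster forgot no longer
holds valid partitions. A reduced restatement of the branch above, assuming
only what the hunk shows:

    # Sketch restating BaseMasterHandler's dispatch; `app` is assumed to be
    # the storage application object used in the hunk above.
    from neo.lib.protocol import NodeStates

    def on_own_state(app, state):
        if state in (NodeStates.UNKNOWN, NodeStates.DOWN):
            # Both states stop the process; erase=True only when the
            # cluster dropped the node for good.
            app.shutdown(erase=(state == NodeStates.UNKNOWN))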
neo/storage/handlers/storage.py  (+2 -3)

@@ -18,8 +18,7 @@ import weakref
 from functools import wraps
 from neo.lib.connection import ConnectionClosed
 from neo.lib.handler import DelayEvent, EventHandler
-from neo.lib.protocol import Errors, NodeStates, Packets, ProtocolError, \
-    ZERO_HASH
+from neo.lib.protocol import Errors, Packets, ProtocolError, ZERO_HASH

 def checkConnectionIsReplicatorConnection(func):
     def wrapper(self, conn, *args, **kw):
@@ -53,7 +52,7 @@ class StorageOperationHandler(EventHandler):
             node = app.nm.getByUUID(uuid)
         else:
             node = app.nm.getByAddress(conn.getAddress())
-            node.setState(NodeStates.DOWN)
+            node.setUnknown()
         replicator = app.replicator
         if replicator.current_node is node:
             replicator.abort()
neo/tests/functional/__init__.py  (+3 -4)

@@ -609,9 +609,8 @@ class NEOCluster(object):
         self.expectStorageState(process.getUUID(), NodeStates.PENDING,
                                 *args, **kw)

-    def expectUnavailable(self, process, *args, **kw):
-        self.expectStorageState(process.getUUID(),
-                NodeStates.TEMPORARILY_DOWN, *args, **kw)
+    def expectDown(self, process, *args, **kw):
+        self.expectStorageState(process.getUUID(), NodeStates.DOWN, *args, **kw)

     def expectPrimary(self, uuid=None, *args, **kw):
         def callback(last_try):
@@ -674,7 +673,7 @@ class NEOCluster(object):
             return current_try, current_try
         self.expectCondition(callback, *args, **kw)

-    def expectStorageNotKnown(self, process, *args, **kw):
+    def expectStorageUnknown(self, process, *args, **kw):
         process_uuid = process.getUUID()
         def expected_storage_not_known(last_try):
             for storage in self.getStorageList():
neo/tests/functional/testCluster.py  (+9 -9)

@@ -48,7 +48,7 @@ class ClusterTests(NEOFunctionalTest):
         neo.stop()
         neo.run(except_storages=(s2,))
         neo.expectPending(s1)
-        neo.expectUnavailable(s2)
+        neo.expectDown(s2)
         neo.expectClusterRecovering()
         # Starting missing storage allows cluster to exit Recovery without
         # neoctl action.
@@ -61,11 +61,11 @@ class ClusterTests(NEOFunctionalTest):
         neo.stop()
         neo.run(except_storages=(s2,))
         neo.expectPending(s1)
-        neo.expectUnavailable(s2)
+        neo.expectDown(s2)
         neo.expectClusterRecovering()
         neo.startCluster()
         neo.expectRunning(s1)
-        neo.expectUnavailable(s2)
+        neo.expectDown(s2)
         neo.expectClusterRunning()

     def testClusterBreaks(self):
@@ -149,20 +149,20 @@ class ClusterTests(NEOFunctionalTest):
             )
         storages = self.neo.getStorageProcessList()
         self.neo.run(except_storages=storages)
-        self.neo.expectStorageNotKnown(storages[0])
-        self.neo.expectStorageNotKnown(storages[1])
+        self.neo.expectStorageUnknown(storages[0])
+        self.neo.expectStorageUnknown(storages[1])
         storages[0].start()
         self.neo.expectPending(storages[0])
-        self.neo.expectStorageNotKnown(storages[1])
+        self.neo.expectStorageUnknown(storages[1])
         storages[1].start()
         self.neo.expectPending(storages[0])
         self.neo.expectPending(storages[1])
         storages[0].stop()
-        self.neo.expectUnavailable(storages[0])
+        self.neo.expectDown(storages[0])
         self.neo.expectPending(storages[1])
         storages[1].stop()
-        self.neo.expectUnavailable(storages[0])
-        self.neo.expectUnavailable(storages[1])
+        self.neo.expectDown(storages[0])
+        self.neo.expectDown(storages[1])

 def test_suite():
     return unittest.makeSuite(ClusterTests)
neo/tests/functional/testMaster.py  (+3 -3)

@@ -59,7 +59,7 @@ class MasterTests(NEOFunctionalTest):
         self.assertEqual(len(killed_uuid_list), 1)
         uuid = killed_uuid_list[0]
         # Check the state of the primary we just killed
-        self.neo.expectMasterState(uuid, (None, NodeStates.TEMPORARILY_DOWN))
+        self.neo.expectMasterState(uuid, (None, NodeStates.DOWN))
         # BUG: The following check expects neoctl to reconnect before
         #      the election finishes.
         self.assertEqual(self.neo.getPrimary(), None)
@@ -78,12 +78,12 @@ class MasterTests(NEOFunctionalTest):
         # Test sanity checks.
         self.assertEqual(len(killed_uuid_list), 1)
         self.neo.expectMasterState(killed_uuid_list[0],
-            NodeStates.TEMPORARILY_DOWN)
+            NodeStates.DOWN)
         self.assertEqual(len(self.neo.getMasterList()), MASTER_NODE_COUNT)

         uuid, = self.neo.killPrimary()
         # Check the state of the primary we just killed
-        self.neo.expectMasterState(uuid, NodeStates.TEMPORARILY_DOWN)
+        self.neo.expectMasterState(uuid, NodeStates.DOWN)
         # Check that a primary master arose.
         self.neo.expectPrimary(timeout=10)
         # Check that the uuid really changed.
neo/tests/functional/testStorage.py  (+18 -18)

@@ -168,7 +168,7 @@ class StorageTests(NEOFunctionalTest):
         self.neo.neoctl.killNode(started[0].getUUID())
         # Cluster still operational. All cells of first storage should be
         # outdated.
-        self.neo.expectUnavailable(started[0])
+        self.neo.expectDown(started[0])
         self.neo.expectOudatedCells(2)
         self.neo.expectClusterRunning()
@@ -177,7 +177,7 @@ class StorageTests(NEOFunctionalTest):
         started[1].stop()
         # Cluster not operational anymore. Only cells of second storage that
         # were shared with the third one should become outdated.
-        self.neo.expectUnavailable(started[1])
+        self.neo.expectDown(started[1])
         self.neo.expectClusterRecovering()
         self.neo.expectOudatedCells(3)
@@ -198,7 +198,7 @@ class StorageTests(NEOFunctionalTest):
         # stop it, the cluster must switch to verification
         started[0].stop()
-        self.neo.expectUnavailable(started[0])
+        self.neo.expectDown(started[0])
         self.neo.expectClusterRecovering()
         # client must have been disconnected
         self.assertEqual(len(self.neo.getClientlist()), 0)
@@ -224,7 +224,7 @@ class StorageTests(NEOFunctionalTest):
         # stop one storage, cluster must remains running
         started[0].stop()
-        self.neo.expectUnavailable(started[0])
+        self.neo.expectDown(started[0])
         self.neo.expectRunning(started[1])
         self.neo.expectRunning(started[2])
         self.neo.expectOudatedCells(number=10)
@@ -232,17 +232,17 @@ class StorageTests(NEOFunctionalTest):
         # stop a second storage, cluster is still running
         started[1].stop()
-        self.neo.expectUnavailable(started[0])
-        self.neo.expectUnavailable(started[1])
+        self.neo.expectDown(started[0])
+        self.neo.expectDown(started[1])
         self.neo.expectRunning(started[2])
         self.neo.expectOudatedCells(number=20)
         self.neo.expectClusterRunning()
         # stop the last, cluster died
         started[2].stop()
-        self.neo.expectUnavailable(started[0])
-        self.neo.expectUnavailable(started[1])
-        self.neo.expectUnavailable(started[2])
+        self.neo.expectDown(started[0])
+        self.neo.expectDown(started[1])
+        self.neo.expectDown(started[2])
         self.neo.expectOudatedCells(number=20)
         self.neo.expectClusterRecovering()
@@ -312,7 +312,7 @@ class StorageTests(NEOFunctionalTest):
         # kill one storage, it should be set as unavailable
         started[0].stop()
-        self.neo.expectUnavailable(started[0])
+        self.neo.expectDown(started[0])
         self.neo.expectRunning(started[1])
         # and the partition table must not change
         self.neo.expectAssignedCells(started[0], 10)
@@ -320,7 +320,7 @@ class StorageTests(NEOFunctionalTest):
         # ask neoctl to drop it
         self.neo.neoctl.dropNode(started[0].getUUID())
-        self.neo.expectStorageNotKnown(started[0])
+        self.neo.expectStorageUnknown(started[0])
         self.neo.expectAssignedCells(started[0], 0)
         self.neo.expectAssignedCells(started[1], 10)
         self.assertRaises(RuntimeError, self.neo.neoctl.dropNode,
@@ -335,7 +335,7 @@ class StorageTests(NEOFunctionalTest):
         (started, stopped) = self.__setup(storage_number=2, replicas=1,
                 pending_number=1, partitions=10)
         self.neo.expectRunning(started[0])
-        self.neo.expectStorageNotKnown(stopped[0])
+        self.neo.expectStorageUnknown(stopped[0])
         self.neo.expectOudatedCells(number=0)
         # populate the cluster with some data
@@ -362,7 +362,7 @@ class StorageTests(NEOFunctionalTest):
         # kill the first storage
         started[0].stop()
-        self.neo.expectUnavailable(started[0])
+        self.neo.expectDown(started[0])
         self.neo.expectOudatedCells(number=10)
         self.neo.expectAssignedCells(started[0], 10)
         self.neo.expectAssignedCells(stopped[0], 10)
@@ -371,7 +371,7 @@ class StorageTests(NEOFunctionalTest):
         # drop it from partition table
         self.neo.neoctl.dropNode(started[0].getUUID())
-        self.neo.expectStorageNotKnown(started[0])
+        self.neo.expectStorageUnknown(started[0])
         self.neo.expectRunning(stopped[0])
         self.neo.expectAssignedCells(started[0], 0)
         self.neo.expectAssignedCells(stopped[0], 10)
@@ -395,12 +395,12 @@ class StorageTests(NEOFunctionalTest):
         # drop the first then the second storage
         started[0].stop()
-        self.neo.expectUnavailable(started[0])
+        self.neo.expectDown(started[0])
         self.neo.expectRunning(started[1])
         self.neo.expectOudatedCells(number=10)
         started[1].stop()
-        self.neo.expectUnavailable(started[0])
-        self.neo.expectUnavailable(started[1])
+        self.neo.expectDown(started[0])
+        self.neo.expectDown(started[1])
         self.neo.expectOudatedCells(number=10)
         self.neo.expectClusterRecovering()
         # XXX: need to sync with storages first
@@ -409,7 +409,7 @@ class StorageTests(NEOFunctionalTest):
         # restart the cluster with the first storage killed
         self.neo.run(except_storages=[started[1]])
         self.neo.expectPending(started[0])
-        self.neo.expectUnavailable(started[1])
+        self.neo.expectDown(started[1])
         self.neo.expectClusterRecovering()
         # Cluster doesn't know there are outdated cells
         self.neo.expectOudatedCells(number=0)
neo/tests/master/testMasterPT.py  (+3 -3)

@@ -70,9 +70,9 @@ class MasterPartitionTableTests(NeoUnitTestBase):
         pt._setCell(0, sn1, CellStates.OUT_OF_DATE)
         sn1.setState(NodeStates.RUNNING)
         pt._setCell(1, sn2, CellStates.UP_TO_DATE)
-        sn2.setState(NodeStates.TEMPORARILY_DOWN)
+        sn2.setState(NodeStates.DOWN)
         pt._setCell(2, sn3, CellStates.UP_TO_DATE)
-        sn3.setState(NodeStates.DOWN)
+        sn3.setState(NodeStates.UNKNOWN)
         pt._setCell(3, sn4, CellStates.UP_TO_DATE)
         sn4.setState(NodeStates.RUNNING)
         # outdate nodes
@@ -146,7 +146,7 @@ class MasterPartitionTableTests(NeoUnitTestBase):
         uuid2 = self.getStorageUUID()
         server2 = ("127.0.0.2", 19001)
         sn2 = self.createStorage(server2, uuid2)
-        sn2.setState(NodeStates.TEMPORARILY_DOWN)
+        sn2.setState(NodeStates.DOWN)
         # add node without uuid
         server3 = ("127.0.0.3", 19001)
         sn3 = self.createStorage(server3, None, NodeStates.RUNNING)
neo/tests/master/testRecovery.py  (+1 -1)

@@ -94,7 +94,7 @@ class MasterRecoveryTests(NeoUnitTestBase):
         conn = self.getFakeConnection(uuid, self.storage_port)
         offset = 1000000
         self.assertFalse(self.app.pt.hasOffset(offset))
-        cell_list = [(offset, ((uuid, NodeStates.DOWN,),),)]
+        cell_list = [(offset, ((uuid, NodeStates.UNKNOWN,),),)]
         node.setPending()
         self.checkProtocolErrorRaised(recovery.answerPartitionTable, conn,
             2, cell_list)
neo/tests/testNodes.py  (+6 -6)

@@ -35,7 +35,7 @@ class NodesTests(NeoUnitTestBase):
         address = ('127.0.0.1', 10000)
         uuid = self.getNewUUID(None)
         node = Node(self.nm, address=address, uuid=uuid)
-        self.assertEqual(node.getState(), NodeStates.TEMPORARILY_DOWN)
+        self.assertEqual(node.getState(), NodeStates.DOWN)
         self.assertEqual(node.getAddress(), address)
         self.assertEqual(node.getUUID(), uuid)
         self.assertTrue(time() - 1 < node.getLastStateChange() < time())
@@ -43,7 +43,7 @@ class NodesTests(NeoUnitTestBase):
     def testState(self):
         """ Check if the last changed time is updated when state is changed """
         node = Node(self.nm)
-        self.assertEqual(node.getState(), NodeStates.TEMPORARILY_DOWN)
+        self.assertEqual(node.getState(), NodeStates.DOWN)
         self.assertTrue(time() - 1 < node.getLastStateChange() < time())
         previous_time = node.getLastStateChange()
         node.setState(NodeStates.RUNNING)
@@ -156,12 +156,12 @@ class NodeManagerTests(NeoUnitTestBase):
         old_uuid = self.storage.getUUID()
         new_uuid = self.getStorageUUID()
         node_list = (
-            (NodeTypes.CLIENT, None, self.client.getUUID(), NodeStates.DOWN, None),
+            (NodeTypes.CLIENT, None, self.client.getUUID(), NodeStates.UNKNOWN, None),
             (NodeTypes.MASTER, new_address, self.master.getUUID(),
                 NodeStates.RUNNING, None),
             (NodeTypes.STORAGE, self.storage.getAddress(), new_uuid,
                 NodeStates.RUNNING, None),
             (NodeTypes.ADMIN, self.admin.getAddress(), self.admin.getUUID(),
-                NodeStates.TEMPORARILY_DOWN, None),
+                NodeStates.DOWN, None),
         )
         app = Mock()
         app.pt = Mock({'dropNode': True})
@@ -180,9 +180,9 @@ class NodeManagerTests(NeoUnitTestBase):
         new_storage = storage_list[0]
         self.assertNotEqual(new_storage.getUUID(), old_uuid)
         self.assertEqual(new_storage.getState(), NodeStates.RUNNING)
-        # admin is still here but in TEMPORARILY_DOWN state
+        # admin is still here but in DOWN state
         self.checkNodes([self.master, self.admin, new_storage])
-        self.assertEqual(self.admin.getState(), NodeStates.TEMPORARILY_DOWN)
+        self.assertEqual(self.admin.getState(), NodeStates.DOWN)

 class MasterDBTests(NeoUnitTestBase):
neo/tests/testPT.py  (+3 -3)

@@ -34,7 +34,7 @@ class PartitionTableTests(NeoUnitTestBase):
         # check getter
         self.assertEqual(cell.getNode(), sn)
         self.assertEqual(cell.getState(), CellStates.OUT_OF_DATE)
-        self.assertEqual(cell.getNodeState(), NodeStates.TEMPORARILY_DOWN)
+        self.assertEqual(cell.getNodeState(), NodeStates.DOWN)
         self.assertEqual(cell.getUUID(), uuid)
         self.assertEqual(cell.getAddress(), server)
         # check state setter
@@ -109,7 +109,7 @@ class PartitionTableTests(NeoUnitTestBase):
         for x in xrange(num_partitions):
             self.assertEqual(len(pt.partition_list[x]), 0)
         self.assertEqual(pt.count_dict[sn1], 0)
-        sn1.setState(NodeStates.DOWN)
+        sn1.setState(NodeStates.UNKNOWN)
         self.assertRaises(PartitionTableException, pt._setCell,
             0, sn1, CellStates.UP_TO_DATE)
         for x in xrange(num_partitions):
@@ -325,7 +325,7 @@ class PartitionTableTests(NeoUnitTestBase):
         self.assertFalse(pt.operational())
         # adding a node in all partition
         sn1 = createStorage()
-        sn1.setState(NodeStates.TEMPORARILY_DOWN)
+        sn1.setState(NodeStates.DOWN)
         for x in xrange(num_partitions):
             pt._setCell(x, sn1, CellStates.FEEDING)
         self.assertTrue(pt.filled())
neo/tests/threaded/test.py  (+1 -1)

@@ -552,7 +552,7 @@ class Test(NEOThreadedTest):
         # restart it with one storage only
         if 1:
             cluster.start(storage_list=(s1,))
-            self.assertEqual(NodeStates.TEMPORARILY_DOWN,
+            self.assertEqual(NodeStates.DOWN,
                              cluster.getNodeState(s2))

     @with_cluster(storage_count=2, partitions=2, replicas=1)