Commit 6d746894 authored by Grégory Wisniewski's avatar Grégory Wisniewski

Use new Enum class for Node States and remove unused imports.


git-svn-id: https://svn.erp5.org/repos/neo/trunk@1338 71dcc9de-d417-0410-9af5-da40c76e7ee4
parent 01fab3d2
......@@ -32,3 +32,5 @@ def setupLog(name='NEO', filename=None, verbose=False):
format = PREFIX + SUFFIX
logging_std.basicConfig(filename=filename, level=level, format=format)
logging = logging_std.getLogger(name.upper())
from neo import protocol
......@@ -28,7 +28,7 @@ from neo.bootstrap import BootstrapManager
from neo.pt import PartitionTable
from neo import protocol
from neo.util import parseMasterList
from neo.protocol import NodeTypes
from neo.protocol import NodeTypes, NodeStates
class Dispatcher:
    """Dispatcher used to redirect master requests to handlers"""
......@@ -129,7 +129,7 @@ class Application(object):
data = bootstrap.getPrimaryConnection(self.connector_handler)
(node, conn, uuid, num_partitions, num_replicas) = data
nm.update([(node.getType(), node.getAddress(), node.getUUID(),
protocol.RUNNING_STATE)])
NodeStates.RUNNING)])
self.master_node = node
self.master_conn = conn
self.uuid = uuid
......
......@@ -19,8 +19,7 @@ from neo import logging
from neo.client.handlers import BaseHandler, AnswerBaseHandler
from neo.pt import MTPartitionTable as PartitionTable
from neo import protocol
from neo.protocol import NodeTypes
from neo.protocol import NodeTypes, NodeStates
from neo.util import dump
class PrimaryBootstrapHandler(AnswerBaseHandler):
......@@ -165,7 +164,7 @@ class PrimaryNotificationsHandler(BaseHandler):
self.app.nm.update(node_list)
for node_type, addr, uuid, state in node_list:
if node_type != NodeTypes.STORAGE \
or state != protocol.RUNNING_STATE:
or state != NodeStates.RUNNING:
continue
# close connection to this storage if no longer running
conn = self.app.em.getConnectionByUUID(uuid)
......
......@@ -19,7 +19,6 @@ from neo import logging
from ZODB.TimeStamp import TimeStamp
from neo.client.handlers import BaseHandler, AnswerBaseHandler
from neo import protocol
from neo.protocol import NodeTypes
class StorageEventHandler(BaseHandler):
......
......@@ -17,6 +17,7 @@
from neo import logging
from neo import protocol
from neo.protocol import NodeStates
from neo.protocol import PacketMalformedError, UnexpectedPacketError, \
BrokenNodeDisallowedError, NotReadyError, ProtocolError
from protocol import ERROR, REQUEST_NODE_IDENTIFICATION, ACCEPT_NODE_IDENTIFICATION, \
......@@ -141,17 +142,17 @@ class EventHandler(object):
def timeoutExpired(self, conn):
"""Called when a timeout event occurs."""
logging.debug('timeout expired for %s:%d', *(conn.getAddress()))
self.connectionLost(conn, protocol.TEMPORARILY_DOWN_STATE)
self.connectionLost(conn, NodeStates.TEMPORARILY_DOWN)
def connectionClosed(self, conn):
"""Called when a connection is closed by the peer."""
logging.debug('connection closed for %s:%d', *(conn.getAddress()))
self.connectionLost(conn, protocol.TEMPORARILY_DOWN_STATE)
self.connectionLost(conn, NodeStates.TEMPORARILY_DOWN)
def peerBroken(self, conn):
"""Called when a peer is broken."""
logging.error('%s:%d is broken', *(conn.getAddress()))
self.connectionLost(conn, protocol.BROKEN_STATE)
self.connectionLost(conn, NodeStates.BROKEN)
def connectionLost(self, conn, new_state):
""" this is a method to override in sub-handlers when there is no need
......
......@@ -21,7 +21,7 @@ from time import time
from struct import pack, unpack
from neo import protocol
from neo.protocol import UUID_NAMESPACES, ClusterStates, NodeTypes
from neo.protocol import UUID_NAMESPACES, ClusterStates, NodeStates, NodeTypes
from neo.node import NodeManager
from neo.event import EventManager
from neo.connection import ListeningConnection, ClientConnection
......@@ -724,7 +724,7 @@ class Application(object):
node = self.nm.getByUUID(c.getUUID())
if node.isClient():
node_list = [(node.getType(), node.getAddress(),
node.getUUID(), protocol.DOWN_STATE)]
node.getUUID(), NodeStates.DOWN)]
c.notify(protocol.notifyNodeInformation(node_list))
# then ask storages and master nodes to shutdown
logging.info("asking all remaining nodes to shutdown")
......@@ -732,13 +732,13 @@ class Application(object):
node = self.nm.getByUUID(c.getUUID())
if node.isStorage() or node.isMaster():
node_list = [(node.getType(), node.getAddress(),
node.getUUID(), protocol.DOWN_STATE)]
node.getUUID(), NodeStates.DOWN)]
c.notify(protocol.notifyNodeInformation(node_list))
# then shutdown
sys.exit("Cluster has been asked to shut down")
def identifyStorageNode(self, uuid, node):
state = protocol.RUNNING_STATE
state = NodeStates.RUNNING
handler = None
if self.cluster_state == ClusterStates.RECOVERING:
if uuid is None:
......@@ -752,12 +752,12 @@ class Application(object):
# Here the uuid is not cleared to allow lookup pending nodes by
# uuid from the test framework. It's safe since nodes with a
# conflicting UUID are rejected in the identification handler.
state = protocol.PENDING_STATE
state = NodeStates.PENDING
handler = verification.VerificationHandler
elif self.cluster_state == ClusterStates.RUNNING:
if uuid is None or node is None:
# same as for verification
state = protocol.PENDING_STATE
state = NodeStates.PENDING
handler = storage.StorageServiceHandler
elif self.cluster_state == ClusterStates.STOPPING:
raise protocol.NotReadyError
......@@ -767,7 +767,7 @@ class Application(object):
def identifyNode(self, node_type, uuid, node):
state = protocol.RUNNING_STATE
state = NodeStates.RUNNING
handler = identification.IdentificationHandler
if node_type == NodeTypes.ADMIN:
......
......@@ -19,7 +19,7 @@ from neo import logging
from neo import protocol
from neo.handler import EventHandler
from neo.protocol import NodeTypes
from neo.protocol import NodeTypes, NodeStates
class MasterHandler(EventHandler):
"""This class implements a generic part of the event handlers."""
......@@ -72,7 +72,7 @@ class MasterHandler(EventHandler):
DISCONNECTED_STATE_DICT = {
NodeTypes.STORAGE: protocol.TEMPORARILY_DOWN_STATE,
NodeTypes.STORAGE: NodeStates.TEMPORARILY_DOWN,
}
class BaseServiceHandler(MasterHandler):
......@@ -86,11 +86,11 @@ class BaseServiceHandler(MasterHandler):
def connectionLost(self, conn, new_state):
node = self.app.nm.getByUUID(conn.getUUID())
assert node is not None
if new_state != protocol.BROKEN_STATE:
new_state = DISCONNECTED_STATE_DICT.get(node.getType(), protocol.DOWN_STATE)
if new_state != NodeStates.BROKEN:
new_state = DISCONNECTED_STATE_DICT.get(node.getType(), NodeStates.DOWN)
if node.getState() == new_state:
return
if new_state != protocol.BROKEN_STATE and node.isPending():
if new_state != NodeStates.BROKEN and node.isPending():
# was in pending state, so drop it from the node manager to forget
# it and do not set in running state when it comes back
logging.info('drop a pending node from the node manager')
......
......@@ -19,7 +19,7 @@ from neo import logging
from neo import protocol
from neo.master.handlers import MasterHandler
from neo.protocol import ClusterStates, RUNNING_STATE
from neo.protocol import ClusterStates, NodeStates
from neo.util import dump
class AdministrationHandler(MasterHandler):
......@@ -52,7 +52,7 @@ class AdministrationHandler(MasterHandler):
if uuid == app.uuid:
node.setState(state)
# get message for self
if state != RUNNING_STATE:
if state != NodeStates.RUNNING:
p = protocol.noError('node state changed')
conn.answer(p, packet.getId())
app.shutdown()
......@@ -63,7 +63,7 @@ class AdministrationHandler(MasterHandler):
conn.answer(p, packet.getId())
return
if state == protocol.RUNNING_STATE:
if state == NodeStates.RUNNING:
# first make sure to have a connection to the node
node_conn = None
for node_conn in app.em.getConnectionList():
......@@ -73,7 +73,7 @@ class AdministrationHandler(MasterHandler):
# no connection to the node
raise protocol.ProtocolError('no connection to the node')
elif state == protocol.DOWN_STATE and node.isStorage():
elif state == NodeStates.DOWN and node.isStorage():
# modify the partition table if required
cell_list = []
if modify_partition_table:
......
......@@ -18,7 +18,7 @@
from neo import logging
from neo import protocol
from neo.protocol import HIDDEN_STATE
from neo.protocol import NodeStates
from neo.master.handlers import BaseServiceHandler
from neo.protocol import UnexpectedPacketError
from neo.util import dump, getNextTID
......@@ -113,7 +113,7 @@ class ClientServiceHandler(BaseServiceHandler):
uuid_set = set()
for part in partition_set:
uuid_set.update((cell.getUUID() for cell in app.pt.getCellList(part) \
if cell.getNodeState() != HIDDEN_STATE))
if cell.getNodeState() != NodeStates.HIDDEN))
# Request locking data.
# build a new set as we may not send the message to all nodes as some
......
......@@ -18,7 +18,7 @@
from neo import logging
from neo import protocol
from neo.protocol import NodeTypes
from neo.protocol import NodeTypes, NodeStates
from neo.master.handlers import MasterHandler
from neo.exception import ElectionFailure
......@@ -50,7 +50,7 @@ class ElectionHandler(MasterHandler):
if node.getUUID() is None:
node.setUUID(uuid)
if state in (node.getState(), protocol.RUNNING_STATE):
if state in (node.getState(), NodeStates.RUNNING):
# No change. Don't care.
continue
......
......@@ -17,8 +17,7 @@
from neo.neoctl.neoctl import NeoCTL, NotReadyException
from neo.util import bin, dump
from neo import protocol
from neo.protocol import ClusterStates, NodeTypes
from neo.protocol import ClusterStates, NodeStates, NodeTypes
action_dict = {
'print': {
......@@ -44,9 +43,7 @@ class TerminalNeoCTL(object):
# Utility methods (could be functions)
def asNodeState(self, value):
if not value.endswith('_STATE'):
value += '_STATE'
return protocol.node_states.getFromStr(value)
return NodeStates.getByName(value.upper())
def asNodeType(self, value):
return NodeTypes.getByName(value.upper())
......@@ -197,7 +194,7 @@ class Application(object):
def execute(self, args):
"""Execute the command given."""
# print node type : print list of node of the given type (STORAGE_NODE_TYPE, MASTER_NODE_TYPE...)
# set node uuid state [1|0] : set the node for the given uuid to the state (RUNNING_STATE, DOWN_STATE...)
# set node uuid state [1|0] : set the node for the given uuid to the state (RUNNING, DOWN...)
# and modify the partition if asked
# set cluster name [shutdown|operational] : either shutdown the cluster or mark it as operational
current_action = action_dict
......
......@@ -20,7 +20,7 @@ from neo.connection import ClientConnection
from neo.event import EventManager
from neo.neoctl.handler import CommandEventHandler
from neo import protocol
from neo.protocol import ClusterStates
from neo.protocol import ClusterStates, NodeStates
class NotReadyException(Exception):
pass
......@@ -133,7 +133,7 @@ class NeoCTL(object):
"""
Set node into "down" state and remove it from partition table.
"""
self.setNodeState(node, protocol.DOWN_STATE, update_partition_table=1)
self.setNodeState(node, NodeStates.DOWN, update_partition_table=1)
def getPrimaryMaster(self):
"""
......
......@@ -18,15 +18,14 @@
from time import time
from neo import logging
from neo import protocol
from neo.util import dump
from neo.protocol import NodeTypes
from neo.protocol import NodeTypes, NodeStates
class Node(object):
"""This class represents a node."""
def __init__(self, manager, address=None, uuid=None,
state=protocol.UNKNOWN_STATE):
state=NodeStates.UNKNOWN):
self._state = state
self._address = address
self._uuid = uuid
......@@ -89,42 +88,42 @@ class Node(object):
def isRunning(self):
# FIXME: is it like 'connected' ?
return self._state == protocol.RUNNING_STATE
return self._state == NodeStates.RUNNING
def isTemporarilyDown(self):
# FIXME: is it like 'unconnected' or UNKNOWN_STATE ?
return self._state == protocol.TEMPORARILY_DOWN_STATE
# FIXME: is it like 'unconnected' or UNKNOWN state ?
return self._state == NodeStates.TEMPORARILY_DOWN
def isDown(self):
# FIXME: is it like 'unconnected' or 'forgotten' ?
return self._state == protocol.DOWN_STATE
return self._state == NodeStates.DOWN
def isBroken(self):
return self._state == protocol.BROKEN_STATE
return self._state == NodeStates.BROKEN
def isHidden(self):
return self._state == protocol.HIDDEN_STATE
return self._state == NodeStates.HIDDEN
def isPending(self):
return self._state == protocol.PENDING_STATE
return self._state == NodeStates.PENDING
def setRunning(self):
self.setState(protocol.RUNNING_STATE)
self.setState(NodeStates.RUNNING)
def setTemporarilyDown(self):
self.setState(protocol.TEMPORARILY_DOWN_STATE)
self.setState(NodeStates.TEMPORARILY_DOWN)
def setDown(self):
self.setState(protocol.DOWN_STATE)
self.setState(NodeStates.DOWN)
def setBroken(self):
self.setState(protocol.BROKEN_STATE)
self.setState(NodeStates.BROKEN)
def setHidden(self):
self.setState(protocol.HIDDEN_STATE)
self.setState(NodeStates.HIDDEN)
def setPending(self):
self.setState(protocol.PENDING_STATE)
self.setState(NodeStates.PENDING)
def asTuple(self):
        """ Returned tuple is intended to be used in protocol encoders """
......@@ -318,7 +317,7 @@ class NodeManager(object):
node = node_by_uuid or node_by_addr
log_args = (node_type, dump(uuid), addr, state)
if state == protocol.DOWN_STATE:
if state == NodeStates.DOWN:
# drop down nodes
logging.debug('drop node %s %s %s %s' % log_args)
self.remove(node)
......
......@@ -336,26 +336,27 @@ class NodeTypes(Enum):
NodeTypes = NodeTypes()
# Node states.
node_states = OldEnum({
'RUNNING_STATE': 0,
'TEMPORARILY_DOWN_STATE': 1,
'DOWN_STATE': 2,
'BROKEN_STATE': 3,
'HIDDEN_STATE' : 4,
'PENDING_STATE': 5,
'UNKNOWN_STATE': 6,
})
class NodeStates(Enum):
RUNNING = Enum.Item(1)
TEMPORARILY_DOWN = Enum.Item(2)
DOWN = Enum.Item(3)
BROKEN = Enum.Item(4)
HIDDEN = Enum.Item(5)
PENDING = Enum.Item(6)
UNKNOWN = Enum.Item(7)
NodeStates = NodeStates()
# used for logging
node_state_prefix_dict = {
RUNNING_STATE: 'R',
TEMPORARILY_DOWN_STATE: 'T',
DOWN_STATE: 'D',
BROKEN_STATE: 'B',
HIDDEN_STATE: 'H',
PENDING_STATE: 'P',
UNKNOWN_STATE: 'U',
NodeStates.RUNNING: 'R',
NodeStates.TEMPORARILY_DOWN: 'T',
NodeStates.DOWN: 'D',
NodeStates.BROKEN: 'B',
NodeStates.HIDDEN: 'H',
NodeStates.PENDING: 'P',
NodeStates.UNKNOWN: 'U',
}
# Partition cell states.
......@@ -505,7 +506,7 @@ def _decodeClusterState(state):
return cluster_state
def _decodeNodeState(state):
node_state = node_states.get(state)
node_state = NodeStates.get(state)
if node_state is None:
raise PacketMalformedError('invalid node state %d' % state)
return node_state
......
......@@ -219,7 +219,7 @@ class PartitionTable(object):
DEBUG:root:pt: 00000000: .UU.|U..U|.UU.|U..U|.UU.|U..U|.UU.|U..U|.UU.
DEBUG:root:pt: 00000009: U..U|.UU.|U..U|.UU.|U..U|.UU.|U..U|.UU.|U..U
Here, there are 4 nodes in RUNNING_STATE.
Here, there are 4 nodes in RUNNING state.
The first partition has 2 replicas in UP_TO_DATE_STATE, on nodes 1 and
2 (nodes 0 and 3 are displayed as unused for that partition by
displaying a dot).
......
......@@ -19,10 +19,9 @@ from neo import logging
from neo.handler import EventHandler
from neo import protocol
from neo.protocol import BROKEN_STATE, \
DOWN_STATE, TEMPORARILY_DOWN_STATE, HIDDEN_STATE
from neo.util import dump
from neo.exception import PrimaryFailure, OperationFailure
from neo.protocol import NodeStates
class BaseStorageHandler(EventHandler):
"""This class implements a generic part of the event handlers."""
......@@ -52,11 +51,12 @@ class BaseMasterHandler(BaseStorageHandler):
if uuid == self.app.uuid:
                # This is me, do what the master tells me
logging.info("I was told I'm %s" %(state))
if state in (DOWN_STATE, TEMPORARILY_DOWN_STATE, BROKEN_STATE):
if state in (NodeStates.DOWN, NodeStates.TEMPORARILY_DOWN,
NodeStates.BROKEN):
conn.close()
erase = state == DOWN_STATE
erase = state == NodeStates.DOWN
self.app.shutdown(erase=erase)
elif state == HIDDEN_STATE:
elif state == NodeStates.HIDDEN:
raise OperationFailure
......
......@@ -18,9 +18,8 @@
from neo import logging
from neo.storage.handlers import BaseMasterHandler
from neo.protocol import BROKEN_STATE, DOWN_STATE, \
TEMPORARILY_DOWN_STATE, DISCARDED_STATE, OUT_OF_DATE_STATE
from neo.protocol import NodeTypes
from neo.protocol import DISCARDED_STATE, OUT_OF_DATE_STATE
from neo.protocol import NodeTypes, NodeStates
class HiddenHandler(BaseMasterHandler):
"""This class implements a generic part of the event handlers."""
......@@ -38,9 +37,10 @@ class HiddenHandler(BaseMasterHandler):
if node_type == NodeTypes.STORAGE:
if uuid == self.app.uuid:
                # This is me, do what the master tells me
if state in (DOWN_STATE, TEMPORARILY_DOWN_STATE, BROKEN_STATE):
if state in (NodeStates.DOWN, NodeStates.TEMPORARILY_DOWN,
NodeStates.BROKEN):
conn.close()
erase_db = state == DOWN_STATE
erase_db = state == NodeStates.DOWN
self.app.shutdown(erase=erase_db)
def handleRequestNodeIdentification(self, conn, packet, node_type,
......
......@@ -20,8 +20,8 @@ from random import choice
from neo.storage.handlers import replication
from neo import protocol
from neo.protocol import UP_TO_DATE_STATE, OUT_OF_DATE_STATE, RUNNING_STATE
from neo.protocol import NodeTypes
from neo.protocol import UP_TO_DATE_STATE, OUT_OF_DATE_STATE
from neo.protocol import NodeTypes, NodeStates
from neo.connection import ClientConnection
from neo.util import dump
......@@ -155,7 +155,7 @@ class Replicator(object):
cell_list = app.pt.getCellList(self.current_partition.getRID(),
readable=True)
node_list = [cell.getNode() for cell in cell_list
if cell.getNodeState() == RUNNING_STATE]
if cell.getNodeState() == NodeStates.RUNNING]
node = choice(node_list)
except IndexError:
# Not operational.
......
......@@ -23,8 +23,7 @@ from neo.tests import NeoTestBase
from neo import protocol
from neo.pt import PartitionTable
from neo.protocol import UnexpectedPacketError, INVALID_UUID
from neo.protocol import NodeTypes, INVALID_PTID, \
RUNNING_STATE, BROKEN_STATE, TEMPORARILY_DOWN_STATE, \
from neo.protocol import NodeTypes, NodeStates, INVALID_PTID, \
UP_TO_DATE_STATE, FEEDING_STATE, DISCARDED_STATE
from neo.client.handlers import BaseHandler
from neo.client.handlers.master import PrimaryBootstrapHandler
......@@ -90,7 +89,7 @@ class ClientHandlerTests(NeoTestBase):
#self.assertEquals(app.master_conn, None)
#self.assertEquals(app.primary_master_node, None)
def _testStorageWithMethod(self, method, handler_class, state=TEMPORARILY_DOWN_STATE):
def _testStorageWithMethod(self, method, handler_class, state=NodeStates.TEMPORARILY_DOWN):
storage_ip = '127.0.0.1'
storage_port = 10011
fake_storage_node_uuid = self.getNewUUID()
......@@ -200,9 +199,9 @@ class ClientHandlerTests(NeoTestBase):
def test_storagePeerBroken(self):
self._testStorageWithMethod(self._testPeerBroken,
StorageBootstrapHandler, state=BROKEN_STATE)
StorageBootstrapHandler, state=NodeStates.BROKEN)
self._testStorageWithMethod(self._testPeerBroken,
StorageAnswersHandler, state=BROKEN_STATE)
StorageAnswersHandler, state=NodeStates.BROKEN)
def test_notReady(self):
app = Mock({'setNodeNotReady': None})
......@@ -571,7 +570,7 @@ class ClientHandlerTests(NeoTestBase):
# first notify unknown master nodes
uuid = self.getNewUUID()
test_node = (NodeTypes.MASTER, '127.0.0.1', 10010, uuid,
RUNNING_STATE)
NodeStates.RUNNING)
nm = self._testNotifyNodeInformation(test_node, getByUUID=None)
# Check that two nodes got added (second is with INVALID_UUID)
add_call_list = nm.mockGetNamedCalls('add')
......@@ -585,7 +584,7 @@ class ClientHandlerTests(NeoTestBase):
node = Mock({})
uuid = self.getNewUUID()
test_node = (NodeTypes.MASTER, '127.0.0.1', 10010, uuid,
RUNNING_STATE)
NodeStates.RUNNING)
nm = self._testNotifyNodeInformation(test_node, getByAddress=node,
getByUUID=node)
# Check that node got replaced
......@@ -600,7 +599,7 @@ class ClientHandlerTests(NeoTestBase):
def test_unknownStorageNotifyNodeInformation(self):
test_node = (NodeTypes.STORAGE, '127.0.0.1', 10010, self.getNewUUID(),
RUNNING_STATE)
NodeStates.RUNNING)
nm = self._testNotifyNodeInformation(test_node, getByUUID=None)
# Check that node got added
add_call_list = nm.mockGetNamedCalls('add')
......@@ -615,7 +614,7 @@ class ClientHandlerTests(NeoTestBase):
def test_knownStorageNotifyNodeInformation(self):
node = Mock({'setState': None, 'setAddress': None})
test_node = (NodeTypes.STORAGE, '127.0.0.1', 10010, self.getNewUUID(),
RUNNING_STATE)
NodeStates.RUNNING)
nm = self._testNotifyNodeInformation(test_node, getByUUID=node)
# Check that node got replaced
add_call_list = nm.mockGetNamedCalls('add')
......@@ -735,7 +734,7 @@ class ClientHandlerTests(NeoTestBase):
setCell_call_list[0].checkArgs(test_cell_list[0][0], added_node,
test_cell_list[0][2])
# TODO: confirm condition under which an unknown node should be added with a TEMPORARILY_DOWN_STATE (implementation is unclear)
# TODO: confirm condition under which an unknown node should be added with a TEMPORARILY_DOWN (implementation is unclear)
def test_knownNodeNotifyPartitionChanges(self):
test_ptid = 1
......@@ -764,8 +763,8 @@ class ClientHandlerTests(NeoTestBase):
self.assertEquals(calls[0].getParam(0).getUUID(), uuid2)
self.assertEquals(calls[1].getParam(0).getUUID(), uuid3)
self.assertEquals(calls[2].getParam(0).getUUID(), uuid4)
self.assertEquals(calls[0].getParam(0).getState(), TEMPORARILY_DOWN_STATE)
self.assertEquals(calls[1].getParam(0).getState(), TEMPORARILY_DOWN_STATE)
self.assertEquals(calls[0].getParam(0).getState(), NodeStates.TEMPORARILY_DOWN)
self.assertEquals(calls[1].getParam(0).getState(), NodeStates.TEMPORARILY_DOWN)
# and the others are updated
self.assertEqual(app.ptid, test_ptid + 1)
calls = app.pt.mockGetNamedCalls('setCell')
......
......@@ -20,6 +20,7 @@ import unittest
from neo.tests.functional import NEOCluster, NEOFunctionalTest
from neo.neoctl.neoctl import NotReadyException
from neo import protocol
from neo.protocol import NodeStates
from neo.util import dump
MASTER_NODE_COUNT = 3
......@@ -62,7 +63,7 @@ class MasterTests(NEOFunctionalTest):
self.assertEqual(len(killed_uuid_list), 1)
uuid = killed_uuid_list[0]
# Check the state of the primary we just killed
self.neo.expectMasterState(uuid, (None, protocol.UNKNOWN_STATE))
self.neo.expectMasterState(uuid, (None, NodeStates.UNKNOWN))
self.assertEqual(self.neo.getPrimaryMaster(), None)
        # Check that a primary master arose.
self.neo.expectPrimaryMaster(timeout=10)
......@@ -72,7 +73,7 @@ class MasterTests(NEOFunctionalTest):
def testStoppingPrimaryMasterWithOneSecondary(self):
self.neo.expectAllMasters(MASTER_NODE_COUNT,
state=protocol.RUNNING_STATE)
state=NodeStates.RUNNING)
# Kill one secondary master.
killed_uuid_list = self.neo.killSecondaryMaster()
......@@ -86,7 +87,7 @@ class MasterTests(NEOFunctionalTest):
self.assertEqual(len(killed_uuid_list), 1)
uuid = killed_uuid_list[0]
# Check the state of the primary we just killed
self.neo.expectMasterState(uuid, (None, protocol.UNKNOWN_STATE))
self.neo.expectMasterState(uuid, (None, NodeStates.UNKNOWN))
self.assertEqual(self.neo.getPrimaryMaster(), None)
        # Check that a primary master arose.
self.neo.expectPrimaryMaster(timeout=10)
......@@ -96,7 +97,7 @@ class MasterTests(NEOFunctionalTest):
def testMasterSequentialStart(self):
self.neo.expectAllMasters(MASTER_NODE_COUNT,
state=protocol.RUNNING_STATE)
state=NodeStates.RUNNING)
master_list = self.neo.getMasterProcessList()
# Stop the cluster (so we can start processes manually)
......@@ -110,7 +111,7 @@ class MasterTests(NEOFunctionalTest):
self.neo.expectPrimaryMaster(first_master_uuid, timeout=30)
# Check that no other node is known as running.
self.assertEqual(len(self.neo.getMasterList(
state=protocol.RUNNING_STATE)), 1)
state=NodeStates.RUNNING)), 1)
# Start a second master.
second_master = master_list[1]
......@@ -120,7 +121,7 @@ class MasterTests(NEOFunctionalTest):
second_master.start()
        # Check that the second master is running under its known UUID.
self.neo.expectMasterState(second_master.getUUID(),
protocol.RUNNING_STATE)
NodeStates.RUNNING)
# Check that the primary master didn't change.
self.assertEqual(self.neo.getPrimaryMaster(), first_master_uuid)
......@@ -132,7 +133,7 @@ class MasterTests(NEOFunctionalTest):
third_master.start()
        # Check that the third master is running under its known UUID.
self.neo.expectMasterState(third_master.getUUID(),
protocol.RUNNING_STATE)
NodeStates.RUNNING)
# Check that the primary master didn't change.
self.assertEqual(self.neo.getPrimaryMaster(), first_master_uuid)
......
......@@ -23,7 +23,7 @@ from Persistence import Persistent
from neo.tests.functional import NEOCluster, NEOFunctionalTest
from neo.client.Storage import Storage as NEOStorage
from neo import protocol
from neo.protocol import ClusterStates
from neo.protocol import ClusterStates, NodeStates
class PObject(Persistent):
......@@ -106,18 +106,18 @@ class StorageTests(NEOFunctionalTest):
self.__checkDatabase(db_name)
# check storages state
storage_list = self.neo.getStorageList(protocol.RUNNING_STATE)
storage_list = self.neo.getStorageList(NodeStates.RUNNING)
self.assertEqual(len(storage_list), 2)
def __expectRunning(self, process):
self.neo.expectStorageState(process.getUUID(), protocol.RUNNING_STATE)
self.neo.expectStorageState(process.getUUID(), NodeStates.RUNNING)
def __expectPending(self, process):
self.neo.expectStorageState(process.getUUID(), protocol.PENDING_STATE)
self.neo.expectStorageState(process.getUUID(), NodeStates.PENDING)
def __expectUnavailable(self, process):
self.neo.expectStorageState(process.getUUID(),
protocol.TEMPORARILY_DOWN_STATE)
NodeStates.TEMPORARILY_DOWN)
def __expectNotKnown(self, process):
def expected_storage_not_known(last_try):
......
This diff is collapsed.
This diff is collapsed.
......@@ -18,9 +18,9 @@
import unittest, os
from mock import Mock
from neo.tests import NeoTestBase
from neo.protocol import NodeStates
from neo.protocol import UP_TO_DATE_STATE, OUT_OF_DATE_STATE, FEEDING_STATE, \
DISCARDED_STATE, RUNNING_STATE, TEMPORARILY_DOWN_STATE, DOWN_STATE, \
BROKEN_STATE, INVALID_UUID
DISCARDED_STATE, INVALID_UUID
from neo.pt import Cell
from neo.master.pt import PartitionTable
from neo.node import StorageNode
......@@ -115,15 +115,15 @@ class MasterPartitionTableTests(NeoTestBase):
num_replicas = 3
pt = PartitionTable(num_partitions, num_replicas)
pt.setCell(0, sn1, OUT_OF_DATE_STATE)