Commit c90445fc authored by Grégory Wisniewski's avatar Grégory Wisniewski

Outdate cells of any storage node lost to ensure the replication process is

started when it comes back. This causes partition table changes to be broadcast at
storage connection closure and might be optimized in the future.


git-svn-id: https://svn.erp5.org/repos/neo/branches/prototype3@908 71dcc9de-d417-0410-9af5-da40c76e7ee4
parent 7b7ee181
...@@ -308,6 +308,12 @@ class Application(object): ...@@ -308,6 +308,12 @@ class Application(object):
size -= amt size -= amt
start += amt start += amt
def outdateAndBroadcastPartition(self):
    """Outdate cells of non-working nodes and broadcast the changes.

    Marks every cell held by a non-running node as outdated via the
    partition table, then — only if at least one cell actually changed —
    broadcasts the modified cells together with a freshly incremented
    partition table ID.
    """
    # NOTE: original docstring was malformed (" ... """), relying on
    # accidental implicit string concatenation; fixed to proper triple quotes.
    cell_list = self.pt.outdate()
    if cell_list:
        # A new PTID is allocated only when there is something to announce.
        self.broadcastPartitionChanges(self.pt.setNextID(), cell_list)
def sendPartitionTable(self, conn): def sendPartitionTable(self, conn):
""" Send the partition table through the given connection """ """ Send the partition table through the given connection """
row_list = [] row_list = []
......
...@@ -37,10 +37,13 @@ class StorageServiceHandler(BaseServiceHandler): ...@@ -37,10 +37,13 @@ class StorageServiceHandler(BaseServiceHandler):
conn.notify(protocol.startOperation()) conn.notify(protocol.startOperation())
def _nodeLost(self, conn, node): def _nodeLost(self, conn, node):
pt = self.app.pt # XXX: here the cells are outdated to trigger the replication process
# TODO: check this, is it need ? do we have to broadcast changes ? # when the node will come back. It might be better to reduce network
pt.outdate() # overload since other nodes know that it's temporarily down and thus,
if not pt.operational(): # outdate by themselves its cells.
logging.info('storage node lost')
self.app.outdateAndBroadcastPartition()
if not self.app.pt.operational():
raise OperationFailure, 'cannot continue operation' raise OperationFailure, 'cannot continue operation'
def handleNotifyInformationLocked(self, conn, packet, tid): def handleNotifyInformationLocked(self, conn, packet, tid):
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment