Commit 8a9fed79 authored by Grégory Wisniewski

Wrap lines to fit in 80 columns.


git-svn-id: https://svn.erp5.org/repos/neo/trunk@1365 71dcc9de-d417-0410-9af5-da40c76e7ee4
parent 007ab1e5
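The change below is purely mechanical: statements longer than 80 columns are re-wrapped without altering behaviour. Three wrapping idioms recur throughout the hunks; the following is a minimal, self-contained sketch of them (names and values are hypothetical, only the wrapping styles mirror the diff):

    import logging

    def send_report(conn, min_offset, max_offset, uuid):   # hypothetical helper
        # 1. Break an argument list after a comma and indent the continuation.
        logging.info('ask partition list from %s to %s for %s',
            min_offset, max_offset, uuid)

        # 2. Split a long string literal into adjacent literals, optionally
        #    joined by a backslash line continuation.
        message = 'not connected to a primary ' \
            'master'

        # 3. Continue a long boolean expression with a trailing backslash.
        if conn is not None and \
                uuid is not None:
            return message
        return None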
......@@ -27,12 +27,14 @@ class AdminEventHandler(EventHandler):
"""This class deals with events for administrating cluster."""
def askPartitionList(self, conn, packet, min_offset, max_offset, uuid):
logging.info("ask partition list from %s to %s for %s" %(min_offset, max_offset, dump(uuid)))
logging.info("ask partition list from %s to %s for %s" %
(min_offset, max_offset, dump(uuid)))
app = self.app
# check that we have a pt, otherwise ask the PMN for it
if app.pt is None:
if self.app.master_conn is None:
raise protocol.NotReadyError('Not connected to a primary master.')
raise protocol.NotReadyError('Not connected to a primary ' \
'master.')
p = Packets.AskPartitionTable([])
msg_id = self.app.master_conn.ask(p)
app.dispatcher.register(msg_id, conn,
......@@ -41,7 +43,8 @@ class AdminEventHandler(EventHandler):
'uuid' : uuid,
'msg_id' : packet.getId()})
else:
app.sendPartitionTable(conn, min_offset, max_offset, uuid, packet.getId())
app.sendPartitionTable(conn, min_offset, max_offset, uuid,
packet.getId())
def askNodeList(self, conn, packet, node_type):
......@@ -89,10 +92,12 @@ class AdminEventHandler(EventHandler):
def askClusterState(self, conn, packet):
if self.app.cluster_state is None:
if self.app.master_conn is None:
raise protocol.NotReadyError('Not connected to a primary master.')
raise protocol.NotReadyError('Not connected to a primary ' \
'master.')
# request it from the PMN first
msg_id = self.app.master_conn.ask(Packets.AskClusterState())
self.app.dispatcher.register(msg_id, conn, {'msg_id' : packet.getId()})
self.app.dispatcher.register(msg_id, conn,
{'msg_id' : packet.getId()})
else:
conn.answer(Packets.AnswerClusterState(self.app.cluster_state),
packet.getId())
......
......@@ -18,7 +18,8 @@
from ZODB import BaseStorage, ConflictResolution, POSException
from neo.client.app import Application
from neo.client.exception import NEOStorageConflictError, NEOStorageNotFoundError
from neo.client.exception import NEOStorageConflictError, \
NEOStorageNotFoundError
class Storage(BaseStorage.BaseStorage,
ConflictResolution.ConflictResolvingStorage):
......@@ -62,7 +63,8 @@ class Storage(BaseStorage.BaseStorage,
def tpc_begin(self, transaction, tid=None, status=' '):
if self._is_read_only:
raise POSException.ReadOnlyError()
return self.app.tpc_begin(transaction=transaction, tid=tid, status=status)
return self.app.tpc_begin(transaction=transaction, tid=tid,
status=status)
def tpc_vote(self, transaction):
if self._is_read_only:
......
......@@ -21,7 +21,7 @@ class NeoStorage(BaseConfig):
def open(self):
from Storage import Storage
return Storage(master_nodes = self.config.master_nodes, name = self.config.name,
connector = self.config.connector)
return Storage(master_nodes=self.config.master_nodes,
name=self.config.name, connector=self.config.connector)
......@@ -109,14 +109,16 @@ class PrimaryNotificationsHandler(BaseHandler):
app.master_conn = None
app.primary_master_node = None
else:
logging.warn('app.master_conn is %s, but we are closing %s', app.master_conn, conn)
logging.warn('app.master_conn is %s, but we are closing %s',
app.master_conn, conn)
super(PrimaryNotificationsHandler, self).connectionClosed(conn)
def timeoutExpired(self, conn):
app = self.app
if app.master_conn is not None:
assert conn is app.master_conn
logging.critical("connection timeout to primary master node expired")
logging.critical("connection timeout to primary master node ' \
'expired")
BaseHandler.timeoutExpired(self, conn)
def peerBroken(self, conn):
......
......@@ -78,8 +78,8 @@ class StorageAnswersHandler(AnswerBaseHandler):
def answerObject(self, conn, packet, oid, start_serial, end_serial,
compression, checksum, data):
app = self.app
app.local_var.asked_object = (oid, start_serial, end_serial, compression,
checksum, data)
app.local_var.asked_object = (oid, start_serial, end_serial,
compression, checksum, data)
def answerStoreObject(self, conn, packet, conflicting, oid, serial):
app = self.app
......
......@@ -131,7 +131,8 @@ class MQ(object):
- The size calculation is not accurate.
"""
def __init__(self, life_time=10000, buffer_levels=9, max_history_size=100000, max_size=20*1024*1024):
def __init__(self, life_time=10000, buffer_levels=9,
max_history_size=100000, max_size=20*1024*1024):
self._history_buffer = FIFO()
self._cache_buffers = []
for level in range(buffer_levels):
......@@ -203,7 +204,8 @@ class MQ(object):
except KeyError:
counter = 1
# XXX It might be better to adjust the level according to the object size.
# XXX It might be better to adjust the level according to the object
# size.
level = min(int(log(counter, 2)), self._buffer_levels - 1)
element = cache_buffers[level].append()
data = Data()
......
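For context on the hunk above: the cache level that the rewrapped XXX comment refers to is derived from an object's access counter, growing with log2 of the counter and capped at the last buffer. A standalone sketch of that computation (assuming the counter starts at 1, as in the hunk):

    from math import log

    def cache_level(counter, buffer_levels=9):
        # Frequently fetched objects are promoted to higher cache buffers;
        # the level grows with log2 of the access counter and is capped at
        # the last buffer (buffer_levels - 1).
        return min(int(log(counter, 2)), buffer_levels - 1)

    # counter=1 -> 0, counter=8 -> 3, counter=10**6 -> capped at 8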
......@@ -50,7 +50,9 @@ def lockCheckWrapper(func):
def wrapper(self, *args, **kw):
if not self._lock._is_owned():
import traceback
logging.warning('%s called on %s instance without being locked. Stack:\n%s', func.func_code.co_name, self.__class__.__name__, ''.join(traceback.format_stack()))
logging.warning('%s called on %s instance without being locked.' \
' Stack:\n%s', func.func_code.co_name, self.__class__.__name__,
''.join(traceback.format_stack()))
# Call anyway
return func(self, *args, **kw)
return wrapper
......@@ -366,7 +368,7 @@ class Connection(BaseConnection):
if self.write_buf:
self.em.addWriter(self)
def expectMessage(self, msg_id = None, timeout = 5, additional_timeout = 30):
def expectMessage(self, msg_id=None, timeout=5, additional_timeout=30):
"""Expect a message for a reply to a given message ID or any message.
The purpose of this method is to define how much time is
......@@ -403,7 +405,9 @@ class Connection(BaseConnection):
@not_closed
def ask(self, packet, timeout=5, additional_timeout=30):
""" Send a packet with a new ID and register the expectation of an answer """
"""
Send a packet with a new ID and register the expectation of an answer
"""
msg_id = self._getNextId()
packet.setId(msg_id)
self.expectMessage(msg_id)
......
......@@ -70,8 +70,8 @@ class SocketConnector:
raise ConnectorInProgressException
if err == errno.ECONNREFUSED:
raise ConnectorConnectionRefusedException
raise ConnectorException, 'makeClientConnection to %s failed: ' \
'%s:%s' % (addr, err, errmsg)
raise ConnectorException, 'makeClientConnection to %s failed:' \
' %s:%s' % (addr, err, errmsg)
finally:
logging.debug('%r connecting to %r', self.socket.getsockname(),
addr)
......@@ -85,15 +85,16 @@ class SocketConnector:
self.socket.listen(5)
except socket.error, (err, errmsg):
self.socket.close()
raise ConnectorException, 'makeListeningConnection on %s failed: %s:%s' % \
(addr, err, errmsg)
raise ConnectorException, 'makeListeningConnection on %s failed:' \
' %s:%s' % (addr, err, errmsg)
def getError(self):
return self.socket.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
def getDescriptor(self):
# this descriptor must only be used by the event manager, where it guarantees
# uniqueness only while the connector is open and registered in epoll
# this descriptor must only be used by the event manager, where it
# guarantees uniqueness only while the connector is open and registered
# in epoll
return self.socket.fileno()
def getNewConnection(self):
......
......@@ -22,8 +22,10 @@ from time import time
from neo.epoll import Epoll
class IdleEvent(object):
"""This class represents an event called when a connection is waiting for
a message too long."""
"""
This class represents an event called when a connection is waiting for
a message too long.
"""
def __init__(self, conn, msg_id, timeout, additional_timeout):
self._conn = conn
......@@ -141,8 +143,8 @@ class SelectEventManager(object):
self._addPendingConnection(to_process)
def _poll(self, timeout = 1):
rlist, wlist, xlist = select(self.reader_set, self.writer_set, self.exc_list,
timeout)
rlist, wlist, xlist = select(self.reader_set, self.writer_set,
self.exc_list, timeout)
for s in rlist:
conn = self.connection_dict[s]
conn.lock()
......
......@@ -37,7 +37,8 @@ class EventHandler(object):
# if decoding fail, there's no packet instance
logging.error('malformed packet from %s:%d: %s', *args)
else:
logging.error('malformed packet %s from %s:%d: %s', packet.getType(), *args)
logging.error('malformed packet %s from %s:%d: %s',
packet.getType(), *args)
response = protocol.protocolError(message)
if packet is not None:
conn.answer(response, packet.getId())
......@@ -386,7 +387,8 @@ class EventHandler(object):
d[Packets.StartOperation] = self.startOperation
d[Packets.StopOperation] = self.stopOperation
d[Packets.AskUnfinishedTransactions] = self.askUnfinishedTransactions
d[Packets.AnswerUnfinishedTransactions] = self.answerUnfinishedTransactions
d[Packets.AnswerUnfinishedTransactions] = \
self.answerUnfinishedTransactions
d[Packets.AskObjectPresent] = self.askObjectPresent
d[Packets.AnswerObjectPresent] = self.answerObjectPresent
d[Packets.DeleteTransaction] = self.deleteTransaction
......@@ -411,7 +413,8 @@ class EventHandler(object):
d[Packets.AskTIDs] = self.askTIDs
d[Packets.AnswerTIDs] = self.answerTIDs
d[Packets.AskTransactionInformation] = self.askTransactionInformation
d[Packets.AnswerTransactionInformation] = self.answerTransactionInformation
d[Packets.AnswerTransactionInformation] = \
self.answerTransactionInformation
d[Packets.AskObjectHistory] = self.askObjectHistory
d[Packets.AnswerObjectHistory] = self.answerObjectHistory
d[Packets.AskOIDs] = self.askOIDs
......
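The dictionaries being rewrapped in the two hunks above form the handler's packet dispatch table: each packet class is mapped to the bound method that handles it. A minimal sketch of that pattern (the packet and handler names here are illustrative stand-ins, not the real NEO classes):

    class AskTIDs(object):                    # stand-in packet classes
        pass

    class AnswerTIDs(object):
        pass

    class PacketHandler(object):
        def __init__(self):
            d = {}
            d[AskTIDs] = self.askTIDs
            d[AnswerTIDs] = self.answerTIDs
            self.packet_dispatch_table = d

        def askTIDs(self, conn, packet):
            return 'handled AskTIDs'

        def answerTIDs(self, conn, packet):
            return 'handled AnswerTIDs'

        def dispatch(self, conn, packet):
            # Route the packet to the method registered for its type.
            return self.packet_dispatch_table[type(packet)](conn, packet)

    handler = PacketHandler()
    print(handler.dispatch(None, AskTIDs()))  # -> handled AskTIDs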
......@@ -48,7 +48,8 @@ class LockUser(object):
return isinstance(other, self.__class__) and self.ident == other.ident
def __repr__(self):
return '%s@%s:%s %s' % (self.ident, self.caller[0], self.caller[1], self.caller[3])
return '%s@%s:%s %s' % (self.ident, self.caller[0], self.caller[1],
self.caller[3])
def formatStack(self):
return ''.join(traceback.format_list(self.stack))
......@@ -59,7 +60,8 @@ class VerboseLockBase(object):
self.debug_lock = debug_lock
self.owner = None
self.waiting = []
self._note('%s@%X created by %r', self.__class__.__name__, id(self), LockUser(1))
self._note('%s@%X created by %r', self.__class__.__name__, id(self),
LockUser(1))
def _note(self, fmt, *args):
sys.stderr.write(fmt % args + '\n')
......@@ -75,12 +77,16 @@ class VerboseLockBase(object):
def acquire(self, blocking=1):
me = LockUser()
owner = self._getOwner()
self._note('[%r]%s.acquire(%s) Waiting for lock. Owned by:%r Waiting:%r', me, self, blocking, owner, self.waiting)
if (self.debug_lock and owner is not None) or (not self.reentrant and blocking and me == owner):
self._note('[%r]%s.acquire(%s) Waiting for lock. Owned by:%r ' \
'Waiting:%r', me, self, blocking, owner, self.waiting)
if (self.debug_lock and owner is not None) or \
(not self.reentrant and blocking and me == owner):
if me == owner:
self._note('[%r]%s.acquire(%s): Deadlock detected: I already own this lock:%r', me, self, blocking, owner)
self._note('[%r]%s.acquire(%s): Deadlock detected: ' \
'I already own this lock:%r', me, self, blocking, owner)
else:
self._note('[%r]%s.acquire(%s): debug lock triggered: %r', me, self, blocking, owner)
self._note('[%r]%s.acquire(%s): debug lock triggered: %r',
me, self, blocking, owner)
self._note('Owner traceback:\n%s', owner.formatStack())
self._note('My traceback:\n%s', me.formatStack())
self.waiting.append(me)
......@@ -89,7 +95,8 @@ class VerboseLockBase(object):
finally:
self.owner = me
self.waiting.remove(me)
self._note('[%r]%s.acquire(%s) Lock granted. Waiting: %r', me, self, blocking, self.waiting)
self._note('[%r]%s.acquire(%s) Lock granted. Waiting: %r',
me, self, blocking, self.waiting)
def release(self):
me = LockUser()
......@@ -104,7 +111,8 @@ class VerboseLockBase(object):
class VerboseRLock(VerboseLockBase):
def __init__(self, verbose=None, debug_lock=False):
super(VerboseRLock, self).__init__(reentrant=True, debug_lock=debug_lock)
super(VerboseRLock, self).__init__(reentrant=True,
debug_lock=debug_lock)
self.lock = threading_RLock()
def _locked(self):
......
......@@ -26,8 +26,8 @@ from neo.protocol import ClusterStates, NodeStates, NodeTypes, Packets
from neo.node import NodeManager
from neo.event import EventManager
from neo.connection import ListeningConnection, ClientConnection
from neo.exception import ElectionFailure, PrimaryFailure, VerificationFailure, \
OperationFailure
from neo.exception import ElectionFailure, PrimaryFailure, \
VerificationFailure, OperationFailure
from neo.master.handlers import election, identification, secondary, recovery
from neo.master.handlers import verification, storage, client, shutdown
from neo.master.handlers import administration
......@@ -63,8 +63,9 @@ class Application(object):
if partitions <= 0:
raise RuntimeError, 'partitions must be more than zero'
self.pt = PartitionTable(partitions, replicas)
logging.debug('the number of replicas is %d, the number of partitions is %d, the name is %s',
replicas, partitions, self.name)
logging.debug('the number of replicas is %d, the number of ' \
'partitions is %d, the name is %s',
replicas, partitions, self.name)
self.listening_conn = None
self.primary = None
......@@ -173,16 +174,18 @@ class Application(object):
t = current_time
for node in nm.getMasterList():
if node.isTemporarilyDown() \
and node.getLastStateChange() + expiration < current_time:
and node.getLastStateChange() + \
expiration < current_time:
logging.info('%s is down' % (node, ))
node.setDown()
self.unconnected_master_node_set.discard(node.getAddress())
self.unconnected_master_node_set.discard(
node.getAddress())
# Try to connect to master nodes.
if self.unconnected_master_node_set:
for addr in list(self.unconnected_master_node_set):
ClientConnection(em, client_handler, addr = addr,
connector_handler = self.connector_handler)
ClientConnection(em, client_handler, addr=addr,
connector_handler=self.connector_handler)
em.poll(1)
if len(self.unconnected_master_node_set) == 0 \
and len(self.negotiating_master_node_set) == 0:
......@@ -195,7 +198,7 @@ class Application(object):
if self.primary is None:
# I am the primary.
self.primary = True
logging.debug('I am the primary, so sending an announcement')
logging.debug('I am the primary, sending an announcement')
for conn in em.getClientList():
conn.notify(Packets.AnnouncePrimary())
conn.abort()
......@@ -224,12 +227,14 @@ class Application(object):
if conn.getAddress() != addr:
conn.close()
# But if there is no such connection, something wrong happened.
# But if there is no such connection, something wrong
# happened.
for conn in em.getClientList():
if conn.getAddress() == addr:
break
else:
raise ElectionFailure, 'no connection remains to the primary'
raise ElectionFailure, 'no connection remains to ' \
'the primary'
return
except ElectionFailure, m:
......@@ -321,7 +326,8 @@ class Application(object):
row_list.append((offset, self.pt.getRow(offset)))
# Split the packet if too huge.
if len(row_list) == 1000:
conn.notify(Packets.SendPartitionTable( self.pt.getID(), row_list))
conn.notify(Packets.SendPartitionTable(self.pt.getID(),
row_list))
del row_list[:]
if row_list:
conn.notify(Packets.SendPartitionTable(self.pt.getID(), row_list))
......@@ -366,9 +372,12 @@ class Application(object):
pt.make(node_list)
def recoverStatus(self):
"""Recover the status about the cluster. Obtain the last OID, the last TID,
and the last Partition Table ID from storage nodes, then get back the latest
partition table or make a new table from scratch, if this is the first time."""
"""
Recover the status about the cluster. Obtain the last OID, the last
TID, and the last Partition Table ID from storage nodes, then get
back the latest partition table or make a new table from scratch,
if this is the first time.
"""
logging.info('begin the recovery of the status')
self.changeClusterState(ClusterStates.RECOVERING)
......@@ -545,16 +554,19 @@ class Application(object):
self.broadcastPartitionChanges(self.pt.setNextID(), cell_list)
def provideService(self):
"""This is the normal mode for a primary master node. Handle transactions
"""
This is the normal mode for a primary master node. Handle transactions
and stop the service only if a catastrophe happens or the user commits
a shutdown."""
a shutdown.
"""
logging.info('provide service')
em = self.em
nm = self.nm
self.changeClusterState(ClusterStates.RUNNING)
# This dictionary is used to hold information on transactions being finished.
# This dictionary is used to hold information on transactions being
# finished.
self.finishing_transaction_dict = {}
# Now everything is passive.
......@@ -562,12 +574,13 @@ class Application(object):
try:
em.poll(1)
except OperationFailure:
# If not operational, send Stop Operation packets to storage nodes
# and client nodes. Abort connections to client nodes.
logging.critical('No longer operational, so stopping the service')
# If not operational, send Stop Operation packets to storage
# nodes and client nodes. Abort connections to client nodes.
logging.critical('No longer operational, stopping the service')
for conn in em.getConnectionList():
node = nm.getByUUID(conn.getUUID())
if node is not None and (node.isStorage() or node.isClient()):
if node is not None and (node.isStorage()
or node.isClient()):
conn.notify(Packets.StopOperation())
if node.isClient():
conn.abort()
......@@ -580,7 +593,8 @@ class Application(object):
dump(self.uuid), *(self.server))
# all incoming connections identify through this handler
self.listening_conn.setHandler(identification.IdentificationHandler(self))
self.listening_conn.setHandler(
identification.IdentificationHandler(self))
handler = secondary.SecondaryMasterHandler(self)
em = self.em
......@@ -596,8 +610,8 @@ class Application(object):
conn.setHandler(handler)
# If I know any storage node, make sure that they are not in the running state,
# because they are not connected at this stage.
# If I know any storage node, make sure that they are not in the
# running state, because they are not connected at this stage.
for node in nm.getStorageList():
if node.isRunning():
node.setTemporarilyDown()
......@@ -613,7 +627,9 @@ class Application(object):
self.provideService()
def playSecondaryRole(self):
"""I play a secondary role, thus only wait for a primary master to fail."""
"""
I play a secondary role, thus only wait for a primary master to fail.
"""
logging.info('play the secondary role with %s (%s:%d)',
dump(self.uuid), *(self.server))
......@@ -631,7 +647,9 @@ class Application(object):
self.em.poll(1)
def changeClusterState(self, state):
""" Change the cluster state and apply right handler on each connections """
"""
Change the cluster state and apply right handler on each connections
"""
if self.cluster_state == state:
return
nm, em = self.nm, self.em
......
......@@ -86,7 +86,8 @@ class BaseServiceHandler(MasterHandler):
node = self.app.nm.getByUUID(conn.getUUID())
assert node is not None
if new_state != NodeStates.BROKEN:
new_state = DISCONNECTED_STATE_DICT.get(node.getType(), NodeStates.DOWN)
new_state = DISCONNECTED_STATE_DICT.get(node.getType(),
NodeStates.DOWN)
if node.getState() == new_state:
return
if new_state != NodeStates.BROKEN and node.isPending():
......
......@@ -43,7 +43,8 @@ class AdministrationHandler(MasterHandler):
self.app.shutdown()
def setNodeState(self, conn, packet, uuid, state, modify_partition_table):
logging.info("set node state for %s-%s : %s" % (dump(uuid), state, modify_partition_table))
logging.info("set node state for %s-%s : %s" %
(dump(uuid), state, modify_partition_table))
app = self.app
node = app.nm.getByUUID(uuid)
if node is None:
......
......@@ -111,7 +111,7 @@ class ClientServiceHandler(BaseServiceHandler):
# Collect the UUIDs of nodes related to this transaction.
uuid_set = set()
for part in partition_set:
uuid_set.update((cell.getUUID() for cell in app.pt.getCellList(part) \
uuid_set.update((cell.getUUID() for cell in app.pt.getCellList(part)
if cell.getNodeState() != NodeStates.HIDDEN))
# Request locking data.
......
......@@ -57,7 +57,8 @@ class IdentificationHandler(MasterHandler):
node.setAddress(address)
node.setRunning()
# ask the app for the node identification; if refused, an exception is raised
# ask the app for the node identification; if refused, an exception is
# raised
result = self.app.identifyNode(node_type, uuid, node)
(uuid, node, state, handler, node_ctor) = result
if uuid is None:
......
......@@ -46,8 +46,8 @@ class RecoveryHandler(MasterHandler):
app = self.app
if uuid != app.target_uuid:
# If this is not from a target node, ignore it.
logging.warn('got answer partition table from %s while waiting for %s',
dump(uuid), dump(app.target_uuid))
logging.warn('got answer partition table from %s while waiting ' \
'for %s', dump(uuid), dump(app.target_uuid))
return
# load unknown storage nodes
for offset, row in row_list:
......
......@@ -44,11 +44,13 @@ class StorageServiceHandler(BaseServiceHandler):
def askLastIDs(self, conn, packet):
app = self.app
conn.answer(Packets.AnswerLastIDs(app.loid, app.ltid, app.pt.getID()), packet.getId())
conn.answer(Packets.AnswerLastIDs(app.loid, app.ltid, app.pt.getID()),
packet.getId())
def askUnfinishedTransactions(self, conn, packet):
app = self.app
p = Packets.AnswerUnfinishedTransactions(app.finishing_transaction_dict.keys())
p = Packets.AnswerUnfinishedTransactions(
app.finishing_transaction_dict.keys())
conn.answer(p, packet.getId())
def notifyInformationLocked(self, conn, packet, tid):
......@@ -78,7 +80,8 @@ class StorageServiceHandler(BaseServiceHandler):
p = Packets.NotifyTransactionFinished(tid)
c.answer(p, t.getMessageId())
else:
p = Packets.InvalidateObjects(t.getOIDList(), tid)
p = Packets.InvalidateObjects(t.getOIDList(),
tid)
c.notify(p)
elif node.isStorage():
if uuid in t.getUUIDSet():
......@@ -107,16 +110,18 @@ class StorageServiceHandler(BaseServiceHandler):
continue
offset = cell[0]
logging.debug("node %s is up for offset %s" %(dump(node.getUUID()), offset))
logging.debug("node %s is up for offset %s" %
(dump(node.getUUID()), offset))
# check that the storage said it is up to date for a partition it was assigned to
# check that the storage said it is up to date for a partition it was
# assigned to
for xcell in app.pt.getCellList(offset):
if xcell.getNode().getUUID() == node.getUUID() and \
xcell.getState() not in (CellStates.OUT_OF_DATE,
CellStates.UP_TO_DATE):
msg = "node %s telling that it is UP TO DATE for offset \
%s but is %s for that offset" % (dump(node.getUUID()), offset,
xcell.getState())
%s but is %s for that offset" % (dump(node.getUUID()),
offset, xcell.getState())
raise ProtocolError(msg)
......
......@@ -44,11 +44,11 @@ class PartitionTable(neo.pt.PartitionTable):
and n.getUUID() is not None]
if len(node_list) == 0:
# Impossible.
raise RuntimeError, \
'cannot make a partition table with an empty storage node list'
raise RuntimeError, 'cannot make a partition table with an ' \
'empty storage node list'
# Take into account that the number of storage nodes may be less than the
# number of replicas.
# Take into account that the number of storage nodes may be less
# than the number of replicas.
repeats = min(self.nr + 1, len(node_list))
index = 0
for offset in xrange(self.np):
......@@ -79,22 +79,23 @@ class PartitionTable(neo.pt.PartitionTable):
cell_list = []
uuid = node.getUUID()
for offset, row in enumerate(self.partition_list):
if row is not None:
for cell in row:
if cell.getNode() is node:
if not cell.isFeeding():
# If this cell is not feeding, find another node
# to be added.
node_list = [c.getNode() for c in row]
n = self.findLeastUsedNode(node_list)
if n is not None:
row.append(neo.pt.Cell(n, CellStates.OUT_OF_DATE))
self.count_dict[n] += 1
cell_list.append((offset, n.getUUID(),
CellStates.OUT_OF_DATE))
row.remove(cell)
cell_list.append((offset, uuid, CellStates.DISCARDED))
break
if row is None:
continue
for cell in row:
if cell.getNode() is node:
if not cell.isFeeding():
# If this cell is not feeding, find another node
# to be added.
node_list = [c.getNode() for c in row]
n = self.findLeastUsedNode(node_list)
if n is not None:
row.append(neo.pt.Cell(n, CellStates.OUT_OF_DATE))
self.count_dict[n] += 1
cell_list.append((offset, n.getUUID(),
CellStates.OUT_OF_DATE))
row.remove(cell)
cell_list.append((offset, uuid, CellStates.DISCARDED))
break
try:
del self.count_dict[node]
......@@ -135,7 +136,8 @@ class PartitionTable(neo.pt.PartitionTable):
if num_cells <= self.nr:
row.append(neo.pt.Cell(node, CellStates.OUT_OF_DATE))
cell_list.append((offset, node.getUUID(), CellStates.OUT_OF_DATE))
cell_list.append((offset, node.getUUID(),
CellStates.OUT_OF_DATE))
node_count += 1
else:
if max_count - node_count > 1:
......@@ -192,7 +194,8 @@ class PartitionTable(neo.pt.PartitionTable):
removed_cell_list.append(feeding_cell)
ideal_num = self.nr + 1
while len(out_of_date_cell_list) + len(up_to_date_cell_list) > ideal_num:
while len(out_of_date_cell_list) + len(up_to_date_cell_list) > \
ideal_num:
# This row contains too many cells.
if len(up_to_date_cell_list) > 1:
# There are multiple up-to-date cells, so choose whatever
......@@ -220,7 +223,8 @@ class PartitionTable(neo.pt.PartitionTable):
row.remove(cell)
if not cell.isFeeding():
self.count_dict[cell.getNode()] -= 1
changed_cell_list.append((offset, cell.getUUID(), CellStates.DISCARDED))
changed_cell_list.append((offset, cell.getUUID(),
CellStates.DISCARDED))
# Add cells, if a row contains less than the number of replicas.
for offset, row in enumerate(self.partition_list):
......@@ -233,7 +237,8 @@ class PartitionTable(neo.pt.PartitionTable):
if node is None:
break
row.append(neo.pt.Cell(node, CellStates.OUT_OF_DATE))
changed_cell_list.append((offset, node.getUUID(), CellStates.OUT_OF_DATE))
changed_cell_list.append((offset, node.getUUID(),
CellStates.OUT_OF_DATE))
self.count_dict[node] += 1
num_cells += 1
......@@ -251,6 +256,7 @@ class PartitionTable(neo.pt.PartitionTable):
for cell in row:
if not cell.getNode().isRunning() and not cell.isOutOfDate():
cell.setState(CellStates.OUT_OF_DATE)
cell_list.append((offset, cell.getUUID(), CellStates.OUT_OF_DATE))
cell_list.append((offset, cell.getUUID(),
CellStates.OUT_OF_DATE))
return cell_list
......@@ -193,10 +193,12 @@ class Application(object):
def execute(self, args):
"""Execute the command given."""
# print node type : print the list of nodes of the given type (STORAGE_NODE_TYPE, MASTER_NODE_TYPE...)
# set node uuid state [1|0] : set the node with the given uuid to the state (RUNNING, DOWN...)
# and modify the partition if asked
# set cluster name [shutdown|operational] : either shut down the cluster or mark it as operational
# print node type : print the list of nodes of the given type
# (STORAGE_NODE_TYPE, MASTER_NODE_TYPE...)
# set node uuid state [1|0] : set the node with the given uuid to the
# state (RUNNING, DOWN...) and modify the partition if asked
# set cluster name [shutdown|operational] : either shut down the
# cluster or mark it as operational
current_action = action_dict
level = 0
while current_action is not None and \
......
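The rewrapped comment above sketches the neoctl command grammar; the surrounding code walks a nested dictionary of actions, one command word per level, until it reaches something executable. A hypothetical, self-contained illustration of that lookup (the action table and commands here are invented, only the walking pattern mirrors the code):

    action_dict = {
        'print': {'node': lambda arg: 'nodes of type %s' % arg,
                  'pt': lambda arg: 'partition table'},
        'set': {'node': lambda arg: 'node set to %s' % arg},
    }

    def execute(args):
        # Descend one level of the action dictionary per command word until
        # a callable is found, then hand it the rest of the command line.
        current_action = action_dict
        level = 0
        while not callable(current_action):
            current_action = current_action[args[level]]
            level += 1
        return current_action(' '.join(args[level:]))

    print(execute(['print', 'node', 'storage']))  # -> nodes of type storage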
......@@ -147,7 +147,8 @@ def _decodeNodeType(original_node_type):
def _decodeErrorCode(original_error_code):
error_code = ErrorCodes.get(original_error_code)
if error_code is None:
raise PacketMalformedError('invalid error code %d' % original_error_code)
raise PacketMalformedError('invalid error code %d' %
original_error_code)
return error_code
def _decodeAddress(address):
......@@ -337,7 +338,8 @@ class AcceptIdentification(Packet):
node_type = _decodeNodeType(node_type)
uuid = _decodeUUID(uuid)
your_uuid = _decodeUUID(your_uuid)
return (node_type, uuid, address, num_partitions, num_replicas, your_uuid)
return (node_type, uuid, address, num_partitions, num_replicas,
your_uuid)
class AskPrimary(Packet):
"""
......@@ -395,8 +397,9 @@ class AnswerLastIDs(Packet):
Reply to Ask Last IDs. S -> PM, PM -> S.
"""
def _encode(self, loid, ltid, lptid):
# in this case, loid is a valid OID but considered as invalid. This is not
# an issue because the OID 0 is hard coded and will never be generated
# in this case, loid is a valid OID but considered as invalid. This is
# not an issue because the OID 0 is hard coded and will never be
# generated
if loid is None:
loid = INVALID_OID
ltid = _encodeTID(ltid)
......@@ -882,7 +885,8 @@ class AnswerTransactionInformation(Packet):
Answer information (user, description) about a transaction. S -> Any.
"""
def _encode(self, tid, user, desc, ext, oid_list):
body = [pack('!8sHHHL', tid, len(user), len(desc), len(ext), len(oid_list))]
body = [pack('!8sHHHL', tid, len(user), len(desc), len(ext),
len(oid_list))]
body.append(user)
body.append(desc)
body.append(ext)
......@@ -1268,7 +1272,8 @@ class PacketRegistry(dict):
StartOperation = register(0x000B, StartOperation)
StopOperation = register(0x000C, StopOperation)
AskUnfinishedTransactions = register(0x000D, AskUnfinishedTransactions)
AnswerUnfinishedTransactions = register(0x800d, AnswerUnfinishedTransactions)
AnswerUnfinishedTransactions = register(0x800d,
AnswerUnfinishedTransactions)
AskObjectPresent = register(0x000f, AskObjectPresent)
AnswerObjectPresent = register(0x800f, AnswerObjectPresent)
DeleteTransaction = register(0x0010, DeleteTransaction)
......@@ -1293,7 +1298,8 @@ class PacketRegistry(dict):
AskTIDs = register(0x001C, AskTIDs)
AnswerTIDs = register(0x801D, AnswerTIDs)
AskTransactionInformation = register(0x001E, AskTransactionInformation)
AnswerTransactionInformation = register(0x801E, AnswerTransactionInformation)
AnswerTransactionInformation = register(0x801E,
AnswerTransactionInformation)
AskObjectHistory = register(0x001F, AskObjectHistory)
AnswerObjectHistory = register(0x801F, AnswerObjectHistory)
AskOIDs = register(0x0020, AskOIDs)
......
......@@ -249,7 +249,8 @@ class PartitionTable(object):
line.append('X' * len(node_list))
else:
cell = []
cell_dict = dict([(node_dict.get(x.getUUID(), None), x) for x in row])
cell_dict = dict([(node_dict.get(x.getUUID(), None), x)
for x in row])
for node in xrange(len(node_list)):
if node in cell_dict:
cell.append(cell_state_dict[cell_dict[node].getState()])
......
......@@ -37,7 +37,8 @@ class BaseMasterHandler(BaseStorageHandler):
raise PrimaryFailure('re-election occurs')
def notifyClusterInformation(self, conn, packet, state):
logging.error('ignoring notify cluster information in %s' % self.__class__.__name__)
logging.error('ignoring notify cluster information in %s' %
self.__class__.__name__)
def notifyLastOID(self, conn, packet, oid):
self.app.loid = oid
......@@ -104,7 +105,8 @@ class BaseClientAndStorageOperationHandler(BaseStorageHandler):
if t is None:
p = protocol.tidNotFound('%s does not exist' % dump(tid))
else:
p = Packets.AnswerTransactionInformation(tid, t[1], t[2], t[3], t[0])
p = Packets.AnswerTransactionInformation(tid, t[1], t[2], t[3],
t[0])
conn.answer(p, packet.getId())
def askObject(self, conn, packet, oid, serial, tid):
......
......@@ -83,7 +83,8 @@ class VerificationHandler(BaseMasterHandler):
if t is None:
p = protocol.tidNotFound('%s does not exist' % dump(tid))
else:
p = Packets.AnswerTransactionInformation(tid, t[1], t[2], t[3], t[0])
p = Packets.AnswerTransactionInformation(tid, t[1], t[2], t[3],
t[0])
conn.answer(p, packet.getId())
def askObjectPresent(self, conn, packet, oid, tid):
......
......@@ -444,7 +444,7 @@ class MySQLDatabaseManager(DatabaseManager):
q("""INSERT INTO obj SELECT * FROM tobj WHERE tobj.serial = %d""" \
% tid)
q("""DELETE FROM tobj WHERE serial = %d""" % tid)
q("""INSERT INTO trans SELECT * FROM ttrans WHERE ttrans.tid = %d""" \
q("""INSERT INTO trans SELECT * FROM ttrans WHERE ttrans.tid = %d"""
% tid)
q("""DELETE FROM ttrans WHERE tid = %d""" % tid)
except:
......
......@@ -126,7 +126,8 @@ class Replicator(object):
partition.setCriticalTID(tid)
del self.critical_tid_dict[msg_id]
except KeyError:
logging.debug("setCriticalTID raised KeyError for msg_id %s" %(msg_id,))
logging.debug("setCriticalTID raised KeyError for msg_id %s" %
(msg_id, ))
def _askCriticalTID(self):
conn = self.primary_master_connection
......@@ -164,7 +165,8 @@ class Replicator(object):
addr = node.getAddress()
if addr is None:
logging.error("no address known for the selected node %s" %(dump(node.getUUID())))
logging.error("no address known for the selected node %s" %
(dump(node.getUUID()), ))
return
if self.current_connection is not None:
if self.current_connection.getAddress() == addr:
......@@ -177,8 +179,7 @@ class Replicator(object):
if self.current_connection is None:
handler = replication.ReplicationHandler(app)
self.current_connection = ClientConnection(app.em, handler,
addr = addr,
connector_handler = app.connector_handler)
addr = addr, connector_handler = app.connector_handler)
p = Packets.RequestIdentification(NodeTypes.STORAGE,
app.uuid, app.server, app.name)
self.current_connection.ask(p)
......@@ -196,7 +197,8 @@ class Replicator(object):
# Notify to a primary master node that my cell is now up-to-date.
conn = self.primary_master_connection
p = Packets.NotifyPartitionChanges(app.pt.getID(),
[(self.current_partition.getRID(), app.uuid, CellStates.UP_TO_DATE)])
[(self.current_partition.getRID(), app.uuid,
CellStates.UP_TO_DATE)])
conn.notify(p)
except KeyError:
pass
......@@ -234,7 +236,8 @@ class Replicator(object):
self._askUnfinishedTIDs()
else:
if self.replication_done:
logging.info('replication is done for %s' %(self.current_partition.getRID(),))
logging.info('replication is done for %s' %
(self.current_partition.getRID(), ))
self._finishReplication()
def removePartition(self, rid):
......