Commit e5fd0233 authored by Julien Muchembled's avatar Julien Muchembled

storage: avoid repeated 'Lock delayed' logs

On clusters with many deadlock avoidances, this flooded logs.
Hopefully, this commit reduces the size of logs without losing information.
parent 3a39ac9a
......@@ -40,6 +40,7 @@ class Transaction(object):
"""
Container for a pending transaction
"""
_delayed = {}
tid = None
voted = 0
......@@ -64,6 +65,15 @@ class Transaction(object):
def __lt__(self, other):
    # Order pending transactions by the tid they currently lock with
    # (used wherever Transaction objects are sorted/compared).
    return self.locking_tid < other.locking_tid
def logDelay(self, ttid, locked, oid_serial):
    """Log that a store for this transaction is delayed by a lock.

    To keep logs small on clusters with many deadlock avoidances, a
    'Lock delayed' line is only emitted when the locking tid recorded
    for this (oid, serial) pair changes — repeated delays by the same
    locker are silent.

    :param ttid: temporary tid of the delayed transaction
    :param locked: tid of the transaction currently holding the lock
    :param oid_serial: (oid, serial) tuple identifying the delayed store
    """
    if self._delayed.get(oid_serial) != locked:
        # _delayed starts as a class-level empty dict shared by all
        # instances (see the class body); the first entry replaces it
        # with an instance-level dict, so the shared class attribute
        # is never mutated. The truthiness test distinguishes the two
        # cases because an instance dict is never created empty.
        if self._delayed:
            self._delayed[oid_serial] = locked
        else:
            self._delayed = {oid_serial: locked}
        logging.info('Lock delayed for %s:%s by %s',
            dump(oid_serial[0]), dump(ttid), dump(locked))
def store(self, oid, data_id, value_serial):
"""
Add an object to the transaction
......@@ -322,8 +332,7 @@ class TransactionManager(EventQueue):
# before we processed UnlockInformation from the master.
# Or the locking transaction has already voted and there's no
# risk of deadlock if we delay.
logging.info('Lock delayed for %s:%s by %s',
dump(oid), dump(ttid), dump(locked))
transaction.logDelay(ttid, locked, (oid, serial))
# A client may have several stores delayed for the same oid
# but this is not a problem. EventQueue processes them in order
# and only the last one will not result in conflicts (that are
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment