Commit 7025db52 authored by Julien Muchembled

Rewrite of scheduler for threaded tests

The previous implementation was built around a 'pending' global variable that
was set by a few monkey-patches when some network activity was pending between
nodes. All this is replaced by an extra epoll object that is used to wait for
nodes that have pending network events: this is simpler, and faster since it
significantly reduces the number of context switches.
parent 61009341
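
The mechanism can be sketched as follows. This is a minimal illustration of the idea, not the actual NEO implementation: the names node.em.epoll and processPendingEvents are hypothetical. It relies on the fact that, on Linux, an epoll file descriptor is itself pollable and becomes readable as soon as it has ready events, so registering every node's epoll fd with one extra epoll object lets the scheduler block in a single poll() until some node has pending network activity.

    # Hypothetical sketch of the scheduling idea, not the actual NEO code.
    import select

    class Scheduler(object):

        def __init__(self):
            # Extra epoll object: it watches the epoll fd of every node,
            # so a single poll() tells us which nodes have pending events.
            self._epoll = select.epoll()
            self._fd_to_node = {}

        def register(self, node):
            # node.em.epoll (assumed name) is the node's own epoll object;
            # its fileno() becomes readable when the node has ready events.
            fd = node.em.epoll.fileno()
            self._fd_to_node[fd] = node
            self._epoll.register(fd, select.EPOLLIN)

        def unregister(self, node):
            fd = node.em.epoll.fileno()
            del self._fd_to_node[fd]
            self._epoll.unregister(fd)

        def tic(self, timeout=-1):
            # Block in the kernel until at least one node has pending
            # network activity, then let each ready node process it.
            for fd, _ in self._epoll.poll(timeout):
                self._fd_to_node[fd].processPendingEvents()

Blocking in the kernel this way, instead of repeatedly checking a global 'pending' flag, is what eliminates the useless wakeups and context switches mentioned above.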
@@ -77,7 +77,7 @@ class Test(NEOThreadedTest):
         self.assertEqual(data_info, cluster.storage.getDataLockInfo())
         serial = storage.tpc_finish(txn)
         data_info[key] = 0
-        cluster.tic(slave=1)
+        self.tic()
         self.assertEqual(data_info, cluster.storage.getDataLockInfo())
         self.assertEqual((data, serial), storage.load(oid, ''))
         storage._cache.clear()
@@ -184,7 +184,7 @@ class Test(NEOThreadedTest):
         self.assertEqual(data_info, cluster.storage.getDataLockInfo())
         tid1 = storage.tpc_finish(txn[2])
-        cluster.tic(slave=1)
+        self.tic()
         data_info[key] -= 1
         self.assertEqual(data_info, cluster.storage.getDataLockInfo())
@@ -360,7 +360,7 @@ class Test(NEOThreadedTest):
         try:
             cluster.start()
             cluster.db # open DB
-            cluster.client.setPoll(0)
+            self.background(0)
             s0, s1 = cluster.client.nm.getStorageList()
             conn = s0.getConnection()
             self.assertFalse(conn.isClosed())
@@ -403,7 +403,7 @@ class Test(NEOThreadedTest):
             t, c = cluster.getTransaction()
             c.root()[0] = 'ok'
             t.commit()
-            cluster.tic(slave=1)
+            self.tic()
             data_info = cluster.storage.getDataLockInfo()
             self.assertEqual(data_info.values(), [0, 0])
             # (obj|trans) become t(obj|trans)
@@ -475,7 +475,7 @@ class Test(NEOThreadedTest):
             # drop one
             cluster.neoctl.dropNode(s1.uuid)
             checkNodeState(None)
-            cluster.tic() # Let node state update reach remaining storage
+            self.tic() # Let node state update reach remaining storage
             checkNodeState(None)
             self.assertEqual([], cluster.getOudatedCells())
             # restart with s2 only
...@@ -487,7 +487,7 @@ class Test(NEOThreadedTest): ...@@ -487,7 +487,7 @@ class Test(NEOThreadedTest):
checkNodeState(None) checkNodeState(None)
# then restart it, it must be in pending state # then restart it, it must be in pending state
s1.start() s1.start()
cluster.tic() self.tic()
checkNodeState(NodeStates.PENDING) checkNodeState(NodeStates.PENDING)
finally: finally:
cluster.stop() cluster.stop()
@@ -517,7 +517,7 @@ class Test(NEOThreadedTest):
             storage.connectToPrimary = sys.exit
             # send an unexpected packet to master so it aborts connection to storage
             storage.master_conn.answer(Packets.Pong())
-            cluster.tic(force=1)
+            self.tic()
             self.assertEqual(cluster.neoctl.getClusterState(),
                 ClusterStates.VERIFYING)
         finally:
@@ -532,10 +532,10 @@ class Test(NEOThreadedTest):
             t, c = cluster.getTransaction()
             c.root()[''] = ''
             t.commit()
-            cluster.client.setPoll(0)
+            self.background(0)
             # tell admin to shutdown the cluster
             cluster.neoctl.setClusterState(ClusterStates.STOPPING)
-            cluster.tic()
+            self.tic()
             # all nodes except clients should exit
             for master in cluster.master_list:
                 master.join(5)
@@ -609,10 +609,8 @@ class Test(NEOThreadedTest):
             # (at this time, we still have x=0 and y=1)
             t2, c2 = cluster.getTransaction()
             # Copy y to x using a different Master-Client connection
-            cluster.client.setPoll(0)
             client = ClientApplication(name=cluster.name,
                 master_nodes=cluster.master_nodes)
-            client.setPoll(1)
             txn = transaction.Transaction()
             client.tpc_begin(txn)
             client.store(x1._p_oid, x1._p_serial, y, '', txn)
@@ -621,8 +619,6 @@ class Test(NEOThreadedTest):
             m2c.add(lambda conn, packet:
                 isinstance(packet, Packets.InvalidateObjects))
             tid = client.tpc_finish(txn, None)
-            client.setPoll(0)
-            cluster.client.setPoll(1)
             # Change to x is committed. Testing connection must ask the
             # storage node to return original value of x, even if we
             # haven't processed yet any invalidation for x.
@@ -657,14 +653,10 @@ class Test(NEOThreadedTest):
             # from the storage (which is <value=1, next_tid=None>) is about
             # to be processed.
             # Now modify x to receive an invalidation for it.
-            cluster.client.setPoll(0)
-            client.setPoll(1)
             txn = transaction.Transaction()
             client.tpc_begin(txn)
             client.store(x2._p_oid, tid, x, '', txn) # value=0
             tid = client.tpc_finish(txn, None)
-            client.setPoll(0)
-            cluster.client.setPoll(1)
             t1.begin() # make sure invalidation is processed
         finally:
             del p
@@ -690,15 +682,11 @@ class Test(NEOThreadedTest):
             p.apply()
             t = self.newThread(t1.begin)
             l1.acquire()
-            cluster.client.setPoll(0)
-            client.setPoll(1)
             txn = transaction.Transaction()
             client.tpc_begin(txn)
             client.store(x2._p_oid, tid, y, '', txn)
             tid = client.tpc_finish(txn, None)
             client.close()
-            client.setPoll(0)
-            cluster.client.setPoll(1)
         finally:
             del p
             l2.release()
@@ -728,24 +716,23 @@ class Test(NEOThreadedTest):
             y = c1._storage.load(y._p_oid)[0]
             # close connections to master & storage
-            cluster.client.setPoll(0)
+            self.background(0)
             c, = cluster.master.nm.getClientList()
             c.getConnection().close()
             c, = cluster.storage.nm.getClientList()
             c.getConnection().close()
-            cluster.tic(force=1)
+            self.tic()
             # modify x with another client
             client = ClientApplication(name=cluster.name,
                 master_nodes=cluster.master_nodes)
-            client.setPoll(1)
+            self.background(1)
             txn = transaction.Transaction()
             client.tpc_begin(txn)
             client.store(x1._p_oid, x1._p_serial, y, '', txn)
             tid = client.tpc_finish(txn, None)
             client.close()
-            client.setPoll(0)
-            cluster.client.setPoll(1)
+            self.tic()
             # Check reconnection to storage.
             with Patch(cluster.client.cp, getConnForNode=getConnForNode):
@@ -765,7 +752,7 @@ class Test(NEOThreadedTest):
         try:
             cluster.start()
             client = cluster.client
-            client.setPoll(1)
+            self.background(1)
             txn = transaction.Transaction()
             client.tpc_begin(txn)
             txn_context = client._txn_container.get(txn)
@@ -828,13 +815,13 @@ class Test(NEOThreadedTest):
         with cluster.master.filterConnection(cluster.storage) as m2s:
             m2s.add(delayNotifyInformation)
             cluster.client.master_conn.close()
-            cluster.client.setPoll(0)
+            self.background(0)
             client = ClientApplication(name=cluster.name,
                 master_nodes=cluster.master_nodes)
             p = Patch(client.storage_bootstrap_handler, notReady=notReady)
             try:
                 p.apply()
-                client.setPoll(1)
+                self.background(1)
                 x = client.load(ZERO_TID)
             finally:
                 del p
@@ -204,6 +204,7 @@ class ImporterTests(NEOThreadedTest):
             t.commit()
             if cluster.storage.dm._import:
                 last_import = i
+        self.tic()
         self.assertTrue(last_import and not cluster.storage.dm._import)
         i = len(src_root) + 1
         self.assertEqual(sorted(r.walk()), sorted(