Commit 1ef149c2 by Julien Muchembled

Introduce light functional tests, using threads and serialized processing

This allows setting up an almost fully functional cluster without additional
processes. Threads are scheduled so that they never run simultaneously,
eliminating most randomness.
There's still much improvement possible like controlled randomization,
or easier debugging when switching from one thread to another.

As mock objects are not usable in such tests, an API should be implemented to
trace/count any method call we'd like to check.

This fixes test_notifyNodeInformation_checkUnregisterStorage

git-svn-id: https://svn.erp5.org/repos/neo/trunk@2775 71dcc9de-d417-0410-9af5-da40c76e7ee4
1 parent 227cdd5a
......@@ -74,6 +74,7 @@ RC - Review output of pylint (CODE)
Do the replication process, the verification stage, with or without
unfinished transactions; cells have to be set as outdated, and if yes, should
the partition table changes be broadcast? (BANDWIDTH, SPEED)
- Implement proper shutdown (ClusterStates.STOPPING)
- Review PENDING/HIDDEN/SHUTDOWN states, don't use notifyNodeInformation()
to do a state-switch, use an exception-based mechanism ? (CODE)
- Split protocol.py in a 'protocol' module
......@@ -206,6 +207,7 @@ RC - Review output of pylint (CODE)
- Use another mock library that is eggified and maintained.
See http://garybernhardt.github.com/python-mock-comparison/
for a comparison of available mocking libraries/frameworks.
- Fix epoll descriptor leak.
Later
- Consider auto-generating cluster name upon initial startup (it might
......
......@@ -97,8 +97,8 @@ class Application(object):
# Make a listening port.
handler = AdminEventHandler(self)
ListeningConnection(self.em, handler, addr=self.server,
connector=self.connector_handler())
self.listening_conn = ListeningConnection(self.em, handler,
addr=self.server, connector=self.connector_handler())
while True:
self.connectToPrimary()
......
......@@ -132,5 +132,8 @@ class Epoll(object):
raise OSError(errno.value, 'epoll_ctl failed')
def __del__(self):
if self.efd >= 0:
close(self.efd)
efd = self.efd
if efd >= 0:
del self.efd
close(efd)
close = __del__
......@@ -68,6 +68,8 @@ UNIT_TEST_MODULES = [
'neo.tests.client.testMasterHandler',
'neo.tests.client.testStorageHandler',
'neo.tests.client.testConnectionPool',
# light functional tests
'neo.tests.threaded.test',
]
FUNC_TEST_MODULES = [
......
......@@ -513,4 +513,5 @@ class DoNothingConnector(Mock):
return self.desc
__builtin__.pdb = lambda: debug.getPdb().set_trace(sys._getframe(1))
__builtin__.pdb = lambda depth=0: \
debug.getPdb().set_trace(sys._getframe(depth+1))
......@@ -16,7 +16,7 @@
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import unittest
from mock import Mock, ReturnValues
from mock import Mock
from neo.tests import NeoUnitTestBase
from neo.lib.pt import PartitionTable
from neo.lib.protocol import NodeTypes, NodeStates
......@@ -178,56 +178,6 @@ class MasterNotificationsHandlerTests(MasterHandlerTests):
self.assertEqual(len(update_calls), 1)
update_calls[0].checkArgs(ptid, cell_list, self.app.nm)
def test_notifyNodeInformation(self):
conn = self.getConnection()
addr = ('127.0.0.1', 1000)
node_list = [
(NodeTypes.CLIENT, addr, self.getNewUUID(), NodeStates.UNKNOWN),
(NodeTypes.STORAGE, addr, self.getNewUUID(), NodeStates.DOWN),
]
# XXX: it might be better to test with real node & node manager
conn1 = self.getFakeConnection()
conn2 = self.getFakeConnection()
node1 = Mock({
'getConnection': conn1,
'__nonzero__': 1,
'isConnected': True,
'__repr__': 'Fake Node',
})
node2 = Mock({
'getConnection': conn2,
'__nonzero__': 1,
'isConnected': True,
'__repr__': 'Fake Node',
})
self.app.nm = Mock({'getByUUID': ReturnValues(node1, node2)})
self.app.cp = Mock()
self.handler.notifyNodeInformation(conn, node_list)
# node manager updated
update_calls = self.app.nm.mockGetNamedCalls('update')
self.assertEqual(len(update_calls), 1)
update_calls[0].checkArgs(node_list)
# connections closed
self.checkClosed(conn1)
self.checkClosed(conn2)
return conn2
def test_notifyNodeInformation_checkUnregisterStorage(self):
# XXX: This test fails because unregistering is done
# by neo.client.handlers.storage.StorageEventHandler
# which would require a connection to storage
# with a proper handler (defined by Application).
# It can be merged with previous one as soon as it passes.
conn2 = self.test_notifyNodeInformation()
# storage removed from connection pool
remove_calls = self.app.cp.mockGetNamedCalls('removeConnection')
self.assertEqual(len(remove_calls), 1)
remove_calls[0].checkArgs(conn2)
# storage unregistered
unregister_calls = self.app.dispatcher.mockGetNamedCalls('unregister')
self.assertEqual(len(unregister_calls), 1)
unregister_calls[0].checkArgs(conn2)
class MasterAnswersHandlerTests(MasterHandlerTests):
......
......@@ -474,27 +474,6 @@ class StorageTests(NEOFunctionalTest):
self.neo.expectStorageNotKnown(started[0])
self.neo.expectPending(stopped[0])
def testRestartWithMissingStorage(self, fast_startup=False):
# start a cluster with a replica
(started, stopped) = self.__setup(storage_number=2, replicas=1,
pending_number=0, partitions=10)
self.neo.expectRunning(started[0])
self.neo.expectRunning(started[1])
self.neo.expectOudatedCells(number=0)
self.neo.expectClusterRunning()
# XXX: need to sync with storages first
self.neo.stop()
# restart it with one storage only
self.neo.start(except_storages=started[1:],
delay_startup=not fast_startup and 1 or None)
self.neo.expectRunning(started[0])
self.neo.expectUnknown(started[1])
self.neo.expectClusterRunning()
def testRestartWithMissingStorageFastStartup(self):
self.testRestartWithMissingStorage(True)
def testRecoveryWithMultiplePT(self):
# start a cluster with 2 storages and a replica
(started, stopped) = self.__setup(storage_number=2, replicas=1,
......
#
# Copyright (c) 2011 Nexedi SARL and Contributors. All Rights Reserved.
# Julien Muchembled <jm@nexedi.com>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
from persistent import Persistent
from neo.lib.protocol import NodeStates
from neo.tests.threaded import NEOCluster, NEOThreadedTest
from neo.client.pool import CELL_CONNECTED, CELL_GOOD
class PObject(Persistent):
    # Minimal persistent object used as a commit payload in the tests below.
    pass
class Test(NEOThreadedTest):
    """Light functional tests running a whole NEO cluster inside threads.

    Each test builds a NEOCluster, exercises it, and always tears it down
    in a finally clause so that one failing test does not leak threads
    into the next one.
    """

    def test_commit(self):
        # Smoke test: commit a single persistent object through a
        # one-storage cluster.
        cluster = NEOCluster()
        cluster.start(1)
        try:
            t, c = cluster.getTransaction()
            c.root()['foo'] = PObject()
            t.commit()
        finally:
            # Tear the cluster down even if the commit fails.
            cluster.stop()

    def test_notifyNodeInformation(self):
        # translated from MasterNotificationsHandlerTests
        # (neo.tests.client.testMasterHandler)
        cluster = NEOCluster()
        try:
            cluster.start(1)
            cluster.client.setPoll(0)
            # Exactly one storage node is expected; grab the client-side
            # connection to it (tuple unpacking asserts the count).
            storage, = cluster.client.nm.getStorageList()
            conn = storage.getConnection()
            self.assertFalse(conn.isClosed())
            getCellSortKey = cluster.client.cp.getCellSortKey
            self.assertEqual(getCellSortKey(storage), CELL_CONNECTED)
            # Dropping the node must remove it from the node manager,
            # close its connection, and downgrade the cell sort key
            # from CELL_CONNECTED to CELL_GOOD.
            cluster.neoctl.dropNode(cluster.storage.uuid)
            self.assertFalse(cluster.client.nm.getStorageList())
            self.assertTrue(conn.isClosed())
            self.assertEqual(getCellSortKey(storage), CELL_GOOD)
            # XXX: the test originally checked that the 'unregister' method
            #      was called (even if it's useless in this case),
            #      but we would need an API to do that easily.
            self.assertFalse(cluster.client.dispatcher.registered(conn))
        finally:
            cluster.stop()

    def testRestartWithMissingStorage(self, fast_startup=False):
        # translated from neo.tests.functional.testStorage.StorageTest
        cluster = NEOCluster(replicas=1, partitions=10)
        s1, s2 = cluster.storage_list
        try:
            cluster.start()
            # With both storages up, no cell may be outdated.
            # (NOTE: 'Oudated' spelling matches the project API.)
            self.assertEqual([], cluster.getOudatedCells())
        finally:
            cluster.stop()
        # restart it with one storage only
        cluster.reset()
        try:
            cluster.start(storage_list=(s1,), fast_startup=fast_startup)
            # The missing storage must be reported as UNKNOWN.
            self.assertEqual(NodeStates.UNKNOWN, cluster.getNodeState(s2))
        finally:
            cluster.stop()

    def testRestartWithMissingStorageFastStartup(self):
        # Same scenario with fast_startup=True (no startup delay).
        self.testRestartWithMissingStorage(True)
Styling with Markdown is supported
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!