Commit 6184c003 authored by Jérome Perrin

Run some functional tests with test suite

For now, the tests reading tables from erp5.com are still disabled.
parent 7dc227c0
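
The diff below routes testFunctional* tests through a _updateFunctionalTestResponse helper that converts the unit test output into result counts. The following is a minimal sketch of that kind of parser, assuming the output layout visible in the hunks below (a summary and an HTML result table separated by rows of 79 dashes); the summary regex is a hypothetical illustration, not the committed code.

import re

def parse_functional_test_response(status_dict):
    """Sketch: turn a functional test run's output into result counts.

    Assumes status_dict['stderr'] contains a textual summary and an HTML
    result table separated by rows of 79 dashes, as in the diff below.
    """
    try:
        summary, html_test_result = status_dict['stderr'].split("-" * 79)[1:3]
    except ValueError:
        # The output did not contain the expected separators; keep the
        # original information instead of reporting an unfinished run.
        return status_dict

    # Hypothetical summary format; the real pattern is not visible in the diff.
    match = re.search(r'(?P<total>\d+)\s+tests?,\s*'
                      r'(?P<failures>\d+)\s+failures?,\s*'
                      r'(?P<expected_failure>\d+)\s+expected failures?',
                      summary)
    if match is None:
        return status_dict
    group_dict = match.groupdict()
    status_dict['html_test_result'] = html_test_result
    status_dict['failure_count'] = int(group_dict['failures'])
    status_dict['test_count'] = int(group_dict['total'])
    status_dict['skip_count'] = int(group_dict['expected_failure'])
    return status_dict
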
......@@ -62,10 +62,15 @@ class ERP5(_ERP5):
for test_case in self._getAllTestList():
# skip some tests
if test_case.startswith('testLive') or test_case.startswith('testVifib') \
or test_case.startswith('testFunctional') \
or test_case.find('Performance') > 0 \
or test_case in ('testERP5LdapCatalog', # XXX (Ivan), until LDAP server is available this test will always fail
'testERP5eGov', # it is not maintained any more
# tests reading selenium tables from erp5.com
'testFunctionalStandaloneUserTutorial',
'testFunctionalRunMyDocSample',
'testFunctionalConfigurator',
'testFunctionalConfiguratorConsulting',
# not maintained
'testERP5eGov',
'testAccounting_l10n_fr_m9'):
continue
test_list.append(test_case)
......@@ -77,28 +82,11 @@ class ERP5(_ERP5):
if not status_dict['status_code']:
status_dict = self.runUnitTest('--load', '--activity_node=2', test)
return status_dict
if test.startswith('testFunctional'):
return self._updateFunctionalTestResponse(self.runUnitTest(test))
return super(ERP5, self).run(test)
class ERP5_simulation(_ERP5):
def getTestList(self):
p = subprocess.Popen(('grep', '-lr', '--include=test*.py',
'-e', '@newSimulationExpectedFailure',
'-e', 'erp5_report_new_simulation_failures',
'Products/ERP5/tests'),
stdout=subprocess.PIPE)
return sorted(os.path.basename(x)[:-3]
for x in p.communicate()[0].splitlines())
def runUnitTest(self, *args, **kw):
return super(ERP5_simulation, self).runUnitTest(
erp5_report_new_simulation_failures='1', *args, **kw)
class ERP5UserInterface(_ERP5):
""" Run Test Suites which runs Zelenium tests """
def _updateTestResponse(self, status_dict):
def _updateFunctionalTestResponse(self, status_dict):
""" Convert the Unit Test output into more accurate information
related to the functional test run.
"""
......@@ -106,7 +94,7 @@ class ERP5UserInterface(_ERP5):
try:
summary, html_test_result = status_dict['stderr'].split("-"*79)[1:3]
except ValueError:
# In case of error when parsing the file, preserve the original
# information. This prevents us from having unfinished tests.
return status_dict
status_dict['html_test_result'] = html_test_result
......@@ -116,21 +104,22 @@ class ERP5UserInterface(_ERP5):
status_dict['failure_count'] = int(group_dict['failures'])
status_dict['test_count'] = int(group_dict['total'])
status_dict['skip_count'] = int(group_dict['expected_failure'])
return status_dict
def run(self, test):
return self._updateTestResponse(self.runUnitTest(test))
class ERP5_simulation(_ERP5):
def getTestList(self):
test_list = []
for test_path in glob.glob('%s/product/*/tests/testFunctional*.py' % sys.path[0]) + \
glob.glob('%s/bt5/*/TestTemplateItem/testFunctional*.py' % sys.path[0]):
test_case = test_path.split(os.sep)[-1][:-3] # remove .py
product = test_path.split(os.sep)[-3]
# don't test 3rd party products
if product in ('PortalTransforms', 'MailTemplates'):
continue
test_list.append(test_case)
return test_list
p = subprocess.Popen(('grep', '-lr', '--include=test*.py',
'-e', '@newSimulationExpectedFailure',
'-e', 'erp5_report_new_simulation_failures',
'Products/ERP5/tests'),
stdout=subprocess.PIPE)
return sorted(os.path.basename(x)[:-3]
for x in p.communicate()[0].splitlines())
def runUnitTest(self, *args, **kw):
return super(ERP5_simulation, self).runUnitTest(
erp5_report_new_simulation_failures='1', *args, **kw)