Commit 714fefc2 authored by Sebastien Robin's avatar Sebastien Robin Committed by Tristan Cavelier

CHERRY-PICKED test_result: fixed random issues where test result was staying...

CHERRY-PICKED test_result: fixed random issues where test result was staying confirmed while all tests were finished

When two test result lines were stopped in parallel, both transactions were thinking that
there was still one ongoing test line, thus none of them were setting the test result in stopped.

Use activities instead of serialization to avoid having testnode getting conflict errors.
parent 1e66407c
"""
Check if a test result is finished and move test result to stopped if this is the case
"""
test_result = context
if test_result.getSimulationState() == "started":
if {"stopped"} == {x.getSimulationState()
for x in test_result.objectValues(portal_type="Test Result Line")}:
test_result.stop()
<?xml version="1.0"?>
<ZopeData>
<record id="1" aka="AAAAAAAAAAE=">
<pickle>
<global name="PythonScript" module="Products.PythonScripts.PythonScript"/>
</pickle>
<pickle>
<dictionary>
<item>
<key> <string>Script_magic</string> </key>
<value> <int>3</int> </value>
</item>
<item>
<key> <string>_bind_names</string> </key>
<value>
<object>
<klass>
<global name="NameAssignments" module="Shared.DC.Scripts.Bindings"/>
</klass>
<tuple/>
<state>
<dictionary>
<item>
<key> <string>_asgns</string> </key>
<value>
<dictionary>
<item>
<key> <string>name_container</string> </key>
<value> <string>container</string> </value>
</item>
<item>
<key> <string>name_context</string> </key>
<value> <string>context</string> </value>
</item>
<item>
<key> <string>name_m_self</string> </key>
<value> <string>script</string> </value>
</item>
<item>
<key> <string>name_subpath</string> </key>
<value> <string>traverse_subpath</string> </value>
</item>
</dictionary>
</value>
</item>
</dictionary>
</state>
</object>
</value>
</item>
<item>
<key> <string>_params</string> </key>
<value> <string></string> </value>
</item>
<item>
<key> <string>id</string> </key>
<value> <string>TestResult_stopIfFinished</string> </value>
</item>
</dictionary>
</pickle>
</record>
</ZopeData>
...@@ -243,6 +243,8 @@ class TestTaskDistribution(ERP5TypeTestCase): ...@@ -243,6 +243,8 @@ class TestTaskDistribution(ERP5TypeTestCase):
if stop_count == 2: if stop_count == 2:
self.tool.stopUnitTest(next_line_url, status_dict) self.tool.stopUnitTest(next_line_url, status_dict)
test_result = self.portal.restrictedTraverse(test_result_path) test_result = self.portal.restrictedTraverse(test_result_path)
self.assertEqual(test_result.getSimulationState(), "started")
self.tic()
if stop_count == 2: if stop_count == 2:
self.assertEquals(test_result.getSimulationState(), "stopped") self.assertEquals(test_result.getSimulationState(), "stopped")
else: else:
...@@ -366,12 +368,14 @@ class TestTaskDistribution(ERP5TypeTestCase): ...@@ -366,12 +368,14 @@ class TestTaskDistribution(ERP5TypeTestCase):
status_dict = {} status_dict = {}
self.tool.stopUnitTest(line_url, status_dict) self.tool.stopUnitTest(line_url, status_dict)
test_result = self.getPortalObject().unrestrictedTraverse(test_result_path) test_result = self.getPortalObject().unrestrictedTraverse(test_result_path)
self.tic()
self.assertEqual("stopped", test_result.getSimulationState()) self.assertEqual("stopped", test_result.getSimulationState())
# launch test r0=a # launch test r0=a
test_result_path, revision = self._createTestResult(revision="r0=a", test_list=["testFoo"]) test_result_path, revision = self._createTestResult(revision="r0=a", test_list=["testFoo"])
line_url, test = self.tool.startUnitTest(test_result_path) line_url, test = self.tool.startUnitTest(test_result_path)
self.tool.stopUnitTest(line_url, status_dict) self.tool.stopUnitTest(line_url, status_dict)
test_result = self.getPortalObject().unrestrictedTraverse(test_result_path) test_result = self.getPortalObject().unrestrictedTraverse(test_result_path)
self.tic()
self.assertEqual("stopped", test_result.getSimulationState()) self.assertEqual("stopped", test_result.getSimulationState())
# Make sure we do not relaunch test with revision r0=b # Make sure we do not relaunch test with revision r0=b
result = self._createTestResult(revision="r0=b", test_list=["testFoo"]) result = self._createTestResult(revision="r0=b", test_list=["testFoo"])
...@@ -381,6 +385,7 @@ class TestTaskDistribution(ERP5TypeTestCase): ...@@ -381,6 +385,7 @@ class TestTaskDistribution(ERP5TypeTestCase):
line_url, test = self.tool.startUnitTest(test_result_path) line_url, test = self.tool.startUnitTest(test_result_path)
self.tool.stopUnitTest(line_url, status_dict) self.tool.stopUnitTest(line_url, status_dict)
test_result = self.getPortalObject().unrestrictedTraverse(test_result_path) test_result = self.getPortalObject().unrestrictedTraverse(test_result_path)
self.tic()
self.assertEqual("stopped", test_result.getSimulationState()) self.assertEqual("stopped", test_result.getSimulationState())
def test_05c_createTestResult_with_registered_test_node(self): def test_05c_createTestResult_with_registered_test_node(self):
...@@ -426,6 +431,7 @@ class TestTaskDistribution(ERP5TypeTestCase): ...@@ -426,6 +431,7 @@ class TestTaskDistribution(ERP5TypeTestCase):
next_line.duration = line.duration + 1 next_line.duration = line.duration + 1
# So if we launch another unit test, it will process first the # So if we launch another unit test, it will process first the
# one which is the slowest # one which is the slowest
self.tic()
self.assertEqual("stopped", test_result.getSimulationState()) self.assertEqual("stopped", test_result.getSimulationState())
self.tic() self.tic()
next_test_result_path, revision = self._createTestResult( next_test_result_path, revision = self._createTestResult(
...@@ -507,11 +513,12 @@ class TestTaskDistribution(ERP5TypeTestCase): ...@@ -507,11 +513,12 @@ class TestTaskDistribution(ERP5TypeTestCase):
if tic: if tic:
self.tic() self.tic()
test_result = self.getPortalObject().unrestrictedTraverse(test_result_path) test_result = self.getPortalObject().unrestrictedTraverse(test_result_path)
self.assertEqual("stopped", test_result.getSimulationState())
self.assertEqual(None, self._createTestResult(test_list=["testFoo"])) self.assertEqual(None, self._createTestResult(test_list=["testFoo"]))
next_test_result_path, next_revision = self._createTestResult( next_test_result_path, next_revision = self._createTestResult(
test_list=["testFoo"], allow_restart=True) test_list=["testFoo"], allow_restart=True)
self.assertTrue(next_test_result_path != test_result_path) self.assertTrue(next_test_result_path != test_result_path)
self.tic()
self.assertEqual("stopped", test_result.getSimulationState())
def test_09_checkCreateTestResultAndAllowRestartWithoutTic(self): def test_09_checkCreateTestResultAndAllowRestartWithoutTic(self):
""" """
......
...@@ -243,9 +243,9 @@ class TaskDistributionTool(BaseTool): ...@@ -243,9 +243,9 @@ class TaskDistributionTool(BaseTool):
if test_result.getSimulationState() == 'started': if test_result.getSimulationState() == 'started':
if line.getSimulationState() in ["draft", "started"]: if line.getSimulationState() in ["draft", "started"]:
line.stop(**status_dict) line.stop(**status_dict)
if {"stopped"} == {x.getSimulationState() # Check by activity is all lines are finished. Do not check synchrnonously
for x in test_result.objectValues(portal_type="Test Result Line")}: # in case another test line is stopped in parallel
test_result.stop() test_result.activate().TestResult_stopIfFinished()
def _extractXMLRPCDict(self, xmlrpc_dict): def _extractXMLRPCDict(self, xmlrpc_dict):
""" """
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment