Hamza / erp5-Boxiang

Commit 700c62e1
authored Aug 29, 2018 by Jérome Perrin
TaskDistributionTool: run first tests failed in previous run
parent 4344797e
Showing 2 changed files with 41 additions and 6 deletions
bt5/erp5_test_result/TestTemplateItem/portal_components/test.erp5.testTaskDistribution.py  +28 -0
product/ERP5/Tool/TaskDistributionTool.py  +13 -6
bt5/erp5_test_result/TestTemplateItem/portal_components/test.erp5.testTaskDistribution.py
@@ -465,6 +465,34 @@ class TestTaskDistribution(ERP5TypeTestCase):
     next_line_url, next_test = self.tool.startUnitTest(next_test_result_path)
     self.assertEqual(['testFoo', 'testBar'], [test, next_test])
 
+  def test_startUnitTestRunsFailedTestFirst(self):
+    # simulate previous run
+    test_result = self.portal.test_result_module.newContent(
+        portal_type='Test Result',
+        title=self.default_test_title,
+        start_date=DateTime())
+    test_result.newContent(
+        portal_type='Test Result Line',
+        title='testFailing',
+    ).stop(test_count=1, duration=100, failure_count=1)
+    test_result.newContent(
+        portal_type='Test Result Line',
+        title='testFast',
+    ).stop(test_count=1, duration=50)
+    test_result.newContent(
+        portal_type='Test Result Line',
+        title='testSlow',
+    ).stop(test_count=1, duration=1000)
+    test_result.stop()
+    self.tic()
+
+    test_result_path, _ = self._createTestResult(
+        test_list=['testSlow', 'testFast', 'testFailing'])
+    # we run first the tests failing in previous run
+    self.assertEqual(
+        ['testFailing', 'testSlow', 'testFast'],
+        [self.tool.startUnitTest(test_result_path)[1] for _ in range(3)])
+
   def test_06b_restartStuckTest(self):
     """
     Check if a test result line is not stuck in 'started', if so, redraft
...
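The expected order in the assertion follows from the priority key added in TaskDistributionTool.py below: testFailing failed in the simulated previous run, so it is started first, and among the passing tests the slower testSlow (duration 1000) is scheduled before testFast (duration 50). A small standalone sketch of that key appears after the second diff.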
product/ERP5/Tool/TaskDistributionTool.py
@@ -107,7 +107,7 @@ class TaskDistributionTool(BaseTool):
                                                    node_title))
       node.start()
     def createTestResultLineList(test_result, test_name_list):
-      duration_list = []
+      test_priority_list = []
       previous_test_result_list = portal.test_result_module.searchFolder(
               title=SimpleQuery(comparison_operator='=', title=test_result.getTitle()),
               sort_on=[('creation_date', 'descending')],
...
@@ -117,11 +117,18 @@ class TaskDistributionTool(BaseTool):
         previous_test_result = previous_test_result_list[0].getObject()
         for line in previous_test_result.objectValues():
           if line.getSimulationState() in ('stopped', 'public_stopped'):
-            duration_list.append((line.getTitle(), line.getProperty('duration')))
-        duration_list.sort(key=lambda x: -x[1])
-        sorted_test_list = [x[0] for x in duration_list]
-      # Sort tests by name to have consistent numbering of test result line on
-      # a test suite.
+            # Execute first the tests that failed on previous run (so that we
+            # can see quickly if a fix was effective) and the slowest tests (to
+            # make sure slow tests are executed in parallel and prevent
+            # situations where at the end all test nodes are waiting for the
+            # latest to finish).
+            test_priority_list.append(
+                (line.getStringIndex() == 'PASSED',
+                 -line.getProperty('duration'),
+                 line.getTitle()))
+        sorted_test_list = [x[2] for x in sorted(test_priority_list)]
+      # Sort tests by name to have consistent ids for test result line on a
+      # test suite.
       for test_name in sorted(test_name_list):
         index = 0
         if sorted_test_list:
...
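Because Python compares tuples element-wise and False sorts before True, a line whose getStringIndex() is not 'PASSED' comes first, ties are broken by the negated duration (slowest first), and the title keeps the ordering deterministic. A minimal standalone sketch of the same key, using hypothetical plain tuples instead of Test Result Line objects (the titles and durations simply mirror the ones used in the new test):

    # Stand-ins for the previous run's result lines: (title, duration, passed).
    previous_run = [
        ('testFailing', 100, False),   # failed last time
        ('testFast', 50, True),
        ('testSlow', 1000, True),
    ]

    # Same shape as the key built in createTestResultLineList:
    # (passed, -duration, title) -- failed lines first, then slowest first.
    test_priority_list = [(passed, -duration, title)
                          for title, duration, passed in previous_run]
    sorted_test_list = [title for _, _, title in sorted(test_priority_list)]
    print(sorted_test_list)  # ['testFailing', 'testSlow', 'testFast']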