Commit 2a4bd097 authored by Phil Hughes

Merge branch '32046-backstage-error-failure-differentiation' into 'master'

Categorize errors separately from failures in xUnit results

See merge request gitlab-org/gitlab!23819
parents e2ce3c1a 2a60f152
......@@ -62,9 +62,21 @@ export default {
return (
report.existing_failures.length > 0 ||
report.new_failures.length > 0 ||
report.resolved_failures.length > 0
report.resolved_failures.length > 0 ||
report.existing_errors.length > 0 ||
report.new_errors.length > 0 ||
report.resolved_errors.length > 0
);
},
unresolvedIssues(report) {
return report.existing_failures.concat(report.existing_errors);
},
newIssues(report) {
return report.new_failures.concat(report.new_errors);
},
resolvedIssues(report) {
return report.resolved_failures.concat(report.resolved_errors);
},
},
};
</script>
......@@ -87,9 +99,9 @@ export default {
<issues-list
v-if="shouldRenderIssuesList(report)"
:key="`issues-list-${i}`"
:unresolved-issues="report.existing_failures"
:new-issues="report.new_failures"
:resolved-issues="report.resolved_failures"
:unresolved-issues="unresolvedIssues(report)"
:new-issues="newIssues(report)"
:resolved-issues="resolvedIssues(report)"
:component="$options.componentNames.TestIssueBody"
class="report-block-group-list"
/>
......
......@@ -16,6 +16,7 @@ export default {
state.summary.total = response.summary.total;
state.summary.resolved = response.summary.resolved;
state.summary.failed = response.summary.failed;
state.summary.errored = response.summary.errored;
state.status = response.status;
state.reports = response.suites;
......@@ -29,6 +30,7 @@ export default {
total: 0,
resolved: 0,
failed: 0,
errored: 0,
};
state.status = null;
},
......
......@@ -13,6 +13,7 @@ export default () => ({
total: 0,
resolved: 0,
failed: 0,
errored: 0,
},
/**
......@@ -23,10 +24,14 @@ export default () => ({
* total: {Number},
* resolved: {Number},
* failed: {Number},
* errored: {Number},
* },
* new_failures: {Array.<Object>},
* resolved_failures: {Array.<Object>},
* existing_failures: {Array.<Object>},
* new_errors: {Array.<Object>},
* resolved_errors: {Array.<Object>},
* existing_errors: {Array.<Object>},
* }
*/
reports: [],
......
......@@ -8,10 +8,11 @@ import {
} from '../constants';
const textBuilder = results => {
const { failed, resolved, total } = results;
const { failed, errored, resolved, total } = results;
const failedString = failed
? n__('%d failed/error test result', '%d failed/error test results', failed)
const failedOrErrored = (failed || 0) + (errored || 0);
const failedString = failedOrErrored
? n__('%d failed/error test result', '%d failed/error test results', failedOrErrored)
: null;
const resolvedString = resolved
? n__('%d fixed test result', '%d fixed test results', resolved)
......@@ -20,7 +21,7 @@ const textBuilder = results => {
let resultsString = s__('Reports|no changed test results');
if (failed) {
if (failedOrErrored) {
if (resolved) {
resultsString = sprintf(s__('Reports|%{failedString} and %{resolvedString}'), {
failedString,
......
......@@ -7,6 +7,7 @@ class TestReportsComparerEntity < Grape::Entity
expose :total_count, as: :total
expose :resolved_count, as: :resolved
expose :failed_count, as: :failed
expose :error_count, as: :errored
end
expose :suite_comparers, as: :suites, using: TestSuiteComparerEntity
......
......@@ -11,6 +11,7 @@ class TestSuiteComparerEntity < Grape::Entity
expose :total_count, as: :total
expose :resolved_count, as: :resolved
expose :failed_count, as: :failed
expose :error_count, as: :errored
end
# rubocop: disable CodeReuse/ActiveRecord
......@@ -28,6 +29,20 @@ class TestSuiteComparerEntity < Grape::Entity
max_tests(suite.new_failures, suite.existing_failures))
end
expose :new_errors, using: TestCaseEntity do |suite|
suite.new_errors.take(max_tests)
end
expose :existing_errors, using: TestCaseEntity do |suite|
suite.existing_errors.take(
max_tests(suite.new_errors))
end
expose :resolved_errors, using: TestCaseEntity do |suite|
suite.resolved_errors.take(
max_tests(suite.new_errors, suite.existing_errors))
end
private
def max_tests(*used)
......
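Each exposed list is truncated so the merge request widget stays bounded; the body of `max_tests` is elided above, but the call pattern suggests a shared display budget where lists exposed earlier reduce what later lists may show. A rough sketch of that idea (the constant names and values here are assumptions for illustration, not taken from this diff):

```ruby
# Assumed budget constants for illustration only; the real values are not
# shown in this diff.
MAX_DISPLAY = 100
MIN_DISPLAY = 10

# How many entries the next list may expose, given lists that already
# consumed part of the budget.
def max_tests(*used)
  [MAX_DISPLAY - used.map(&:count).sum, MIN_DISPLAY].max
end

new_errors      = Array.new(150) { |i| "new_error_#{i}" }
existing_errors = Array.new(40)  { |i| "existing_error_#{i}" }

# new_errors get the full budget; existing_errors only see the remainder
# (clamped to the floor), mirroring the take(max_tests(...)) calls above.
p new_errors.take(max_tests).size                  # => 100
p existing_errors.take(max_tests(new_errors)).size # => 10
```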
......@@ -50,10 +50,7 @@ module Gitlab
status = ::Gitlab::Ci::Reports::TestCase::STATUS_FAILED
system_output = data['failure']
elsif data['error']
# For now, as an MVC, we are grouping error test cases together
# with failed ones. But we will improve this further on
# https://gitlab.com/gitlab-org/gitlab/issues/32046.
status = ::Gitlab::Ci::Reports::TestCase::STATUS_FAILED
status = ::Gitlab::Ci::Reports::TestCase::STATUS_ERROR
system_output = data['error']
else
status = ::Gitlab::Ci::Reports::TestCase::STATUS_SUCCESS
......
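In JUnit-style reports a `<failure>` records an assertion that did not hold, while an `<error>` records an unexpected exception; with this change the parser maps each to its own status instead of folding errors into failures. A minimal standalone sketch of that branch, with string constants standing in for `::Gitlab::Ci::Reports::TestCase::STATUS_*`:

```ruby
# Simplified status mapping mirroring the branch above; the constants are
# stand-ins for the TestCase status constants, not GitLab's classes.
STATUS_SUCCESS = 'success'
STATUS_FAILED  = 'failed'
STATUS_ERROR   = 'error'

# `data` is the hash built for one <testcase> element; a <failure> or
# <error> child ends up under the matching key.
def status_and_output(data)
  if data['failure']
    [STATUS_FAILED, data['failure']]
  elsif data['error']
    [STATUS_ERROR, data['error']]
  else
    [STATUS_SUCCESS, nil]
  end
end

p status_and_output('error' => 'Some error')         # => ["error", "Some error"]
p status_and_output('failure' => 'expected 1 got 3') # => ["failed", "expected 1 got 3"]
p status_and_output({})                               # => ["success", nil]
```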
......@@ -29,7 +29,7 @@ module Gitlab
end
end
%w(total_count resolved_count failed_count).each do |method|
%w(total_count resolved_count failed_count error_count).each do |method|
define_method(method) do
# rubocop: disable CodeReuse/ActiveRecord
suite_comparers.sum { |suite| suite.public_send(method) } # rubocop:disable GitlabSecurity/PublicSend
......
......@@ -38,6 +38,30 @@ module Gitlab
end
end
def new_errors
strong_memoize(:new_errors) do
head_suite.error.reject do |key, _|
base_suite.error.include?(key)
end.values
end
end
def existing_errors
strong_memoize(:existing_errors) do
head_suite.error.select do |key, _|
base_suite.error.include?(key)
end.values
end
end
def resolved_errors
strong_memoize(:resolved_errors) do
head_suite.success.select do |key, _|
base_suite.error.include?(key)
end.values
end
end
def total_count
head_suite.total_count
end
......@@ -47,12 +71,16 @@ module Gitlab
end
def resolved_count
resolved_failures.count
resolved_failures.count + resolved_errors.count
end
def failed_count
new_failures.count + existing_failures.count
end
def error_count
new_errors.count + existing_errors.count
end
end
end
end
......
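The comparer derives each error bucket by comparing keyed test cases between the head and base suites: a new error exists only in head, an existing error appears in both, and a resolved error was errored in base but succeeds in head. A minimal sketch of that set logic, with plain hashes standing in for the suites' keyed collections (the variable names below are illustrative, not GitLab's API):

```ruby
# Plain hashes keyed by test case identifier, standing in for the suites'
# error/success collections used in the hunk above.
base_error   = { 'a' => 'case a', 'b' => 'case b' }
head_error   = { 'b' => 'case b', 'c' => 'case c' }
head_success = { 'a' => 'case a (now passing)' }

new_errors      = head_error.reject { |key, _| base_error.include?(key) }.values
existing_errors = head_error.select { |key, _| base_error.include?(key) }.values
resolved_errors = head_success.select { |key, _| base_error.include?(key) }.values

p new_errors      # => ["case c"]
p existing_errors # => ["case b"]
p resolved_errors # => ["case a (now passing)"]

# error_count (new + existing) feeds the new "errored" summary field, while
# resolved errors are folded into resolved_count alongside resolved failures.
p new_errors.count + existing_errors.count # => 2
p resolved_errors.count                    # => 1
```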
......@@ -699,6 +699,137 @@ describe 'Merge request > User sees merge widget', :js do
end
end
context 'when a new error exists' do
let(:base_reports) do
Gitlab::Ci::Reports::TestReports.new.tap do |reports|
reports.get_suite('rspec').add_test_case(create_test_case_rspec_success)
reports.get_suite('junit').add_test_case(create_test_case_java_success)
end
end
let(:head_reports) do
Gitlab::Ci::Reports::TestReports.new.tap do |reports|
reports.get_suite('rspec').add_test_case(create_test_case_rspec_success)
reports.get_suite('junit').add_test_case(create_test_case_java_error)
end
end
it 'shows test reports summary which includes the new error' do
within(".js-reports-container") do
click_button 'Expand'
expect(page).to have_content('Test summary contained 1 failed/error test result out of 2 total tests')
within(".js-report-section-container") do
expect(page).to have_content('rspec found no changed test results out of 1 total test')
expect(page).to have_content('junit found 1 failed/error test result out of 1 total test')
expect(page).to have_content('New')
expect(page).to have_content('addTest')
end
end
end
context 'when user clicks the new error' do
it 'shows the test report detail' do
within(".js-reports-container") do
click_button 'Expand'
within(".js-report-section-container") do
click_button 'addTest'
expect(page).to have_content('8.88')
end
end
end
end
end
context 'when an existing error exists' do
let(:base_reports) do
Gitlab::Ci::Reports::TestReports.new.tap do |reports|
reports.get_suite('rspec').add_test_case(create_test_case_rspec_error)
reports.get_suite('junit').add_test_case(create_test_case_java_success)
end
end
let(:head_reports) do
Gitlab::Ci::Reports::TestReports.new.tap do |reports|
reports.get_suite('rspec').add_test_case(create_test_case_rspec_error)
reports.get_suite('junit').add_test_case(create_test_case_java_success)
end
end
it 'shows test reports summary which includes the existing error' do
within(".js-reports-container") do
click_button 'Expand'
expect(page).to have_content('Test summary contained 1 failed/error test result out of 2 total tests')
within(".js-report-section-container") do
expect(page).to have_content('rspec found 1 failed/error test result out of 1 total test')
expect(page).to have_content('junit found no changed test results out of 1 total test')
expect(page).not_to have_content('New')
expect(page).to have_content('Test#sum when a is 4 and b is 4 returns summary')
end
end
end
context 'when user clicks the existing error' do
it 'shows test report detail of it' do
within(".js-reports-container") do
click_button 'Expand'
within(".js-report-section-container") do
click_button 'Test#sum when a is 4 and b is 4 returns summary'
expect(page).to have_content('4.44')
end
end
end
end
end
context 'when a resolved error exists' do
let(:base_reports) do
Gitlab::Ci::Reports::TestReports.new.tap do |reports|
reports.get_suite('rspec').add_test_case(create_test_case_rspec_success)
reports.get_suite('junit').add_test_case(create_test_case_java_error)
end
end
let(:head_reports) do
Gitlab::Ci::Reports::TestReports.new.tap do |reports|
reports.get_suite('rspec').add_test_case(create_test_case_rspec_success)
reports.get_suite('junit').add_test_case(create_test_case_java_success)
end
end
it 'shows test reports summary which includes the resolved error' do
within(".js-reports-container") do
click_button 'Expand'
expect(page).to have_content('Test summary contained 1 fixed test result out of 2 total tests')
within(".js-report-section-container") do
expect(page).to have_content('rspec found no changed test results out of 1 total test')
expect(page).to have_content('junit found 1 fixed test result out of 1 total test')
expect(page).to have_content('addTest')
end
end
end
context 'when user clicks the resolved error' do
it 'shows test report detail of it' do
within(".js-reports-container") do
click_button 'Expand'
within(".js-report-section-container") do
click_button 'addTest'
expect(page).to have_content('5.55')
end
end
end
end
end
context 'properly truncates the report' do
let(:base_reports) do
Gitlab::Ci::Reports::TestReports.new.tap do |reports|
......
......@@ -12,11 +12,13 @@
"properties": {
"total": { "type": "integer" },
"resolved": { "type": "integer" },
"errored": { "type": "integer" },
"failed": { "type": "integer" }
},
"required": [
"total",
"resolved",
"errored",
"failed"
]
},
......
......@@ -16,17 +16,17 @@
"properties": {
"total": { "type": "integer" },
"resolved": { "type": "integer" },
"errored": { "type": "integer" },
"failed": { "type": "integer" }
},
"required": [
"total",
"resolved",
"failed"
]
"required": ["total", "resolved", "errored", "failed"]
},
"new_failures": { "type": "array", "items": { "$ref": "test_case.json" } },
"resolved_failures": { "type": "array", "items": { "$ref": "test_case.json" } },
"existing_failures": { "type": "array", "items": { "$ref": "test_case.json" } }
"existing_failures": { "type": "array", "items": { "$ref": "test_case.json" } },
"new_errors": { "type": "array", "items": { "$ref": "test_case.json" } },
"resolved_errors": { "type": "array", "items": { "$ref": "test_case.json" } },
"existing_errors": { "type": "array", "items": { "$ref": "test_case.json" } }
},
"additionalProperties": false
}
......@@ -35,6 +35,16 @@ describe('Reports store utils', () => {
);
});
it('should render text for multiple errored results', () => {
const name = 'Test summary';
const data = { errored: 7, total: 10 };
const result = utils.summaryTextBuilder(name, data);
expect(result).toBe(
'Test summary contained 7 failed/error test results out of 10 total tests',
);
});
it('should render text for multiple fixed results', () => {
const name = 'Test summary';
const data = { resolved: 4, total: 10 };
......@@ -62,6 +72,27 @@ describe('Reports store utils', () => {
'Test summary contained 1 failed/error test result and 1 fixed test result out of 10 total tests',
);
});
it('should render text for singular failed, errored, and fixed results', () => {
// these will be singular when the copy is updated
const name = 'Test summary';
const data = { failed: 1, errored: 1, resolved: 1, total: 10 };
const result = utils.summaryTextBuilder(name, data);
expect(result).toBe(
'Test summary contained 2 failed/error test results and 1 fixed test result out of 10 total tests',
);
});
it('should render text for multiple failed, errored, and fixed results', () => {
const name = 'Test summary';
const data = { failed: 2, errored: 3, resolved: 4, total: 10 };
const result = utils.summaryTextBuilder(name, data);
expect(result).toBe(
'Test summary contained 5 failed/error test results and 4 fixed test results out of 10 total tests',
);
});
});
describe('reportTextBuilder', () => {
......@@ -89,6 +120,14 @@ describe('Reports store utils', () => {
expect(result).toBe('Rspec found 3 failed/error test results out of 10 total tests');
});
it('should render text for multiple errored results', () => {
const name = 'Rspec';
const data = { errored: 7, total: 10 };
const result = utils.reportTextBuilder(name, data);
expect(result).toBe('Rspec found 7 failed/error test results out of 10 total tests');
});
it('should render text for multiple fixed results', () => {
const name = 'Rspec';
const data = { resolved: 4, total: 10 };
......@@ -116,6 +155,27 @@ describe('Reports store utils', () => {
'Rspec found 1 failed/error test result and 1 fixed test result out of 10 total tests',
);
});
it('should render text for singular failed, errored, and fixed results', () => {
// these will be singular when the copy is updated
const name = 'Rspec';
const data = { failed: 1, errored: 1, resolved: 1, total: 10 };
const result = utils.reportTextBuilder(name, data);
expect(result).toBe(
'Rspec found 2 failed/error test results and 1 fixed test result out of 10 total tests',
);
});
it('should render text for multiple failed, errored, and fixed results', () => {
const name = 'Rspec';
const data = { failed: 2, errored: 3, resolved: 4, total: 10 };
const result = utils.reportTextBuilder(name, data);
expect(result).toBe(
'Rspec found 5 failed/error test results and 4 fixed test results out of 10 total tests',
);
});
});
describe('statusIcon', () => {
......
......@@ -5,6 +5,7 @@ import state from '~/reports/store/state';
import component from '~/reports/components/grouped_test_reports_app.vue';
import mountComponent from '../../helpers/vue_mount_component_helper';
import newFailedTestReports from '../mock_data/new_failures_report.json';
import newErrorsTestReports from '../mock_data/new_errors_report.json';
import successTestReports from '../mock_data/no_failures_report.json';
import mixedResultsTestReports from '../mock_data/new_and_fixed_failures_report.json';
import resolvedFailures from '../mock_data/resolved_failures.json';
......@@ -99,6 +100,34 @@ describe('Grouped Test Reports App', () => {
});
});
describe('with new error result', () => {
beforeEach(() => {
mock.onGet('test_results.json').reply(200, newErrorsTestReports, {});
vm = mountComponent(Component, {
endpoint: 'test_results.json',
});
});
it('renders error summary text + new badge', done => {
setTimeout(() => {
expect(vm.$el.querySelector('.gl-spinner')).toBeNull();
expect(vm.$el.querySelector('.js-code-text').textContent.trim()).toEqual(
'Test summary contained 2 failed/error test results out of 11 total tests',
);
expect(vm.$el.textContent).toContain(
'karma found 2 failed/error test results out of 3 total tests',
);
expect(vm.$el.textContent).toContain('New');
expect(vm.$el.textContent).toContain(
'rspec:pg found no changed test results out of 8 total tests',
);
done();
}, 0);
});
});
describe('with mixed results', () => {
beforeEach(() => {
mock.onGet('test_results.json').reply(200, mixedResultsTestReports, {});
......@@ -127,7 +156,7 @@ describe('Grouped Test Reports App', () => {
});
});
describe('with resolved failures', () => {
describe('with resolved failures and resolved errors', () => {
beforeEach(() => {
mock.onGet('test_results.json').reply(200, resolvedFailures, {});
vm = mountComponent(Component, {
......@@ -139,11 +168,11 @@ describe('Grouped Test Reports App', () => {
setTimeout(() => {
expect(vm.$el.querySelector('.gl-spinner')).toBeNull();
expect(vm.$el.querySelector('.js-code-text').textContent.trim()).toEqual(
'Test summary contained 2 fixed test results out of 11 total tests',
'Test summary contained 4 fixed test results out of 11 total tests',
);
expect(vm.$el.textContent).toContain(
'rspec:pg found 2 fixed test results out of 8 total tests',
'rspec:pg found 4 fixed test results out of 8 total tests',
);
done();
}, 0);
......@@ -161,6 +190,19 @@ describe('Grouped Test Reports App', () => {
done();
}, 0);
});
it('renders resolved errors', done => {
setTimeout(() => {
expect(vm.$el.querySelector('.report-block-container').textContent).toContain(
resolvedFailures.suites[0].resolved_errors[0].name,
);
expect(vm.$el.querySelector('.report-block-container').textContent).toContain(
resolvedFailures.suites[0].resolved_errors[1].name,
);
done();
}, 0);
});
});
describe('with error', () => {
......
{"status":"failed","summary":{"total":11,"resolved":2,"failed":2},"suites":[{"name":"rspec:pg","status":"failed","summary":{"total":8,"resolved":2,"failed":1},"new_failures":[{"status":"failed","name":"Test#subtract when a is 2 and b is 1 returns correct result","execution_time":0.00908,"system_output":"Failure/Error: is_expected.to eq(1)\n\n expected: 1\n got: 3\n\n (compared using ==)\n./spec/test_spec.rb:43:in `block (4 levels) in <top (required)>'"}],"resolved_failures":[{"status":"success","name":"Test#sum when a is 1 and b is 2 returns summary","execution_time":0.000318,"system_output":null},{"status":"success","name":"Test#sum when a is 100 and b is 200 returns summary","execution_time":0.000074,"system_output":null}],"existing_failures":[]},{"name":"java ant","status":"failed","summary":{"total":3,"resolved":0,"failed":1},"new_failures":[],"resolved_failures":[],"existing_failures":[{"status":"failed","name":"sumTest","execution_time":0.004,"system_output":"junit.framework.AssertionFailedError: expected:<3> but was:<-1>\n\tat CalculatorTest.sumTest(Unknown Source)\n\tat java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method)\n\tat java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)\n\tat java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)\n"}]}]}
\ No newline at end of file
{
"status": "failed",
"summary": { "total": 11, "resolved": 2, "errored": 0, "failed": 2 },
"suites": [
{
"name": "rspec:pg",
"status": "failed",
"summary": { "total": 8, "resolved": 2, "errored": 0, "failed": 1 },
"new_failures": [
{
"status": "failed",
"name": "Test#subtract when a is 2 and b is 1 returns correct result",
"execution_time": 0.00908,
"system_output": "Failure/Error: is_expected.to eq(1)\n\n expected: 1\n got: 3\n\n (compared using ==)\n./spec/test_spec.rb:43:in `block (4 levels) in <top (required)>'"
}
],
"resolved_failures": [
{
"status": "success",
"name": "Test#sum when a is 1 and b is 2 returns summary",
"execution_time": 0.000318,
"system_output": null
},
{
"status": "success",
"name": "Test#sum when a is 100 and b is 200 returns summary",
"execution_time": 0.000074,
"system_output": null
}
],
"existing_failures": [],
"new_errors": [],
"resolved_errors": [],
"existing_errors": []
},
{
"name": "java ant",
"status": "failed",
"summary": { "total": 3, "resolved": 0, "errored": 0, "failed": 1 },
"new_failures": [],
"resolved_failures": [],
"existing_failures": [
{
"status": "failed",
"name": "sumTest",
"execution_time": 0.004,
"system_output": "junit.framework.AssertionFailedError: expected:<3> but was:<-1>\n\tat CalculatorTest.sumTest(Unknown Source)\n\tat java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method)\n\tat java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)\n\tat java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)\n"
}
],
"new_errors": [],
"resolved_errors": [],
"existing_errors": []
}
]
}
{
"summary": { "total": 11, "resolved": 0, "errored": 2, "failed": 0 },
"suites": [
{
"name": "rspec:pg",
"summary": { "total": 8, "resolved": 0, "errored": 0, "failed": 0 },
"new_failures": [],
"resolved_failures": [],
"existing_failures": [],
"new_errors": [],
"resolved_errors": [],
"existing_errors": []
},
{
"name": "karma",
"summary": { "total": 3, "resolved": 0, "errored": 2, "failed": 0 },
"new_failures": [],
"resolved_failures": [],
"existing_failures": [],
"new_errors": [
{
"result": "error",
"name": "Test#sum when a is 1 and b is 2 returns summary",
"execution_time": 0.009411,
"system_output": "Failed: Error in render: 'TypeError: Cannot read property 'status' of undefined'"
},
{
"result": "error",
"name": "Test#sum when a is 100 and b is 200 returns summary",
"execution_time": 0.000162,
"system_output": "Failed: Error in render: 'TypeError: Cannot read property 'length' of undefined'"
}
],
"resolved_errors": [],
"existing_errors": []
}
]
}
{"summary":{"total":11,"resolved":0,"failed":2},"suites":[{"name":"rspec:pg","summary":{"total":8,"resolved":0,"failed":2},"new_failures":[{"result":"failure","name":"Test#sum when a is 1 and b is 2 returns summary","execution_time":0.009411,"system_output":"Failure/Error: is_expected.to eq(3)\n\n expected: 3\n got: -1\n\n (compared using ==)\n./spec/test_spec.rb:12:in `block (4 levels) in <top (required)>'"},{"result":"failure","name":"Test#sum when a is 100 and b is 200 returns summary","execution_time":0.000162,"system_output":"Failure/Error: is_expected.to eq(300)\n\n expected: 300\n got: -100\n\n (compared using ==)\n./spec/test_spec.rb:21:in `block (4 levels) in <top (required)>'"}],"resolved_failures":[],"existing_failures":[]},{"name":"java ant","summary":{"total":3,"resolved":0,"failed":0},"new_failures":[],"resolved_failures":[],"existing_failures":[]}]}
\ No newline at end of file
{
"summary": { "total": 11, "resolved": 0, "errored": 0, "failed": 2 },
"suites": [
{
"name": "rspec:pg",
"summary": { "total": 8, "resolved": 0, "errored": 0, "failed": 2 },
"new_failures": [
{
"result": "failure",
"name": "Test#sum when a is 1 and b is 2 returns summary",
"execution_time": 0.009411,
"system_output": "Failure/Error: is_expected.to eq(3)\n\n expected: 3\n got: -1\n\n (compared using ==)\n./spec/test_spec.rb:12:in `block (4 levels) in <top (required)>'"
},
{
"result": "failure",
"name": "Test#sum when a is 100 and b is 200 returns summary",
"execution_time": 0.000162,
"system_output": "Failure/Error: is_expected.to eq(300)\n\n expected: 300\n got: -100\n\n (compared using ==)\n./spec/test_spec.rb:21:in `block (4 levels) in <top (required)>'"
}
],
"resolved_failures": [],
"existing_failures": [],
"new_errors": [],
"resolved_errors": [],
"existing_errors": []
},
{
"name": "java ant",
"summary": { "total": 3, "resolved": 0, "errored": 0, "failed": 0 },
"new_failures": [],
"resolved_failures": [],
"existing_failures": [],
"new_errors": [],
"resolved_errors": [],
"existing_errors": []
}
]
}
{"status":"success","summary":{"total":11,"resolved":0,"failed":0},"suites":[{"name":"rspec:pg","status":"success","summary":{"total":8,"resolved":0,"failed":0},"new_failures":[],"resolved_failures":[],"existing_failures":[]},{"name":"java ant","status":"success","summary":{"total":3,"resolved":0,"failed":0},"new_failures":[],"resolved_failures":[],"existing_failures":[]}]}
\ No newline at end of file
{
"status": "success",
"summary": { "total": 11, "resolved": 0, "errored": 0, "failed": 0 },
"suites": [
{
"name": "rspec:pg",
"status": "success",
"summary": { "total": 8, "resolved": 0, "errored": 0, "failed": 0 },
"new_failures": [],
"resolved_failures": [],
"existing_failures": [],
"new_errors": [],
"resolved_errors": [],
"existing_errors": []
},
{
"name": "java ant",
"status": "success",
"summary": { "total": 3, "resolved": 0, "errored": 0, "failed": 0 },
"new_failures": [],
"resolved_failures": [],
"existing_failures": [],
"new_errors": [],
"resolved_errors": [],
"existing_errors": []
}
]
}
{
"status": "success",
"summary": { "total": 11, "resolved": 2, "failed": 0 },
"summary": { "total": 11, "resolved": 4, "errored": 0, "failed": 0 },
"suites": [
{
"name": "rspec:pg",
"status": "success",
"summary": { "total": 8, "resolved": 2, "failed": 0 },
"summary": { "total": 8, "resolved": 4, "errored": 0, "failed": 0 },
"new_failures": [],
"resolved_failures": [
{
......@@ -23,15 +23,36 @@
"stack_trace": null
}
],
"existing_failures": []
"existing_failures": [],
"new_errors": [],
"resolved_errors": [
{
"status": "success",
"name": "Test#sum when a is 4 and b is 4 returns summary",
"execution_time": 0.00342,
"system_output": null,
"stack_trace": null
},
{
"status": "success",
"name": "Test#sum when a is 40 and b is 400 returns summary",
"execution_time": 0.0000231,
"system_output": null,
"stack_trace": null
}
],
"existing_errors": []
},
{
"name": "java ant",
"status": "success",
"summary": { "total": 3, "resolved": 0, "failed": 0 },
"summary": { "total": 3, "resolved": 0, "errored": 0, "failed": 0 },
"new_failures": [],
"resolved_failures": [],
"existing_failures": []
"existing_failures": [],
"new_errors": [],
"resolved_errors": [],
"existing_errors": []
}
]
}
......@@ -99,7 +99,7 @@ describe Gitlab::Ci::Parsers::Test::Junit do
let(:testcase_content) { '<error>Some error</error>' }
it_behaves_like '<testcase> XML parser',
::Gitlab::Ci::Reports::TestCase::STATUS_FAILED,
::Gitlab::Ci::Reports::TestCase::STATUS_ERROR,
'Some error'
end
......
......@@ -57,6 +57,17 @@ describe Gitlab::Ci::Reports::TestReportsComparer do
is_expected.to eq(Gitlab::Ci::Reports::TestCase::STATUS_FAILED)
end
end
context 'when there is an error test case in head suites' do
before do
head_reports.get_suite('rspec').add_test_case(create_test_case_rspec_success)
head_reports.get_suite('junit').add_test_case(create_test_case_java_error)
end
it 'returns the total status in head suite' do
is_expected.to eq(Gitlab::Ci::Reports::TestCase::STATUS_FAILED)
end
end
end
describe '#total_count' do
......@@ -75,7 +86,7 @@ describe Gitlab::Ci::Reports::TestReportsComparer do
describe '#resolved_count' do
subject { comparer.resolved_count }
context 'when there is a resolved test case in head suites' do
context 'when there is a resolved failure test case in head suites' do
before do
base_reports.get_suite('rspec').add_test_case(create_test_case_rspec_success)
base_reports.get_suite('junit').add_test_case(create_test_case_java_failed)
......@@ -88,6 +99,19 @@ describe Gitlab::Ci::Reports::TestReportsComparer do
end
end
context 'when there is a resolved error test case in head suites' do
before do
base_reports.get_suite('rspec').add_test_case(create_test_case_rspec_success)
base_reports.get_suite('junit').add_test_case(create_test_case_java_error)
head_reports.get_suite('rspec').add_test_case(create_test_case_rspec_success)
head_reports.get_suite('junit').add_test_case(create_test_case_java_success)
end
it 'returns the correct count' do
is_expected.to eq(1)
end
end
context 'when there are no resolved test cases in head suites' do
before do
base_reports.get_suite('rspec').add_test_case(create_test_case_rspec_success)
......@@ -127,4 +151,30 @@ describe Gitlab::Ci::Reports::TestReportsComparer do
end
end
end
describe '#error_count' do
subject { comparer.error_count }
context 'when there is an error test case in head suites' do
before do
head_reports.get_suite('rspec').add_test_case(create_test_case_rspec_success)
head_reports.get_suite('junit').add_test_case(create_test_case_java_error)
end
it 'returns the correct count' do
is_expected.to eq(1)
end
end
context 'when there are no error test cases in head suites' do
before do
head_reports.get_suite('rspec').add_test_case(create_test_case_rspec_success)
head_reports.get_suite('junit').add_test_case(create_test_case_rspec_success)
end
it 'returns the correct count' do
is_expected.to eq(0)
end
end
end
end
......@@ -9,8 +9,9 @@ describe Gitlab::Ci::Reports::TestSuiteComparer do
let(:name) { 'rpsec' }
let(:base_suite) { Gitlab::Ci::Reports::TestSuite.new(name) }
let(:head_suite) { Gitlab::Ci::Reports::TestSuite.new(name) }
let(:test_case_success) { create_test_case_rspec_success }
let(:test_case_failed) { create_test_case_rspec_failed }
let(:test_case_success) { create_test_case_java_success }
let(:test_case_failed) { create_test_case_java_failed }
let(:test_case_error) { create_test_case_java_error }
describe '#new_failures' do
subject { comparer.new_failures }
......@@ -135,6 +136,129 @@ describe Gitlab::Ci::Reports::TestSuiteComparer do
end
end
describe '#new_errors' do
subject { comparer.new_errors }
context 'when head suite has a new error test case which does not exist in base' do
before do
base_suite.add_test_case(test_case_success)
head_suite.add_test_case(test_case_error)
end
it 'returns the error test case' do
is_expected.to eq([test_case_error])
end
end
context 'when head suite still has an error test case which errored in base' do
before do
base_suite.add_test_case(test_case_error)
head_suite.add_test_case(test_case_error)
end
it 'does not return the error test case' do
is_expected.to be_empty
end
end
context 'when head suite has a success test case which errored in base' do
before do
base_suite.add_test_case(test_case_error)
head_suite.add_test_case(test_case_success)
end
it 'does not return the error test case' do
is_expected.to be_empty
end
end
end
describe '#existing_errors' do
subject { comparer.existing_errors }
context 'when head suite has a new error test case which does not exist in base' do
before do
base_suite.add_test_case(test_case_success)
head_suite.add_test_case(test_case_error)
end
it 'does not return the error test case' do
is_expected.to be_empty
end
end
context 'when head suite still has an error test case which errored in base' do
before do
base_suite.add_test_case(test_case_error)
head_suite.add_test_case(test_case_error)
end
it 'returns the error test case' do
is_expected.to eq([test_case_error])
end
end
context 'when head suite has a success test case which errored in base' do
before do
base_suite.add_test_case(test_case_error)
head_suite.add_test_case(test_case_success)
end
it 'does not return the error test case' do
is_expected.to be_empty
end
end
end
describe '#resolved_errors' do
subject { comparer.resolved_errors }
context 'when head suite has a new error test case which does not exist in base' do
before do
base_suite.add_test_case(test_case_success)
head_suite.add_test_case(test_case_error)
end
it 'does not return the error test case' do
is_expected.to be_empty
end
it 'returns the correct resolved count' do
expect(comparer.resolved_count).to eq(0)
end
end
context 'when head suite still has an error test case which errored in base' do
before do
base_suite.add_test_case(test_case_error)
head_suite.add_test_case(test_case_error)
end
it 'does not return the error test case' do
is_expected.to be_empty
end
it 'returns the correct resolved count' do
expect(comparer.resolved_count).to eq(0)
end
end
context 'when head suite has a success test case which errored in base' do
before do
base_suite.add_test_case(test_case_error)
head_suite.add_test_case(test_case_success)
end
it 'returns the resolved test case' do
is_expected.to eq([test_case_success])
end
it 'returns the correct resolved count' do
expect(comparer.resolved_count).to eq(1)
end
end
end
describe '#total_count' do
subject { comparer.total_count }
......@@ -208,7 +332,17 @@ describe Gitlab::Ci::Reports::TestSuiteComparer do
head_suite.add_test_case(test_case_failed)
end
it 'returns the total status in head suite' do
it 'returns the total status in head suite as failed' do
is_expected.to eq(Gitlab::Ci::Reports::TestCase::STATUS_FAILED)
end
end
context 'when there is an error test case in head suite' do
before do
head_suite.add_test_case(test_case_error)
end
it 'returns the total status in head suite as failed' do
is_expected.to eq(Gitlab::Ci::Reports::TestCase::STATUS_FAILED)
end
end
......
......@@ -74,6 +74,15 @@ describe Gitlab::Ci::Reports::TestSuite do
it { is_expected.to eq(Gitlab::Ci::Reports::TestCase::STATUS_FAILED) }
end
context 'when a test case errored' do
before do
test_suite.add_test_case(test_case_success)
test_suite.add_test_case(test_case_error)
end
it { is_expected.to eq(Gitlab::Ci::Reports::TestCase::STATUS_FAILED) }
end
end
Gitlab::Ci::Reports::TestCase::STATUS_TYPES.each do |status_type|
......
......@@ -24,7 +24,7 @@ describe TestReportsComparerEntity do
it 'contains correct compared test reports details' do
expect(subject[:status]).to eq('success')
expect(subject[:summary]).to include(total: 2, resolved: 0, failed: 0)
expect(subject[:summary]).to include(total: 2, resolved: 0, failed: 0, errored: 0)
expect(subject[:suites].first[:name]).to eq('rspec')
expect(subject[:suites].first[:status]).to eq('success')
expect(subject[:suites].second[:name]).to eq('junit')
......@@ -42,7 +42,7 @@ describe TestReportsComparerEntity do
it 'contains correct compared test reports details' do
expect(subject[:status]).to eq('failed')
expect(subject[:summary]).to include(total: 2, resolved: 0, failed: 1)
expect(subject[:summary]).to include(total: 2, resolved: 0, failed: 1, errored: 0)
expect(subject[:suites].first[:name]).to eq('rspec')
expect(subject[:suites].first[:status]).to eq('success')
expect(subject[:suites].second[:name]).to eq('junit')
......@@ -60,7 +60,7 @@ describe TestReportsComparerEntity do
it 'contains correct compared test reports details' do
expect(subject[:status]).to eq('success')
expect(subject[:summary]).to include(total: 2, resolved: 1, failed: 0)
expect(subject[:summary]).to include(total: 2, resolved: 1, failed: 0, errored: 0)
expect(subject[:suites].first[:name]).to eq('rspec')
expect(subject[:suites].first[:status]).to eq('success')
expect(subject[:suites].second[:name]).to eq('junit')
......
......@@ -12,6 +12,7 @@ describe TestSuiteComparerEntity do
let(:head_suite) { Gitlab::Ci::Reports::TestSuite.new(name) }
let(:test_case_success) { create_test_case_rspec_success }
let(:test_case_failed) { create_test_case_rspec_failed }
let(:test_case_error) { create_test_case_rspec_error }
describe '#as_json' do
subject { entity.as_json }
......@@ -25,7 +26,7 @@ describe TestSuiteComparerEntity do
it 'contains correct compared test suite details' do
expect(subject[:name]).to eq(name)
expect(subject[:status]).to eq('failed')
expect(subject[:summary]).to include(total: 1, resolved: 0, failed: 1)
expect(subject[:summary]).to include(total: 1, resolved: 0, failed: 1, errored: 0)
subject[:new_failures].first.tap do |new_failure|
expect(new_failure[:status]).to eq(test_case_failed.status)
expect(new_failure[:name]).to eq(test_case_failed.name)
......@@ -37,6 +38,27 @@ describe TestSuiteComparerEntity do
end
end
context 'when head suite has a new error test case which does not exist in base' do
before do
base_suite.add_test_case(test_case_success)
head_suite.add_test_case(test_case_error)
end
it 'contains correct compared test suite details' do
expect(subject[:name]).to eq(name)
expect(subject[:status]).to eq('failed')
expect(subject[:summary]).to include(total: 1, resolved: 0, failed: 0, errored: 1)
subject[:new_errors].first.tap do |new_error|
expect(new_error[:status]).to eq(test_case_error.status)
expect(new_error[:name]).to eq(test_case_error.name)
expect(new_error[:execution_time]).to eq(test_case_error.execution_time)
expect(new_error[:system_output]).to eq(test_case_error.system_output)
end
expect(subject[:resolved_failures]).to be_empty
expect(subject[:existing_failures]).to be_empty
end
end
context 'when head suite still has a failed test case which failed in base' do
before do
base_suite.add_test_case(test_case_failed)
......@@ -46,7 +68,7 @@ describe TestSuiteComparerEntity do
it 'contains correct compared test suite details' do
expect(subject[:name]).to eq(name)
expect(subject[:status]).to eq('failed')
expect(subject[:summary]).to include(total: 1, resolved: 0, failed: 1)
expect(subject[:summary]).to include(total: 1, resolved: 0, failed: 1, errored: 0)
expect(subject[:new_failures]).to be_empty
expect(subject[:resolved_failures]).to be_empty
subject[:existing_failures].first.tap do |existing_failure|
......@@ -67,7 +89,7 @@ describe TestSuiteComparerEntity do
it 'contains correct compared test suite details' do
expect(subject[:name]).to eq(name)
expect(subject[:status]).to eq('success')
expect(subject[:summary]).to include(total: 1, resolved: 1, failed: 0)
expect(subject[:summary]).to include(total: 1, resolved: 1, failed: 0, errored: 0)
expect(subject[:new_failures]).to be_empty
subject[:resolved_failures].first.tap do |resolved_failure|
expect(resolved_failure[:status]).to eq(test_case_success.status)
......@@ -88,42 +110,57 @@ describe TestSuiteComparerEntity do
context 'prefers new over existing and resolved' do
before do
3.times { add_new_failure }
3.times { add_new_error }
3.times { add_existing_failure }
3.times { add_existing_error }
3.times { add_resolved_failure }
3.times { add_resolved_error }
end
it 'returns 2 new failures, and 1 of resolved and existing' do
expect(subject[:summary]).to include(total: 9, resolved: 3, failed: 6)
it 'returns 2 of each new category, and 1 of each resolved and existing' do
expect(subject[:summary]).to include(total: 18, resolved: 6, failed: 6, errored: 6)
expect(subject[:new_failures].count).to eq(2)
expect(subject[:new_errors].count).to eq(2)
expect(subject[:existing_failures].count).to eq(1)
expect(subject[:existing_errors].count).to eq(1)
expect(subject[:resolved_failures].count).to eq(1)
expect(subject[:resolved_errors].count).to eq(1)
end
end
context 'prefers existing over resolved' do
before do
3.times { add_existing_failure }
3.times { add_existing_error }
3.times { add_resolved_failure }
3.times { add_resolved_error }
end
it 'returns 2 existing failures, and 1 resolved' do
expect(subject[:summary]).to include(total: 6, resolved: 3, failed: 3)
it 'returns 2 of each existing category, and 1 of each resolved' do
expect(subject[:summary]).to include(total: 12, resolved: 6, failed: 3, errored: 3)
expect(subject[:new_failures].count).to eq(0)
expect(subject[:new_errors].count).to eq(0)
expect(subject[:existing_failures].count).to eq(2)
expect(subject[:existing_errors].count).to eq(2)
expect(subject[:resolved_failures].count).to eq(1)
expect(subject[:resolved_errors].count).to eq(1)
end
end
context 'limits amount of resolved' do
before do
3.times { add_resolved_failure }
3.times { add_resolved_error }
end
it 'returns 2 resolved failures' do
expect(subject[:summary]).to include(total: 3, resolved: 3, failed: 0)
it 'returns 2 of each resolved category' do
expect(subject[:summary]).to include(total: 6, resolved: 6, failed: 0, errored: 0)
expect(subject[:new_failures].count).to eq(0)
expect(subject[:new_errors].count).to eq(0)
expect(subject[:existing_failures].count).to eq(0)
expect(subject[:existing_errors].count).to eq(0)
expect(subject[:resolved_failures].count).to eq(2)
expect(subject[:resolved_errors].count).to eq(2)
end
end
......@@ -134,19 +171,38 @@ describe TestSuiteComparerEntity do
head_suite.add_test_case(failed_case)
end
def add_new_error
error_case = create_test_case_rspec_error(SecureRandom.hex)
head_suite.add_test_case(error_case)
end
def add_existing_failure
failed_case = create_test_case_rspec_failed(SecureRandom.hex)
base_suite.add_test_case(failed_case)
head_suite.add_test_case(failed_case)
end
def add_existing_error
error_case = create_test_case_rspec_error(SecureRandom.hex)
base_suite.add_test_case(error_case)
head_suite.add_test_case(error_case)
end
def add_resolved_failure
case_name = SecureRandom.hex
failed_case = create_test_case_rspec_failed(case_name)
success_case = create_test_case_rspec_success(case_name)
failed_case = create_test_case_java_failed(case_name)
success_case = create_test_case_java_success(case_name)
base_suite.add_test_case(failed_case)
head_suite.add_test_case(success_case)
end
def add_resolved_error
case_name = SecureRandom.hex
error_case = create_test_case_java_error(case_name)
success_case = create_test_case_java_success(case_name)
base_suite.add_test_case(error_case)
head_suite.add_test_case(success_case)
end
end
end
end