Commit 488f9105 authored by Grant Young, committed by Toon Claes

Rename Browser Performance Widget in code

It was named only 'performance' in code, so we want to refactor it now
since we'll have multiple performance report types incoming.
parent cd2aa237
......@@ -34,6 +34,7 @@ module Ci
license_management: 'gl-license-management-report.json',
license_scanning: 'gl-license-scanning-report.json',
performance: 'performance.json',
browser_performance: 'browser-performance.json',
metrics: 'metrics.txt',
lsif: 'lsif.json',
dotenv: '.env',
......@@ -73,6 +74,7 @@ module Ci
license_management: :raw,
license_scanning: :raw,
performance: :raw,
browser_performance: :raw,
terraform: :raw,
requirements: :raw,
coverage_fuzzing: :raw
......@@ -93,6 +95,7 @@ module Ci
lsif
metrics
performance
browser_performance
sast
secret_detection
requirements
......@@ -180,7 +183,7 @@ module Ci
codequality: 9, ## EE-specific
license_management: 10, ## EE-specific
license_scanning: 101, ## EE-specific till 13.0
performance: 11, ## EE-specific
performance: 11, ## EE-specific till 13.2
metrics: 12, ## EE-specific
metrics_referee: 13, ## runner referees
network_referee: 14, ## runner referees
......@@ -192,7 +195,8 @@ module Ci
cluster_applications: 20,
secret_detection: 21, ## EE-specific
requirements: 22, ## EE-specific
coverage_fuzzing: 23 ## EE-specific
coverage_fuzzing: 23, ## EE-specific
browser_performance: 24 ## EE-specific
}
enum file_format: {
......
# frozen_string_literal: true
class AddBrowserPerformanceToPlanLimits < ActiveRecord::Migration[6.0]
DOWNTIME = false
def change
add_column :plan_limits, "ci_max_artifact_size_browser_performance", :integer, default: 0, null: false
end
end
......@@ -13814,7 +13814,8 @@ CREATE TABLE public.plan_limits (
ci_max_artifact_size_cluster_applications integer DEFAULT 0 NOT NULL,
ci_max_artifact_size_secret_detection integer DEFAULT 0 NOT NULL,
ci_max_artifact_size_requirements integer DEFAULT 0 NOT NULL,
ci_max_artifact_size_coverage_fuzzing integer DEFAULT 0 NOT NULL
ci_max_artifact_size_coverage_fuzzing integer DEFAULT 0 NOT NULL,
ci_max_artifact_size_browser_performance integer DEFAULT 0 NOT NULL
);
CREATE SEQUENCE public.plan_limits_id_seq
......@@ -23643,5 +23644,6 @@ COPY "schema_migrations" (version) FROM STDIN;
20200706005325
20200706170536
20200707071941
20200707094341
\.
......@@ -251,10 +251,10 @@ dashboards.
> - Introduced in GitLab 11.5.
> - Requires GitLab Runner 11.5 and above.
The `performance` report collects [Performance metrics](../../user/project/merge_requests/browser_performance_testing.md)
The `performance` report collects [Browser Performance Testing metrics](../../user/project/merge_requests/browser_performance_testing.md)
as artifacts.
The collected Performance report will be uploaded to GitLab as an artifact and will
The collected Browser Performance report will be uploaded to GitLab as an artifact and will
be automatically shown in merge requests.
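A minimal job that produces this report might look like the following sketch (the
`script` line is a placeholder for your own sitespeed.io invocation):

```yaml
performance:
  stage: performance
  script:
    - ./run-sitespeed-with-gl-plugin.sh   # hypothetical script that writes performance.json
  artifacts:
    reports:
      performance: performance.json
```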
#### `artifacts:reports:metrics` **(PREMIUM)**
......
......@@ -117,7 +117,7 @@ The following table lists available parameters for jobs:
| [`when`](#when) | When to run job. Also available: `when:manual` and `when:delayed`. |
| [`environment`](#environment) | Name of an environment to which the job deploys. Also available: `environment:name`, `environment:url`, `environment:on_stop`, `environment:auto_stop_in` and `environment:action`. |
| [`cache`](#cache) | List of files that should be cached between subsequent runs. Also available: `cache:paths`, `cache:key`, `cache:untracked`, and `cache:policy`. |
| [`artifacts`](#artifacts) | List of files and directories to attach to a job on success. Also available: `artifacts:paths`, `artifacts:exclude`, `artifacts:expose_as`, `artifacts:name`, `artifacts:untracked`, `artifacts:when`, `artifacts:expire_in`, `artifacts:reports`, `artifacts:reports:junit`, `artifacts:reports:cobertura`, and `artifacts:reports:terraform`.<br><br>In GitLab [Enterprise Edition](https://about.gitlab.com/pricing/), these are available: `artifacts:reports:codequality`, `artifacts:reports:sast`, `artifacts:reports:dependency_scanning`, `artifacts:reports:container_scanning`, `artifacts:reports:dast`, `artifacts:reports:license_scanning`, `artifacts:reports:license_management` (removed in GitLab 13.0),`artifacts:reports:performance` and `artifacts:reports:metrics`. |
| [`artifacts`](#artifacts) | List of files and directories to attach to a job on success. Also available: `artifacts:paths`, `artifacts:exclude`, `artifacts:expose_as`, `artifacts:name`, `artifacts:untracked`, `artifacts:when`, `artifacts:expire_in`, `artifacts:reports`, `artifacts:reports:junit`, `artifacts:reports:cobertura`, and `artifacts:reports:terraform`.<br><br>In GitLab [Enterprise Edition](https://about.gitlab.com/pricing/), these are available: `artifacts:reports:codequality`, `artifacts:reports:sast`, `artifacts:reports:dependency_scanning`, `artifacts:reports:container_scanning`, `artifacts:reports:dast`, `artifacts:reports:license_scanning`, `artifacts:reports:license_management` (removed in GitLab 13.0), `artifacts:reports:performance` and `artifacts:reports:metrics`. |
| [`dependencies`](#dependencies) | Restrict which artifacts are passed to a specific job by providing a list of jobs to fetch artifacts from. |
| [`coverage`](#coverage) | Code coverage settings for a given job. |
| [`retry`](#retry) | When and how many times a job can be auto-retried in case of a failure. |
......@@ -3148,7 +3148,7 @@ These are the available report types:
| [`artifacts:reports:dast`](../pipelines/job_artifacts.md#artifactsreportsdast-ultimate) **(ULTIMATE)** | The `dast` report collects Dynamic Application Security Testing vulnerabilities. |
| [`artifacts:reports:license_management`](../pipelines/job_artifacts.md#artifactsreportslicense_management-ultimate) **(ULTIMATE)** | The `license_management` report collects Licenses (*removed from GitLab 13.0*). |
| [`artifacts:reports:license_scanning`](../pipelines/job_artifacts.md#artifactsreportslicense_scanning-ultimate) **(ULTIMATE)** | The `license_scanning` report collects Licenses. |
| [`artifacts:reports:performance`](../pipelines/job_artifacts.md#artifactsreportsperformance-premium) **(PREMIUM)** | The `performance` report collects Performance metrics. |
| [`artifacts:reports:performance`](../pipelines/job_artifacts.md#artifactsreportsperformance-premium) **(PREMIUM)** | The `performance` report collects Browser Performance metrics. |
| [`artifacts:reports:metrics`](../pipelines/job_artifacts.md#artifactsreportsmetrics-premium) **(PREMIUM)** | The `metrics` report collects Metrics. |
#### `dependencies`
......
......@@ -10,20 +10,16 @@ type: reference, howto
> [Introduced](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/3507) in [GitLab Premium](https://about.gitlab.com/pricing/) 10.3.
If your application offers a web interface and you're using
[GitLab CI/CD](../../../ci/README.md), you can quickly determine the performance
impact of pending code changes.
[GitLab CI/CD](../../../ci/README.md), you can quickly determine the rendering performance
impact of pending code changes in the browser.
## Overview
GitLab uses [Sitespeed.io](https://www.sitespeed.io), a free and open source
tool, for measuring the performance of web sites. GitLab has built a simple
[Sitespeed plugin](https://gitlab.com/gitlab-org/gl-performance) which outputs
the performance score for each page analyzed in a file called `performance.json`.
The [Sitespeed.io performance score](https://examples.sitespeed.io/6.0/2017-11-23-23-43-35/help.html)
is a composite value based on best practices.
GitLab can [show the Performance report](#how-browser-performance-testing-works)
in the merge request widget area.
tool, for measuring the rendering performance of web sites. The
[Sitespeed plugin](https://gitlab.com/gitlab-org/gl-performance) that GitLab built outputs
the performance score for each page analyzed in a file called `browser-performance.json`.
This data can then be shown on merge requests.
## Use cases
......@@ -41,7 +37,7 @@ Consider the following workflow:
## How browser performance testing works
First, define a job in your `.gitlab-ci.yml` file that generates the
[Performance report artifact](../../../ci/pipelines/job_artifacts.md#artifactsreportsperformance-premium).
[Browser Performance report artifact](../../../ci/pipelines/job_artifacts.md#artifactsreportsperformance-premium).
GitLab then checks this report, compares key performance metrics for each page
between the source and target branches, and shows the information in the merge request.
......@@ -49,12 +45,13 @@ For an example Performance job, see
[Configuring Browser Performance Testing](#configuring-browser-performance-testing).
NOTE: **Note:**
If the Performance report has no data to compare, such as when you add the
Performance job in your `.gitlab-ci.yml` for the very first time, no information
displays in the merge request widget area. Consecutive merge requests will have data for
comparison, and the Performance report will be shown properly.
If the Browser Performance report has no data to compare, such as when you add the
Browser Performance job in your `.gitlab-ci.yml` for the very first time,
the Browser Performance report widget won't show. The job must have run at least
once on the target branch (`master`, for example) before results display in a
merge request targeting that branch.
![Performance Widget](img/browser_performance_testing.png)
![Browser Performance Widget](img/browser_performance_testing.png)
## Configuring Browser Performance Testing
......@@ -64,21 +61,7 @@ using Docker-in-Docker.
1. First, set up GitLab Runner with a
[Docker-in-Docker build](../../../ci/docker/using_docker_build.md#use-docker-in-docker-workflow-with-docker-executor).
1. After configuring the Runner, add a new job to `.gitlab-ci.yml` that generates
the expected report.
1. Define the `performance` job according to your version of GitLab:
- For GitLab 12.4 and later - [include](../../../ci/yaml/README.md#includetemplate) the
[`Browser-Performance.gitlab-ci.yml` template](https://gitlab.com/gitlab-org/gitlab/blob/master/lib/gitlab/ci/templates/Verify/Browser-Performance.gitlab-ci.yml) provided as a part of your GitLab installation.
- For GitLab versions earlier than 12.4 - Copy and use the job as defined in the
[`Browser-Performance.gitlab-ci.yml` template](https://gitlab.com/gitlab-org/gitlab/blob/master/lib/gitlab/ci/templates/Verify/Browser-Performance.gitlab-ci.yml).
CAUTION: **Caution:**
The job definition provided by the template does not support Kubernetes yet.
For a complete example of a more complex setup that works in Kubernetes, see
[`Browser-Performance-Testing.gitlab-ci.yml`](https://gitlab.com/gitlab-org/gitlab/blob/master/lib/gitlab/ci/templates/Jobs/Browser-Performance-Testing.gitlab-ci.yml).
1. Add the following to your `.gitlab-ci.yml` file:
1. Configure the default Browser Performance Testing CI job as follows in your `.gitlab-ci.yml` file:
```yaml
include:
......@@ -89,24 +72,32 @@ using Docker-in-Docker.
URL: https://example.com
```
CAUTION: **Caution:**
The job definition provided by the template is supported in GitLab 11.5 and later versions.
It also requires GitLab Runner 11.5 or later. For earlier versions, use the
[previous job definitions](#previous-job-definitions).
NOTE: **Note:**
For versions before 12.4, see the information for [older GitLab versions](#gitlab-versions-123-and-older).
If you are using a Kubernetes cluster, use [`template: Jobs/Browser-Performance-Testing.gitlab-ci.yml`](https://gitlab.com/gitlab-org/gitlab/blob/master/lib/gitlab/ci/templates/Jobs/Browser-Performance-Testing.gitlab-ci.yml)
instead.
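A minimal include for the Kubernetes-oriented template might look like this sketch:

```yaml
include:
  template: Jobs/Browser-Performance-Testing.gitlab-ci.yml
```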
The above example creates a `performance` job in your CI/CD pipeline and runs
sitespeed.io against the webpage you defined in `URL` to gather key metrics.
The [GitLab plugin for sitespeed.io](https://gitlab.com/gitlab-org/gl-performance)
is downloaded to save the report as a [Performance report artifact](../../../ci/pipelines/job_artifacts.md#artifactsreportsperformance-premium)
that you can later download and analyze. Due to implementation limitations, we always
take the latest Performance artifact available.
The full HTML sitespeed.io report is saved as an artifact, and if
[GitLab Pages](../pages/index.md) is enabled, it can be viewed directly in your browser.
The example uses a CI/CD template that is included in all GitLab installations since
12.4, but it will not work with Kubernetes clusters. If you are using GitLab 12.3
or older, you must [add the configuration manually](#gitlab-versions-123-and-older).
The template uses the [GitLab plugin for sitespeed.io](https://gitlab.com/gitlab-org/gl-performance),
and it saves the full HTML sitespeed.io report as a [Browser Performance report artifact](../../../ci/pipelines/job_artifacts.md#artifactsreportsperformance-premium)
that you can later download and analyze. This implementation always takes the latest
Browser Performance artifact available. If [GitLab Pages](../pages/index.md) is enabled,
you can view the report directly in your browser.
You can also customize the jobs with environment variables:
- `SITESPEED_IMAGE`: Configure the Docker image to use for the job (default `sitespeedio/sitespeed.io`), but not the image version.
- `SITESPEED_VERSION`: Configure the version of the Docker image to use for the job (default `13.3.0`).
- `SITESPEED_OPTIONS`: Configure any additional sitespeed.io options as required (default `nil`). Refer to the [sitespeed.io documentation](https://www.sitespeed.io/documentation/sitespeed.io/configuration/) for more details.
You can also customize options by setting the `SITESPEED_OPTIONS` variable.
For example, you can override the number of runs sitespeed.io
makes on the given URL:
makes on the given URL, and change the version:
```yaml
include:
......@@ -114,18 +105,11 @@ include:
performance:
variables:
URL: https://example.com
URL: https://www.sitespeed.io/
SITESPEED_VERSION: 13.2.0
SITESPEED_OPTIONS: -n 5
```
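Similarly, `SITESPEED_IMAGE` lets you point the job at a different image, for example a
mirror in your own registry (the registry path below is hypothetical):

```yaml
include:
  template: Verify/Browser-Performance.gitlab-ci.yml

performance:
  variables:
    URL: https://example.com
    SITESPEED_IMAGE: registry.example.com/mirror/sitespeed.io
```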
For further customization options for sitespeed.io, including the ability to provide a
list of URLs to test, please see the
[Sitespeed.io Configuration](https://www.sitespeed.io/documentation/sitespeed.io/configuration/)
documentation.
TIP: **Tip:**
Key metrics are automatically extracted and shown in the merge request widget.
### Configuring degradation threshold
> [Introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/27599) in GitLab 13.0.
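As a sketch, assuming the job reads a `DEGRADATION_THRESHOLD` variable (an assumption not
confirmed by this excerpt), a threshold could be configured like this:

```yaml
performance:
  variables:
    DEGRADATION_THRESHOLD: 5
```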
......@@ -152,15 +136,12 @@ The above CI YAML configuration is great for testing against static environments
be extended for dynamic environments, but a few extra steps are required:
1. The `performance` job should run after the dynamic environment has started.
1. In the `review` job, persist the hostname and upload it as an artifact so
it's available to the `performance` job. The same can be done for static
environments like staging and production to unify the code path. You can save it
as an artifact with `echo $CI_ENVIRONMENT_URL > environment_url.txt`
1. In the `review` job:
1. Generate a URL list file with the dynamic URL.
1. Save the file as an artifact, for example with `echo $CI_ENVIRONMENT_URL > environment_url.txt`
in your job's `script`.
1. In the `performance` job, read the previous artifact into an environment
variable. In this case, use `$URL` because the sitespeed.io command
uses it for the URL parameter. Because Review App URLs are dynamic, define
the `URL` variable through `before_script` instead of `variables`.
1. Pass the list as the URL environment variable (which can be a URL or a file containing URLs)
to the `performance` job.
1. You can now run the sitespeed.io container against the desired hostname and
paths.
......@@ -193,20 +174,21 @@ review:
performance:
dependencies:
- review
before_script:
- export URL=$(cat environment_url.txt)
variables:
URL: environment_url.txt
```
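For completeness, the `review` job referenced above has to persist `environment_url.txt`
as an artifact; a sketch, with a hypothetical deployment command:

```yaml
review:
  stage: deploy
  script:
    - ./deploy-review-app.sh                          # hypothetical deployment step
    - echo $CI_ENVIRONMENT_URL > environment_url.txt
  artifacts:
    paths:
      - environment_url.txt
```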
### Previous job definitions
### GitLab versions 12.3 and older
CAUTION: **Caution:**
Before GitLab 11.5, the Performance job and artifact had to be named specifically
to automatically extract report data and show it in the merge request widget.
While these old job definitions are still maintained, they have been deprecated
and may be removed in next major release, GitLab 12.0.
GitLab recommends you update your current `.gitlab-ci.yml` configuration to reflect that change.
Browser Performance Testing has gone through several changes since its introduction.
This section details these changes and how to run the test based on your
GitLab version:
For GitLab 11.4 and earlier, the job should look like:
- In GitLab 12.4 [a job template was made available](https://gitlab.com/gitlab-org/gitlab/blob/master/lib/gitlab/ci/templates/Verify/Browser-Performance.gitlab-ci.yml).
- In GitLab 13.2 the feature was renamed from `Performance` to `Browser Performance` and
additional template variables were added. The job name in the template is still `performance`
for compatibility reasons, but it may be renamed to match in a future iteration.
- For GitLab 11.5 to 12.3, no template is available and the job has to be defined manually as follows:
```yaml
performance:
......@@ -214,28 +196,45 @@ performance:
image: docker:git
variables:
URL: https://example.com
SITESPEED_VERSION: 13.3.0
SITESPEED_OPTIONS: ''
services:
- docker:stable-dind
script:
- mkdir gitlab-exporter
- wget -O ./gitlab-exporter/index.js https://gitlab.com/gitlab-org/gl-performance/raw/master/index.js
- mkdir sitespeed-results
- docker run --shm-size=1g --rm -v "$(pwd)":/sitespeed.io sitespeedio/sitespeed.io:6.3.1 --plugins.add ./gitlab-exporter --outputFolder sitespeed-results $URL
- docker run --shm-size=1g --rm -v "$(pwd)":/sitespeed.io sitespeedio/sitespeed.io:$SITESPEED_VERSION --plugins.add ./gitlab-exporter --outputFolder sitespeed-results $URL $SITESPEED_OPTIONS
- mv sitespeed-results/data/performance.json performance.json
artifacts:
paths:
- performance.json
- sitespeed-results/
reports:
performance: performance.json
```
<!-- ## Troubleshooting
- For GitLab 11.4 and earlier, the job should be defined as follows:
Include any troubleshooting steps that you can foresee. If you know beforehand what issues
one might have when setting this up, or when something is changed, or on upgrading, it's
important to describe those, too. Think of things that may go wrong and include them here.
This is important to minimize requests for support, and to avoid doc comments with
questions that you know someone might ask.
```yaml
performance:
stage: performance
image: docker:git
variables:
URL: https://example.com
services:
- docker:stable-dind
script:
- mkdir gitlab-exporter
- wget -O ./gitlab-exporter/index.js https://gitlab.com/gitlab-org/gl-performance/raw/master/index.js
- mkdir sitespeed-results
- docker run --shm-size=1g --rm -v "$(pwd)":/sitespeed.io sitespeedio/sitespeed.io:6.3.1 --plugins.add ./gitlab-exporter --outputFolder sitespeed-results $URL
- mv sitespeed-results/data/performance.json performance.json
artifacts:
paths:
- performance.json
- sitespeed-results/
```
Each scenario can be a third-level heading, e.g. `### Getting error message X`.
If you have none to add when creating a doc, leave this section in place
but commented out to help encourage others to add to it in the future. -->
Upgrading to the latest GitLab version and using the templates is recommended to ensure
you receive the latest updates, including updates to the default sitespeed.io version.
......@@ -14,7 +14,6 @@ import MrWidgetApprovals from './components/approvals/approvals.vue';
import MrWidgetGeoSecondaryNode from './components/states/mr_widget_secondary_geo_node.vue';
import MergeTrainHelperText from './components/merge_train_helper_text.vue';
import { MTWPS_MERGE_STRATEGY } from '~/vue_merge_request_widget/constants';
import { TOTAL_SCORE_METRIC_NAME } from 'ee/vue_merge_request_widget/stores/constants';
export default {
components: {
......@@ -33,9 +32,9 @@ export default {
data() {
return {
isLoadingCodequality: false,
isLoadingPerformance: false,
isLoadingBrowserPerformance: false,
loadingCodequalityFailed: false,
loadingPerformanceFailed: false,
loadingBrowserPerformanceFailed: false,
loadingLicenseReportFailed: false,
};
},
......@@ -59,36 +58,36 @@ export default {
this.mr.codeclimateMetrics.resolvedIssues.length > 0))
);
},
hasPerformanceMetrics() {
hasBrowserPerformanceMetrics() {
return (
this.mr.performanceMetrics &&
((this.mr.performanceMetrics.degraded && this.mr.performanceMetrics.degraded.length > 0) ||
(this.mr.performanceMetrics.improved && this.mr.performanceMetrics.improved.length > 0))
this.mr.browserPerformanceMetrics?.degraded?.length > 0 ||
this.mr.browserPerformanceMetrics?.improved?.length > 0 ||
this.mr.browserPerformanceMetrics?.same?.length > 0
);
},
hasPerformancePaths() {
const { performance } = this.mr || {};
hasBrowserPerformancePaths() {
const browserPerformance = this.mr?.browserPerformance || {};
return Boolean(performance?.head_path && performance?.base_path);
return Boolean(browserPerformance?.head_path && browserPerformance?.base_path);
},
degradedTotalScore() {
return this.mr?.performanceMetrics?.degraded.find(
metric => metric.name === TOTAL_SCORE_METRIC_NAME,
degradedBrowserPerformanceTotalScore() {
return this.mr?.browserPerformanceMetrics?.degraded.find(
metric => metric.name === __('Total Score'),
);
},
hasPerformanceDegradation() {
const threshold = this.mr?.performance?.degradation_threshold || 0;
hasBrowserPerformanceDegradation() {
const threshold = this.mr?.browserPerformance?.degradation_threshold || 0;
if (!threshold) {
return true;
}
const totalScoreDelta = this.degradedTotalScore?.delta || 0;
const totalScoreDelta = this.degradedBrowserPerformanceTotalScore?.delta || 0;
return threshold + totalScoreDelta <= 0;
},
shouldRenderPerformance() {
return this.hasPerformancePaths && this.hasPerformanceDegradation;
shouldRenderBrowserPerformance() {
return this.hasBrowserPerformancePaths && this.hasBrowserPerformanceDegradation;
},
shouldRenderSecurityReport() {
const { enabledReports } = this.mr;
......@@ -139,37 +138,40 @@ export default {
return {};
},
performanceText() {
const { improved, degraded } = this.mr.performanceMetrics;
browserPerformanceText() {
const { improved, degraded, same } = this.mr.browserPerformanceMetrics;
const text = [];
const reportNumbers = [];
if (!improved.length && !degraded.length) {
text.push(s__('ciReport|No changes to performance metrics'));
} else if (improved.length || degraded.length) {
text.push(s__('ciReport|Performance metrics'));
if (improved.length || degraded.length || same.length) {
text.push(s__('ciReport|Browser performance test metrics: '));
if (improved.length) {
text.push(n__(' improved on %d point', ' improved on %d points', improved.length));
}
if (improved.length > 0 && degraded.length > 0) {
text.push(__(' and'));
}
if (degraded.length) {
text.push(n__(' degraded on %d point', ' degraded on %d points', degraded.length));
}
if (degraded.length > 0)
reportNumbers.push(
sprintf(s__('ciReport|%{degradedNum} degraded'), { degradedNum: degraded.length }),
);
if (same.length > 0)
reportNumbers.push(sprintf(s__('ciReport|%{sameNum} same'), { sameNum: same.length }));
if (improved.length > 0)
reportNumbers.push(
sprintf(s__('ciReport|%{improvedNum} improved'), { improvedNum: improved.length }),
);
} else {
text.push(s__('ciReport|Browser performance test metrics: No changes'));
}
return text.join('');
return [...text, ...reportNumbers.join(', ')].join('');
},
codequalityStatus() {
return this.checkReportStatus(this.isLoadingCodequality, this.loadingCodequalityFailed);
},
performanceStatus() {
return this.checkReportStatus(this.isLoadingPerformance, this.loadingPerformanceFailed);
browserPerformanceStatus() {
return this.checkReportStatus(
this.isLoadingBrowserPerformance,
this.loadingBrowserPerformanceFailed,
);
},
shouldRenderMergeTrainHelperText() {
......@@ -191,9 +193,9 @@ export default {
this.fetchCodeQuality();
}
},
hasPerformancePaths(newVal) {
hasBrowserPerformancePaths(newVal) {
if (newVal) {
this.fetchPerformance();
this.fetchBrowserPerformance();
}
},
},
......@@ -241,19 +243,20 @@ export default {
});
},
fetchPerformance() {
const { head_path, base_path } = this.mr.performance;
fetchBrowserPerformance() {
const { head_path, base_path } = this.mr.browserPerformance;
this.isLoadingPerformance = true;
this.isLoadingBrowserPerformance = true;
Promise.all([this.service.fetchReport(head_path), this.service.fetchReport(base_path)])
.then(values => {
this.mr.comparePerformanceMetrics(values[0], values[1]);
this.isLoadingPerformance = false;
this.mr.compareBrowserPerformanceMetrics(values[0], values[1]);
})
.catch(() => {
this.isLoadingPerformance = false;
this.loadingPerformanceFailed = true;
this.loadingBrowserPerformanceFailed = true;
})
.finally(() => {
this.isLoadingBrowserPerformance = false;
});
},
......@@ -308,16 +311,17 @@ export default {
class="js-codequality-widget mr-widget-border-top mr-report"
/>
<report-section
v-if="shouldRenderPerformance"
:status="performanceStatus"
:loading-text="translateText('performance').loading"
:error-text="translateText('performance').error"
:success-text="performanceText"
:unresolved-issues="mr.performanceMetrics.degraded"
:resolved-issues="mr.performanceMetrics.improved"
:has-issues="hasPerformanceMetrics"
v-if="shouldRenderBrowserPerformance"
:status="browserPerformanceStatus"
:loading-text="translateText('browser-performance').loading"
:error-text="translateText('browser-performance').error"
:success-text="browserPerformanceText"
:unresolved-issues="mr.browserPerformanceMetrics.degraded"
:resolved-issues="mr.browserPerformanceMetrics.improved"
:neutral-issues="mr.browserPerformanceMetrics.same"
:has-issues="hasBrowserPerformanceMetrics"
:component="$options.componentNames.PerformanceIssueBody"
class="js-performance-widget mr-widget-border-top mr-report"
class="js-browser-performance-widget mr-widget-border-top mr-report"
/>
<grouped-metrics-reports-app
v-if="mr.metricsReportsPath"
......
/* eslint-disable import/prefer-default-export */
// This is the name of Sitespeed's Overall Score metric in the performance report
export const TOTAL_SCORE_METRIC_NAME = 'Total Score';
......@@ -29,7 +29,7 @@ export default class MergeRequestStore extends CEMergeRequestStore {
this.appUrl = gon && gon.gitlab_url;
this.initCodeclimate(data);
this.initPerformanceReport(data);
this.initBrowserPerformanceReport(data);
this.licenseScanning = data.license_scanning;
this.metricsReportsPath = data.metrics_reports_path;
......@@ -85,11 +85,12 @@ export default class MergeRequestStore extends CEMergeRequestStore {
};
}
initPerformanceReport(data) {
this.performance = data.performance;
this.performanceMetrics = {
initBrowserPerformanceReport(data) {
this.browserPerformance = data.browser_performance;
this.browserPerformanceMetrics = {
improved: [],
degraded: [],
same: [],
};
}
......@@ -119,11 +120,12 @@ export default class MergeRequestStore extends CEMergeRequestStore {
);
}
comparePerformanceMetrics(headMetrics, baseMetrics) {
const headMetricsIndexed = MergeRequestStore.normalizePerformanceMetrics(headMetrics);
const baseMetricsIndexed = MergeRequestStore.normalizePerformanceMetrics(baseMetrics);
compareBrowserPerformanceMetrics(headMetrics, baseMetrics) {
const headMetricsIndexed = MergeRequestStore.normalizeBrowserPerformanceMetrics(headMetrics);
const baseMetricsIndexed = MergeRequestStore.normalizeBrowserPerformanceMetrics(baseMetrics);
const improved = [];
const degraded = [];
const same = [];
Object.keys(headMetricsIndexed).forEach(subject => {
const subjectMetrics = headMetricsIndexed[subject];
......@@ -150,18 +152,20 @@ export default class MergeRequestStore extends CEMergeRequestStore {
} else {
degraded.push(metricData);
}
} else {
same.push(metricData);
}
}
});
});
this.performanceMetrics = { improved, degraded };
this.browserPerformanceMetrics = { improved, degraded, same };
}
// normalize performance metrics by indexing on performance subject and metric name
static normalizePerformanceMetrics(performanceData) {
// normalize browser performance metrics by indexing on performance subject and metric name
static normalizeBrowserPerformanceMetrics(browserPerformanceData) {
const indexedSubjects = {};
performanceData.forEach(({ subject, metrics }) => {
browserPerformanceData.forEach(({ subject, metrics }) => {
const indexedMetrics = {};
metrics.forEach(({ name, ...data }) => {
indexedMetrics[name] = data;
......
......@@ -21,6 +21,7 @@ module EE
DAST_REPORT_TYPES = %w[dast].freeze
REQUIREMENTS_REPORT_FILE_TYPES = %w[requirements].freeze
COVERAGE_FUZZING_REPORT_TYPES = %w[coverage_fuzzing].freeze
BROWSER_PERFORMANCE_REPORT_FILE_TYPES = %w[browser_performance performance].freeze
scope :project_id_in, ->(ids) { where(project_id: ids) }
scope :with_files_stored_remotely, -> { where(file_store: ::JobArtifactUploader::Store::REMOTE) }
......@@ -64,6 +65,7 @@ module EE
def self.associated_file_types_for(file_type)
return unless file_types.include?(file_type)
return LICENSE_SCANNING_REPORT_FILE_TYPES if LICENSE_SCANNING_REPORT_FILE_TYPES.include?(file_type)
return BROWSER_PERFORMANCE_REPORT_FILE_TYPES if BROWSER_PERFORMANCE_REPORT_FILE_TYPES.include?(file_type)
[file_type]
end
......
......@@ -45,6 +45,7 @@ module EE
container_scanning: %i[container_scanning],
dast: %i[dast],
performance: %i[merge_request_performance_metrics],
browser_performance: %i[merge_request_performance_metrics],
license_management: %i[license_scanning],
license_scanning: %i[license_scanning],
metrics: %i[metrics_reports],
......
......@@ -34,13 +34,13 @@ module EE
download_project_job_artifacts_path(
job_artifact.project,
job_artifact.job,
file_type: file_type,
file_type: job_artifact.file_type,
proxy: true)
end
end
def degradation_threshold
if (job_artifact = batch_lookup_report_artifact_for_file_type(:performance)) &&
def degradation_threshold(file_type)
if (job_artifact = batch_lookup_report_artifact_for_file_type(file_type)) &&
can?(current_user, :read_build, job_artifact.job)
job_artifact.job.degradation_threshold
end
......
......@@ -26,18 +26,18 @@ module EE
end
end
expose :performance, if: -> (mr, _) { head_pipeline_downloadable_path_for_report_type(:performance) } do
expose :browser_performance, if: -> (mr, _) { head_pipeline_downloadable_path_for_report_type(:browser_performance) } do
expose :degradation_threshold do |merge_request|
merge_request.head_pipeline&.present(current_user: current_user)
&.degradation_threshold
&.degradation_threshold(:browser_performance)
end
expose :head_path do |merge_request|
head_pipeline_downloadable_path_for_report_type(:performance)
head_pipeline_downloadable_path_for_report_type(:browser_performance)
end
expose :base_path do |merge_request|
base_pipeline_downloadable_path_for_report_type(:performance)
base_pipeline_downloadable_path_for_report_type(:browser_performance)
end
end
......
---
title: Renamed the Browser Performance Testing feature to be clearer; the CI report now also
shows unchanged values
merge_request: 34634
author:
type: changed
......@@ -6,7 +6,7 @@ FactoryBot.define do
failure_reason { Ci::Build.failure_reasons[:protected_environment_failure] }
end
%i[codequality container_scanning dast dependency_scanning license_management license_scanning performance sast secret_detection].each do |report_type|
%i[codequality container_scanning dast dependency_scanning license_management license_scanning performance browser_performance sast secret_detection].each do |report_type|
trait "legacy_#{report_type}".to_sym do
success
artifacts
......
......@@ -221,6 +221,16 @@ FactoryBot.define do
end
end
trait :browser_performance do
file_format { :raw }
file_type { :browser_performance }
after(:build) do |artifact, _|
artifact.file = fixture_file_upload(
Rails.root.join('spec/fixtures/trace/sample_trace'), 'text/plain')
end
end
trait :dependency_scanning do
file_format { :raw }
file_type { :dependency_scanning }
......
......@@ -2,7 +2,7 @@
FactoryBot.define do
factory :ee_ci_pipeline, class: 'Ci::Pipeline', parent: :ci_pipeline do
%i[container_scanning dast dependency_list dependency_scanning license_management license_scanning sast secret_detection coverage_fuzzing].each do |report_type|
%i[browser_performance codequality container_scanning coverage_fuzzing dast dependency_list dependency_scanning license_management license_scanning sast secret_detection].each do |report_type|
trait "with_#{report_type}_report".to_sym do
status { :success }
......
......@@ -11,8 +11,8 @@ import { trimText } from 'helpers/text_helper';
import mockData, {
baseIssues,
headIssues,
basePerformance,
headPerformance,
baseBrowserPerformance,
headBrowserPerformance,
parsedBaseIssues,
parsedHeadIssues,
} from './mock_data';
......@@ -40,7 +40,7 @@ describe('ee merge request widget options', () => {
let mock;
let Component;
const DEFAULT_PERFORMANCE = {
const DEFAULT_BROWSER_PERFORMANCE = {
head_path: 'head.json',
base_path: 'base.json',
};
......@@ -70,13 +70,13 @@ describe('ee merge request widget options', () => {
});
});
const findPerformanceWidget = () => vm.$el.querySelector('.js-performance-widget');
const findBrowserPerformanceWidget = () => vm.$el.querySelector('.js-browser-performance-widget');
const findSecurityWidget = () => vm.$el.querySelector('.js-security-widget');
const setPerformance = (data = {}) => {
const performance = { ...DEFAULT_PERFORMANCE, ...data };
gl.mrWidgetData.performance = performance;
vm.mr.performance = performance;
const setBrowserPerformance = (data = {}) => {
const browserPerformance = { ...DEFAULT_BROWSER_PERFORMANCE, ...data };
gl.mrWidgetData.browserPerformance = browserPerformance;
vm.mr.browserPerformance = browserPerformance;
};
const VULNERABILITY_FEEDBACK_ENDPOINT = 'vulnerability_feedback_path';
......@@ -485,25 +485,25 @@ describe('ee merge request widget options', () => {
});
});
describe('performance', () => {
describe('browser_performance', () => {
beforeEach(() => {
gl.mrWidgetData = {
...mockData,
performance: {},
browserPerformance: {},
};
});
describe('when it is loading', () => {
it('should render loading indicator', done => {
mock.onGet('head.json').reply(200, headPerformance);
mock.onGet('base.json').reply(200, basePerformance);
mock.onGet('head.json').reply(200, headBrowserPerformance);
mock.onGet('base.json').reply(200, baseBrowserPerformance);
vm = mountComponent(Component, { mrData: gl.mrWidgetData });
vm.mr.performance = { ...DEFAULT_PERFORMANCE };
vm.mr.browserPerformance = { ...DEFAULT_BROWSER_PERFORMANCE };
vm.$nextTick(() => {
expect(trimText(findPerformanceWidget().textContent)).toContain(
'Loading performance report',
expect(trimText(findBrowserPerformanceWidget().textContent)).toContain(
'Loading browser-performance report',
);
done();
......@@ -513,21 +513,23 @@ describe('ee merge request widget options', () => {
describe('with successful request', () => {
beforeEach(() => {
mock.onGet(DEFAULT_PERFORMANCE.head_path).reply(200, headPerformance);
mock.onGet(DEFAULT_PERFORMANCE.base_path).reply(200, basePerformance);
mock.onGet(DEFAULT_BROWSER_PERFORMANCE.head_path).reply(200, headBrowserPerformance);
mock.onGet(DEFAULT_BROWSER_PERFORMANCE.base_path).reply(200, baseBrowserPerformance);
vm = mountComponent(Component, { mrData: gl.mrWidgetData });
});
describe('default', () => {
beforeEach(() => {
setPerformance();
setBrowserPerformance();
});
it('should render provided data', done => {
setImmediate(() => {
expect(
trimText(vm.$el.querySelector('.js-performance-widget .js-code-text').textContent),
).toEqual('Performance metrics improved on 2 points and degraded on 1 point');
trimText(
vm.$el.querySelector('.js-browser-performance-widget .js-code-text').textContent,
),
).toEqual('Browser performance test metrics: 2 degraded, 1 same, 1 improved');
done();
});
});
......@@ -535,14 +537,16 @@ describe('ee merge request widget options', () => {
describe('text connector', () => {
it('should only render information about fixed issues', done => {
setImmediate(() => {
vm.mr.performanceMetrics.degraded = [];
vm.mr.browserPerformanceMetrics.degraded = [];
vm.mr.browserPerformanceMetrics.same = [];
Vue.nextTick(() => {
expect(
trimText(
vm.$el.querySelector('.js-performance-widget .js-code-text').textContent,
vm.$el.querySelector('.js-browser-performance-widget .js-code-text')
.textContent,
),
).toEqual('Performance metrics improved on 2 points');
).toEqual('Browser performance test metrics: 1 improved');
done();
});
});
......@@ -550,14 +554,16 @@ describe('ee merge request widget options', () => {
it('should only render information about added issues', done => {
setImmediate(() => {
vm.mr.performanceMetrics.improved = [];
vm.mr.browserPerformanceMetrics.improved = [];
vm.mr.browserPerformanceMetrics.same = [];
Vue.nextTick(() => {
expect(
trimText(
vm.$el.querySelector('.js-performance-widget .js-code-text').textContent,
vm.$el.querySelector('.js-browser-performance-widget .js-code-text')
.textContent,
),
).toEqual('Performance metrics degraded on 1 point');
).toEqual('Browser performance test metrics: 2 degraded');
done();
});
});
......@@ -573,18 +579,18 @@ describe('ee merge request widget options', () => {
'with degradation_threshold = $degradation_threshold',
({ degradation_threshold, shouldExist }) => {
beforeEach(() => {
setPerformance({ degradation_threshold });
setBrowserPerformance({ degradation_threshold });
return waitForPromises();
});
if (shouldExist) {
it('should render widget when total score degradation is above threshold', () => {
expect(findPerformanceWidget()).toExist();
expect(findBrowserPerformanceWidget()).toExist();
});
} else {
it('should not render widget when total score degradation is below threshold', () => {
expect(findPerformanceWidget()).not.toExist();
expect(findBrowserPerformanceWidget()).not.toExist();
});
}
},
......@@ -593,12 +599,12 @@ describe('ee merge request widget options', () => {
describe('with empty successful request', () => {
beforeEach(done => {
mock.onGet(DEFAULT_PERFORMANCE.head_path).reply(200, []);
mock.onGet(DEFAULT_PERFORMANCE.base_path).reply(200, []);
mock.onGet(DEFAULT_BROWSER_PERFORMANCE.head_path).reply(200, []);
mock.onGet(DEFAULT_BROWSER_PERFORMANCE.base_path).reply(200, []);
vm = mountComponent(Component, { mrData: gl.mrWidgetData });
gl.mrWidgetData.performance = { ...DEFAULT_PERFORMANCE };
vm.mr.performance = gl.mrWidgetData.performance;
gl.mrWidgetData.browserPerformance = { ...DEFAULT_BROWSER_PERFORMANCE };
vm.mr.browserPerformance = gl.mrWidgetData.browserPerformance;
// wait for network request from component watch update method
setImmediate(done);
......@@ -606,38 +612,44 @@ describe('ee merge request widget options', () => {
it('should render provided data', () => {
expect(
trimText(vm.$el.querySelector('.js-performance-widget .js-code-text').textContent),
).toEqual('No changes to performance metrics');
trimText(
vm.$el.querySelector('.js-browser-performance-widget .js-code-text').textContent,
),
).toEqual('Browser performance test metrics: No changes');
});
it('does not show Expand button', () => {
const expandButton = vm.$el.querySelector('.js-performance-widget .js-collapse-btn');
const expandButton = vm.$el.querySelector(
'.js-browser-performance-widget .js-collapse-btn',
);
expect(expandButton).toBeNull();
});
it('shows success icon', () => {
expect(
vm.$el.querySelector('.js-performance-widget .js-ci-status-icon-success'),
vm.$el.querySelector('.js-browser-performance-widget .js-ci-status-icon-success'),
).not.toBeNull();
});
});
describe('with failed request', () => {
beforeEach(() => {
mock.onGet(DEFAULT_PERFORMANCE.head_path).reply(500, []);
mock.onGet(DEFAULT_PERFORMANCE.base_path).reply(500, []);
mock.onGet(DEFAULT_BROWSER_PERFORMANCE.head_path).reply(500, []);
mock.onGet(DEFAULT_BROWSER_PERFORMANCE.base_path).reply(500, []);
vm = mountComponent(Component, { mrData: gl.mrWidgetData });
gl.mrWidgetData.performance = { ...DEFAULT_PERFORMANCE };
vm.mr.performance = gl.mrWidgetData.performance;
gl.mrWidgetData.browserPerformance = { ...DEFAULT_BROWSER_PERFORMANCE };
vm.mr.browserPerformance = gl.mrWidgetData.browserPerformance;
});
it('should render error indicator', done => {
setImmediate(() => {
expect(
trimText(vm.$el.querySelector('.js-performance-widget .js-code-text').textContent),
).toContain('Failed to load performance report');
trimText(
vm.$el.querySelector('.js-browser-performance-widget .js-code-text').textContent,
),
).toContain('Failed to load browser-performance report');
done();
});
});
......
......@@ -99,63 +99,56 @@ export const parsedBaseIssues = [
},
];
export const headPerformance = [
export const headBrowserPerformance = [
{
subject: '/some/path',
metrics: [
{
name: 'Sitespeed Score',
value: 85,
},
],
},
{
subject: '/some/other/path',
metrics: [
{
name: 'Total Score',
value: 79,
value: 80,
desiredSize: 'larger',
},
{
name: 'Requests',
value: 3,
value: 30,
desiredSize: 'smaller',
},
],
},
{
subject: '/yet/another/path',
metrics: [
name: 'Speed Index',
value: 1155,
desiredSize: 'smaller',
},
{
name: 'Sitespeed Score',
value: 80,
name: 'Transfer Size (KB)',
value: '1070.1',
desiredSize: 'smaller',
},
],
},
];
export const basePerformance = [
export const baseBrowserPerformance = [
{
subject: '/some/path',
metrics: [
{
name: 'Sitespeed Score',
value: 84,
},
],
},
{
subject: '/some/other/path',
metrics: [
{
name: 'Total Score',
value: 80,
value: 82,
desiredSize: 'larger',
},
{
name: 'Requests',
value: 4,
value: 30,
desiredSize: 'smaller',
},
{
name: 'Speed Index',
value: 1165,
desiredSize: 'smaller',
},
{
name: 'Transfer Size (KB)',
value: '1065.1',
desiredSize: 'smaller',
},
],
......
......@@ -43,7 +43,7 @@ RSpec.describe 'Jobs/Browser-Performance-Testing.gitlab-ci.yml' do
expect(pipeline.errors).to be_empty
end
shared_examples_for 'performance job on tag or branch' do
shared_examples_for 'browser_performance job on tag or branch' do
it 'by default' do
expect(build_names).to include('performance')
end
......@@ -56,19 +56,19 @@ RSpec.describe 'Jobs/Browser-Performance-Testing.gitlab-ci.yml' do
end
context 'on master' do
it_behaves_like 'performance job on tag or branch'
it_behaves_like 'browser_performance job on tag or branch'
end
context 'on another branch' do
let(:pipeline_ref) { 'feature' }
it_behaves_like 'performance job on tag or branch'
it_behaves_like 'browser_performance job on tag or branch'
end
context 'on tag' do
let(:pipeline_ref) { 'v1.0.0' }
it_behaves_like 'performance job on tag or branch'
it_behaves_like 'browser_performance job on tag or branch'
end
context 'on merge request' do
......
# frozen_string_literal: true
require 'spec_helper'
RSpec.describe 'Verify/Browser-Performance.gitlab-ci.yml' do
subject(:template) do
<<~YAML
stages:
- test
- performance
include:
- template: 'Verify/Browser-Performance.gitlab-ci.yml'
placeholder:
script:
- keep pipeline validator happy by having a job when stages are intentionally empty
YAML
end
describe 'the created pipeline' do
let(:user) { create(:admin) }
let(:project) { create(:project, :repository) }
let(:default_branch) { 'master' }
let(:pipeline_ref) { default_branch }
let(:service) { Ci::CreatePipelineService.new(project, user, ref: pipeline_ref) }
let(:pipeline) { service.execute!(:push) }
let(:build_names) { pipeline.builds.pluck(:name) }
before do
stub_ci_pipeline_yaml_file(template)
allow_any_instance_of(Ci::BuildScheduleWorker).to receive(:perform).and_return(true)
allow(project).to receive(:default_branch).and_return(default_branch)
end
it 'has no errors' do
expect(pipeline.errors).to be_empty
end
shared_examples_for 'browser_performance job on tag or branch' do
it 'by default' do
expect(build_names).to include('performance')
end
end
context 'on master' do
it_behaves_like 'browser_performance job on tag or branch'
end
context 'on another branch' do
let(:pipeline_ref) { 'feature' }
it_behaves_like 'browser_performance job on tag or branch'
end
context 'on tag' do
let(:pipeline_ref) { 'v1.0.0' }
it_behaves_like 'browser_performance job on tag or branch'
end
context 'on merge request' do
let(:service) { MergeRequests::CreatePipelineService.new(project, user) }
let(:merge_request) { create(:merge_request, :simple, source_project: project) }
let(:pipeline) { service.execute(merge_request) }
it 'has no jobs' do
expect(pipeline).to be_merge_request_event
expect(build_names).to be_empty
end
end
end
end
......@@ -63,55 +63,44 @@ RSpec.describe Ci::Pipeline do
end
describe '#batch_lookup_report_artifact_for_file_type' do
subject(:artifact) { pipeline.batch_lookup_report_artifact_for_file_type(file_type) }
let(:build_artifact) { build.job_artifacts.sample }
context 'with security report artifact' do
let!(:build) { create(:ee_ci_build, :dependency_scanning, :success, pipeline: pipeline) }
let(:file_type) { :dependency_scanning }
shared_examples '#batch_lookup_report_artifact_for_file_type' do |file_type, license|
context 'when feature is available' do
before do
stub_licensed_features(dependency_scanning: true)
stub_licensed_features("#{license}": true)
end
it 'returns right kind of artifacts' do
is_expected.to eq(build_artifact)
it "returns the #{file_type} artifact" do
expect(pipeline.batch_lookup_report_artifact_for_file_type(file_type)).to eq(pipeline.job_artifacts.sample)
end
end
context 'when looking for other type of artifact' do
let(:file_type) { :codequality }
context 'when feature is not available' do
before do
stub_licensed_features("#{license}": false)
end
it 'returns nothing' do
is_expected.to be_nil
it "doesn't return the #{file_type} artifact" do
expect(pipeline.batch_lookup_report_artifact_for_file_type(file_type)).to be_nil
end
end
end
context 'with license compliance artifact' do
before do
stub_licensed_features(license_scanning: true)
end
context 'with security report artifact' do
let_it_be(:pipeline, reload: true) { create(:ee_ci_pipeline, :with_dependency_scanning_report, project: project) }
[:license_management, :license_scanning].each do |artifact_type|
let!(:build) { create(:ee_ci_build, artifact_type, :success, pipeline: pipeline) }
include_examples '#batch_lookup_report_artifact_for_file_type', :dependency_scanning, :dependency_scanning
end
context 'when looking for license_scanning' do
let(:file_type) { :license_scanning }
context 'with license scanning artifact' do
let_it_be(:pipeline, reload: true) { create(:ee_ci_pipeline, :with_license_scanning_report, project: project) }
it 'returns artifact' do
is_expected.to eq(build_artifact)
end
include_examples '#batch_lookup_report_artifact_for_file_type', :license_scanning, :license_scanning
end
context 'when looking for license_management' do
let(:file_type) { :license_management }
context 'with browser performance artifact' do
let_it_be(:pipeline, reload: true) { create(:ee_ci_pipeline, :with_browser_performance_report, project: project) }
it 'returns artifact' do
is_expected.to eq(build_artifact)
end
end
end
include_examples '#batch_lookup_report_artifact_for_file_type', :browser_performance, :merge_request_performance_metrics
end
end
......
......@@ -93,6 +93,7 @@ RSpec.describe EE::Ci::JobArtifact do
where(:file_type, :result) do
'license_scanning' | %w(license_management license_scanning)
'codequality' | %w(codequality)
'browser_performance' | %w(browser_performance performance)
'quality' | nil
end
......
......@@ -83,4 +83,103 @@ RSpec.describe Ci::PipelinePresenter do
it { is_expected.to be_falsey }
end
end
describe '#downloadable_path_for_report_type' do
let(:current_user) { create(:user) }
before do
allow(presenter).to receive(:current_user) { current_user }
end
shared_examples '#downloadable_path_for_report_type' do |file_type, license|
context 'when feature is available' do
before do
stub_licensed_features("#{license}": true)
project.add_reporter(current_user)
end
it 'returns the downloadable path' do
expect(presenter.downloadable_path_for_report_type(file_type)).to include(
"#{project.full_path}/-/jobs/#{pipeline.builds.last.id}/artifacts/download?file_type=#{pipeline.builds.last.job_artifacts.last.file_type}")
end
end
context 'when feature is not available' do
before do
stub_licensed_features("#{license}": false)
project.add_reporter(current_user)
end
it 'doesn\'t return the downloadable path' do
expect(presenter.downloadable_path_for_report_type(file_type)).to eq(nil)
end
end
context 'when user is not authorized' do
before do
stub_licensed_features("#{license}": true)
project.add_guest(current_user)
end
it 'doesn\'t return the downloadable path' do
expect(presenter.downloadable_path_for_report_type(file_type)).to eq(nil)
end
end
end
context 'with browser_performance artifact' do
let_it_be(:pipeline, reload: true) { create(:ee_ci_pipeline, :with_browser_performance_report, project: project) }
include_examples '#downloadable_path_for_report_type', :browser_performance, :merge_request_performance_metrics
end
context 'with license_scanning artifact' do
let_it_be(:pipeline, reload: true) { create(:ee_ci_pipeline, :with_license_scanning_report, project: project) }
include_examples '#downloadable_path_for_report_type', :license_scanning, :license_scanning
end
end
describe '#degradation_threshold' do
let_it_be(:pipeline, reload: true) { create(:ee_ci_pipeline, :with_browser_performance_report, project: project) }
let(:current_user) { create(:user) }
before do
allow(presenter).to receive(:current_user) { current_user }
allow_any_instance_of(Ci::Build).to receive(:degradation_threshold).and_return(1)
end
context 'when feature is available' do
before do
project.add_reporter(current_user)
stub_licensed_features(merge_request_performance_metrics: true)
end
it 'returns the degradation threshold' do
expect(presenter.degradation_threshold(:browser_performance)).to eq(1)
end
end
context 'when feature is not available' do
before do
project.add_reporter(current_user)
stub_licensed_features(merge_request_performance_metrics: false)
end
it 'doesn\'t return the degradation threshold' do
expect(presenter.degradation_threshold(:browser_performance)).to eq(nil)
end
end
context 'when user is not authorized' do
before do
project.add_guest(current_user)
stub_licensed_features(merge_request_performance_metrics: true)
end
it 'doesn\'t return the degradation threshold' do
expect(presenter.degradation_threshold(:browser_performance)).to eq(nil)
end
end
end
end
......@@ -32,7 +32,7 @@ RSpec.describe MergeRequestWidgetEntity do
end
def create_all_artifacts
artifacts = %i(codequality performance)
artifacts = %i(codequality performance browser_performance)
artifacts.each do |artifact_type|
create(:ee_ci_build, artifact_type, :success, pipeline: pipeline, project: pipeline.project)
......@@ -64,7 +64,8 @@ RSpec.describe MergeRequestWidgetEntity do
where(:json_entry, :artifact_type) do
:codeclimate | :codequality
:performance | :performance
:browser_performance | :browser_performance
:browser_performance | :performance
end
with_them do
......@@ -109,16 +110,10 @@ RSpec.describe MergeRequestWidgetEntity do
)
allow(head_pipeline).to receive(:available_licensed_report_type?).and_return(true)
create(
:ee_ci_build,
:performance,
pipeline: head_pipeline,
yaml_variables: yaml_variables
)
end
context "when head pipeline's performance build has the threshold variable defined" do
shared_examples 'degradation_threshold' do
context "when head pipeline's browser performance build has the threshold variable defined" do
let(:yaml_variables) do
[
{ key: 'FOO', value: 'BAR' },
......@@ -127,11 +122,11 @@ RSpec.describe MergeRequestWidgetEntity do
end
it "returns the value of the variable" do
expect(subject.as_json[:performance][:degradation_threshold]).to eq(5)
expect(subject.as_json[:browser_performance][:degradation_threshold]).to eq(5)
end
end
context "when head pipeline's performance build has no threshold variable defined" do
context "when head pipeline's browser performance build has no threshold variable defined" do
let(:yaml_variables) do
[
{ key: 'FOO', value: 'BAR' }
......@@ -139,9 +134,36 @@ RSpec.describe MergeRequestWidgetEntity do
end
it "returns nil" do
expect(subject.as_json[:performance][:degradation_threshold]).to be_nil
expect(subject.as_json[:browser_performance][:degradation_threshold]).to be_nil
end
end
end
context 'with browser_performance artifact' do
before do
create(
:ee_ci_build,
:browser_performance,
pipeline: head_pipeline,
yaml_variables: yaml_variables
)
end
include_examples 'degradation_threshold'
end
context 'with performance artifact' do
before do
create(
:ee_ci_build,
:performance,
pipeline: head_pipeline,
yaml_variables: yaml_variables
)
end
include_examples 'degradation_threshold'
end
end
describe '#license_scanning', :request_store do
......
......@@ -13,7 +13,7 @@ module Gitlab
ALLOWED_KEYS =
%i[junit codequality sast secret_detection dependency_scanning container_scanning
dast performance license_management license_scanning metrics lsif
dast performance browser_performance license_management license_scanning metrics lsif
dotenv cobertura terraform accessibility cluster_applications
requirements coverage_fuzzing].freeze
......@@ -33,6 +33,7 @@ module Gitlab
validates :container_scanning, array_of_strings_or_string: true
validates :dast, array_of_strings_or_string: true
validates :performance, array_of_strings_or_string: true
validates :browser_performance, array_of_strings_or_string: true
validates :license_management, array_of_strings_or_string: true
validates :license_scanning, array_of_strings_or_string: true
validates :metrics, array_of_strings_or_string: true
......
# Read more about the feature here: https://docs.gitlab.com/ee/user/project/merge_requests/browser_performance_testing.html
performance:
stage: performance
image: docker:19.03.11
allow_failure: true
variables:
DOCKER_TLS_CERTDIR: ""
SITESPEED_IMAGE: "sitespeedio/sitespeed.io:11.2.0"
SITESPEED_IMAGE: sitespeedio/sitespeed.io
SITESPEED_VERSION: 13.3.0
SITESPEED_OPTIONS: ''
services:
- docker:19.03.11-dind
script:
......@@ -16,22 +20,22 @@ performance:
fi
- export CI_ENVIRONMENT_URL=$(cat environment_url.txt)
- mkdir gitlab-exporter
- wget -O gitlab-exporter/index.js https://gitlab.com/gitlab-org/gl-performance/raw/1.0.0/index.js
- wget -O gitlab-exporter/index.js https://gitlab.com/gitlab-org/gl-performance/raw/1.0.1/index.js
- mkdir sitespeed-results
- docker pull --quiet ${SITESPEED_IMAGE}
- |
if [ -f .gitlab-urls.txt ]
then
sed -i -e 's@^@'"$CI_ENVIRONMENT_URL"'@' .gitlab-urls.txt
docker run --shm-size=1g --rm -v "$(pwd)":/sitespeed.io ${SITESPEED_IMAGE} --plugins.add ./gitlab-exporter --outputFolder sitespeed-results .gitlab-urls.txt
docker run --shm-size=1g --rm -v "$(pwd)":/sitespeed.io $SITESPEED_IMAGE:$SITESPEED_VERSION --plugins.add ./gitlab-exporter --outputFolder sitespeed-results .gitlab-urls.txt $SITESPEED_OPTIONS
else
docker run --shm-size=1g --rm -v "$(pwd)":/sitespeed.io ${SITESPEED_IMAGE} --plugins.add ./gitlab-exporter --outputFolder sitespeed-results "$CI_ENVIRONMENT_URL"
docker run --shm-size=1g --rm -v "$(pwd)":/sitespeed.io $SITESPEED_IMAGE:$SITESPEED_VERSION --plugins.add ./gitlab-exporter --outputFolder sitespeed-results "$CI_ENVIRONMENT_URL" $SITESPEED_OPTIONS
fi
- mv sitespeed-results/data/performance.json performance.json
- mv sitespeed-results/data/performance.json browser-performance.json
artifacts:
paths:
- performance.json
- sitespeed-results/
reports:
browser_performance: browser-performance.json
rules:
- if: '$CI_KUBERNETES_ACTIVE == null || $CI_KUBERNETES_ACTIVE == ""'
when: never
......
......@@ -10,8 +10,9 @@ performance:
stage: performance
image: docker:git
variables:
URL: https://example.com
SITESPEED_VERSION: 11.2.0
URL: ''
SITESPEED_IMAGE: sitespeedio/sitespeed.io
SITESPEED_VERSION: 13.3.0
SITESPEED_OPTIONS: ''
services:
- docker:stable-dind
......@@ -19,11 +20,10 @@ performance:
- mkdir gitlab-exporter
- wget -O ./gitlab-exporter/index.js https://gitlab.com/gitlab-org/gl-performance/raw/master/index.js
- mkdir sitespeed-results
- docker run --shm-size=1g --rm -v "$(pwd)":/sitespeed.io sitespeedio/sitespeed.io:$SITESPEED_VERSION --plugins.add ./gitlab-exporter --outputFolder sitespeed-results $URL $SITESPEED_OPTIONS
- mv sitespeed-results/data/performance.json performance.json
- docker run --shm-size=1g --rm -v "$(pwd)":/sitespeed.io $SITESPEED_IMAGE:$SITESPEED_VERSION --plugins.add ./gitlab-exporter --outputFolder sitespeed-results $URL $SITESPEED_OPTIONS
- mv sitespeed-results/data/performance.json browser-performance.json
artifacts:
paths:
- performance.json
- sitespeed-results/
reports:
performance: performance.json
browser_performance: browser-performance.json
......@@ -24446,6 +24446,9 @@ msgstr ""
msgid "Total Contributions"
msgstr ""
msgid "Total Score"
msgstr ""
msgid "Total artifacts size: %{total_size}"
msgstr ""
......@@ -27076,6 +27079,12 @@ msgstr ""
msgid "cannot merge"
msgstr ""
msgid "ciReport|%{degradedNum} degraded"
msgstr ""
msgid "ciReport|%{improvedNum} improved"
msgstr ""
msgid "ciReport|%{linkStartTag}Learn more about Container Scanning %{linkEndTag}"
msgstr ""
......@@ -27103,6 +27112,9 @@ msgstr ""
msgid "ciReport|%{reportType}: Loading resulted in an error"
msgstr ""
msgid "ciReport|%{sameNum} same"
msgstr ""
msgid "ciReport|(errors when loading results)"
msgstr ""
......@@ -27127,6 +27139,12 @@ msgstr ""
msgid "ciReport|Base pipeline codequality artifact not found"
msgstr ""
msgid "ciReport|Browser performance test metrics: "
msgstr ""
msgid "ciReport|Browser performance test metrics: No changes"
msgstr ""
msgid "ciReport|Code quality"
msgstr ""
......@@ -27199,15 +27217,9 @@ msgstr ""
msgid "ciReport|No changes to code quality"
msgstr ""
msgid "ciReport|No changes to performance metrics"
msgstr ""
msgid "ciReport|No code quality issues found"
msgstr ""
msgid "ciReport|Performance metrics"
msgstr ""
msgid "ciReport|Resolve with merge request"
msgstr ""
......
......@@ -44,6 +44,8 @@ RSpec.describe Gitlab::Ci::Config::Entry::Reports do
:license_management | 'gl-license-management-report.json'
:license_scanning | 'gl-license-scanning-report.json'
:performance | 'performance.json'
:browser_performance | 'browser-performance.json'
:browser_performance | 'performance.json'
:lsif | 'lsif.json'
:dotenv | 'build.dotenv'
:cobertura | 'cobertura-coverage.xml'
......
......@@ -190,6 +190,7 @@ RSpec.describe PlanLimits do
ci_max_artifact_size_license_management
ci_max_artifact_size_license_scanning
ci_max_artifact_size_performance
ci_max_artifact_size_browser_performance
ci_max_artifact_size_metrics
ci_max_artifact_size_metrics_referee
ci_max_artifact_size_network_referee
......
......@@ -33,8 +33,8 @@ RSpec.describe Ci::RetryBuildService do
job_artifacts_sast job_artifacts_secret_detection job_artifacts_dependency_scanning
job_artifacts_container_scanning job_artifacts_dast
job_artifacts_license_management job_artifacts_license_scanning
job_artifacts_performance job_artifacts_lsif
job_artifacts_terraform job_artifacts_cluster_applications
job_artifacts_performance job_artifacts_browser_performance
job_artifacts_lsif job_artifacts_terraform job_artifacts_cluster_applications
job_artifacts_codequality job_artifacts_metrics scheduled_at
job_variables waiting_for_resource_at job_artifacts_metrics_referee
job_artifacts_network_referee job_artifacts_dotenv
......