Commit 2e4c4055 authored by GitLab Bot

Add latest changes from gitlab-org/gitlab@master

parent 97f0ae74
......@@ -19,12 +19,7 @@ import PanelType from 'ee_else_ce/monitoring/components/panel_type.vue';
import { s__ } from '~/locale';
import createFlash from '~/flash';
import glFeatureFlagsMixin from '~/vue_shared/mixins/gl_feature_flags_mixin';
import {
mergeUrlParams,
redirectTo,
refreshCurrentPage,
updateHistory,
} from '~/lib/utils/url_utility';
import { mergeUrlParams, redirectTo, updateHistory } from '~/lib/utils/url_utility';
import invalidUrl from '~/lib/utils/invalid_url';
import Icon from '~/vue_shared/components/icon.vue';
import DateTimePicker from '~/vue_shared/components/date_time_picker/date_time_picker.vue';
......@@ -273,6 +268,7 @@ export default {
...mapActions('monitoringDashboard', [
'setTimeRange',
'fetchData',
'fetchDashboardData',
'setGettingStartedEmptyState',
'setInitialState',
'setPanelGroupMetrics',
......@@ -360,7 +356,7 @@ export default {
},
refreshDashboard() {
refreshCurrentPage();
this.fetchDashboardData();
},
onTimeRangeZoom({ start, end }) {
......@@ -475,7 +471,7 @@ export default {
ref="refreshDashboardBtn"
v-gl-tooltip
variant="default"
:title="s__('Metrics|Reload this page')"
:title="s__('Metrics|Refresh dashboard')"
@click="refreshDashboard"
>
<icon name="retry" />
......
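Taken together, the monitoring dashboard component changes above replace a full page reload with an in-place data refresh: the toolbar button now dispatches the renamed `fetchDashboardData` Vuex action. A condensed sketch of the resulting wiring, using only names that appear in this diff:

```javascript
// Condensed sketch: the refresh button now re-fetches metrics in place.
import { mapActions } from 'vuex';

export default {
  methods: {
    ...mapActions('monitoringDashboard', ['fetchDashboardData']),
    refreshDashboard() {
      // Previously refreshCurrentPage() reloaded the browser; now the
      // store re-queries deployments and every metric without a reload.
      this.fetchDashboardData();
    },
  },
};
```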
......@@ -4,6 +4,7 @@ import { pickBy } from 'lodash';
import invalidUrl from '~/lib/utils/invalid_url';
import {
GlResizeObserverDirective,
GlLoadingIcon,
GlDropdown,
GlDropdownItem,
GlModal,
......@@ -37,6 +38,7 @@ export default {
MonitorStackedColumnChart,
MonitorEmptyChart,
Icon,
GlLoadingIcon,
GlTooltip,
GlDropdown,
GlDropdownItem,
......@@ -104,13 +106,17 @@ export default {
// This method is extended by ee functionality
return false;
},
graphDataHasMetrics() {
graphDataHasResult() {
return (
this.graphData.metrics &&
this.graphData.metrics[0].result &&
this.graphData.metrics[0].result.length > 0
);
},
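// True while any of this panel's metrics still has a request in flight.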
graphDataIsLoading() {
const { metrics = [] } = this.graphData;
return metrics.some(({ loading }) => loading);
},
logsPathWithTimeRange() {
const timeRange = this.zoomedTimeRange || this.timeRange;
......@@ -140,7 +146,7 @@ export default {
},
isContextualMenuShown() {
return (
this.graphDataHasMetrics &&
this.graphDataHasResult &&
!this.isPanelType('single-stat') &&
!this.isPanelType('heatmap') &&
!this.isPanelType('column') &&
......@@ -193,7 +199,7 @@ export default {
</script>
<template>
<div v-gl-resize-observer="onResize" class="prometheus-graph">
<div class="prometheus-graph-header">
<div class="d-flex align-items-center mr-3">
<h5
ref="graphTitle"
class="prometheus-graph-title gl-font-size-large font-weight-bold text-truncate append-right-8"
......@@ -203,23 +209,27 @@ export default {
<gl-tooltip :target="() => $refs.graphTitle" :disabled="!showTitleTooltip">
{{ title }}
</gl-tooltip>
<alert-widget
v-if="isContextualMenuShown && alertWidgetAvailable"
class="mx-1"
:modal-id="`alert-modal-${index}`"
:alerts-endpoint="alertsEndpoint"
:relevant-queries="graphData.metrics"
:alerts-to-manage="getGraphAlerts(graphData.metrics)"
@setAlerts="setAlerts"
/>
<div class="flex-grow-1"></div>
<div v-if="graphDataIsLoading" class="mx-1 mt-1">
<gl-loading-icon />
</div>
<div
v-if="isContextualMenuShown"
class="prometheus-graph-widgets js-graph-widgets flex-fill"
class="js-graph-widgets"
data-qa-selector="prometheus_graph_widgets"
>
<div class="d-flex align-items-center">
<alert-widget
v-if="alertWidgetAvailable"
:modal-id="`alert-modal-${index}`"
:alerts-endpoint="alertsEndpoint"
:relevant-queries="graphData.metrics"
:alerts-to-manage="getGraphAlerts(graphData.metrics)"
@setAlerts="setAlerts"
/>
<gl-dropdown
v-gl-tooltip
class="ml-auto mx-3"
toggle-class="btn btn-transparent border-0"
data-qa-selector="prometheus_widgets_dropdown"
right
......@@ -275,28 +285,28 @@ export default {
</div>
<monitor-single-stat-chart
v-if="isPanelType('single-stat') && graphDataHasMetrics"
v-if="isPanelType('single-stat') && graphDataHasResult"
:graph-data="graphData"
/>
<monitor-heatmap-chart
v-else-if="isPanelType('heatmap') && graphDataHasMetrics"
v-else-if="isPanelType('heatmap') && graphDataHasResult"
:graph-data="graphData"
/>
<monitor-bar-chart
v-else-if="isPanelType('bar') && graphDataHasMetrics"
v-else-if="isPanelType('bar') && graphDataHasResult"
:graph-data="graphData"
/>
<monitor-column-chart
v-else-if="isPanelType('column') && graphDataHasMetrics"
v-else-if="isPanelType('column') && graphDataHasResult"
:graph-data="graphData"
/>
<monitor-stacked-column-chart
v-else-if="isPanelType('stacked-column') && graphDataHasMetrics"
v-else-if="isPanelType('stacked-column') && graphDataHasResult"
:graph-data="graphData"
/>
<component
:is="timeChartComponent"
v-else-if="graphDataHasMetrics"
v-else-if="graphDataHasResult"
ref="timeChart"
:graph-data="graphData"
:deployment-data="deploymentData"
......
......@@ -10,7 +10,10 @@ export const metricStates = {
OK: 'OK',
/**
* Metric data is being fetched
* Metric data is being fetched for the first time.
*
* Not used during a data refresh; if data is already available in
* the metric, the recommended state is OK.
*/
LOADING: 'LOADING',
......
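Under these semantics, a panel being refreshed keeps rendering its stale data while `loading` is true; only a first-time fetch falls back to the `LOADING` empty state. A sketch of the idea, condensed from the `REQUEST_METRIC_RESULT` mutation later in this commit:

```javascript
import { metricStates } from '~/monitoring/constants';

// Condensed from the REQUEST_METRIC_RESULT mutation below: a metric that
// already holds a result keeps its previous state during a refresh, so
// the chart stays visible behind the inline loading spinner.
function requestMetricResult(metric) {
  metric.loading = true;
  if (!metric.result) {
    // First fetch only: no data yet, so show the LOADING empty state.
    metric.state = metricStates.LOADING;
  }
}
```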
......@@ -128,7 +128,7 @@ export const receiveMetricsDashboardSuccess = ({ commit, dispatch }, { response
commit(types.RECEIVE_METRICS_DASHBOARD_SUCCESS, dashboard);
commit(types.SET_ENDPOINTS, convertObjectPropsToCamelCase(metrics_data));
return dispatch('fetchPrometheusMetrics');
return dispatch('fetchDashboardData');
};
export const receiveMetricsDashboardFailure = ({ commit }, error) => {
commit(types.RECEIVE_METRICS_DASHBOARD_FAILURE, error);
......@@ -140,7 +140,7 @@ export const receiveMetricsDashboardFailure = ({ commit }, error) => {
* Loads timeseries data: Prometheus data points and deployment data from the project
* @param {Object} Vuex store
*/
export const fetchPrometheusMetrics = ({ state, dispatch, getters }) => {
export const fetchDashboardData = ({ state, dispatch, getters }) => {
dispatch('fetchDeploymentsData');
if (!state.timeRange) {
......
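The renamed action keeps the same fan-out shape: deployments are fetched once, then one request is dispatched per metric. A simplified sketch of that fan-out; the traversal of `state.dashboard` (the `panelGroups`/`panels` property names) is an assumption for illustration, not code from this commit:

```javascript
// Simplified sketch of the fan-out performed by fetchDashboardData.
// The dashboard traversal below is illustrative, not the exact store code.
export const fetchDashboardData = ({ state, dispatch }) => {
  dispatch('fetchDeploymentsData');

  const metrics = (state.dashboard.panelGroups || [])
    .flatMap(group => group.panels)
    .flatMap(panel => panel.metrics);

  return Promise.all(
    metrics.map(metric => dispatch('fetchPrometheusMetric', { metric })),
  );
};
```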
import Vue from 'vue';
import pick from 'lodash/pick';
import * as types from './mutation_types';
import { mapToDashboardViewModel, normalizeQueryResult } from './utils';
......@@ -26,24 +25,6 @@ const findMetricInDashboard = (metricId, dashboard) => {
return res;
};
/**
* Set a new state for a metric.
*
* Initially metric data is not populated, so `Vue.set` is
* used to add new properties to the metric.
*
* @param {Object} metric - Metric object as defined in the dashboard
* @param {Object} state - New state
* @param {Array|null} state.result - Array of results
* @param {String} state.error - Error code from metricStates
* @param {Boolean} state.loading - True if the metric is loading
*/
const setMetricState = (metric, { result = null, loading = false, state = null }) => {
Vue.set(metric, 'result', result);
Vue.set(metric, 'loading', loading);
Vue.set(metric, 'state', state);
};
/**
* Maps a backend error state to a `metricStates` constant
* @param {Object} error - Error from backend response
......@@ -116,39 +97,32 @@ export default {
*/
[types.REQUEST_METRIC_RESULT](state, { metricId }) {
const metric = findMetricInDashboard(metricId, state.dashboard);
setMetricState(metric, {
loading: true,
state: metricStates.LOADING,
});
metric.loading = true;
if (!metric.result) {
metric.state = metricStates.LOADING;
}
},
[types.RECEIVE_METRIC_RESULT_SUCCESS](state, { metricId, result }) {
if (!metricId) {
return;
}
const metric = findMetricInDashboard(metricId, state.dashboard);
metric.loading = false;
state.showEmptyState = false;
const metric = findMetricInDashboard(metricId, state.dashboard);
if (!result || result.length === 0) {
setMetricState(metric, {
state: metricStates.NO_DATA,
});
metric.state = metricStates.NO_DATA;
metric.result = null;
} else {
const normalizedResults = result.map(normalizeQueryResult);
setMetricState(metric, {
result: Object.freeze(normalizedResults),
state: metricStates.OK,
});
metric.state = metricStates.OK;
metric.result = Object.freeze(normalizedResults);
}
},
[types.RECEIVE_METRIC_RESULT_FAILURE](state, { metricId, error }) {
if (!metricId) {
return;
}
const metric = findMetricInDashboard(metricId, state.dashboard);
setMetricState(metric, {
state: emptyStateFromError(error),
});
metric.state = emptyStateFromError(error);
metric.loading = false;
metric.result = null;
},
[types.SET_INITIAL_STATE](state, initialState = {}) {
Object.assign(state, pick(initialState, initialStateKeys));
......
......@@ -76,6 +76,12 @@ const mapToMetricsViewModel = metrics =>
queryRange: query_range,
prometheusEndpointPath: prometheus_endpoint_path,
metricId: uniqMetricsId({ metric_id, id }),
// metric data
loading: false,
result: null,
state: null,
...metric,
}));
......
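Pre-declaring `loading`, `result`, and `state` on every metric is what allows the mutations above to drop `Vue.set`: Vue 2 only tracks properties that exist when an object becomes reactive. A minimal sketch of that caveat, assuming Vue 2.6+ for `Vue.observable`:

```javascript
import Vue from 'vue';

// Vue 2 only observes keys that exist when the object is made reactive,
// which is why the view model now pre-declares the metric data fields.
const metric = Vue.observable({ loading: false, result: null, state: null });

// Plain assignment is reactive for pre-declared keys; Vue.set would only
// be needed to add a brand-new key after the fact.
metric.loading = true;
```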
<script>
/* eslint-disable @gitlab/vue-require-i18n-strings */
import { mapState, mapActions } from 'vuex';
import { GlSkeletonLoading } from '@gitlab/ui';
import DiffFileHeader from '~/diffs/components/diff_file_header.vue';
......@@ -96,7 +95,7 @@ export default {
<td class="old_line diff-line-num"></td>
<td class="new_line diff-line-num"></td>
<td v-if="error" class="js-error-lazy-load-diff diff-loading-error-block">
{{ error }} Unable to load the diff
{{ __('Unable to load the diff') }}
<button
class="btn-link btn-link-retry btn-no-padding js-toggle-lazy-diff-retry-button"
@click="fetchDiff"
......
......@@ -74,7 +74,7 @@ export default {
<div class="table-section section-20 section-wrap">
<div role="rowheader" class="table-mobile-header">{{ __('Name') }}</div>
<div class="table-mobile-content">{{ testCase.name }}</div>
<div class="table-mobile-content pr-md-1 text-truncate">{{ testCase.name }}</div>
</div>
<div class="table-section section-10 section-wrap">
......
<template functional>
<footer class="form-actions d-flex justify-content-between">
<div><slot name="prepend"></slot></div>
<div><slot></slot></div>
<div><slot name="append"></slot></div>
</footer>
</template>
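A usage sketch for the functional footer component above; the slot contents are placeholders, not taken from any real GitLab view (the snapshot test later in this commit exercises the same three slots):

```javascript
// Illustrative usage of form_footer_actions.vue with all three slots.
import FormFooterActions from '~/vue_shared/components/form/form_footer_actions.vue';

export default {
  components: { FormFooterActions },
  template: `
    <form-footer-actions>
      <template #prepend><button type="button">Delete</button></template>
      <button type="submit">Save changes</button>
      <template #append><a href="#">Cancel</a></template>
    </form-footer-actions>
  `,
};
```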
......@@ -84,13 +84,6 @@
border-radius: $border-radius-default;
}
.prometheus-graph-header {
display: flex;
align-items: center;
justify-content: space-between;
margin-bottom: $gl-padding-8;
}
.alert-current-setting {
max-width: 240px;
......
......@@ -3,14 +3,18 @@
module Projects
module Import
class JiraController < Projects::ApplicationController
before_action :authenticate_user!
before_action :check_issues_available!
before_action :authorize_read_project!
before_action :jira_import_enabled?
before_action :jira_integration_configured?
before_action :authorize_admin_project!, only: [:import]
def show
@is_jira_configured = @project.jira_service.present?
return if Feature.enabled?(:jira_issue_import_vue, @project)
unless @project.latest_jira_import&.in_progress?
if !@project.latest_jira_import&.in_progress? && current_user&.can?(:admin_project, @project)
jira_client = @project.jira_service.client
jira_projects = jira_client.Project.all
......
......@@ -16,7 +16,7 @@ module Resolvers
def authorized_resource?(project)
return false unless project.jira_issues_import_feature_flag_enabled?
Ability.allowed?(context[:current_user], :admin_project, project)
context[:current_user].present? && Ability.allowed?(context[:current_user], :read_project, project)
end
end
end
......
......@@ -46,6 +46,7 @@ module JiraImport
def validate
return build_error_response(_('Jira import feature is disabled.')) unless project.jira_issues_import_feature_flag_enabled?
return build_error_response(_('You do not have permissions to run the import.')) unless user.can?(:admin_project, project)
return build_error_response(_('Cannot import because issues are not available in this project.')) unless project.feature_available?(:issues, user)
return build_error_response(_('Jira integration not configured.')) unless project.jira_service&.active?
return build_error_response(_('Unable to find Jira project to import data from.')) if jira_project_key.blank?
return build_error_response(_('Jira import is already running.')) if import_in_progress?
......
---
title: Refresh metrics dashboard data without reloading the page
merge_request: 28756
author:
type: added
---
title: Remove 'error' from diff note error message
merge_request: 29281
author:
type: fixed
......@@ -54,17 +54,17 @@ sudo touch /etc/gitlab/disable-postgresql-upgrade
## Updating to GitLab 12.0
WARNING: **Warning:**
CAUTION: **Warning:**
This version is affected by [a bug that results in new LFS objects not being replicated to
Geo secondary nodes](https://gitlab.com/gitlab-org/gitlab/issues/32696). The issue is fixed
in GitLab 12.1. Please upgrade to GitLab 12.1 or newer.
in GitLab 12.1. Please upgrade to GitLab 12.1 or later.
## Updating to GitLab 11.11
WARNING: **Warning:**
CAUTION: **Warning:**
This version is affected by [a bug that results in new LFS objects not being replicated to
Geo secondary nodes](https://gitlab.com/gitlab-org/gitlab/issues/32696). The issue is fixed
in GitLab 12.1. Please upgrade to GitLab 12.1 or newer.
in GitLab 12.1. Please upgrade to GitLab 12.1 or later.
## Updating to GitLab 10.8
......
......@@ -22,18 +22,10 @@ is recommended over [NFS](nfs.md) wherever possible for improved performance.
yum install nfs-utils nfs-utils-lib
```
1. Specify the necessary NFS shares. Mounts are specified in
`/etc/fstab`. The exact contents of `/etc/fstab` will depend on how you chose
to configure your NFS server. See [NFS documentation](nfs.md) for the various
options. Here is an example snippet to add to `/etc/fstab`:
```plaintext
10.1.0.1:/var/opt/gitlab/.ssh /var/opt/gitlab/.ssh nfs4 defaults,soft,rsize=1048576,wsize=1048576,noatime,nofail,lookupcache=positive 0 2
10.1.0.1:/var/opt/gitlab/gitlab-rails/uploads /var/opt/gitlab/gitlab-rails/uploads nfs4 defaults,soft,rsize=1048576,wsize=1048576,noatime,nofail,lookupcache=positive 0 2
10.1.0.1:/var/opt/gitlab/gitlab-rails/shared /var/opt/gitlab/gitlab-rails/shared nfs4 defaults,soft,rsize=1048576,wsize=1048576,noatime,nofail,lookupcache=positive 0 2
10.1.0.1:/var/opt/gitlab/gitlab-ci/builds /var/opt/gitlab/gitlab-ci/builds nfs4 defaults,soft,rsize=1048576,wsize=1048576,noatime,nofail,lookupcache=positive 0 2
10.1.0.1:/var/opt/gitlab/git-data /var/opt/gitlab/git-data nfs4 defaults,soft,rsize=1048576,wsize=1048576,noatime,nofail,lookupcache=positive 0 2
```
1. Specify the necessary NFS mounts in `/etc/fstab`.
The exact contents of `/etc/fstab` will depend on how you chose
to configure your NFS server. See [NFS documentation](nfs.md#nfs-client-mount-options)
for examples and the various options.
1. Create the shared directories. These may be different depending on your NFS
mount locations.
......
......@@ -148,12 +148,15 @@ For supported database architecture, please see our documentation on
## NFS Client mount options
Below is an example of an NFS mount point defined in `/etc/fstab` we use on
GitLab.com:
```plaintext
10.1.1.1:/var/opt/gitlab/git-data /var/opt/gitlab/git-data nfs4 defaults,soft,rsize=1048576,wsize=1048576,noatime,nofail,lookupcache=positive 0 2
```
Here is an example snippet to add to `/etc/fstab`:
```plaintext
10.1.0.1:/var/opt/gitlab/.ssh /var/opt/gitlab/.ssh nfs4 defaults,vers=4.1,hard,rsize=1048576,wsize=1048576,noatime,nofail,lookupcache=positive 0 2
10.1.0.1:/var/opt/gitlab/gitlab-rails/uploads /var/opt/gitlab/gitlab-rails/uploads nfs4 defaults,vers=4.1,hard,rsize=1048576,wsize=1048576,noatime,nofail,lookupcache=positive 0 2
10.1.0.1:/var/opt/gitlab/gitlab-rails/shared /var/opt/gitlab/gitlab-rails/shared nfs4 defaults,vers=4.1,hard,rsize=1048576,wsize=1048576,noatime,nofail,lookupcache=positive 0 2
10.1.0.1:/var/opt/gitlab/gitlab-ci/builds /var/opt/gitlab/gitlab-ci/builds nfs4 defaults,vers=4.1,hard,rsize=1048576,wsize=1048576,noatime,nofail,lookupcache=positive 0 2
10.1.0.1:/var/opt/gitlab/git-data /var/opt/gitlab/git-data nfs4 defaults,vers=4.1,hard,rsize=1048576,wsize=1048576,noatime,nofail,lookupcache=positive 0 2
```
Note there are several options that you should consider using:
......@@ -162,6 +165,42 @@ Note there are several options that you should consider using:
| `vers=4.1` |NFS v4.1 should be used instead of v4.0 because there is a Linux [NFS client bug in v4.0](https://gitlab.com/gitlab-org/gitaly/issues/1339) that can cause significant problems due to stale data.
| `nofail` | Don't halt boot process waiting for this mount to become available
| `lookupcache=positive` | Tells the NFS client to honor `positive` cache results but invalidates any `negative` cache results. Negative cache results cause problems with Git. Specifically, a `git push` can fail to register uniformly across all NFS clients. The negative cache causes the clients to 'remember' that the files did not exist previously.
| `hard` | Instead of `soft`. [Further details](#soft-mount-option).
### soft mount option
We recommend that you use `hard` in your mount options, unless you have a specific
reason to use `soft`.
On GitLab.com, we use `soft` because there were times when we had NFS servers
reboot and `soft` improved availability, but everyone's infrastructure is different.
If your NFS is provided by on-premise storage arrays with redundant controllers,
for example, you shouldn't need to worry about NFS server availability.
The NFS man page states:
> "soft" timeout can cause silent data corruption in certain cases
Read the [Linux man page](https://linux.die.net/man/5/nfs) to understand the difference,
and if you do use `soft`, ensure that you've taken steps to mitigate the risks.
If you experience behaviour that suggests writes to disk on the NFS server
are not occurring, such as commits going missing, use the `hard` option,
because (from the man page):
> use the soft option only when client responsiveness is more important than data integrity
Other vendors make similar recommendations, including
[SAP](http://wiki.scn.sap.com/wiki/x/PARnFQ) and NetApp's
[knowledge base](https://kb.netapp.com/app/answers/answer_view/a_id/1004893/~/hard-mount-vs-soft-mount-);
both highlight that if the NFS client driver caches data, `soft` means there is
no certainty that writes by GitLab actually reached the disk.
Mount points set with the option `hard` may not perform as well, and if the
NFS server goes down, `hard` will cause processes to hang when interacting with
the mount point. Use `SIGKILL` (`kill -9`) to deal with hung processes.
The `intr` option
[stopped working in the 2.6 kernel](https://access.redhat.com/solutions/157873).
## A single NFS mount
......
......@@ -94,10 +94,11 @@ Edit `/etc/fstab` on client as below to mount the remote shares automatically at
Note that GitLab requires advisory file locking, which is only supported natively in
NFS version 4. NFSv3 also supports locking as long as Linux Kernel 2.6.5+ is used.
We recommend using version 4 and do not specifically test NFSv3.
See [NFS documentation](nfs.md#nfs-client-mount-options) for guidance on mount options.
```text
#/etc/fstab
165.227.159.85:/home /nfs/home nfs4 defaults,soft,rsize=1048576,wsize=1048576,noatime,nofail,lookupcache=positive 0 2
10.0.0.1:/nfs/home /nfs/home nfs4 defaults,hard,vers=4.1,rsize=1048576,wsize=1048576,noatime,nofail,lookupcache=positive 0 2
```
Reboot the client and confirm that the mount point is mounted automatically.
......
......@@ -7,13 +7,7 @@ the [Elasticsearch integration documentation](../integration/elasticsearch.md#en
## Deep Dive
In June 2019, Mario de la Ossa hosted a [Deep Dive] on GitLab's [Elasticsearch integration] to share his domain specific knowledge with anyone who may work in this part of the code base in the future. You can find the [recording on YouTube], and the slides on [Google Slides] and in [PDF]. Everything covered in this deep dive was accurate as of GitLab 12.0, and while specific details may have changed since then, it should still serve as a good introduction.
[Deep Dive]: https://gitlab.com/gitlab-org/create-stage/issues/1
[Elasticsearch integration]: ../integration/elasticsearch.md
[recording on YouTube]: https://www.youtube.com/watch?v=vrvl-tN2EaA
[Google Slides]: https://docs.google.com/presentation/d/1H-pCzI_LNrgrL5pJAIQgvLX8Ji0-jIKOg1QeJQzChug/edit
[PDF]: https://gitlab.com/gitlab-org/create-stage/uploads/c5aa32b6b07476fa8b597004899ec538/Elasticsearch_Deep_Dive.pdf
In June 2019, Mario de la Ossa hosted a [Deep Dive](https://gitlab.com/gitlab-org/create-stage/issues/1) on GitLab's [Elasticsearch integration](../integration/elasticsearch.md) to share his domain specific knowledge with anyone who may work in this part of the code base in the future. You can find the [recording on YouTube](https://www.youtube.com/watch?v=vrvl-tN2EaA), and the slides on [Google Slides](https://docs.google.com/presentation/d/1H-pCzI_LNrgrL5pJAIQgvLX8Ji0-jIKOg1QeJQzChug/edit) and in [PDF](https://gitlab.com/gitlab-org/create-stage/uploads/c5aa32b6b07476fa8b597004899ec538/Elasticsearch_Deep_Dive.pdf). Everything covered in this deep dive was accurate as of GitLab 12.0, and while specific details may have changed since then, it should still serve as a good introduction.
## Supported Versions
......
......@@ -9,8 +9,8 @@ together.
## How do we test GitLab?
We use [Omnibus GitLab][omnibus-gitlab] to build GitLab packages and then we
test these packages using the [GitLab QA orchestrator][gitlab-qa] tool, which is
We use [Omnibus GitLab](https://gitlab.com/gitlab-org/omnibus-gitlab) to build GitLab packages and then we
test these packages using the [GitLab QA orchestrator](https://gitlab.com/gitlab-org/gitlab-qa) tool, which is
a black-box testing framework for the API and the UI.
### Testing nightly builds
......@@ -38,7 +38,7 @@ available for forks).
Omnibus package built from your merge request's changes.**
Manual action that starts end-to-end tests is also available in merge requests
in [Omnibus GitLab][omnibus-gitlab].
in [Omnibus GitLab](https://gitlab.com/gitlab-org/omnibus-gitlab).
Below you can read more about how to use it and how it works.
......@@ -68,14 +68,15 @@ subgraph "gitlab-qa-mirror pipeline"
1. Developer triggers a manual action, that can be found in CE / EE merge
requests. This starts a chain of pipelines in multiple projects.
1. The script being executed triggers a pipeline in [Omnibus GitLab Mirror][omnibus-gitlab-mirror]
1. The script being executed triggers a pipeline in
[Omnibus GitLab Mirror](https://gitlab.com/gitlab-org/omnibus-gitlab-mirror)
and waits for the resulting status. We call this a _status attribution_.
1. GitLab packages are being built in the [Omnibus GitLab][omnibus-gitlab]
1. GitLab packages are built in the [Omnibus GitLab](https://gitlab.com/gitlab-org/omnibus-gitlab)
pipeline. Packages are then pushed to its Container Registry.
1. When packages are ready, and available in the registry, a final step in the
[Omnibus GitLab][omnibus-gitlab] pipeline, triggers a new
[Omnibus GitLab](https://gitlab.com/gitlab-org/omnibus-gitlab) pipeline, triggers a new
GitLab QA pipeline (those with access can view them at `https://gitlab.com/gitlab-org/gitlab-qa-mirror/pipelines`). It also waits for a resulting status.
1. GitLab QA pulls images from the registry, spins up containers, and runs tests
......@@ -139,26 +140,23 @@ many of the 10 available jobs that you want to run).
On every pipeline during the `test` stage, the `review-qa-smoke` job is
automatically started: it runs the QA smoke suite against the
[Review App][review-apps].
[Review App](../review_apps.md).
You can also manually start the `review-qa-all`: it runs the full QA suite
against the [Review App][review-apps].
against the [Review App](../review_apps.md).
**This runs end-to-end tests against a Review App based on [the official GitLab
Helm chart][helm-chart], itself deployed with custom
[Cloud Native components][cng] built from your merge request's changes.**
Helm chart](https://gitlab.com/gitlab-org/charts/gitlab/), itself deployed with custom
[Cloud Native components](https://gitlab.com/gitlab-org/build/CNG) built from your merge request's changes.**
See [Review Apps][review-apps] for more details about Review Apps.
[helm-chart]: https://gitlab.com/gitlab-org/charts/gitlab/
[cng]: https://gitlab.com/gitlab-org/build/CNG
See [Review Apps](../review_apps.md) for more details about Review Apps.
## How do I run the tests?
If you are not [testing code in a merge request](#testing-code-in-merge-requests),
there are two main options for running the tests. If you simply want to run
the existing tests against a live GitLab instance or against a pre-built Docker image
you can use the [GitLab QA orchestrator][gitlab-qa-readme]. See also [examples
you can use the [GitLab QA orchestrator](https://gitlab.com/gitlab-org/gitlab-qa/tree/master/README.md). See also [examples
of the test scenarios you can run via the orchestrator](https://gitlab.com/gitlab-org/gitlab-qa/blob/master/docs/what_tests_can_be_run.md#examples).
On the other hand, if you would like to run against a local development GitLab
......@@ -173,12 +171,12 @@ Learn how to perform [tests that require special setup or consideration to run o
## How do I write tests?
In order to write new tests, you first need to learn more about GitLab QA
architecture. See the [documentation about it][gitlab-qa-architecture].
architecture. See the [documentation about it](https://gitlab.com/gitlab-org/gitlab-qa/blob/master/docs/architecture.md).
Once you decided where to put [test environment orchestration scenarios] and
[instance-level scenarios], take a look at the [GitLab QA README][instance-qa-readme],
the [GitLab QA orchestrator README][gitlab-qa-readme], and [the already existing
instance-level scenarios][instance-level scenarios].
Once you decided where to put [test environment orchestration scenarios](https://gitlab.com/gitlab-org/gitlab-qa/tree/master/lib/gitlab/qa/scenario) and
[instance-level scenarios](https://gitlab.com/gitlab-org/gitlab-foss/tree/master/qa/qa/specs/features), take a look at the [GitLab QA README](https://gitlab.com/gitlab-org/gitlab/tree/master/qa/README.md),
the [GitLab QA orchestrator README](https://gitlab.com/gitlab-org/gitlab-qa/tree/master/README.md), and [the already existing
instance-level scenarios](https://gitlab.com/gitlab-org/gitlab-foss/tree/master/qa/qa/specs/features).
Continued reading:
......@@ -193,18 +191,5 @@ Continued reading:
You can ask questions in the `#quality` channel on Slack (GitLab internal) or
you can find an issue you would like to work on in
[the `gitlab` issue tracker][gitlab-issues], or
[the `gitlab-qa` issue tracker][gitlab-qa-issues].
[omnibus-gitlab]: https://gitlab.com/gitlab-org/omnibus-gitlab
[omnibus-gitlab-mirror]: https://gitlab.com/gitlab-org/omnibus-gitlab-mirror
[gitlab-qa]: https://gitlab.com/gitlab-org/gitlab-qa
[gitlab-qa-readme]: https://gitlab.com/gitlab-org/gitlab-qa/tree/master/README.md
[review-apps]: ../review_apps.md
[gitlab-qa-architecture]: https://gitlab.com/gitlab-org/gitlab-qa/blob/master/docs/architecture.md
[gitlab-qa-issues]: https://gitlab.com/gitlab-org/gitlab-qa/issues?label_name%5B%5D=new+scenario
[gitlab-issues]: https://gitlab.com/gitlab-org/gitlab/issues?label_name[]=QA&label_name[]=test
[test environment orchestration scenarios]: https://gitlab.com/gitlab-org/gitlab-qa/tree/master/lib/gitlab/qa/scenario
[instance-level scenarios]: https://gitlab.com/gitlab-org/gitlab-foss/tree/master/qa/qa/specs/features
[instance-qa-readme]: https://gitlab.com/gitlab-org/gitlab/tree/master/qa/README.md
[instance-qa-examples]: https://gitlab.com/gitlab-org/gitlab-foss/tree/master/qa/qa
[the `gitlab` issue tracker](https://gitlab.com/gitlab-org/gitlab/issues?label_name%5B%5D=QA&label_name%5B%5D=test), or
[the `gitlab-qa` issue tracker](https://gitlab.com/gitlab-org/gitlab-qa/issues?label_name%5B%5D=new+scenario).
......@@ -22,10 +22,10 @@ If you don't exactly understand what we mean by **not everything needs to happen
- [2.](#2-test-skeleton) Creating the skeleton of the test file (`*_spec.rb`)
- [3.](#3-test-cases-mvc) The [MVC](https://about.gitlab.com/handbook/values/#minimum-viable-change-mvc) of the test cases' logic
- [4.](#4-extracting-duplicated-code) Extracting duplicated code into methods
- [5.](#5-tests-pre-conditions-using-resources-and-page-objects) Tests' pre-conditions (`before :context` and `before`) using resources and [Page Objects]
- [5.](#5-tests-pre-conditions-using-resources-and-page-objects) Tests' pre-conditions (`before :context` and `before`) using resources and [Page Objects](page_objects.md)
- [6.](#6-optimization) Optimizing the test suite
- [7.](#7-resources) Using and implementing resources
- [8.](#8-page-objects) Moving element definitions and methods to [Page Objects]
- [8.](#8-page-objects) Moving element definitions and methods to [Page Objects](page_objects.md)
### 0. Are end-to-end tests needed?
......@@ -126,7 +126,7 @@ end
> Notice that the test itself is simple. The most challenging part is the creation of the application state, which will be covered later.
>
> The exemplified test case's MVC is not enough for the change to be merged, but it helps to build up the test logic. The reason is that we do not want to use locators directly in the tests, and tests **must** use [Page Objects] before they can be merged. This way we better separate the responsibilities, where the Page Objects encapsulate elements and methods that allow us to interact with pages, while the spec files describe the test cases in more business-related language.
> The exemplified test case's MVC is not enough for the change to be merged, but it helps to build up the test logic. The reason is that we do not want to use locators directly in the tests, and tests **must** use [Page Objects](page_objects.md) before they can be merged. This way we better separate the responsibilities, where the Page Objects encapsulate elements and methods that allow us to interact with pages, while the spec files describe the test cases in more business-related language.
Below are the steps that the test covers:
......@@ -294,7 +294,7 @@ In the `before` block we create all the application state needed for the tests t
> A project is created in the background by creating the `issue` resource.
>
> When creating the [Resources], notice that when calling the `fabricate_via_api` method, we pass some attribute:values, like `title`, and `labels` for the `issue` resource; and `project` and `title` for the `label` resource.
> When creating the [Resources](resources.md), notice that when calling the `fabricate_via_api` method, we pass some attribute:values, like `title`, and `labels` for the `issue` resource; and `project` and `title` for the `label` resource.
>
> What's important to understand here is that by creating the application state mostly using the public APIs we save a lot of time in the test suite setup stage.
>
......@@ -358,7 +358,7 @@ To address point 1, we changed the test implementation from two `it` blocks into
**Note:** When writing this document, some code that is now merged to master was not yet implemented, but we left it here so that readers can understand the whole process of end-to-end test creation.
You can think of [Resources] as anything that can be created on GitLab CE or EE, either through the GUI, the API, or the CLI.
You can think of [Resources](resources.md) as anything that can be created on GitLab CE or EE, either through the GUI, the API, or the CLI.
With that in mind, resources can be a project, an epic, an issue, a label, a commit, etc.
......@@ -468,7 +468,7 @@ Page Objects are used in end-to-end tests for maintenance reasons, where a page'
> Page Objects are auto-loaded in the [`qa/qa.rb`](https://gitlab.com/gitlab-org/gitlab/blob/master/qa/qa.rb) file and available in all the test files (`*_spec.rb`).
Take a look at the [Page Objects] documentation.
Take a look at the [Page Objects](page_objects.md) documentation.
Now, let's go back to our example.
......@@ -571,7 +571,7 @@ The `text_of_labels_block` method is a simple method that returns the `:labels_b
#### Updates in the view (*.html.haml) and `dropdowns_helper.rb` files
Now let's change the view and the `dropdowns_helper` files to add the selectors that relate to the [Page Objects].
Now let's change the view and the `dropdowns_helper` files to add the selectors that relate to the [Page Objects](page_objects.md).
In [`app/views/shared/issuable/_sidebar.html.haml:105`](https://gitlab.com/gitlab-org/gitlab/blob/7ca12defc7a965987b162a6ebef302f95dc8867f/app/views/shared/issuable/_sidebar.html.haml#L105), add a `data: { qa_selector: 'edit_link_labels' }` data attribute.
......@@ -619,6 +619,3 @@ This method receives an element (`name`) and the `keys` that it will send to tha
As you might remember, in the Issue Page Object we call this method like this: `send_keys_to_element(:dropdown_input_field, [label, :enter])`.
With that, you should be able to start writing end-to-end tests yourself. *Congratulations!*
[Page Objects]: page_objects.md
[Resources]: resources.md
......@@ -40,8 +40,8 @@ Quarantined tests are run on the CI in dedicated jobs that are allowed to fail:
## Automatic retries and flaky tests detection
On our CI, we use [rspec-retry] to automatically retry a failing example a few
times (see [`spec/spec_helper.rb`] for the precise retries count).
On our CI, we use [rspec-retry](https://github.com/NoRedInk/rspec-retry) to automatically retry a failing example a few
times (see [`spec/spec_helper.rb`](https://gitlab.com/gitlab-org/gitlab/blob/master/spec/spec_helper.rb) for the precise retries count).
We also use a home-made `RspecFlaky::Listener` which records flaky
examples in a JSON report file on `master` (`retrieve-tests-metadata` and
......@@ -52,9 +52,6 @@ This was originally implemented in: <https://gitlab.com/gitlab-org/gitlab-foss/-
If you want to enable retries locally, you can use the `RETRIES` env variable.
For instance `RETRIES=1 bin/rspec ...` would retry the failing examples once.
[rspec-retry]: https://github.com/NoRedInk/rspec-retry
[`spec/spec_helper.rb`]: https://gitlab.com/gitlab-org/gitlab/blob/master/spec/spec_helper.rb
## Problems we had in the past at GitLab
- [`rspec-retry` is biting us when some API specs fail](https://gitlab.com/gitlab-org/gitlab-foss/issues/29242): <https://gitlab.com/gitlab-org/gitlab-foss/-/merge_requests/9825>
......
......@@ -44,36 +44,36 @@ subgraph "CNG-mirror pipeline"
### Detailed explanation
1. On every [pipeline][gitlab-pipeline] during the `test` stage, the
[`gitlab:assets:compile`][gitlab:assets:compile pull-cache] job is automatically started.
- Once it's done, it starts the [`review-build-cng`][review-build-cng]
manual job since the [`CNG-mirror`][cng-mirror] pipeline triggered in the
1. On every [pipeline](https://gitlab.com/gitlab-org/gitlab/pipelines/125315730) during the `test` stage, the
[`gitlab:assets:compile`](https://gitlab.com/gitlab-org/gitlab/-/jobs/467724487) job is automatically started.
- Once it's done, it starts the [`review-build-cng`](https://gitlab.com/gitlab-org/gitlab/-/jobs/467724808)
manual job since the [`CNG-mirror`](https://gitlab.com/gitlab-org/build/CNG-mirror) pipeline triggered in the
following step depends on it.
1. The [`review-build-cng`][review-build-cng] job [triggers a pipeline][cng-mirror-pipeline]
in the [`CNG-mirror`][cng-mirror] project.
- The [`CNG-mirror`][cng-mirror-pipeline] pipeline creates the Docker images of
1. The [`review-build-cng`](https://gitlab.com/gitlab-org/gitlab/-/jobs/467724808) job [triggers a pipeline](https://gitlab.com/gitlab-org/build/CNG-mirror/pipelines/44364657)
in the [`CNG-mirror`](https://gitlab.com/gitlab-org/build/CNG-mirror) project.
- The [`CNG-mirror`](https://gitlab.com/gitlab-org/build/CNG-mirror/pipelines/44364657) pipeline creates the Docker images of
each component (e.g. `gitlab-rails-ee`, `gitlab-shell`, `gitaly`)
based on the commit from the [GitLab pipeline][gitlab-pipeline] and stores
them in its [registry][cng-mirror-registry].
- We use the [`CNG-mirror`][cng-mirror] project so that the `CNG`, (**C**loud
based on the commit from the [GitLab pipeline](https://gitlab.com/gitlab-org/gitlab/pipelines/125315730) and stores
them in its [registry](https://gitlab.com/gitlab-org/build/CNG-mirror/container_registry).
- We use the [`CNG-mirror`](https://gitlab.com/gitlab-org/build/CNG-mirror) project so that the `CNG` (**C**loud
**N**ative **G**itLab) project's registry is not overloaded with a
lot of transient Docker images.
- Note that the official CNG images are built by the `cloud-native-image`
job, which runs only for tags, and triggers itself a [`CNG`][cng] pipeline.
1. Once the `test` stage is done, the [`review-deploy`][review-deploy] job
deploys the Review App using [the official GitLab Helm chart][helm-chart] to
the [`review-apps-ce`][review-apps-ce] / [`review-apps-ee`][review-apps-ee]
job, which runs only for tags, and itself triggers a [`CNG`](https://gitlab.com/gitlab-org/build/CNG) pipeline.
1. Once the `test` stage is done, the [`review-deploy`](https://gitlab.com/gitlab-org/gitlab/-/jobs/467724810) job
deploys the Review App using [the official GitLab Helm chart](https://gitlab.com/gitlab-org/charts/gitlab/) to
the [`review-apps-ce`](https://console.cloud.google.com/kubernetes/clusters/details/us-central1-a/review-apps-ce?project=gitlab-review-apps) / [`review-apps-ee`](https://console.cloud.google.com/kubernetes/clusters/details/us-central1-b/review-apps-ee?project=gitlab-review-apps)
Kubernetes cluster on GCP.
- The actual scripts used to deploy the Review App can be found at
[`scripts/review_apps/review-apps.sh`][review-apps.sh].
[`scripts/review_apps/review-apps.sh`](https://gitlab.com/gitlab-org/gitlab/-/blob/master/scripts/review_apps/review-apps.sh).
- These scripts are basically
[our official Auto DevOps scripts][Auto-DevOps.gitlab-ci.yml] where the
[our official Auto DevOps scripts](https://gitlab.com/gitlab-org/gitlab/-/blob/master/lib/gitlab/ci/templates/Auto-DevOps.gitlab-ci.yml) where the
default CNG images are overridden with the images built and stored in the
[`CNG-mirror` project's registry][cng-mirror-registry].
- Since we're using [the official GitLab Helm chart][helm-chart], this means
[`CNG-mirror` project's registry](https://gitlab.com/gitlab-org/build/CNG-mirror/container_registry).
- Since we're using [the official GitLab Helm chart](https://gitlab.com/gitlab-org/charts/gitlab/), this means
you get a dedicated environment for your branch that's very close to what
it would look like in production.
1. Once the [`review-deploy`][review-deploy] job succeeds, you should be able to
1. Once the [`review-deploy`](https://gitlab.com/gitlab-org/gitlab/-/jobs/467724810) job succeeds, you should be able to
use your Review App thanks to the direct link to it from the MR widget. To log
into the Review App, see "Log into my Review App?" below.
......@@ -95,7 +95,7 @@ subgraph "CNG-mirror pipeline"
stop a Review App manually, and is also started by GitLab once a merge
request's branch is deleted after being merged.
- The Kubernetes cluster is connected to the `gitlab-{ce,ee}` projects using
[GitLab's Kubernetes integration][gitlab-k8s-integration]. This basically
[GitLab's Kubernetes integration](../../user/project/clusters/index.md). This basically
allows you to have a link to the Review App directly from the merge request
widget.
......@@ -119,7 +119,7 @@ that were not removed along with the Kubernetes resources.
## QA runs
On every [pipeline][gitlab-pipeline] in the `qa` stage (which comes after the
On every [pipeline](https://gitlab.com/gitlab-org/gitlab/pipelines/125315730) in the `qa` stage (which comes after the
`review` stage), the `review-qa-smoke` job is automatically started and it runs
the QA smoke suite.
......@@ -127,7 +127,7 @@ You can also manually start the `review-qa-all`: it runs the full QA suite.
## Performance Metrics
On every [pipeline][gitlab-pipeline] in the `qa` stage, the
On every [pipeline](https://gitlab.com/gitlab-org/gitlab/pipelines/125315730) in the `qa` stage, the
`review-performance` job is automatically started: this job does basic
browser performance testing using a
[Sitespeed.io Container](../../user/project/merge_requests/browser_performance_testing.md).
......@@ -287,7 +287,7 @@ kubectl get cm --sort-by='{.metadata.creationTimestamp}' | grep 'review-' | grep
### Using K9s
[K9s] is a powerful command line dashboard which allows you to filter by labels. This can help identify trends with apps exceeding the [review-app resource requests](https://gitlab.com/gitlab-org/gitlab/-/blob/master/scripts/review_apps/base-config.yaml). Kubernetes will schedule pods to nodes based on resource requests and allow for CPU usage up to the limits.
[K9s](https://github.com/derailed/k9s) is a powerful command line dashboard which allows you to filter by labels. This can help identify trends with apps exceeding the [review-app resource requests](https://gitlab.com/gitlab-org/gitlab/-/blob/master/scripts/review_apps/base-config.yaml). Kubernetes will schedule pods to nodes based on resource requests and allow for CPU usage up to the limits.
- In K9s you can sort or add filters by typing the `/` character
- `-lrelease=<review-app-slug>` - filters down to all pods for a release. This aids in determining what is having issues in a single deployment
......@@ -389,27 +389,9 @@ find a way to limit it to only us.**
### Helpful command line tools
- [K9s] - enables CLI dashboard across pods and enabling filtering by labels
- [K9s](https://github.com/derailed/k9s) - enables CLI dashboard across pods and enabling filtering by labels
- [Stern](https://github.com/wercker/stern) - enables cross pod log tailing based on label/field selectors
[charts-1068]: https://gitlab.com/gitlab-org/charts/gitlab/issues/1068
[gitlab-pipeline]: https://gitlab.com/gitlab-org/gitlab/pipelines/125315730
[gitlab:assets:compile pull-cache]: https://gitlab.com/gitlab-org/gitlab/-/jobs/467724487
[review-build-cng]: https://gitlab.com/gitlab-org/gitlab/-/jobs/467724808
[review-deploy]: https://gitlab.com/gitlab-org/gitlab/-/jobs/467724810
[cng-mirror]: https://gitlab.com/gitlab-org/build/CNG-mirror
[cng]: https://gitlab.com/gitlab-org/build/CNG
[cng-mirror-pipeline]: https://gitlab.com/gitlab-org/build/CNG-mirror/pipelines/44364657
[cng-mirror-registry]: https://gitlab.com/gitlab-org/build/CNG-mirror/container_registry
[helm-chart]: https://gitlab.com/gitlab-org/charts/gitlab/
[review-apps-ce]: https://console.cloud.google.com/kubernetes/clusters/details/us-central1-a/review-apps-ce?project=gitlab-review-apps
[review-apps-ee]: https://console.cloud.google.com/kubernetes/clusters/details/us-central1-b/review-apps-ee?project=gitlab-review-apps
[review-apps.sh]: https://gitlab.com/gitlab-org/gitlab/-/blob/master/scripts/review_apps/review-apps.sh
[automated_cleanup.rb]: https://gitlab.com/gitlab-org/gitlab/-/blob/master/scripts/review_apps/automated_cleanup.rb
[Auto-DevOps.gitlab-ci.yml]: https://gitlab.com/gitlab-org/gitlab/-/blob/master/lib/gitlab/ci/templates/Auto-DevOps.gitlab-ci.yml
[gitlab-k8s-integration]: ../../user/project/clusters/index.md
[K9s]: https://github.com/derailed/k9s
---
[Return to Testing documentation](index.md)
......@@ -3418,6 +3418,9 @@ msgstr ""
msgid "Cannot have multiple Jira imports running at the same time"
msgstr ""
msgid "Cannot import because issues are not available in this project."
msgstr ""
msgid "Cannot make epic confidential if it contains not-confidential issues"
msgstr ""
......@@ -12926,7 +12929,7 @@ msgstr ""
msgid "Metrics|Prometheus Query Documentation"
msgstr ""
msgid "Metrics|Reload this page"
msgid "Metrics|Refresh dashboard"
msgstr ""
msgid "Metrics|Show last"
......@@ -21811,6 +21814,9 @@ msgstr ""
msgid "Unable to generate new instance ID"
msgstr ""
msgid "Unable to load the diff"
msgstr ""
msgid "Unable to load the diff. %{button_try_again}"
msgstr ""
......
......@@ -5,6 +5,7 @@ require 'spec_helper'
describe Projects::Import::JiraController do
let_it_be(:user) { create(:user) }
let_it_be(:project) { create(:project) }
let_it_be(:jira_project_key) { 'Test' }
context 'with anonymous user' do
before do
......@@ -21,7 +22,7 @@ describe Projects::Import::JiraController do
context 'post import' do
it 'redirects to the sign-in page' do
post :import, params: { namespace_id: project.namespace, project_id: project, jira_project_key: 'Test' }
post :import, params: { namespace_id: project.namespace, project_id: project, jira_project_key: jira_project_key }
expect(response).to redirect_to(new_user_session_path)
end
......@@ -49,7 +50,7 @@ describe Projects::Import::JiraController do
context 'post import' do
it 'redirects to issues page' do
post :import, params: { namespace_id: project.namespace, project_id: project, jira_project_key: 'Test' }
post :import, params: { namespace_id: project.namespace, project_id: project, jira_project_key: jira_project_key }
expect(response).to redirect_to(project_issues_path(project))
end
......@@ -65,12 +66,64 @@ describe Projects::Import::JiraController do
context 'when jira service is enabled for the project' do
let_it_be(:jira_service) { create(:jira_service, project: project) }
context 'when user is developer' do
let_it_be(:dev) { create(:user) }
before do
sign_in(dev)
project.add_developer(dev)
end
context 'get show' do
  let(:show_params) { { namespace_id: project.namespace.to_param, project_id: project } }

  it 'does not query jira projects' do
    # The message expectation must be set before the request runs.
    expect(JIRA::Resource::Project).not_to receive(:all)

    get :show, params: show_params
  end

  it 'renders show template' do
    get :show, params: show_params

    expect(response).to render_template(:show)
    expect(assigns(:jira_projects)).not_to be_present
  end
end
context 'post import' do
it 'returns 404' do
post :import, params: { namespace_id: project.namespace, project_id: project, jira_project_key: jira_project_key }
expect(response).to have_gitlab_http_status(:not_found)
end
end
end
context 'when issues are disabled' do
let_it_be(:disabled_issues_project) { create(:project, :public, :issues_disabled) }
context 'get show' do
it 'returns 404' do
get :show, params: { namespace_id: project.namespace.to_param, project_id: disabled_issues_project }
expect(response).to have_gitlab_http_status(:not_found)
end
end
context 'post import' do
it 'returns 404' do
post :import, params: { namespace_id: disabled_issues_project.namespace, project_id: disabled_issues_project, jira_project_key: jira_project_key }
expect(response).to have_gitlab_http_status(:not_found)
end
end
end
context 'when running jira import for the first time' do
context 'get show' do
before do
allow(JIRA::Resource::Project).to receive(:all).and_return(jira_projects)
expect(project.import_state).to be_nil
expect(project.jira_imports).to be_empty
get :show, params: { namespace_id: project.namespace.to_param, project_id: project }
end
......@@ -84,7 +137,7 @@ describe Projects::Import::JiraController do
end
end
context 'when everything is ok' do
context 'when projects are retrieved from Jira' do
let(:jira_projects) { [double(name: 'FOO project', key: 'FOO')] }
it 'renders show template' do
......@@ -107,14 +160,14 @@ describe Projects::Import::JiraController do
it 'creates import state' do
expect(project.latest_jira_import).to be_nil
post :import, params: { namespace_id: project.namespace, project_id: project, jira_project_key: 'Test' }
post :import, params: { namespace_id: project.namespace, project_id: project, jira_project_key: jira_project_key }
project.reload
jira_import = project.latest_jira_import
expect(project.import_type).to eq 'jira'
expect(jira_import.status).to eq 'scheduled'
expect(jira_import.jira_project_key).to eq 'Test'
expect(jira_import.jira_project_key).to eq jira_project_key
expect(response).to redirect_to(project_import_jira_path(project))
end
end
......@@ -145,7 +198,7 @@ describe Projects::Import::JiraController do
end
context 'when jira import ran before' do
let_it_be(:jira_import_state) { create(:jira_import_state, :finished, project: project, jira_project_key: 'Test') }
let_it_be(:jira_import_state) { create(:jira_import_state, :finished, project: project, jira_project_key: jira_project_key) }
context 'get show' do
it 'renders import status' do
......@@ -164,7 +217,7 @@ describe Projects::Import::JiraController do
project.reload
expect(project.latest_jira_import.status).to eq 'scheduled'
expect(project.jira_imports.size).to eq 2
expect(project.jira_imports.first.jira_project_key).to eq 'Test'
expect(project.jira_imports.first.jira_project_key).to eq jira_project_key
expect(project.jira_imports.last.jira_project_key).to eq 'New Project'
expect(response).to redirect_to(project_import_jira_path(project))
end
......
......@@ -92,7 +92,7 @@ exports[`Dashboard template matches the default snapshot 1`] = `
>
<gl-deprecated-button-stub
size="md"
title="Reload this page"
title="Refresh dashboard"
variant="default"
>
<icon-stub
......
......@@ -15,7 +15,7 @@ import {
receiveMetricsDashboardSuccess,
fetchDeploymentsData,
fetchEnvironmentsData,
fetchPrometheusMetrics,
fetchDashboardData,
fetchPrometheusMetric,
setInitialState,
filterEnvironments,
......@@ -375,7 +375,7 @@ describe('Monitoring store actions', () => {
metricsDashboardResponse.dashboard,
);
expect(dispatch).toHaveBeenCalledWith('fetchPrometheusMetrics');
expect(dispatch).toHaveBeenCalledWith('fetchDashboardData');
});
it('sets the dashboards loaded from the repository', () => {
const params = {};
......@@ -395,7 +395,7 @@ describe('Monitoring store actions', () => {
expect(commit).toHaveBeenCalledWith(types.SET_ALL_DASHBOARDS, dashboardGitResponse);
});
});
describe('fetchPrometheusMetrics', () => {
describe('fetchDashboardData', () => {
let commit;
let dispatch;
let state;
......@@ -413,7 +413,7 @@ describe('Monitoring store actions', () => {
const getters = {
metricsWithData: () => [],
};
fetchPrometheusMetrics({ state, commit, dispatch, getters })
fetchDashboardData({ state, commit, dispatch, getters })
.then(() => {
expect(Tracking.event).toHaveBeenCalledWith(
document.body.dataset.page,
......@@ -442,7 +442,7 @@ describe('Monitoring store actions', () => {
metricsWithData: () => [metric.id],
};
fetchPrometheusMetrics({ state, commit, dispatch, getters })
fetchDashboardData({ state, commit, dispatch, getters })
.then(() => {
expect(dispatch).toHaveBeenCalledWith('fetchPrometheusMetric', {
metric,
......@@ -478,7 +478,7 @@ describe('Monitoring store actions', () => {
dispatch.mockRejectedValueOnce(new Error('Error fetching this metric'));
dispatch.mockResolvedValue();
fetchPrometheusMetrics({ state, commit, dispatch })
fetchDashboardData({ state, commit, dispatch })
.then(() => {
expect(dispatch).toHaveBeenCalledTimes(10); // one per metric plus 1 for deployments
expect(dispatch).toHaveBeenCalledWith('fetchDeploymentsData');
......
......@@ -202,15 +202,12 @@ describe('Monitoring mutations', () => {
mutations[types.REQUEST_METRIC_RESULT](stateCopy, {
metricId,
result,
});
expect(stateCopy.showEmptyState).toBe(true);
expect(getMetric()).toEqual(
expect.objectContaining({
loading: true,
result: null,
state: metricStates.LOADING,
}),
);
});
......@@ -232,7 +229,7 @@ describe('Monitoring mutations', () => {
});
it('adds results to the store', () => {
expect(getMetric().result).toBe(undefined);
expect(getMetric().result).toBe(null);
mutations[types.RECEIVE_METRIC_RESULT_SUCCESS](stateCopy, {
metricId,
......
......@@ -5,6 +5,7 @@ import {
removeLeadingSlash,
mapToDashboardViewModel,
} from '~/monitoring/stores/utils';
import { NOT_IN_DB_PREFIX } from '~/monitoring/constants';
const projectPath = 'gitlab-org/gitlab-test';
......@@ -256,6 +257,9 @@ describe('mapToDashboardViewModel', () => {
expect(getMappedMetric(dashboard)).toEqual({
label: expect.any(String),
metricId: expect.any(String),
loading: false,
result: null,
state: null,
});
});
......@@ -307,7 +311,7 @@ describe('mapToDashboardViewModel', () => {
describe('uniqMetricsId', () => {
[
{ input: { id: 1 }, expected: 'NO_DB_1' },
{ input: { id: 1 }, expected: `${NOT_IN_DB_PREFIX}_1` },
{ input: { metric_id: 2 }, expected: '2_undefined' },
{ input: { metric_id: 2, id: 21 }, expected: '2_21' },
{ input: { metric_id: 22, id: 1 }, expected: '22_1' },
......
// Jest Snapshot v1, https://goo.gl/fbAQLP
exports[`Form Footer Actions renders content properly 1`] = `
<footer
class="form-actions d-flex justify-content-between"
>
<div>
Bar
</div>
<div>
Foo
</div>
<div>
Abrakadabra
</div>
</footer>
`;
import FormFooterActions from '~/vue_shared/components/form/form_footer_actions.vue';
import { shallowMount } from '@vue/test-utils';
describe('Form Footer Actions', () => {
let wrapper;
function createComponent(slots = {}) {
wrapper = shallowMount(FormFooterActions, {
slots,
});
}
afterEach(() => {
wrapper.destroy();
});
it('renders content properly', () => {
const defaultSlot = 'Foo';
const prepend = 'Bar';
const append = 'Abrakadabra';
createComponent({
default: defaultSlot,
prepend,
append,
});
expect(wrapper.element).toMatchSnapshot();
});
});
......@@ -18,22 +18,20 @@ describe Resolvers::Projects::JiraImportsResolver do
it_behaves_like 'no jira import access'
end
context 'when user developer' do
before do
project.add_developer(user)
end
it_behaves_like 'no jira import access'
end
end
context 'when user can read Jira import data' do
before do
project.add_maintainer(user)
project.add_guest(user)
end
it_behaves_like 'no jira import data present'
it 'does not raise access error' do
expect do
resolve_imports
end.not_to raise_error
end
end
end
......@@ -58,19 +56,11 @@ describe Resolvers::Projects::JiraImportsResolver do
it_behaves_like 'no jira import access'
end
context 'when user developer' do
before do
project.add_developer(user)
end
it_behaves_like 'no jira import access'
end
end
context 'when user can access Jira imports' do
before do
project.add_maintainer(user)
project.add_guest(user)
end
it 'returns Jira imports sorted ascending by created_at time' do
......
......@@ -99,6 +99,12 @@ describe 'Starting a Jira Import' do
it_behaves_like 'a mutation that returns errors in the response', errors: ['Jira integration not configured.']
end
context 'when the issues feature is disabled' do
let_it_be(:project, reload: true) { create(:project, :issues_disabled) }
it_behaves_like 'a mutation that returns errors in the response', errors: ['Cannot import because issues are not available in this project.']
end
context 'when project has Jira service' do
let!(:service) { create(:jira_service, project: project) }
......
......@@ -38,6 +38,12 @@ describe JiraImport::StartImportService do
it_behaves_like 'responds with error', 'Jira integration not configured.'
end
context 'when the issues feature is disabled' do
let_it_be(:project, reload: true) { create(:project, :issues_disabled) }
it_behaves_like 'responds with error', 'Cannot import because issues are not available in this project.'
end
context 'when Jira service exists' do
let!(:jira_service) { create(:jira_service, project: project, active: true) }
......