Commit 11e5d1b9 authored by GitLab Bot

Add latest changes from gitlab-org/gitlab@master

parent 7351a484
......@@ -75,18 +75,18 @@
changes: *code-backstage-qa-patterns
when: on_success
.rails:rules:master-refs-code-backstage-qa:
.rails:rules:master-refs-code-backstage:
rules:
- <<: *if-master-refs
changes: *code-backstage-qa-patterns
changes: *code-backstage-patterns
when: on_success
.rails:rules:master-refs-code-backstage-qa-ee-only:
.rails:rules:master-refs-code-backstage-ee-only:
rules:
- <<: *if-not-ee
when: never
- <<: *if-master-refs
changes: *code-backstage-qa-patterns
changes: *code-backstage-patterns
when: on_success
.rails:rules:ee-only:
......@@ -332,12 +332,12 @@ coverage:
rspec quarantine pg9:
extends:
- .rspec-base-quarantine
- .rails:rules:master-refs-code-backstage-qa
- .rails:rules:master-refs-code-backstage
.rspec-base-pg10:
extends:
- .rspec-base
- .rails:rules:master-refs-code-backstage-qa
- .rails:rules:master-refs-code-backstage
- .use-pg10
rspec unit pg10:
......@@ -359,7 +359,7 @@ rspec system pg10:
rspec-ee quarantine pg9:
extends:
- .rspec-base-quarantine
- .rails:rules:master-refs-code-backstage-qa-ee-only
- .rails:rules:master-refs-code-backstage-ee-only
variables:
RSPEC_OPTS: "--tag quarantine -- ee/spec/"
......@@ -367,25 +367,25 @@ rspec-ee migration pg10:
extends:
- .rspec-ee-base-pg10
- .rspec-base-migration
- .rails:rules:master-refs-code-backstage-qa
- .rails:rules:master-refs-code-backstage
parallel: 2
rspec-ee unit pg10:
extends:
- .rspec-ee-base-pg10
- .rails:rules:master-refs-code-backstage-qa
- .rails:rules:master-refs-code-backstage
parallel: 10
rspec-ee integration pg10:
extends:
- .rspec-ee-base-pg10
- .rails:rules:master-refs-code-backstage-qa
- .rails:rules:master-refs-code-backstage
parallel: 3
rspec-ee system pg10:
extends:
- .rspec-ee-base-pg10
- .rails:rules:master-refs-code-backstage-qa
- .rails:rules:master-refs-code-backstage
parallel: 5
# ee + master-only jobs #
#########################
......
import initRegistryImages from '~/registry/list';
import initRegistryImages from '~/registry/list/index';
import registryExplorer from '~/registry/explorer/index';
document.addEventListener('DOMContentLoaded', initRegistryImages);
document.addEventListener('DOMContentLoaded', () => {
initRegistryImages();
registryExplorer();
});
import initRegistryImages from '~/registry/list/index';
import registryExplorer from '~/registry/explorer/index';
document.addEventListener('DOMContentLoaded', initRegistryImages);
document.addEventListener('DOMContentLoaded', () => {
initRegistryImages();
registryExplorer();
});
import { __ } from '~/locale';
export const FETCH_IMAGES_LIST_ERROR_MESSAGE = __(
'Something went wrong while fetching the packages list.',
);
export const FETCH_TAGS_LIST_ERROR_MESSAGE = __(
'Something went wrong while fetching the tags list.',
);
export const DELETE_IMAGE_ERROR_MESSAGE = __('Something went wrong while deleting the image.');
export const DELETE_IMAGE_SUCCESS_MESSAGE = __('Image deleted successfully');
export const DELETE_TAG_ERROR_MESSAGE = __('Something went wrong while deleting the tag.');
export const DELETE_TAG_SUCCESS_MESSAGE = __('Tag deleted successfully');
export const DELETE_TAGS_ERROR_MESSAGE = __('Something went wrong while deleting the tags.');
export const DELETE_TAGS_SUCCESS_MESSAGE = __('Tags deleted successfully');
export const DEFAULT_PAGE = 1;
export const DEFAULT_PAGE_SIZE = 10;
export const GROUP_PAGE_TYPE = 'groups';
export const LIST_KEY_TAG = 'name';
export const LIST_KEY_IMAGE_ID = 'short_revision';
export const LIST_KEY_SIZE = 'total_size';
export const LIST_KEY_LAST_UPDATED = 'created_at';
export const LIST_KEY_ACTIONS = 'actions';
export const LIST_KEY_CHECKBOX = 'checkbox';
export const LIST_LABEL_TAG = __('Tag');
export const LIST_LABEL_IMAGE_ID = __('Image ID');
export const LIST_LABEL_SIZE = __('Size');
export const LIST_LABEL_LAST_UPDATED = __('Last Updated');
import Vue from 'vue';
import Translate from '~/vue_shared/translate';
import RegistryExplorer from './pages/index.vue';
import { createStore } from './stores';
import createRouter from './router';
Vue.use(Translate);
export default () => {
const el = document.getElementById('js-container-registry');
if (!el) {
return null;
}
const { endpoint } = el.dataset;
const store = createStore();
const router = createRouter(endpoint, store);
store.dispatch('setInitialState', el.dataset);
return new Vue({
el,
store,
router,
components: {
RegistryExplorer,
},
render(createElement) {
return createElement('registry-explorer');
},
});
};
<script>
export default {};
</script>
<template>
<div></div>
</template>
<script>
export default {};
</script>
<template>
<div class="position-relative">
<transition name="slide">
<router-view />
</transition>
</div>
</template>
<script>
export default {};
</script>
<template>
<div></div>
</template>
import Vue from 'vue';
import VueRouter from 'vue-router';
import { __ } from '~/locale';
import List from './pages/list.vue';
import Details from './pages/details.vue';
Vue.use(VueRouter);
export default function createRouter(base, store) {
const router = new VueRouter({
base,
mode: 'history',
routes: [
{
name: 'list',
path: '/',
component: List,
meta: {
name: __('Container Registry'),
},
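// Prefetch this route's data before it renders; the page component
// then reads the results from the Vuex store.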
beforeEnter: (to, from, next) => {
store.dispatch('requestImagesList');
next();
},
},
{
name: 'details',
path: '/:id',
component: Details,
meta: {
name: __('Tags'),
},
beforeEnter: (to, from, next) => {
store.dispatch('requestTagsList', { id: to.params.id });
next();
},
},
],
});
return router;
}
import axios from '~/lib/utils/axios_utils';
import createFlash from '~/flash';
import * as types from './mutation_types';
import {
FETCH_IMAGES_LIST_ERROR_MESSAGE,
DEFAULT_PAGE,
DEFAULT_PAGE_SIZE,
FETCH_TAGS_LIST_ERROR_MESSAGE,
DELETE_TAG_SUCCESS_MESSAGE,
DELETE_TAG_ERROR_MESSAGE,
DELETE_TAGS_SUCCESS_MESSAGE,
DELETE_TAGS_ERROR_MESSAGE,
DELETE_IMAGE_ERROR_MESSAGE,
DELETE_IMAGE_SUCCESS_MESSAGE,
} from '../constants';
export const setInitialState = ({ commit }, data) => commit(types.SET_INITIAL_STATE, data);
export const receiveImagesListSuccess = ({ commit }, { data, headers }) => {
commit(types.SET_IMAGES_LIST_SUCCESS, data);
commit(types.SET_PAGINATION, headers);
};
export const receiveTagsListSuccess = ({ commit }, { data, headers }) => {
commit(types.SET_TAGS_LIST_SUCCESS, data);
commit(types.SET_TAGS_PAGINATION, headers);
};
export const requestImagesList = ({ commit, dispatch, state }, pagination = {}) => {
commit(types.SET_MAIN_LOADING, true);
const { page = DEFAULT_PAGE, perPage = DEFAULT_PAGE_SIZE } = pagination;
return axios
.get(state.config.endpoint, { params: { page, per_page: perPage } })
.then(({ data, headers }) => {
dispatch('receiveImagesListSuccess', { data, headers });
})
.catch(() => {
createFlash(FETCH_IMAGES_LIST_ERROR_MESSAGE);
})
.finally(() => {
commit(types.SET_MAIN_LOADING, false);
});
};
export const requestTagsList = ({ commit, dispatch }, { pagination = {}, id }) => {
commit(types.SET_MAIN_LOADING, true);
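// `id` arrives base64-encoded (it travels as the router's `:id` param),
// so decode it to recover the tags endpoint URL before fetching.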
const url = window.atob(id);
const { page = DEFAULT_PAGE, perPage = DEFAULT_PAGE_SIZE } = pagination;
return axios
.get(url, { params: { page, per_page: perPage } })
.then(({ data, headers }) => {
dispatch('receiveTagsListSuccess', { data, headers });
})
.catch(() => {
createFlash(FETCH_TAGS_LIST_ERROR_MESSAGE);
})
.finally(() => {
commit(types.SET_MAIN_LOADING, false);
});
};
export const requestDeleteTag = ({ commit, dispatch, state }, { tag, imageId }) => {
commit(types.SET_MAIN_LOADING, true);
return axios
.delete(tag.destroy_path)
.then(() => {
createFlash(DELETE_TAG_SUCCESS_MESSAGE, 'success');
dispatch('requestTagsList', { pagination: state.tagsPagination, id: imageId });
})
.catch(() => {
createFlash(DELETE_TAG_ERROR_MESSAGE);
})
.finally(() => {
commit(types.SET_MAIN_LOADING, false);
});
};
export const requestDeleteTags = ({ commit, dispatch, state }, { ids, imageId }) => {
commit(types.SET_MAIN_LOADING, true);
const url = `/${state.config.projectPath}/registry/repository/${imageId}/tags/bulk_destroy`;
return axios
.delete(url, { params: { ids } })
.then(() => {
createFlash(DELETE_TAGS_SUCCESS_MESSAGE, 'success');
dispatch('requestTagsList', { pagination: state.tagsPagination, id: imageId });
})
.catch(() => {
createFlash(DELETE_TAGS_ERROR_MESSAGE);
})
.finally(() => {
commit(types.SET_MAIN_LOADING, false);
});
};
export const requestDeleteImage = ({ commit, dispatch, state }, destroyPath) => {
commit(types.SET_MAIN_LOADING, true);
return axios
.delete(destroyPath)
.then(() => {
dispatch('requestImagesList', { pagination: state.pagination });
createFlash(DELETE_IMAGE_SUCCESS_MESSAGE, 'success');
})
.catch(() => {
createFlash(DELETE_IMAGE_ERROR_MESSAGE);
})
.finally(() => {
commit(types.SET_MAIN_LOADING, false);
});
};
export default () => {};
import Vue from 'vue';
import Vuex from 'vuex';
import * as actions from './actions';
import mutations from './mutations';
import state from './state';
Vue.use(Vuex);
export const createStore = () =>
new Vuex.Store({
state,
actions,
mutations,
});
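// The factory builds fresh stores (used by the explorer entry point and tests);
// a pre-built singleton is also exported as the module default.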
export default createStore();
export const SET_INITIAL_STATE = 'SET_INITIAL_STATE';
export const SET_IMAGES_LIST_SUCCESS = 'SET_IMAGES_LIST_SUCCESS';
export const SET_PAGINATION = 'SET_PAGINATION';
export const SET_MAIN_LOADING = 'SET_MAIN_LOADING';
export const SET_TAGS_PAGINATION = 'SET_TAGS_PAGINATION';
export const SET_TAGS_LIST_SUCCESS = 'SET_TAGS_LIST_SUCCESS';
import * as types from './mutation_types';
import { parseIntPagination, normalizeHeaders } from '~/lib/utils/common_utils';
export default {
[types.SET_INITIAL_STATE](state, config) {
state.config = {
...config,
};
},
[types.SET_IMAGES_LIST_SUCCESS](state, images) {
state.images = images;
},
[types.SET_TAGS_LIST_SUCCESS](state, tags) {
state.tags = tags;
},
[types.SET_MAIN_LOADING](state, isLoading) {
state.isLoading = isLoading;
},
[types.SET_PAGINATION](state, headers) {
const normalizedHeaders = normalizeHeaders(headers);
state.pagination = parseIntPagination(normalizedHeaders);
},
[types.SET_TAGS_PAGINATION](state, headers) {
const normalizedHeaders = normalizeHeaders(headers);
state.tagsPagination = parseIntPagination(normalizedHeaders);
},
};
export default () => ({
isLoading: false,
config: {},
images: [],
tags: [],
pagination: {},
tagsPagination: {},
});
......@@ -4,14 +4,20 @@ import Translate from '~/vue_shared/translate';
Vue.use(Translate);
export default () =>
new Vue({
el: '#js-vue-registry-images',
export default () => {
const el = document.getElementById('js-vue-registry-images');
if (!el) {
return null;
}
return new Vue({
el,
components: {
registryApp,
},
data() {
const { dataset } = document.querySelector(this.$options.el);
const { dataset } = el;
return {
registryData: {
endpoint: dataset.endpoint,
......@@ -35,3 +41,4 @@ export default () =>
});
},
});
};
import { joinPaths } from '~/lib/utils/url_utility';
export const updateElementsVisibility = (selector, isVisible) => {
document.querySelectorAll(selector).forEach(elem => elem.classList.toggle('hidden', !isVisible));
};
......@@ -6,6 +8,6 @@ export const updateFormAction = (selector, basePath, path) => {
const form = document.querySelector(selector);
if (form) {
form.action = `${basePath}${path}`;
form.action = joinPaths(basePath, path);
}
};
......@@ -24,7 +24,7 @@ class Admin::SpamLogsController < Admin::ApplicationController
def mark_as_ham
spam_log = SpamLog.find(params[:id])
if Spam::HamService.new(spam_log).mark_as_ham!
if Spam::HamService.new(spam_log).execute
redirect_to admin_spam_logs_path, notice: _('Spam log successfully submitted as ham.')
else
redirect_to admin_spam_logs_path, alert: _('Error with Akismet. Please check the logs for more info.')
......
......@@ -23,6 +23,7 @@ module Ci
belongs_to :trigger_request
belongs_to :erased_by, class_name: 'User'
belongs_to :resource_group, class_name: 'Ci::ResourceGroup', inverse_of: :builds
belongs_to :pipeline, class_name: 'Ci::Pipeline', foreign_key: :commit_id
RUNNER_FEATURES = {
upload_multiple_artifacts: -> (build) { build.publishes_artifacts_reports? },
......
......@@ -28,7 +28,7 @@ module Ci
license_scanning: 'gl-license-scanning-report.json',
performance: 'performance.json',
metrics: 'metrics.txt',
lsif: 'lsif.sqlite3'
lsif: 'lsif.json'
}.freeze
INTERNAL_TYPES = {
......@@ -74,6 +74,7 @@ module Ci
scope :with_files_stored_locally, -> { where(file_store: [nil, ::JobArtifactUploader::Store::LOCAL]) }
scope :with_files_stored_remotely, -> { where(file_store: ::JobArtifactUploader::Store::REMOTE) }
scope :for_sha, ->(sha) { joins(job: :pipeline).where(ci_pipelines: { sha: sha }) }
scope :with_file_types, -> (file_types) do
types = self.file_types.select { |file_type| file_types.include?(file_type) }.values
......@@ -117,7 +118,7 @@ module Ci
metrics: 12, ## EE-specific
metrics_referee: 13, ## runner referees
network_referee: 14, ## runner referees
lsif: 15 # LSIF dump for code navigation
lsif: 15 # LSIF data for code navigation
}
enum file_format: {
......
......@@ -6,23 +6,22 @@ module ReactiveCaching
extend ActiveSupport::Concern
InvalidateReactiveCache = Class.new(StandardError)
ExceededReactiveCacheLimit = Class.new(StandardError)
included do
class_attribute :reactive_cache_lease_timeout
class_attribute :reactive_cache_key
class_attribute :reactive_cache_lifetime
class_attribute :reactive_cache_lease_timeout
class_attribute :reactive_cache_refresh_interval
class_attribute :reactive_cache_lifetime
class_attribute :reactive_cache_hard_limit
class_attribute :reactive_cache_worker_finder
# defaults
self.reactive_cache_key = -> (record) { [model_name.singular, record.id] }
self.reactive_cache_lease_timeout = 2.minutes
self.reactive_cache_refresh_interval = 1.minute
self.reactive_cache_lifetime = 10.minutes
self.reactive_cache_hard_limit = 1.megabyte
self.reactive_cache_worker_finder = ->(id, *_args) do
find_by(primary_key => id)
end
......@@ -71,6 +70,8 @@ module ReactiveCaching
if within_reactive_cache_lifetime?(*args)
enqueuing_update(*args) do
new_value = calculate_reactive_cache(*args)
check_exceeded_reactive_cache_limit!(new_value)
old_value = Rails.cache.read(key)
Rails.cache.write(key, new_value)
reactive_cache_updated(*args) if new_value != old_value
......@@ -121,5 +122,13 @@ module ReactiveCaching
ReactiveCachingWorker.perform_in(self.class.reactive_cache_refresh_interval, self.class, id, *args)
end
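# Measures the calculated value with Gitlab::Utils::DeepSize and raises when it
# exceeds the class's reactive_cache_hard_limit (behind the :reactive_cache_limit flag).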
def check_exceeded_reactive_cache_limit!(data)
return unless Feature.enabled?(:reactive_cache_limit)
data_deep_size = Gitlab::Utils::DeepSize.new(data, max_size: self.class.reactive_cache_hard_limit)
raise ExceededReactiveCacheLimit.new unless data_deep_size.valid?
end
end
end
......@@ -6,6 +6,7 @@ class Environment < ApplicationRecord
self.reactive_cache_refresh_interval = 1.minute
self.reactive_cache_lifetime = 55.seconds
self.reactive_cache_hard_limit = 10.megabytes
belongs_to :project, required: true
......
......@@ -24,6 +24,7 @@ class MergeRequest < ApplicationRecord
self.reactive_cache_key = ->(model) { [model.project.id, model.iid] }
self.reactive_cache_refresh_interval = 10.minutes
self.reactive_cache_lifetime = 10.minutes
self.reactive_cache_hard_limit = 20.megabytes
SORTING_PREFERENCE_FIELD = :merge_requests_sort
......
# frozen_string_literal: true
module Projects
class LsifDataService
attr_reader :file, :project, :path, :commit_id
CACHE_EXPIRE_IN = 1.hour
def initialize(file, project, params)
@file = file
@project = project
@path = params[:path]
@commit_id = params[:commit_id]
end
def execute
docs, doc_ranges, ranges =
fetch_data.values_at('docs', 'doc_ranges', 'ranges')
doc_id = doc_id_from(docs)
doc_ranges[doc_id]&.map do |range_id|
line_data, column_data = ranges[range_id]['loc']
{
start_line: line_data.first,
end_line: line_data.last,
start_char: column_data.first,
end_char: column_data.last
}
end
end
private
def fetch_data
Rails.cache.fetch("project:#{project.id}:lsif:#{commit_id}", expires_in: CACHE_EXPIRE_IN) do
data = nil
file.open do |stream|
Zlib::GzipReader.wrap(stream) do |gz_stream|
data = JSON.parse(gz_stream.read)
end
end
data
end
end
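# Finds the id of the doc whose path ends with the requested path; when several
# docs match, the shortest stored path (the closest to an exact match) wins.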
def doc_id_from(docs)
docs.reduce(nil) do |doc_id, (id, doc_path)|
next doc_id unless doc_path =~ /#{path}$/
if doc_id.nil? || docs[doc_id].size > doc_path.size
doc_id = id
end
doc_id
end
end
end
end
......@@ -8,7 +8,7 @@ module Spam
@spam_log = spam_log
end
def mark_as_ham!
def execute
if akismet.submit_ham
spam_log.update_attribute(:submitted_as_ham, true)
else
......
......@@ -3,10 +3,20 @@
%section
.row.registry-placeholder.prepend-bottom-10
.col-12
#js-vue-registry-images{ data: { endpoint: group_container_registries_path(@group, format: :json),
"help_page_path" => help_page_path('user/packages/container_registry/index'),
"no_containers_image" => image_path('illustrations/docker-empty-state.svg'),
"containers_error_image" => image_path('illustrations/docker-error-state.svg'),
"repository_url" => "",
is_group_page: true,
character_error: @character_error.to_s } }
- if Feature.enabled?(:vue_container_registry_explorer)
#js-container-registry{ data: { endpoint: group_container_registries_path(@group),
"help_page_path" => help_page_path('user/packages/container_registry/index'),
"two_factor_auth_help_link" => help_page_path('user/profile/account/two_factor_authentication'),
"personal_access_tokens_help_link" => help_page_path('user/profile/personal_access_tokens'),
"no_containers_image" => image_path('illustrations/docker-empty-state.svg'),
"containers_error_image" => image_path('illustrations/docker-error-state.svg'),
"registry_host_url_with_port" => escape_once(registry_config.host_port),
character_error: @character_error.to_s } }
- else
#js-vue-registry-images{ data: { endpoint: group_container_registries_path(@group, format: :json),
"help_page_path" => help_page_path('user/packages/container_registry/index'),
"no_containers_image" => image_path('illustrations/docker-empty-state.svg'),
"containers_error_image" => image_path('illustrations/docker-error-state.svg'),
"repository_url" => "",
is_group_page: true,
character_error: @character_error.to_s } }
......@@ -3,12 +3,24 @@
%section
.row.registry-placeholder.prepend-bottom-10
.col-12
#js-vue-registry-images{ data: { endpoint: project_container_registry_index_path(@project, format: :json),
"help_page_path" => help_page_path('user/packages/container_registry/index'),
"two_factor_auth_help_link" => help_page_path('user/profile/account/two_factor_authentication'),
"personal_access_tokens_help_link" => help_page_path('user/profile/personal_access_tokens'),
"no_containers_image" => image_path('illustrations/docker-empty-state.svg'),
"containers_error_image" => image_path('illustrations/docker-error-state.svg'),
"repository_url" => escape_once(@project.container_registry_url),
"registry_host_url_with_port" => escape_once(registry_config.host_port),
character_error: @character_error.to_s } }
- if Feature.enabled?(:vue_container_registry_explorer)
#js-container-registry{ data: { endpoint: project_container_registry_index_path(@project),
project_path: @project.full_path,
"help_page_path" => help_page_path('user/packages/container_registry/index'),
"two_factor_auth_help_link" => help_page_path('user/profile/account/two_factor_authentication'),
"personal_access_tokens_help_link" => help_page_path('user/profile/personal_access_tokens'),
"no_containers_image" => image_path('illustrations/docker-empty-state.svg'),
"containers_error_image" => image_path('illustrations/docker-error-state.svg'),
"repository_url" => escape_once(@project.container_registry_url),
"registry_host_url_with_port" => escape_once(registry_config.host_port),
character_error: @character_error.to_s } }
- else
#js-vue-registry-images{ data: { endpoint: project_container_registry_index_path(@project, format: :json),
"help_page_path" => help_page_path('user/packages/container_registry/index'),
"two_factor_auth_help_link" => help_page_path('user/profile/account/two_factor_authentication'),
"personal_access_tokens_help_link" => help_page_path('user/profile/personal_access_tokens'),
"no_containers_image" => image_path('illustrations/docker-empty-state.svg'),
"containers_error_image" => image_path('illustrations/docker-error-state.svg'),
"repository_url" => escape_once(@project.container_registry_url),
"registry_host_url_with_port" => escape_once(registry_config.host_port),
character_error: @character_error.to_s } }
......@@ -25,5 +25,7 @@ class ReactiveCachingWorker
.reactive_cache_worker_finder
.call(id, *args)
.try(:exclusively_update_reactive_cache!, *args)
rescue ReactiveCaching::ExceededReactiveCacheLimit => e
Gitlab::ErrorTracking.track_exception(e)
end
end
---
title: Include license_scanning in index_ci_builds_on_name_for_security_products_values
merge_request: 24090
author:
type: changed
---
title: Sets size limits on data loaded async, like deploy boards and merge request reports
merge_request: 21871
author:
type: changed
---
title: Log user last activity on REST API
merge_request: 21725
author:
type: fixed
# frozen_string_literal: true
# See http://doc.gitlab.com/ce/development/migration_style_guide.html
# for more information on how to write migrations for GitLab.
class UpdateIndexCiBuildsOnNameForSecurityProducts < ActiveRecord::Migration[6.0]
include Gitlab::Database::MigrationHelpers
DOWNTIME = false
INDEX_NAME = 'index_ci_builds_on_name_for_security_products_values'
INDEX_NAME_NEW = 'index_ci_builds_on_name_for_security_reports_values'
INITIAL_INDEX = "((name)::text = ANY (ARRAY[('container_scanning'::character varying)::text, ('dast'::character varying)::text, ('dependency_scanning'::character varying)::text, ('license_management'::character varying)::text, ('sast'::character varying)::text"
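# CREATE/DROP INDEX CONCURRENTLY cannot run inside a transaction, which is why
# the concurrent index helpers below need the DDL transaction disabled.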
disable_ddl_transaction!
def up
add_concurrent_index(:ci_builds,
:name,
name: INDEX_NAME_NEW,
where: INITIAL_INDEX + ", ('license_scanning'::character varying)::text]))")
remove_concurrent_index_by_name(:ci_builds, INDEX_NAME)
end
def down
add_concurrent_index(:ci_builds,
:name,
name: INDEX_NAME,
where: INITIAL_INDEX + ']))')
remove_concurrent_index_by_name(:ci_builds, INDEX_NAME_NEW)
end
end
......@@ -682,7 +682,7 @@ ActiveRecord::Schema.define(version: 2020_02_07_151640) do
t.index ["commit_id", "status", "type"], name: "index_ci_builds_on_commit_id_and_status_and_type"
t.index ["commit_id", "type", "name", "ref"], name: "index_ci_builds_on_commit_id_and_type_and_name_and_ref"
t.index ["commit_id", "type", "ref"], name: "index_ci_builds_on_commit_id_and_type_and_ref"
t.index ["name"], name: "index_ci_builds_on_name_for_security_products_values", where: "((name)::text = ANY (ARRAY[('container_scanning'::character varying)::text, ('dast'::character varying)::text, ('dependency_scanning'::character varying)::text, ('license_management'::character varying)::text, ('sast'::character varying)::text]))"
t.index ["name"], name: "index_ci_builds_on_name_for_security_reports_values", where: "((name)::text = ANY (ARRAY[('container_scanning'::character varying)::text, ('dast'::character varying)::text, ('dependency_scanning'::character varying)::text, ('license_management'::character varying)::text, ('sast'::character varying)::text, ('license_scanning'::character varying)::text]))"
t.index ["project_id", "id"], name: "index_ci_builds_on_project_id_and_id"
t.index ["project_id", "name", "ref"], name: "index_ci_builds_on_project_id_and_name_and_ref", where: "(((type)::text = 'Ci::Build'::text) AND ((status)::text = 'success'::text) AND ((retried = false) OR (retried IS NULL)))"
t.index ["project_id", "status"], name: "index_ci_builds_project_id_and_status_for_live_jobs_partial2", where: "(((type)::text = 'Ci::Build'::text) AND ((status)::text = ANY (ARRAY[('running'::character varying)::text, ('pending'::character varying)::text, ('created'::character varying)::text])))"
......
......@@ -87,6 +87,20 @@ Plan.default.limits.update!(ci_active_jobs: 500)
NOTE: **Note:** Set the limit to `0` to disable it.
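Disabling is the same `update!` call with a zero value; a minimal sketch reusing the `ci_active_jobs` limit from the example above:

```ruby
# A limit of 0 means no limit is enforced.
Plan.default.limits.update!(ci_active_jobs: 0)
```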
## Environment data on Deploy Boards
[Deploy Boards](../user/project/deploy_boards.md) load information from Kubernetes about
Pods and Deployments. However, data read from Kubernetes that exceeds 10 MB for a given
environment is not shown.
## Merge Request reports
Reports that go over the 20 MB limit won't be loaded. Affected reports:
- [Merge Request security reports](../user/project/merge_requests/index.md#security-reports-ultimate)
- [CI/CD parameter `artifacts:expose_as`](../ci/yaml/README.md#artifactsexpose_as)
- [JUnit test reports](../ci/junit_test_reports.md)
## Advanced Global Search limits
### Maximum field length
......
......@@ -24,6 +24,7 @@ GET /projects/:id/protected_branches
| Attribute | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `id` | integer/string | yes | The ID or [URL-encoded path of the project](README.md#namespaced-path-encoding) owned by the authenticated user |
| `search` | string | no | Name or part of the name of protected branches to be searched for |
```shell
curl --header "PRIVATE-TOKEN: <your_access_token>" 'https://gitlab.example.com/api/v4/projects/5/protected_branches'
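# With the new `search` parameter, only protected branches whose name matches
# are returned ("main" here is an assumed example value):
curl --header "PRIVATE-TOKEN: <your_access_token>" 'https://gitlab.example.com/api/v4/projects/5/protected_branches?search=main'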
......
......@@ -1376,6 +1376,7 @@ The activities that update the timestamp are:
- Git HTTP/SSH activities (such as clone, push)
- User logging into GitLab
- User visiting pages related to Dashboards, Projects, Issues and Merge Requests ([introduced](https://gitlab.com/gitlab-org/gitlab-foss/issues/54947) in GitLab 11.8)
- User using the API
By default, it shows the activity for all users in the last 6 months, but this can be
amended by using the `from` parameter.
......
......@@ -217,8 +217,8 @@ support this.
# The 'docker' hostname is the alias of the service container as described at
# https://docs.gitlab.com/ee/ci/docker/using_docker_images.html#accessing-the-services.
#
# Note that if you're using the Kubernetes executor, the variable
# should be set to tcp://localhost:2376 because of how the
# Note that if you're using GitLab Runner 12.7 or earlier with the Kubernetes executor and Kubernetes 1.6 or earlier,
# the variable must be set to tcp://localhost:2376 because of how the
# Kubernetes executor connects services to the job container
# DOCKER_HOST: tcp://localhost:2376
#
......@@ -279,12 +279,11 @@ variables:
# The 'docker' hostname is the alias of the service container as described at
# https://docs.gitlab.com/ee/ci/docker/using_docker_images.html#accessing-the-services
#
# Note that if you're using the Kubernetes executor, the variable should be set to
# tcp://localhost:2375 because of how the Kubernetes executor connects services
# to the job container
# Note that if you're using GitLab Runner 12.7 or earlier with the Kubernetes executor and Kubernetes 1.6 or earlier,
# the variable must be set to tcp://localhost:2375 because of how the
# Kubernetes executor connects services to the job container
# DOCKER_HOST: tcp://localhost:2375
#
# For non-Kubernetes executors, we use tcp://docker:2375
DOCKER_HOST: tcp://docker:2375
#
# This will instruct Docker not to start over TLS.
......
......@@ -345,6 +345,9 @@ For example, the following two definitions are equal:
| `command` | no | 9.4 |Command or script that should be used as the container's command. It will be translated to arguments passed to Docker after the image's name. The syntax is similar to [`Dockerfile`'s `CMD`][cmd] directive, where each shell token is a separate string in the array. |
| `alias` | no | 9.4 |Additional alias that can be used to access the service from the job's container. Read [Accessing the services](#accessing-the-services) for more information. |
NOTE: **Note:**
Alias support for the Kubernetes executor was [introduced](https://gitlab.com/gitlab-org/gitlab-runner/issues/2229) in GitLab Runner 12.8, and is only available for Kubernetes version 1.7 or later.
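For context, `alias` is set per service entry; a minimal sketch (image and alias names are illustrative):

```yaml
services:
  - name: tutum/wordpress:latest
    alias: wordpress   # the job container can now reach the service at `wordpress`
```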
### Starting multiple services from the same image
> Introduced in GitLab and GitLab Runner 9.4. Read more about the [extended
......
......@@ -48,6 +48,12 @@ of the cache by the `reactive_cache_lifetime` value.
Once the lifetime has expired, no more background jobs will be enqueued and calling
`#with_reactive_cache` will again return `nil` - starting the process all over again.
### 1 MB hard limit
`ReactiveCaching` has a 1 megabyte default limit. [This value is configurable](#selfreactive_cache_hard_limit).
If the data we're trying to cache is larger than 1 megabyte, it is not cached and a handled `ReactiveCaching::ExceededReactiveCacheLimit` exception is reported to Sentry.
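A class can raise that ceiling for itself, as this commit does for `Environment` and `MergeRequest`; a minimal sketch mirroring the `Environment` change:

```ruby
class Environment < ApplicationRecord
  include ReactiveCaching

  # Allow cached Kubernetes data up to 10 MB instead of the 1 MB default.
  self.reactive_cache_hard_limit = 10.megabytes
end
```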
## When to use
- If we need to make a request to an external API (for example, requests to the k8s API).
......@@ -228,6 +234,16 @@ be reset to `reactive_cache_lifetime`.
self.reactive_cache_lifetime = 10.minutes
```
#### `self.reactive_cache_hard_limit`
- This is the maximum data size that `ReactiveCaching` allows to be cached.
- The default is 1 megabyte. Data that exceeds this value is not cached,
and a handled `ReactiveCaching::ExceededReactiveCacheLimit` exception is reported to Sentry.
```ruby
self.reactive_cache_hard_limit = 5.megabytes
```
#### `self.reactive_cache_worker_finder`
- This is the method used by the background worker to find or generate the object on
......
......@@ -26,3 +26,4 @@ How do we measure the activity of users? GitLab considers a user active if:
- The user signs in.
- The user has Git activity (whether push or pull).
- The user visits pages related to Dashboards, Projects, Issues and Merge Requests ([introduced](https://gitlab.com/gitlab-org/gitlab-foss/issues/54947) in GitLab 11.8).
- The user uses the API.
......@@ -103,94 +103,102 @@ module API
helpers ::API::Helpers
helpers ::API::Helpers::CommonHelpers
# Keep in alphabetical order
mount ::API::AccessRequests
mount ::API::Appearance
mount ::API::Applications
mount ::API::Avatar
mount ::API::AwardEmoji
mount ::API::Badges
mount ::API::Boards
mount ::API::Branches
mount ::API::BroadcastMessages
mount ::API::Commits
mount ::API::CommitStatuses
mount ::API::DeployKeys
mount ::API::Deployments
mount ::API::Environments
mount ::API::ErrorTracking
mount ::API::Events
mount ::API::Features
mount ::API::Files
mount ::API::GroupBoards
mount ::API::GroupClusters
mount ::API::GroupExport
mount ::API::GroupLabels
mount ::API::GroupMilestones
mount ::API::Groups
mount ::API::GroupContainerRepositories
mount ::API::GroupVariables
mount ::API::ImportGithub
namespace do
after do
::Users::ActivityService.new(@current_user).execute if Feature.enabled?(:api_activity_logging)
end
# Keep in alphabetical order
mount ::API::AccessRequests
mount ::API::Appearance
mount ::API::Applications
mount ::API::Avatar
mount ::API::AwardEmoji
mount ::API::Badges
mount ::API::Boards
mount ::API::Branches
mount ::API::BroadcastMessages
mount ::API::Commits
mount ::API::CommitStatuses
mount ::API::DeployKeys
mount ::API::Deployments
mount ::API::Environments
mount ::API::ErrorTracking
mount ::API::Events
mount ::API::Features
mount ::API::Files
mount ::API::GroupBoards
mount ::API::GroupClusters
mount ::API::GroupExport
mount ::API::GroupLabels
mount ::API::GroupMilestones
mount ::API::Groups
mount ::API::GroupContainerRepositories
mount ::API::GroupVariables
mount ::API::ImportGithub
mount ::API::Issues
mount ::API::JobArtifacts
mount ::API::Jobs
mount ::API::Keys
mount ::API::Labels
mount ::API::Lint
mount ::API::LsifData
mount ::API::Markdown
mount ::API::Members
mount ::API::MergeRequestDiffs
mount ::API::MergeRequests
mount ::API::Namespaces
mount ::API::Notes
mount ::API::Discussions
mount ::API::ResourceLabelEvents
mount ::API::NotificationSettings
mount ::API::Pages
mount ::API::PagesDomains
mount ::API::Pipelines
mount ::API::PipelineSchedules
mount ::API::ProjectClusters
mount ::API::ProjectContainerRepositories
mount ::API::ProjectEvents
mount ::API::ProjectExport
mount ::API::ProjectImport
mount ::API::ProjectHooks
mount ::API::ProjectMilestones
mount ::API::Projects
mount ::API::ProjectSnapshots
mount ::API::ProjectSnippets
mount ::API::ProjectStatistics
mount ::API::ProjectTemplates
mount ::API::ProtectedBranches
mount ::API::ProtectedTags
mount ::API::Releases
mount ::API::Release::Links
mount ::API::RemoteMirrors
mount ::API::Repositories
mount ::API::Runner
mount ::API::Runners
mount ::API::Search
mount ::API::Services
mount ::API::Settings
mount ::API::SidekiqMetrics
mount ::API::Snippets
mount ::API::Statistics
mount ::API::Submodules
mount ::API::Subscriptions
mount ::API::Suggestions
mount ::API::SystemHooks
mount ::API::Tags
mount ::API::Templates
mount ::API::Todos
mount ::API::Triggers
mount ::API::UserCounts
mount ::API::Users
mount ::API::Variables
mount ::API::Version
mount ::API::Wikis
end
mount ::API::Internal::Base
mount ::API::Internal::Pages
mount ::API::Issues
mount ::API::JobArtifacts
mount ::API::Jobs
mount ::API::Keys
mount ::API::Labels
mount ::API::Lint
mount ::API::Markdown
mount ::API::Members
mount ::API::MergeRequestDiffs
mount ::API::MergeRequests
mount ::API::Namespaces
mount ::API::Notes
mount ::API::Discussions
mount ::API::ResourceLabelEvents
mount ::API::NotificationSettings
mount ::API::Pages
mount ::API::PagesDomains
mount ::API::Pipelines
mount ::API::PipelineSchedules
mount ::API::ProjectClusters
mount ::API::ProjectContainerRepositories
mount ::API::ProjectEvents
mount ::API::ProjectExport
mount ::API::ProjectImport
mount ::API::ProjectHooks
mount ::API::ProjectMilestones
mount ::API::Projects
mount ::API::ProjectSnapshots
mount ::API::ProjectSnippets
mount ::API::ProjectStatistics
mount ::API::ProjectTemplates
mount ::API::ProtectedBranches
mount ::API::ProtectedTags
mount ::API::Releases
mount ::API::Release::Links
mount ::API::RemoteMirrors
mount ::API::Repositories
mount ::API::Runner
mount ::API::Runners
mount ::API::Search
mount ::API::Services
mount ::API::Settings
mount ::API::SidekiqMetrics
mount ::API::Snippets
mount ::API::Statistics
mount ::API::Submodules
mount ::API::Subscriptions
mount ::API::Suggestions
mount ::API::SystemHooks
mount ::API::Tags
mount ::API::Templates
mount ::API::Todos
mount ::API::Triggers
mount ::API::UserCounts
mount ::API::Users
mount ::API::Variables
mount ::API::Version
mount ::API::Wikis
route :any, '*path' do
error!('404 Not Found', 404)
......
# frozen_string_literal: true
module API
class LsifData < Grape::API
MAX_FILE_SIZE = 10.megabytes
before do
not_found! if Feature.disabled?(:code_navigation, user_project)
end
params do
requires :id, type: String, desc: 'The ID of a project'
requires :commit_id, type: String, desc: 'The ID of a commit'
end
resource :projects, requirements: API::NAMESPACE_OR_PROJECT_REQUIREMENTS do
segment ':id/commits/:commit_id' do
params do
requires :path, type: String, desc: 'The path of a file'
end
get 'lsif/info' do
authorize! :download_code, user_project
artifact =
@project.job_artifacts
.with_file_types(['lsif'])
.for_sha(params[:commit_id])
.last
not_found! unless artifact
authorize! :read_pipeline, artifact.job.pipeline
file_too_large! if artifact.file.cached_size > MAX_FILE_SIZE
::Projects::LsifDataService.new(artifact.file, @project, params).execute
end
end
end
end
end
......@@ -10169,6 +10169,12 @@ msgstr ""
msgid "Image %{imageName} was scheduled for deletion from the registry."
msgstr ""
msgid "Image ID"
msgstr ""
msgid "Image deleted successfully"
msgstr ""
msgid "Image: %{image}"
msgstr ""
......@@ -11019,6 +11025,9 @@ msgstr ""
msgid "Last Seen"
msgstr ""
msgid "Last Updated"
msgstr ""
msgid "Last accessed on"
msgstr ""
......@@ -17642,12 +17651,21 @@ msgstr ""
msgid "Something went wrong while closing the %{issuable}. Please try again later"
msgstr ""
msgid "Something went wrong while deleting the image."
msgstr ""
msgid "Something went wrong while deleting the package."
msgstr ""
msgid "Something went wrong while deleting the source branch. Please try again."
msgstr ""
msgid "Something went wrong while deleting the tag."
msgstr ""
msgid "Something went wrong while deleting the tags."
msgstr ""
msgid "Something went wrong while deleting your note. Please try again."
msgstr ""
......@@ -17690,6 +17708,9 @@ msgstr ""
msgid "Something went wrong while fetching the registry list."
msgstr ""
msgid "Something went wrong while fetching the tags list."
msgstr ""
msgid "Something went wrong while initializing the OpenAPI viewer"
msgstr ""
......@@ -18503,6 +18524,9 @@ msgstr ""
msgid "Tag"
msgstr ""
msgid "Tag deleted successfully"
msgstr ""
msgid "Tag list:"
msgstr ""
......@@ -18521,6 +18545,9 @@ msgstr ""
msgid "Tags"
msgstr ""
msgid "Tags deleted successfully"
msgstr ""
msgid "Tags feed"
msgstr ""
......
# frozen_string_literal: true
module QA
context 'Plan', :orchestrated, :smtp, :reliable do
context 'Plan', :orchestrated, :smtp do
describe 'Email Notification' do
let(:user) do
Resource::User.fabricate_or_use(Runtime::Env.gitlab_qa_username_1, Runtime::Env.gitlab_qa_password_1)
......
......@@ -139,6 +139,16 @@ FactoryBot.define do
end
end
trait :lsif do
file_type { :lsif }
file_format { :raw }
after(:build) do |artifact, evaluator|
artifact.file = fixture_file_upload(
Rails.root.join('spec/fixtures/lsif.json.gz'), 'application/octet-stream')
end
end
trait :correct_checksum do
after(:build) do |artifact, evaluator|
artifact.file_sha256 = Digest::SHA256.file(artifact.file.path).hexdigest
......
......@@ -15,6 +15,7 @@ describe 'Container Registry', :js do
project.add_developer(user)
stub_container_registry_config(enabled: true)
stub_container_registry_tags(repository: :any, tags: [])
stub_feature_flags(vue_container_registry_explorer: false)
end
it 'has a page title set' do
......
......@@ -16,6 +16,8 @@ describe 'Projects > Files > User creates a directory', :js do
project.add_developer(user)
sign_in(user)
visit project_tree_path(project, 'master')
wait_for_requests
end
context 'with default target branch' do
......@@ -43,6 +45,25 @@ describe 'Projects > Files > User creates a directory', :js do
end
end
context 'inside sub-folder' do
it 'creates new directory' do
click_link 'files'
page.within('.repo-breadcrumb') do
expect(page).to have_link('files')
end
first('.add-to-tree').click
click_link('New directory')
fill_in(:dir_name, with: 'new_directory')
click_button('Create directory')
expect(page).to have_content('files')
expect(page).to have_content('new_directory')
end
end
context 'with a new target branch' do
before do
first('.add-to-tree').click
......
export const reposServerResponse = [
{
destroy_path: 'path',
id: '123',
location: 'location',
path: 'foo',
tags_path: 'tags_path',
},
{
destroy_path: 'path_',
id: '456',
location: 'location_',
path: 'bar',
tags_path: 'tags_path_',
},
];
export const registryServerResponse = [
{
name: 'centos7',
short_revision: 'b118ab5b0',
revision: 'b118ab5b0e90b7cb5127db31d5321ac14961d097516a8e0e72084b6cdc783b43',
total_size: 679,
layers: 19,
location: 'location',
created_at: 1505828744434,
destroy_path: 'path_',
},
{
name: 'centos6',
short_revision: 'b118ab5b0',
revision: 'b118ab5b0e90b7cb5127db31d5321ac14961d097516a8e0e72084b6cdc783b43',
total_size: 679,
layers: 19,
location: 'location',
created_at: 1505828744434,
},
];
import axios from '~/lib/utils/axios_utils';
import MockAdapter from 'axios-mock-adapter';
import * as actions from '~/registry/explorer/stores/actions';
import * as types from '~/registry/explorer/stores/mutation_types';
import testAction from 'helpers/vuex_action_helper';
import createFlash from '~/flash';
import { TEST_HOST } from 'helpers/test_constants';
import { reposServerResponse, registryServerResponse } from '../mock_data';
jest.mock('~/flash.js');
describe('Actions RegistryExplorer Store', () => {
let mock;
const endpoint = `${TEST_HOST}/endpoint.json`;
beforeEach(() => {
mock = new MockAdapter(axios);
});
afterEach(() => {
mock.restore();
});
it('sets initial state', done => {
const initialState = {
config: {
endpoint,
},
};
testAction(
actions.setInitialState,
initialState,
null,
[{ type: types.SET_INITIAL_STATE, payload: initialState }],
[],
done,
);
});
describe('receives api responses', () => {
const response = {
data: [1, 2, 3],
headers: {
page: 1,
perPage: 10,
},
};
it('images list response', done => {
testAction(
actions.receiveImagesListSuccess,
response,
null,
[
{ type: types.SET_IMAGES_LIST_SUCCESS, payload: response.data },
{ type: types.SET_PAGINATION, payload: response.headers },
],
[],
done,
);
});
it('tags list response', done => {
testAction(
actions.receiveTagsListSuccess,
response,
null,
[
{ type: types.SET_TAGS_LIST_SUCCESS, payload: response.data },
{ type: types.SET_TAGS_PAGINATION, payload: response.headers },
],
[],
done,
);
});
});
describe('fetch images list', () => {
it('sets the imagesList and pagination', done => {
mock.onGet(endpoint).replyOnce(200, reposServerResponse, {});
testAction(
actions.requestImagesList,
{},
{
config: {
endpoint,
},
},
[
{ type: types.SET_MAIN_LOADING, payload: true },
{ type: types.SET_MAIN_LOADING, payload: false },
],
[{ type: 'receiveImagesListSuccess', payload: { data: reposServerResponse, headers: {} } }],
done,
);
});
it('should create flash on error', done => {
testAction(
actions.requestImagesList,
{},
{
config: {
endpoint: null,
},
},
[
{ type: types.SET_MAIN_LOADING, payload: true },
{ type: types.SET_MAIN_LOADING, payload: false },
],
[],
() => {
expect(createFlash).toHaveBeenCalled();
done();
},
);
});
});
describe('fetch tags list', () => {
const url = window.btoa(`${endpoint}/1`);
it('sets the tagsList', done => {
mock.onGet(window.atob(url)).replyOnce(200, registryServerResponse, {});
testAction(
actions.requestTagsList,
{ id: url },
{},
[
{ type: types.SET_MAIN_LOADING, payload: true },
{ type: types.SET_MAIN_LOADING, payload: false },
],
[
{
type: 'receiveTagsListSuccess',
payload: { data: registryServerResponse, headers: {} },
},
],
done,
);
});
it('should create flash on error', done => {
testAction(
actions.requestTagsList,
{ id: url },
{},
[
{ type: types.SET_MAIN_LOADING, payload: true },
{ type: types.SET_MAIN_LOADING, payload: false },
],
[],
() => {
expect(createFlash).toHaveBeenCalled();
done();
},
);
});
});
describe('request delete single tag', () => {
it('successfully performs the delete request', done => {
const deletePath = 'delete/path';
const url = window.btoa(`${endpoint}/1`);
mock.onDelete(deletePath).replyOnce(200);
testAction(
actions.requestDeleteTag,
{
tag: {
destroy_path: deletePath,
},
imageId: url,
},
{
tagsPagination: {},
},
[
{ type: types.SET_MAIN_LOADING, payload: true },
{ type: types.SET_MAIN_LOADING, payload: false },
],
[
{
type: 'requestTagsList',
payload: { pagination: {}, id: url },
},
],
() => {
expect(createFlash).toHaveBeenCalled();
done();
},
);
});
it('should show flash message on error', done => {
testAction(
actions.requestDeleteTag,
{
tag: {
destroy_path: null,
},
},
{},
[
{ type: types.SET_MAIN_LOADING, payload: true },
{ type: types.SET_MAIN_LOADING, payload: false },
],
[],
() => {
expect(createFlash).toHaveBeenCalled();
done();
},
);
});
});
describe('request delete multiple tags', () => {
const imageId = 1;
const projectPath = 'project-path';
const url = `${projectPath}/registry/repository/${imageId}/tags/bulk_destroy`;
it('successfully performs the delete request', done => {
mock.onDelete(url).replyOnce(200);
testAction(
actions.requestDeleteTags,
{
ids: [1, 2],
imageId,
},
{
config: {
projectPath,
},
tagsPagination: {},
},
[
{ type: types.SET_MAIN_LOADING, payload: true },
{ type: types.SET_MAIN_LOADING, payload: false },
],
[
{
type: 'requestTagsList',
payload: { pagination: {}, id: 1 },
},
],
() => {
expect(createFlash).toHaveBeenCalled();
done();
},
);
});
it('should show flash message on error', done => {
mock.onDelete(url).replyOnce(500);
testAction(
actions.requestDeleteTags,
{
ids: [1, 2],
imageId,
},
{
config: {
projectPath,
},
tagsPagination: {},
},
[
{ type: types.SET_MAIN_LOADING, payload: true },
{ type: types.SET_MAIN_LOADING, payload: false },
],
[],
() => {
expect(createFlash).toHaveBeenCalled();
done();
},
);
});
});
describe('request delete single image', () => {
it('successfully performs the delete request', done => {
const deletePath = 'delete/path';
mock.onDelete(deletePath).replyOnce(200);
testAction(
actions.requestDeleteImage,
deletePath,
{
pagination: {},
},
[
{ type: types.SET_MAIN_LOADING, payload: true },
{ type: types.SET_MAIN_LOADING, payload: false },
],
[
{
type: 'requestImagesList',
payload: { pagination: {} },
},
],
() => {
expect(createFlash).toHaveBeenCalled();
done();
},
);
});
it('should show flash message on error', done => {
testAction(
actions.requestDeleteImage,
null,
{},
[
{ type: types.SET_MAIN_LOADING, payload: true },
{ type: types.SET_MAIN_LOADING, payload: false },
],
[],
() => {
expect(createFlash).toHaveBeenCalled();
done();
},
);
});
});
});
import mutations from '~/registry/explorer/stores/mutations';
import * as types from '~/registry/explorer/stores/mutation_types';
describe('Mutations Registry Explorer Store', () => {
let mockState;
beforeEach(() => {
mockState = {};
});
describe('SET_INITIAL_STATE', () => {
it('should set the initial state', () => {
const expectedState = { ...mockState, config: { endpoint: 'foo' } };
mutations[types.SET_INITIAL_STATE](mockState, { endpoint: 'foo' });
expect(mockState).toEqual(expectedState);
});
});
describe('SET_IMAGES_LIST_SUCCESS', () => {
it('should set the images list', () => {
const images = [1, 2, 3];
const expectedState = { ...mockState, images };
mutations[types.SET_IMAGES_LIST_SUCCESS](mockState, images);
expect(mockState).toEqual(expectedState);
});
});
describe('SET_TAGS_LIST_SUCCESS', () => {
it('should set the tags list', () => {
const tags = [1, 2, 3];
const expectedState = { ...mockState, tags };
mutations[types.SET_TAGS_LIST_SUCCESS](mockState, tags);
expect(mockState).toEqual(expectedState);
});
});
describe('SET_MAIN_LOADING', () => {
it('should set the isLoading', () => {
const expectedState = { ...mockState, isLoading: true };
mutations[types.SET_MAIN_LOADING](mockState, true);
expect(mockState).toEqual(expectedState);
});
});
describe('SET_PAGINATION', () => {
const generatePagination = () => [
{
'X-PAGE': '1',
'X-PER-PAGE': '20',
'X-TOTAL': '100',
'X-TOTAL-PAGES': '5',
'X-NEXT-PAGE': '2',
'X-PREV-PAGE': '0',
},
{
page: 1,
perPage: 20,
total: 100,
totalPages: 5,
nextPage: 2,
previousPage: 0,
},
];
it('should set the images pagination', () => {
const [headers, expectedResult] = generatePagination();
const expectedState = { ...mockState, pagination: expectedResult };
mutations[types.SET_PAGINATION](mockState, headers);
expect(mockState).toEqual(expectedState);
});
it('should set the tags pagination', () => {
const [headers, expectedResult] = generatePagination();
const expectedState = { ...mockState, tagsPagination: expectedResult };
mutations[types.SET_TAGS_PAGINATION](mockState, headers);
expect(mockState).toEqual(expectedState);
});
});
});
......@@ -20,11 +20,18 @@ describe('updateElementsVisibility', () => {
});
describe('updateFormAction', () => {
it('updates form action', () => {
it.each`
path
${'/test'}
${'test'}
${'/'}
`('updates form action for $path', ({ path }) => {
setHTMLFixture('<form class="js-test" action="/"></form>');
updateFormAction('.js-test', '/gitlab/create', '/test');
updateFormAction('.js-test', '/gitlab/create', path);
expect(document.querySelector('.js-test').action).toBe('http://localhost/gitlab/create/test');
expect(document.querySelector('.js-test').action).toBe(
`http://localhost/gitlab/create/${path.replace(/^\//, '')}`,
);
});
});
......@@ -43,7 +43,7 @@ describe Gitlab::Ci::Config::Entry::Reports do
:license_management | 'gl-license-management-report.json'
:license_scanning | 'gl-license-scanning-report.json'
:performance | 'performance.json'
:lsif | 'lsif.sqlite3'
:lsif | 'lsif.json'
end
with_them do
......
# frozen_string_literal: true
require 'spec_helper'
# Verifies that given an exported project meta-data tree, when importing this
# tree and then exporting it again, we should obtain the initial tree.
#
# This equivalence only works up to a certain extent, for instance we need
# to ignore:
#
# - row IDs and foreign key IDs
# - some timestamps
# - randomly generated fields like tokens
#
# as these are expected to change between import/export cycles.
describe Gitlab::ImportExport do
include ImportExport::CommonUtil
include ConfigurationHelper
include ImportExport::ProjectTreeExpectations
let(:json_fixture) { 'complex' }
it 'yields the initial tree when importing and exporting it again' do
project = create(:project, creator: create(:user, :admin))
# We first generate a test fixture dynamically from a seed-fixture, so as to
# account for any fields in the initial fixture that are missing and set to
# defaults during import (ideally we should have realistic test fixtures
# that "honestly" represent exports)
expect(
restore_then_save_project(
project,
import_path: seed_fixture_path,
export_path: test_fixture_path)
).to be true
# Import, then export again from the generated fixture. Any residual changes
# in the JSON will count towards the comparison, i.e. produce test failures.
expect(
restore_then_save_project(
project,
import_path: test_fixture_path,
export_path: test_tmp_path)
).to be true
imported_json = JSON.parse(File.read("#{test_fixture_path}/project.json"))
exported_json = JSON.parse(File.read("#{test_tmp_path}/project.json"))
assert_relations_match(imported_json, exported_json)
end
private
def seed_fixture_path
"#{fixtures_path}/#{json_fixture}"
end
def test_fixture_path
"#{test_tmp_path}/#{json_fixture}"
end
end
......@@ -111,6 +111,18 @@ describe Ci::JobArtifact do
end
end
describe '.for_sha' do
it 'returns job artifacts for a given pipeline sha' do
first_pipeline = create(:ci_pipeline)
second_pipeline = create(:ci_pipeline, sha: Digest::SHA1.hexdigest(SecureRandom.hex))
first_artifact = create(:ci_job_artifact, job: create(:ci_build, pipeline: first_pipeline))
second_artifact = create(:ci_job_artifact, job: create(:ci_build, pipeline: second_pipeline))
expect(described_class.for_sha(first_pipeline.sha)).to eq([first_artifact])
expect(described_class.for_sha(second_pipeline.sha)).to eq([second_artifact])
end
end
describe 'callbacks' do
subject { create(:ci_job_artifact, :archive) }
......
......@@ -165,11 +165,25 @@ describe ReactiveCaching, :use_clean_rails_memory_store_caching do
describe '#exclusively_update_reactive_cache!' do
subject(:go!) { instance.exclusively_update_reactive_cache! }
shared_examples 'successful cache' do
it 'caches the result of #calculate_reactive_cache' do
go!
expect(read_reactive_cache(instance)).to eq(calculation.call)
end
it 'does not raise the exception' do
expect { go! }.not_to raise_exception(ReactiveCaching::ExceededReactiveCacheLimit)
end
end
context 'when the lease is free and lifetime is not exceeded' do
before do
stub_reactive_cache(instance, "preexisting")
stub_reactive_cache(instance, 'preexisting')
end
it_behaves_like 'successful cache'
it 'takes and releases the lease' do
expect_to_obtain_exclusive_lease(cache_key, 'uuid')
expect_to_cancel_exclusive_lease(cache_key, 'uuid')
......@@ -177,19 +191,13 @@ describe ReactiveCaching, :use_clean_rails_memory_store_caching do
go!
end
it 'caches the result of #calculate_reactive_cache' do
go!
expect(read_reactive_cache(instance)).to eq(calculation.call)
end
it "enqueues a repeat worker" do
it 'enqueues a repeat worker' do
expect_reactive_cache_update_queued(instance)
go!
end
it "calls a reactive_cache_updated only once if content did not change on subsequent update" do
it 'calls a reactive_cache_updated only once if content did not change on subsequent update' do
expect(instance).to receive(:calculate_reactive_cache).twice
expect(instance).to receive(:reactive_cache_updated).once
......@@ -202,6 +210,43 @@ describe ReactiveCaching, :use_clean_rails_memory_store_caching do
go!
end
context 'when calculated object size exceeds default reactive_cache_hard_limit' do
let(:calculation) { -> { 'a' * 2 * 1.megabyte } }
shared_examples 'ExceededReactiveCacheLimit' do
it 'raises ExceededReactiveCacheLimit exception and does not cache new data' do
expect { go! }.to raise_exception(ReactiveCaching::ExceededReactiveCacheLimit)
expect(read_reactive_cache(instance)).not_to eq(calculation.call)
end
end
context 'when reactive_cache_limit feature flag is enabled' do
it_behaves_like 'ExceededReactiveCacheLimit'
context 'when reactive_cache_hard_limit is overridden' do
let(:test_class) { Class.new(CacheTest) { self.reactive_cache_hard_limit = 3.megabytes } }
let(:instance) { test_class.new(666, &calculation) }
it_behaves_like 'successful cache'
context 'when cache size is over the overridden limit' do
let(:calculation) { -> { 'a' * 4 * 1.megabyte } }
it_behaves_like 'ExceededReactiveCacheLimit'
end
end
end
context 'when reactive_cache_limit feature flag is disabled' do
before do
stub_feature_flags(reactive_cache_limit: false)
end
it_behaves_like 'successful cache'
end
end
context 'and #calculate_reactive_cache raises an exception' do
before do
stub_reactive_cache(instance, "preexisting")
......@@ -256,8 +301,8 @@ describe ReactiveCaching, :use_clean_rails_memory_store_caching do
it { expect(subject.reactive_cache_lease_timeout).to be_a(ActiveSupport::Duration) }
it { expect(subject.reactive_cache_refresh_interval).to be_a(ActiveSupport::Duration) }
it { expect(subject.reactive_cache_lifetime).to be_a(ActiveSupport::Duration) }
it { expect(subject.reactive_cache_key).to respond_to(:call) }
it { expect(subject.reactive_cache_hard_limit).to be_a(Integer) }
it { expect(subject.reactive_cache_worker_finder).to respond_to(:call) }
end
end
# frozen_string_literal: true
require 'spec_helper'
describe API::API do
let(:user) { create(:user, last_activity_on: Date.yesterday) }
describe 'Record user last activity in after hook' do
# It does not matter which endpoint is used because last_activity_on should
# be updated on every request. `/groups` is used as an example
# to represent any API endpoint
it 'updates the users last_activity_on date' do
expect { get api('/groups', user) }.to change { user.reload.last_activity_on }.to(Date.today)
end
context 'when the api_activity_logging feature is disabled' do
it 'does not touch last_activity_on' do
stub_feature_flags(api_activity_logging: false)
expect { get api('/groups', user) }.not_to change { user.reload.last_activity_on }
end
end
end
end
# frozen_string_literal: true
require "spec_helper"
describe API::LsifData do
let_it_be(:user) { create(:user) }
let_it_be(:project) { create(:project, :repository) }
let(:commit) { project.commit }
describe 'GET lsif/info' do
let(:endpoint_path) { "/projects/#{project.id}/commits/#{commit.id}/lsif/info" }
context 'user does not have access to the project' do
before do
project.add_guest(user)
end
it 'returns 403' do
get api(endpoint_path, user), params: { path: 'main.go' }
expect(response).to have_gitlab_http_status(:forbidden)
end
end
context 'user has access to the project' do
before do
project.add_reporter(user)
end
context 'code_navigation feature is disabled' do
before do
stub_feature_flags(code_navigation: false)
end
it 'returns 404' do
get api(endpoint_path, user)
expect(response).to have_gitlab_http_status(:not_found)
end
end
context 'there is no job artifact for the passed commit' do
it 'returns 404' do
get api(endpoint_path, user), params: { path: 'main.go' }
expect(response).to have_gitlab_http_status(:not_found)
end
end
context 'lsif data is stored as a job artifact' do
let!(:pipeline) { create(:ci_pipeline, project: project, sha: commit.id) }
let!(:artifact) { create(:ci_job_artifact, :lsif, job: create(:ci_build, pipeline: pipeline)) }
it 'returns code navigation info for a given path' do
get api(endpoint_path, user), params: { path: 'main.go' }
expect(response).to have_gitlab_http_status(:ok)
expect(response.parsed_body.last).to eq({
'end_char' => 18,
'end_line' => 8,
'start_char' => 13,
'start_line' => 8
})
end
context 'the stored file is too large' do
it 'returns 413' do
allow_any_instance_of(JobArtifactUploader).to receive(:cached_size).and_return(20.megabytes)
get api(endpoint_path, user), params: { path: 'main.go' }
expect(response).to have_gitlab_http_status(:payload_too_large)
end
end
context 'the user does not have access to the pipeline' do
let(:project) { create(:project, :repository, builds_access_level: ProjectFeature::DISABLED) }
it 'returns 403' do
get api(endpoint_path, user), params: { path: 'main.go' }
expect(response).to have_gitlab_http_status(:forbidden)
end
end
end
end
end
end
......@@ -148,6 +148,7 @@ describe API::ProjectContainerRepositories do
let(:lease_key) { "container_repository:cleanup_tags:#{root_repository.id}" }
it 'schedules cleanup of tags repository' do
stub_last_activity_update
stub_exclusive_lease(lease_key, timeout: 1.hour)
expect(CleanupContainerRepositoryWorker).to receive(:perform_async)
.with(maintainer.id, root_repository.id, worker_params)
......
......@@ -1462,7 +1462,7 @@ describe API::Runner, :clean_gitlab_redis_shared_state do
subject
expect(response).to have_gitlab_http_status(200)
- expect(response.content_type.to_s).to eq(Gitlab::Workhorse::INTERNAL_API_CONTENT_TYPE)
+ expect(response.media_type).to eq(Gitlab::Workhorse::INTERNAL_API_CONTENT_TYPE)
expect(json_response['TempPath']).to eq(JobArtifactUploader.workhorse_local_upload_path)
expect(json_response['RemoteObject']).to be_nil
end
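A note on the content_type to media_type change repeated throughout this commit: in Rails 6, ActionDispatch::Response#content_type returns the full header value including any charset parameter, while media_type returns only the MIME type, so the old assertion would break once a charset is appended. Illustrative (assumed) values:
response.content_type # => "text/plain; charset=utf-8"
response.media_type   # => "text/plain"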
......@@ -1482,7 +1482,7 @@ describe API::Runner, :clean_gitlab_redis_shared_state do
subject
expect(response).to have_gitlab_http_status(200)
- expect(response.content_type.to_s).to eq(Gitlab::Workhorse::INTERNAL_API_CONTENT_TYPE)
+ expect(response.media_type).to eq(Gitlab::Workhorse::INTERNAL_API_CONTENT_TYPE)
expect(json_response).not_to have_key('TempPath')
expect(json_response['RemoteObject']).to have_key('ID')
expect(json_response['RemoteObject']).to have_key('GetURL')
......@@ -1558,7 +1558,7 @@ describe API::Runner, :clean_gitlab_redis_shared_state do
authorize_artifacts_with_token_in_headers
expect(response).to have_gitlab_http_status(200)
- expect(response.content_type.to_s).to eq(Gitlab::Workhorse::INTERNAL_API_CONTENT_TYPE)
+ expect(response.media_type).to eq(Gitlab::Workhorse::INTERNAL_API_CONTENT_TYPE)
expect(json_response['TempPath']).not_to be_nil
end
......
......@@ -92,7 +92,7 @@ describe 'Git HTTP requests' do
it 'allows pulls' do
download(path, env) do |response|
expect(response).to have_gitlab_http_status(:ok)
- expect(response.content_type.to_s).to eq(Gitlab::Workhorse::INTERNAL_API_CONTENT_TYPE)
+ expect(response.media_type).to eq(Gitlab::Workhorse::INTERNAL_API_CONTENT_TYPE)
end
end
end
......@@ -101,7 +101,7 @@ describe 'Git HTTP requests' do
it 'allows pushes', :sidekiq_might_not_need_inline do
upload(path, env) do |response|
expect(response).to have_gitlab_http_status(:ok)
- expect(response.content_type.to_s).to eq(Gitlab::Workhorse::INTERNAL_API_CONTENT_TYPE)
+ expect(response.media_type).to eq(Gitlab::Workhorse::INTERNAL_API_CONTENT_TYPE)
end
end
end
......@@ -509,7 +509,7 @@ describe 'Git HTTP requests' do
download(path, env) do
expect(response).to have_gitlab_http_status(:ok)
- expect(response.content_type.to_s).to eq(Gitlab::Workhorse::INTERNAL_API_CONTENT_TYPE)
+ expect(response.media_type).to eq(Gitlab::Workhorse::INTERNAL_API_CONTENT_TYPE)
end
end
......@@ -518,7 +518,7 @@ describe 'Git HTTP requests' do
upload(path, env) do
expect(response).to have_gitlab_http_status(:ok)
- expect(response.content_type.to_s).to eq(Gitlab::Workhorse::INTERNAL_API_CONTENT_TYPE)
+ expect(response.media_type).to eq(Gitlab::Workhorse::INTERNAL_API_CONTENT_TYPE)
end
end
......
......@@ -907,7 +907,7 @@ describe 'Git LFS API and storage' do
it_behaves_like 'LFS http 200 response'
it 'uses the gitlab-workhorse content type' do
- expect(response.content_type.to_s).to eq(Gitlab::Workhorse::INTERNAL_API_CONTENT_TYPE)
+ expect(response.media_type).to eq(Gitlab::Workhorse::INTERNAL_API_CONTENT_TYPE)
end
end
......
# frozen_string_literal: true
require 'spec_helper'
describe Projects::LsifDataService do
let(:artifact) { create(:ci_job_artifact, :lsif) }
let(:project) { build_stubbed(:project) }
let(:path) { 'main.go' }
let(:commit_id) { Digest::SHA1.hexdigest(SecureRandom.hex) }
let(:params) { { path: path, commit_id: commit_id } }
let(:service) { described_class.new(artifact.file, project, params) }
describe '#execute' do
context 'fetched lsif file', :use_clean_rails_memory_store_caching do
it 'is cached' do
service.execute
cached_data = Rails.cache.fetch("project:#{project.id}:lsif:#{commit_id}")
expect(cached_data.keys).to eq(%w[def_refs doc_ranges docs hover_refs ranges])
end
end
context 'for main.go' do
it 'returns lsif ranges for the file' do
expect(service.execute).to eq([
{
end_char: 9,
end_line: 6,
start_char: 5,
start_line: 6
},
{
end_char: 36,
end_line: 3,
start_char: 1,
start_line: 3
},
{
end_char: 12,
end_line: 7,
start_char: 1,
start_line: 7
},
{
end_char: 20,
end_line: 7,
start_char: 13,
start_line: 7
},
{
end_char: 12,
end_line: 8,
start_char: 1,
start_line: 8
},
{
end_char: 18,
end_line: 8,
start_char: 13,
start_line: 8
}
])
end
end
context 'for morestrings/reverse.go' do
let(:path) { 'morestrings/reverse.go' }
it 'returns lsif ranges for the file' do
expect(service.execute.first).to eq({
end_char: 2,
end_line: 11,
start_char: 1,
start_line: 11
})
end
end
context 'for an unknown file' do
let(:path) { 'unknown.go' }
it 'returns nil' do
expect(service.execute).to eq(nil)
end
end
end
describe '#doc_id_from' do
context 'when the passed path matches multiple files' do
let(:path) { 'check/main.go' }
let(:docs) do
{
1 => 'cmd/check/main.go',
2 => 'cmd/command.go',
3 => 'check/main.go',
4 => 'cmd/nested/check/main.go'
}
end
it 'fetches the document with the shortest absolute path' do
expect(service.__send__(:doc_id_from, docs)).to eq(3)
end
end
end
end
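The doc_id_from example above pins down the tie-breaking rule for ambiguous paths: among documents whose path ends with the requested path, the least-nested (shortest) one wins. A hypothetical sketch of that rule (names and signature are assumptions, not the actual Projects::LsifDataService code):
def doc_id_from(docs)
  # Keep only docs whose path ends with the requested path, then take
  # the shortest match; &.first returns its document id.
  matching = docs.select { |_id, doc_path| doc_path.end_with?(path) }
  matching.min_by { |_id, doc_path| doc_path.length }&.first
end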
......@@ -13,18 +13,18 @@ describe Spam::HamService do
allow(Spam::AkismetService).to receive(:new).and_return fake_akismet_service
end
- describe '#mark_as_ham!' do
+ describe '#execute' do
context 'AkismetService returns false (Akismet cannot be reached, etc.)' do
before do
allow(fake_akismet_service).to receive(:submit_ham).and_return false
end
it 'returns false' do
- expect(subject.mark_as_ham!).to be_falsey
+ expect(subject.execute).to be_falsey
end
it 'does not update the record' do
- expect { subject.mark_as_ham! }.not_to change { spam_log.submitted_as_ham }
+ expect { subject.execute }.not_to change { spam_log.submitted_as_ham }
end
context 'if spam log record has already been marked as spam' do
......@@ -33,7 +33,7 @@ describe Spam::HamService do
end
it 'does not update the record' do
- expect { subject.mark_as_ham! }.not_to change { spam_log.submitted_as_ham }
+ expect { subject.execute }.not_to change { spam_log.submitted_as_ham }
end
end
end
......@@ -45,11 +45,11 @@ describe Spam::HamService do
end
it 'returns true' do
- expect(subject.mark_as_ham!).to be_truthy
+ expect(subject.execute).to be_truthy
end
it 'updates the record' do
- expect { subject.mark_as_ham! }.to change { spam_log.submitted_as_ham }.from(false).to(true)
+ expect { subject.execute }.to change { spam_log.submitted_as_ham }.from(false).to(true)
end
end
end
......
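The hunk above renames Spam::HamService#mark_as_ham! to #execute, matching the Service#execute convention used across the codebase; a caller would presumably change to (constructor shape assumed):
Spam::HamService.new(spam_log).execute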
......@@ -46,4 +46,8 @@ module ApiHelpers
expect(json_response).to be_an Array
expect(json_response.map { |item| item['id'] }).to eq(Array(items))
end
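# Stubs Users::ActivityService#execute so request specs do not write
# last_activity_on as a side effect of the new API after hook.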
def stub_last_activity_update
allow_any_instance_of(Users::ActivityService).to receive(:execute)
end
end
......@@ -17,5 +17,38 @@ module ImportExport
allow_any_instance_of(Gitlab::ImportExport).to receive(:export_path) { export_path }
end
def fixtures_path
"spec/fixtures/lib/gitlab/import_export"
end
def test_tmp_path
"tmp/tests/gitlab-test/import_export"
end
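# Round-trips a project: restores it from import_path, then saves it back
# to export_path; returns true only if both steps succeed.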
def restore_then_save_project(project, import_path:, export_path:)
project_restorer = get_project_restorer(project, import_path)
project_saver = get_project_saver(project, export_path)
project_restorer.restore && project_saver.save
end
def get_project_restorer(project, import_path)
Gitlab::ImportExport::ProjectTreeRestorer.new(
user: project.creator, shared: get_shared_env(path: import_path), project: project
)
end
def get_project_saver(project, export_path)
Gitlab::ImportExport::ProjectTreeSaver.new(
project: project, current_user: project.creator, shared: get_shared_env(path: export_path)
)
end
def get_shared_env(path:)
instance_double(Gitlab::ImportExport::Shared).tap do |shared|
allow(shared).to receive(:export_path).and_return(path)
end
end
end
end
# frozen_string_literal: true
module ImportExport
module ProjectTreeExpectations
def assert_relations_match(imported_hash, exported_hash)
normalized_imported_hash = normalize_elements(imported_hash)
normalized_exported_hash = normalize_elements(exported_hash)
# this is for sanity checking, to make sure we didn't accidentally pass the test
# because we essentially ignored everything
stats = {
hashes: 0,
arrays: {
direct: 0,
pairwise: 0,
fuzzy: 0
},
values: 0
}
failures = match_recursively(normalized_imported_hash, normalized_exported_hash, stats)
puts "Elements checked:\n#{stats.pretty_inspect}"
expect(failures).to be_empty, failures.join("\n\n")
end
private
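# Dispatches on node type: hashes and arrays recurse into their elements;
# anything else is compared directly by value. Returns the accumulated
# array of failure messages.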
def match_recursively(left_node, right_node, stats, location_stack = [], failures = [])
if Hash === left_node && Hash === right_node
match_hashes(left_node, right_node, stats, location_stack, failures)
elsif Array === left_node && Array === right_node
match_arrays(left_node, right_node, stats, location_stack, failures)
else
stats[:values] += 1
if left_node != right_node
failures << failure_message("Value mismatch", location_stack, left_node, right_node)
end
end
failures
end
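# Compares two hashes key by key: flags any difference in key sets, then
# recurses into each value, pushing the key onto the location stack so
# failures can report where in the tree they occurred.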
def match_hashes(left_node, right_node, stats, location_stack, failures)
stats[:hashes] += 1
left_keys = left_node.keys.to_set
right_keys = right_node.keys.to_set
if left_keys != right_keys
failures << failure_message("Hash keys mismatch", location_stack, left_keys, right_keys)
end
left_node.keys.each do |key|
location_stack << key
match_recursively(left_node[key], right_node[key], stats, location_stack, failures)
location_stack.pop
end
end
def match_arrays(left_node, right_node, stats, location_stack, failures)
has_simple_elements = left_node.none? { |el| Enumerable === el }
# for simple types, we can do a direct order-less set comparison
if has_simple_elements && left_node.to_set != right_node.to_set
stats[:arrays][:direct] += 1
failures << failure_message("Elements mismatch", location_stack, left_node, right_node)
# if both arrays have the same number of complex elements, we can compare pair-wise in-order
elsif left_node.size == right_node.size
stats[:arrays][:pairwise] += 1
left_node.zip(right_node).each do |left_entry, right_entry|
match_recursively(left_entry, right_entry, stats, location_stack, failures)
end
# otherwise we have to fall back to a best-effort match by probing into the right array;
# this means we will not account for elements that exist on the right, but not on the left
else
stats[:arrays][:fuzzy] += 1
left_node.each do |left_entry|
right_entry = right_node.find { |el| el == left_entry }
match_recursively(left_entry, right_entry, stats, location_stack, failures)
end
end
end
def failure_message(what, location_stack, left_value, right_value)
where =
if location_stack.empty?
"root"
else
location_stack.map { |loc| loc.to_sym.inspect }.join(' -> ')
end
">> [#{where}] #{what}\n\n#{left_value.pretty_inspect}\nNOT EQUAL TO\n\n#{right_value.pretty_inspect}"
end
# Helper that traverses a project tree and normalizes data that we know
# to vary in the process of importing (such as list order or row IDs)
def normalize_elements(elem)
case elem
when Hash
elem.map do |key, value|
if ignore_key?(key, value)
[key, :ignored]
else
[key, normalize_elements(value)]
end
end.to_h
when Array
elem.map { |a| normalize_elements(a) }
else
elem
end
end
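# For example, normalize_elements({ 'id' => 5, 'title' => 'x' })
# returns { 'id' => :ignored, 'title' => 'x' }.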
# We currently need to ignore certain entries when checking for equivalence because
# we know them to change between imports/exports either by design or because of bugs;
# this helper filters out these problematic nodes.
def ignore_key?(key, value)
id?(key) || # IDs are known to be replaced during imports
key == 'updated_at' || # these get changed frequently during imports
key == 'next_run_at' || # these values change based on wall clock
key == 'notes' # the importer attaches an extra "by user XYZ" at the end of a note
end
def id?(key)
key == 'id' || key.ends_with?('_id')
end
end
end
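A minimal usage sketch of the matcher above (data is hypothetical; in real specs the two hashes come from an export/import round trip):
imported = { 'title' => 'Project', 'labels' => [{ 'id' => 1, 'name' => 'bug' }] }
exported = { 'title' => 'Project', 'labels' => [{ 'id' => 2, 'name' => 'bug' }] }
# The differing ids normalize to :ignored, so this assertion passes.
assert_relations_match(imported, exported)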
......@@ -14,6 +14,18 @@ describe ReactiveCachingWorker do
described_class.new.perform("Environment", environment.id)
end
context 'when ReactiveCaching::ExceededReactiveCacheLimit is raised' do
it 'avoids failing the job and tracks via Gitlab::ErrorTracking' do
allow_any_instance_of(Environment).to receive(:exclusively_update_reactive_cache!)
.and_raise(ReactiveCaching::ExceededReactiveCacheLimit)
expect(Gitlab::ErrorTracking).to receive(:track_exception)
.with(kind_of(ReactiveCaching::ExceededReactiveCacheLimit))
described_class.new.perform("Environment", environment.id)
end
end
end
end
end
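The spec above asserts that the worker reports ReactiveCaching::ExceededReactiveCacheLimit instead of re-raising it; a hypothetical sketch of that handling inside perform (the actual worker code may differ):
def perform(class_name, id, *args)
  klass = class_name.safe_constantize
  return unless klass

  klass.reactive_cache_worker_finder.call(id, *args)&.exclusively_update_reactive_cache!(*args)
rescue ReactiveCaching::ExceededReactiveCacheLimit => e
  # Report instead of re-raising so Sidekiq does not fail or retry the job.
  Gitlab::ErrorTracking.track_exception(e)
end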