Commit 565b9786 authored by Stan Hu

Merge branch 'ce-to-ee-2018-02-26' into 'master'

CE upstream - 2018-02-26 16:08 UTC

Closes #3639 and gitaly#1036

See merge request gitlab-org/gitlab-ee!4725
parents 90488d20 a0c170ef
import Vue from 'vue';
import deployKeysApp from './components/app.vue';
document.addEventListener('DOMContentLoaded', () => new Vue({
export default () => new Vue({
el: document.getElementById('js-deploy-keys'),
components: {
deployKeysApp,
......@@ -18,4 +18,4 @@ document.addEventListener('DOMContentLoaded', () => new Vue({
},
});
},
}));
});
......@@ -213,7 +213,7 @@ export default class LabelsSelect {
}
}
if (label.duplicate) {
color = gl.DropdownUtils.duplicateLabelColor(label.color);
color = DropdownUtils.duplicateLabelColor(label.color);
}
else {
if (label.color != null) {
......
import initSettingsPanels from '~/settings_panels';
import initDeployKeys from '~/deploy_keys';
document.addEventListener('DOMContentLoaded', initSettingsPanels);
document.addEventListener('DOMContentLoaded', () => {
initDeployKeys();
initSettingsPanels();
});
......@@ -40,9 +40,9 @@ class Projects::Clusters::GcpController < Projects::ApplicationController
def verify_billing
case google_project_billing_status
when nil
flash[:alert] = _('We could not verify that one of your projects on GCP has billing enabled. Please try again.')
flash.now[:alert] = _('We could not verify that one of your projects on GCP has billing enabled. Please try again.')
when false
flash[:alert] = _('Please <a href=%{link_to_billing} target="_blank" rel="noopener noreferrer">enable billing for one of your projects to be able to create a Kubernetes cluster</a>, then try again.').html_safe % { link_to_billing: "https://console.cloud.google.com/freetrial?utm_campaign=2018_cpanel&utm_source=gitlab&utm_medium=referral" }
flash.now[:alert] = _('Please <a href=%{link_to_billing} target="_blank" rel="noopener noreferrer">enable billing for one of your projects to be able to create a Kubernetes cluster</a>, then try again.').html_safe % { link_to_billing: "https://console.cloud.google.com/freetrial?utm_campaign=2018_cpanel&utm_source=gitlab&utm_medium=referral" }
when true
return
end
......
module Projects
module Prometheus
class MetricsController < Projects::ApplicationController
before_action :authorize_admin_project!
def active_common
respond_to do |format|
format.json do
matched_metrics = prometheus_service.matched_metrics || {}
if matched_metrics.any?
render json: matched_metrics
else
head :no_content
end
end
end
end
private
def prometheus_service
@prometheus_service ||= project.find_or_initialize_service('prometheus')
end
end
end
end
class Projects::PrometheusController < Projects::ApplicationController
before_action :authorize_read_project!
before_action :require_prometheus_metrics!
def active_metrics
respond_to do |format|
format.json do
matched_metrics = project.prometheus_service.matched_metrics || {}
if matched_metrics.any?
render json: matched_metrics
else
head :no_content
end
end
end
end
private
def require_prometheus_metrics!
render_404 unless project.prometheus_service.present?
end
end
......@@ -69,16 +69,16 @@ class PrometheusService < MonitoringService
client.ping
{ success: true, result: 'Checked API endpoint' }
rescue Gitlab::PrometheusError => err
rescue Gitlab::PrometheusClient::Error => err
{ success: false, result: err }
end
def environment_metrics(environment)
with_reactive_cache(Gitlab::Prometheus::Queries::EnvironmentQuery.name, environment.id, &method(:rename_data_to_metrics))
with_reactive_cache(Gitlab::Prometheus::Queries::EnvironmentQuery.name, environment.id, &rename_field(:data, :metrics))
end
def deployment_metrics(deployment)
metrics = with_reactive_cache(Gitlab::Prometheus::Queries::DeploymentQuery.name, deployment.environment.id, deployment.id, &method(:rename_data_to_metrics))
metrics = with_reactive_cache(Gitlab::Prometheus::Queries::DeploymentQuery.name, deployment.environment.id, deployment.id, &rename_field(:data, :metrics))
metrics&.merge(deployment_time: deployment.created_at.to_i) || {}
end
......@@ -107,7 +107,7 @@ class PrometheusService < MonitoringService
data: data,
last_update: Time.now.utc
}
rescue Gitlab::PrometheusError => err
rescue Gitlab::PrometheusClient::Error => err
{ success: false, result: err.message }
end
......@@ -116,10 +116,10 @@ class PrometheusService < MonitoringService
Gitlab::PrometheusClient.new(RestClient::Resource.new(api_url))
else
cluster = cluster_with_prometheus(environment_id)
raise Gitlab::PrometheusError, "couldn't find cluster with Prometheus installed" unless cluster
raise Gitlab::PrometheusClient::Error, "couldn't find cluster with Prometheus installed" unless cluster
rest_client = client_from_cluster(cluster)
raise Gitlab::PrometheusError, "couldn't create proxy Prometheus client" unless rest_client
raise Gitlab::PrometheusClient::Error, "couldn't create proxy Prometheus client" unless rest_client
Gitlab::PrometheusClient.new(rest_client)
end
......@@ -152,9 +152,11 @@ class PrometheusService < MonitoringService
cluster.application_prometheus.proxy_client
end
def rename_data_to_metrics(metrics)
metrics[:metrics] = metrics.delete :data
metrics
def rename_field(old_field, new_field)
-> (metrics) do
metrics[new_field] = metrics.delete(old_field)
metrics
end
end
def synchronize_service_state!
......
......@@ -35,9 +35,8 @@
method: :put, class: 'btn btn-default',
data: { confirm: _("Are you sure you want to reset registration token?") }
= render partial: 'ci/runner/how_to_setup_runner',
locals: { registration_token: Gitlab::CurrentSettings.runners_registration_token,
type: 'shared' }
= render partial: 'ci/runner/how_to_setup_shared_runner',
locals: { registration_token: Gitlab::CurrentSettings.runners_registration_token }
.append-bottom-20.clearfix
.pull-left
......
- link = link_to _("GitLab Runner section"), 'https://about.gitlab.com/gitlab-ci/#gitlab-runner', target: '_blank'
.bs-callout.help-callout
%h4= _("How to setup a #{type} Runner for a new project")
.append-bottom-10
%h4= _("Setup a #{type} Runner manually")
%ol
%li
= _("Install a Runner compatible with GitLab CI")
= (_("(checkout the %{link} for information on how to install it).") % { link: link }).html_safe
%li
= _("Specify the following URL during the Runner setup:")
%code#coordinator_address= root_url(only_path: false)
%li
= _("Use the following registration token during setup:")
%code#registration_token= registration_token
%li
= _("Start the Runner!")
%ol
%li
= _("Install a Runner compatible with GitLab CI")
= (_("(checkout the %{link} for information on how to install it).") % { link: link }).html_safe
%li
= _("Specify the following URL during the Runner setup:")
%code#coordinator_address= root_url(only_path: false)
%li
= _("Use the following registration token during setup:")
%code#registration_token= registration_token
%li
= _("Start the Runner!")
.bs-callout.help-callout
= render partial: 'ci/runner/how_to_setup_runner',
locals: { registration_token: registration_token, type: 'shared' }
.bs-callout.help-callout
.append-bottom-10
%h4= _('Setup a specific Runner automatically')
%p
- link_to_help_page = link_to(_('Learn more about Kubernetes'),
help_page_path('user/project/clusters/index'),
target: '_blank',
rel: 'noopener noreferrer')
= _('You can easily install a Runner on a Kubernetes cluster. %{link_to_help_page}').html_safe % { link_to_help_page: link_to_help_page }
%ol
%li
= _('Click the button below to begin the install process by navigating to the Kubernetes page')
%li
= _('Select an existing Kubernetes cluster or create a new one')
%li
= _('From the Kubernetes cluster details view, install Runner from the applications list')
= link_to _('Install Runner on Kubernetes'),
project_clusters_path(@project),
class: 'btn btn-info'
%hr
= render partial: 'ci/runner/how_to_setup_runner',
locals: { registration_token: registration_token, type: 'specific' }
......@@ -4,7 +4,7 @@
.col-xs-12
.text-content
%h4.text-center= s_('ClusterIntegration|Integrate Kubernetes cluster automation')
- link_to_help_page = link_to(s_('ClusterIntegration|Learn more about Kubernetes'), help_page_path('user/project/clusters/index'), target: '_blank', rel: 'noopener noreferrer')
- link_to_help_page = link_to(_('Learn more about Kubernetes'), help_page_path('user/project/clusters/index'), target: '_blank', rel: 'noopener noreferrer')
%p= s_('ClusterIntegration|Kubernetes clusters allow you to use review apps, deploy your applications, run your pipelines, and much more in an easy way. %{link_to_help_page}').html_safe % { link_to_help_page: link_to_help_page}
.text-center
......
%h3 Shared Runners
.bs-callout.bs-callout-warning.shared-runners-description
.bs-callout.shared-runners-description
- if Gitlab::CurrentSettings.shared_runners_text.present?
= markdown_field(Gitlab::CurrentSettings.current_application_settings, :shared_runners_text)
- else
......@@ -9,7 +9,7 @@
on GitLab.com).
%hr
- if @project.shared_runners_enabled?
= link_to toggle_shared_runners_project_runners_path(@project), class: 'btn btn-warning', method: :post do
= link_to toggle_shared_runners_project_runners_path(@project), class: 'btn btn-close', method: :post do
Disable shared Runners
- else
= link_to toggle_shared_runners_project_runners_path(@project), class: 'btn btn-success', method: :post do
......
%h3 Specific Runners
= render partial: 'ci/runner/how_to_setup_runner',
locals: { registration_token: @project.runners_token,
type: 'specific' }
= render partial: 'ci/runner/how_to_setup_specific_runner',
locals: { registration_token: @project.runners_token }
- if @project_runners.any?
%h4.underlined-title Runners activated for this project
......
......@@ -7,7 +7,7 @@
= link_to s_('PrometheusService|More information'), help_page_path('user/project/integrations/prometheus')
.col-lg-9
.panel.panel-default.js-panel-monitored-metrics{ data: { "active-metrics" => "#{project_prometheus_active_metrics_path(@project, :json)}" } }
.panel.panel-default.js-panel-monitored-metrics{ data: { active_metrics: active_common_project_prometheus_metrics_path(@project, :json) } }
.panel-heading
%h3.panel-title
= s_('PrometheusService|Monitored')
......
......@@ -4,7 +4,6 @@
- content_for :page_specific_javascripts do
= webpack_bundle_tag('common_vue')
= webpack_bundle_tag('deploy_keys')
= render "projects/push_rules/index"
= render "projects/mirrors/show"
......
......@@ -72,6 +72,7 @@
#{ _('New tag') }
.tree-controls
= lock_file_link(html_options: { class: 'btn path-lock' })
= link_to s_('Commits|History'), project_commits_path(@project, @id), class: 'btn'
......
---
title: Add a button to deploy a runner to a Kubernetes cluster in the settings page
merge_request: 17278
author:
type: changed
---
title: Do not persist Google Project verification flash errors after a page reload
merge_request: 17299
author:
type: fixed
---
title: Fix Group labels load failure when there are duplicate labels present
merge_request: 17353
author:
type: fixed
---
title: Restart Unicorn and Sidekiq when GRPC throws 14:Endpoint read failed
merge_request: 17293
author:
type: fixed
---
title: Return a 404 instead of 403 if the repository does not exist on disk
merge_request: 17341
author:
type: fixed
---
title: Move RecentSearchesDropdownContent vue component
merge_request: 16951
author: George Tsiolis
type: performance
......@@ -10,7 +10,7 @@ Sidekiq.configure_server do |config|
config.server_middleware do |chain|
chain.add Gitlab::SidekiqMiddleware::ArgumentsLogger if ENV['SIDEKIQ_LOG_ARGUMENTS']
chain.add Gitlab::SidekiqMiddleware::MemoryKiller if ENV['SIDEKIQ_MEMORY_KILLER_MAX_RSS']
chain.add Gitlab::SidekiqMiddleware::Shutdown
chain.add Gitlab::SidekiqMiddleware::RequestStoreMiddleware unless ENV['SIDEKIQ_REQUEST_STORE'] == '0'
chain.add Gitlab::SidekiqStatus::ServerMiddleware
end
......
......@@ -78,7 +78,9 @@ constraints(ProjectUrlConstrainer.new) do
resource :mattermost, only: [:new, :create]
namespace :prometheus do
get :active_metrics
resources :metrics, constraints: { id: %r{[^\/]+} }, only: [] do
get :active_common, on: :collection
end
end
resources :deploy_keys, constraints: { id: /\d+/ }, only: [:index, :new, :create, :edit, :update] do
......
......@@ -49,9 +49,11 @@ var config = {
context: path.join(ROOT_PATH, 'app/assets/javascripts'),
entry: {
balsamiq_viewer: './blob/balsamiq_viewer.js',
blob: './blob_edit/blob_bundle.js',
common: './commons/index.js',
common_vue: './vue_shared/vue_resource_interceptor.js',
cycle_analytics: './cycle_analytics/cycle_analytics_bundle.js',
commit_pipelines: './commit/pipelines/pipelines_bundle.js',
deploy_keys: './deploy_keys/index.js',
diff_notes: './diff_notes/diff_notes_bundle.js',
environments: './environments/environments_bundle.js',
environments_folder: './environments/folder/environments_folder_bundle.js',
......
......@@ -2,7 +2,9 @@
[Gitaly](https://gitlab.com/gitlab-org/gitaly) (introduced in GitLab
9.0) is a service that provides high-level RPC access to Git
repositories. Gitaly is a mandatory component in GitLab 9.4 and newer.
repositories. Gitaly was optional when it was first introduced in
GitLab, but since GitLab 9.4 it is a mandatory component of the
application.
GitLab components that access Git repositories (gitlab-rails,
gitlab-shell, gitlab-workhorse) act as clients to Gitaly. End users do
......@@ -184,14 +186,20 @@ Gitaly logs on your Gitaly server (`sudo gitlab-ctl tail gitaly` or
coming in. One sure way to trigger a Gitaly request is to clone a
repository from your GitLab server over HTTP.
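For example, a minimal way to generate Gitaly traffic and watch it arrive is to tail the Gitaly log while cloning over HTTP (the hostname and project path below are placeholders, not values from this merge request):

```shell
# On the Gitaly server (Omnibus): follow the Gitaly log
sudo gitlab-ctl tail gitaly

# In another terminal: clone any repository over HTTP to trigger Gitaly RPCs
git clone http://gitlab.example.com/group/project.git
```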
## Disabling or enabling the Gitaly service
## Disabling or enabling the Gitaly service in a cluster environment
If you are running Gitaly [as a remote
service](#running-gitaly-on-its-own-server) you may want to disable
the local Gitaly service that runs on your GitLab server by default.
To disable the Gitaly service in your Omnibus installation, add the
following line to `/etc/gitlab/gitlab.rb`:
> 'Disabling Gitaly' only makes sense when you run GitLab in a custom
cluster configuration, where different services run on different
machines. Disabling Gitaly on all machines in the cluster is not a
valid configuration.
If you are setting up a GitLab cluster where Gitaly does not need to
run on all machines, you can disable the Gitaly service in your
Omnibus installation by adding the following line to `/etc/gitlab/gitlab.rb`:
```ruby
gitaly['enable'] = false
......@@ -200,11 +208,13 @@ gitaly['enable'] = false
When you run `gitlab-ctl reconfigure`, the Gitaly service will be
disabled.
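A minimal sketch of the full Omnibus sequence on one cluster machine (the commands are standard Omnibus tooling; the status check is only a sanity check):

```shell
# /etc/gitlab/gitlab.rb already contains: gitaly['enable'] = false
sudo gitlab-ctl reconfigure
sudo gitlab-ctl status        # Gitaly should no longer appear as a running service
```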
To disable the Gitaly service in an installation from source, add the
following to `/etc/default/gitlab`:
To disable the Gitaly service in a GitLab cluster where you installed
GitLab from source, add the following to `/etc/default/gitlab` on the
machine where you want to disable Gitaly:
```shell
gitaly_enabled=false
```
When you run `service gitlab restart` Gitaly will be disabled.
When you run `service gitlab restart`, Gitaly will be disabled on this
particular machine.
......@@ -1234,7 +1234,7 @@ POST /projects/:id/hooks
| `note_events` | boolean | no | Trigger hook on note events |
| `job_events` | boolean | no | Trigger hook on job events |
| `pipeline_events` | boolean | no | Trigger hook on pipeline events |
| `wiki_events` | boolean | no | Trigger hook on wiki events |
| `wiki_page_events` | boolean | no | Trigger hook on wiki events |
| `enable_ssl_verification` | boolean | no | Do SSL verification when triggering the hook |
| `token` | string | no | Secret token to validate received payloads; this will not be returned in the response |
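As an illustration, a hedged curl sketch of creating a hook that subscribes to wiki page events; the token, project ID, and URLs are placeholders:

```shell
curl --request POST \
  --header "PRIVATE-TOKEN: <your_access_token>" \
  --data "url=https://example.com/hook" \
  --data "wiki_page_events=true" \
  "https://gitlab.example.com/api/v4/projects/5/hooks"
```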
......
......@@ -8,8 +8,8 @@ Milestones allow you to organize issues and merge requests into a cohesive group
## Project milestones and group milestones
- **Project milestones** can be assigned to issues or merge requests in that project only.
- **Group milestones** can be assigned to any issue or merge request of any project in that group.
- **Project milestones** can be assigned to issues or merge requests in that project only.
- **Group milestones** can be assigned to any issue or merge request of any project in that group.
- In the [future](https://gitlab.com/gitlab-org/gitlab-ce/issues/36862), you will be able to assign group milestones to issues and merge requests of projects in [subgroups](../../group/subgroups/index.md).
## Creating milestones
......
......@@ -2210,7 +2210,7 @@ module Gitlab
with_worktree(squash_path, branch, sparse_checkout_files: diff_files, env: env) do
# Apply diff of the `diff_range` to the worktree
diff = run_git!(%W(diff --binary #{diff_range}))
run_git!(%w(apply --index), chdir: squash_path, env: env) do |stdin|
run_git!(%w(apply --index --whitespace=nowarn), chdir: squash_path, env: env) do |stdin|
stdin.binmode
stdin.write(diff)
end
......
......@@ -202,7 +202,7 @@ module Gitlab
def check_repository_existence!
unless repository.exists?
raise UnauthorizedError, ERROR_MESSAGES[:no_repo]
raise NotFoundError, ERROR_MESSAGES[:no_repo]
end
end
......
......@@ -125,6 +125,8 @@ module Gitlab
kwargs = yield(kwargs) if block_given?
stub(service, storage).__send__(rpc, request, kwargs) # rubocop:disable GitlabSecurity/PublicSend
rescue GRPC::Unavailable => ex
handle_grpc_unavailable!(ex)
ensure
duration = Gitlab::Metrics::System.monotonic_time - start
......@@ -135,6 +137,27 @@ module Gitlab
duration)
end
def self.handle_grpc_unavailable!(ex)
status = ex.to_status
raise ex unless status.details == 'Endpoint read failed'
# There is a bug in grpc 1.8.x that causes a client process to get stuck
# always raising '14:Endpoint read failed'. The only thing that we can
# do to recover is to restart the process.
#
# See https://gitlab.com/gitlab-org/gitaly/issues/1029
if Sidekiq.server?
raise Gitlab::SidekiqMiddleware::Shutdown::WantShutdown.new(ex.to_s)
else
# SIGQUIT requests a Unicorn worker to shut down gracefully after the current request.
Process.kill('QUIT', Process.pid)
end
raise ex
end
private_class_method :handle_grpc_unavailable!
def self.current_transaction_labels
Gitlab::Metrics::Transaction.current&.labels || {}
end
......
......@@ -6,9 +6,14 @@ module Gitlab
attr_accessor :name, :priority, :metrics
validates :name, :priority, :metrics, presence: true
def self.all
def self.common_metrics
AdditionalMetricsParser.load_groups_from_yaml
end
# EE only
def self.for_project(_)
common_metrics
end
end
end
end
......@@ -7,6 +7,7 @@ module Gitlab
def query(environment_id, deployment_id)
Deployment.find_by(id: deployment_id).try do |deployment|
query_metrics(
deployment.project,
common_query_context(
deployment.environment,
timeframe_start: (deployment.created_at - 30.minutes).to_f,
......
......@@ -7,6 +7,7 @@ module Gitlab
def query(environment_id)
::Environment.find_by(id: environment_id).try do |environment|
query_metrics(
environment.project,
common_query_context(environment, timeframe_start: 8.hours.ago.to_f, timeframe_end: Time.now.to_f)
)
end
......
......@@ -18,7 +18,7 @@ module Gitlab
private
def groups_data
metrics_groups = groups_with_active_metrics(Gitlab::Prometheus::MetricGroup.all)
metrics_groups = groups_with_active_metrics(Gitlab::Prometheus::MetricGroup.common_metrics)
lookup = active_series_lookup(metrics_groups)
groups = {}
......
......@@ -2,10 +2,10 @@ module Gitlab
module Prometheus
module Queries
module QueryAdditionalMetrics
def query_metrics(query_context)
def query_metrics(project, query_context)
query_processor = method(:process_query).curry[query_context]
groups = matched_metrics.map do |group|
groups = matched_metrics(project).map do |group|
metrics = group.metrics.map do |metric|
{
title: metric.title,
......@@ -60,8 +60,8 @@ module Gitlab
@available_metrics ||= client_label_values || []
end
def matched_metrics
result = Gitlab::Prometheus::MetricGroup.all.map do |group|
def matched_metrics(project)
result = Gitlab::Prometheus::MetricGroup.for_project(project).map do |group|
group.metrics.select! do |metric|
metric.required_metrics.all?(&available_metrics.method(:include?))
end
......
module Gitlab
PrometheusError = Class.new(StandardError)
# Helper methods to interact with Prometheus network services & resources
class PrometheusClient
Error = Class.new(StandardError)
QueryError = Class.new(Gitlab::PrometheusClient::Error)
attr_reader :rest_client, :headers
def initialize(rest_client)
......@@ -22,10 +23,10 @@ module Gitlab
def query_range(query, start: 8.hours.ago, stop: Time.now)
get_result('matrix') do
json_api_get('query_range',
query: query,
start: start.to_f,
end: stop.to_f,
step: 1.minute.to_i)
query: query,
start: start.to_f,
end: stop.to_f,
step: 1.minute.to_i)
end
end
......@@ -43,22 +44,22 @@ module Gitlab
path = ['api', 'v1', type].join('/')
get(path, args)
rescue JSON::ParserError
raise PrometheusError, 'Parsing response failed'
raise PrometheusClient::Error, 'Parsing response failed'
rescue Errno::ECONNREFUSED
raise PrometheusError, 'Connection refused'
raise PrometheusClient::Error, 'Connection refused'
end
def get(path, args)
response = rest_client[path].get(params: args)
handle_response(response)
rescue SocketError
raise PrometheusError, "Can't connect to #{rest_client.url}"
raise PrometheusClient::Error, "Can't connect to #{rest_client.url}"
rescue OpenSSL::SSL::SSLError
raise PrometheusError, "#{rest_client.url} contains invalid SSL data"
raise PrometheusClient::Error, "#{rest_client.url} contains invalid SSL data"
rescue RestClient::ExceptionWithResponse => ex
handle_exception_response(ex.response)
rescue RestClient::Exception
raise PrometheusError, "Network connection error"
raise PrometheusClient::Error, "Network connection error"
end
def handle_response(response)
......@@ -66,16 +67,18 @@ module Gitlab
if response.code == 200 && json_data['status'] == 'success'
json_data['data'] || {}
else
raise PrometheusError, "#{response.code} - #{response.body}"
raise PrometheusClient::Error, "#{response.code} - #{response.body}"
end
end
def handle_exception_response(response)
if response.code == 400
if response.code == 200 && response['status'] == 'success'
response['data'] || {}
elsif response.code == 400
json_data = JSON.parse(response.body)
raise PrometheusError, json_data['error'] || 'Bad data received'
raise PrometheusClient::QueryError, json_data['error'] || 'Bad data received'
else
raise PrometheusError, "#{response.code} - #{response.body}"
raise PrometheusClient::Error, "#{response.code} - #{response.body}"
end
end
......
module Gitlab
module SidekiqMiddleware
class MemoryKiller
# Default the RSS limit to 0, meaning the MemoryKiller is disabled
MAX_RSS = (ENV['SIDEKIQ_MEMORY_KILLER_MAX_RSS'] || 0).to_s.to_i
# Give Sidekiq 15 minutes of grace time after exceeding the RSS limit
GRACE_TIME = (ENV['SIDEKIQ_MEMORY_KILLER_GRACE_TIME'] || 15 * 60).to_s.to_i
# Wait 30 seconds for running jobs to finish during graceful shutdown
SHUTDOWN_WAIT = (ENV['SIDEKIQ_MEMORY_KILLER_SHUTDOWN_WAIT'] || 30).to_s.to_i
# Create a mutex used to ensure there will be only one thread waiting to
# shut Sidekiq down
MUTEX = Mutex.new
def call(worker, job, queue)
yield
current_rss = get_rss
return unless MAX_RSS > 0 && current_rss > MAX_RSS
Thread.new do
# Return if another thread is already waiting to shut Sidekiq down
return unless MUTEX.try_lock
Sidekiq.logger.warn "Sidekiq worker PID-#{pid} current RSS #{current_rss}"\
" exceeds maximum RSS #{MAX_RSS} after finishing job #{worker.class} JID-#{job['jid']}"
Sidekiq.logger.warn "Sidekiq worker PID-#{pid} will stop fetching new jobs in #{GRACE_TIME} seconds, and will be shut down #{SHUTDOWN_WAIT} seconds later"
# Wait `GRACE_TIME` to give the memory intensive job time to finish.
# Then, tell Sidekiq to stop fetching new jobs.
wait_and_signal(GRACE_TIME, 'SIGSTP', 'stop fetching new jobs')
# Wait `SHUTDOWN_WAIT` to give already fetched jobs time to finish.
# Then, tell Sidekiq to gracefully shut down by giving jobs a few more
# moments to finish, killing and requeuing them if they didn't, and
# then terminating itself.
wait_and_signal(SHUTDOWN_WAIT, 'SIGTERM', 'gracefully shut down')
# Wait for Sidekiq to shutdown gracefully, and kill it if it didn't.
wait_and_signal(Sidekiq.options[:timeout] + 2, 'SIGKILL', 'die')
end
end
private
def get_rss
output, status = Gitlab::Popen.popen(%W(ps -o rss= -p #{pid}), Rails.root.to_s)
return 0 unless status.zero?
output.to_i
end
def wait_and_signal(time, signal, explanation)
Sidekiq.logger.warn "waiting #{time} seconds before sending Sidekiq worker PID-#{pid} #{signal} (#{explanation})"
sleep(time)
Sidekiq.logger.warn "sending Sidekiq worker PID-#{pid} #{signal} (#{explanation})"
Process.kill(signal, pid)
end
def pid
Process.pid
end
end
end
end
require 'mutex_m'
module Gitlab
module SidekiqMiddleware
class Shutdown
extend Mutex_m
# Default the RSS limit to 0, meaning the MemoryKiller is disabled
MAX_RSS = (ENV['SIDEKIQ_MEMORY_KILLER_MAX_RSS'] || 0).to_s.to_i
# Give Sidekiq 15 minutes of grace time after exceeding the RSS limit
GRACE_TIME = (ENV['SIDEKIQ_MEMORY_KILLER_GRACE_TIME'] || 15 * 60).to_s.to_i
# Wait 30 seconds for running jobs to finish during graceful shutdown
SHUTDOWN_WAIT = (ENV['SIDEKIQ_MEMORY_KILLER_SHUTDOWN_WAIT'] || 30).to_s.to_i
# This exception can be used to request that the middleware start shutting down Sidekiq
WantShutdown = Class.new(StandardError)
ShutdownWithoutRaise = Class.new(WantShutdown)
private_constant :ShutdownWithoutRaise
# For testing only, to avoid race conditions (?) in Rspec mocks.
attr_reader :trace
# We store the shutdown thread in a class variable to ensure that there
# can be only one shutdown thread in the process.
def self.create_shutdown_thread
mu_synchronize do
return unless @shutdown_thread.nil?
@shutdown_thread = Thread.new { yield }
end
end
# For testing only: so we can wait for the shutdown thread to finish.
def self.shutdown_thread
mu_synchronize { @shutdown_thread }
end
# For testing only: so that we can reset the global state before each test.
def self.clear_shutdown_thread
mu_synchronize { @shutdown_thread = nil }
end
def initialize
@trace = Queue.new if Rails.env.test?
end
def call(worker, job, queue)
shutdown_exception = nil
begin
yield
check_rss!
rescue WantShutdown => ex
shutdown_exception = ex
end
return unless shutdown_exception
self.class.create_shutdown_thread do
do_shutdown(worker, job, shutdown_exception)
end
raise shutdown_exception unless shutdown_exception.is_a?(ShutdownWithoutRaise)
end
private
def do_shutdown(worker, job, shutdown_exception)
Sidekiq.logger.warn "Sidekiq worker PID-#{pid} shutting down because of #{shutdown_exception} after job "\
"#{worker.class} JID-#{job['jid']}"
Sidekiq.logger.warn "Sidekiq worker PID-#{pid} will stop fetching new jobs in #{GRACE_TIME} seconds, and will be shut down #{SHUTDOWN_WAIT} seconds later"
# Wait `GRACE_TIME` to give the memory intensive job time to finish.
# Then, tell Sidekiq to stop fetching new jobs.
wait_and_signal(GRACE_TIME, 'SIGTSTP', 'stop fetching new jobs')
# Wait `SHUTDOWN_WAIT` to give already fetched jobs time to finish.
# Then, tell Sidekiq to gracefully shut down by giving jobs a few more
# moments to finish, killing and requeuing them if they didn't, and
# then terminating itself.
wait_and_signal(SHUTDOWN_WAIT, 'SIGTERM', 'gracefully shut down')
# Wait for Sidekiq to shutdown gracefully, and kill it if it didn't.
wait_and_signal(Sidekiq.options[:timeout] + 2, 'SIGKILL', 'die')
end
def check_rss!
return unless MAX_RSS > 0
current_rss = get_rss
return unless current_rss > MAX_RSS
raise ShutdownWithoutRaise.new("current RSS #{current_rss} exceeds maximum RSS #{MAX_RSS}")
end
def get_rss
output, status = Gitlab::Popen.popen(%W(ps -o rss= -p #{pid}), Rails.root.to_s)
return 0 unless status.zero?
output.to_i
end
def wait_and_signal(time, signal, explanation)
Sidekiq.logger.warn "waiting #{time} seconds before sending Sidekiq worker PID-#{pid} #{signal} (#{explanation})"
sleep(time)
Sidekiq.logger.warn "sending Sidekiq worker PID-#{pid} #{signal} (#{explanation})"
kill(signal, pid)
end
def pid
Process.pid
end
def sleep(time)
if Rails.env.test?
@trace << [:sleep, time]
else
Kernel.sleep(time)
end
end
def kill(signal, pid)
if Rails.env.test?
@trace << [:kill, signal, pid]
else
Process.kill(signal, pid)
end
end
end
end
end
......@@ -161,7 +161,7 @@ describe Projects::Clusters::GcpController do
it 'renders the cluster form with an error' do
go
expect(response).to set_flash[:alert]
expect(response).to set_flash.now[:alert]
expect(response).to render_template('new')
end
end
......
require('spec_helper')
require 'spec_helper'
describe Projects::PrometheusController do
describe Projects::Prometheus::MetricsController do
let(:user) { create(:user) }
let!(:project) { create(:project) }
let(:project) { create(:project) }
let(:prometheus_service) { double('prometheus_service') }
before do
allow(controller).to receive(:project).and_return(project)
allow(project).to receive(:prometheus_service).and_return(prometheus_service)
allow(project).to receive(:find_or_initialize_service).with('prometheus').and_return(prometheus_service)
project.add_master(user)
sign_in(user)
end
describe 'GET #active_metrics' do
describe 'GET #active_common' do
context 'when prometheus metrics are enabled' do
context 'when data is not present' do
before do
......@@ -22,7 +22,7 @@ describe Projects::PrometheusController do
end
it 'returns no content response' do
get :active_metrics, project_params(format: :json)
get :active_common, project_params(format: :json)
expect(response).to have_gitlab_http_status(204)
end
......@@ -36,7 +36,7 @@ describe Projects::PrometheusController do
end
it 'returns no content response' do
get :active_metrics, project_params(format: :json)
get :active_common, project_params(format: :json)
expect(response).to have_gitlab_http_status(200)
expect(json_response).to eq(sample_response.deep_stringify_keys)
......@@ -45,7 +45,7 @@ describe Projects::PrometheusController do
context 'when requesting non json response' do
it 'returns not found response' do
get :active_metrics, project_params
get :active_common, project_params
expect(response).to have_gitlab_http_status(404)
end
......
......@@ -19,7 +19,7 @@ describe "Admin Runners" do
end
it 'has all necessary texts' do
expect(page).to have_text "How to setup"
expect(page).to have_text "Setup a shared Runner manually"
expect(page).to have_text "Runners with last contact more than a minute ago: 1"
end
......@@ -54,7 +54,7 @@ describe "Admin Runners" do
end
it 'has all necessary texts including no runner message' do
expect(page).to have_text "How to setup"
expect(page).to have_text "Setup a shared Runner manually"
expect(page).to have_text "Runners with last contact more than a minute ago: 0"
expect(page).to have_text 'No runners found'
end
......
......@@ -146,8 +146,8 @@ feature 'Project' do
end
describe 'removal', :js do
let(:user) { create(:user, username: 'test', name: 'test') }
let(:project) { create(:project, namespace: user.namespace, name: 'project1') }
let(:user) { create(:user) }
let(:project) { create(:project, namespace: user.namespace) }
before do
sign_in(user)
......@@ -156,8 +156,8 @@ feature 'Project' do
end
it 'removes a project' do
expect { remove_with_confirm('Remove project', project.path) }.to change {Project.count}.by(-1)
expect(page).to have_content "Project 'test / project1' is in the process of being deleted."
expect { remove_with_confirm('Remove project', project.path) }.to change { Project.count }.by(-1)
expect(page).to have_content "Project '#{project.full_name}' is in the process of being deleted."
expect(Project.all.count).to be_zero
expect(project.issues).to be_empty
expect(project.merge_requests).to be_empty
......
......@@ -7,6 +7,20 @@ feature 'Runners' do
sign_in(user)
end
context 'when user opens runners page' do
given(:project) { create(:project) }
background do
project.add_master(user)
end
scenario 'user can see a button to install runners on kubernetes clusters' do
visit runners_path(project)
expect(page).to have_link('Install Runner on Kubernetes', href: project_clusters_path(project))
end
end
context 'when a project has enabled shared_runners' do
given(:project) { create(:project) }
......
......@@ -2283,6 +2283,20 @@ describe Gitlab::Git::Repository, seed_helper: true do
expect(subject).to match(/\h{40}/)
end
end
context 'with trailing whitespace in an invalid patch', :skip_gitaly_mock do
let(:diff) { "diff --git a/README.md b/README.md\nindex faaf198..43c5edf 100644\n--- a/README.md\n+++ b/README.md\n@@ -1,4 +1,4 @@\n-testme\n+ \n ====== \n \n Sample repo for testing gitlab features\n" }
it 'does not include whitespace warnings in the error' do
allow(repository).to receive(:run_git!).and_call_original
allow(repository).to receive(:run_git!).with(%W(diff --binary #{start_sha}...#{end_sha})).and_return(diff.force_encoding('ASCII-8BIT'))
expect { subject }.to raise_error do |error|
expect(error).to be_a(described_class::GitError)
expect(error.message).not_to include('trailing whitespace')
end
end
end
end
end
......
......@@ -534,6 +534,19 @@ describe Gitlab::GitAccess do
expect { pull_access_check }.to raise_unauthorized('Your account has been blocked.')
end
context 'when the project repository does not exist' do
it 'returns not found' do
project.add_guest(user)
repo = project.repository
FileUtils.rm_rf(repo.path)
# Sanity check for rm_rf
expect(repo.exists?).to eq(false)
expect { pull_access_check }.to raise_error(Gitlab::GitAccess::NotFoundError, 'A repository for this project does not exist yet.')
end
end
describe 'without access to project' do
context 'pull code' do
it { expect { pull_access_check }.to raise_not_found }
......
......@@ -57,7 +57,7 @@ describe Gitlab::GitAccessWiki do
# Sanity check for rm_rf
expect(wiki_repo.exists?).to eq(false)
expect { subject }.to raise_error(Gitlab::GitAccess::UnauthorizedError, 'A repository for this project does not exist yet.')
expect { subject }.to raise_error(Gitlab::GitAccess::NotFoundError, 'A repository for this project does not exist yet.')
end
end
end
......
......@@ -24,7 +24,7 @@ describe Gitlab::Prometheus::Queries::MatchedMetricsQuery do
context 'with one group where two metrics is found' do
before do
allow(metric_group_class).to receive(:all).and_return([simple_metric_group])
allow(metric_group_class).to receive(:common_metrics).and_return([simple_metric_group])
allow(client).to receive(:label_values).and_return(metric_names)
end
......@@ -70,7 +70,7 @@ describe Gitlab::Prometheus::Queries::MatchedMetricsQuery do
context 'with one group where only one metric is found' do
before do
allow(metric_group_class).to receive(:all).and_return([simple_metric_group])
allow(metric_group_class).to receive(:common_metrics).and_return([simple_metric_group])
allow(client).to receive(:label_values).and_return('metric_a')
end
......@@ -99,7 +99,7 @@ describe Gitlab::Prometheus::Queries::MatchedMetricsQuery do
let(:second_metric_group) { simple_metric_group(name: 'nameb', metrics: simple_metrics(added_metric_name: 'metric_c')) }
before do
allow(metric_group_class).to receive(:all).and_return([simple_metric_group, second_metric_group])
allow(metric_group_class).to receive(:common_metrics).and_return([simple_metric_group, second_metric_group])
allow(client).to receive(:label_values).and_return('metric_c')
end
......
......@@ -19,41 +19,41 @@ describe Gitlab::PrometheusClient do
# - execute_query: A query call
shared_examples 'failure response' do
context 'when request returns 400 with an error message' do
it 'raises a Gitlab::PrometheusError error' do
it 'raises a Gitlab::PrometheusClient::Error error' do
req_stub = stub_prometheus_request(query_url, status: 400, body: { error: 'bar!' })
expect { execute_query }
.to raise_error(Gitlab::PrometheusError, 'bar!')
.to raise_error(Gitlab::PrometheusClient::Error, 'bar!')
expect(req_stub).to have_been_requested
end
end
context 'when request returns 400 without an error message' do
it 'raises a Gitlab::PrometheusError error' do
it 'raises a Gitlab::PrometheusClient::Error error' do
req_stub = stub_prometheus_request(query_url, status: 400)
expect { execute_query }
.to raise_error(Gitlab::PrometheusError, 'Bad data received')
.to raise_error(Gitlab::PrometheusClient::Error, 'Bad data received')
expect(req_stub).to have_been_requested
end
end
context 'when request returns 500' do
it 'raises a Gitlab::PrometheusError error' do
it 'raises a Gitlab::PrometheusClient::Error error' do
req_stub = stub_prometheus_request(query_url, status: 500, body: { message: 'FAIL!' })
expect { execute_query }
.to raise_error(Gitlab::PrometheusError, '500 - {"message":"FAIL!"}')
.to raise_error(Gitlab::PrometheusClient::Error, '500 - {"message":"FAIL!"}')
expect(req_stub).to have_been_requested
end
end
context 'when request returns non json data' do
it 'raises a Gitlab::PrometheusError error' do
it 'raises a Gitlab::PrometheusClient::Error error' do
req_stub = stub_prometheus_request(query_url, status: 200, body: 'not json')
expect { execute_query }
.to raise_error(Gitlab::PrometheusError, 'Parsing response failed')
.to raise_error(Gitlab::PrometheusClient::Error, 'Parsing response failed')
expect(req_stub).to have_been_requested
end
end
......@@ -65,27 +65,27 @@ describe Gitlab::PrometheusClient do
subject { described_class.new(RestClient::Resource.new(prometheus_url)) }
context 'exceptions are raised' do
it 'raises a Gitlab::PrometheusError error when a SocketError is rescued' do
it 'raises a Gitlab::PrometheusClient::Error error when a SocketError is rescued' do
req_stub = stub_prometheus_request_with_exception(prometheus_url, SocketError)
expect { subject.send(:get, '/', {}) }
.to raise_error(Gitlab::PrometheusError, "Can't connect to #{prometheus_url}")
.to raise_error(Gitlab::PrometheusClient::Error, "Can't connect to #{prometheus_url}")
expect(req_stub).to have_been_requested
end
it 'raises a Gitlab::PrometheusError error when a SSLError is rescued' do
it 'raises a Gitlab::PrometheusClient::Error error when a SSLError is rescued' do
req_stub = stub_prometheus_request_with_exception(prometheus_url, OpenSSL::SSL::SSLError)
expect { subject.send(:get, '/', {}) }
.to raise_error(Gitlab::PrometheusError, "#{prometheus_url} contains invalid SSL data")
.to raise_error(Gitlab::PrometheusClient::Error, "#{prometheus_url} contains invalid SSL data")
expect(req_stub).to have_been_requested
end
it 'raises a Gitlab::PrometheusError error when a RestClient::Exception is rescued' do
it 'raises a Gitlab::PrometheusClient::Error error when a RestClient::Exception is rescued' do
req_stub = stub_prometheus_request_with_exception(prometheus_url, RestClient::Exception)
expect { subject.send(:get, '/', {}) }
.to raise_error(Gitlab::PrometheusError, "Network connection error")
.to raise_error(Gitlab::PrometheusClient::Error, "Network connection error")
expect(req_stub).to have_been_requested
end
end
......
require 'spec_helper'
describe Gitlab::SidekiqMiddleware::MemoryKiller do
describe Gitlab::SidekiqMiddleware::Shutdown do
subject { described_class.new }
let(:pid) { 999 }
let(:pid) { Process.pid }
let(:worker) { double(:worker, class: 'TestWorker') }
let(:job) { { 'jid' => 123 } }
let(:queue) { 'test_queue' }
let(:block) { proc { nil } }
def run
thread = subject.call(worker, job, queue) { nil }
thread&.join
subject.call(worker, job, queue) { block.call }
described_class.shutdown_thread&.join
end
def pop_trace
subject.trace.pop(true)
end
before do
allow(subject).to receive(:get_rss).and_return(10.kilobytes)
allow(subject).to receive(:pid).and_return(pid)
described_class.clear_shutdown_thread
end
context 'when MAX_RSS is set to 0' do
......@@ -30,22 +35,26 @@ describe Gitlab::SidekiqMiddleware::MemoryKiller do
end
end
def expect_shutdown_sequence
expect(pop_trace).to eq([:sleep, 15 * 60])
expect(pop_trace).to eq([:kill, 'SIGTSTP', pid])
expect(pop_trace).to eq([:sleep, 30])
expect(pop_trace).to eq([:kill, 'SIGTERM', pid])
expect(pop_trace).to eq([:sleep, 10])
expect(pop_trace).to eq([:kill, 'SIGKILL', pid])
end
context 'when MAX_RSS is exceeded' do
before do
stub_const("#{described_class}::MAX_RSS", 5.kilobytes)
end
it 'sends the STP, TERM and KILL signals at expected times' do
expect(subject).to receive(:sleep).with(15 * 60).ordered
expect(Process).to receive(:kill).with('SIGSTP', pid).ordered
expect(subject).to receive(:sleep).with(30).ordered
expect(Process).to receive(:kill).with('SIGTERM', pid).ordered
expect(subject).to receive(:sleep).with(10).ordered
expect(Process).to receive(:kill).with('SIGKILL', pid).ordered
it 'sends the TSTP, TERM and KILL signals at expected times' do
run
expect_shutdown_sequence
end
end
......@@ -60,4 +69,20 @@ describe Gitlab::SidekiqMiddleware::MemoryKiller do
run
end
end
context 'when WantShutdown is raised' do
let(:block) { proc { raise described_class::WantShutdown } }
it 'starts the shutdown sequence and re-raises the exception' do
expect { run }.to raise_exception(described_class::WantShutdown)
# We can't expect 'run' to have joined on the shutdown thread, because
# it hit an exception.
shutdown_thread = described_class.shutdown_thread
expect(shutdown_thread).not_to be_nil
shutdown_thread.join
expect_shutdown_sequence
end
end
end
......@@ -223,8 +223,8 @@ describe PrometheusService, :use_clean_rails_memory_store_caching do
context 'with cluster for all environments without prometheus installed' do
context 'without environment supplied' do
it 'raises PrometheusError because cluster was not found' do
expect { service.client }.to raise_error(Gitlab::PrometheusError, /couldn't find cluster with Prometheus installed/)
it 'raises PrometheusClient::Error because cluster was not found' do
expect { service.client }.to raise_error(Gitlab::PrometheusClient::Error, /couldn't find cluster with Prometheus installed/)
end
end
......@@ -242,8 +242,8 @@ describe PrometheusService, :use_clean_rails_memory_store_caching do
context 'with prod environment supplied' do
let!(:environment) { create(:environment, project: project, name: 'prod') }
it 'raises PrometheusError because cluster was not found' do
expect { service.client }.to raise_error(Gitlab::PrometheusError, /couldn't find cluster with Prometheus installed/)
it 'raises PrometheusClient::Error because cluster was not found' do
expect { service.client }.to raise_error(Gitlab::PrometheusClient::Error, /couldn't find cluster with Prometheus installed/)
end
end
end
......
......@@ -597,7 +597,7 @@ describe 'Git HTTP requests' do
context "when a gitlab ci token is provided" do
let(:project) { create(:project, :repository) }
let(:build) { create(:ci_build, :running) }
let(:other_project) { create(:project) }
let(:other_project) { create(:project, :repository) }
before do
build.update!(project: project) # can't associate it on factory create
......@@ -648,10 +648,10 @@ describe 'Git HTTP requests' do
context 'when the repo does not exist' do
let(:project) { create(:project) }
it 'rejects pulls with 403 Forbidden' do
it 'rejects pulls with 404 Not Found' do
clone_get path, env
expect(response).to have_gitlab_http_status(:forbidden)
expect(response).to have_gitlab_http_status(:not_found)
expect(response.body).to eq(git_access_error(:no_repo))
end
end
......
......@@ -68,10 +68,10 @@ describe 'OpenID Connect requests' do
let!(:public_email) { build :email, email: 'public@example.com' }
let!(:private_email) { build :email, email: 'private@example.com' }
let!(:group1) { create :group, path: 'group1' }
let!(:group2) { create :group, path: 'group2' }
let!(:group3) { create :group, path: 'group3', parent: group2 }
let!(:group4) { create :group, path: 'group4', parent: group3 }
let!(:group1) { create :group }
let!(:group2) { create :group }
let!(:group3) { create :group, parent: group2 }
let!(:group4) { create :group, parent: group3 }
before do
group1.add_user(user, GroupMember::OWNER)
......@@ -93,8 +93,8 @@ describe 'OpenID Connect requests' do
'groups' => anything
}))
expected_groups = %w[group1 group2/group3]
expected_groups << 'group2/group3/group4' if Group.supports_nested_groups?
expected_groups = [group1.full_path, group3.full_path]
expected_groups << group4.full_path if Group.supports_nested_groups?
expect(json_response['groups']).to match_array(expected_groups)
end
end
......
......@@ -78,8 +78,10 @@ RSpec.configure do |config|
end
config.after(:example, :js) do |example|
# prevent localstorage from introducing side effects based on test order
execute_script("localStorage.clear();")
# prevent localStorage from introducing side effects based on test order
unless ['', 'about:blank', 'data:,'].include? Capybara.current_session.driver.browser.current_url
execute_script("localStorage.clear();")
end
# capybara/rspec already calls Capybara.reset_sessions! in an `after` hook,
# but `block_and_wait_for_requests_complete` is called before it so by
......
......@@ -12,11 +12,12 @@ RSpec.shared_examples 'additional metrics query' do
let(:client) { double('prometheus_client') }
let(:query_result) { described_class.new(client).query(*query_params) }
let(:environment) { create(:environment, slug: 'environment-slug') }
let(:project) { create(:project) }
let(:environment) { create(:environment, slug: 'environment-slug', project: project) }
before do
allow(client).to receive(:label_values).and_return(metric_names)
allow(metric_group_class).to receive(:all).and_return([simple_metric_group(metrics: [simple_metric])])
allow(metric_group_class).to receive(:common_metrics).and_return([simple_metric_group(metrics: [simple_metric])])
end
context 'metrics query context' do
......@@ -24,13 +25,14 @@ RSpec.shared_examples 'additional metrics query' do
shared_examples 'query context containing environment slug and filter' do
it 'contains ci_environment_slug' do
expect(subject).to receive(:query_metrics).with(hash_including(ci_environment_slug: environment.slug))
expect(subject).to receive(:query_metrics).with(project, hash_including(ci_environment_slug: environment.slug))
subject.query(*query_params)
end
it 'contains environment filter' do
expect(subject).to receive(:query_metrics).with(
project,
hash_including(
environment_filter: "container_name!=\"POD\",environment=\"#{environment.slug}\""
)
......@@ -48,7 +50,7 @@ RSpec.shared_examples 'additional metrics query' do
it_behaves_like 'query context containing environment slug and filter'
it 'query context contains kube_namespace' do
expect(subject).to receive(:query_metrics).with(hash_including(kube_namespace: kube_namespace))
expect(subject).to receive(:query_metrics).with(project, hash_including(kube_namespace: kube_namespace))
subject.query(*query_params)
end
......@@ -72,7 +74,7 @@ RSpec.shared_examples 'additional metrics query' do
it_behaves_like 'query context containing environment slug and filter'
it 'query context contains empty kube_namespace' do
expect(subject).to receive(:query_metrics).with(hash_including(kube_namespace: ''))
expect(subject).to receive(:query_metrics).with(project, hash_including(kube_namespace: ''))
subject.query(*query_params)
end
......@@ -81,7 +83,7 @@ RSpec.shared_examples 'additional metrics query' do
context 'with one group where two metrics is found' do
before do
allow(metric_group_class).to receive(:all).and_return([simple_metric_group])
allow(metric_group_class).to receive(:common_metrics).and_return([simple_metric_group])
end
context 'some queries return results' do
......@@ -117,7 +119,7 @@ RSpec.shared_examples 'additional metrics query' do
let(:metrics) { [simple_metric(queries: [simple_query])] }
before do
allow(metric_group_class).to receive(:all).and_return(
allow(metric_group_class).to receive(:common_metrics).and_return(
[
simple_metric_group(name: 'group_a', metrics: [simple_metric(queries: [simple_query])]),
simple_metric_group(name: 'group_b', metrics: [simple_metric(title: 'title_b', queries: [simple_query('b')])])
......