Commit 6c9c3b7e authored by Kamil Trzciński

Merge branch '322125-add-prometheus-histo-on-retries' into 'master'

Add Prometheus histogram for lock retries

See merge request gitlab-org/gitlab!55614
parents 82fca986 b01f1711
---
title: Add histogram for optimistic lock retries
merge_request: 55614
author:
type: other
@@ -222,6 +222,7 @@ configuration option in `gitlab.yml`. These metrics are served from the
| `limited_capacity_worker_remaining_work_count` | Gauge | 13.5 | Number of jobs waiting to be enqueued | `worker` |
| `destroyed_job_artifacts_count_total` | Counter | 13.6 | Number of destroyed expired job artifacts | |
| `destroyed_pipeline_artifacts_count_total` | Counter | 13.8 | Number of destroyed expired pipeline artifacts | |
| `gitlab_optimistic_locking_retries` | Histogram | 13.10 | Number of retry attempts to execute optimistic retry lock | |
## Database load balancing metrics **(PREMIUM SELF)**
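For readers of the new `gitlab_optimistic_locking_retries` row above: the buckets declared for this histogram (see `retry_lock_histogram` in the next hunk) are [0, 1, 2, 3, 5, 10, 50]. Below is a minimal, self-contained Ruby sketch, illustration only and not GitLab code, of which cumulative Prometheus buckets a given retry count increments:

BUCKETS = [0, 1, 2, 3, 5, 10, 50].freeze

# A Prometheus histogram counts an observation in every bucket whose upper
# bound ("le") is greater than or equal to the observed value, plus the
# implicit +Inf bucket.
def buckets_hit(retries)
  BUCKETS.select { |upper| retries <= upper } + [Float::INFINITY]
end

buckets_hit(0) # => [0, 1, 2, 3, 5, 10, 50, Infinity]  (lock taken without retrying)
buckets_hit(4) # => [5, 10, 50, Infinity]               (four retries were needed)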
@@ -22,20 +22,22 @@ module Gitlab
retry_attempts += 1
retry
ensure
elapsed_time = Gitlab::Metrics::System.monotonic_time - start_time
retry_lock_histogram.observe({}, retry_attempts)
log_optimistic_lock_retries(
name: name,
retry_attempts: retry_attempts,
elapsed_time: elapsed_time)
start_time: start_time)
end
end
alias_method :retry_optimistic_lock, :retry_lock
def log_optimistic_lock_retries(name:, retry_attempts:, elapsed_time:)
def log_optimistic_lock_retries(name:, retry_attempts:, start_time:)
return unless retry_attempts > 0
elapsed_time = Gitlab::Metrics::System.monotonic_time - start_time
retry_lock_logger.info(
message: "Optimistic Lock released with retries",
name: name,
@@ -46,5 +48,15 @@ module Gitlab
def retry_lock_logger
@retry_lock_logger ||= Gitlab::Services::Logger.build
end
def retry_lock_histogram
@retry_lock_histogram ||=
Gitlab::Metrics.histogram(
:gitlab_optimistic_locking_retries,
'Number of retry attempts to execute optimistic retry lock',
{},
[0, 1, 2, 3, 5, 10, 50]
)
end
end
end
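For context on what the histogram measures, here is a sketch of the instrumented call path. The `name:` label and the use of a `Ci::Pipeline` record are illustrative, following the spec below rather than any specific production caller:

# Each ActiveRecord::StaleObjectError raised inside the block triggers a reset
# of the record and a retry; the final retry count reaches the histogram via
# the observe call in the ensure block above.
Gitlab::OptimisticLocking.retry_lock(pipeline, name: 'optimistic_locking_example') do |subject|
  subject.drop
end

# retry_optimistic_lock is an alias of retry_lock, so existing callers keep working.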
@@ -5,6 +5,13 @@ require 'spec_helper'
RSpec.describe Gitlab::OptimisticLocking do
let!(:pipeline) { create(:ci_pipeline) }
let!(:pipeline2) { Ci::Pipeline.find(pipeline.id) }
let(:histogram) { spy('prometheus metric') }
before do
allow(described_class)
.to receive(:retry_lock_histogram)
.and_return(histogram)
end
describe '#retry_lock' do
let(:name) { 'optimistic_locking_spec' }
@@ -28,6 +35,12 @@ RSpec.describe Gitlab::OptimisticLocking do
subject
end
it 'adds number of retries to histogram' do
subject
expect(histogram).to have_received(:observe).with({}, 0)
end
end
context 'when at least one retry happened, the change succeeded' do
@@ -37,9 +50,11 @@ RSpec.describe Gitlab::OptimisticLocking do
end
end
it 'completes the action' do
before do
pipeline.succeed
end
it 'completes the action' do
expect(pipeline2).to receive(:reset).and_call_original
expect(pipeline2).to receive(:drop).twice.and_call_original
@@ -47,8 +62,6 @@ RSpec.describe Gitlab::OptimisticLocking do
end
it 'creates a single log record' do
pipeline.succeed
expect(described_class.retry_lock_logger)
.to receive(:info)
.once
@@ -56,6 +69,12 @@ RSpec.describe Gitlab::OptimisticLocking do
subject
end
it 'adds number of retries to histogram' do
subject
expect(histogram).to have_received(:observe).with({}, 1)
end
end
context 'when MAX_RETRIES attempts exceeded' do
@@ -82,6 +101,12 @@ RSpec.describe Gitlab::OptimisticLocking do
expect { subject }.to raise_error(ActiveRecord::StaleObjectError)
end
it 'adds number of retries to histogram' do
expect { subject }.to raise_error(ActiveRecord::StaleObjectError)
expect(histogram).to have_received(:observe).with({}, max_retries)
end
end
end