Commit 29bc885d authored by Sean McGivern

Merge branch 'move_overridden_ci_servivec' into 'master'

Move caught-up check for Ci::RegisterJobService to Core

See merge request gitlab-org/gitlab!62744
parents 1c6c3b8b 6c35d95a
@@ -22,11 +22,27 @@ module Ci
     end
 
     def execute(params = {})
+      db_all_caught_up = ::Gitlab::Database::LoadBalancing::Sticking.all_caught_up?(:runner, runner.id)
+
       @metrics.increment_queue_operation(:queue_attempt)
 
-      @metrics.observe_queue_time(:process, @runner.runner_type) do
+      result = @metrics.observe_queue_time(:process, @runner.runner_type) do
         process_queue(params)
       end
+
+      # Since we execute this query against a replica, it might lead to a false positive:
+      # we might receive the response "hi, we don't have any more builds for you", which
+      # may not be true. If the replica has not caught up to the point when the runner
+      # event was generated, there might still be CI builds to pick up. Instead, we tell
+      # the runner: "we don't have any more builds right now, but try again", and on the
+      # retry (again against a replica) we check whether the replication lag has caught up.
+      if !db_all_caught_up && !result.build
+        metrics.increment_queue_operation(:queue_replication_lag)
+
+        ::Ci::RegisterJobService::Result.new(nil, false) # rubocop:disable Cop/AvoidReturnFromBlocks
+      else
+        result
+      end
     end
 
     private
...
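For context when reading the hunk above: the new code only turns an empty-queue response into an "invalid, try again" response when the replica was known to be lagging. Below is a minimal, self-contained Ruby sketch of that guard; the `SketchResult` struct and `guard_against_replication_lag` method are stand-ins for illustration, not the actual GitLab classes.

```ruby
# Illustrative sketch only: stand-in types, not the real GitLab classes.
SketchResult = Struct.new(:build, :valid) do
  def valid?
    valid
  end
end

# If the replica had not caught up when the queue was processed AND no build was
# found, answer "no build, invalid" so the runner retries soon, instead of treating
# the possibly stale empty queue as authoritative.
def guard_against_replication_lag(result, db_all_caught_up:)
  if !db_all_caught_up && result.build.nil?
    SketchResult.new(nil, false) # "nothing now, but try again"
  else
    result
  end
end

stale_empty = guard_against_replication_lag(SketchResult.new(nil, true), db_all_caught_up: false)
puts stale_empty.valid? # => false, so the runner will poll again
```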
@@ -10,24 +10,6 @@ module EE
       extend ActiveSupport::Concern
       extend ::Gitlab::Utils::Override
 
-      def execute(params = {})
-        db_all_caught_up = ::Gitlab::Database::LoadBalancing::Sticking.all_caught_up?(:runner, runner.id)
-
-        super.tap do |result|
-          # Since we execute this query against replica it might lead to false-positive
-          # We might receive the positive response: "hi, we don't have any more builds for you".
-          # This might not be true. If our DB replica is not up-to date with when runner event was generated
-          # we might still have some CI builds to be picked. Instead we should say to runner:
-          # "Hi, we don't have any more builds now, but not everything is right anyway, so try again".
-          # Runner will retry, but again, against replica, and again will check if replication lag did catch-up.
-          if !db_all_caught_up && !result.build
-            metrics.increment_queue_operation(:queue_replication_lag)
-
-            return ::Ci::RegisterJobService::Result.new(nil, false) # rubocop:disable Cop/AvoidReturnFromBlocks
-          end
-        end
-      end
-
       def builds_for_shared_runner
         # if disaster recovery is enabled, we disable quota
         if ::Feature.enabled?(:ci_queueing_disaster_recovery, runner, type: :ops, default_enabled: :yaml)
...
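A side note on the shape change visible across the two hunks above: the removed EE override returned early from inside the block given to `super.tap` (which is why `Cop/AvoidReturnFromBlocks` had to be disabled), while the core version captures the result and branches with a plain `if`/`else`. Here is a generic sketch of the two shapes, using made-up helper names purely for illustration; it is not GitLab code.

```ruby
# Generic illustration with made-up helpers; not GitLab code.
def compute_result
  nil # stand-in for the expensive queue lookup returning "no build"
end

def fallback_result
  :try_again # stand-in for Result.new(nil, false)
end

# Before: early `return` from a block passed to `tap`
# (the pattern flagged by Cop/AvoidReturnFromBlocks).
def fetch_with_tap(stale:)
  compute_result.tap do |result|
    return fallback_result if stale && result.nil?
  end
end

# After: capture the value and branch explicitly.
def fetch_with_branch(stale:)
  result = compute_result
  if stale && result.nil?
    fallback_result
  else
    result
  end
end

p fetch_with_tap(stale: true)    # => :try_again
p fetch_with_branch(stale: true) # => :try_again
```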
@@ -10,34 +10,6 @@ RSpec.describe Ci::RegisterJobService do
   let!(:pending_build) { create :ci_build, pipeline: pipeline }
 
   describe '#execute' do
-    context 'checks database loadbalancing stickiness' do
-      subject { described_class.new(shared_runner).execute }
-
-      before do
-        project.update!(shared_runners_enabled: false)
-      end
-
-      it 'result is valid if replica did caught-up' do
-        allow(Gitlab::Database::LoadBalancing).to receive(:enable?)
-          .and_return(true)
-
-        expect(Gitlab::Database::LoadBalancing::Sticking).to receive(:all_caught_up?)
-          .with(:runner, shared_runner.id) { true }
-
-        expect(subject).to be_valid
-      end
-
-      it 'result is invalid if replica did not caught-up' do
-        allow(Gitlab::Database::LoadBalancing).to receive(:enable?)
-          .and_return(true)
-
-        expect(Gitlab::Database::LoadBalancing::Sticking).to receive(:all_caught_up?)
-          .with(:runner, shared_runner.id) { false }
-
-        expect(subject).not_to be_valid
-      end
-    end
-
     context 'shared runners minutes limit' do
       subject { described_class.new(shared_runner).execute.build }
...
@@ -14,6 +14,34 @@ module Ci
   let!(:pending_job) { create(:ci_build, pipeline: pipeline) }
 
   describe '#execute' do
+    context 'checks database loadbalancing stickiness' do
+      subject { described_class.new(shared_runner).execute }
+
+      before do
+        project.update!(shared_runners_enabled: false)
+      end
+
+      it 'result is valid if replica caught up' do
+        allow(Gitlab::Database::LoadBalancing).to receive(:enable?)
+          .and_return(true)
+
+        expect(Gitlab::Database::LoadBalancing::Sticking).to receive(:all_caught_up?)
+          .with(:runner, shared_runner.id) { true }
+
+        expect(subject).to be_valid
+      end
+
+      it 'result is invalid if replica has not caught up' do
+        allow(Gitlab::Database::LoadBalancing).to receive(:enable?)
+          .and_return(true)
+
+        expect(Gitlab::Database::LoadBalancing::Sticking).to receive(:all_caught_up?)
+          .with(:runner, shared_runner.id) { false }
+
+        expect(subject).not_to be_valid
+      end
+    end
+
     shared_examples 'handles runner assignment' do
       context 'runner follow tag list' do
         it "picks build with the same tag" do
...