Commit 2fb3c193 authored by Tiger Watson

Merge branch 'fix_n+1_in_batched_migration_jobs' into 'master'

Fix N+1 when computing batched jobs time efficiency

See merge request gitlab-org/gitlab!76999
parents 867c2369 7c1a4672
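
Reviewer context, not part of the diff: a minimal sketch of the N+1 this merge request removes, assuming a Rails console inside the GitLab application. The `batched_jobs` association and the `successful_in_execution_order` and `with_preloads` scopes come from the diff below; the surrounding driver lines and the variable `migration` are illustrative only.

# Without the preload, each sampled job loads its parent batched_migration
# with its own query: 1 query for the jobs plus one per job for the parent.
jobs = migration.batched_jobs.successful_in_execution_order.reverse_order.limit(10)
jobs.each { |job| job.batched_migration } # one SELECT per job

# With the new scope, preload(:batched_migration) fetches the parents in a
# single extra query, so per-job access hits the preloaded records instead.
jobs = migration.batched_jobs.successful_in_execution_order.reverse_order.limit(10).with_preloads
jobs.each { |job| job.batched_migration } # no further queries

The new spec at the end of the diff checks the same property from the outside by comparing query counts with ActiveRecord::QueryRecorder and exceed_query_limit.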
@@ -12,17 +12,6 @@ module Gitlab
         MAX_ATTEMPTS = 3
         STUCK_JOBS_TIMEOUT = 1.hour.freeze
-        belongs_to :batched_migration, foreign_key: :batched_background_migration_id
-        scope :active, -> { where(status: [:pending, :running]) }
-        scope :stuck, -> { active.where('updated_at <= ?', STUCK_JOBS_TIMEOUT.ago) }
-        scope :retriable, -> {
-          failed_jobs = where(status: :failed).where('attempts < ?', MAX_ATTEMPTS)
-          from_union([failed_jobs, self.stuck])
-        }
-        scope :except_succeeded, -> { where(status: self.statuses.except(:succeeded).values) }
         enum status: {
           pending: 0,
           running: 1,
@@ -30,7 +19,14 @@ module Gitlab
           succeeded: 3
         }
+        belongs_to :batched_migration, foreign_key: :batched_background_migration_id
+        scope :active, -> { where(status: [:pending, :running]) }
+        scope :stuck, -> { active.where('updated_at <= ?', STUCK_JOBS_TIMEOUT.ago) }
+        scope :retriable, -> { from_union([failed.where('attempts < ?', MAX_ATTEMPTS), self.stuck]) }
+        scope :except_succeeded, -> { where(status: self.statuses.except(:succeeded).values) }
+        scope :successful_in_execution_order, -> { where.not(finished_at: nil).succeeded.order(:finished_at) }
+        scope :with_preloads, -> { preload(:batched_migration) }
         delegate :job_class, :table_name, :column_name, :job_arguments,
           to: :batched_migration, prefix: :migration
@@ -113,7 +113,7 @@ module Gitlab
       end
       def smoothed_time_efficiency(number_of_jobs: 10, alpha: 0.2)
-        jobs = batched_jobs.successful_in_execution_order.reverse_order.limit(number_of_jobs)
+        jobs = batched_jobs.successful_in_execution_order.reverse_order.limit(number_of_jobs).with_preloads
         return if jobs.size < number_of_jobs
@@ -336,8 +336,8 @@ RSpec.describe Gitlab::Database::BackgroundMigration::BatchedMigration, type: :m
   end
   describe '#smoothed_time_efficiency' do
-    let(:migration) { create(:batched_background_migration, interval: 120.seconds) }
-    let(:end_time) { Time.zone.now }
+    let_it_be(:migration) { create(:batched_background_migration, interval: 120.seconds) }
+    let_it_be(:end_time) { Time.zone.now }
     around do |example|
       freeze_time do
@@ -345,7 +345,7 @@ RSpec.describe Gitlab::Database::BackgroundMigration::BatchedMigration, type: :m
       end
     end
-    let(:common_attrs) do
+    let_it_be(:common_attrs) do
       {
         status: :succeeded,
         batched_migration: migration,
@@ -364,13 +364,14 @@ RSpec.describe Gitlab::Database::BackgroundMigration::BatchedMigration, type: :m
     end
     context 'when there are enough jobs' do
+      subject { migration.smoothed_time_efficiency(number_of_jobs: number_of_jobs) }
+      let_it_be(:number_of_jobs) { 10 }
+      let_it_be(:jobs) { create_list(:batched_background_migration_job, number_of_jobs, **common_attrs.merge(batched_migration: migration)) }
-      let!(:jobs) { create_list(:batched_background_migration_job, number_of_jobs, **common_attrs.merge(batched_migration: migration)) }
-      let(:number_of_jobs) { 10 }
-      subject { migration.smoothed_time_efficiency(number_of_jobs: number_of_jobs) }
       before do
-        expect(migration).to receive_message_chain(:batched_jobs, :successful_in_execution_order, :reverse_order, :limit).with(no_args).with(no_args).with(number_of_jobs).and_return(jobs)
+        expect(migration).to receive_message_chain(:batched_jobs, :successful_in_execution_order, :reverse_order, :limit, :with_preloads)
+          .and_return(jobs)
       end
       def mock_efficiencies(*effs)
@@ -411,6 +412,18 @@ RSpec.describe Gitlab::Database::BackgroundMigration::BatchedMigration, type: :m
         end
       end
     end
+    context 'with preloaded batched migration' do
+      it 'avoids N+1' do
+        create_list(:batched_background_migration_job, 11, **common_attrs.merge(started_at: end_time - 10.seconds))
+        control = ActiveRecord::QueryRecorder.new do
+          migration.smoothed_time_efficiency(number_of_jobs: 10)
+        end
+        expect { migration.smoothed_time_efficiency(number_of_jobs: 11) }.not_to exceed_query_limit(control)
+      end
+    end
   end
   describe '#optimize!' do