Commit 1c2a29fe authored by Aleksei Lipniagov

Remove feature flag related to valid hosts list

Remove the :load_balancing_refine_load_balancer_methods FF and clean up
related code.

Changelog: other
parent 5f538752
---
name: load_balancing_refine_load_balancer_methods
introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/65356
rollout_issue_url: https://gitlab.com/gitlab-org/gitlab/-/issues/335109
milestone: '14.1'
type: development
group: group::memory
default_enabled: false
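For context, a development feature flag defined this way is consulted in application code via `Feature.enabled?`, so deleting the definition file goes hand in hand with removing the gating branch. A minimal sketch of the pattern this commit collapses (illustration only; the real call site appears in the `Sticking` hunk below):

```ruby
# Hypothetical illustration of the gating being removed; the actual call site
# lives in Gitlab::Database::LoadBalancing::Sticking (see the hunk below).
if ::Feature.enabled?(:load_balancing_refine_load_balancer_methods)
  # Refined path: find a single up-to-date replica and remember only that host.
  load_balancer.select_up_to_date_host(location)
else
  # Legacy path: build and cache a whole list of caught-up hosts per request.
  load_balancer.select_caught_up_hosts(location)
end
```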
@@ -12,7 +12,6 @@ module Gitlab
# always returns a connection to the primary.
class LoadBalancer
CACHE_KEY = :gitlab_load_balancer_host
VALID_HOSTS_CACHE_KEY = :gitlab_load_balancer_valid_hosts
attr_reader :host_list
@@ -118,7 +117,7 @@ module Gitlab
# Hosts are scoped per thread so that multiple threads don't
# accidentally re-use the same host + connection.
def host
RequestStore[CACHE_KEY] ||= current_host_list.next
RequestStore[CACHE_KEY] ||= @host_list.next
end
# Releases the host and connection for the current thread.
@@ -129,7 +128,6 @@ module Gitlab
end
RequestStore.delete(CACHE_KEY)
RequestStore.delete(VALID_HOSTS_CACHE_KEY)
end
def release_primary_connection
@@ -148,39 +146,6 @@ module Gitlab
end
# Returns true if there was at least one host that has caught up with the given transaction.
#
# In case of a retry, this method also stores the set of hosts that have caught up.
#
# UPD: `select_caught_up_hosts` seems to have redundant logic managing the host list (`:gitlab_load_balancer_valid_hosts`),
# while we only need a single host: https://gitlab.com/gitlab-org/gitlab/-/issues/326125#note_615271604
# Also, shuffling the list afterwards doesn't seem to be necessary.
# This may be improved by merging this method with `select_up_to_date_host`.
# Could be removed when `:load_balancing_refine_load_balancer_methods` FF is rolled out
def select_caught_up_hosts(location)
all_hosts = @host_list.hosts
valid_hosts = all_hosts.select { |host| host.caught_up?(location) }
return false if valid_hosts.empty?
# Hosts can come online after the time when this scan was done,
# so we need to remember the ones that can be used. If the host went
# offline, we'll just rely on the retry mechanism to use the primary.
set_consistent_hosts_for_request(HostList.new(valid_hosts))
# Since we will be using a subset from the original list, let's just
# pick a random host and mix up the original list to ensure we don't
# only end up using one replica.
RequestStore[CACHE_KEY] = valid_hosts.sample
@host_list.shuffle
true
end
# Returns true if there was at least one host that has caught up with the given transaction.
# Similar to `#select_caught_up_hosts`, it picks a random host so that we rotate which replicas we use.
# Unlike `#select_caught_up_hosts`, it does not iterate over all hosts: it stops as soon as it finds a caught-up one.
#
# It is going to be merged with `select_caught_up_hosts`, because they are intended to do the same thing.
def select_up_to_date_host(location)
all_hosts = @host_list.hosts.shuffle
host = all_hosts.find { |host| host.caught_up?(location) }
@@ -192,11 +157,6 @@ module Gitlab
true
end
# Could be removed when `:load_balancing_refine_load_balancer_methods` FF is rolled out
def set_consistent_hosts_for_request(hosts)
RequestStore[VALID_HOSTS_CACHE_KEY] = hosts
end
# Yields a block, retrying it upon error using an exponential backoff.
def retry_with_backoff(retries = 3, time = 2)
retried = 0
@@ -268,10 +228,6 @@ module Gitlab
@connection_db_roles_count.delete(connection)
end
end
def current_host_list
RequestStore[VALID_HOSTS_CACHE_KEY] || @host_list
end
end
end
end
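With the per-request valid-hosts cache gone, the `LoadBalancer` keeps a single per-thread host and `select_up_to_date_host` becomes the only replica-selection path. A hedged sketch of the resulting methods, assembled from the hunks above; the part of `select_up_to_date_host` past the hunk boundary is an assumption based on the surrounding context:

```ruby
# Sketch only: reconstructed from the hunks above, not copied verbatim.
# These methods live inside Gitlab::Database::LoadBalancing::LoadBalancer.
def host
  # One host per thread/request, so concurrent threads never share a connection.
  RequestStore[CACHE_KEY] ||= @host_list.next
end

def select_up_to_date_host(location)
  # Shuffle so repeated calls rotate which replica is picked first.
  all_hosts = @host_list.hosts.shuffle
  host = all_hosts.find { |h| h.caught_up?(location) }

  return false unless host

  # Assumed continuation (elided by the hunk boundary): pin the caught-up
  # replica for the rest of the request via RequestStore[CACHE_KEY], mirroring
  # what the removed select_caught_up_hosts did.
  RequestStore[CACHE_KEY] = host

  true
end
```

Compared with the removed `select_caught_up_hosts`, this stops at the first caught-up replica instead of scanning every host, caching a `HostList` under `VALID_HOSTS_CACHE_KEY`, and reshuffling the original list.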
@@ -53,15 +53,9 @@ module Gitlab
# write location. If no such location exists, err on the side of caution.
return false unless location
if ::Feature.enabled?(:load_balancing_refine_load_balancer_methods)
load_balancer.select_up_to_date_host(location).tap do |selected|
unstick(namespace, id) if selected
end
else
load_balancer.select_caught_up_hosts(location).tap do |selected|
unstick(namespace, id) if selected
end
end
end
# Sticks to the primary if necessary, otherwise unsticks an object (if
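After this change `select_caught_up_replicas` has a single code path. A hedged usage sketch, based on the spec calls further down; the `:project` / `42` arguments are just the illustrative values those specs use:

```ruby
# Returns true (and unsticks the namespace/id pair) when at least one replica
# has caught up with the recorded write location, false otherwise.
if Gitlab::Database::LoadBalancing::Sticking.select_caught_up_replicas(:project, 42)
  # Reads for this object may now be served by replicas instead of the primary.
end
```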
@@ -261,7 +261,6 @@ RSpec.describe Gitlab::Database::LoadBalancing::LoadBalancer, :request_store do
it 'stores the host in a thread-local variable' do
RequestStore.delete(described_class::CACHE_KEY)
RequestStore.delete(described_class::VALID_HOSTS_CACHE_KEY)
expect(lb.host_list).to receive(:next).once.and_call_original
@@ -279,7 +278,6 @@ RSpec.describe Gitlab::Database::LoadBalancing::LoadBalancer, :request_store do
lb.release_host
expect(RequestStore[described_class::CACHE_KEY]).to be_nil
expect(RequestStore[described_class::VALID_HOSTS_CACHE_KEY]).to be_nil
end
end
@@ -414,60 +412,6 @@ RSpec.describe Gitlab::Database::LoadBalancing::LoadBalancer, :request_store do
end
end
describe '#select_caught_up_hosts' do
let(:location) { 'AB/12345' }
let(:hosts) { lb.host_list.hosts }
let(:valid_host_list) { RequestStore[described_class::VALID_HOSTS_CACHE_KEY] }
let(:valid_hosts) { valid_host_list.hosts }
subject { lb.select_caught_up_hosts(location) }
context 'when all replicas are caught up' do
before do
expect(hosts).to all(receive(:caught_up?).with(location).and_return(true))
end
it 'returns true and sets all hosts to valid' do
expect(subject).to be true
expect(valid_host_list).to be_a(Gitlab::Database::LoadBalancing::HostList)
expect(valid_hosts).to contain_exactly(*hosts)
end
end
context 'when none of the replicas are caught up' do
before do
expect(hosts).to all(receive(:caught_up?).with(location).and_return(false))
end
it 'returns false and does not set the valid hosts' do
expect(subject).to be false
expect(valid_host_list).to be_nil
end
end
context 'when one of the replicas is caught up' do
before do
expect(hosts[0]).to receive(:caught_up?).with(location).and_return(false)
expect(hosts[1]).to receive(:caught_up?).with(location).and_return(true)
end
it 'returns true and sets one host to valid' do
expect(subject).to be true
expect(valid_host_list).to be_a(Gitlab::Database::LoadBalancing::HostList)
expect(valid_hosts).to contain_exactly(hosts[1])
end
it 'host always returns the caught-up replica' do
subject
3.times do
expect(lb.host).to eq(hosts[1])
RequestStore.delete(described_class::CACHE_KEY)
end
end
end
end
describe '#select_up_to_date_host' do
let(:location) { 'AB/12345' }
let(:hosts) { lb.host_list.hosts }
@@ -313,7 +313,7 @@ RSpec.describe Gitlab::Database::LoadBalancing::Sticking, :redis do
end
it 'returns false and does not try to find caught up hosts' do
expect(described_class).not_to receive(:select_caught_up_hosts)
expect(lb).not_to receive(:select_up_to_date_host)
expect(described_class.select_caught_up_replicas(:project, 42)).to be false
end
end
@@ -329,18 +329,6 @@ RSpec.describe Gitlab::Database::LoadBalancing::Sticking, :redis do
expect(described_class).to receive(:unstick).with(:project, 42)
expect(described_class.select_caught_up_replicas(:project, 42)).to be true
end
context 'when :load_balancing_refine_load_balancer_methods FF is disabled' do
before do
stub_feature_flags(load_balancing_refine_load_balancer_methods: false)
end
it 'returns true, selects hosts, and unsticks if any secondary has caught up' do
expect(lb).to receive(:select_caught_up_hosts).and_return(true)
expect(described_class).to receive(:unstick).with(:project, 42)
expect(described_class.select_caught_up_replicas(:project, 42)).to be true
end
end
end
end
end
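With the flag-disabled context deleted, the surviving example only needs to stub the single selection method. A hedged reconstruction of what that kept example presumably looks like; the `it` description and the stubbing line sit above the hunk shown here, so both are assumptions pieced together from the unchanged expectations and the removed counterpart:

```ruby
# Assumed shape of the surviving example; only the unstick expectation and the
# final assertion are visible in the hunk above.
it 'returns true, selects a host, and unsticks if any secondary has caught up' do
  expect(lb).to receive(:select_up_to_date_host).and_return(true)
  expect(described_class).to receive(:unstick).with(:project, 42)
  expect(described_class.select_caught_up_replicas(:project, 42)).to be true
end
```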