Commit 2db7c576 authored by Stan Hu

Cache Flipper feature flags in L1 and L2 caches

In https://gitlab.com/gitlab-com/gl-infra/production/issues/928, we saw
a significant amount of network traffic and CPU usage caused by feature
flag checks going to Redis via Flipper. Since these flags are checked on
every request, the overhead adds up quickly. To reduce the load on
Redis, we now cache the flag data in the following way:

* L1: A thread-local memory store for 1 minute
* L2: Redis for 1 hour
parent 62e52ac6
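The layering described in the commit message can be pictured outside GitLab with a minimal sketch. This is an illustration, not the GitLab code: Flipper::Adapters::Memory stands in for the ActiveRecord adapter, two ActiveSupport::Cache::MemoryStore instances stand in for the thread-local store and Redis, and :my_flag is a hypothetical flag name. Only the Flipper::Adapters::ActiveSupportCacheStore API already used in the diff below is assumed.

# Minimal sketch of the L1/L2 layering with in-memory stand-ins.
require 'flipper'
require 'flipper/adapters/memory'
require 'flipper/adapters/active_support_cache_store'
require 'active_support/cache'
require 'active_support/core_ext/integer/time'

source_adapter = Flipper::Adapters::Memory.new # stand-in for the ActiveRecord adapter

# L2: shared cache with a long TTL (Redis in production)
l2_adapter = Flipper::Adapters::ActiveSupportCacheStore.new(
  source_adapter,
  ActiveSupport::Cache::MemoryStore.new,
  expires_in: 1.hour)

# L1: per-thread cache with a short TTL, layered on top of L2
l1_adapter = Flipper::Adapters::ActiveSupportCacheStore.new(
  l2_adapter,
  ActiveSupport::Cache::MemoryStore.new,
  expires_in: 1.minute)

flipper = Flipper.new(l1_adapter)
flipper.enable(:my_flag)

# The first read fills both caches; for the next minute reads are served
# from L1, for the next hour from L2, and only after that from the source.
flipper.enabled?(:my_flag) # => true

Reads fall through the chain from the outermost adapter inward, which is why the short-lived L1 store is the one handed to Flipper.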
---
title: Cache Flipper feature flags in L1 and L2 caches
merge_request: 30276
author:
type: performance
@@ -103,10 +103,27 @@ class Feature
         feature_class: FlipperFeature,
         gate_class: FlipperGate)
 
+      # Redis L2 cache
+      redis_cache_adapter =
+        Flipper::Adapters::ActiveSupportCacheStore.new(
+          active_record_adapter,
+          l2_cache_backend,
+          expires_in: 1.hour)
+
+      # Thread-local L1 cache: use a short timeout since we don't have a
+      # way to expire this cache all at once
       Flipper::Adapters::ActiveSupportCacheStore.new(
-        active_record_adapter,
-        Rails.cache,
-        expires_in: 1.hour)
+        redis_cache_adapter,
+        l1_cache_backend,
+        expires_in: 1.minute)
     end
 
+    def l1_cache_backend
+      Gitlab::ThreadMemoryCache.cache_backend
+    end
+
+    def l2_cache_backend
+      Rails.cache
+    end
   end
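Gitlab::ThreadMemoryCache is referenced by l1_cache_backend but is not part of this hunk. As a rough sketch (an assumption about its shape, not the actual GitLab implementation), a backend satisfying that call only needs to hand each thread its own ActiveSupport-compatible store:

# Hypothetical sketch of a thread-local cache backend; the real
# Gitlab::ThreadMemoryCache implementation is not shown in this diff.
require 'active_support/cache'

module Gitlab
  class ThreadMemoryCache
    THREAD_KEY = :thread_memory_cache

    def self.cache_backend
      # One MemoryStore per thread: entries cannot be expired across
      # threads, which is why the L1 TTL above is kept to one minute.
      Thread.current[THREAD_KEY] ||= ActiveSupport::Cache::MemoryStore.new
    end
  end
end

Because each thread holds an independent store, there is no way to flush all L1 entries at once; the short expires_in in flipper_adapter bounds how stale a thread's view can get.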
@@ -144,6 +144,68 @@ describe Feature do
       expect(described_class.enabled?(:enabled_feature_flag)).to be_truthy
     end
 
+    it { expect(described_class.l1_cache_backend).to eq(Gitlab::ThreadMemoryCache.cache_backend) }
+    it { expect(described_class.l2_cache_backend).to eq(Rails.cache) }
+
+    it 'caches the status in L1 and L2 caches',
+       :request_store, :use_clean_rails_memory_store_caching do
+      described_class.enable(:enabled_feature_flag)
+
+      flipper_key = "flipper/v1/feature/enabled_feature_flag"
+
+      expect(described_class.l2_cache_backend)
+        .to receive(:fetch)
+        .once
+        .with(flipper_key, expires_in: 1.hour)
+        .and_call_original
+
+      expect(described_class.l1_cache_backend)
+        .to receive(:fetch)
+        .once
+        .with(flipper_key, expires_in: 1.minute)
+        .and_call_original
+
+      2.times do
+        expect(described_class.enabled?(:enabled_feature_flag)).to be_truthy
+      end
+    end
+
+    context 'cached feature flag', :request_store do
+      let(:flag) { :some_feature_flag }
+
+      before do
+        described_class.flipper.memoize = false
+        described_class.enabled?(flag)
+      end
+
+      it 'caches the status in L1 cache for the first minute' do
+        expect do
+          expect(described_class.l1_cache_backend).to receive(:fetch).once.and_call_original
+          expect(described_class.l2_cache_backend).not_to receive(:fetch)
+          expect(described_class.enabled?(flag)).to be_truthy
+        end.not_to exceed_query_limit(0)
+      end
+
+      it 'caches the status in L2 cache after 2 minutes' do
+        Timecop.travel 2.minutes do
+          expect do
+            expect(described_class.l1_cache_backend).to receive(:fetch).once.and_call_original
+            expect(described_class.l2_cache_backend).to receive(:fetch).once.and_call_original
+            expect(described_class.enabled?(flag)).to be_truthy
+          end.not_to exceed_query_limit(0)
+        end
+      end
+
+      it 'fetches the status after an hour' do
+        Timecop.travel 61.minutes do
+          expect do
+            expect(described_class.l1_cache_backend).to receive(:fetch).once.and_call_original
+            expect(described_class.l2_cache_backend).to receive(:fetch).once.and_call_original
+            expect(described_class.enabled?(flag)).to be_truthy
+          end.not_to exceed_query_limit(1)
+        end
+      end
+    end
+
     context 'with an individual actor' do
       CustomActor = Struct.new(:flipper_id)