Commit 796a0c97 authored by Dylan Griffith

Merge branch 'caw-refactor-allow-possible-spam-flag' into 'master'

Refactor handling of allow_possible_spam flag

See merge request gitlab-org/gitlab!81078
parents 811e02e6 d7585b56
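
In outline, the diffs below move the `allow_possible_spam` handling out of `Spam::SpamActionService` and into `Spam::SpamVerdictService`: instead of each verdict branch checking `target.allow_possible_spam?`, the verdict service can now return a new `OVERRIDE_VIA_ALLOW_POSSIBLE_SPAM` verdict, which the action service handles by only recording a spam log. A minimal, self-contained Ruby sketch of the resulting control flow (the `FakeTarget` struct and the `handle` helper are illustrative stand-ins, not part of the change):

# Sketch only: FakeTarget stands in for a real Spammable model (Issue, Snippet, ...).
FakeTarget = Struct.new(:spam, :needs_recaptcha) do
  def spam!
    self.spam = true
  end

  def needs_recaptcha!
    self.needs_recaptcha = true
  end

  def clear_spam_flags!
    self.spam = false
    self.needs_recaptcha = false
  end
end

# Mirrors the refactored case/when in SpamActionService#execute: every verdict
# still records a spam log (omitted here), but only the override verdict leaves
# the target completely untouched.
def handle(verdict, target)
  case verdict
  when 'block', 'disallow'
    target.spam!
  when 'conditional_allow'
    target.needs_recaptcha!
  when 'override_via_allow_possible_spam'
    # spam log only; no spam flag, no CAPTCHA requirement
  when 'allow'
    target.clear_spam_flags!
  end
end

target = FakeTarget.new(false, false)
handle('override_via_allow_possible_spam', target)
p [target.spam, target.needs_recaptcha] # => [false, false]
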
@@ -65,22 +65,19 @@ module Spam
       # ask the SpamVerdictService what to do with the target.
       spam_verdict_service.execute.tap do |result|
         case result
-        when CONDITIONAL_ALLOW
-          # at the moment, this means "ask for reCAPTCHA"
-          create_spam_log
-          break if target.allow_possible_spam?
-          target.needs_recaptcha!
-        when DISALLOW
-          # TODO: remove `unless target.allow_possible_spam?` once this flag has been passed to `SpamVerdictService`
-          # https://gitlab.com/gitlab-org/gitlab/-/issues/214739
-          target.spam! unless target.allow_possible_spam?
-          create_spam_log
         when BLOCK_USER
           # TODO: improve BLOCK_USER handling, non-existent until now
           # https://gitlab.com/gitlab-org/gitlab/-/issues/329666
-          target.spam! unless target.allow_possible_spam?
+          target.spam!
           create_spam_log
+        when DISALLOW
+          target.spam!
+          create_spam_log
+        when CONDITIONAL_ALLOW
+          # This means "require a CAPTCHA to be solved"
+          target.needs_recaptcha!
+          create_spam_log
+        when OVERRIDE_VIA_ALLOW_POSSIBLE_SPAM
+          create_spam_log
         when ALLOW
           target.clear_spam_flags!
...
@@ -2,11 +2,12 @@
 module Spam
   module SpamConstants
-    CONDITIONAL_ALLOW = "conditional_allow"
-    DISALLOW = "disallow"
-    ALLOW = "allow"
-    BLOCK_USER = "block"
-    NOOP = "noop"
+    BLOCK_USER = 'block'
+    DISALLOW = 'disallow'
+    CONDITIONAL_ALLOW = 'conditional_allow'
+    OVERRIDE_VIA_ALLOW_POSSIBLE_SPAM = 'override_via_allow_possible_spam'
+    ALLOW = 'allow'
+    NOOP = 'noop'

     SUPPORTED_VERDICTS = {
       BLOCK_USER => {
@@ -18,11 +19,14 @@ module Spam
       CONDITIONAL_ALLOW => {
         priority: 3
       },
-      ALLOW => {
+      OVERRIDE_VIA_ALLOW_POSSIBLE_SPAM => {
         priority: 4
       },
-      NOOP => {
+      ALLOW => {
         priority: 5
+      },
+      NOOP => {
+        priority: 6
       }
     }.freeze
   end
...
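
For context, these priority values are what `SpamVerdictService` uses to favour the most restrictive verdict via `min_by`, and what the new override check compares against. A small standalone illustration (the priorities for BLOCK_USER and DISALLOW are assumed to be 1 and 2, since those rows fall outside the hunk above, and the `valid_results` array is made up for the example):

SUPPORTED_VERDICTS = {
  'block'                            => { priority: 1 }, # assumed; not shown in the hunk above
  'disallow'                         => { priority: 2 }, # assumed; not shown in the hunk above
  'conditional_allow'                => { priority: 3 },
  'override_via_allow_possible_spam' => { priority: 4 },
  'allow'                            => { priority: 5 },
  'noop'                             => { priority: 6 }
}.freeze

# Hypothetical verdicts returned by the Akismet and spamcheck checks for one request.
valid_results = ['conditional_allow', 'allow']

# The lowest priority value wins, i.e. the most restrictive verdict.
puts valid_results.min_by { |v| SUPPORTED_VERDICTS[v][:priority] }
# => conditional_allow
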
@@ -39,21 +39,24 @@ module Spam
       return ALLOW unless valid_results.any?

       # Favour the most restrictive result.
-      final_verdict = valid_results.min_by { |v| SUPPORTED_VERDICTS[v][:priority] }
+      verdict = valid_results.min_by { |v| SUPPORTED_VERDICTS[v][:priority] }
+
+      # The target can override the verdict via the `allow_possible_spam` feature flag
+      verdict = OVERRIDE_VIA_ALLOW_POSSIBLE_SPAM if override_via_allow_possible_spam?(verdict: verdict)

       logger.info(class: self.class.name,
                   akismet_verdict: akismet_verdict,
                   spam_check_verdict: original_spamcheck_result,
                   extra_attributes: spamcheck_attribs,
                   spam_check_rtt: external_spam_check_round_trip_time.real,
-                  final_verdict: final_verdict,
+                  final_verdict: verdict,
                   username: user.username,
                   user_id: user.id,
                   target_type: target.class.to_s,
                   project_id: target.project_id
                  )

-      final_verdict
+      verdict
     end

     private
@@ -87,6 +90,14 @@ module Spam
       end
     end

+    def override_via_allow_possible_spam?(verdict:)
+      # If the verdict is already going to allow (because current verdict's priority value is greater
+      # than the override verdict's priority value), then we don't need to override it.
+      return false if SUPPORTED_VERDICTS[verdict][:priority] > SUPPORTED_VERDICTS[OVERRIDE_VIA_ALLOW_POSSIBLE_SPAM][:priority]
+
+      target.allow_possible_spam?
+    end
+
     def spamcheck_client
       @spamcheck_client ||= Gitlab::Spamcheck::Client.new
     end
...
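
The practical effect of `override_via_allow_possible_spam?` is that any verdict at least as restrictive as the override (BLOCK_USER, DISALLOW, CONDITIONAL_ALLOW, or the override itself) is replaced with OVERRIDE_VIA_ALLOW_POSSIBLE_SPAM when the target opts in, while ALLOW and NOOP pass through unchanged. A rough standalone sketch of that decision, reusing the assumed priority table from above and a plain boolean in place of the real feature-flag check:

PRIORITIES = {
  'block' => 1, 'disallow' => 2, 'conditional_allow' => 3,
  'override_via_allow_possible_spam' => 4, 'allow' => 5, 'noop' => 6
}.freeze

OVERRIDE = 'override_via_allow_possible_spam'

def final_verdict(verdict, allow_possible_spam:)
  # Verdicts that are already laxer than the override (higher priority value) are left alone.
  return verdict if PRIORITIES[verdict] > PRIORITIES[OVERRIDE]
  # Otherwise the override only applies when the target opts in via allow_possible_spam.
  return verdict unless allow_possible_spam

  OVERRIDE
end

puts final_verdict('block', allow_possible_spam: true)  # => override_via_allow_possible_spam
puts final_verdict('allow', allow_possible_spam: true)  # => allow
puts final_verdict('block', allow_possible_spam: false) # => block
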
@@ -619,11 +619,11 @@ RSpec.describe Projects::IssuesController do
       end
     end

-    context 'when the SpamVerdictService disallows' do
+    context 'when an issue is identified as spam' do
       before do
         stub_application_setting(recaptcha_enabled: true)

-        expect_next_instance_of(Spam::SpamVerdictService) do |verdict_service|
-          expect(verdict_service).to receive(:execute).and_return(CONDITIONAL_ALLOW)
+        allow_next_instance_of(Spam::AkismetService) do |akismet_service|
+          allow(akismet_service).to receive(:spam?).and_return(true)
         end
       end
@@ -940,8 +940,8 @@ RSpec.describe Projects::IssuesController do
     context 'when an issue is identified as spam' do
       context 'when recaptcha is not verified' do
         before do
-          expect_next_instance_of(Spam::SpamVerdictService) do |verdict_service|
-            expect(verdict_service).to receive(:execute).and_return(CONDITIONAL_ALLOW)
+          allow_next_instance_of(Spam::AkismetService) do |akismet_service|
+            allow(akismet_service).to receive(:spam?).and_return(true)
          end
        end
@@ -1274,11 +1274,11 @@ RSpec.describe Projects::IssuesController do
       end
     end

-    context 'when SpamVerdictService requires recaptcha' do
+    context 'when an issue is identified as spam and requires recaptcha' do
       context 'when captcha is not verified' do
         before do
-          expect_next_instance_of(Spam::SpamVerdictService) do |verdict_service|
-            expect(verdict_service).to receive(:execute).and_return(CONDITIONAL_ALLOW)
+          allow_next_instance_of(Spam::AkismetService) do |akismet_service|
+            allow(akismet_service).to receive(:spam?).and_return(true)
          end
        end
...
# frozen_string_literal: true
require 'spec_helper'
RSpec.describe 'Spam detection on issue creation', :js do
include StubENV
let(:project) { create(:project, :public) }
let(:user) { create(:user) }
include_context 'includes Spam constants'
before do
stub_env('IN_MEMORY_APPLICATION_SETTINGS', 'false')
Gitlab::CurrentSettings.update!(
akismet_enabled: true,
akismet_api_key: 'testkey',
spam_check_api_key: 'testkey',
recaptcha_enabled: true,
recaptcha_site_key: 'test site key',
recaptcha_private_key: 'test private key'
)
project.add_maintainer(user)
sign_in(user)
visit new_project_issue_path(project)
fill_in 'issue_title', with: 'issue title'
fill_in 'issue_description', with: 'issue description'
end
shared_examples 'disallows issue creation' do
it 'disallows issue creation' do
click_button 'Create issue'
expect(page).to have_content('discarded')
expect(page).not_to have_css('.recaptcha')
expect(page).not_to have_content('issue title')
end
end
shared_examples 'allows issue creation with CAPTCHA' do
it 'allows issue creation' do
click_button 'Create issue'
# it is impossible to exercise reCAPTCHA automatically, so there is no way to fill in the CAPTCHA here
# reCAPTCHA verification is skipped in the test environment and always returns true
expect(page).not_to have_content('issue title')
expect(page).to have_css('.recaptcha')
click_button 'Create issue'
expect(page.find('.issue-details h1.title')).to have_content('issue title')
expect(page.find('.issue-details .description')).to have_content('issue description')
end
end
shared_examples 'allows issue creation without CAPTCHA' do
it 'allows issue creation without need to solve CAPTCHA' do
click_button 'Create issue'
expect(page).not_to have_css('.recaptcha')
expect(page.find('.issue-details h1.title')).to have_content('issue title')
expect(page.find('.issue-details .description')).to have_content('issue description')
end
end
shared_examples 'creates a spam_log record' do
it 'creates a spam_log record' do
expect { click_button 'Create issue' }
.to log_spam(title: 'issue title', description: 'issue description', user_id: user.id, noteable_type: 'Issue')
end
end
shared_examples 'does not create a spam_log record' do
it 'does not create a spam_log record' do
expect { click_button 'Create issue' }
.not_to log_spam(title: 'issue title', description: 'issue description', user_id: user.id, noteable_type: 'Issue')
end
end
shared_context 'when spammable is identified as possible spam' do
before do
allow_next_instance_of(Spam::AkismetService) do |akismet_service|
allow(akismet_service).to receive(:spam?).and_return(true)
end
end
end
shared_context 'when spammable is not identified as possible spam' do
before do
allow_next_instance_of(Spam::AkismetService) do |akismet_service|
allow(akismet_service).to receive(:spam?).and_return(false)
end
end
end
shared_context 'when CAPTCHA is enabled' do
before do
stub_application_setting(recaptcha_enabled: true)
end
end
shared_context 'when CAPTCHA is not enabled' do
before do
stub_application_setting(recaptcha_enabled: false)
end
end
shared_context 'when allow_possible_spam feature flag is true' do
before do
stub_feature_flags(allow_possible_spam: true)
end
end
shared_context 'when allow_possible_spam feature flag is false' do
before do
stub_feature_flags(allow_possible_spam: false)
end
end
describe 'spam handling' do
# verdict, spam_flagged, captcha_enabled, allow_possible_spam_flag, creates_spam_log
# TODO: Add example for BLOCK_USER verdict when we add support for testing SpamCheck - see https://gitlab.com/groups/gitlab-org/-/epics/5527#lacking-coverage-for-spamcheck-vs-akismet
# DISALLOW, true, false, false, true
# CONDITIONAL_ALLOW, true, true, false, true
# OVERRIDE_VIA_ALLOW_POSSIBLE_SPAM, true, true, true, true
# OVERRIDE_VIA_ALLOW_POSSIBLE_SPAM, true, false, true, true
# ALLOW, false, true, false, false
# TODO: Add example for NOOP verdict when we add support for testing SpamCheck - see https://gitlab.com/groups/gitlab-org/-/epics/5527#lacking-coverage-for-spamcheck-vs-akismet
context 'DISALLOW: spam_flagged=true, captcha_enabled=false, allow_possible_spam=false' do
include_context 'when spammable is identified as possible spam'
include_context 'when CAPTCHA is not enabled'
include_context 'when allow_possible_spam feature flag is false'
it_behaves_like 'disallows issue creation'
it_behaves_like 'creates a spam_log record'
end
context 'CONDITIONAL_ALLOW: spam_flagged=true, captcha_enabled=true, allow_possible_spam=false' do
include_context 'when spammable is identified as possible spam'
include_context 'when CAPTCHA is enabled'
include_context 'when allow_possible_spam feature flag is false'
it_behaves_like 'allows issue creation with CAPTCHA'
it_behaves_like 'creates a spam_log record'
end
context 'OVERRIDE_VIA_ALLOW_POSSIBLE_SPAM: spam_flagged=true, captcha_enabled=true, allow_possible_spam=true' do
include_context 'when spammable is identified as possible spam'
include_context 'when CAPTCHA is enabled'
include_context 'when allow_possible_spam feature flag is true'
it_behaves_like 'allows issue creation without CAPTCHA'
it_behaves_like 'creates a spam_log record'
end
context 'OVERRIDE_VIA_ALLOW_POSSIBLE_SPAM: spam_flagged=true, captcha_enabled=false, allow_possible_spam=true' do
include_context 'when spammable is identified as possible spam'
include_context 'when CAPTCHA is not enabled'
include_context 'when allow_possible_spam feature flag is true'
it_behaves_like 'allows issue creation without CAPTCHA'
it_behaves_like 'creates a spam_log record'
end
context 'ALLOW: spam_flagged=false, captcha_enabled=true, allow_possible_spam=false' do
include_context 'when spammable is not identified as possible spam'
include_context 'when CAPTCHA is enabled'
include_context 'when allow_possible_spam feature flag is false'
it_behaves_like 'allows issue creation without CAPTCHA'
it_behaves_like 'does not create a spam_log record'
end
end
end
# frozen_string_literal: true
require 'spec_helper'
RSpec.describe 'New issue', :js do
include StubENV
let(:project) { create(:project, :public) }
let(:user) { create(:user)}
before do
stub_env('IN_MEMORY_APPLICATION_SETTINGS', 'false')
Gitlab::CurrentSettings.update!(
akismet_enabled: true,
akismet_api_key: 'testkey',
spam_check_api_key: 'testkey',
recaptcha_enabled: true,
recaptcha_site_key: 'test site key',
recaptcha_private_key: 'test private key'
)
project.add_maintainer(user)
sign_in(user)
end
context 'when SpamVerdictService disallows' do
include_context 'includes Spam constants'
before do
allow_next_instance_of(Spam::SpamVerdictService) do |verdict_service|
allow(verdict_service).to receive(:execute).and_return(DISALLOW)
end
visit new_project_issue_path(project)
end
context 'when allow_possible_spam feature flag is false' do
before do
stub_feature_flags(allow_possible_spam: false)
fill_in 'issue_title', with: 'issue title'
fill_in 'issue_description', with: 'issue description'
end
it 'rejects issue creation' do
click_button 'Create issue'
expect(page).to have_content('discarded')
expect(page).not_to have_content('potential spam')
expect(page).not_to have_content('issue title')
end
it 'creates a spam log record' do
expect { click_button 'Create issue' }
.to log_spam(title: 'issue title', description: 'issue description', user_id: user.id, noteable_type: 'Issue')
end
end
context 'when allow_possible_spam feature flag is true' do
before do
fill_in 'issue_title', with: 'issue title'
fill_in 'issue_description', with: 'issue description'
end
it 'allows issue creation' do
click_button 'Create issue'
expect(page.find('.issue-details h1.title')).to have_content('issue title')
expect(page.find('.issue-details .description')).to have_content('issue description')
end
it 'creates a spam log record' do
expect { click_button 'Create issue' }
.to log_spam(title: 'issue title', description: 'issue description', user_id: user.id, noteable_type: 'Issue')
end
end
end
context 'when SpamVerdictService requires recaptcha' do
include_context 'includes Spam constants'
before do
allow_next_instance_of(Spam::SpamVerdictService) do |verdict_service|
allow(verdict_service).to receive(:execute).and_return(CONDITIONAL_ALLOW)
end
visit new_project_issue_path(project)
end
context 'when recaptcha is enabled' do
before do
stub_application_setting(recaptcha_enabled: true)
end
context 'when allow_possible_spam feature flag is false' do
before do
stub_feature_flags(allow_possible_spam: false)
end
it 'creates an issue after solving reCaptcha' do
fill_in 'issue_title', with: 'issue title'
fill_in 'issue_description', with: 'issue description'
click_button 'Create issue'
# it is impossible to test reCAPTCHA automatically and there is no possibility to fill in recaptcha
# reCAPTCHA verification is skipped in test environment and it always returns true
expect(page).not_to have_content('issue title')
expect(page).to have_css('.recaptcha')
click_button 'Create issue'
expect(page.find('.issue-details h1.title')).to have_content('issue title')
expect(page.find('.issue-details .description')).to have_content('issue description')
end
end
context 'when allow_possible_spam feature flag is true' do
before do
fill_in 'issue_title', with: 'issue title'
fill_in 'issue_description', with: 'issue description'
end
it 'creates an issue without a need to solve reCAPTCHA' do
click_button 'Create issue'
expect(page).not_to have_css('.recaptcha')
expect(page.find('.issue-details h1.title')).to have_content('issue title')
expect(page.find('.issue-details .description')).to have_content('issue description')
end
it 'creates a spam log record' do
expect { click_button 'Create issue' }
.to log_spam(title: 'issue title', description: 'issue description', user_id: user.id, noteable_type: 'Issue')
end
end
end
context 'when reCAPTCHA is not enabled' do
before do
stub_application_setting(recaptcha_enabled: false)
end
context 'when allow_possible_spam feature flag is true' do
before do
fill_in 'issue_title', with: 'issue title'
fill_in 'issue_description', with: 'issue description'
end
it 'creates an issue without a need to solve reCaptcha' do
click_button 'Create issue'
expect(page).not_to have_css('.recaptcha')
expect(page.find('.issue-details h1.title')).to have_content('issue title')
expect(page.find('.issue-details .description')).to have_content('issue description')
end
it 'creates a spam log record' do
expect { click_button 'Create issue' }
.to log_spam(title: 'issue title', description: 'issue description', user_id: user.id, noteable_type: 'Issue')
end
end
end
end
context 'when the SpamVerdictService allows' do
include_context 'includes Spam constants'
before do
allow_next_instance_of(Spam::SpamVerdictService) do |verdict_service|
allow(verdict_service).to receive(:execute).and_return(ALLOW)
end
visit new_project_issue_path(project)
end
it 'creates an issue' do
fill_in 'issue_title', with: 'issue title'
fill_in 'issue_description', with: 'issue description'
click_button 'Create issue'
expect(page.find('.issue-details h1.title')).to have_content('issue title')
expect(page.find('.issue-details .description')).to have_content('issue description')
end
end
end
@@ -199,8 +199,8 @@ RSpec.describe API::Issues do
         expect(spam_service).to receive_messages(check_for_spam?: true)
       end

-      expect_next_instance_of(Spam::SpamVerdictService) do |verdict_service|
-        expect(verdict_service).to receive(:execute).and_return(DISALLOW)
+      allow_next_instance_of(Spam::AkismetService) do |akismet_service|
+        allow(akismet_service).to receive(:spam?).and_return(true)
       end
     end
...
@@ -170,26 +170,13 @@ RSpec.describe Spam::SpamActionService do
        allow(fake_verdict_service).to receive(:execute).and_return(DISALLOW)
      end

-     context 'when allow_possible_spam feature flag is false' do
-       before do
-         stub_feature_flags(allow_possible_spam: false)
-       end
-
-       it 'marks as spam' do
-         response = subject
-
-         expect(response.message).to match(expected_service_check_response_message)
-         expect(issue).to be_spam
-       end
-     end
-
-     context 'when allow_possible_spam feature flag is true' do
-       it 'does not mark as spam' do
-         response = subject
-
-         expect(response.message).to match(expected_service_check_response_message)
-         expect(issue).not_to be_spam
-       end
+     it_behaves_like 'creates a spam log'
+
+     it 'marks as spam' do
+       response = subject
+
+       expect(response.message).to match(expected_service_check_response_message)
+       expect(issue).to be_spam
      end
    end
@@ -198,26 +185,13 @@ RSpec.describe Spam::SpamActionService do
        allow(fake_verdict_service).to receive(:execute).and_return(BLOCK_USER)
      end

-     context 'when allow_possible_spam feature flag is false' do
-       before do
-         stub_feature_flags(allow_possible_spam: false)
-       end
-
-       it 'marks as spam' do
-         response = subject
-
-         expect(response.message).to match(expected_service_check_response_message)
-         expect(issue).to be_spam
-       end
-     end
-
-     context 'when allow_possible_spam feature flag is true' do
-       it 'does not mark as spam' do
-         response = subject
-
-         expect(response.message).to match(expected_service_check_response_message)
-         expect(issue).not_to be_spam
-       end
+     it_behaves_like 'creates a spam log'
+
+     it 'marks as spam' do
+       response = subject
+
+       expect(response.message).to match(expected_service_check_response_message)
+       expect(issue).to be_spam
      end
    end
@@ -226,37 +200,42 @@ RSpec.describe Spam::SpamActionService do
        allow(fake_verdict_service).to receive(:execute).and_return(CONDITIONAL_ALLOW)
      end

-     context 'when allow_possible_spam feature flag is false' do
-       before do
-         stub_feature_flags(allow_possible_spam: false)
-       end
-
-       it_behaves_like 'creates a spam log'
-
-       it 'does not mark as spam' do
-         response = subject
-
-         expect(response.message).to match(expected_service_check_response_message)
-         expect(issue).not_to be_spam
-       end
-
-       it 'marks as needing reCAPTCHA' do
-         response = subject
-
-         expect(response.message).to match(expected_service_check_response_message)
-         expect(issue).to be_needs_recaptcha
-       end
-     end
-
-     context 'when allow_possible_spam feature flag is true' do
-       it_behaves_like 'creates a spam log'
-
-       it 'does not mark as needing reCAPTCHA' do
-         response = subject
-
-         expect(response.message).to match(expected_service_check_response_message)
-         expect(issue.needs_recaptcha).to be_falsey
-       end
+     it_behaves_like 'creates a spam log'
+
+     it 'does not mark as spam' do
+       response = subject
+
+       expect(response.message).to match(expected_service_check_response_message)
+       expect(issue).not_to be_spam
+     end
+
+     it 'marks as needing reCAPTCHA' do
+       response = subject
+
+       expect(response.message).to match(expected_service_check_response_message)
+       expect(issue).to be_needs_recaptcha
+     end
+   end
+
+   context 'when spam verdict service returns OVERRIDE_VIA_ALLOW_POSSIBLE_SPAM' do
+     before do
+       allow(fake_verdict_service).to receive(:execute).and_return(OVERRIDE_VIA_ALLOW_POSSIBLE_SPAM)
+     end
+
+     it_behaves_like 'creates a spam log'
+
+     it 'does not mark as spam' do
+       response = subject
+
+       expect(response.message).to match(expected_service_check_response_message)
+       expect(issue).not_to be_spam
+     end
+
+     it 'does not mark as needing CAPTCHA' do
+       response = subject
+
+       expect(response.message).to match(expected_service_check_response_message)
+       expect(issue).not_to be_needs_recaptcha
+     end
     end
   end
...
@@ -27,6 +27,10 @@ RSpec.describe Spam::SpamVerdictService do
     extra_attributes
   end

+  before do
+    stub_feature_flags(allow_possible_spam: false)
+  end
+
   describe '#execute' do
     subject { service.execute }
@@ -114,6 +118,32 @@ RSpec.describe Spam::SpamVerdictService do
       end
     end

+    context 'if allow_possible_spam flag is true' do
+      before do
+        stub_feature_flags(allow_possible_spam: true)
+      end
+
+      context 'and a service returns a verdict that should be overridden' do
+        before do
+          allow(service).to receive(:spamcheck_verdict).and_return([BLOCK_USER, attribs])
+        end
+
+        it 'overrides and renders the override verdict' do
+          expect(subject).to eq OVERRIDE_VIA_ALLOW_POSSIBLE_SPAM
+        end
+      end
+
+      context 'and a service returns a verdict that does not need to be overridden' do
+        before do
+          allow(service).to receive(:spamcheck_verdict).and_return([ALLOW, attribs])
+        end
+
+        it 'does not override and renders the original verdict' do
+          expect(subject).to eq ALLOW
+        end
+      end
+    end
+
     context 'records metrics' do
       let(:histogram) { instance_double(Prometheus::Client::Histogram) }
...
@@ -2,10 +2,11 @@
 RSpec.shared_context 'includes Spam constants' do
   before do
-    stub_const('CONDITIONAL_ALLOW', Spam::SpamConstants::CONDITIONAL_ALLOW)
+    stub_const('BLOCK_USER', Spam::SpamConstants::BLOCK_USER)
     stub_const('DISALLOW', Spam::SpamConstants::DISALLOW)
+    stub_const('CONDITIONAL_ALLOW', Spam::SpamConstants::CONDITIONAL_ALLOW)
+    stub_const('OVERRIDE_VIA_ALLOW_POSSIBLE_SPAM', Spam::SpamConstants::OVERRIDE_VIA_ALLOW_POSSIBLE_SPAM)
     stub_const('ALLOW', Spam::SpamConstants::ALLOW)
-    stub_const('BLOCK_USER', Spam::SpamConstants::BLOCK_USER)
     stub_const('NOOP', Spam::SpamConstants::NOOP)
   end
 end