Commit 9074ea25 authored by Peter Leitzen

Merge branch '214625-save-prometheus-alert-in-database' into 'master'

Persist Prometheus alerts in the DB

Closes #214625

See merge request gitlab-org/gitlab!30505
parents ac9d5394 f5d57755
@@ -6,8 +6,8 @@ module Types
graphql_name 'AlertManagementStatus'
description 'Alert status values'
::AlertManagement::Alert.statuses.keys.each do |status|
value status.upcase, value: status, description: "#{status.titleize} status"
::AlertManagement::Alert::STATUSES.each do |name, value|
value name.upcase, value: value, description: "#{name.to_s.titleize} status"
end
end
end
......
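For reference, the enum loop above now iterates the STATUSES hash defined on AlertManagement::Alert (next hunk), so each GraphQL value carries the raw integer instead of the enum string. Roughly what it expands to, assuming the four statuses below:

value :TRIGGERED,    value: 0, description: 'Triggered status'
value :ACKNOWLEDGED, value: 1, description: 'Acknowledged status'
value :RESOLVED,     value: 2, description: 'Resolved status'
value :IGNORED,      value: 3, description: 'Ignored status'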
@@ -6,6 +6,20 @@ module AlertManagement
include ShaAttribute
include Sortable
STATUSES = {
triggered: 0,
acknowledged: 1,
resolved: 2,
ignored: 3
}.freeze
STATUS_EVENTS = {
triggered: :trigger,
acknowledged: :acknowledge,
resolved: :resolve,
ignored: :ignore
}.freeze
belongs_to :project
belongs_to :issue, optional: true
has_internal_id :iid, scope: :project, init: ->(s) { s.project.alert_management_alerts.maximum(:iid) }
@@ -37,14 +51,49 @@ module AlertManagement
unknown: 5
}
enum status: {
triggered: 0,
acknowledged: 1,
resolved: 2,
ignored: 3
}
state_machine :status, initial: :triggered do
state :triggered, value: STATUSES[:triggered]
state :acknowledged, value: STATUSES[:acknowledged]
state :resolved, value: STATUSES[:resolved] do
validates :ended_at, presence: true
end
state :ignored, value: STATUSES[:ignored]
state :triggered, :acknowledged, :ignored do
validates :ended_at, absence: true
end
event :trigger do
transition any => :triggered
end
event :acknowledge do
transition any => :acknowledged
end
event :resolve do
transition any => :resolved
end
event :ignore do
transition any => :ignored
end
before_transition to: [:triggered, :acknowledged, :ignored] do |alert, _transition|
alert.ended_at = nil
end
before_transition to: :resolved do |alert, transition|
ended_at = transition.args.first
alert.ended_at = ended_at || Time.current
end
end
scope :for_iid, -> (iid) { where(iid: iid) }
scope :for_fingerprint, -> (project, fingerprint) { where(project: project, fingerprint: fingerprint) }
scope :order_start_time, -> (sort_order) { order(started_at: sort_order) }
scope :order_end_time, -> (sort_order) { order(ended_at: sort_order) }
@@ -69,14 +118,6 @@ module AlertManagement
end
end
def fingerprint=(value)
if value.blank?
super(nil)
else
super(Digest::SHA1.hexdigest(value.to_s))
end
end
def details
details_payload = payload.except(*attributes.keys)
......
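The state_machine block above replaces the Rails status enum: it stores the same integers in the status column, derives one event per status, and keeps ended_at consistent with the state. A rough console sketch, assuming the model as defined above:

alert = AlertManagement::Alert.last
alert.triggered?            # => true, the initial state
alert.acknowledge           # fires :acknowledge and saves; ended_at stays nil
alert.resolve(Time.current) # the argument reaches before_transition via transition.args
alert.status                # => 2, the raw STATUSES[:resolved] integer
alert.trigger               # back to triggered; the callback resets ended_at to nil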
# frozen_string_literal: true
module AlertManagement
class ProcessPrometheusAlertService < BaseService
include Gitlab::Utils::StrongMemoize
def execute
return bad_request unless parsed_alert.valid?
process_alert_management_alert
ServiceResponse.success
end
private
delegate :firing?, :resolved?, :gitlab_fingerprint, :ends_at, to: :parsed_alert
def parsed_alert
strong_memoize(:parsed_alert) do
Gitlab::Alerting::Alert.new(project: project, payload: params)
end
end
def process_alert_management_alert
process_firing_alert_management_alert if firing?
process_resolved_alert_management_alert if resolved?
end
def process_firing_alert_management_alert
if am_alert.present?
reset_alert_management_alert_status
else
create_alert_management_alert
end
end
def reset_alert_management_alert_status
return if am_alert.trigger
logger.warn(
message: 'Unable to update AlertManagement::Alert status to triggered',
project_id: project.id,
alert_id: am_alert.id
)
end
def create_alert_management_alert
am_alert = AlertManagement::Alert.new(am_alert_params.merge(ended_at: nil))
return if am_alert.save
logger.warn(
message: 'Unable to create AlertManagement::Alert',
project_id: project.id,
alert_errors: am_alert.errors.messages
)
end
def am_alert_params
Gitlab::AlertManagement::AlertParams.from_prometheus_alert(project: project, parsed_alert: parsed_alert)
end
def process_resolved_alert_management_alert
return if am_alert.blank?
return if am_alert.resolve(ends_at)
logger.warn(
message: 'Unable to update AlertManagement::Alert status to resolved',
project_id: project.id,
alert_id: am_alert.id
)
end
def logger
@logger ||= Gitlab::AppLogger
end
def am_alert
@am_alert ||= AlertManagement::Alert.for_fingerprint(project, gitlab_fingerprint).first
end
def bad_request
ServiceResponse.error(message: 'Bad Request', http_status: :bad_request)
end
end
end
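Taken together, the new service parses the raw Prometheus payload, looks an existing alert up by fingerprint, and either re-triggers/creates it (firing) or resolves it. A minimal invocation sketch, assuming a payload shaped like the ones in the specs further down:

result = AlertManagement::ProcessPrometheusAlertService
  .new(project, nil, payload) # the second argument (current user) is unused here
  .execute

result.success?    # => true even when the alert failed to save; those cases are only logged
result.http_status # => :bad_request when the payload fails Gitlab::Alerting::Alert#valid?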
@@ -8,9 +8,9 @@ module AlertManagement
end
def execute
return error('Invalid status') unless AlertManagement::Alert.statuses.key?(status.to_s)
return error('Invalid status') unless AlertManagement::Alert::STATUSES.key?(status.to_sym)
alert.status = status
alert.status_event = AlertManagement::Alert::STATUS_EVENTS[status.to_sym]
if alert.save
success
......
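Rather than assigning the enum value directly, the update service now maps the requested status to its state-machine event and lets state_machines-activerecord fire it on save through the generated status_event= attribute, so the ended_at rules above are enforced. A sketch of the mechanism:

alert.status_event = AlertManagement::Alert::STATUS_EVENTS[:resolved] # => :resolve
alert.save # runs the :resolve transition, including its callbacks and validations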
@@ -12,6 +12,7 @@ module Projects
return unprocessable_entity unless valid_version?
return unauthorized unless valid_alert_manager_token?(token)
process_prometheus_alerts
persist_events
send_alert_email if send_email?
process_incident_issues if process_issues?
@@ -115,6 +116,16 @@
end
end
def process_prometheus_alerts
return unless Feature.enabled?(:alert_management_minimal, project)
alerts.each do |alert|
AlertManagement::ProcessPrometheusAlertService
.new(project, nil, alert.to_h)
.execute
end
end
def persist_events
CreateEventsService.new(project, nil, params).execute
end
......
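The new processing path is gated per project behind the alert_management_minimal feature flag. A sketch of toggling it from a Rails console, assuming GitLab's standard Feature API:

Feature.enable(:alert_management_minimal, project)   # opt a single project in
Feature.enabled?(:alert_management_minimal, project) # => true for that project only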
@@ -3,6 +3,10 @@
module Gitlab
module AlertManagement
class AlertParams
MONITORING_TOOLS = {
prometheus: 'Prometheus'
}.freeze
def self.from_generic_alert(project:, payload:)
parsed_payload = Gitlab::Alerting::NotificationPayloadParser.call(payload).with_indifferent_access
annotations = parsed_payload[:annotations]
@@ -18,6 +22,19 @@ module Gitlab
started_at: parsed_payload['startsAt']
}
end
def self.from_prometheus_alert(project:, parsed_alert:)
{
project_id: project.id,
title: parsed_alert.title,
description: parsed_alert.description,
monitoring_tool: MONITORING_TOOLS[:prometheus],
payload: parsed_alert.payload,
started_at: parsed_alert.starts_at,
ended_at: parsed_alert.ends_at,
fingerprint: parsed_alert.gitlab_fingerprint
}
end
end
end
end
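from_prometheus_alert mirrors the existing from_generic_alert but takes an already-parsed Gitlab::Alerting::Alert, so the fingerprint and timestamps come from the parser rather than the raw payload. A usage sketch matching what ProcessPrometheusAlertService does for a firing alert:

parsed = Gitlab::Alerting::Alert.new(project: project, payload: payload)
params = Gitlab::AlertManagement::AlertParams
  .from_prometheus_alert(project: project, parsed_alert: parsed)

AlertManagement::Alert.new(params.merge(ended_at: nil)) # firing alerts carry no end time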
@@ -105,6 +105,10 @@ module Gitlab
metric_id.present?
end
def gitlab_fingerprint
Digest::SHA1.hexdigest(plain_gitlab_fingerprint)
end
def valid?
payload.respond_to?(:dig) && project && title && starts_at
end
@@ -115,6 +119,14 @@
private
def plain_gitlab_fingerprint
if gitlab_managed?
[metric_id, starts_at].join('/')
else # self managed
[starts_at, title, full_query].join('/')
end
end
def parse_environment_from_payload
environment_name = payload&.dig('labels', 'gitlab_environment_name')
......
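This replaces the fingerprint= setter removed from the model earlier in the diff: hashing now happens once, in the parsed alert, and the model stores the 40-character digest as-is. A worked sketch with illustrative values:

require 'digest'

# GitLab-managed alert (metric_id present); 42 and the timestamp are made up:
plain = [42, '2020-04-27T10:10:22Z'].join('/')
Digest::SHA1.hexdigest(plain) # => a stable 40-character hex digest

# Self-managed alerts hash [starts_at, title, full_query].join('/') instead.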
@@ -31,8 +31,23 @@ FactoryBot.define do
ended_at { Time.current }
end
trait :without_ended_at do
ended_at { nil }
end
trait :acknowledged do
status { AlertManagement::Alert::STATUSES[:acknowledged] }
without_ended_at
end
trait :resolved do
status { :resolved }
status { AlertManagement::Alert::STATUSES[:resolved] }
with_ended_at
end
trait :ignored do
status { AlertManagement::Alert::STATUSES[:ignored] }
without_ended_at
end
trait :all_fields do
@@ -41,7 +56,6 @@ FactoryBot.define do
with_service
with_monitoring_tool
with_host
with_ended_at
end
end
end
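Because ended_at is now validated against the status, the factory traits bundle the two so every generated record is valid. A usage sketch:

create(:alert_management_alert)            # triggered, ended_at nil
create(:alert_management_alert, :resolved) # STATUSES[:resolved] plus with_ended_at
create(:alert_management_alert, :ignored)  # STATUSES[:ignored], ended_at forced to nil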
@@ -5,8 +5,8 @@ require 'spec_helper'
describe AlertManagement::AlertsFinder, '#execute' do
let_it_be(:current_user) { create(:user) }
let_it_be(:project) { create(:project) }
let_it_be(:alert_1) { create(:alert_management_alert, project: project, ended_at: 1.year.ago, events: 2, severity: :high, status: :resolved) }
let_it_be(:alert_2) { create(:alert_management_alert, project: project, events: 1, severity: :critical, status: :ignored) }
let_it_be(:alert_1) { create(:alert_management_alert, :resolved, project: project, ended_at: 1.year.ago, events: 2, severity: :high) }
let_it_be(:alert_2) { create(:alert_management_alert, :ignored, project: project, events: 1, severity: :critical) }
let_it_be(:alert_3) { create(:alert_management_alert) }
let(:params) { {} }
@@ -155,10 +155,10 @@ describe AlertManagement::AlertsFinder, '#execute' do
end
context 'when sorting by status' do
let_it_be(:alert_triggered) { create(:alert_management_alert, project: project, status: :triggered) }
let_it_be(:alert_acknowledged) { create(:alert_management_alert, project: project, status: :acknowledged) }
let_it_be(:alert_resolved) { create(:alert_management_alert, project: project, status: :resolved) }
let_it_be(:alert_ignored) { create(:alert_management_alert, project: project, status: :ignored) }
let_it_be(:alert_triggered) { create(:alert_management_alert, project: project) }
let_it_be(:alert_acknowledged) { create(:alert_management_alert, :acknowledged, project: project) }
let_it_be(:alert_resolved) { create(:alert_management_alert, :resolved, project: project) }
let_it_be(:alert_ignored) { create(:alert_management_alert, :ignored, project: project) }
context 'sorts alerts ascending' do
let(:params) { { sort: 'status_asc' } }
......
@@ -20,7 +20,7 @@ describe Mutations::AlertManagement::UpdateAlertStatus do
end
it 'changes the status' do
expect { resolve }.to change { alert.reload.status }.from(alert.status).to(new_status)
expect { resolve }.to change { alert.reload.acknowledged? }.to(true)
end
it 'returns the alert with no errors' do
......
@@ -7,8 +7,8 @@ describe Resolvers::AlertManagementAlertResolver do
let_it_be(:current_user) { create(:user) }
let_it_be(:project) { create(:project) }
let_it_be(:alert_1) { create(:alert_management_alert, project: project, ended_at: 1.year.ago, events: 2, severity: :high, status: :resolved) }
let_it_be(:alert_2) { create(:alert_management_alert, project: project, events: 1, severity: :critical, status: :ignored) }
let_it_be(:alert_1) { create(:alert_management_alert, :resolved, project: project, ended_at: 1.year.ago, events: 2, severity: :high) }
let_it_be(:alert_2) { create(:alert_management_alert, :ignored, project: project, events: 1, severity: :critical) }
let_it_be(:alert_other_proj) { create(:alert_management_alert) }
let(:args) { {} }
......
@@ -5,7 +5,20 @@ require 'spec_helper'
describe GitlabSchema.types['AlertManagementStatus'] do
specify { expect(described_class.graphql_name).to eq('AlertManagementStatus') }
it 'exposes all the severity values' do
expect(described_class.values.keys).to include(*%w[TRIGGERED ACKNOWLEDGED RESOLVED IGNORED])
describe 'statuses' do
using RSpec::Parameterized::TableSyntax
where(:status_name, :status_value) do
'TRIGGERED' | 0
'ACKNOWLEDGED' | 1
'RESOLVED' | 2
'IGNORED' | 3
end
with_them do
it 'exposes a status with the correct value' do
expect(described_class.values[status_name].value).to eq(status_value)
end
end
end
end
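RSpec::Parameterized's table syntax used here expands into one example per row, binding each column as a let. A minimal sketch of the pattern, detached from this spec:

where(:input, :doubled) do
  1 | 2
  3 | 6
end

with_them do
  it { expect(input * 2).to eq(doubled) }
end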
@@ -42,4 +42,43 @@ describe Gitlab::AlertManagement::AlertParams do
end
end
end
describe '.from_prometheus_alert' do
let(:payload) do
{
'status' => 'firing',
'labels' => {
'alertname' => 'GitalyFileServerDown',
'channel' => 'gitaly',
'pager' => 'pagerduty',
'severity' => 's1'
},
'annotations' => {
'description' => 'Alert description',
'runbook' => 'troubleshooting/gitaly-down.md',
'title' => 'Alert title'
},
'startsAt' => '2020-04-27T10:10:22.265949279Z',
'endsAt' => '0001-01-01T00:00:00Z',
'generatorURL' => 'http://8d467bd4607a:9090/graph?g0.expr=vector%281%29&g0.tab=1',
'fingerprint' => 'b6ac4d42057c43c1'
}
end
let(:parsed_alert) { Gitlab::Alerting::Alert.new(project: project, payload: payload) }
subject { described_class.from_prometheus_alert(project: project, parsed_alert: parsed_alert) }
it 'returns Alert-compatible params' do
is_expected.to eq(
project_id: project.id,
title: 'Alert title',
description: 'Alert description',
monitoring_tool: 'Prometheus',
payload: payload,
started_at: parsed_alert.starts_at,
ended_at: parsed_alert.ends_at,
fingerprint: parsed_alert.gitlab_fingerprint
)
end
end
end
@@ -246,6 +246,30 @@ describe Gitlab::Alerting::Alert do
it_behaves_like 'parse payload', 'annotations/gitlab_incident_markdown'
end
describe '#gitlab_fingerprint' do
subject { alert.gitlab_fingerprint }
context 'when the alert is a GitLab managed alert' do
include_context 'gitlab alert'
it 'returns a fingerprint' do
plain_fingerprint = [alert.metric_id, alert.starts_at].join('/')
is_expected.to eq(Digest::SHA1.hexdigest(plain_fingerprint))
end
end
context 'when the alert is from self managed Prometheus' do
include_context 'full query'
it 'returns a fingerprint' do
plain_fingerprint = [alert.starts_at, alert.title, alert.full_query].join('/')
is_expected.to eq(Digest::SHA1.hexdigest(plain_fingerprint))
end
end
end
describe '#valid?' do
before do
payload.update(
......
@@ -20,6 +20,62 @@ describe AlertManagement::Alert do
it { is_expected.to validate_length_of(:service).is_at_most(100) }
it { is_expected.to validate_length_of(:monitoring_tool).is_at_most(100) }
context 'when status is triggered' do
context 'when ended_at is blank' do
subject { build(:alert_management_alert) }
it { is_expected.to be_valid }
end
context 'when ended_at is present' do
subject { build(:alert_management_alert, ended_at: Time.current) }
it { is_expected.to be_invalid }
end
end
context 'when status is acknowledged' do
context 'when ended_at is blank' do
subject { build(:alert_management_alert, :acknowledged) }
it { is_expected.to be_valid }
end
context 'when ended_at is present' do
subject { build(:alert_management_alert, :acknowledged, ended_at: Time.current) }
it { is_expected.to be_invalid }
end
end
context 'when status is resolved' do
context 'when ended_at is blank' do
subject { build(:alert_management_alert, :resolved, ended_at: nil) }
it { is_expected.to be_invalid }
end
context 'when ended_at is present' do
subject { build(:alert_management_alert, :resolved, ended_at: Time.current) }
it { is_expected.to be_valid }
end
end
context 'when status is ignored' do
context 'when ended_at is blank' do
subject { build(:alert_management_alert, :ignored) }
it { is_expected.to be_valid }
end
context 'when ended_at is present' do
subject { build(:alert_management_alert, :ignored, ended_at: Time.current) }
it { is_expected.to be_invalid }
end
end
describe 'fingerprint' do
let_it_be(:fingerprint) { 'fingerprint' }
let_it_be(:existing_alert) { create(:alert_management_alert, fingerprint: fingerprint) }
@@ -64,57 +120,7 @@ describe AlertManagement::Alert do
{ critical: 0, high: 1, medium: 2, low: 3, info: 4, unknown: 5 }
end
let(:status_values) do
{ triggered: 0, acknowledged: 1, resolved: 2, ignored: 3 }
end
it { is_expected.to define_enum_for(:severity).with_values(severity_values) }
it { is_expected.to define_enum_for(:status).with_values(status_values) }
end
describe 'fingerprint setter' do
let(:alert) { build(:alert_management_alert) }
subject(:set_fingerprint) { alert.fingerprint = fingerprint }
let(:fingerprint) { 'test' }
it 'sets to the SHA1 of the value' do
expect { set_fingerprint }
.to change { alert.fingerprint }
.from(nil)
.to(Digest::SHA1.hexdigest(fingerprint))
end
describe 'testing length of 40' do
where(:input) do
[
'test',
'another test',
'a' * 1000,
12345
]
end
with_them do
let(:fingerprint) { input }
it 'sets the fingerprint to 40 chars' do
set_fingerprint
expect(alert.fingerprint.size).to eq(40)
end
end
end
context 'blank value given' do
let(:fingerprint) { '' }
it 'does not set the fingerprint' do
expect { set_fingerprint }
.not_to change { alert.fingerprint }
.from(nil)
end
end
end
describe '.for_iid' do
@@ -127,6 +133,18 @@ describe AlertManagement::Alert do
it { is_expected.to match_array(alert_1) }
end
describe '.for_fingerprint' do
let_it_be(:fingerprint) { SecureRandom.hex }
let_it_be(:project) { create(:project) }
let_it_be(:alert_1) { create(:alert_management_alert, project: project, fingerprint: fingerprint) }
let_it_be(:alert_2) { create(:alert_management_alert, project: project) }
let_it_be(:alert_3) { create(:alert_management_alert, fingerprint: fingerprint) }
subject { described_class.for_fingerprint(project, fingerprint) }
it { is_expected.to contain_exactly(alert_1) }
end
describe '.details' do
let(:payload) do
{
@@ -152,4 +170,81 @@ describe AlertManagement::Alert do
)
end
end
describe '#trigger' do
subject { alert.trigger }
context 'when alert is in triggered state' do
let(:alert) { create(:alert_management_alert) }
it 'does not change the alert status' do
expect { subject }.not_to change { alert.reload.status }
end
end
context 'when alert not in triggered state' do
let(:alert) { create(:alert_management_alert, :resolved) }
it 'changes the alert status to triggered' do
expect { subject }.to change { alert.triggered? }.to(true)
end
it 'resets ended at' do
expect { subject }.to change { alert.reload.ended_at }.to nil
end
end
end
describe '#acknowledge' do
subject { alert.acknowledge }
let(:alert) { create(:alert_management_alert, :resolved) }
it 'changes the alert status to acknowledged' do
expect { subject }.to change { alert.acknowledged? }.to(true)
end
it 'resets ended at' do
expect { subject }.to change { alert.reload.ended_at }.to nil
end
end
describe '#resolve' do
let!(:ended_at) { Time.current }
subject do
alert.ended_at = ended_at
alert.resolve
end
context 'when alert already resolved' do
let(:alert) { create(:alert_management_alert, :resolved) }
it 'does not change the alert status' do
expect { subject }.not_to change { alert.reload.status }
end
end
context 'when alert is not resolved' do
let(:alert) { create(:alert_management_alert) }
it 'changes alert status to "resolved"' do
expect { subject }.to change { alert.resolved? }.to(true)
end
end
end
describe '#ignore' do
subject { alert.ignore }
let(:alert) { create(:alert_management_alert, :resolved) }
it 'changes the alert status to ignored' do
expect { subject }.to change { alert.ignored? }.to(true)
end
it 'resets ended at' do
expect { subject }.to change { alert.reload.ended_at }.to nil
end
end
end
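One subtlety in the #resolve specs above: the before_transition callback overwrites any pre-assigned ended_at, so firing the event with no argument stamps Time.current. To pin the timestamp, pass it as the event argument, as ProcessPrometheusAlertService does with the payload's endsAt. A sketch, assuming the model above:

alert.ended_at = 2.days.ago
alert.resolve             # args are empty, so the callback sets ended_at to Time.current
alert.resolve(2.days.ago) # the argument wins: ended_at becomes exactly 2.days.ago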
@@ -7,7 +7,7 @@ describe 'getting Alert Management Alerts' do
let_it_be(:payload) { { 'custom' => { 'alert' => 'payload' } } }
let_it_be(:project) { create(:project, :repository) }
let_it_be(:current_user) { create(:user) }
let_it_be(:alert_1) { create(:alert_management_alert, :all_fields, project: project, severity: :low) }
let_it_be(:alert_1) { create(:alert_management_alert, :all_fields, :resolved, project: project, severity: :low) }
let_it_be(:alert_2) { create(:alert_management_alert, :all_fields, project: project, severity: :critical, payload: payload) }
let_it_be(:other_project_alert) { create(:alert_management_alert, :all_fields) }
@@ -49,27 +49,34 @@ describe 'getting Alert Management Alerts' do
end
let(:first_alert) { alerts.first }
let(:second_alert) { alerts.second }
it_behaves_like 'a working graphql query'
it { expect(alerts.size).to eq(2) }
it 'returns the correct properties of the alert' do
it 'returns the correct properties of the alerts' do
expect(first_alert).to include(
'iid' => alert_2.iid.to_s,
'title' => alert_2.title,
'description' => alert_2.description,
'severity' => alert_2.severity.upcase,
'status' => alert_2.status.upcase,
'status' => 'TRIGGERED',
'monitoringTool' => alert_2.monitoring_tool,
'service' => alert_2.service,
'hosts' => alert_2.hosts,
'eventCount' => alert_2.events,
'startedAt' => alert_2.started_at.strftime('%Y-%m-%dT%H:%M:%SZ'),
'endedAt' => alert_2.ended_at.strftime('%Y-%m-%dT%H:%M:%SZ'),
'endedAt' => nil,
'details' => { 'custom.alert' => 'payload' },
'createdAt' => alert_2.created_at.strftime('%Y-%m-%dT%H:%M:%SZ'),
'updatedAt' => alert_2.updated_at.strftime('%Y-%m-%dT%H:%M:%SZ')
)
expect(second_alert).to include(
'status' => 'RESOLVED',
'endedAt' => alert_1.ended_at.strftime('%Y-%m-%dT%H:%M:%SZ')
)
end
context 'with iid given' do
......
# frozen_string_literal: true
require 'spec_helper'
RSpec.describe AlertManagement::ProcessPrometheusAlertService do
let_it_be(:project) { create(:project) }
describe '#execute' do
subject { described_class.new(project, nil, payload).execute }
context 'when alert payload is valid' do
let(:parsed_alert) { Gitlab::Alerting::Alert.new(project: project, payload: payload) }
let(:payload) do
{
'status' => status,
'labels' => {
'alertname' => 'GitalyFileServerDown',
'channel' => 'gitaly',
'pager' => 'pagerduty',
'severity' => 's1'
},
'annotations' => {
'description' => 'Alert description',
'runbook' => 'troubleshooting/gitaly-down.md',
'title' => 'Alert title'
},
'startsAt' => '2020-04-27T10:10:22.265949279Z',
'endsAt' => '2020-04-27T10:20:22.265949279Z',
'generatorURL' => 'http://8d467bd4607a:9090/graph?g0.expr=vector%281%29&g0.tab=1',
'fingerprint' => 'b6ac4d42057c43c1'
}
end
context 'when Prometheus alert status is firing' do
let(:status) { 'firing' }
context 'when alert with the same fingerprint already exists' do
let!(:alert) { create(:alert_management_alert, :resolved, project: project, fingerprint: parsed_alert.gitlab_fingerprint) }
context 'when status can be changed' do
it 'changes status to triggered' do
expect { subject }.to change { alert.reload.triggered? }.to(true)
end
end
context 'when status change did not succeed' do
before do
allow(AlertManagement::Alert).to receive(:for_fingerprint).and_return([alert])
allow(alert).to receive(:trigger).and_return(false)
end
it 'writes a warning to the log' do
expect(Gitlab::AppLogger).to receive(:warn).with(
message: 'Unable to update AlertManagement::Alert status to triggered',
project_id: project.id,
alert_id: alert.id
)
subject
end
end
it { is_expected.to be_success }
end
context 'when alert does not exist' do
context 'when alert can be created' do
it 'creates a new alert' do
expect { subject }.to change { AlertManagement::Alert.where(project: project).count }.by(1)
end
end
context 'when alert cannot be created' do
let(:errors) { double(messages: { hosts: ['hosts array is over 255 chars'] })}
let(:am_alert) { instance_double(AlertManagement::Alert, save: false, errors: errors) }
before do
allow(AlertManagement::Alert).to receive(:new).and_return(am_alert)
end
it 'writes a warning to the log' do
expect(Gitlab::AppLogger).to receive(:warn).with(
message: 'Unable to create AlertManagement::Alert',
project_id: project.id,
alert_errors: { hosts: ['hosts array is over 255 chars'] }
)
subject
end
end
it { is_expected.to be_success }
end
end
context 'when Prometheus alert status is resolved' do
let(:status) { 'resolved' }
let!(:alert) { create(:alert_management_alert, project: project, fingerprint: parsed_alert.gitlab_fingerprint) }
context 'when status can be changed' do
it 'resolves an existing alert' do
expect { subject }.to change { alert.reload.resolved? }.to(true)
end
end
context 'when status change did not succeed' do
before do
allow(AlertManagement::Alert).to receive(:for_fingerprint).and_return([alert])
allow(alert).to receive(:resolve).and_return(false)
end
it 'writes a warning to the log' do
expect(Gitlab::AppLogger).to receive(:warn).with(
message: 'Unable to update AlertManagement::Alert status to resolved',
project_id: project.id,
alert_id: alert.id
)
subject
end
end
it { is_expected.to be_success }
end
end
context 'when alert payload is invalid' do
let(:payload) { {} }
it 'responds with bad_request' do
expect(subject).to be_error
expect(subject.http_status).to eq(:bad_request)
end
end
end
end
@@ -11,7 +11,7 @@ describe AlertManagement::UpdateAlertStatusService do
let(:new_status) { 'acknowledged' }
it 'updates the status' do
expect { execute }.to change { alert.status }.to(new_status)
expect { execute }.to change { alert.acknowledged? }.to(true)
end
context 'with unknown status' do
......
@@ -121,7 +121,7 @@ describe Projects::Alerting::NotifyService do
'hosts' => [],
'payload' => payload_raw,
'severity' => 'critical',
'status' => 'triggered',
'status' => AlertManagement::Alert::STATUSES[:triggered],
'events' => 1,
'started_at' => alert.started_at,
'ended_at' => nil
......
@@ -217,6 +217,51 @@ describe Projects::Prometheus::Alerts::NotifyService do
end
end
context 'process Alert Management alerts' do
let(:process_service) { instance_double(AlertManagement::ProcessPrometheusAlertService) }
before do
create(:prometheus_service, project: project)
create(:project_alerting_setting, project: project, token: token)
end
context 'when alert_management_minimal feature enabled' do
before do
stub_feature_flags(alert_management_minimal: true)
end
context 'with multiple firing alerts and resolving alerts' do
let(:payload_raw) do
payload_for(firing: [alert_firing, alert_firing], resolved: [alert_resolved])
end
it 'processes Prometheus alerts' do
expect(AlertManagement::ProcessPrometheusAlertService)
.to receive(:new)
.with(project, nil, kind_of(Hash))
.exactly(3).times
.and_return(process_service)
expect(process_service).to receive(:execute).exactly(3).times
subject
end
end
end
context 'when alert_management_minimal feature disabled' do
before do
stub_feature_flags(alert_management_minimal: false)
end
it 'does not process Prometheus alerts' do
expect(AlertManagement::ProcessPrometheusAlertService)
.not_to receive(:new)
subject
end
end
end
context 'process incident issues' do
before do
create(:prometheus_service, project: project)
@@ -286,6 +331,13 @@ describe Projects::Prometheus::Alerts::NotifyService do
it_behaves_like 'no notifications', http_status: :bad_request
it 'does not process Prometheus alerts' do
expect(AlertManagement::ProcessPrometheusAlertService)
.not_to receive(:new)
subject
end
it 'does not process issues' do
expect(IncidentManagement::ProcessPrometheusAlertWorker)
.not_to receive(:perform_async)
......