Commit 8fe5602c authored by Grzegorz Bizon's avatar Grzegorz Bizon

Merge branch '28717-additional-metrics-review-branch' into 'master'

Support additional Prometheus metrics - review branch

Closes #28717

See merge request !11712
parents 3bbf0898 97c42df3
export default {
EMPTY: 'empty',
LOADING: 'loading',
LIST: 'list',
};
import PrometheusMetrics from './prometheus_metrics';
$(() => {
const prometheusMetrics = new PrometheusMetrics('.js-prometheus-metrics-monitoring');
prometheusMetrics.loadActiveMetrics();
});
import PANEL_STATE from './constants';
export default class PrometheusMetrics {
constructor(wrapperSelector) {
this.backOffRequestCounter = 0;
this.$wrapper = $(wrapperSelector);
this.$monitoredMetricsPanel = this.$wrapper.find('.js-panel-monitored-metrics');
this.$monitoredMetricsCount = this.$monitoredMetricsPanel.find('.js-monitored-count');
this.$monitoredMetricsLoading = this.$monitoredMetricsPanel.find('.js-loading-metrics');
this.$monitoredMetricsEmpty = this.$monitoredMetricsPanel.find('.js-empty-metrics');
this.$monitoredMetricsList = this.$monitoredMetricsPanel.find('.js-metrics-list');
this.$missingEnvVarPanel = this.$wrapper.find('.js-panel-missing-env-vars');
this.$panelToggle = this.$missingEnvVarPanel.find('.js-panel-toggle');
this.$missingEnvVarMetricCount = this.$missingEnvVarPanel.find('.js-env-var-count');
this.$missingEnvVarMetricsList = this.$missingEnvVarPanel.find('.js-missing-var-metrics-list');
this.activeMetricsEndpoint = this.$monitoredMetricsPanel.data('active-metrics');
this.$panelToggle.on('click', e => this.handlePanelToggle(e));
}
/* eslint-disable class-methods-use-this */
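// Collapses or expands the panel body and flips the caret icon between right (collapsed) and down (expanded).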
handlePanelToggle(e) {
const $toggleBtn = $(e.currentTarget);
const $currentPanelBody = $toggleBtn.closest('.panel').find('.panel-body');
$currentPanelBody.toggleClass('hidden');
if ($toggleBtn.hasClass('fa-caret-down')) {
$toggleBtn.removeClass('fa-caret-down').addClass('fa-caret-right');
} else {
$toggleBtn.removeClass('fa-caret-right').addClass('fa-caret-down');
}
}
showMonitoringMetricsPanelState(stateName) {
switch (stateName) {
case PANEL_STATE.LOADING:
this.$monitoredMetricsLoading.removeClass('hidden');
this.$monitoredMetricsEmpty.addClass('hidden');
this.$monitoredMetricsList.addClass('hidden');
break;
case PANEL_STATE.LIST:
this.$monitoredMetricsLoading.addClass('hidden');
this.$monitoredMetricsEmpty.addClass('hidden');
this.$monitoredMetricsList.removeClass('hidden');
break;
default:
this.$monitoredMetricsLoading.addClass('hidden');
this.$monitoredMetricsEmpty.removeClass('hidden');
this.$monitoredMetricsList.addClass('hidden');
break;
}
}
populateActiveMetrics(metrics) {
let totalMonitoredMetrics = 0;
let totalMissingEnvVarMetrics = 0;
metrics.forEach((metric) => {
this.$monitoredMetricsList.append(`<li>${metric.group}<span class="badge">${metric.active_metrics}</span></li>`);
totalMonitoredMetrics += metric.active_metrics;
if (metric.metrics_missing_requirements > 0) {
this.$missingEnvVarMetricsList.append(`<li>${metric.group}</li>`);
totalMissingEnvVarMetrics += 1;
}
});
this.$monitoredMetricsCount.text(totalMonitoredMetrics);
this.showMonitoringMetricsPanelState(PANEL_STATE.LIST);
if (totalMissingEnvVarMetrics > 0) {
this.$missingEnvVarPanel.removeClass('hidden');
this.$missingEnvVarPanel.find('.flash-container').off('click');
this.$missingEnvVarMetricCount.text(totalMissingEnvVarMetrics);
}
}
loadActiveMetrics() {
this.showMonitoringMetricsPanelState(PANEL_STATE.LOADING);
gl.utils.backOff((next, stop) => {
$.getJSON(this.activeMetricsEndpoint)
.done((res) => {
if (res && res.success) {
stop(res);
} else {
this.backOffRequestCounter += 1;
if (this.backOffRequestCounter < 3) {
next();
} else {
stop(res);
}
}
})
.fail(stop);
})
.then((res) => {
if (res && res.data && res.data.length) {
this.populateActiveMetrics(res.data);
} else {
this.showMonitoringMetricsPanelState(PANEL_STATE.EMPTY);
}
})
.catch(() => {
this.showMonitoringMetricsPanelState(PANEL_STATE.EMPTY);
});
}
}
......@@ -126,3 +126,66 @@
margin-left: 5px;
}
}
.prometheus-metrics-monitoring {
.panel {
.panel-toggle {
width: 14px;
}
.badge {
font-size: inherit;
}
.panel-heading .badge-count {
color: $white-light;
background: $common-gray-dark;
}
.panel-body {
padding: 0;
}
.flash-container {
margin-bottom: 0;
cursor: default;
.flash-notice {
border-radius: 0;
}
}
}
.loading-metrics,
.empty-metrics {
padding: 30px 10px;
p,
.btn {
margin-top: 10px;
margin-bottom: 0;
}
}
.loading-metrics .metrics-load-spinner {
color: $loading-color;
}
.metrics-list {
margin-bottom: 0;
li {
padding: $gl-padding;
.badge {
margin-left: 5px;
background: $badge-bg;
}
}
/* Ensure we don't add a border when there's only a single li */
li + li {
border-top: 1px solid $border-color;
}
}
}
......@@ -40,6 +40,10 @@ class ApplicationController < ActionController::Base
render_404
end
rescue_from(ActionController::UnknownFormat) do
render_404
end
rescue_from Gitlab::Access::AccessDeniedError do |exception|
render_403
end
......
......@@ -22,6 +22,22 @@ class Projects::DeploymentsController < Projects::ApplicationController
render_404
end
def additional_metrics
return render_404 unless deployment.has_additional_metrics?
respond_to do |format|
format.json do
metrics = deployment.additional_metrics
if metrics.any?
render json: metrics
else
head :no_content
end
end
end
end
private
def deployment
......
......@@ -129,6 +129,16 @@ class Projects::EnvironmentsController < Projects::ApplicationController
end
end
def additional_metrics
respond_to do |format|
format.json do
additional_metrics = environment.additional_metrics || {}
render json: additional_metrics, status: additional_metrics.any? ? :ok : :no_content
end
end
end
private
def verify_api_request!
......
class Projects::PrometheusController < Projects::ApplicationController
before_action :authorize_read_project!
before_action :require_prometheus_metrics!
def active_metrics
respond_to do |format|
format.json do
matched_metrics = project.prometheus_service.matched_metrics || {}
if matched_metrics.any?
render json: matched_metrics
else
head :no_content
end
end
end
end
private
def require_prometheus_metrics!
render_404 unless project.prometheus_service.present?
end
end
......@@ -114,6 +114,17 @@ class Deployment < ActiveRecord::Base
project.monitoring_service.deployment_metrics(self)
end
def has_additional_metrics?
project.prometheus_service.present?
end
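# Additional metrics are fetched through the project's Prometheus integration and tagged with the deployment timestamp.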
def additional_metrics
return {} unless project.prometheus_service.present?
metrics = project.prometheus_service.additional_deployment_metrics(self)
metrics&.merge(deployment_time: created_at.to_i) || {}
end
private
def ref_path
......
......@@ -157,6 +157,16 @@ class Environment < ActiveRecord::Base
project.monitoring_service.environment_metrics(self) if has_metrics?
end
def has_additional_metrics?
project.prometheus_service.present? && available? && last_deployment.present?
end
def additional_metrics
if has_additional_metrics?
project.prometheus_service.additional_environment_metrics(self)
end
end
# An environment name is not necessarily suitable for use in URLs, DNS
# or other third-party contexts, so provide a slugified version. A slug has
# the following properties:
......
......@@ -28,17 +28,6 @@ class PrometheusService < MonitoringService
'Prometheus monitoring'
end
def help
<<-MD.strip_heredoc
Retrieves the Kubernetes node metrics `container_cpu_usage_seconds_total`
and `container_memory_usage_bytes` from the configured Prometheus server.
If you are not using [Auto-Deploy](https://docs.gitlab.com/ee/ci/autodeploy/index.html)
or have set up your own Prometheus server, an `environment` label is required on each metric to
[identify the Environment](https://docs.gitlab.com/ce/user/project/integrations/prometheus.html#metrics-and-labels).
MD
end
def self.to_param
'prometheus'
end
......@@ -50,6 +39,7 @@ class PrometheusService < MonitoringService
name: 'api_url',
title: 'API URL',
placeholder: 'Prometheus API Base URL, like http://prometheus.example.com/',
help: 'By default, Prometheus listens on ‘http://localhost:9090’. It’s not recommended to change the default address and port as this might affect or conflict with other services running on the GitLab server.',
required: true
}
]
......@@ -65,23 +55,34 @@ class PrometheusService < MonitoringService
end
def environment_metrics(environment)
with_reactive_cache(Gitlab::Prometheus::Queries::EnvironmentQuery.name, environment.id, &:itself)
with_reactive_cache(Gitlab::Prometheus::Queries::EnvironmentQuery.name, environment.id, &method(:rename_data_to_metrics))
end
def deployment_metrics(deployment)
metrics = with_reactive_cache(Gitlab::Prometheus::Queries::DeploymentQuery.name, deployment.id, &:itself)
metrics = with_reactive_cache(Gitlab::Prometheus::Queries::DeploymentQuery.name, deployment.id, &method(:rename_data_to_metrics))
metrics&.merge(deployment_time: deployment.created_at.to_i) || {}
end
def additional_environment_metrics(environment)
with_reactive_cache(Gitlab::Prometheus::Queries::AdditionalMetricsEnvironmentQuery.name, environment.id, &:itself)
end
def additional_deployment_metrics(deployment)
with_reactive_cache(Gitlab::Prometheus::Queries::AdditionalMetricsDeploymentQuery.name, deployment.id, &:itself)
end
def matched_metrics
with_reactive_cache(Gitlab::Prometheus::Queries::MatchedMetricsQuery.name, &:itself)
end
# Caches the result of the given query class; used for environment, deployment and matched metrics
def calculate_reactive_cache(query_class_name, *args)
return unless active? && project && !project.pending_delete?
metrics = Kernel.const_get(query_class_name).new(client).query(*args)
data = Kernel.const_get(query_class_name).new(client).query(*args)
{
success: true,
metrics: metrics,
data: data,
last_update: Time.now.utc
}
rescue Gitlab::PrometheusError => err
......@@ -91,4 +92,11 @@ class PrometheusService < MonitoringService
def client
@prometheus ||= Gitlab::PrometheusClient.new(api_url: api_url)
end
private
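# The reactive cache payload now stores query results under :data, while the
# existing metrics endpoints still expect a :metrics key, so rename it on read.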
def rename_data_to_metrics(metrics)
metrics[:metrics] = metrics.delete :data
metrics
end
end
......@@ -23,3 +23,7 @@
- disabled_title = @service.disabled_title
= link_to 'Cancel', namespace_project_settings_integrations_path(@project.namespace, @project), class: 'btn btn-cancel'
- if lookup_context.template_exists?('show', "projects/services/#{@service.to_param}", true)
%hr
= render "projects/services/#{@service.to_param}/show"
- content_for :page_specific_javascripts do
= webpack_bundle_tag('prometheus_metrics')
.row.prepend-top-default.append-bottom-default.prometheus-metrics-monitoring.js-prometheus-metrics-monitoring
.col-lg-3
%h4.prepend-top-0
Metrics
%p
Metrics are automatically configured and monitored
based on a library of metrics from popular exporters.
= link_to 'More information', '#'
.col-lg-9
.panel.panel-default.js-panel-monitored-metrics{ data: { "active-metrics" => "#{namespace_project_prometheus_active_metrics_path(@project.namespace, @project, :json)}" } }
.panel-heading
%h3.panel-title
Monitored
%span.badge.js-monitored-count 0
.panel-body
.loading-metrics.text-center.js-loading-metrics
= icon('spinner spin 3x', class: 'metrics-load-spinner')
%p Finding and configuring metrics...
.empty-metrics.text-center.hidden.js-empty-metrics
= custom_icon('icon_empty_metrics')
%p No metrics are being monitored. To start monitoring, deploy to an environment.
= link_to project_environments_path(@project), title: 'View environments', class: 'btn btn-success' do
View environments
%ul.list-unstyled.metrics-list.hidden.js-metrics-list
.panel.panel-default.hidden.js-panel-missing-env-vars
.panel-heading
%h3.panel-title
= icon('caret-right lg fw', class: 'panel-toggle js-panel-toggle', 'aria-label' => 'Toggle panel')
Missing environment variable
%span.badge.js-env-var-count 0
.panel-body.hidden
.flash-container
.flash-notice
.flash-text
To set up automatic monitoring, add the environment variable
%code
$CI_ENVIRONMENT_SLUG
to the exporter&rsquo;s queries.
= link_to 'More information', '#'
%ul.list-unstyled.metrics-list.js-missing-var-metrics-list
<svg xmlns="http://www.w3.org/2000/svg" width="64" height="64" viewBox="0 0 64 64">
<g fill="#E5E5E5">
<path d="M32 64C30.8954305 64 30 63.1045695 30 62 30 60.8954305 30.8954305 60 32 60 33.8894444 60 35.7536611 59.8131396 37.574335 59.4454933 38.6570511 59.2268618 39.7120017 59.9273408 39.9306331 61.0100569 40.1492646 62.0927729 39.4487856 63.1477235 38.3660695 63.366355 36.285133 63.7865558 34.1557023 64 32 64zM49.2301062 58.9696428C51.0302775 57.8173242 52.7114504 56.4871355 54.247711 55.0008916 55.0415758 54.232873 55.0625283 52.9667164 54.2945097 52.1728516 53.5264912 51.3789869 52.2603346 51.3580344 51.4664698 52.1260529 50.1212672 53.4274592 48.6493395 54.5920875 47.0736141 55.6007347 46.1433158 56.1962335 45.8719072 57.4331365 46.4674061 58.3634348 47.0629049 59.2937331 48.2998079 59.5651416 49.2301062 58.9696428zM61.0426034 45.4531856C61.9412068 43.5163476 62.6441937 41.4911051 63.1388045 39.4034279 63.393449 38.3286117 62.7285685 37.2508708 61.6537523 36.9962262 60.5789361 36.7415816 59.5011952 37.4064621 59.2465506 38.4812784 58.8141946 40.3061875 58.1997219 42.0764286 57.4141077 43.7697311 56.9492346 44.7717126 57.3846469 45.9608331 58.3866284 46.4257062 59.3886098 46.8905793 60.5777303 46.455167 61.0426034 45.4531856zM63.7270657 27.8034151C63.4476841 25.6718707 62.9558906 23.5863203 62.2616468 21.5714028 61.9018246 20.527084 60.7635435 19.9721898 59.7192246 20.3320119 58.6749058 20.6918341 58.1200116 21.8301152 58.4798337 22.874434 59.0867105 24.6357842 59.5166381 26.45898 59.760988 28.3232492 59.9045362 29.4184513 60.9087418 30.1899192 62.0039439 30.046371 63.099146 29.9028228 63.8706139 28.8986173 63.7270657 27.8034151zM56.4699838 11.3781121C55.0919588 9.74451505 53.5537382 8.25140603 51.8798083 6.92273835 51.0146495 6.23602588 49.7566092 6.38068523 49.0698968 7.24584403 48.3831843 8.11100284 48.5278436 9.36904308 49.3930024 10.0557555 50.8587525 11.2191822 52.2058153 12.5267396 53.4125204 13.9572433 54.1247279 14.8015385 55.3865225 14.9086168 56.2308177 14.1964094 57.0751129 13.484202 57.1821912 12.2224073 56.4699838 11.3781121zM41.481294 1.42849704C39.4470333.798260231 37.3474846.371987025 35.2067823.158824109 34.1076485.0493765922 33.1278998.851675811 33.0184523 1.95080957 32.9090048 3.04994333 33.711304 4.02969203 34.8104377 4.13913955 36.6833634 4.32563829 38.5191483 4.69835932 40.297557 5.24933028 41.3526509 5.57621023 42.4729622 4.98587613 42.7998421 3.93078217 43.1267221 2.8756882 42.536388 1.75537699 41.481294 1.42849704zM23.6558195 1.0993008C21.5852929 1.6571259 19.5822296 2.42161363 17.6728876 3.37914679 16.6855233 3.874309 16.2865147 5.07613416 16.7816769 6.06349841 17.2768392 7.05086266 18.4786643 7.44987125 19.4660286 6.95470905 21.1354949 6.11747332 22.8864813 5.44919307 24.6963667 4.96158787 25.7629079 4.67424869 26.3945759 3.57671185 26.1072367 2.51017072 25.8198975 1.44362959 24.7223606.811961615 23.6558195 1.0993008zM8.36290105 10.4291871C6.92120358 12.00815 5.63985273 13.7275139 4.53998784 15.5610549 3.97179016 16.5082746 4.27904822 17.7367631 5.22626792 18.3049608 6.17348763 18.8731585 7.40197615 18.5659004 7.97017383 17.6186807 8.9327668 16.0139803 10.054503 14.5087932 11.3168098 13.126301 12.0615972 12.3106016 12.0041117 11.0455771 11.1884123 10.3007897 10.372713 9.55600224 9.10768848 9.61348772 8.36290105 10.4291871zM.450120287 26.6230259C.151304663 28.3883054 0 30.1850053 0 32 0 32.2974081.00406268322 32.594367.0121750297 32.8908218.0423897377 33.994978.96197903 34.8655796 2.0661352 34.8353649 3.17029137 34.8051502 4.04089294 33.8855609 4.01067824 32.7814047 4.00356366 32.521412 4 32.2609289 4 32 4 30.4089462 4.13249902 28.8355581 4.39401589 
27.2906242 4.57836807 26.2015475 3.84494393 25.1692294 2.75586724 24.9848772 1.66679054 24.800525.634472466 25.5339492.450120287 26.6230259zM2.45830096 44.3202494C3.28286321 46.2952494 4.30407075 48.1806071 5.50459135 49.9494734 6.124886 50.8634254 7.36863868 51.1014818 8.28259072 50.4811871 9.19654276 49.8608925 9.43459912 48.6171398 8.81430448 47.7031878 7.76386025 46.1554464 6.87058107 44.5062706 6.14951581 42.7791677 5.72395784 41.7598668 4.55266835 41.2785432 3.53336751 41.7041011 2.51406668 42.1296591 2.03274299 43.3009486 2.45830096 44.3202494zM13.73374 58.2776222C15.4883094 59.4994144 17.3614388 60.5433005 19.3262717 61.39161 20.3403619 61.8294398 21.5173756 61.3622885 21.9552054 60.3481983 22.3930351 59.3341082 21.9258838 58.1570945 20.9117937 57.7192647 19.1934726 56.9773858 17.5548741 56.0642026 16.0195384 54.9950736 15.1130877 54.3638678 13.8665707 54.5869979 13.2353649 55.4934487 12.6041591 56.3998995 12.8272892 57.6464164 13.73374 58.2776222zM30.6955071 63.9738646C29.5918263 63.9295649 28.7330282 62.9989428 28.7773279 61.895262 28.8216276 60.7915812 29.7522497 59.9327832 30.8559305 59.9770829 31.2344492 59.9922759 31.6140624 59.9999282 31.9946308 59.9999995 33.0992003 60.0002065 33.994463 60.8958047 33.994256 62.0003742 33.9940491 63.1049437 33.0984508 64.0002064 31.9938814 63.9999994 31.5600677 63.9999181 31.1272192 63.9911927 30.6955071 63.9738646zM30.1721098 44.2840559C30.7941711 46.023825 33.2407935 46.0619159 33.9167124 44.3423547L38.9452693 31.5495297 41.1315797 35.2685507C41.4908522 35.8796908 42.1468005 36.2549751 42.8557214 36.2549751L51.1106965 36.2549751C52.215266 36.2549751 53.1106965 35.3595446 53.1106965 34.2549751 53.1106965 33.1504056 52.215266 32.2549751 51.1106965 32.2549751L43.9999712 32.2549751 40.3112064 25.9802055C39.465988 24.5424477 37.3358287 24.7099356 36.7257006 26.2621229L32.1439734 37.9181973 26.2115967 21.3266406C25.5807315 19.562249 23.0875908 19.5563214 22.4483429 21.3176933L18.4775633 32.2587065 13 32.2587065C11.8954305 32.2587065 11 33.154137 11 34.2587065 11 35.363276 11.8954305 36.2587065 13 36.2587065L19.8793532 36.2587065C20.720826 36.2587065 21.4722973 35.732004 21.7593685 34.9410132L24.314328 27.9011249 30.1721098 44.2840559z"/>
</g>
</svg>
---
title: Additional Prometheus metrics support
merge_request: 11712
author:
- group: Kubernetes
priority: 1
metrics:
- title: "Memory usage"
y_label: "Values"
required_metrics:
- container_memory_usage_bytes
weight: 1
queries:
- query_range: 'avg(container_memory_usage_bytes{%{environment_filter}}) / 2^20'
label: Container memory
unit: MiB
- title: "Current memory usage"
required_metrics:
- container_memory_usage_bytes
weight: 1
queries:
- query: 'avg(container_memory_usage_bytes{%{environment_filter}}) / 2^20'
display_empty: false
unit: MiB
- title: "CPU usage"
required_metrics:
- container_cpu_usage_seconds_total
weight: 1
queries:
- query_range: 'avg(rate(container_cpu_usage_seconds_total{%{environment_filter}}[2m])) * 100'
- title: "Current CPU usage"
required_metrics:
- container_cpu_usage_seconds_total
weight: 1
queries:
- query: 'avg(rate(container_cpu_usage_seconds_total{%{environment_filter}}[2m])) * 100'
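For reference, the %{environment_filter} placeholders in the queries above are ordinary Ruby format-string markers; QueryAdditionalMetrics#process_query (further down in this diff) substitutes them with a label selector built from the environment slug before the query is sent to Prometheus. A minimal sketch, assuming a hypothetical environment slug of "production":
query_template = 'avg(container_memory_usage_bytes{%{environment_filter}}) / 2^20'
context = { environment_filter: 'container_name!="POD",environment="production"' }
query_template % context
# => 'avg(container_memory_usage_bytes{container_name!="POD",environment="production"}) / 2^20'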
......@@ -73,6 +73,10 @@ constraints(ProjectUrlConstrainer.new) do
resource :mattermost, only: [:new, :create]
namespace :prometheus do
get :active_metrics
end
resources :deploy_keys, constraints: { id: /\d+/ }, only: [:index, :new, :create, :edit, :update] do
member do
put :enable
......@@ -153,6 +157,7 @@ constraints(ProjectUrlConstrainer.new) do
post :stop
get :terminal
get :metrics
get :additional_metrics
get '/terminal.ws/authorize', to: 'environments#terminal_websocket_authorize', constraints: { format: nil }
end
......@@ -163,6 +168,7 @@ constraints(ProjectUrlConstrainer.new) do
resources :deployments, only: [:index] do
member do
get :metrics
get :additional_metrics
end
end
end
......
......@@ -55,6 +55,7 @@ var config = {
pipelines: './pipelines/pipelines_bundle.js',
pipelines_details: './pipelines/pipeline_details_bundle.js',
profile: './profile/profile_bundle.js',
prometheus_metrics: './prometheus_metrics',
protected_branches: './protected_branches/protected_branches_bundle.js',
protected_tags: './protected_tags',
sidebar: './sidebar/sidebar_bundle.js',
......
module Gitlab
module Prometheus
module AdditionalMetricsParser
extend self
def load_groups_from_yaml
additional_metrics_raw.map(&method(:group_from_entry))
end
private
def validate!(obj)
raise ParsingError.new(obj.errors.full_messages.join("\n")) unless obj.valid?
end
def group_from_entry(entry)
entry[:name] = entry.delete(:group)
entry[:metrics]&.map! do |metric_entry|
Metric.new(metric_entry).tap(&method(:validate!))
end
MetricGroup.new(entry).tap(&method(:validate!))
end
def additional_metrics_raw
load_yaml_file&.map(&:deep_symbolize_keys).freeze
end
def load_yaml_file
@loaded_yaml_file ||= YAML.load_file(Rails.root.join('config/prometheus/additional_metrics.yml'))
end
end
end
end
module Gitlab
module Prometheus
class Metric
include ActiveModel::Model
attr_accessor :title, :required_metrics, :weight, :y_label, :queries
validates :title, :required_metrics, :weight, :y_label, :queries, presence: true
def initialize(params = {})
super(params)
@y_label ||= 'Values'
end
end
end
end
module Gitlab
module Prometheus
class MetricGroup
include ActiveModel::Model
attr_accessor :name, :priority, :metrics
validates :name, :priority, :metrics, presence: true
def self.all
AdditionalMetricsParser.load_groups_from_yaml
end
end
end
end
module Gitlab
module Prometheus
ParsingError = Class.new(StandardError)
end
end
module Gitlab
module Prometheus
module Queries
class AdditionalMetricsDeploymentQuery < BaseQuery
include QueryAdditionalMetrics
def query(deployment_id)
Deployment.find_by(id: deployment_id).try do |deployment|
query_context = {
environment_slug: deployment.environment.slug,
environment_filter: %{container_name!="POD",environment="#{deployment.environment.slug}"},
timeframe_start: (deployment.created_at - 30.minutes).to_f,
timeframe_end: (deployment.created_at + 30.minutes).to_f
}
query_metrics(query_context)
end
end
end
end
end
end
module Gitlab
module Prometheus
module Queries
class AdditionalMetricsEnvironmentQuery < BaseQuery
include QueryAdditionalMetrics
def query(environment_id)
Environment.find_by(id: environment_id).try do |environment|
query_context = {
environment_slug: environment.slug,
environment_filter: %{container_name!="POD",environment="#{environment.slug}"},
timeframe_start: 8.hours.ago.to_f,
timeframe_end: Time.now.to_f
}
query_metrics(query_context)
end
end
end
end
end
end
......@@ -3,7 +3,7 @@ module Gitlab
module Queries
class BaseQuery
attr_accessor :client
delegate :query_range, :query, to: :client, prefix: true
delegate :query_range, :query, :label_values, :series, to: :client, prefix: true
def raw_memory_usage_query(environment_slug)
%{avg(container_memory_usage_bytes{container_name!="POD",environment="#{environment_slug}"}) / 2^20}
......
module Gitlab
module Prometheus
module Queries
class DeploymentQuery < BaseQuery
def query(deployment_id)
Deployment.find_by(id: deployment_id).try do |deployment|
environment_slug = deployment.environment.slug
memory_query = raw_memory_usage_query(environment_slug)
memory_avg_query = %{avg(avg_over_time(container_memory_usage_bytes{container_name!="POD",environment="#{environment_slug}"}[30m]))}
cpu_query = raw_cpu_usage_query(environment_slug)
cpu_avg_query = %{avg(rate(container_cpu_usage_seconds_total{container_name!="POD",environment="#{environment_slug}"}[30m])) * 100}
timeframe_start = (deployment.created_at - 30.minutes).to_f
timeframe_end = (deployment.created_at + 30.minutes).to_f
{
memory_values: client_query_range(memory_query, start: timeframe_start, stop: timeframe_end),
memory_before: client_query(memory_avg_query, time: deployment.created_at.to_f),
memory_after: client_query(memory_avg_query, time: timeframe_end),
cpu_values: client_query_range(cpu_query, start: timeframe_start, stop: timeframe_end),
cpu_before: client_query(cpu_avg_query, time: deployment.created_at.to_f),
cpu_after: client_query(cpu_avg_query, time: timeframe_end)
}
end
end
end
end
end
end
module Gitlab
module Prometheus
module Queries
class EnvironmentQuery < BaseQuery
def query(environment_id)
Environment.find_by(id: environment_id).try do |environment|
environment_slug = environment.slug
timeframe_start = 8.hours.ago.to_f
timeframe_end = Time.now.to_f
memory_query = raw_memory_usage_query(environment_slug)
cpu_query = raw_cpu_usage_query(environment_slug)
{
memory_values: client_query_range(memory_query, start: timeframe_start, stop: timeframe_end),
memory_current: client_query(memory_query, time: timeframe_end),
cpu_values: client_query_range(cpu_query, start: timeframe_start, stop: timeframe_end),
cpu_current: client_query(cpu_query, time: timeframe_end)
}
end
end
end
end
end
end
module Gitlab
module Prometheus
module Queries
class MatchedMetricsQuery < BaseQuery
# Limit the number of series names requested per Prometheus series API call.
MAX_QUERY_ITEMS = 40
def query
groups_data.map do |group, data|
{
group: group.name,
priority: group.priority,
active_metrics: data[:active_metrics],
metrics_missing_requirements: data[:metrics_missing_requirements]
}
end
end
private
def groups_data
metrics_groups = groups_with_active_metrics(Gitlab::Prometheus::MetricGroup.all)
lookup = active_series_lookup(metrics_groups)
groups = {}
metrics_groups.each do |group|
groups[group] ||= { active_metrics: 0, metrics_missing_requirements: 0 }
active_metrics = group.metrics.count { |metric| metric.required_metrics.all?(&lookup.method(:has_key?)) }
groups[group][:active_metrics] += active_metrics
groups[group][:metrics_missing_requirements] += group.metrics.count - active_metrics
end
groups
end
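# Builds a lookup of series names that carry an 'environment' label within the
# last 8 hours, batching the Prometheus series calls to MAX_QUERY_ITEMS names per request.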
def active_series_lookup(metric_groups)
timeframe_start = 8.hours.ago
timeframe_end = Time.now
series = metric_groups.flat_map(&:metrics).flat_map(&:required_metrics).uniq
lookup = series.each_slice(MAX_QUERY_ITEMS).flat_map do |batched_series|
client_series(*batched_series, start: timeframe_start, stop: timeframe_end)
.select(&method(:has_matching_label))
.map { |series_info| [series_info['__name__'], true] }
end
lookup.to_h
end
def has_matching_label(series_info)
series_info.key?('environment')
end
def available_metrics
@available_metrics ||= client_label_values || []
end
def filter_active_metrics(metric_group)
metric_group.metrics.select! do |metric|
metric.required_metrics.all?(&available_metrics.method(:include?))
end
metric_group
end
def groups_with_active_metrics(metric_groups)
metric_groups.map(&method(:filter_active_metrics)).select { |group| group.metrics.any? }
end
def metrics_with_required_series(metric_groups)
metric_groups.flat_map do |group|
group.metrics.select do |metric|
metric.required_metrics.all?(&available_metrics.method(:include?))
end
end
end
end
end
end
end
module Gitlab
module Prometheus
module Queries
module QueryAdditionalMetrics
def query_metrics(query_context)
query_processor = method(:process_query).curry[query_context]
groups = matched_metrics.map do |group|
metrics = group.metrics.map do |metric|
{
title: metric.title,
weight: metric.weight,
y_label: metric.y_label,
queries: metric.queries.map(&query_processor).select(&method(:query_with_result))
}
end
{
group: group.name,
priority: group.priority,
metrics: metrics.select(&method(:metric_with_any_queries))
}
end
groups.select(&method(:group_with_any_metrics))
end
private
def metric_with_any_queries(metric)
metric[:queries]&.count&.> 0
end
def group_with_any_metrics(group)
group[:metrics]&.count&.> 0
end
def query_with_result(query)
query[:result]&.any? do |item|
item&.[](:values)&.any? || item&.[](:value)&.any?
end
end
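# Interpolates the %{...} placeholders in the query template from the query
# context and attaches the symbolized Prometheus response under :result.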
def process_query(context, query)
query_with_result = query.dup
result =
if query.key?(:query_range)
client_query_range(query[:query_range] % context, start: context[:timeframe_start], stop: context[:timeframe_end])
else
client_query(query[:query] % context, time: context[:timeframe_end])
end
query_with_result[:result] = result&.map(&:deep_symbolize_keys)
query_with_result
end
def available_metrics
@available_metrics ||= client_label_values || []
end
def matched_metrics
result = Gitlab::Prometheus::MetricGroup.all.map do |group|
group.metrics.select! do |metric|
metric.required_metrics.all?(&available_metrics.method(:include?))
end
group
end
result.select { |group| group.metrics.any? }
end
end
end
end
end
......@@ -29,6 +29,14 @@ module Gitlab
end
end
def label_values(name = '__name__')
json_api_get("label/#{name}/values")
end
def series(*matches, start: 8.hours.ago, stop: Time.now)
json_api_get('series', 'match': matches, start: start.to_f, end: stop.to_f)
end
private
def json_api_get(type, args = {})
......
......@@ -99,6 +99,36 @@ describe ApplicationController do
end
end
describe 'response format' do
controller(described_class) do
def index
respond_to do |format|
format.json do
head :ok
end
end
end
end
context 'when format is handled' do
let(:requested_format) { :json }
it 'returns 200 response' do
get :index, private_token: user.private_token, format: requested_format
expect(response).to have_http_status 200
end
end
context 'when format is not handled' do
it 'returns 404 response' do
get :index, private_token: user.private_token
expect(response).to have_http_status 404
end
end
end
describe '#authenticate_user_from_rss_token' do
describe "authenticating a user from an RSS token" do
controller(described_class) do
......
......@@ -42,6 +42,7 @@ describe Projects::DeploymentsController do
before do
allow(controller).to receive(:deployment).and_return(deployment)
end
context 'when metrics are disabled' do
before do
allow(deployment).to receive(:has_metrics?).and_return false
......@@ -108,6 +109,69 @@ describe Projects::DeploymentsController do
end
end
describe 'GET #additional_metrics' do
let(:deployment) { create(:deployment, project: project, environment: environment) }
before do
allow(controller).to receive(:deployment).and_return(deployment)
end
context 'when metrics are disabled' do
before do
allow(deployment).to receive(:has_additional_metrics?).and_return false
end
it 'responds with not found' do
get :additional_metrics, deployment_params(id: deployment.id)
expect(response).to be_not_found
end
end
context 'when metrics are enabled' do
let(:prometheus_service) { double('prometheus_service') }
before do
allow(deployment.project).to receive(:prometheus_service).and_return(prometheus_service)
end
context 'when environment has no metrics' do
before do
expect(deployment).to receive(:additional_metrics).and_return({})
end
it 'returns an empty 204 response' do
get :additional_metrics, deployment_params(id: deployment.id, format: :json)
expect(response).to have_http_status(204)
expect(response.body).to eq('')
end
end
context 'when environment has some metrics' do
let(:empty_metrics) do
{
success: true,
metrics: {},
last_update: 42
}
end
before do
expect(deployment).to receive(:additional_metrics).and_return(empty_metrics)
end
it 'returns a metrics JSON document' do
get :additional_metrics, deployment_params(id: deployment.id, format: :json)
expect(response).to be_ok
expect(json_response['success']).to be(true)
expect(json_response['metrics']).to eq({})
expect(json_response['last_update']).to eq(42)
end
end
end
end
def deployment_params(opts = {})
opts.reverse_merge(namespace_id: project.namespace,
project_id: project,
......
......@@ -318,6 +318,48 @@ describe Projects::EnvironmentsController do
end
end
describe 'GET #additional_metrics' do
before do
allow(controller).to receive(:environment).and_return(environment)
end
context 'when environment has no metrics' do
before do
expect(environment).to receive(:additional_metrics).and_return(nil)
end
context 'when requesting metrics as JSON' do
it 'returns a 204 with an empty JSON document' do
get :additional_metrics, environment_params(format: :json)
expect(response).to have_http_status(204)
expect(json_response).to eq({})
end
end
end
context 'when environment has some metrics' do
before do
expect(environment)
.to receive(:additional_metrics)
.and_return({
success: true,
data: {},
last_update: 42
})
end
it 'returns a metrics JSON document' do
get :additional_metrics, environment_params(format: :json)
expect(response).to be_ok
expect(json_response['success']).to be(true)
expect(json_response['data']).to eq({})
expect(json_response['last_update']).to eq(42)
end
end
end
def environment_params(opts = {})
opts.reverse_merge(namespace_id: project.namespace,
project_id: project,
......
require('spec_helper')
describe Projects::PrometheusController do
let(:user) { create(:user) }
let!(:project) { create(:empty_project) }
let(:prometheus_service) { double('prometheus_service') }
before do
allow(controller).to receive(:project).and_return(project)
allow(project).to receive(:prometheus_service).and_return(prometheus_service)
project.add_master(user)
sign_in(user)
end
describe 'GET #active_metrics' do
context 'when prometheus metrics are enabled' do
context 'when data is not present' do
before do
allow(prometheus_service).to receive(:matched_metrics).and_return({})
end
it 'returns no content response' do
get :active_metrics, project_params(format: :json)
expect(response).to have_http_status(204)
end
end
context 'when data is available' do
let(:sample_response) { { some_data: 1 } }
before do
allow(prometheus_service).to receive(:matched_metrics).and_return(sample_response)
end
it 'returns the matched metrics' do
get :active_metrics, project_params(format: :json)
expect(response).to have_http_status(200)
expect(json_response).to eq(sample_response.deep_stringify_keys)
end
end
context 'when requesting non json response' do
it 'returns not found response' do
get :active_metrics, project_params
expect(response).to have_http_status(404)
end
end
end
end
def project_params(opts = {})
opts.reverse_merge(namespace_id: project.namespace, project_id: project)
end
end
......@@ -25,6 +25,14 @@ FactoryGirl.define do
})
end
factory :prometheus_service do
project factory: :empty_project
active true
properties({
api_url: 'https://prometheus.example.com/'
})
end
factory :jira_service do
project factory: :empty_project
active true
......
{
"items": {
"properties": {
"group": {
"type": "string"
},
"metrics": {
"items": {
"properties": {
"queries": {
"items": {
"properties": {
"query_range": {
"type": "string"
},
"query": {
"type": "string"
},
"result": {
"type": "any"
}
},
"type": "object"
},
"type": "array"
},
"title": {
"type": "string"
},
"weight": {
"type": "integer"
},
"y_label": {
"type": "string"
}
},
"type": "object"
},
"required": [
"metrics",
"title",
"weight"
],
"type": "array"
},
"priority": {
"type": "integer"
}
},
"type": "object"
},
"required": [
"group",
"priority",
"metrics"
],
"type": "array"
}
\ No newline at end of file
require 'spec_helper'
describe Projects::ServicesController, '(JavaScript fixtures)', type: :controller do
include JavaScriptFixturesHelpers
let(:admin) { create(:admin) }
let(:namespace) { create(:namespace, name: 'frontend-fixtures' )}
let(:project) { create(:project_empty_repo, namespace: namespace, path: 'services-project') }
let!(:service) { create(:prometheus_service, project: project) }
render_views
before(:all) do
clean_frontend_fixtures('services/prometheus')
end
before(:each) do
sign_in(admin)
end
it 'services/prometheus/prometheus_service.html.raw' do |example|
get :edit,
namespace_id: namespace,
project_id: project,
id: service.to_param
expect(response).to be_success
store_frontend_fixture(response, example.description)
end
end
export const metrics = [
{
group: 'Kubernetes',
priority: 1,
active_metrics: 4,
metrics_missing_requirements: 0,
},
{
group: 'HAProxy',
priority: 2,
active_metrics: 3,
metrics_missing_requirements: 0,
},
{
group: 'Apache',
priority: 3,
active_metrics: 5,
metrics_missing_requirements: 0,
},
];
export const missingVarMetrics = [
{
group: 'Kubernetes',
priority: 1,
active_metrics: 4,
metrics_missing_requirements: 0,
},
{
group: 'HAProxy',
priority: 2,
active_metrics: 3,
metrics_missing_requirements: 1,
},
{
group: 'Apache',
priority: 3,
active_metrics: 5,
metrics_missing_requirements: 3,
},
];
import PrometheusMetrics from '~/prometheus_metrics/prometheus_metrics';
import PANEL_STATE from '~/prometheus_metrics/constants';
import { metrics, missingVarMetrics } from './mock_data';
describe('PrometheusMetrics', () => {
const FIXTURE = 'services/prometheus/prometheus_service.html.raw';
preloadFixtures(FIXTURE);
beforeEach(() => {
loadFixtures(FIXTURE);
});
describe('constructor', () => {
let prometheusMetrics;
beforeEach(() => {
prometheusMetrics = new PrometheusMetrics('.js-prometheus-metrics-monitoring');
});
it('should initialize wrapper element refs on class object', () => {
expect(prometheusMetrics.$wrapper).toBeDefined();
expect(prometheusMetrics.$monitoredMetricsPanel).toBeDefined();
expect(prometheusMetrics.$monitoredMetricsCount).toBeDefined();
expect(prometheusMetrics.$monitoredMetricsLoading).toBeDefined();
expect(prometheusMetrics.$monitoredMetricsEmpty).toBeDefined();
expect(prometheusMetrics.$monitoredMetricsList).toBeDefined();
expect(prometheusMetrics.$missingEnvVarPanel).toBeDefined();
expect(prometheusMetrics.$panelToggle).toBeDefined();
expect(prometheusMetrics.$missingEnvVarMetricCount).toBeDefined();
expect(prometheusMetrics.$missingEnvVarMetricsList).toBeDefined();
});
it('should initialize metadata on class object', () => {
expect(prometheusMetrics.backOffRequestCounter).toEqual(0);
expect(prometheusMetrics.activeMetricsEndpoint).toContain('/test');
});
});
describe('showMonitoringMetricsPanelState', () => {
let prometheusMetrics;
beforeEach(() => {
prometheusMetrics = new PrometheusMetrics('.js-prometheus-metrics-monitoring');
});
it('should show loading state when called with `loading`', () => {
prometheusMetrics.showMonitoringMetricsPanelState(PANEL_STATE.LOADING);
expect(prometheusMetrics.$monitoredMetricsLoading.hasClass('hidden')).toBeFalsy();
expect(prometheusMetrics.$monitoredMetricsEmpty.hasClass('hidden')).toBeTruthy();
expect(prometheusMetrics.$monitoredMetricsList.hasClass('hidden')).toBeTruthy();
});
it('should show metrics list when called with `list`', () => {
prometheusMetrics.showMonitoringMetricsPanelState(PANEL_STATE.LIST);
expect(prometheusMetrics.$monitoredMetricsLoading.hasClass('hidden')).toBeTruthy();
expect(prometheusMetrics.$monitoredMetricsEmpty.hasClass('hidden')).toBeTruthy();
expect(prometheusMetrics.$monitoredMetricsList.hasClass('hidden')).toBeFalsy();
});
it('should show empty state when called with `empty`', () => {
prometheusMetrics.showMonitoringMetricsPanelState(PANEL_STATE.EMPTY);
expect(prometheusMetrics.$monitoredMetricsLoading.hasClass('hidden')).toBeTruthy();
expect(prometheusMetrics.$monitoredMetricsEmpty.hasClass('hidden')).toBeFalsy();
expect(prometheusMetrics.$monitoredMetricsList.hasClass('hidden')).toBeTruthy();
});
});
describe('populateActiveMetrics', () => {
let prometheusMetrics;
beforeEach(() => {
prometheusMetrics = new PrometheusMetrics('.js-prometheus-metrics-monitoring');
});
it('should show monitored metrics list', () => {
prometheusMetrics.populateActiveMetrics(metrics);
const $metricsListLi = prometheusMetrics.$monitoredMetricsList.find('li');
expect(prometheusMetrics.$monitoredMetricsLoading.hasClass('hidden')).toBeTruthy();
expect(prometheusMetrics.$monitoredMetricsList.hasClass('hidden')).toBeFalsy();
expect(prometheusMetrics.$monitoredMetricsCount.text()).toEqual('12');
expect($metricsListLi.length).toEqual(metrics.length);
expect($metricsListLi.first().find('.badge').text()).toEqual(`${metrics[0].active_metrics}`);
});
it('should show missing environment variables list', () => {
prometheusMetrics.populateActiveMetrics(missingVarMetrics);
expect(prometheusMetrics.$monitoredMetricsLoading.hasClass('hidden')).toBeTruthy();
expect(prometheusMetrics.$missingEnvVarPanel.hasClass('hidden')).toBeFalsy();
expect(prometheusMetrics.$missingEnvVarMetricCount.text()).toEqual('2');
expect(prometheusMetrics.$missingEnvVarPanel.find('li').length).toEqual(2);
expect(prometheusMetrics.$missingEnvVarPanel.find('.flash-container')).toBeDefined();
});
});
describe('loadActiveMetrics', () => {
let prometheusMetrics;
beforeEach(() => {
prometheusMetrics = new PrometheusMetrics('.js-prometheus-metrics-monitoring');
});
it('should show loader animation while response is being loaded and hide it when request is complete', (done) => {
const deferred = $.Deferred();
spyOn($, 'getJSON').and.returnValue(deferred.promise());
prometheusMetrics.loadActiveMetrics();
expect(prometheusMetrics.$monitoredMetricsLoading.hasClass('hidden')).toBeFalsy();
expect($.getJSON).toHaveBeenCalledWith(prometheusMetrics.activeMetricsEndpoint);
deferred.resolve({ data: metrics, success: true });
setTimeout(() => {
expect(prometheusMetrics.$monitoredMetricsLoading.hasClass('hidden')).toBeTruthy();
done();
});
});
it('should show empty state if response failed to load', (done) => {
const deferred = $.Deferred();
spyOn($, 'getJSON').and.returnValue(deferred.promise());
spyOn(prometheusMetrics, 'populateActiveMetrics');
prometheusMetrics.loadActiveMetrics();
deferred.reject();
setTimeout(() => {
expect(prometheusMetrics.$monitoredMetricsLoading.hasClass('hidden')).toBeTruthy();
expect(prometheusMetrics.$monitoredMetricsEmpty.hasClass('hidden')).toBeFalsy();
done();
});
});
it('should populate metrics list once response is loaded', (done) => {
const deferred = $.Deferred();
spyOn($, 'getJSON').and.returnValue(deferred.promise());
spyOn(prometheusMetrics, 'populateActiveMetrics');
prometheusMetrics.loadActiveMetrics();
deferred.resolve({ data: metrics, success: true });
setTimeout(() => {
expect(prometheusMetrics.populateActiveMetrics).toHaveBeenCalledWith(metrics);
done();
});
});
});
});
require 'spec_helper'
describe Gitlab::Prometheus::AdditionalMetricsParser, lib: true do
include Prometheus::MetricBuilders
let(:parser_error_class) { Gitlab::Prometheus::ParsingError }
describe '#load_groups_from_yaml' do
subject { described_class.load_groups_from_yaml }
describe 'parsing sample yaml' do
let(:sample_yaml) do
<<-EOF.strip_heredoc
- group: group_a
priority: 1
metrics:
- title: "title"
required_metrics: [ metric_a, metric_b ]
weight: 1
queries: [{ query_range: 'query_range_a', label: label, unit: unit }]
- title: "title"
required_metrics: [metric_a]
weight: 1
queries: [{ query_range: 'query_range_empty' }]
- group: group_b
priority: 1
metrics:
- title: title
required_metrics: ['metric_a']
weight: 1
queries: [{query_range: query_range_a}]
EOF
end
before do
allow(described_class).to receive(:load_yaml_file) { YAML.load(sample_yaml) }
end
it 'parses to two metric groups with 2 and 1 metric respectively' do
expect(subject.count).to eq(2)
expect(subject[0].metrics.count).to eq(2)
expect(subject[1].metrics.count).to eq(1)
end
it 'provides group data' do
expect(subject[0]).to have_attributes(name: 'group_a', priority: 1)
expect(subject[1]).to have_attributes(name: 'group_b', priority: 1)
end
it 'provides metrics data' do
metrics = subject.flat_map(&:metrics)
expect(metrics.count).to eq(3)
expect(metrics[0]).to have_attributes(title: 'title', required_metrics: %w(metric_a metric_b), weight: 1)
expect(metrics[1]).to have_attributes(title: 'title', required_metrics: %w(metric_a), weight: 1)
expect(metrics[2]).to have_attributes(title: 'title', required_metrics: %w{metric_a}, weight: 1)
end
it 'provides query data' do
queries = subject.flat_map(&:metrics).flat_map(&:queries)
expect(queries.count).to eq(3)
expect(queries[0]).to eq(query_range: 'query_range_a', label: 'label', unit: 'unit')
expect(queries[1]).to eq(query_range: 'query_range_empty')
expect(queries[2]).to eq(query_range: 'query_range_a')
end
end
shared_examples 'required field' do |field_name|
context "when #{field_name} is nil" do
before do
allow(described_class).to receive(:load_yaml_file) { YAML.load(field_nil) }
end
it 'throws parsing error' do
expect { subject }.to raise_error(parser_error_class, /#{field_name} can't be blank/i)
end
end
context "when #{field_name} are not specified" do
before do
allow(described_class).to receive(:load_yaml_file) { YAML.load(field_nil) }
end
it 'throws parsing error' do
expect { subject }.to raise_error(parser_error_class, /#{field_name} can't be blank/i)
end
end
end
describe 'group required fields' do
it_behaves_like 'required field', 'metrics' do
let(:field_nil) do
<<-EOF.strip_heredoc
- group: group_a
priority: 1
metrics:
EOF
end
let(:field_missing) do
<<-EOF.strip_heredoc
- group: group_a
priority: 1
EOF
end
end
it_behaves_like 'required field', 'name' do
let(:field_nil) do
<<-EOF.strip_heredoc
- group:
priority: 1
metrics: []
EOF
end
let(:field_missing) do
<<-EOF.strip_heredoc
- priority: 1
metrics: []
EOF
end
end
it_behaves_like 'required field', 'priority' do
let(:field_nil) do
<<-EOF.strip_heredoc
- group: group_a
priority:
metrics: []
EOF
end
let(:field_missing) do
<<-EOF.strip_heredoc
- group: group_a
metrics: []
EOF
end
end
end
describe 'metrics fields parsing' do
it_behaves_like 'required field', 'title' do
let(:field_nil) do
<<-EOF.strip_heredoc
- group: group_a
priority: 1
metrics:
- title:
required_metrics: []
weight: 1
queries: []
EOF
end
let(:field_missing) do
<<-EOF.strip_heredoc
- group: group_a
priority: 1
metrics:
- required_metrics: []
weight: 1
queries: []
EOF
end
end
it_behaves_like 'required field', 'required metrics' do
let(:field_nil) do
<<-EOF.strip_heredoc
- group: group_a
priority: 1
metrics:
- title: title
required_metrics:
weight: 1
queries: []
EOF
end
let(:field_missing) do
<<-EOF.strip_heredoc
- group: group_a
priority: 1
metrics:
- title: title
weight: 1
queries: []
EOF
end
end
it_behaves_like 'required field', 'weight' do
let(:field_nil) do
<<-EOF.strip_heredoc
- group: group_a
priority: 1
metrics:
- title: title
required_metrics: []
weight:
queries: []
EOF
end
let(:field_missing) do
<<-EOF.strip_heredoc
- group: group_a
priority: 1
metrics:
- title: title
required_metrics: []
queries: []
EOF
end
end
it_behaves_like 'required field', :queries do
let(:field_nil) do
<<-EOF.strip_heredoc
- group: group_a
priority: 1
metrics:
- title: title
required_metrics: []
weight: 1
queries:
EOF
end
let(:field_missing) do
<<-EOF.strip_heredoc
- group: group_a
priority: 1
metrics:
- title: title
required_metrics: []
weight: 1
EOF
end
end
end
end
end
require 'spec_helper'
describe Gitlab::Prometheus::Queries::AdditionalMetricsDeploymentQuery, lib: true do
include Prometheus::MetricBuilders
let(:client) { double('prometheus_client') }
let(:environment) { create(:environment, slug: 'environment-slug') }
let(:deployment) { create(:deployment, environment: environment) }
subject(:query_result) { described_class.new(client).query(deployment.id) }
around do |example|
Timecop.freeze(Time.local(2008, 9, 1, 12, 0, 0)) { example.run }
end
include_examples 'additional metrics query' do
it 'queries using specific time' do
expect(client).to receive(:query_range).with(anything,
start: (deployment.created_at - 30.minutes).to_f,
stop: (deployment.created_at + 30.minutes).to_f)
expect(query_result).not_to be_nil
end
end
end
require 'spec_helper'
describe Gitlab::Prometheus::Queries::AdditionalMetricsEnvironmentQuery, lib: true do
include Prometheus::MetricBuilders
let(:client) { double('prometheus_client') }
let(:environment) { create(:environment, slug: 'environment-slug') }
subject(:query_result) { described_class.new(client).query(environment.id) }
around do |example|
Timecop.freeze { example.run }
end
include_examples 'additional metrics query' do
it 'queries using specific time' do
expect(client).to receive(:query_range).with(anything, start: 8.hours.ago.to_f, stop: Time.now.to_f)
expect(query_result).not_to be_nil
end
end
end
require 'spec_helper'
describe Gitlab::Prometheus::Queries::MatchedMetricsQuery, lib: true do
include Prometheus::MetricBuilders
let(:metric_group_class) { Gitlab::Prometheus::MetricGroup }
let(:metric_class) { Gitlab::Prometheus::Metric }
def series_info_with_environment(*more_metrics)
%w{metric_a metric_b}.concat(more_metrics).map { |metric_name| { '__name__' => metric_name, 'environment' => '' } }
end
let(:metric_names) { %w{metric_a metric_b} }
let(:series_info_without_environment) do
[{ '__name__' => 'metric_a' },
{ '__name__' => 'metric_b' }]
end
let(:partially_empty_series_info) { [{ '__name__' => 'metric_a', 'environment' => '' }] }
let(:empty_series_info) { [] }
let(:client) { double('prometheus_client') }
subject { described_class.new(client) }
context 'with one group where two metrics are found' do
before do
allow(metric_group_class).to receive(:all).and_return([simple_metric_group])
allow(client).to receive(:label_values).and_return(metric_names)
end
context 'both metrics in the group pass requirements' do
before do
allow(client).to receive(:series).and_return(series_info_with_environment)
end
it 'responds with both metrics as active' do
expect(subject.query).to eq([{ group: 'name', priority: 1, active_metrics: 2, metrics_missing_requirements: 0 }])
end
end
context 'none of the metrics pass requirements' do
before do
allow(client).to receive(:series).and_return(series_info_without_environment)
end
it 'responds with both metrics missing requirements' do
expect(subject.query).to eq([{ group: 'name', priority: 1, active_metrics: 0, metrics_missing_requirements: 2 }])
end
end
context 'no series information found about the metrics' do
before do
allow(client).to receive(:series).and_return(empty_series_info)
end
it 'responds with both metrics missing requirements' do
expect(subject.query).to eq([{ group: 'name', priority: 1, active_metrics: 0, metrics_missing_requirements: 2 }])
end
end
context 'one of the series info was not found' do
before do
allow(client).to receive(:series).and_return(partially_empty_series_info)
end
it 'responds with one active and one missing metric' do
expect(subject.query).to eq([{ group: 'name', priority: 1, active_metrics: 1, metrics_missing_requirements: 1 }])
end
end
end
context 'with one group where only one metric is found' do
before do
allow(metric_group_class).to receive(:all).and_return([simple_metric_group])
allow(client).to receive(:label_values).and_return('metric_a')
end
context 'both metrics in the group pass requirements' do
before do
allow(client).to receive(:series).and_return(series_info_with_environment)
end
it 'responds with one metric as active and no missing requirements' do
expect(subject.query).to eq([{ group: 'name', priority: 1, active_metrics: 1, metrics_missing_requirements: 0 }])
end
end
context 'no metrics in group pass requirements' do
before do
allow(client).to receive(:series).and_return(series_info_without_environment)
end
it 'responds with no active metrics and one metric missing requirements' do
expect(subject.query).to eq([{ group: 'name', priority: 1, active_metrics: 0, metrics_missing_requirements: 1 }])
end
end
end
context 'with two groups where metrics are found in each group' do
let(:second_metric_group) { simple_metric_group(name: 'nameb', metrics: simple_metrics(added_metric_name: 'metric_c')) }
before do
allow(metric_group_class).to receive(:all).and_return([simple_metric_group, second_metric_group])
allow(client).to receive(:label_values).and_return('metric_c')
end
context 'all metrics in both groups pass requirements' do
before do
allow(client).to receive(:series).and_return(series_info_with_environment('metric_c'))
end
it 'responds with active metrics in both groups and no missing requirements' do
expect(subject.query).to eq([
{ group: 'name', priority: 1, active_metrics: 1, metrics_missing_requirements: 0 },
{ group: 'nameb', priority: 1, active_metrics: 2, metrics_missing_requirements: 0 }
]
)
end
end
context 'no metrics in groups pass requirements' do
before do
allow(client).to receive(:series).and_return(series_info_without_environment)
end
it 'responds with no active metrics and all metrics missing requirements' do
expect(subject.query).to eq([
{ group: 'name', priority: 1, active_metrics: 0, metrics_missing_requirements: 1 },
{ group: 'nameb', priority: 1, active_metrics: 0, metrics_missing_requirements: 2 }
]
)
end
end
end
end
......@@ -119,6 +119,36 @@ describe Gitlab::PrometheusClient, lib: true do
end
end
describe '#series' do
let(:query_url) { prometheus_series_url('series_name', 'other_service') }
around do |example|
Timecop.freeze { example.run }
end
it 'calls endpoint and returns list of series' do
req_stub = stub_prometheus_request(query_url, body: prometheus_series('series_name'))
expected = prometheus_series('series_name').deep_stringify_keys['data']
expect(subject.series('series_name', 'other_service')).to eq(expected)
expect(req_stub).to have_been_requested
end
end
describe '#label_values' do
let(:query_url) { prometheus_label_values_url('__name__') }
it 'calls endpoint and returns label values' do
req_stub = stub_prometheus_request(query_url, body: prometheus_label_values)
expected = prometheus_label_values.deep_stringify_keys['data']
expect(subject.label_values('__name__')).to eq(expected)
expect(req_stub).to have_been_requested
end
end
describe '#query_range' do
let(:prometheus_query) { prometheus_memory_query('env-slug') }
let(:query_url) { prometheus_query_range_url(prometheus_query) }
......
......@@ -30,7 +30,7 @@ describe Deployment, models: true do
end
describe '#includes_commit?' do
let(:project) { create(:project, :repository) }
let(:environment) { create(:environment, project: project) }
let(:deployment) do
create(:deployment, environment: environment, sha: project.commit.id)
......@@ -90,6 +90,36 @@ describe Deployment, models: true do
end
end
describe '#additional_metrics' do
let(:project) { create(:project) }
let(:deployment) { create(:deployment, project: project) }
subject { deployment.additional_metrics }
context 'metrics are disabled' do
it { is_expected.to eq({}) }
end
context 'metrics are enabled' do
let(:simple_metrics) do
{
success: true,
metrics: {},
last_update: 42
}
end
let(:prometheus_service) { double('prometheus_service') }
before do
allow(project).to receive(:prometheus_service).and_return(prometheus_service)
allow(prometheus_service).to receive(:additional_deployment_metrics).and_return(simple_metrics)
end
it { is_expected.to eq(simple_metrics.merge({ deployment_time: deployment.created_at.to_i })) }
end
end
describe '#stop_action' do
let(:build) { create(:ci_build) }
......
......@@ -432,6 +432,99 @@ describe Environment, models: true do
end
end
describe '#has_metrics?' do
subject { environment.has_metrics? }
context 'when the environment is available' do
context 'with a deployment service' do
let(:project) { create(:prometheus_project) }
context 'and a deployment' do
let!(:deployment) { create(:deployment, environment: environment) }
it { is_expected.to be_truthy }
end
context 'but no deployments' do
it { is_expected.to be_falsy }
end
end
context 'without a monitoring service' do
it { is_expected.to be_falsy }
end
end
context 'when the environment is unavailable' do
let(:project) { create(:prometheus_project) }
before do
environment.stop
end
it { is_expected.to be_falsy }
end
end
describe '#additional_metrics' do
let(:project) { create(:prometheus_project) }
subject { environment.additional_metrics }
context 'when the environment has additional metrics' do
before do
allow(environment).to receive(:has_additional_metrics?).and_return(true)
end
it 'returns the additional metrics from the deployment service' do
expect(project.prometheus_service).to receive(:additional_environment_metrics)
.with(environment)
.and_return(:fake_metrics)
is_expected.to eq(:fake_metrics)
end
end
context 'when the environment does not have metrics' do
before do
allow(environment).to receive(:has_additional_metrics?).and_return(false)
end
it { is_expected.to be_nil }
end
end
describe '#has_additional_metrics?' do
subject { environment.has_additional_metrics? }
context 'when the environment is available' do
context 'with a deployment service' do
let(:project) { create(:prometheus_project) }
context 'and a deployment' do
let!(:deployment) { create(:deployment, environment: environment) }
it { is_expected.to be_truthy }
end
context 'but no deployments' do
it { is_expected.to be_falsy }
end
end
context 'without a monitoring service' do
it { is_expected.to be_falsy }
end
end
context 'when the environment is unavailable' do
let(:project) { create(:prometheus_project) }
before do
environment.stop
end
it { is_expected.to be_falsy }
end
end
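The truthy/falsy matrix above suggests the predicate checks three things: an available environment, a configured Prometheus service, and at least one deployment. A rough sketch under that assumption (not the actual implementation):
# Hypothetical sketch of Environment#has_additional_metrics?, inferred from
# the contexts above; the real implementation may differ.
def has_additional_metrics?
  available? && project.prometheus_service.present? && last_deployment.present?
end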
describe '#slug' do
it "is automatically generated" do
expect(environment.slug).not_to be_nil
@@ -65,7 +65,7 @@ describe PrometheusService, models: true, caching: true do
end
it 'returns reactive data' do
- is_expected.to eq(prometheus_data)
+ is_expected.to eq(prometheus_metrics_data)
end
end
end
@@ -86,7 +86,7 @@ describe PrometheusService, models: true, caching: true do
end
it 'returns reactive data' do
- is_expected.to eq(prometheus_data.merge(deployment_time: deployment.created_at.to_i))
+ is_expected.to eq(prometheus_metrics_data.merge(deployment_time: deployment.created_at.to_i))
end
end
end
@@ -116,6 +116,7 @@ describe PrometheusService, models: true, caching: true do
end
it { expect(subject.to_json).to eq(prometheus_data.to_json) }
end
[404, 500].each do |status|
def schema_path(schema)
  schema_directory = "#{Dir.pwd}/spec/fixtures/api/schemas"
  "#{schema_directory}/#{schema}.json"
end

RSpec::Matchers.define :match_response_schema do |schema, **options|
  match do |response|
-   schema_directory = "#{Dir.pwd}/spec/fixtures/api/schemas"
-   schema_path = "#{schema_directory}/#{schema}.json"
-   JSON::Validator.validate!(schema_path, response.body, options)
+   JSON::Validator.validate!(schema_path(schema), response.body, options)
  end
end

RSpec::Matchers.define :match_schema do |schema, **options|
  match do |data|
    JSON::Validator.validate!(schema_path(schema), data, options)
  end
end
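The two matchers differ only in their input: match_response_schema validates a response body, while match_schema validates an already-parsed Ruby hash or array. An illustrative usage sketch; the schema name is the fixture exercised later in this diff, and the response example is an assumption:
# match_response_schema works on a controller/request spec response object:
expect(response).to match_response_schema('prometheus/additional_metrics_query_result')

# match_schema works on plain data, such as a serialized query result:
expect(query_result).to match_schema('prometheus/additional_metrics_query_result')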
RSpec.shared_examples 'additional metrics query' do
include Prometheus::MetricBuilders
let(:metric_group_class) { Gitlab::Prometheus::MetricGroup }
let(:metric_class) { Gitlab::Prometheus::Metric }
let(:metric_names) { %w{metric_a metric_b} }
let(:query_range_result) do
[{ 'metric': {}, 'values': [[1488758662.506, '0.00002996364761904785'], [1488758722.506, '0.00003090239047619091']] }]
end
before do
allow(client).to receive(:label_values).and_return(metric_names)
allow(metric_group_class).to receive(:all).and_return([simple_metric_group(metrics: [simple_metric])])
end
context 'with one group where two metrics are found' do
before do
allow(metric_group_class).to receive(:all).and_return([simple_metric_group])
end
context 'some queries return results' do
before do
allow(client).to receive(:query_range).with('query_range_a', any_args).and_return(query_range_result)
allow(client).to receive(:query_range).with('query_range_b', any_args).and_return(query_range_result)
allow(client).to receive(:query_range).with('query_range_empty', any_args).and_return([])
end
it 'returns group data only for queries with results' do
expected = [
{
group: 'name',
priority: 1,
metrics: [
{
title: 'title', weight: 1, y_label: 'Values', queries: [
{ query_range: 'query_range_a', result: query_range_result },
{ query_range: 'query_range_b', label: 'label', unit: 'unit', result: query_range_result }
]
}
]
}
]
expect(query_result).to match_schema('prometheus/additional_metrics_query_result')
expect(query_result).to eq(expected)
end
end
end
context 'with two groups with one metric each' do
let(:metrics) { [simple_metric(queries: [simple_query])] }
before do
allow(metric_group_class).to receive(:all).and_return(
[
simple_metric_group(name: 'group_a', metrics: [simple_metric(queries: [simple_query])]),
simple_metric_group(name: 'group_b', metrics: [simple_metric(title: 'title_b', queries: [simple_query('b')])])
])
allow(client).to receive(:label_values).and_return(metric_names)
end
context 'both queries return results' do
before do
allow(client).to receive(:query_range).with('query_range_a', any_args).and_return(query_range_result)
allow(client).to receive(:query_range).with('query_range_b', any_args).and_return(query_range_result)
end
it 'returns group data for both queries' do
queries_with_result_a = { queries: [{ query_range: 'query_range_a', result: query_range_result }] }
queries_with_result_b = { queries: [{ query_range: 'query_range_b', result: query_range_result }] }
expect(query_result).to match_schema('prometheus/additional_metrics_query_result')
expect(query_result.count).to eq(2)
expect(query_result).to all(satisfy { |r| r[:metrics].count == 1 })
expect(query_result[0][:metrics].first).to include(queries_with_result_a)
expect(query_result[1][:metrics].first).to include(queries_with_result_b)
end
end
context 'one query returns result' do
before do
allow(client).to receive(:query_range).with('query_range_a', any_args).and_return(query_range_result)
allow(client).to receive(:query_range).with('query_range_b', any_args).and_return([])
end
it 'returns group data only for the query with results' do
queries_with_result = { queries: [{ query_range: 'query_range_a', result: query_range_result }] }
expect(query_result).to match_schema('prometheus/additional_metrics_query_result')
expect(query_result.count).to eq(1)
expect(query_result).to all(satisfy { |r| r[:metrics].count == 1 })
expect(query_result.first[:metrics].first).to include(queries_with_result)
end
end
end
end
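These shared examples expect the including spec to define client and query_result; a hedged inclusion sketch follows. The query class name and its #query signature are assumptions chosen for illustration:
# Hypothetical host spec for the 'additional metrics query' shared examples.
describe Gitlab::Prometheus::Queries::AdditionalMetricsEnvironmentQuery do
  let(:environment) { create(:environment) }
  let(:client) { double('prometheus_client') }
  let(:query_result) { described_class.new(client).query(environment.id) }

  it_behaves_like 'additional metrics query'
end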
module Prometheus
module MetricBuilders
def simple_query(suffix = 'a', **opts)
{ query_range: "query_range_#{suffix}" }.merge(opts)
end
def simple_queries
[simple_query, simple_query('b', label: 'label', unit: 'unit')]
end
def simple_metric(title: 'title', required_metrics: [], queries: [simple_query])
Gitlab::Prometheus::Metric.new(title: title, required_metrics: required_metrics, weight: 1, queries: queries)
end
def simple_metrics(added_metric_name: 'metric_a')
[
simple_metric(required_metrics: %W(#{added_metric_name} metric_b), queries: simple_queries),
simple_metric(required_metrics: [added_metric_name], queries: [simple_query('empty')]),
simple_metric(required_metrics: %w{metric_c})
]
end
def simple_metric_group(name: 'name', metrics: simple_metrics)
Gitlab::Prometheus::MetricGroup.new(name: name, priority: 1, metrics: metrics)
end
end
end
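Taken together, the builders yield one group named 'name' (priority 1) with three metrics whose query ranges are query_range_a/query_range_b, query_range_empty, and query_range_a. A quick illustration, assuming the usual attribute readers on MetricGroup and Metric:
# Illustrative only; assumes MetricGroup#metrics and Metric#queries readers exist.
include Prometheus::MetricBuilders

group = simple_metric_group
group.metrics.size                                        # => 3
group.metrics.first.queries.map { |q| q[:query_range] }   # => ["query_range_a", "query_range_b"]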
@@ -36,6 +36,19 @@ module PrometheusHelpers
"https://prometheus.example.com/api/v1/query_range?#{query}"
end
def prometheus_label_values_url(name)
"https://prometheus.example.com/api/v1/label/#{name}/values"
end
def prometheus_series_url(*matches, start: 8.hours.ago, stop: Time.now)
query = {
match: matches,
start: start.to_f,
end: stop.to_f
}.to_query
"https://prometheus.example.com/api/v1/series?#{query}"
end
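For reference, the helper serializes the match array and the time window into the query string, so a call like the one in the series spec above produces roughly the following (Hash#to_query sorts keys and encodes the array as match[]=...):
prometheus_series_url('series_name', 'other_service')
# => "https://prometheus.example.com/api/v1/series?end=...&match%5B%5D=series_name&match%5B%5D=other_service&start=..."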
def stub_prometheus_request(url, body: {}, status: 200)
WebMock.stub_request(:get, url)
.to_return({
@@ -83,6 +96,19 @@ module PrometheusHelpers
end
def prometheus_data(last_update: Time.now.utc)
{
success: true,
data: {
memory_values: prometheus_values_body('matrix').dig(:data, :result),
memory_current: prometheus_value_body('vector').dig(:data, :result),
cpu_values: prometheus_values_body('matrix').dig(:data, :result),
cpu_current: prometheus_value_body('vector').dig(:data, :result)
},
last_update: last_update
}
end
def prometheus_metrics_data(last_update: Time.now.utc)
{
success: true,
metrics: {
@@ -140,4 +166,37 @@ module PrometheusHelpers
}
}
end
def prometheus_label_values
{
'status': 'success',
'data': %w(job_adds job_controller_rate_limiter_use job_depth job_queue_latency job_work_duration_sum up)
}
end
def prometheus_series(name)
{
'status': 'success',
'data': [
{
'__name__': name,
'container_name': 'gitlab',
'environment': 'mattermost',
'id': '/docker/9953982f95cf5010dfc59d7864564d5f188aaecddeda343699783009f89db667',
'image': 'gitlab/gitlab-ce:8.15.4-ce.1',
'instance': 'minikube',
'job': 'kubernetes-nodes',
'name': 'k8s_gitlab.e6611886_mattermost-4210310111-77z8r_gitlab_2298ae6b-da24-11e6-baee-8e7f67d0eb3a_43536cb6',
'namespace': 'gitlab',
'pod_name': 'mattermost-4210310111-77z8r'
},
{
'__name__': name,
'id': '/docker',
'instance': 'minikube',
'job': 'kubernetes-nodes'
}
]
}
end
end