Commit 1aa0ceae authored by Douwe Maan

Merge branch '60383-change-common-metrics-schema' into 'master'

Change the schema of common_metrics.yml

Closes #60383

See merge request gitlab-org/gitlab-ce!27283
parents 6376f784 e5966e5d
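
The first file in the diff is the common metrics YAML named in the commit title. As a rough sketch of what the schema change amounts to (field names are taken from the diff below; query strings and values are placeholders, not part of the MR), the flat groups → metrics → queries layout becomes a single dashboard wrapping panel_groups → panels → metrics, and each panel gains a type:

    # Old schema: a top-level list of groups, each holding metrics that hold queries.
    old_schema = [
      { 'group'    => 'Response metrics (NGINX Ingress VTS)',
        'priority' => 10,
        'metrics'  => [
          { 'title'   => 'Throughput',
            'y_label' => 'Requests / Sec',
            'queries' => [
              { 'id' => 'response_metrics_nginx_ingress_throughput_status_code',
                'query_range' => '<promql>', 'unit' => 'req / sec', 'label' => 'Status Code' }
            ] }
        ] }
    ]

    # New schema: one dashboard containing panel_groups, panels, and metrics.
    new_schema = {
      'dashboard'    => 'Environment metrics',
      'priority'     => 1,
      'panel_groups' => [
        { 'group'    => 'Response metrics (NGINX Ingress VTS)',
          'priority' => 10,
          'panels'   => [
            { 'title'   => 'Throughput',
              'type'    => 'area-chart',
              'y_label' => 'Requests / Sec',
              'metrics' => [
                { 'id' => 'response_metrics_nginx_ingress_throughput_status_code',
                  'query_range' => '<promql>', 'unit' => 'req / sec', 'label' => 'Status Code' }
              ] }
          ] }
      ]
    }
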
-# NGINX Ingress metrics for pre-0.16.0 versions
+dashboard: 'Environment metrics'
+priority: 1
+panel_groups:
+# NGINX Ingress metrics for pre-0.16.0 versions
 - group: Response metrics (NGINX Ingress VTS)
   priority: 10
-  metrics:
+  panels:
   - title: "Throughput"
+    type: "area-chart"
     y_label: "Requests / Sec"
     required_metrics:
     - nginx_upstream_responses_total
     weight: 1
-    queries:
+    metrics:
     - id: response_metrics_nginx_ingress_throughput_status_code
       query_range: 'sum(rate(nginx_upstream_responses_total{upstream=~"%{kube_namespace}-%{ci_environment_slug}-.*"}[2m])) by (status_code)'
       unit: req / sec
       label: Status Code
       series:
       - label: status_code
         when:
         - value: 2xx
           color: green
         - value: 4xx
           color: orange
         - value: 5xx
           color: red
   - title: "Latency"
+    type: "area-chart"
     y_label: "Latency (ms)"
     required_metrics:
     - nginx_upstream_response_msecs_avg
     weight: 1
-    queries:
+    metrics:
     - id: response_metrics_nginx_ingress_latency_pod_average
       query_range: 'avg(nginx_upstream_response_msecs_avg{upstream=~"%{kube_namespace}-%{ci_environment_slug}-.*"})'
       label: Pod average
       unit: ms
   - title: "HTTP Error Rate"
+    type: "area-chart"
     y_label: "HTTP Errors"
     required_metrics:
     - nginx_upstream_responses_total
     weight: 1
-    queries:
+    metrics:
     - id: response_metrics_nginx_ingress_http_error_rate
       query_range: 'sum(rate(nginx_upstream_responses_total{status_code="5xx", upstream=~"%{kube_namespace}-%{ci_environment_slug}-.*"}[2m])) / sum(rate(nginx_upstream_responses_total{upstream=~"%{kube_namespace}-%{ci_environment_slug}-.*"}[2m])) * 100'
       label: 5xx Errors
@@ -44,227 +35,179 @@
 # NGINX Ingress metrics for post-0.16.0 versions
 - group: Response metrics (NGINX Ingress)
   priority: 10
-  metrics:
+  panels:
   - title: "Throughput"
+    type: "area-chart"
     y_label: "Requests / Sec"
     required_metrics:
     - nginx_ingress_controller_requests
     weight: 1
-    queries:
+    metrics:
     - id: response_metrics_nginx_ingress_16_throughput_status_code
       query_range: 'sum(label_replace(rate(nginx_ingress_controller_requests{namespace="%{kube_namespace}",ingress=~".*%{ci_environment_slug}.*"}[2m]), "status_code", "${1}xx", "status", "(.)..")) by (status_code)'
       unit: req / sec
       label: Status Code
       series:
       - label: status_code
         when:
         - value: 2xx
           color: green
         - value: 3xx
           color: blue
         - value: 4xx
           color: orange
         - value: 5xx
           color: red
   - title: "Latency"
+    type: "area-chart"
     y_label: "Latency (ms)"
     required_metrics:
     - nginx_ingress_controller_ingress_upstream_latency_seconds_sum
     weight: 1
-    queries:
+    metrics:
     - id: response_metrics_nginx_ingress_16_latency_pod_average
       query_range: 'sum(rate(nginx_ingress_controller_ingress_upstream_latency_seconds_sum{namespace="%{kube_namespace}",ingress=~".*%{ci_environment_slug}.*"}[2m])) / sum(rate(nginx_ingress_controller_ingress_upstream_latency_seconds_count{namespace="%{kube_namespace}",ingress=~".*%{ci_environment_slug}.*"}[2m])) * 1000'
       label: Pod average
       unit: ms
   - title: "HTTP Error Rate"
+    type: "area-chart"
     y_label: "HTTP Errors"
     required_metrics:
     - nginx_ingress_controller_requests
     weight: 1
-    queries:
+    metrics:
     - id: response_metrics_nginx_ingress_16_http_error_rate
       query_range: 'sum(rate(nginx_ingress_controller_requests{status=~"5.*",namespace="%{kube_namespace}",ingress=~".*%{ci_environment_slug}.*"}[2m])) / sum(rate(nginx_ingress_controller_requests{namespace="%{kube_namespace}",ingress=~".*%{ci_environment_slug}.*"}[2m])) * 100'
       label: 5xx Errors
       unit: "%"
 - group: Response metrics (HA Proxy)
   priority: 10
-  metrics:
+  panels:
   - title: "Throughput"
+    type: "area-chart"
     y_label: "Requests / Sec"
     required_metrics:
     - haproxy_frontend_http_requests_total
     weight: 1
-    queries:
+    metrics:
     - id: response_metrics_ha_proxy_throughput_status_code
       query_range: 'sum(rate(haproxy_frontend_http_requests_total{%{environment_filter}}[2m])) by (code)'
       unit: req / sec
       label: Status Code
       series:
       - label: status_code
         when:
         - value: 2xx
           color: green
         - value: 4xx
           color: yellow
         - value: 5xx
           color: red
   - title: "HTTP Error Rate"
+    type: "area-chart"
     y_label: "Error Rate (%)"
     required_metrics:
     - haproxy_frontend_http_responses_total
     weight: 1
-    queries:
+    metrics:
     - id: response_metrics_ha_proxy_http_error_rate
       query_range: 'sum(rate(haproxy_frontend_http_responses_total{code="5xx",%{environment_filter}}[2m])) / sum(rate(haproxy_frontend_http_responses_total{%{environment_filter}}[2m]))'
       label: HTTP Errors
       unit: "%"
 - group: Response metrics (AWS ELB)
   priority: 10
-  metrics:
+  panels:
   - title: "Throughput"
+    type: "area-chart"
     y_label: "Requests / Sec"
     required_metrics:
     - aws_elb_request_count_sum
     weight: 1
-    queries:
+    metrics:
     - id: response_metrics_aws_elb_throughput_requests
       query_range: 'sum(aws_elb_request_count_sum{%{environment_filter}}) / 60'
       label: Total
       unit: req / sec
   - title: "Latency"
+    type: "area-chart"
     y_label: "Latency (ms)"
     required_metrics:
     - aws_elb_latency_average
     weight: 1
-    queries:
+    metrics:
     - id: response_metrics_aws_elb_latency_average
       query_range: 'avg(aws_elb_latency_average{%{environment_filter}}) * 1000'
       label: Average
       unit: ms
   - title: "HTTP Error Rate"
+    type: "area-chart"
     y_label: "Error Rate (%)"
     required_metrics:
     - aws_elb_request_count_sum
     - aws_elb_httpcode_backend_5_xx_sum
     weight: 1
-    queries:
+    metrics:
     - id: response_metrics_aws_elb_http_error_rate
       query_range: 'sum(aws_elb_httpcode_backend_5_xx_sum{%{environment_filter}}) / sum(aws_elb_request_count_sum{%{environment_filter}})'
       label: HTTP Errors
       unit: "%"
 - group: Response metrics (NGINX)
   priority: 10
-  metrics:
+  panels:
   - title: "Throughput"
+    type: "area-chart"
     y_label: "Requests / Sec"
     required_metrics:
     - nginx_server_requests
     weight: 1
-    queries:
+    metrics:
     - id: response_metrics_nginx_throughput_status_code
       query_range: 'sum(rate(nginx_server_requests{server_zone!="*", server_zone!="_", %{environment_filter}}[2m])) by (code)'
       unit: req / sec
       label: Status Code
       series:
       - label: status_code
         when:
         - value: 2xx
           color: green
         - value: 4xx
           color: orange
         - value: 5xx
           color: red
   - title: "Latency"
+    type: "area-chart"
     y_label: "Latency (ms)"
     required_metrics:
     - nginx_server_requestMsec
     weight: 1
-    queries:
+    metrics:
     - id: response_metrics_nginx_latency
       query_range: 'avg(nginx_server_requestMsec{%{environment_filter}})'
       label: Upstream
       unit: ms
   - title: "HTTP Error Rate"
+    type: "area-chart"
     y_label: "HTTP 500 Errors / Sec"
     required_metrics:
     - nginx_server_requests
     weight: 1
-    queries:
+    metrics:
     - id: response_metrics_nginx_http_error_rate
       query_range: 'sum(rate(nginx_server_requests{code="5xx", %{environment_filter}}[2m]))'
       label: HTTP Errors
       unit: "errors / sec"
 - group: System metrics (Kubernetes)
   priority: 5
-  metrics:
+  panels:
   - title: "Memory Usage (Total)"
+    type: "area-chart"
     y_label: "Total Memory Used"
     required_metrics:
     - container_memory_usage_bytes
     weight: 4
-    queries:
+    metrics:
     - id: system_metrics_kubernetes_container_memory_total
       query_range: 'avg(sum(container_memory_usage_bytes{container_name!="POD",pod_name=~"^%{ci_environment_slug}-(.*)",namespace="%{kube_namespace}"}) by (job)) without (job) /1024/1024/1024'
       label: Total
       unit: GB
   - title: "Core Usage (Total)"
+    type: "area-chart"
     y_label: "Total Cores"
     required_metrics:
     - container_cpu_usage_seconds_total
     weight: 3
-    queries:
+    metrics:
     - id: system_metrics_kubernetes_container_cores_total
       query_range: 'avg(sum(rate(container_cpu_usage_seconds_total{container_name!="POD",pod_name=~"^%{ci_environment_slug}-(.*)",namespace="%{kube_namespace}"}[15m])) by (job)) without (job)'
       label: Total
       unit: "cores"
   - title: "Memory Usage (Pod average)"
+    type: "area-chart"
     y_label: "Memory Used per Pod"
     required_metrics:
     - container_memory_usage_bytes
     weight: 2
-    queries:
+    metrics:
     - id: system_metrics_kubernetes_container_memory_average
       query_range: 'avg(sum(container_memory_usage_bytes{container_name!="POD",pod_name=~"^%{ci_environment_slug}-(.*)",namespace="%{kube_namespace}"}) by (job)) without (job) / count(avg(container_memory_usage_bytes{container_name!="POD",pod_name=~"^%{ci_environment_slug}-(.*)",namespace="%{kube_namespace}"}) without (job)) /1024/1024'
       label: Pod average
       unit: MB
   - title: "Canary: Memory Usage (Pod Average)"
+    type: "area-chart"
     y_label: "Memory Used per Pod"
     required_metrics:
     - container_memory_usage_bytes
     weight: 2
-    queries:
+    metrics:
     - id: system_metrics_kubernetes_container_memory_average_canary
       query_range: 'avg(sum(container_memory_usage_bytes{container_name!="POD",pod_name=~"^%{ci_environment_slug}-canary-(.*)",namespace="%{kube_namespace}"}) by (job)) without (job) / count(avg(container_memory_usage_bytes{container_name!="POD",pod_name=~"^%{ci_environment_slug}-canary-(.*)",namespace="%{kube_namespace}"}) without (job)) /1024/1024'
       label: Pod average
       unit: MB
       track: canary
   - title: "Core Usage (Pod Average)"
+    type: "area-chart"
     y_label: "Cores per Pod"
     required_metrics:
     - container_cpu_usage_seconds_total
     weight: 1
-    queries:
+    metrics:
     - id: system_metrics_kubernetes_container_core_usage
       query_range: 'avg(sum(rate(container_cpu_usage_seconds_total{container_name!="POD",pod_name=~"^%{ci_environment_slug}-(.*)",namespace="%{kube_namespace}"}[15m])) by (job)) without (job) / count(sum(rate(container_cpu_usage_seconds_total{container_name!="POD",pod_name=~"^%{ci_environment_slug}-(.*)",namespace="%{kube_namespace}"}[15m])) by (pod_name))'
       label: Pod average
       unit: "cores"
   - title: "Canary: Core Usage (Pod Average)"
+    type: "area-chart"
     y_label: "Cores per Pod"
     required_metrics:
     - container_cpu_usage_seconds_total
     weight: 1
-    queries:
+    metrics:
     - id: system_metrics_kubernetes_container_core_usage_canary
       query_range: 'avg(sum(rate(container_cpu_usage_seconds_total{container_name!="POD",pod_name=~"^%{ci_environment_slug}-canary-(.*)",namespace="%{kube_namespace}"}[15m])) by (job)) without (job) / count(sum(rate(container_cpu_usage_seconds_total{container_name!="POD",pod_name=~"^%{ci_environment_slug}-canary-(.*)",namespace="%{kube_namespace}"}[15m])) by (pod_name))'
       label: Pod average
       unit: "cores"
       track: canary
   - title: "Knative function invocations"
+    type: "area-chart"
     y_label: "Invocations"
     required_metrics:
     - istio_revision_request_count
     weight: 1
-    queries:
+    metrics:
     - id: system_metrics_knative_function_invocation_count
       query_range: 'floor(sum(rate(istio_revision_request_count{destination_configuration="%{function_name}", destination_namespace="%{kube_namespace}"}[1m])/3))'
       label: invocations / minute
......
@@ -53,7 +53,7 @@ module Importers
     private

     def process_content(&blk)
-      content.map do |group|
+      content['panel_groups'].map do |group|
         process_group(group, &blk)
       end
     end
@@ -63,28 +63,28 @@ module Importers
         group: find_group_title_key(group['group'])
       }

-      group['metrics'].map do |metric|
-        process_metric(metric, attributes, &blk)
+      group['panels'].map do |panel|
+        process_panel(panel, attributes, &blk)
       end
     end

-    def process_metric(metric, attributes, &blk)
+    def process_panel(panel, attributes, &blk)
       attributes = attributes.merge(
-        title: metric['title'],
-        y_label: metric['y_label'])
+        title: panel['title'],
+        y_label: panel['y_label'])

-      metric['queries'].map do |query|
-        process_metric_query(query, attributes, &blk)
+      panel['metrics'].map do |metric_details|
+        process_metric_details(metric_details, attributes, &blk)
       end
     end

-    def process_metric_query(query, attributes, &blk)
+    def process_metric_details(metric_details, attributes, &blk)
       attributes = attributes.merge(
-        legend: query['label'],
-        query: query['query_range'],
-        unit: query['unit'])
+        legend: metric_details['label'],
+        query: metric_details['query_range'],
+        unit: metric_details['unit'])

-      yield(query['id'], attributes)
+      yield(metric_details['id'], attributes)
     end

     def find_or_build_metric!(id)
......
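
For orientation, here is a minimal sketch of how the renamed methods now walk the structure, from panel_groups to panels to metrics, assuming content is the parsed YAML shown above. The file path and variable names below are illustrative, not taken from the MR; the spec changes that follow exercise the same traversal.

    require 'yaml'

    # Parse the common metrics file (path assumed for illustration).
    content = YAML.safe_load(File.read('config/prometheus/common_metrics.yml'))

    content['panel_groups'].each do |group|
      group['panels'].each do |panel|
        panel['metrics'].each do |metric|
          # Roughly the attributes the importer yields per metric identifier.
          puts "#{metric['id']}: #{panel['title']} / #{metric['label']} (#{metric['unit']})"
        end
      end
    end
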
@@ -23,10 +23,10 @@ describe Importers::CommonMetricsImporter do
   subject { described_class.new }

   context "does import common_metrics.yml" do
-    let(:groups) { subject.content }
-    let(:metrics) { groups.map { |group| group['metrics'] }.flatten }
-    let(:queries) { metrics.map { |group| group['queries'] }.flatten }
-    let(:query_ids) { queries.map { |query| query['id'] } }
+    let(:groups) { subject.content['panel_groups'] }
+    let(:panels) { groups.map { |group| group['panels'] }.flatten }
+    let(:metrics) { panels.map { |group| group['metrics'] }.flatten }
+    let(:metric_ids) { metrics.map { |metric| metric['id'] } }

     before do
       subject.execute
@@ -36,20 +36,20 @@ describe Importers::CommonMetricsImporter do
       expect(PrometheusMetric.common.group(:group).count.count).to eq(groups.count)
     end

-    it "has the same amount of metrics" do
-      expect(PrometheusMetric.common.group(:group, :title).count.count).to eq(metrics.count)
+    it "has the same amount of panels" do
+      expect(PrometheusMetric.common.group(:group, :title).count.count).to eq(panels.count)
     end

-    it "has the same amount of queries" do
-      expect(PrometheusMetric.common.count).to eq(queries.count)
+    it "has the same amount of metrics" do
+      expect(PrometheusMetric.common.count).to eq(metrics.count)
     end

     it "does not have duplicate IDs" do
-      expect(query_ids).to eq(query_ids.uniq)
+      expect(metric_ids).to eq(metric_ids.uniq)
     end

     it "imports all IDs" do
-      expect(PrometheusMetric.common.pluck(:identifier)).to contain_exactly(*query_ids)
+      expect(PrometheusMetric.common.pluck(:identifier)).to contain_exactly(*metric_ids)
     end
   end
@@ -65,24 +65,26 @@ describe Importers::CommonMetricsImporter do
   context 'does import properly all fields' do
     let(:query_identifier) { 'response-metric' }

-    let(:group) do
+    let(:dashboard) do
       {
-        group: 'Response metrics (NGINX Ingress)',
-        metrics: [{
-          title: "Throughput",
-          y_label: "Requests / Sec",
-          queries: [{
-            id: query_identifier,
-            query_range: 'my-query',
-            unit: 'my-unit',
-            label: 'status code'
+        panel_groups: [{
+          group: 'Response metrics (NGINX Ingress)',
+          panels: [{
+            title: "Throughput",
+            y_label: "Requests / Sec",
+            metrics: [{
+              id: query_identifier,
+              query_range: 'my-query',
+              unit: 'my-unit',
+              label: 'status code'
+            }]
           }]
         }]
       }
     end

     before do
-      expect(subject).to receive(:content) { [group.deep_stringify_keys] }
+      expect(subject).to receive(:content) { dashboard.deep_stringify_keys }
     end

     shared_examples 'stores metric' do
......