Commit 3d7ad993 authored by Ryan Cobb, committed by Peter Leitzen

Remove deprecated influxdb libs

This removes the deprecated InfluxDB libraries and the `influxdb` gem.
parent d3846406
@@ -324,7 +324,6 @@ gem 'derailed_benchmarks', require: false
# Metrics
group :metrics do
gem 'method_source', '~> 0.8', require: false
gem 'influxdb', '~> 0.2', require: false
# Prometheus
gem 'prometheus-client-mmap', '~> 0.10.0'
......
@@ -153,7 +153,6 @@ GEM
activemodel (>= 4.0.0)
activesupport (>= 4.0.0)
mime-types (>= 1.16)
cause (0.1)
character_set (1.1.2)
charlock_holmes (0.7.6)
childprocess (3.0.0)
@@ -535,9 +534,6 @@ GEM
i18n_data (0.8.0)
icalendar (2.4.1)
ice_nine (0.11.2)
influxdb (0.2.3)
cause
json
invisible_captcha (0.12.1)
rails (>= 3.2.0)
ipaddress (0.8.3)
@@ -1276,7 +1272,6 @@ DEPENDENCIES
html2text
httparty (~> 0.16.4)
icalendar
influxdb (~> 0.2)
invisible_captcha (~> 0.12.1)
jira-ruby (~> 2.0.0)
js_regex (~> 3.1)
......
---
title: Remove rake task `gitlab:track_deployment`
merge_request: 31404
author:
type: removed
@@ -61,6 +61,8 @@ if !Rails.env.test? && Gitlab::Metrics.prometheus_metrics_enabled?
Gitlab::Metrics::Samplers::PumaSampler.instance(Settings.monitoring.puma_sampler_interval).start
end
Gitlab::Metrics.gauge(:deployments, 'GitLab Version', {}, :max).set({ version: Gitlab::VERSION }, 1)
Gitlab::Metrics::RequestsRackMiddleware.initialize_http_request_duration_seconds
rescue IOError => e
Gitlab::ErrorTracking.track_exception(e)
......
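With the `gitlab:track_deployment` Rake task removed, the initializer hunk above records the running version as a Prometheus gauge at boot instead of writing it to InfluxDB. As a rough way to confirm this, one could scrape the Rails metrics endpoint and look for the `deployments` sample; the endpoint path, port, and output shown here are assumptions about a typical install, not something this commit adds.

```ruby
# Minimal sketch (assumed endpoint/port, not GitLab code): fetch the metrics
# endpoint and print the `deployments` gauge set by the initializer at boot.
require 'net/http'

body = Net::HTTP.get(URI('http://localhost:8080/-/metrics'))
puts body.lines.grep(/\Adeployments\{/)
# e.g. deployments{version="12.4.0"} 1   (illustrative output)
```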
@@ -196,27 +196,6 @@ production machine after installing the package, there should be no reason to re-run
`rake gitlab:assets:compile` on the production machine. If you suspect that assets
have been corrupted, you should reinstall the omnibus package.
## Tracking Deployments
GitLab provides a Rake task that lets you track deployments in GitLab
Performance Monitoring. This Rake task simply stores the current GitLab version
in the GitLab Performance Monitoring database.
To run `gitlab:track_deployment`:
**Omnibus Installation**
```shell
sudo gitlab-rake gitlab:track_deployment
```
**Source Installation**
```shell
cd /home/git/gitlab
sudo -u git -H bundle exec rake gitlab:track_deployment RAILS_ENV=production
```
## Check TCP connectivity to a remote site
Sometimes you need to know if your GitLab installation can connect to a TCP
......
# frozen_string_literal: true
module Gitlab
module Metrics
module InfluxDb
extend ActiveSupport::Concern
include Gitlab::Metrics::Methods
# Prometheus histogram buckets used for arbitrary code measurements
EXECUTION_MEASUREMENT_BUCKETS = [0.001, 0.01, 0.1, 1].freeze
MUTEX = Mutex.new
private_constant :MUTEX
class_methods do
def influx_metrics_enabled?
settings[:enabled] || false
end
def settings
@settings ||= begin
current_settings = Gitlab::CurrentSettings.current_application_settings
{
enabled: current_settings[:metrics_enabled],
pool_size: current_settings[:metrics_pool_size],
timeout: current_settings[:metrics_timeout],
method_call_threshold: current_settings[:metrics_method_call_threshold],
host: current_settings[:metrics_host],
port: current_settings[:metrics_port],
sample_interval: current_settings[:metrics_sample_interval] || 15,
packet_size: current_settings[:metrics_packet_size] || 1
}
end
end
def mri?
RUBY_ENGINE == 'ruby'
end
def method_call_threshold
# This is memoized since this method is called for every instrumented
# method. Loading data from an external cache on every method call slows
# things down too much.
# in milliseconds
@method_call_threshold ||= settings[:method_call_threshold]
end
def submit_metrics(metrics)
prepared = prepare_metrics(metrics)
pool&.with do |connection|
prepared.each_slice(settings[:packet_size]) do |slice|
connection.write_points(slice)
rescue StandardError
end
end
rescue Errno::EADDRNOTAVAIL, SocketError => ex
Gitlab::EnvironmentLogger.error('Cannot resolve InfluxDB address. GitLab Performance Monitoring will not work.')
Gitlab::EnvironmentLogger.error(ex)
end
def prepare_metrics(metrics)
metrics.map do |hash|
new_hash = hash.symbolize_keys
new_hash[:tags].each do |key, value|
if value.blank?
new_hash[:tags].delete(key)
else
new_hash[:tags][key] = escape_value(value)
end
end
new_hash
end
end
def escape_value(value)
value.to_s.gsub('=', '\\=')
end
# Measures the execution time of a block.
#
# Example:
#
# Gitlab::Metrics.measure(:find_by_username_duration) do
# UserFinder.new(some_username).find_by_username
# end
#
# name - The name of the field to store the execution time in.
#
# Returns the value yielded by the supplied block.
def measure(name)
trans = current_transaction
return yield unless trans
real_start = Time.now.to_f
cpu_start = System.cpu_time
retval = yield
cpu_stop = System.cpu_time
real_stop = Time.now.to_f
real_time = (real_stop - real_start)
cpu_time = cpu_stop - cpu_start
real_duration_seconds = fetch_histogram("gitlab_#{name}_real_duration_seconds".to_sym) do
docstring "Measure #{name}"
base_labels Transaction::BASE_LABELS
buckets EXECUTION_MEASUREMENT_BUCKETS
end
real_duration_seconds.observe(trans.labels, real_time)
cpu_duration_seconds = fetch_histogram("gitlab_#{name}_cpu_duration_seconds".to_sym) do
docstring "Measure #{name}"
base_labels Transaction::BASE_LABELS
buckets EXECUTION_MEASUREMENT_BUCKETS
with_feature "prometheus_metrics_measure_#{name}_cpu_duration"
end
cpu_duration_seconds.observe(trans.labels, cpu_time)
# InfluxDB stores the _real_time and _cpu_time values as milliseconds
trans.increment("#{name}_real_time", real_time.in_milliseconds, false)
trans.increment("#{name}_cpu_time", cpu_time.in_milliseconds, false)
trans.increment("#{name}_call_count", 1, false)
retval
end
# Sets the action of the current transaction (if any)
#
# action - The name of the action.
def action=(action)
trans = current_transaction
trans&.action = action
end
# Tracks an event.
#
# See `Gitlab::Metrics::Transaction#add_event` for more details.
def add_event(*args)
current_transaction&.add_event(*args)
end
# Returns the prefix to use for the name of a series.
def series_prefix
@series_prefix ||= Gitlab::Runtime.sidekiq? ? 'sidekiq_' : 'rails_'
end
# Allow access from other metrics-related middlewares
def current_transaction
Transaction.current
end
# When enabled this should be set before being used, as the usual pattern
# "@foo ||= bar" is _not_ thread-safe.
def pool
if influx_metrics_enabled?
if @pool.nil?
MUTEX.synchronize do
@pool ||= ConnectionPool.new(size: settings[:pool_size], timeout: settings[:timeout]) do
host = settings[:host]
port = settings[:port]
InfluxDB::Client
.new(udp: { host: host, port: port })
end
end
end
@pool
end
end
end
end
end
end
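The `pool` method above wraps its memoization in `MUTEX.synchronize` because, as its comment notes, the usual `@foo ||= bar` pattern is not thread-safe. A self-contained sketch of that double-checked locking idiom, with illustrative names only:

```ruby
# Illustrative double-checked locking: only one thread builds the shared pool,
# and later callers skip the lock entirely once @pool is set.
class LazyPool
  MUTEX = Mutex.new

  def self.pool
    return @pool if @pool # fast path, no locking once initialised

    MUTEX.synchronize do
      # Re-check inside the lock so a second waiting thread does not rebuild it.
      @pool ||= build_pool
    end

    @pool
  end

  def self.build_pool
    Object.new # stand-in for ConnectionPool.new(...)
  end
end
```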
@@ -49,19 +49,6 @@ module Gitlab
retval
end
# Returns a Metric instance of the current method call.
def to_metric
Metric.new(
Instrumentation.series,
{
duration: real_time.in_milliseconds.to_i,
cpu_duration: cpu_time.in_milliseconds.to_i,
call_count: call_count
},
method: @name
)
end
# Returns true if the total runtime of this method exceeds the method call
# threshold.
def above_threshold?
......
# frozen_string_literal: true
module Gitlab
module Metrics
# Class for storing details of a single metric (label, value, etc).
class Metric
JITTER_RANGE = (0.000001..0.001).freeze
attr_reader :series, :values, :tags, :type
# series - The name of the series (as a String) to store the metric in.
# values - A Hash containing the values to store.
# tags - A Hash containing extra tags to add to the metrics.
def initialize(series, values, tags = {}, type = :metric)
@values = values
@series = series
@tags = tags
@type = type
end
def event?
type == :event
end
# Returns a Hash in a format that can be directly written to InfluxDB.
def to_hash
# InfluxDB overwrites an existing point if a new point has the same
# series, tag set, and timestamp. In a highly concurrent environment
# this means that using the number of seconds since the Unix epoch is
# inevitably going to collide with another timestamp. For example, two
# Rails requests processed by different processes may end up generating
# metrics using the _exact_ same timestamp (in seconds).
#
# Due to the way InfluxDB is set up there's no solution to this problem,
# all we can do is lower the amount of collisions. We do this by using
# System.real_time which returns the nanoseconds as a Float providing
# greater accuracy. We then add a small random value that is large
# enough to distinguish most timestamps but small enough to not alter
# the timestamp significantly.
#
# See https://gitlab.com/gitlab-com/operations/issues/175 for more
# information.
time = System.real_time(:nanosecond) + rand(JITTER_RANGE)
{
series: @series,
tags: @tags,
values: @values,
timestamp: time.to_i
}
end
end
end
end
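For reference, the Hash shape `Metric#to_hash` produces for the InfluxDB writer looks like the sketch below; the values are invented, and running it assumes a console on a release that still ships this class:

```ruby
# Illustrative only; series, tags and values are made up.
metric = Gitlab::Metrics::Metric.new(
  'rails_transactions',
  { duration: 12 },
  { method: 'UsersController#show' }
)

metric.to_hash
# => {
#      series: "rails_transactions",
#      tags: { method: "UsersController#show" },
#      values: { duration: 12 },
#      timestamp: 1571234567123456789  # nanoseconds plus a small random jitter
#    }
```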
# frozen_string_literal: true
module Gitlab
module Metrics
module Samplers
# Class that sends certain metrics to InfluxDB at a specific interval.
#
# This class is used to gather statistics that can't be directly associated
# with a transaction such as system memory usage, garbage collection
# statistics, etc.
class InfluxSampler < BaseSampler
# interval - The sampling interval in seconds.
def initialize(interval = ::Gitlab::Metrics.settings[:sample_interval])
super(interval)
@last_step = nil
@metrics = []
end
def sample
sample_memory_usage
sample_file_descriptors
flush
ensure
@metrics.clear
end
def flush
::Gitlab::Metrics.submit_metrics(@metrics.map(&:to_hash))
end
def sample_memory_usage
add_metric('memory_usage', value: System.memory_usage_rss)
end
def sample_file_descriptors
add_metric('file_descriptors', value: System.file_descriptor_count)
end
def add_metric(series, values, tags = {})
prefix = Gitlab::Runtime.sidekiq? ? 'sidekiq_' : 'rails_'
@metrics << Metric.new("#{prefix}#{series}", values, tags)
end
end
end
end
end
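The sampler above follows a simple collect-then-flush loop driven by the interval passed to `BaseSampler`. A self-contained sketch of the same pattern, using illustrative names and a GC statistic as the sampled value:

```ruby
# Generic periodic sampler sketch: gather a few readings on a timer, flush them
# as one batch, then clear the buffer (mirroring InfluxSampler#sample above).
class TinySampler
  def initialize(interval_seconds = 15)
    @interval = interval_seconds
    @metrics = []
  end

  def start
    Thread.new do
      loop do
        sample
        sleep(@interval)
      end
    end
  end

  def sample
    @metrics << { series: 'heap_live_slots', value: GC.stat(:heap_live_slots) }
    flush
  ensure
    @metrics.clear
  end

  def flush
    @metrics.each { |metric| puts metric.inspect } # stand-in for submit_metrics
  end
end

TinySampler.new(5).start
```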
namespace :gitlab do
desc 'GitLab | Tracks a deployment in GitLab Performance Monitoring'
task track_deployment: :environment do
metric = Gitlab::Metrics::Metric
.new('deployments', version: Gitlab::VERSION)
Gitlab::Metrics.submit_metrics([metric.to_hash])
end
end
@@ -76,25 +76,6 @@ describe Gitlab::Metrics::MethodCall do
end
end
describe '#to_metric' do
it 'returns a Metric instance' do
expect(method_call).to receive(:real_time).and_return(4.0001).twice
expect(method_call).to receive(:cpu_time).and_return(3.0001)
method_call.measure { 'foo' }
metric = method_call.to_metric
expect(metric).to be_an_instance_of(Gitlab::Metrics::Metric)
expect(metric.series).to eq('rails_method_calls')
expect(metric.values[:duration]).to eq(4000)
expect(metric.values[:cpu_duration]).to eq(3000)
expect(metric.values[:call_count]).to be_an(Integer)
expect(metric.tags).to eq({ method: 'Foo#bar' })
end
end
describe '#above_threshold?' do
before do
allow(Gitlab::Metrics).to receive(:method_call_threshold).and_return(100)
......
# frozen_string_literal: true
require 'spec_helper'
describe Gitlab::Metrics::Metric do
let(:metric) do
described_class.new('foo', { number: 10 }, { host: 'localtoast' })
end
describe '#series' do
subject { metric.series }
it { is_expected.to eq('foo') }
end
describe '#values' do
subject { metric.values }
it { is_expected.to eq({ number: 10 }) }
end
describe '#tags' do
subject { metric.tags }
it { is_expected.to eq({ host: 'localtoast' }) }
end
describe '#type' do
subject { metric.type }
it { is_expected.to eq(:metric) }
end
describe '#event?' do
it 'returns false for a regular metric' do
expect(metric.event?).to eq(false)
end
it 'returns true for an event metric' do
expect(metric).to receive(:type).and_return(:event)
expect(metric.event?).to eq(true)
end
end
describe '#to_hash' do
it 'returns a Hash' do
expect(metric.to_hash).to be_an_instance_of(Hash)
end
describe 'the returned Hash' do
let(:hash) { metric.to_hash }
it 'includes the series' do
expect(hash[:series]).to eq('foo')
end
it 'includes the tags' do
expect(hash[:tags]).to be_an_instance_of(Hash)
end
it 'includes the values' do
expect(hash[:values]).to eq({ number: 10 })
end
it 'includes the timestamp' do
expect(hash[:timestamp]).to be_an(Integer)
end
end
end
end
# frozen_string_literal: true
require 'spec_helper'
describe Gitlab::Metrics::Samplers::InfluxSampler do
let(:sampler) { described_class.new(5) }
describe '#start' do
it 'runs once and gathers a sample at a given interval' do
expect(sampler).to receive(:sleep).with(a_kind_of(Numeric)).twice
expect(sampler).to receive(:sample).once
expect(sampler).to receive(:running).and_return(true, false)
sampler.start.join
end
end
describe '#sample' do
it 'samples various statistics' do
expect(sampler).to receive(:sample_memory_usage)
expect(sampler).to receive(:sample_file_descriptors)
expect(sampler).to receive(:flush)
sampler.sample
end
end
describe '#flush' do
it 'submits the metrics' do
expect(Gitlab::Metrics).to receive(:submit_metrics)
.with([an_instance_of(Hash)])
sampler.sample_memory_usage
sampler.flush
end
end
describe '#sample_memory_usage' do
it 'adds a metric containing the memory usage' do
expect(Gitlab::Metrics::System).to receive(:memory_usage_rss)
.and_return(9000)
expect(sampler).to receive(:add_metric)
.with(/memory_usage/, value: 9000)
.and_call_original
sampler.sample_memory_usage
end
end
describe '#sample_file_descriptors' do
it 'adds a metric containing the amount of open file descriptors' do
expect(Gitlab::Metrics::System).to receive(:file_descriptor_count)
.and_return(4)
expect(sampler).to receive(:add_metric)
.with(/file_descriptors/, value: 4)
.and_call_original
sampler.sample_file_descriptors
end
end
describe '#add_metric' do
it 'prefixes the series name for a Rails process' do
expect(Gitlab::Runtime).to receive(:sidekiq?).and_return(false)
expect(Gitlab::Metrics::Metric).to receive(:new)
.with('rails_cats', { value: 10 }, {})
.and_call_original
sampler.add_metric('cats', value: 10)
end
it 'prefixes the series name for a Sidekiq process' do
expect(Gitlab::Runtime).to receive(:sidekiq?).and_return(true)
expect(Gitlab::Metrics::Metric).to receive(:new)
.with('sidekiq_cats', { value: 10 }, {})
.and_call_original
sampler.add_metric('cats', value: 10)
end
end
describe '#sleep_interval' do
it 'returns a Numeric' do
expect(sampler.sleep_interval).to be_a_kind_of(Numeric)
end
# Testing random behaviour is very hard, so treat this test as a basic smoke
# test instead of a very accurate behaviour/unit test.
it 'does not return the same interval twice in a row' do
last = nil
100.times do
interval = sampler.sleep_interval
expect(interval).not_to eq(last)
last = interval
end
end
end
end