Commit d313f8d7 authored by Bob Van Landuyt

Add the job size to start message logging

This lets us see how big jobs are in the log, which may help us set a
sensible limit.
parent e8c3cd84
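
For reference, a minimal sketch of the measurement the change below performs: the job hash is serialized with Sidekiq.dump_json and its byte size is recorded. The job hash in this sketch is purely illustrative (hypothetical worker name and arguments), not taken from GitLab.

require 'sidekiq'

# Illustrative job hash only; real Sidekiq job payloads carry more keys.
job = {
  'class'       => 'ExampleWorker',      # hypothetical worker name
  'args'        => [42, 'some-argument'],
  'jid'         => '3b1c2d4e5f6a7b8c',
  'created_at'  => Time.now.to_f,
  'enqueued_at' => Time.now.to_f
}

# Same measurement as in the diff below: serialize the job to JSON and
# take the byte size of the resulting string.
job_size_bytes = Sidekiq.dump_json(job).bytesize
puts "job_size_bytes=#{job_size_bytes}"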
@@ -13,7 +13,7 @@ module Gitlab
         base_payload = parse_job(job)
         ActiveRecord::LogSubscriber.reset_runtime

-        Sidekiq.logger.info log_job_start(base_payload)
+        Sidekiq.logger.info log_job_start(job, base_payload)

         yield
@@ -40,13 +40,15 @@ module Gitlab
         output_payload.merge!(job.slice(*::Gitlab::Metrics::Subscribers::ActiveRecord::DB_COUNTERS))
       end

-      def log_job_start(payload)
+      def log_job_start(job, payload)
         payload['message'] = "#{base_message(payload)}: start"
         payload['job_status'] = 'start'

         scheduling_latency_s = ::Gitlab::InstrumentationHelper.queue_duration_for_job(payload)
         payload['scheduling_latency_s'] = scheduling_latency_s if scheduling_latency_s

+        payload['job_size_bytes'] = Sidekiq.dump_json(job).bytesize
+
         payload
       end
@@ -38,7 +38,8 @@ RSpec.describe Gitlab::SidekiqLogging::StructuredLogger do
         'pid' => Process.pid,
         'created_at' => created_at.to_f,
         'enqueued_at' => created_at.to_f,
-        'scheduling_latency_s' => scheduling_latency_s
+        'scheduling_latency_s' => scheduling_latency_s,
+        'job_size_bytes' => be > 0
       )
     end
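
A note on the spec change: be > 0 is a composable RSpec matcher, so the expectation only asserts that the logged size is positive rather than pinning an exact byte count. A standalone sketch of the same pattern (hypothetical example, not the GitLab spec):

require 'rspec/autorun'

RSpec.describe 'job size logging (illustrative)' do
  it 'accepts any positive job_size_bytes via a composable matcher' do
    payload = { 'job_size_bytes' => 123, 'job_status' => 'start' }

    # `be > 0` matches any value greater than zero, so the expectation
    # does not depend on the exact serialized size.
    expect(payload).to include('job_size_bytes' => be > 0)
  end
end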