Commit 361c81db authored by Wander Lairson Costa, committed by Jens Axboe

blktrace: switch trace spinlock to a raw spinlock

The running_trace_lock protects running_trace_list and is acquired
within the tracepoint, which implies disabled preemption. A
spinlock_t-typed lock cannot be acquired with preemption disabled on
PREEMPT_RT because it becomes a sleeping lock there.
The runtime of the tracepoint depends on the number of entries in
running_trace_list and is unbounded. The blk-tracer is considered
debug code, so higher latencies here are acceptable.

Make running_trace_lock a raw_spinlock_t.
Signed-off-by: Wander Lairson Costa <wander@redhat.com>
Link: https://lore.kernel.org/r/20211220192827.38297-1-wander@redhat.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 5ef16305
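The snippet below is a minimal, self-contained sketch of the pattern this
commit applies, not the kernel source it changes: on PREEMPT_RT a
spinlock_t becomes a sleeping rtmutex-based lock, while a raw_spinlock_t
keeps the classic spin-with-preemption-disabled semantics and so may be
taken where preemption is already off, e.g. in a tracepoint handler. The
example_list, example_lock, and example_walk names are hypothetical
stand-ins for running_trace_list, running_trace_lock, and the tracepoint
walk.

#include <linux/spinlock.h>
#include <linux/list.h>

/* Hypothetical stand-ins for running_trace_list/running_trace_lock.
 * On PREEMPT_RT, spinlock_t sleeps; raw_spinlock_t still spins with
 * preemption disabled, so it is legal in non-sleepable context. */
static LIST_HEAD(example_list);
static DEFINE_RAW_SPINLOCK(example_lock);

static void example_walk(void)
{
	unsigned long flags;
	struct list_head *pos;

	/* Safe even when preemption is already disabled (e.g. inside a
	 * tracepoint), because a raw spinlock never sleeps. The hold
	 * time is bounded only by the list length, which is why the
	 * commit message notes higher latencies are acceptable here. */
	raw_spin_lock_irqsave(&example_lock, flags);
	list_for_each(pos, &example_list) {
		/* process each entry */
	}
	raw_spin_unlock_irqrestore(&example_lock, flags);
}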
@@ -34,7 +34,7 @@ static struct trace_array *blk_tr;
 static bool blk_tracer_enabled __read_mostly;
 
 static LIST_HEAD(running_trace_list);
-static __cacheline_aligned_in_smp DEFINE_SPINLOCK(running_trace_lock);
+static __cacheline_aligned_in_smp DEFINE_RAW_SPINLOCK(running_trace_lock);
 
 /* Select an alternative, minimalistic output than the original one */
 #define TRACE_BLK_OPT_CLASSIC	0x1
@@ -121,12 +121,12 @@ static void trace_note_tsk(struct task_struct *tsk)
 	struct blk_trace *bt;
 
 	tsk->btrace_seq = blktrace_seq;
-	spin_lock_irqsave(&running_trace_lock, flags);
+	raw_spin_lock_irqsave(&running_trace_lock, flags);
 	list_for_each_entry(bt, &running_trace_list, running_list) {
 		trace_note(bt, tsk->pid, BLK_TN_PROCESS, tsk->comm,
 			   sizeof(tsk->comm), 0);
 	}
-	spin_unlock_irqrestore(&running_trace_lock, flags);
+	raw_spin_unlock_irqrestore(&running_trace_lock, flags);
 }
 
 static void trace_note_time(struct blk_trace *bt)
@@ -666,9 +666,9 @@ static int __blk_trace_startstop(struct request_queue *q, int start)
 		blktrace_seq++;
 		smp_mb();
 		bt->trace_state = Blktrace_running;
-		spin_lock_irq(&running_trace_lock);
+		raw_spin_lock_irq(&running_trace_lock);
 		list_add(&bt->running_list, &running_trace_list);
-		spin_unlock_irq(&running_trace_lock);
+		raw_spin_unlock_irq(&running_trace_lock);
 
 		trace_note_time(bt);
 		ret = 0;
@@ -676,9 +676,9 @@ static int __blk_trace_startstop(struct request_queue *q, int start)
 	} else {
 		if (bt->trace_state == Blktrace_running) {
 			bt->trace_state = Blktrace_stopped;
-			spin_lock_irq(&running_trace_lock);
+			raw_spin_lock_irq(&running_trace_lock);
 			list_del_init(&bt->running_list);
-			spin_unlock_irq(&running_trace_lock);
+			raw_spin_unlock_irq(&running_trace_lock);
 			relay_flush(bt->rchan);
 			ret = 0;
 		}
@@ -1608,9 +1608,9 @@ static int blk_trace_remove_queue(struct request_queue *q)
 	if (bt->trace_state == Blktrace_running) {
 		bt->trace_state = Blktrace_stopped;
-		spin_lock_irq(&running_trace_lock);
+		raw_spin_lock_irq(&running_trace_lock);
 		list_del_init(&bt->running_list);
-		spin_unlock_irq(&running_trace_lock);
+		raw_spin_unlock_irq(&running_trace_lock);
 		relay_flush(bt->rchan);
 	}