Commit c0a581d7 authored by Waiman Long's avatar Waiman Long Committed by Steven Rostedt (Google)

tracing: Disable interrupt or preemption before acquiring arch_spinlock_t

It was found that some tracing functions in kernel/trace/trace.c acquire
an arch_spinlock_t with preemption and irqs enabled. An example is the
tracing_saved_cmdlines_size_read() function which intermittently causes
a "BUG: using smp_processor_id() in preemptible" warning when the LTP
read_all_proc test is run.

That can be problematic in case preemption happens after acquiring the
lock. Add the necessary preemption or interrupt disabling code in the
appropriate places before acquiring an arch_spinlock_t.

The convention here is to disable preemption for trace_cmdline_lock and
interrupts for max_lock.

Link: https://lkml.kernel.org/r/20220922145622.1744826-1-longman@redhat.com

Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Will Deacon <will@kernel.org>
Cc: Boqun Feng <boqun.feng@gmail.com>
Cc: stable@vger.kernel.org
Fixes: a35873a0 ("tracing: Add conditional snapshot")
Fixes: 939c7a4f ("tracing: Introduce saved_cmdlines_size file")
Suggested-by: Steven Rostedt <rostedt@goodmis.org>
Signed-off-by: Waiman Long <longman@redhat.com>
Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
parent 834168fb
...@@ -1193,12 +1193,14 @@ void *tracing_cond_snapshot_data(struct trace_array *tr) ...@@ -1193,12 +1193,14 @@ void *tracing_cond_snapshot_data(struct trace_array *tr)
{ {
void *cond_data = NULL; void *cond_data = NULL;
local_irq_disable();
arch_spin_lock(&tr->max_lock); arch_spin_lock(&tr->max_lock);
if (tr->cond_snapshot) if (tr->cond_snapshot)
cond_data = tr->cond_snapshot->cond_data; cond_data = tr->cond_snapshot->cond_data;
arch_spin_unlock(&tr->max_lock); arch_spin_unlock(&tr->max_lock);
local_irq_enable();
return cond_data; return cond_data;
} }
...@@ -1334,9 +1336,11 @@ int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data, ...@@ -1334,9 +1336,11 @@ int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data,
goto fail_unlock; goto fail_unlock;
} }
local_irq_disable();
arch_spin_lock(&tr->max_lock); arch_spin_lock(&tr->max_lock);
tr->cond_snapshot = cond_snapshot; tr->cond_snapshot = cond_snapshot;
arch_spin_unlock(&tr->max_lock); arch_spin_unlock(&tr->max_lock);
local_irq_enable();
mutex_unlock(&trace_types_lock); mutex_unlock(&trace_types_lock);
...@@ -1363,6 +1367,7 @@ int tracing_snapshot_cond_disable(struct trace_array *tr) ...@@ -1363,6 +1367,7 @@ int tracing_snapshot_cond_disable(struct trace_array *tr)
{ {
int ret = 0; int ret = 0;
local_irq_disable();
arch_spin_lock(&tr->max_lock); arch_spin_lock(&tr->max_lock);
if (!tr->cond_snapshot) if (!tr->cond_snapshot)
...@@ -1373,6 +1378,7 @@ int tracing_snapshot_cond_disable(struct trace_array *tr) ...@@ -1373,6 +1378,7 @@ int tracing_snapshot_cond_disable(struct trace_array *tr)
} }
arch_spin_unlock(&tr->max_lock); arch_spin_unlock(&tr->max_lock);
local_irq_enable();
return ret; return ret;
} }
...@@ -2200,6 +2206,11 @@ static size_t tgid_map_max; ...@@ -2200,6 +2206,11 @@ static size_t tgid_map_max;
#define SAVED_CMDLINES_DEFAULT 128 #define SAVED_CMDLINES_DEFAULT 128
#define NO_CMDLINE_MAP UINT_MAX #define NO_CMDLINE_MAP UINT_MAX
/*
* Preemption must be disabled before acquiring trace_cmdline_lock.
* The various trace_arrays' max_lock must be acquired in a context
* where interrupt is disabled.
*/
static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED; static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
struct saved_cmdlines_buffer { struct saved_cmdlines_buffer {
unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1]; unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
...@@ -2412,7 +2423,11 @@ static int trace_save_cmdline(struct task_struct *tsk) ...@@ -2412,7 +2423,11 @@ static int trace_save_cmdline(struct task_struct *tsk)
* the lock, but we also don't want to spin * the lock, but we also don't want to spin
* nor do we want to disable interrupts, * nor do we want to disable interrupts,
* so if we miss here, then better luck next time. * so if we miss here, then better luck next time.
*
* This is called from within the scheduler and wakeup paths, so
* interrupts had better be disabled and the run queue lock held.
*/ */
lockdep_assert_preemption_disabled();
if (!arch_spin_trylock(&trace_cmdline_lock)) if (!arch_spin_trylock(&trace_cmdline_lock))
return 0; return 0;
...@@ -5890,9 +5905,11 @@ tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf, ...@@ -5890,9 +5905,11 @@ tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
char buf[64]; char buf[64];
int r; int r;
preempt_disable();
arch_spin_lock(&trace_cmdline_lock); arch_spin_lock(&trace_cmdline_lock);
r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num); r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
arch_spin_unlock(&trace_cmdline_lock); arch_spin_unlock(&trace_cmdline_lock);
preempt_enable();
return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
} }
...@@ -5917,10 +5934,12 @@ static int tracing_resize_saved_cmdlines(unsigned int val) ...@@ -5917,10 +5934,12 @@ static int tracing_resize_saved_cmdlines(unsigned int val)
return -ENOMEM; return -ENOMEM;
} }
preempt_disable();
arch_spin_lock(&trace_cmdline_lock); arch_spin_lock(&trace_cmdline_lock);
savedcmd_temp = savedcmd; savedcmd_temp = savedcmd;
savedcmd = s; savedcmd = s;
arch_spin_unlock(&trace_cmdline_lock); arch_spin_unlock(&trace_cmdline_lock);
preempt_enable();
free_saved_cmdlines_buffer(savedcmd_temp); free_saved_cmdlines_buffer(savedcmd_temp);
return 0; return 0;
...@@ -6373,10 +6392,12 @@ int tracing_set_tracer(struct trace_array *tr, const char *buf) ...@@ -6373,10 +6392,12 @@ int tracing_set_tracer(struct trace_array *tr, const char *buf)
#ifdef CONFIG_TRACER_SNAPSHOT #ifdef CONFIG_TRACER_SNAPSHOT
if (t->use_max_tr) { if (t->use_max_tr) {
local_irq_disable();
arch_spin_lock(&tr->max_lock); arch_spin_lock(&tr->max_lock);
if (tr->cond_snapshot) if (tr->cond_snapshot)
ret = -EBUSY; ret = -EBUSY;
arch_spin_unlock(&tr->max_lock); arch_spin_unlock(&tr->max_lock);
local_irq_enable();
if (ret) if (ret)
goto out; goto out;
} }
...@@ -7436,10 +7457,12 @@ tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt, ...@@ -7436,10 +7457,12 @@ tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
goto out; goto out;
} }
local_irq_disable();
arch_spin_lock(&tr->max_lock); arch_spin_lock(&tr->max_lock);
if (tr->cond_snapshot) if (tr->cond_snapshot)
ret = -EBUSY; ret = -EBUSY;
arch_spin_unlock(&tr->max_lock); arch_spin_unlock(&tr->max_lock);
local_irq_enable();
if (ret) if (ret)
goto out; goto out;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment