Commit 7c51f7bb authored by Tetsuo Handa, committed by Linus Torvalds

profiling: remove prof_cpu_mask

syzbot is reporting uninit-value at profile_hits(), for there is a race
window between

  if (!alloc_cpumask_var(&prof_cpu_mask, GFP_KERNEL))
    return -ENOMEM;
  cpumask_copy(prof_cpu_mask, cpu_possible_mask);

in profile_init() and

  cpumask_available(prof_cpu_mask) &&
  cpumask_test_cpu(smp_processor_id(), prof_cpu_mask))

in profile_tick(); prof_cpu_mask remains uninitialized until cpumask_copy()
completes, while cpumask_available(prof_cpu_mask) returns true as soon as
alloc_cpumask_var(&prof_cpu_mask) completes.
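
Roughly, the problematic interleaving looks like this (an illustrative
timeline, not a verbatim trace from the report):

  CPU 0: profile_init()                    CPU 1: profile_tick()
  ---------------------                    ---------------------
  alloc_cpumask_var(&prof_cpu_mask, ...)
                                           cpumask_available(prof_cpu_mask)
                                             /* true: pointer is non-NULL */
                                           cpumask_test_cpu(smp_processor_id(),
                                                            prof_cpu_mask)
                                             /* reads uninitialized bits */
  cpumask_copy(prof_cpu_mask, cpu_possible_mask)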

We could replace alloc_cpumask_var() with zalloc_cpumask_var() and
call cpumask_copy() from create_proc_profile() only on UP kernels,
since on SMP kernels profile_online_cpu() calls cpumask_set_cpu() as
needed via cpuhp_setup_state(CPUHP_AP_ONLINE_DYN). But this patch
instead removes prof_cpu_mask, because it is unnecessary.
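
That rejected alternative would have looked roughly like the following
sketch (illustrative only, not the patch that was applied):

  /* profile_init(): allocate zeroed, so a racing profile_tick() sees
   * an all-clear mask instead of uninitialized bits. */
  if (!zalloc_cpumask_var(&prof_cpu_mask, GFP_KERNEL))
  	return -ENOMEM;

  /* create_proc_profile(): UP kernels have no CPU hotplug callbacks
   * to populate the mask, so copy it here. */
  #ifndef CONFIG_SMP
  	cpumask_copy(prof_cpu_mask, cpu_possible_mask);
  #endif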

The cpumask_test_cpu(smp_processor_id(), prof_cpu_mask) test
in profile_tick() is likely always true, because

  a CPU cannot call profile_tick() while that CPU is offline

and

  cpumask_set_cpu(cpu, prof_cpu_mask) is called when that CPU becomes
  online and cpumask_clear_cpu(cpu, prof_cpu_mask) is called when that
  CPU becomes offline.

The test could only be false during the transition between online and
offline.

But according to include/linux/cpuhotplug.h, CPUHP_PROFILE_PREPARE
belongs to the PREPARE section, which means that the CPU subjected to
profile_dead_cpu() cannot be inside profile_tick() (i.e. there is no
risk of a use-after-free bug), because interrupts on that CPU are
disabled during the PREPARE section. Therefore, this test is guaranteed
to be true, and can be removed. (Since profile_hits() already checks
prof_buffer != NULL, we don't need to check prof_buffer != NULL here,
unless get_irq_regs() or user_mode() is so slow that we want to avoid
calling them when prof_buffer == NULL.)
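
For reference, a heavily abridged sketch of the enum cpuhp_state layout
in include/linux/cpuhotplug.h (only the states relevant here; see the
header for the full list):

  enum cpuhp_state {
  	CPUHP_OFFLINE = 0,
  	/* ... PREPARE section: callbacks run on a control CPU while
  	 * the target CPU is not running ... */
  	CPUHP_PROFILE_PREPARE,
  	/* ... */
  	CPUHP_BRINGUP_CPU,
  	/* ... AP section: callbacks run on the hotplugged CPU itself ... */
  	CPUHP_AP_ONLINE_DYN,
  	/* ... */
  	CPUHP_ONLINE,
  };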

do_profile_hits() is called from profile_tick() in timer-interrupt
context only if cpumask_test_cpu(smp_processor_id(), prof_cpu_mask) is
true and prof_buffer is not NULL. But syzbot is also reporting that
do_profile_hits() is sometimes called while the current thread is still
inside vzalloc(), at which point prof_buffer must still be NULL. This
indicates that multiple threads concurrently wrote to the
/sys/kernel/profiling interface, causing one thread to re-allocate
prof_buffer even though another thread had already allocated it. Fix
this by serializing the writers.
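
The double-allocation scenario, roughly (again an illustrative
interleaving, not a verbatim trace):

  Thread A: profiling_store()              Thread B: profiling_store()
  ---------------------------              ---------------------------
  sees prof_on == 0, proceeds
                                           sees prof_on == 0, proceeds
  profile_setup(); profile_init()
    prof_buffer = kzalloc(...)  /* set */
  /* timer ticks may now hit prof_buffer */
                                           profile_init()
                                             prof_buffer = vzalloc(...)
                                             /* re-allocates a buffer that
                                              * is already allocated and
                                              * in use */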
Reported-by: syzbot <syzbot+b1a83ab2a9eb9321fbdd@syzkaller.appspotmail.com>
Closes: https://syzkaller.appspot.com/bug?extid=b1a83ab2a9eb9321fbdd
Signed-off-by: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
Tested-by: syzbot <syzbot+b1a83ab2a9eb9321fbdd@syzkaller.appspotmail.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 99d3bf5f
--- a/kernel/ksysfs.c
+++ b/kernel/ksysfs.c
@@ -92,7 +92,14 @@ static ssize_t profiling_store(struct kobject *kobj,
 				   const char *buf, size_t count)
 {
 	int ret;
+	static DEFINE_MUTEX(lock);
 
+	/*
+	 * We need serialization, for profile_setup() initializes prof_on
+	 * value and profile_init() must not reallocate prof_buffer after
+	 * once allocated.
+	 */
+	guard(mutex)(&lock);
 	if (prof_on)
 		return -EEXIST;
 	/*
--- a/kernel/profile.c
+++ b/kernel/profile.c
@@ -47,7 +47,6 @@ static unsigned short int prof_shift;
 int prof_on __read_mostly;
 EXPORT_SYMBOL_GPL(prof_on);
 
-static cpumask_var_t prof_cpu_mask;
 #if defined(CONFIG_SMP) && defined(CONFIG_PROC_FS)
 static DEFINE_PER_CPU(struct profile_hit *[2], cpu_profile_hits);
 static DEFINE_PER_CPU(int, cpu_profile_flip);
@@ -114,11 +113,6 @@ int __ref profile_init(void)
 
 	buffer_bytes = prof_len*sizeof(atomic_t);
 
-	if (!alloc_cpumask_var(&prof_cpu_mask, GFP_KERNEL))
-		return -ENOMEM;
-
-	cpumask_copy(prof_cpu_mask, cpu_possible_mask);
-
 	prof_buffer = kzalloc(buffer_bytes, GFP_KERNEL|__GFP_NOWARN);
 	if (prof_buffer)
 		return 0;
@@ -132,7 +126,6 @@ int __ref profile_init(void)
 	if (prof_buffer)
 		return 0;
 
-	free_cpumask_var(prof_cpu_mask);
 	return -ENOMEM;
 }
 
@@ -267,9 +260,6 @@ static int profile_dead_cpu(unsigned int cpu)
 	struct page *page;
 	int i;
 
-	if (cpumask_available(prof_cpu_mask))
-		cpumask_clear_cpu(cpu, prof_cpu_mask);
-
 	for (i = 0; i < 2; i++) {
 		if (per_cpu(cpu_profile_hits, cpu)[i]) {
 			page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[i]);
@@ -302,14 +292,6 @@ static int profile_prepare_cpu(unsigned int cpu)
 	return 0;
 }
 
-static int profile_online_cpu(unsigned int cpu)
-{
-	if (cpumask_available(prof_cpu_mask))
-		cpumask_set_cpu(cpu, prof_cpu_mask);
-
-	return 0;
-}
-
 #else /* !CONFIG_SMP */
 #define profile_flip_buffers()		do { } while (0)
 #define profile_discard_flip_buffers()	do { } while (0)
@@ -334,8 +316,8 @@ void profile_tick(int type)
 {
 	struct pt_regs *regs = get_irq_regs();
 
-	if (!user_mode(regs) && cpumask_available(prof_cpu_mask) &&
-	    cpumask_test_cpu(smp_processor_id(), prof_cpu_mask))
+	/* This is the old kernel-only legacy profiling */
+	if (!user_mode(regs))
 		profile_hit(type, (void *)profile_pc(regs));
 }
 
@@ -418,10 +400,6 @@ static const struct proc_ops profile_proc_ops = {
 int __ref create_proc_profile(void)
 {
 	struct proc_dir_entry *entry;
-#ifdef CONFIG_SMP
-	enum cpuhp_state online_state;
-#endif
 	int err = 0;
 
 	if (!prof_on)
@@ -431,26 +409,14 @@ int __ref create_proc_profile(void)
 			profile_prepare_cpu, profile_dead_cpu);
 	if (err)
 		return err;
-
-	err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "AP_PROFILE_ONLINE",
-				profile_online_cpu, NULL);
-	if (err < 0)
-		goto err_state_prep;
-	online_state = err;
-	err = 0;
 #endif
 	entry = proc_create("profile", S_IWUSR | S_IRUGO,
 			    NULL, &profile_proc_ops);
-	if (!entry)
-		goto err_state_onl;
-	proc_set_size(entry, (1 + prof_len) * sizeof(atomic_t));
-
-	return err;
-err_state_onl:
+	if (entry)
+		proc_set_size(entry, (1 + prof_len) * sizeof(atomic_t));
 #ifdef CONFIG_SMP
-	cpuhp_remove_state(online_state);
-err_state_prep:
-	cpuhp_remove_state(CPUHP_PROFILE_PREPARE);
+	else
+		cpuhp_remove_state(CPUHP_PROFILE_PREPARE);
 #endif
 	return err;
 }