Commit d44248a4 authored by Song Liu, committed by Ingo Molnar

perf/core: Rework memory accounting in perf_mmap()

perf_mmap() always increases user->locked_vm. As a result, "extra" could
grow bigger than "user_extra", which doesn't make sense. Here is an
example case:

(Note: Assume "user_lock_limit" is very small.)

  | # of perf_mmap calls |vma->vm_mm->pinned_vm|user->locked_vm|
  | 0                    | 0                   | 0             |
  | 1                    | user_extra          | user_extra    |
  | 2                    | 3 * user_extra      | 2 * user_extra|
  | 3                    | 6 * user_extra      | 3 * user_extra|
  | 4                    | 10 * user_extra     | 4 * user_extra|

Fix this by maintaining proper user_extra and extra.
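
To make the table concrete, here is a minimal user-space simulation of the
old accounting. This is an illustration only, not kernel code: plain longs
stand in for the kernel's atomic counters, user_extra is fixed at 1 unit per
perf_mmap() call, and user_lock_limit is taken to be 0 ("very small"), both
assumptions made for the example.

#include <stdio.h>

/* Minimal user-space simulation of the OLD accounting; plain longs
 * stand in for the kernel's atomic counters.  user_extra is fixed at
 * 1 unit per perf_mmap() call and user_lock_limit at 0 ("very small"),
 * both assumptions for illustration. */
int main(void)
{
	const long user_lock_limit = 0;
	long locked_vm = 0, pinned_vm = 0;

	for (int call = 1; call <= 4; call++) {
		long user_extra = 1, extra = 0;
		long user_locked = locked_vm + user_extra;

		/* old logic: locked_vm grows on every call, so each new
		 * mapping recomputes "extra" against an ever-larger base */
		if (user_locked > user_lock_limit)
			extra = user_locked - user_lock_limit;

		locked_vm += user_extra;	/* always charged in full */
		pinned_vm += extra;

		printf("call %d: pinned_vm=%ld locked_vm=%ld\n",
		       call, pinned_vm, locked_vm);
	}
	return 0;
}

Running it prints pinned_vm = 1, 3, 6, 10 while locked_vm = 1, 2, 3, 4,
matching the table: the charge to pinned_vm grows quadratically because
"extra" is recomputed against an ever-growing locked_vm.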
Reviewed-by: Hechao Li <hechaol@fb.com>
Reported-by: Hechao Li <hechaol@fb.com>
Signed-off-by: Song Liu <songliubraving@fb.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: <kernel-team@fb.com>
Cc: Jie Meng <jmeng@fb.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: https://lkml.kernel.org/r/20190904214618.3795672-1-songliubraving@fb.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent f733c6b5
@@ -5668,7 +5668,8 @@ static void perf_mmap_close(struct vm_area_struct *vma)
 		 * undo the VM accounting.
 		 */
 
-		atomic_long_sub((size >> PAGE_SHIFT) + 1, &mmap_user->locked_vm);
+		atomic_long_sub((size >> PAGE_SHIFT) + 1 - mmap_locked,
+				&mmap_user->locked_vm);
 		atomic64_sub(mmap_locked, &vma->vm_mm->pinned_vm);
 		free_uid(mmap_user);
 
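The close-side change keeps the unaccounting symmetric with the new charging
split: only the pages that were actually charged to locked_vm leave
locked_vm, while mmap_locked (the share charged to pinned_vm) leaves
pinned_vm. A minimal sketch of that invariant, again a user-space
illustration rather than kernel code:

#include <assert.h>

/* Illustration of the close-side symmetry (not kernel code): of the
 * (size >> PAGE_SHIFT) + 1 pages charged at mmap time, mmap_locked
 * went to pinned_vm and the remainder to locked_vm, so close must
 * subtract each share from the counter it was charged to. */
static void mock_close(long *locked_vm, long *pinned_vm,
		       long pages, long mmap_locked)
{
	*locked_vm -= pages - mmap_locked;
	*pinned_vm -= mmap_locked;
}

int main(void)
{
	long locked_vm = 2, pinned_vm = 3;	/* 5 pages, split 2 / 3 */

	mock_close(&locked_vm, &pinned_vm, 5, 3);
	assert(locked_vm == 0 && pinned_vm == 0);
	return 0;
}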
@@ -5812,8 +5813,20 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
 
 	user_locked = atomic_long_read(&user->locked_vm) + user_extra;
 
-	if (user_locked > user_lock_limit)
+	if (user_locked <= user_lock_limit) {
+		/* charge all to locked_vm */
+	} else if (atomic_long_read(&user->locked_vm) >= user_lock_limit) {
+		/* charge all to pinned_vm */
+		extra = user_extra;
+		user_extra = 0;
+	} else {
+		/*
+		 * charge locked_vm until it hits user_lock_limit;
+		 * charge the rest from pinned_vm
+		 */
 		extra = user_locked - user_lock_limit;
+		user_extra -= extra;
+	}
 
 	lock_limit = rlimit(RLIMIT_MEMLOCK);
 	lock_limit >>= PAGE_SHIFT;
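For comparison, here is the same user-space simulation with the reworked
three-way charge. The constants (user_extra = 2, user_lock_limit = 3) are
chosen purely for illustration, so that all three branches are exercised:

#include <stdio.h>

/* Same simulation with the reworked three-way charge.  The constants
 * (user_extra = 2, user_lock_limit = 3) are chosen so that all three
 * branches are exercised; illustration only, not kernel code. */
int main(void)
{
	const long user_lock_limit = 3;
	long locked_vm = 0, pinned_vm = 0;

	for (int call = 1; call <= 4; call++) {
		long user_extra = 2, extra = 0;
		long user_locked = locked_vm + user_extra;

		if (user_locked <= user_lock_limit) {
			/* charge all to locked_vm */
		} else if (locked_vm >= user_lock_limit) {
			/* charge all to pinned_vm */
			extra = user_extra;
			user_extra = 0;
		} else {
			/* fill locked_vm up to the limit, pin the rest */
			extra = user_locked - user_lock_limit;
			user_extra -= extra;
		}

		locked_vm += user_extra;
		pinned_vm += extra;

		printf("call %d: pinned_vm=%ld locked_vm=%ld\n",
		       call, pinned_vm, locked_vm);
	}
	return 0;
}

After the fix, locked_vm saturates at user_lock_limit and pinned_vm grows by
at most user_extra per call, so "extra" can never outgrow "user_extra".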