Commit 42fc5414 authored by Michel Lespinasse, committed by Linus Torvalds

mmap locking API: add mmap_assert_locked() and mmap_assert_write_locked()

Add new APIs to assert that mmap_sem is held.

Using this instead of rwsem_is_locked and lockdep_assert_held[_write]
makes the assertions more tolerant of future changes to the lock type.
Signed-off-by: Michel Lespinasse <walken@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
Reviewed-by: Daniel Jordan <daniel.m.jordan@oracle.com>
Cc: Davidlohr Bueso <dbueso@suse.de>
Cc: David Rientjes <rientjes@google.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Jason Gunthorpe <jgg@ziepe.ca>
Cc: Jerome Glisse <jglisse@redhat.com>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Laurent Dufour <ldufour@linux.ibm.com>
Cc: Liam Howlett <Liam.Howlett@oracle.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Ying Han <yinghan@google.com>
Link: http://lkml.kernel.org/r/20200520052908.204642-10-walken@google.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 14c3656b
...@@ -2181,7 +2181,7 @@ static void x86_pmu_event_mapped(struct perf_event *event, struct mm_struct *mm) ...@@ -2181,7 +2181,7 @@ static void x86_pmu_event_mapped(struct perf_event *event, struct mm_struct *mm)
* For now, this can't happen because all callers hold mmap_sem * For now, this can't happen because all callers hold mmap_sem
* for write. If this changes, we'll need a different solution. * for write. If this changes, we'll need a different solution.
*/ */
lockdep_assert_held_write(&mm->mmap_sem); mmap_assert_write_locked(mm);
if (atomic_inc_return(&mm->context.perf_rdpmc_allowed) == 1) if (atomic_inc_return(&mm->context.perf_rdpmc_allowed) == 1)
on_each_cpu_mask(mm_cpumask(mm), cr4_update_pce, NULL, 1); on_each_cpu_mask(mm_cpumask(mm), cr4_update_pce, NULL, 1);
......
...@@ -234,7 +234,7 @@ static inline bool userfaultfd_huge_must_wait(struct userfaultfd_ctx *ctx, ...@@ -234,7 +234,7 @@ static inline bool userfaultfd_huge_must_wait(struct userfaultfd_ctx *ctx,
pte_t *ptep, pte; pte_t *ptep, pte;
bool ret = true; bool ret = true;
VM_BUG_ON(!rwsem_is_locked(&mm->mmap_sem)); mmap_assert_locked(mm);
ptep = huge_pte_offset(mm, address, vma_mmu_pagesize(vma)); ptep = huge_pte_offset(mm, address, vma_mmu_pagesize(vma));
...@@ -286,7 +286,7 @@ static inline bool userfaultfd_must_wait(struct userfaultfd_ctx *ctx, ...@@ -286,7 +286,7 @@ static inline bool userfaultfd_must_wait(struct userfaultfd_ctx *ctx,
pte_t *pte; pte_t *pte;
bool ret = true; bool ret = true;
VM_BUG_ON(!rwsem_is_locked(&mm->mmap_sem)); mmap_assert_locked(mm);
pgd = pgd_offset(mm, address); pgd = pgd_offset(mm, address);
if (!pgd_present(*pgd)) if (!pgd_present(*pgd))
...@@ -405,7 +405,7 @@ vm_fault_t handle_userfault(struct vm_fault *vmf, unsigned long reason) ...@@ -405,7 +405,7 @@ vm_fault_t handle_userfault(struct vm_fault *vmf, unsigned long reason)
* Coredumping runs without mmap_sem so we can only check that * Coredumping runs without mmap_sem so we can only check that
* the mmap_sem is held, if PF_DUMPCORE was not set. * the mmap_sem is held, if PF_DUMPCORE was not set.
*/ */
WARN_ON_ONCE(!rwsem_is_locked(&mm->mmap_sem)); mmap_assert_locked(mm);
ctx = vmf->vma->vm_userfaultfd_ctx.ctx; ctx = vmf->vma->vm_userfaultfd_ctx.ctx;
if (!ctx) if (!ctx)
......
#ifndef _LINUX_MMAP_LOCK_H #ifndef _LINUX_MMAP_LOCK_H
#define _LINUX_MMAP_LOCK_H #define _LINUX_MMAP_LOCK_H
#include <linux/mmdebug.h>
#define MMAP_LOCK_INITIALIZER(name) \ #define MMAP_LOCK_INITIALIZER(name) \
.mmap_sem = __RWSEM_INITIALIZER((name).mmap_sem), .mmap_sem = __RWSEM_INITIALIZER((name).mmap_sem),
...@@ -73,4 +75,16 @@ static inline void mmap_read_unlock_non_owner(struct mm_struct *mm) ...@@ -73,4 +75,16 @@ static inline void mmap_read_unlock_non_owner(struct mm_struct *mm)
up_read_non_owner(&mm->mmap_sem); up_read_non_owner(&mm->mmap_sem);
} }
/*
 * Assert that the current task holds mm's mmap lock (in either read or
 * write mode).  Two complementary checks are used:
 *  - lockdep_assert_held() verifies via lockdep that *this* task holds
 *    the lock (compiled out when CONFIG_LOCKDEP is disabled);
 *  - VM_BUG_ON_MM() additionally checks at runtime (under
 *    CONFIG_DEBUG_VM) that the rwsem is locked at all, covering
 *    non-lockdep builds.
 * Callers should use this instead of open-coding rwsem_is_locked() or
 * lockdep_assert_held() on mmap_sem, so the assertion survives future
 * changes to the lock type.
 */
static inline void mmap_assert_locked(struct mm_struct *mm)
{
lockdep_assert_held(&mm->mmap_sem);
VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_sem), mm);
}
/*
 * Assert that the current task holds mm's mmap lock for writing.
 * lockdep_assert_held_write() checks write ownership via lockdep
 * (no-op without CONFIG_LOCKDEP); the VM_BUG_ON_MM() fallback can only
 * verify that the rwsem is held at all — rwsem_is_locked() cannot
 * distinguish read from write mode — so non-lockdep builds get a
 * weaker check.  Preferred over open-coded lockdep/rwsem assertions so
 * callers are insulated from future changes to the lock type.
 */
static inline void mmap_assert_write_locked(struct mm_struct *mm)
{
lockdep_assert_held_write(&mm->mmap_sem);
VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_sem), mm);
}
#endif /* _LINUX_MMAP_LOCK_H */ #endif /* _LINUX_MMAP_LOCK_H */
...@@ -1425,7 +1425,7 @@ long populate_vma_page_range(struct vm_area_struct *vma, ...@@ -1425,7 +1425,7 @@ long populate_vma_page_range(struct vm_area_struct *vma,
VM_BUG_ON(end & ~PAGE_MASK); VM_BUG_ON(end & ~PAGE_MASK);
VM_BUG_ON_VMA(start < vma->vm_start, vma); VM_BUG_ON_VMA(start < vma->vm_start, vma);
VM_BUG_ON_VMA(end > vma->vm_end, vma); VM_BUG_ON_VMA(end > vma->vm_end, vma);
VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_sem), mm); mmap_assert_locked(mm);
gup_flags = FOLL_TOUCH | FOLL_POPULATE | FOLL_MLOCK; gup_flags = FOLL_TOUCH | FOLL_POPULATE | FOLL_MLOCK;
if (vma->vm_flags & VM_LOCKONFAULT) if (vma->vm_flags & VM_LOCKONFAULT)
......
...@@ -563,7 +563,7 @@ int hmm_range_fault(struct hmm_range *range) ...@@ -563,7 +563,7 @@ int hmm_range_fault(struct hmm_range *range)
struct mm_struct *mm = range->notifier->mm; struct mm_struct *mm = range->notifier->mm;
int ret; int ret;
lockdep_assert_held(&mm->mmap_sem); mmap_assert_locked(mm);
do { do {
/* If range is no longer valid force retry. */ /* If range is no longer valid force retry. */
......
...@@ -1211,7 +1211,7 @@ static inline unsigned long zap_pud_range(struct mmu_gather *tlb, ...@@ -1211,7 +1211,7 @@ static inline unsigned long zap_pud_range(struct mmu_gather *tlb,
next = pud_addr_end(addr, end); next = pud_addr_end(addr, end);
if (pud_trans_huge(*pud) || pud_devmap(*pud)) { if (pud_trans_huge(*pud) || pud_devmap(*pud)) {
if (next - addr != HPAGE_PUD_SIZE) { if (next - addr != HPAGE_PUD_SIZE) {
VM_BUG_ON_VMA(!rwsem_is_locked(&tlb->mm->mmap_sem), vma); mmap_assert_locked(tlb->mm);
split_huge_pud(vma, pud, addr); split_huge_pud(vma, pud, addr);
} else if (zap_huge_pud(tlb, vma, pud, addr)) } else if (zap_huge_pud(tlb, vma, pud, addr))
goto next; goto next;
......
...@@ -609,7 +609,7 @@ int __mmu_notifier_register(struct mmu_notifier *subscription, ...@@ -609,7 +609,7 @@ int __mmu_notifier_register(struct mmu_notifier *subscription,
struct mmu_notifier_subscriptions *subscriptions = NULL; struct mmu_notifier_subscriptions *subscriptions = NULL;
int ret; int ret;
lockdep_assert_held_write(&mm->mmap_sem); mmap_assert_write_locked(mm);
BUG_ON(atomic_read(&mm->mm_users) <= 0); BUG_ON(atomic_read(&mm->mm_users) <= 0);
if (IS_ENABLED(CONFIG_LOCKDEP)) { if (IS_ENABLED(CONFIG_LOCKDEP)) {
...@@ -761,7 +761,7 @@ struct mmu_notifier *mmu_notifier_get_locked(const struct mmu_notifier_ops *ops, ...@@ -761,7 +761,7 @@ struct mmu_notifier *mmu_notifier_get_locked(const struct mmu_notifier_ops *ops,
struct mmu_notifier *subscription; struct mmu_notifier *subscription;
int ret; int ret;
lockdep_assert_held_write(&mm->mmap_sem); mmap_assert_write_locked(mm);
if (mm->notifier_subscriptions) { if (mm->notifier_subscriptions) {
subscription = find_get_mmu_notifier(mm, ops); subscription = find_get_mmu_notifier(mm, ops);
...@@ -1006,7 +1006,7 @@ int mmu_interval_notifier_insert_locked( ...@@ -1006,7 +1006,7 @@ int mmu_interval_notifier_insert_locked(
mm->notifier_subscriptions; mm->notifier_subscriptions;
int ret; int ret;
lockdep_assert_held_write(&mm->mmap_sem); mmap_assert_write_locked(mm);
if (!subscriptions || !subscriptions->has_itree) { if (!subscriptions || !subscriptions->has_itree) {
ret = __mmu_notifier_register(NULL, mm); ret = __mmu_notifier_register(NULL, mm);
......
...@@ -395,7 +395,7 @@ int walk_page_range(struct mm_struct *mm, unsigned long start, ...@@ -395,7 +395,7 @@ int walk_page_range(struct mm_struct *mm, unsigned long start,
if (!walk.mm) if (!walk.mm)
return -EINVAL; return -EINVAL;
lockdep_assert_held(&walk.mm->mmap_sem); mmap_assert_locked(walk.mm);
vma = find_vma(walk.mm, start); vma = find_vma(walk.mm, start);
do { do {
...@@ -453,7 +453,7 @@ int walk_page_range_novma(struct mm_struct *mm, unsigned long start, ...@@ -453,7 +453,7 @@ int walk_page_range_novma(struct mm_struct *mm, unsigned long start,
if (start >= end || !walk.mm) if (start >= end || !walk.mm)
return -EINVAL; return -EINVAL;
lockdep_assert_held(&walk.mm->mmap_sem); mmap_assert_locked(walk.mm);
return __walk_page_range(start, end, &walk); return __walk_page_range(start, end, &walk);
} }
...@@ -472,7 +472,7 @@ int walk_page_vma(struct vm_area_struct *vma, const struct mm_walk_ops *ops, ...@@ -472,7 +472,7 @@ int walk_page_vma(struct vm_area_struct *vma, const struct mm_walk_ops *ops,
if (!walk.mm) if (!walk.mm)
return -EINVAL; return -EINVAL;
lockdep_assert_held(&walk.mm->mmap_sem); mmap_assert_locked(walk.mm);
err = walk_page_test(vma->vm_start, vma->vm_end, &walk); err = walk_page_test(vma->vm_start, vma->vm_end, &walk);
if (err > 0) if (err > 0)
......
...@@ -437,7 +437,7 @@ int __account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc, ...@@ -437,7 +437,7 @@ int __account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc,
unsigned long locked_vm, limit; unsigned long locked_vm, limit;
int ret = 0; int ret = 0;
lockdep_assert_held_write(&mm->mmap_sem); mmap_assert_write_locked(mm);
locked_vm = mm->locked_vm; locked_vm = mm->locked_vm;
if (inc) { if (inc) {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment