Commit bae473a4 authored by Kirill A. Shutemov, committed by Linus Torvalds

mm: introduce fault_env

The idea is borrowed from Peter's patch from the patchset on speculative
page faults [1]:

Instead of passing around the endless list of function arguments,
replace the lot with a single structure so we can change context without
endless function signature changes.

The changes are mostly mechanical, with the exception of the faultaround
code: filemap_map_pages() got reworked a bit.

This patch is preparation for the next one.

[1] http://lkml.kernel.org/r/20141020222841.302891540@infradead.org
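For illustration, a minimal sketch of the refactoring pattern the message
describes; do_fault_old() and do_fault_new() are hypothetical names, only
the fault_env idea mirrors this patch (the real signatures appear in the
diff below):

/* Before: every helper on the fault path takes the full argument list,
 * so adding one piece of context means touching every signature. */
int do_fault_old(struct mm_struct *mm, struct vm_area_struct *vma,
		 unsigned long address, pmd_t *pmd, unsigned int flags);

/* After: the context travels as a single object. */
int do_fault_new(struct fault_env *fe);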

Link: http://lkml.kernel.org/r/1466021202-61880-9-git-send-email-kirill.shutemov@linux.intel.com
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent dcddffd4
--- a/Documentation/filesystems/Locking
+++ b/Documentation/filesystems/Locking
@@ -548,13 +548,13 @@ subsequent truncate), and then return with VM_FAULT_LOCKED, and the page
 locked. The VM will unlock the page.
 
 	->map_pages() is called when VM asks to map easy accessible pages.
-Filesystem should find and map pages associated with offsets from "pgoff"
-till "max_pgoff". ->map_pages() is called with page table locked and must
+Filesystem should find and map pages associated with offsets from "start_pgoff"
+till "end_pgoff". ->map_pages() is called with page table locked and must
 not block. If it's not possible to reach a page without blocking,
 filesystem should skip it. Filesystem should use do_set_pte() to setup
-page table entry. Pointer to entry associated with offset "pgoff" is
-passed in "pte" field in vm_fault structure. Pointers to entries for other
-offsets should be calculated relative to "pte".
+page table entry. Pointer to entry associated with the page is passed in
+"pte" field in fault_env structure. Pointers to entries for other offsets
+should be calculated relative to "pte".
 
 	->page_mkwrite() is called when a previously read-only pte is
 about to become writeable. The filesystem again must ensure that there are
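To make the relative-"pte" contract above concrete, here is a hedged
sketch of a ->map_pages() skeleton; the helper name is hypothetical,
error paths are elided, and it assumes the caller positioned fe->pte at
the entry for "start_pgoff":

/* Hedged sketch of the ->map_pages() contract described above. */
static void example_map_pages(struct fault_env *fe,
			      pgoff_t start_pgoff, pgoff_t end_pgoff)
{
	pgoff_t pgoff;

	for (pgoff = start_pgoff; pgoff <= end_pgoff; pgoff++) {
		/* entries for other offsets are relative to fe->pte */
		pte_t *pte = fe->pte + (pgoff - start_pgoff);

		if (!pte_none(*pte))
			continue;	/* slot already populated */
		/* find the page for 'pgoff' without blocking; if that
		 * is impossible, skip it, as the text above requires */
	}
}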
--- a/fs/userfaultfd.c
+++ b/fs/userfaultfd.c
@@ -257,10 +257,9 @@ static inline bool userfaultfd_must_wait(struct userfaultfd_ctx *ctx,
  * fatal_signal_pending()s, and the mmap_sem must be released before
  * returning it.
  */
-int handle_userfault(struct vm_area_struct *vma, unsigned long address,
-		     unsigned int flags, unsigned long reason)
+int handle_userfault(struct fault_env *fe, unsigned long reason)
 {
-	struct mm_struct *mm = vma->vm_mm;
+	struct mm_struct *mm = fe->vma->vm_mm;
 	struct userfaultfd_ctx *ctx;
 	struct userfaultfd_wait_queue uwq;
 	int ret;
@@ -269,7 +268,7 @@ int handle_userfault(struct vm_area_struct *vma, unsigned long address,
 	BUG_ON(!rwsem_is_locked(&mm->mmap_sem));
 
 	ret = VM_FAULT_SIGBUS;
-	ctx = vma->vm_userfaultfd_ctx.ctx;
+	ctx = fe->vma->vm_userfaultfd_ctx.ctx;
 	if (!ctx)
 		goto out;
 
@@ -302,17 +301,17 @@ int handle_userfault(struct vm_area_struct *vma, unsigned long address,
 	 * without first stopping userland access to the memory. For
 	 * VM_UFFD_MISSING userfaults this is enough for now.
 	 */
-	if (unlikely(!(flags & FAULT_FLAG_ALLOW_RETRY))) {
+	if (unlikely(!(fe->flags & FAULT_FLAG_ALLOW_RETRY))) {
 		/*
 		 * Validate the invariant that nowait must allow retry
 		 * to be sure not to return SIGBUS erroneously on
 		 * nowait invocations.
 		 */
-		BUG_ON(flags & FAULT_FLAG_RETRY_NOWAIT);
+		BUG_ON(fe->flags & FAULT_FLAG_RETRY_NOWAIT);
 #ifdef CONFIG_DEBUG_VM
 		if (printk_ratelimit()) {
 			printk(KERN_WARNING
-			       "FAULT_FLAG_ALLOW_RETRY missing %x\n", flags);
+			       "FAULT_FLAG_ALLOW_RETRY missing %x\n", fe->flags);
 			dump_stack();
 		}
 #endif
@@ -324,7 +323,7 @@ int handle_userfault(struct vm_area_struct *vma, unsigned long address,
 	 * and wait.
 	 */
 	ret = VM_FAULT_RETRY;
-	if (flags & FAULT_FLAG_RETRY_NOWAIT)
+	if (fe->flags & FAULT_FLAG_RETRY_NOWAIT)
 		goto out;
 
 	/* take the reference before dropping the mmap_sem */
@@ -332,10 +331,11 @@ int handle_userfault(struct vm_area_struct *vma, unsigned long address,
 
 	init_waitqueue_func_entry(&uwq.wq, userfaultfd_wake_function);
 	uwq.wq.private = current;
-	uwq.msg = userfault_msg(address, flags, reason);
+	uwq.msg = userfault_msg(fe->address, fe->flags, reason);
 	uwq.ctx = ctx;
 
-	return_to_userland = (flags & (FAULT_FLAG_USER|FAULT_FLAG_KILLABLE)) ==
+	return_to_userland =
+		(fe->flags & (FAULT_FLAG_USER|FAULT_FLAG_KILLABLE)) ==
 		(FAULT_FLAG_USER|FAULT_FLAG_KILLABLE);
 
 	spin_lock(&ctx->fault_pending_wqh.lock);
@@ -353,7 +353,7 @@ int handle_userfault(struct vm_area_struct *vma, unsigned long address,
 			  TASK_KILLABLE);
 	spin_unlock(&ctx->fault_pending_wqh.lock);
 
-	must_wait = userfaultfd_must_wait(ctx, address, flags, reason);
+	must_wait = userfaultfd_must_wait(ctx, fe->address, fe->flags, reason);
 	up_read(&mm->mmap_sem);
 
 	if (likely(must_wait && !ACCESS_ONCE(ctx->released) &&
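After this change, a fault handler that already carries a fault_env just
forwards it. A hedged sketch of such a caller — example_anon_fault() is
hypothetical, while userfaultfd_missing() and VM_UFFD_MISSING are real;
the actual call sites live in the collapsed mm diffs below:

/* Post-patch caller: the fault context is forwarded as-is instead of
 * being unpacked into vma/address/flags arguments. */
static int example_anon_fault(struct fault_env *fe)
{
	if (userfaultfd_missing(fe->vma))
		return handle_userfault(fe, VM_UFFD_MISSING);
	/* ...normal anonymous-fault path would continue here... */
	return 0;
}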
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -1,20 +1,12 @@
 #ifndef _LINUX_HUGE_MM_H
 #define _LINUX_HUGE_MM_H
 
-extern int do_huge_pmd_anonymous_page(struct mm_struct *mm,
-				      struct vm_area_struct *vma,
-				      unsigned long address, pmd_t *pmd,
-				      unsigned int flags);
+extern int do_huge_pmd_anonymous_page(struct fault_env *fe);
 extern int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 			 pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
 			 struct vm_area_struct *vma);
-extern void huge_pmd_set_accessed(struct mm_struct *mm,
-				  struct vm_area_struct *vma,
-				  unsigned long address, pmd_t *pmd,
-				  pmd_t orig_pmd, int dirty);
-extern int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
-			       unsigned long address, pmd_t *pmd,
-			       pmd_t orig_pmd);
+extern void huge_pmd_set_accessed(struct fault_env *fe, pmd_t orig_pmd);
+extern int do_huge_pmd_wp_page(struct fault_env *fe, pmd_t orig_pmd);
 extern struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
 					  unsigned long addr,
 					  pmd_t *pmd,
@@ -134,8 +126,7 @@ static inline int hpage_nr_pages(struct page *page)
 	return 1;
 }
 
-extern int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
-				 unsigned long addr, pmd_t pmd, pmd_t *pmdp);
+extern int do_huge_pmd_numa_page(struct fault_env *fe, pmd_t orig_pmd);
 
 extern struct page *huge_zero_page;
 
@@ -196,8 +187,7 @@ static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
 	return NULL;
 }
 
-static inline int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
-					unsigned long addr, pmd_t pmd, pmd_t *pmdp)
+static inline int do_huge_pmd_numa_page(struct fault_env *fe, pmd_t orig_pmd)
 {
 	return 0;
 }
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -309,10 +309,27 @@ struct vm_fault {
 					 * VM_FAULT_DAX_LOCKED and fill in
 					 * entry here.
 					 */
-	/* for ->map_pages() only */
-	pgoff_t max_pgoff;		/* map pages for offset from pgoff till
-					 * max_pgoff inclusive */
-	pte_t *pte;			/* pte entry associated with ->pgoff */
+};
+
+/*
+ * Page fault context: passes though page fault handler instead of endless list
+ * of function arguments.
+ */
+struct fault_env {
+	struct vm_area_struct *vma;	/* Target VMA */
+	unsigned long address;		/* Faulting virtual address */
+	unsigned int flags;		/* FAULT_FLAG_xxx flags */
+	pmd_t *pmd;			/* Pointer to pmd entry matching
+					 * the 'address'
+					 */
+	pte_t *pte;			/* Pointer to pte entry matching
+					 * the 'address'. NULL if the page
+					 * table hasn't been allocated.
+					 */
+	spinlock_t *ptl;		/* Page table lock.
+					 * Protects pte page table if 'pte'
+					 * is not NULL, otherwise pmd.
+					 */
 };
 
 /*
@@ -327,7 +344,8 @@ struct vm_operations_struct {
 	int (*fault)(struct vm_area_struct *vma, struct vm_fault *vmf);
 	int (*pmd_fault)(struct vm_area_struct *, unsigned long address,
 						pmd_t *, unsigned int flags);
-	void (*map_pages)(struct vm_area_struct *vma, struct vm_fault *vmf);
+	void (*map_pages)(struct fault_env *fe,
+			pgoff_t start_pgoff, pgoff_t end_pgoff);
 
 	/* notification that a previously read-only page is about to become
 	 * writable, if an error is returned it will cause a SIGBUS */
@@ -600,8 +618,7 @@ static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
 	return pte;
 }
 
-void do_set_pte(struct vm_area_struct *vma, unsigned long address,
-		struct page *page, pte_t *pte, bool write, bool anon);
+void do_set_pte(struct fault_env *fe, struct page *page);
 #endif
 
 /*
@@ -2062,7 +2079,8 @@ extern void truncate_inode_pages_final(struct address_space *);
 
 /* generic vm_area_ops exported for stackable file systems */
 extern int filemap_fault(struct vm_area_struct *, struct vm_fault *);
-extern void filemap_map_pages(struct vm_area_struct *vma, struct vm_fault *vmf);
+extern void filemap_map_pages(struct fault_env *fe,
+		pgoff_t start_pgoff, pgoff_t end_pgoff);
 extern int filemap_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf);
 
 /* mm/page-writeback.c */
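For orientation, the top-level fault path (part of the collapsed
mm/memory.c diff below) is expected to build this context once and hand
it down. A hedged sketch, not the verbatim hunk —
example_handle_mm_fault() is a hypothetical stand-in:

/* Sketch of how a fault entry point builds the context. */
static int example_handle_mm_fault(struct vm_area_struct *vma,
				   unsigned long address, unsigned int flags)
{
	struct fault_env fe = {
		.vma = vma,
		.address = address,
		.flags = flags,
	};

	/* fe.pmd is set once the walk reaches the pmd level; fe.pte
	 * stays NULL until a pte page table actually exists, matching
	 * the field comments above. */
	return 0;	/* would dispatch to the pte/pmd fault handlers */
}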
--- a/include/linux/userfaultfd_k.h
+++ b/include/linux/userfaultfd_k.h
@@ -27,8 +27,7 @@
 #define UFFD_SHARED_FCNTL_FLAGS (O_CLOEXEC | O_NONBLOCK)
 #define UFFD_FLAGS_SET (EFD_SHARED_FCNTL_FLAGS)
 
-extern int handle_userfault(struct vm_area_struct *vma, unsigned long address,
-			    unsigned int flags, unsigned long reason);
+extern int handle_userfault(struct fault_env *fe, unsigned long reason);
 
 extern ssize_t mcopy_atomic(struct mm_struct *dst_mm, unsigned long dst_start,
 			    unsigned long src_start, unsigned long len);
@@ -56,10 +55,7 @@ static inline bool userfaultfd_armed(struct vm_area_struct *vma)
 #else /* CONFIG_USERFAULTFD */
 
 /* mm helpers */
-static inline int handle_userfault(struct vm_area_struct *vma,
-				   unsigned long address,
-				   unsigned int flags,
-				   unsigned long reason)
+static inline int handle_userfault(struct fault_env *fe, unsigned long reason)
 {
 	return VM_FAULT_SIGBUS;
 }
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -2128,22 +2128,27 @@ int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 }
 EXPORT_SYMBOL(filemap_fault);
 
-void filemap_map_pages(struct vm_area_struct *vma, struct vm_fault *vmf)
+void filemap_map_pages(struct fault_env *fe,
+		pgoff_t start_pgoff, pgoff_t end_pgoff)
 {
 	struct radix_tree_iter iter;
 	void **slot;
-	struct file *file = vma->vm_file;
+	struct file *file = fe->vma->vm_file;
 	struct address_space *mapping = file->f_mapping;
+	pgoff_t last_pgoff = start_pgoff;
 	loff_t size;
 	struct page *page;
-	unsigned long address = (unsigned long) vmf->virtual_address;
-	unsigned long addr;
-	pte_t *pte;
 
 	rcu_read_lock();
-	radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, vmf->pgoff) {
-		if (iter.index > vmf->max_pgoff)
+	radix_tree_for_each_slot(slot, &mapping->page_tree, &iter,
+			start_pgoff) {
+		if (iter.index > end_pgoff)
 			break;
+		fe->pte += iter.index - last_pgoff;
+		fe->address += (iter.index - last_pgoff) << PAGE_SHIFT;
+		last_pgoff = iter.index;
+		if (!pte_none(*fe->pte))
+			goto next;
 repeat:
 		page = radix_tree_deref_slot(slot);
 		if (unlikely(!page))
@@ -2179,14 +2184,9 @@ void filemap_map_pages(struct vm_area_struct *vma, struct vm_fault *vmf)
 		if (page->index >= size >> PAGE_SHIFT)
 			goto unlock;
 
-		pte = vmf->pte + page->index - vmf->pgoff;
-		if (!pte_none(*pte))
-			goto unlock;
-
 		if (file->f_ra.mmap_miss > 0)
 			file->f_ra.mmap_miss--;
 
-		addr = address + (page->index - vmf->pgoff) * PAGE_SIZE;
-		do_set_pte(vma, addr, page, pte, false, false);
+		do_set_pte(fe, page);
 		unlock_page(page);
 		goto next;
 unlock:
@@ -2194,7 +2194,7 @@ void filemap_map_pages(struct vm_area_struct *vma, struct vm_fault *vmf)
 skip:
 		put_page(page);
 next:
-		if (iter.index == vmf->max_pgoff)
+		if (iter.index == end_pgoff)
 			break;
 	}
 	rcu_read_unlock();
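The incremental bookkeeping above replaces the old per-page
recomputation addr = address + (page->index - vmf->pgoff) * PAGE_SIZE. A
small standalone, userspace check that the two schemes agree; the
offsets and the PAGE_SHIFT of 12 are hypothetical:

#include <assert.h>
#include <stdint.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE (1UL << PAGE_SHIFT)

int main(void)
{
	uint64_t base = 0x7f0000000000ULL, start_pgoff = 10;
	uint64_t indices[] = { 10, 11, 13, 17 };	/* holes at 12, 14-16 */
	uint64_t address = base, last_pgoff = start_pgoff;

	for (int i = 0; i < 4; i++) {
		/* new scheme: advance by the gap since the last index */
		address += (indices[i] - last_pgoff) << PAGE_SHIFT;
		last_pgoff = indices[i];
		/* old scheme: recompute from the base every iteration */
		assert(address == base + (indices[i] - start_pgoff) * PAGE_SIZE);
	}
	return 0;
}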
[diff collapsed: mm/huge_memory.c]
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -36,9 +36,7 @@
 /* Do not use these with a slab allocator */
 #define GFP_SLAB_BUG_MASK (__GFP_DMA32|__GFP_HIGHMEM|~__GFP_BITS_MASK)
 
-extern int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
-		unsigned long address, pte_t *page_table, pmd_t *pmd,
-		unsigned int flags, pte_t orig_pte);
+int do_swap_page(struct fault_env *fe, pte_t orig_pte);
 
 void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
 		unsigned long floor, unsigned long ceiling);
[diff collapsed: mm/memory.c]
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -1809,7 +1809,8 @@ int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 }
 EXPORT_SYMBOL(filemap_fault);
 
-void filemap_map_pages(struct vm_area_struct *vma, struct vm_fault *vmf)
+void filemap_map_pages(struct fault_env *fe,
+		pgoff_t start_pgoff, pgoff_t end_pgoff)
 {
 	BUG();
 }