Commit 9118c0cb authored by Jan Kara's avatar Jan Kara Committed by Linus Torvalds

mm: factor out functionality to finish page faults

Introduce finish_fault() as a helper function for finishing page faults.
It is rather thin wrapper around alloc_set_pte() but since we'd want to
call this from DAX code or filesystems, it is still useful to avoid some
boilerplate code.

Link: http://lkml.kernel.org/r/1479460644-25076-10-git-send-email-jack@suse.cz
Signed-off-by: Jan Kara <jack@suse.cz>
Reviewed-by: Ross Zwisler <ross.zwisler@linux.intel.com>
Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 3917048d
...@@ -620,6 +620,7 @@ static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma) ...@@ -620,6 +620,7 @@ static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
int alloc_set_pte(struct vm_fault *vmf, struct mem_cgroup *memcg, int alloc_set_pte(struct vm_fault *vmf, struct mem_cgroup *memcg,
struct page *page); struct page *page);
int finish_fault(struct vm_fault *vmf);
#endif #endif
/* /*
......
...@@ -3074,6 +3074,38 @@ int alloc_set_pte(struct vm_fault *vmf, struct mem_cgroup *memcg, ...@@ -3074,6 +3074,38 @@ int alloc_set_pte(struct vm_fault *vmf, struct mem_cgroup *memcg,
return ret; return ret;
} }
/**
* finish_fault - finish page fault once we have prepared the page to fault
*
* @vmf: structure describing the fault
*
* This function handles all that is needed to finish a page fault once the
* page to fault in is prepared. It handles locking of PTEs, inserts PTE for
* given page, adds reverse page mapping, handles memcg charges and LRU
* addition. The function returns 0 on success, VM_FAULT_ code in case of
* error.
*
* The function expects the page to be locked and on success it consumes a
* reference of a page being mapped (for the PTE which maps it).
*/
int finish_fault(struct vm_fault *vmf)
{
struct page *page;
int ret;
/* Did we COW the page? */
if ((vmf->flags & FAULT_FLAG_WRITE) &&
!(vmf->vma->vm_flags & VM_SHARED))
page = vmf->cow_page;
else
page = vmf->page;
ret = alloc_set_pte(vmf, vmf->memcg, page);
if (vmf->pte)
pte_unmap_unlock(vmf->pte, vmf->ptl);
return ret;
}
static unsigned long fault_around_bytes __read_mostly = static unsigned long fault_around_bytes __read_mostly =
rounddown_pow_of_two(65536); rounddown_pow_of_two(65536);
...@@ -3213,9 +3245,7 @@ static int do_read_fault(struct vm_fault *vmf) ...@@ -3213,9 +3245,7 @@ static int do_read_fault(struct vm_fault *vmf)
if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY))) if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
return ret; return ret;
ret |= alloc_set_pte(vmf, NULL, vmf->page); ret |= finish_fault(vmf);
if (vmf->pte)
pte_unmap_unlock(vmf->pte, vmf->ptl);
unlock_page(vmf->page); unlock_page(vmf->page);
if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY))) if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
put_page(vmf->page); put_page(vmf->page);
...@@ -3250,9 +3280,7 @@ static int do_cow_fault(struct vm_fault *vmf) ...@@ -3250,9 +3280,7 @@ static int do_cow_fault(struct vm_fault *vmf)
copy_user_highpage(vmf->cow_page, vmf->page, vmf->address, vma); copy_user_highpage(vmf->cow_page, vmf->page, vmf->address, vma);
__SetPageUptodate(vmf->cow_page); __SetPageUptodate(vmf->cow_page);
ret |= alloc_set_pte(vmf, vmf->memcg, vmf->cow_page); ret |= finish_fault(vmf);
if (vmf->pte)
pte_unmap_unlock(vmf->pte, vmf->ptl);
if (!(ret & VM_FAULT_DAX_LOCKED)) { if (!(ret & VM_FAULT_DAX_LOCKED)) {
unlock_page(vmf->page); unlock_page(vmf->page);
put_page(vmf->page); put_page(vmf->page);
...@@ -3293,9 +3321,7 @@ static int do_shared_fault(struct vm_fault *vmf) ...@@ -3293,9 +3321,7 @@ static int do_shared_fault(struct vm_fault *vmf)
} }
} }
ret |= alloc_set_pte(vmf, NULL, vmf->page); ret |= finish_fault(vmf);
if (vmf->pte)
pte_unmap_unlock(vmf->pte, vmf->ptl);
if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE |
VM_FAULT_RETRY))) { VM_FAULT_RETRY))) {
unlock_page(vmf->page); unlock_page(vmf->page);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment