Commit 936ca80d authored by Jan Kara, committed by Linus Torvalds

mm: trim __do_fault() arguments

Use the vm_fault structure to pass cow_page, page, and entry in and out of
the function.

That reduces the number of __do_fault() arguments from 4 to 1.

Link: http://lkml.kernel.org/r/1479460644-25076-6-git-send-email-jack@suse.cz
Signed-off-by: Jan Kara <jack@suse.cz>
Reviewed-by: Ross Zwisler <ross.zwisler@linux.intel.com>
Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 667240e0
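
For reference, the three pieces of state being moved out of the argument list already have slots in struct vm_fault at this point in the series; the diff below reads and writes vmf->cow_page, vmf->page, and vmf->entry directly. A trimmed sketch of the relevant fields (not the complete definition):

struct vm_fault {
	/* ... address, flags, pgoff, etc. set up by the core fault code ... */
	struct page *cow_page;	/* in: page preallocated by do_cow_fault() */
	struct page *page;	/* out: page returned by ->fault() */
	void *entry;		/* out: locked DAX entry when ->fault()
				 *      returns VM_FAULT_DAX_LOCKED */
	/* ... pte, ptl and other per-fault state ... */
};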
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2844,26 +2844,22 @@ static int do_anonymous_page(struct vm_fault *vmf)
  * released depending on flags and vma->vm_ops->fault() return value.
  * See filemap_fault() and __lock_page_retry().
  */
-static int __do_fault(struct vm_fault *vmf, struct page *cow_page,
-		      struct page **page, void **entry)
+static int __do_fault(struct vm_fault *vmf)
 {
 	struct vm_area_struct *vma = vmf->vma;
 	int ret;
 
-	vmf->cow_page = cow_page;
-
 	ret = vma->vm_ops->fault(vma, vmf);
 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
 		return ret;
-	if (ret & VM_FAULT_DAX_LOCKED) {
-		*entry = vmf->entry;
+	if (ret & VM_FAULT_DAX_LOCKED)
 		return ret;
-	}
 
 	if (unlikely(PageHWPoison(vmf->page))) {
 		if (ret & VM_FAULT_LOCKED)
 			unlock_page(vmf->page);
 		put_page(vmf->page);
+		vmf->page = NULL;
 		return VM_FAULT_HWPOISON;
 	}
 
@@ -2872,7 +2868,6 @@ static int __do_fault(struct vm_fault *vmf, struct page *cow_page,
 	else
 		VM_BUG_ON_PAGE(!PageLocked(vmf->page), vmf->page);
 
-	*page = vmf->page;
 	return ret;
 }
 
@@ -3208,7 +3203,6 @@ static int do_fault_around(struct vm_fault *vmf)
 static int do_read_fault(struct vm_fault *vmf)
 {
 	struct vm_area_struct *vma = vmf->vma;
-	struct page *fault_page;
 	int ret = 0;
 
 	/*
@@ -3222,54 +3216,52 @@ static int do_read_fault(struct vm_fault *vmf)
 		return ret;
 	}
 
-	ret = __do_fault(vmf, NULL, &fault_page, NULL);
+	ret = __do_fault(vmf);
 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
 		return ret;
 
-	ret |= alloc_set_pte(vmf, NULL, fault_page);
+	ret |= alloc_set_pte(vmf, NULL, vmf->page);
 	if (vmf->pte)
 		pte_unmap_unlock(vmf->pte, vmf->ptl);
-	unlock_page(fault_page);
+	unlock_page(vmf->page);
 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
-		put_page(fault_page);
+		put_page(vmf->page);
 	return ret;
 }
 
 static int do_cow_fault(struct vm_fault *vmf)
 {
 	struct vm_area_struct *vma = vmf->vma;
-	struct page *fault_page, *new_page;
-	void *fault_entry;
 	struct mem_cgroup *memcg;
 	int ret;
 
 	if (unlikely(anon_vma_prepare(vma)))
 		return VM_FAULT_OOM;
 
-	new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vmf->address);
-	if (!new_page)
+	vmf->cow_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vmf->address);
+	if (!vmf->cow_page)
 		return VM_FAULT_OOM;
 
-	if (mem_cgroup_try_charge(new_page, vma->vm_mm, GFP_KERNEL,
+	if (mem_cgroup_try_charge(vmf->cow_page, vma->vm_mm, GFP_KERNEL,
 				&memcg, false)) {
-		put_page(new_page);
+		put_page(vmf->cow_page);
 		return VM_FAULT_OOM;
 	}
 
-	ret = __do_fault(vmf, new_page, &fault_page, &fault_entry);
+	ret = __do_fault(vmf);
 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
 		goto uncharge_out;
 
 	if (!(ret & VM_FAULT_DAX_LOCKED))
-		copy_user_highpage(new_page, fault_page, vmf->address, vma);
-	__SetPageUptodate(new_page);
+		copy_user_highpage(vmf->cow_page, vmf->page, vmf->address, vma);
+	__SetPageUptodate(vmf->cow_page);
 
-	ret |= alloc_set_pte(vmf, memcg, new_page);
+	ret |= alloc_set_pte(vmf, memcg, vmf->cow_page);
 	if (vmf->pte)
 		pte_unmap_unlock(vmf->pte, vmf->ptl);
 	if (!(ret & VM_FAULT_DAX_LOCKED)) {
-		unlock_page(fault_page);
-		put_page(fault_page);
+		unlock_page(vmf->page);
+		put_page(vmf->page);
 	} else {
 		dax_unlock_mapping_entry(vma->vm_file->f_mapping, vmf->pgoff);
 	}
@@ -3277,20 +3269,19 @@ static int do_cow_fault(struct vm_fault *vmf)
 		goto uncharge_out;
 	return ret;
 uncharge_out:
-	mem_cgroup_cancel_charge(new_page, memcg, false);
-	put_page(new_page);
+	mem_cgroup_cancel_charge(vmf->cow_page, memcg, false);
+	put_page(vmf->cow_page);
 	return ret;
 }
 
 static int do_shared_fault(struct vm_fault *vmf)
 {
 	struct vm_area_struct *vma = vmf->vma;
-	struct page *fault_page;
 	struct address_space *mapping;
 	int dirtied = 0;
 	int ret, tmp;
 
-	ret = __do_fault(vmf, NULL, &fault_page, NULL);
+	ret = __do_fault(vmf);
 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
 		return ret;
 
@@ -3299,26 +3290,26 @@ static int do_shared_fault(struct vm_fault *vmf)
 	 * about to become writable
 	 */
 	if (vma->vm_ops->page_mkwrite) {
-		unlock_page(fault_page);
-		tmp = do_page_mkwrite(vma, fault_page, vmf->address);
+		unlock_page(vmf->page);
+		tmp = do_page_mkwrite(vma, vmf->page, vmf->address);
 		if (unlikely(!tmp ||
 				(tmp & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) {
-			put_page(fault_page);
+			put_page(vmf->page);
 			return tmp;
 		}
 	}
 
-	ret |= alloc_set_pte(vmf, NULL, fault_page);
+	ret |= alloc_set_pte(vmf, NULL, vmf->page);
 	if (vmf->pte)
 		pte_unmap_unlock(vmf->pte, vmf->ptl);
 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE |
 					VM_FAULT_RETRY))) {
-		unlock_page(fault_page);
-		put_page(fault_page);
+		unlock_page(vmf->page);
+		put_page(vmf->page);
 		return ret;
 	}
 
-	if (set_page_dirty(fault_page))
+	if (set_page_dirty(vmf->page))
 		dirtied = 1;
 	/*
 	 * Take a local copy of the address_space - page.mapping may be zeroed
@@ -3326,8 +3317,8 @@ static int do_shared_fault(struct vm_fault *vmf)
 	 * pinned by vma->vm_file's reference. We rely on unlock_page()'s
 	 * release semantics to prevent the compiler from undoing this copying.
 	 */
-	mapping = page_rmapping(fault_page);
-	unlock_page(fault_page);
+	mapping = page_rmapping(vmf->page);
+	unlock_page(vmf->page);
 	if ((dirtied || vma->vm_ops->page_mkwrite) && mapping) {
 		/*
 		 * Some device drivers do not set page.mapping but still
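
The net effect at a call site, using do_cow_fault() as the example (a sketch distilled from the hunks above, error paths elided):

	/* before: 4 arguments, results handed back through out-pointers */
	ret = __do_fault(vmf, new_page, &fault_page, &fault_entry);

	/* after: 1 argument; the input goes in via vmf->cow_page and the
	 * results come back out via vmf->page (and vmf->entry for DAX)
	 */
	vmf->cow_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vmf->address);
	ret = __do_fault(vmf);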