Commit 0721ec8b authored by Jan Kara, committed by Linus Torvalds

mm: use pgoff in struct vm_fault instead of passing it separately

struct vm_fault already has a pgoff entry.  Use it instead of passing
pgoff as a separate argument and then assigning it later.
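
In other words, the page offset is computed once when the vm_fault is
created, and every helper reads it from the struct. A minimal standalone
sketch of the pattern follows; the struct, the helper names
(handle_fault_old/handle_fault_new), and the shift arithmetic are
simplified, hypothetical stand-ins for the kernel's struct vm_fault and
linear_page_index(), not the real definitions:

#include <stdio.h>

typedef unsigned long pgoff_t;

/* Simplified stand-in for the kernel's struct vm_fault. */
struct vm_fault {
	unsigned long address;	/* faulting virtual address */
	pgoff_t pgoff;		/* page offset, filled in once at creation */
};

/* Before: the offset travels alongside the struct as a second argument. */
static int handle_fault_old(struct vm_fault *vmf, pgoff_t pgoff)
{
	return printf("fault at %#lx, pgoff %lu\n", vmf->address, pgoff);
}

/* After: helpers read the offset from the struct itself. */
static int handle_fault_new(struct vm_fault *vmf)
{
	return printf("fault at %#lx, pgoff %lu\n", vmf->address, vmf->pgoff);
}

int main(void)
{
	struct vm_fault vmf = {
		.address = 0x2000,
		.pgoff = 0x2000 >> 12,	/* crude stand-in for linear_page_index() */
	};

	handle_fault_old(&vmf, vmf.pgoff);	/* old calling convention */
	handle_fault_new(&vmf);			/* new calling convention */
	return 0;
}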

Link: http://lkml.kernel.org/r/1479460644-25076-4-git-send-email-jack@suse.cz
Signed-off-by: Jan Kara <jack@suse.cz>
Reviewed-by: Ross Zwisler <ross.zwisler@linux.intel.com>
Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 1a29d85e
mm/khugepaged.c
@@ -882,6 +882,7 @@ static bool __collapse_huge_page_swapin(struct mm_struct *mm,
 		.vma = vma,
 		.address = address,
 		.flags = FAULT_FLAG_ALLOW_RETRY,
 		.pmd = pmd,
+		.pgoff = linear_page_index(vma, address),
 	};
 	/* we only decide to swapin, if there is enough young ptes */

mm/memory.c
@@ -2275,7 +2275,7 @@ static int wp_pfn_shared(struct vm_fault *vmf, pte_t orig_pte)
 	if (vma->vm_ops && vma->vm_ops->pfn_mkwrite) {
 		struct vm_fault vmf2 = {
 			.page = NULL,
-			.pgoff = linear_page_index(vma, vmf->address),
+			.pgoff = vmf->pgoff,
 			.address = vmf->address,
 			.flags = FAULT_FLAG_WRITE | FAULT_FLAG_MKWRITE,
 		};

@@ -2844,15 +2844,15 @@ static int do_anonymous_page(struct vm_fault *vmf)
  * released depending on flags and vma->vm_ops->fault() return value.
  * See filemap_fault() and __lock_page_retry().
  */
-static int __do_fault(struct vm_fault *vmf, pgoff_t pgoff,
-		struct page *cow_page, struct page **page, void **entry)
+static int __do_fault(struct vm_fault *vmf, struct page *cow_page,
+		struct page **page, void **entry)
 {
 	struct vm_area_struct *vma = vmf->vma;
 	struct vm_fault vmf2;
 	int ret;

 	vmf2.address = vmf->address;
-	vmf2.pgoff = pgoff;
+	vmf2.pgoff = vmf->pgoff;
 	vmf2.flags = vmf->flags;
 	vmf2.page = NULL;
 	vmf2.gfp_mask = __get_fault_gfp_mask(vma);

@@ -3156,9 +3156,10 @@ late_initcall(fault_around_debugfs);
  * fault_around_pages() value (and therefore to page order). This way it's
  * easier to guarantee that we don't cross page table boundaries.
  */
-static int do_fault_around(struct vm_fault *vmf, pgoff_t start_pgoff)
+static int do_fault_around(struct vm_fault *vmf)
 {
 	unsigned long address = vmf->address, nr_pages, mask;
+	pgoff_t start_pgoff = vmf->pgoff;
 	pgoff_t end_pgoff;
 	int off, ret = 0;

@@ -3210,7 +3211,7 @@ static int do_fault_around(struct vm_fault *vmf, pgoff_t start_pgoff)
 	return ret;
 }

-static int do_read_fault(struct vm_fault *vmf, pgoff_t pgoff)
+static int do_read_fault(struct vm_fault *vmf)
 {
 	struct vm_area_struct *vma = vmf->vma;
 	struct page *fault_page;

@@ -3222,12 +3223,12 @@ static int do_read_fault(struct vm_fault *vmf, pgoff_t pgoff)
 	 * something).
 	 */
 	if (vma->vm_ops->map_pages && fault_around_bytes >> PAGE_SHIFT > 1) {
-		ret = do_fault_around(vmf, pgoff);
+		ret = do_fault_around(vmf);
 		if (ret)
 			return ret;
 	}

-	ret = __do_fault(vmf, pgoff, NULL, &fault_page, NULL);
+	ret = __do_fault(vmf, NULL, &fault_page, NULL);
 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
 		return ret;

@@ -3240,7 +3241,7 @@ static int do_read_fault(struct vm_fault *vmf, pgoff_t pgoff)
 	return ret;
 }

-static int do_cow_fault(struct vm_fault *vmf, pgoff_t pgoff)
+static int do_cow_fault(struct vm_fault *vmf)
 {
 	struct vm_area_struct *vma = vmf->vma;
 	struct page *fault_page, *new_page;

@@ -3261,7 +3262,7 @@ static int do_cow_fault(struct vm_fault *vmf, pgoff_t pgoff)
 		return VM_FAULT_OOM;
 	}

-	ret = __do_fault(vmf, pgoff, new_page, &fault_page, &fault_entry);
+	ret = __do_fault(vmf, new_page, &fault_page, &fault_entry);
 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
 		goto uncharge_out;

@@ -3276,7 +3277,7 @@ static int do_cow_fault(struct vm_fault *vmf, pgoff_t pgoff)
 		unlock_page(fault_page);
 		put_page(fault_page);
 	} else {
-		dax_unlock_mapping_entry(vma->vm_file->f_mapping, pgoff);
+		dax_unlock_mapping_entry(vma->vm_file->f_mapping, vmf->pgoff);
 	}
 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
 		goto uncharge_out;

@@ -3287,7 +3288,7 @@ static int do_cow_fault(struct vm_fault *vmf, pgoff_t pgoff)
 	return ret;
 }

-static int do_shared_fault(struct vm_fault *vmf, pgoff_t pgoff)
+static int do_shared_fault(struct vm_fault *vmf)
 {
 	struct vm_area_struct *vma = vmf->vma;
 	struct page *fault_page;

@@ -3295,7 +3296,7 @@ static int do_shared_fault(struct vm_fault *vmf, pgoff_t pgoff)
 	int dirtied = 0;
 	int ret, tmp;

-	ret = __do_fault(vmf, pgoff, NULL, &fault_page, NULL);
+	ret = __do_fault(vmf, NULL, &fault_page, NULL);
 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
 		return ret;

@@ -3356,16 +3357,15 @@ static int do_shared_fault(struct vm_fault *vmf, pgoff_t pgoff)
 static int do_fault(struct vm_fault *vmf)
 {
 	struct vm_area_struct *vma = vmf->vma;
-	pgoff_t pgoff = linear_page_index(vma, vmf->address);

 	/* The VMA was not fully populated on mmap() or missing VM_DONTEXPAND */
 	if (!vma->vm_ops->fault)
 		return VM_FAULT_SIGBUS;
 	if (!(vmf->flags & FAULT_FLAG_WRITE))
-		return do_read_fault(vmf, pgoff);
+		return do_read_fault(vmf);
 	if (!(vma->vm_flags & VM_SHARED))
-		return do_cow_fault(vmf, pgoff);
-	return do_shared_fault(vmf, pgoff);
+		return do_cow_fault(vmf);
+	return do_shared_fault(vmf);
 }

 static int numa_migrate_prep(struct page *page, struct vm_area_struct *vma,

@@ -3613,6 +3613,7 @@ static int __handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
 		.vma = vma,
 		.address = address & PAGE_MASK,
 		.flags = flags,
+		.pgoff = linear_page_index(vma, address),
 	};
 	struct mm_struct *mm = vma->vm_mm;
 	pgd_t *pgd;