Commit 2994302b authored by Jan Kara, committed by Linus Torvalds

mm: add orig_pte field into vm_fault

Add orig_pte field to vm_fault structure to allow ->page_mkwrite
handlers to fully handle the fault.

This also allows us to save some passing of extra arguments around.

Link: http://lkml.kernel.org/r/1479460644-25076-8-git-send-email-jack@suse.cz
Signed-off-by: Jan Kara <jack@suse.cz>
Reviewed-by: Ross Zwisler <ross.zwisler@linux.intel.com>
Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent fe82221f
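
For context, the sketch below (not part of this patch) illustrates how a ->page_mkwrite style handler could use the new field to fully handle a fault: it re-checks the page table entry under the PTE lock against vmf->orig_pte to detect a concurrent change. The helper name sample_mkwrite_check() and its exact signature are hypothetical; real handlers follow the callback signature and locking rules of their filesystem.

/*
 * Illustrative sketch only, not part of this patch: re-validate the
 * faulting PTE against the value recorded at fault time. The function
 * name and signature are hypothetical.
 */
static int sample_mkwrite_check(struct vm_area_struct *vma,
				struct vm_fault *vmf)
{
	spinlock_t *ptl;
	pte_t *ptep;
	int ret = 0;

	/* Map and lock the PTE the fault happened on. */
	ptep = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address, &ptl);
	/* If the PTE changed since the fault, let the caller retry. */
	if (!pte_same(*ptep, vmf->orig_pte))
		ret = VM_FAULT_NOPAGE;
	pte_unmap_unlock(ptep, ptl);
	return ret;
}
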
@@ -298,8 +298,8 @@ struct vm_fault {
 	pgoff_t pgoff;			/* Logical page offset based on vma */
 	unsigned long address;		/* Faulting virtual address */
 	pmd_t *pmd;			/* Pointer to pmd entry matching
-					 * the 'address'
-					 */
+					 * the 'address' */
+	pte_t orig_pte;			/* Value of PTE at the time of fault */
 	struct page *cow_page;		/* Handler may choose to COW */
 	struct page *page;		/* ->fault handlers should return a
...
@@ -36,7 +36,7 @@
 /* Do not use these with a slab allocator */
 #define GFP_SLAB_BUG_MASK (__GFP_DMA32|__GFP_HIGHMEM|~__GFP_BITS_MASK)
-int do_swap_page(struct vm_fault *vmf, pte_t orig_pte);
+int do_swap_page(struct vm_fault *vmf);
 void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
 		unsigned long floor, unsigned long ceiling);
...
@@ -875,7 +875,6 @@ static bool __collapse_huge_page_swapin(struct mm_struct *mm,
 					unsigned long address, pmd_t *pmd,
 					int referenced)
 {
-	pte_t pteval;
 	int swapped_in = 0, ret = 0;
 	struct vm_fault vmf = {
 		.vma = vma,
@@ -893,11 +892,11 @@ static bool __collapse_huge_page_swapin(struct mm_struct *mm,
 	vmf.pte = pte_offset_map(pmd, address);
 	for (; vmf.address < address + HPAGE_PMD_NR*PAGE_SIZE;
 			vmf.pte++, vmf.address += PAGE_SIZE) {
-		pteval = *vmf.pte;
-		if (!is_swap_pte(pteval))
+		vmf.orig_pte = *vmf.pte;
+		if (!is_swap_pte(vmf.orig_pte))
 			continue;
 		swapped_in++;
-		ret = do_swap_page(&vmf, pteval);
+		ret = do_swap_page(&vmf);
 		/* do_swap_page returns VM_FAULT_RETRY with released mmap_sem */
 		if (ret & VM_FAULT_RETRY) {
...