Commit 315d09bf authored by Kirill A. Shutemov, committed by Linus Torvalds

Revert "mm: make faultaround produce old ptes"

This reverts commit 5c0a85fa.

The commit causes a ~6% regression in unixbench.

Let's revert it for now and consider another solution for the reclaim
problem later.

Link: http://lkml.kernel.org/r/1465893750-44080-2-git-send-email-kirill.shutemov@linux.intel.com
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Reported-by: "Huang, Ying" <ying.huang@intel.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Vinayak Menon <vinmenon@codeaurora.org>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 1f08fe26
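
For context: the reverted commit made faultaround install "old" page table
entries, i.e. entries with the hardware accessed (young) bit cleared, so that
reclaim could distinguish pages the process actually touched from pages mapped
speculatively. A minimal sketch of that idea, using the same helpers visible
in the diff below (the function and its old flag are illustrative, not code
from either commit):

	/*
	 * Illustrative sketch only: "old" stands in for the bool old
	 * parameter that this revert removes from do_set_pte().
	 */
	static void sketch_set_pte(struct vm_area_struct *vma,
				   unsigned long address,
				   struct page *page, pte_t *ptep, bool old)
	{
		pte_t entry = mk_pte(page, vma->vm_page_prot);

		if (old)
			/*
			 * Clear the accessed bit: reclaim sees the page as
			 * "unreferenced" until the CPU sets the bit again on
			 * a real access.
			 */
			entry = pte_mkold(entry);
		set_pte_at(vma->vm_mm, address, ptep, entry);
		update_mmu_cache(vma, address, ptep);
	}
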
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -602,7 +602,7 @@ static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
 }
 
 void do_set_pte(struct vm_area_struct *vma, unsigned long address,
-		struct page *page, pte_t *pte, bool write, bool anon, bool old);
+		struct page *page, pte_t *pte, bool write, bool anon);
 #endif
 
 /*
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -2186,7 +2186,7 @@ void filemap_map_pages(struct vm_area_struct *vma, struct vm_fault *vmf)
 		if (file->f_ra.mmap_miss > 0)
 			file->f_ra.mmap_miss--;
 		addr = address + (page->index - vmf->pgoff) * PAGE_SIZE;
-		do_set_pte(vma, addr, page, pte, false, false, true);
+		do_set_pte(vma, addr, page, pte, false, false);
 		unlock_page(page);
 		goto next;
 unlock:
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2877,7 +2877,7 @@ static int __do_fault(struct vm_area_struct *vma, unsigned long address,
  * vm_ops->map_pages.
  */
 void do_set_pte(struct vm_area_struct *vma, unsigned long address,
-		struct page *page, pte_t *pte, bool write, bool anon, bool old)
+		struct page *page, pte_t *pte, bool write, bool anon)
 {
 	pte_t entry;
 
@@ -2885,8 +2885,6 @@ void do_set_pte(struct vm_area_struct *vma, unsigned long address,
 	entry = mk_pte(page, vma->vm_page_prot);
 	if (write)
 		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
-	if (old)
-		entry = pte_mkold(entry);
 	if (anon) {
 		inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
 		page_add_new_anon_rmap(page, vma, address, false);
@@ -3032,20 +3030,9 @@ static int do_read_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	 */
 	if (vma->vm_ops->map_pages && fault_around_bytes >> PAGE_SHIFT > 1) {
 		pte = pte_offset_map_lock(mm, pmd, address, &ptl);
-		if (!pte_same(*pte, orig_pte))
-			goto unlock_out;
 		do_fault_around(vma, address, pte, pgoff, flags);
-		/* Check if the fault is handled by faultaround */
-		if (!pte_same(*pte, orig_pte)) {
-			/*
-			 * Faultaround produce old pte, but the pte we've
-			 * handler fault for should be young.
-			 */
-			pte_t entry = pte_mkyoung(*pte);
-			if (ptep_set_access_flags(vma, address, pte, entry, 0))
-				update_mmu_cache(vma, address, pte);
+		if (!pte_same(*pte, orig_pte))
 			goto unlock_out;
-		}
 		pte_unmap_unlock(pte, ptl);
 	}
 
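The restored flow, annotated (this is the + side of the hunk above, with
comments added for this write-up):

	pte = pte_offset_map_lock(mm, pmd, address, &ptl);
	do_fault_around(vma, address, pte, pgoff, flags);
	/*
	 * do_fault_around() may have mapped the faulting address itself;
	 * if so, *pte no longer matches orig_pte and the fault is already
	 * handled. Post-revert the entry is installed young, so the
	 * pte_mkyoung() fixup removed above is no longer needed.
	 */
	if (!pte_same(*pte, orig_pte))
		goto unlock_out;
	pte_unmap_unlock(pte, ptl);
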
@@ -3060,7 +3047,7 @@ static int do_read_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 		put_page(fault_page);
 		return ret;
 	}
-	do_set_pte(vma, address, fault_page, pte, false, false, false);
+	do_set_pte(vma, address, fault_page, pte, false, false);
 	unlock_page(fault_page);
 unlock_out:
 	pte_unmap_unlock(pte, ptl);
@@ -3111,7 +3098,7 @@ static int do_cow_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 		}
 		goto uncharge_out;
 	}
-	do_set_pte(vma, address, new_page, pte, true, true, false);
+	do_set_pte(vma, address, new_page, pte, true, true);
 	mem_cgroup_commit_charge(new_page, memcg, false, false);
 	lru_cache_add_active_or_unevictable(new_page, vma);
 	pte_unmap_unlock(pte, ptl);
@@ -3164,7 +3151,7 @@ static int do_shared_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 		put_page(fault_page);
 		return ret;
 	}
-	do_set_pte(vma, address, fault_page, pte, true, false, false);
+	do_set_pte(vma, address, fault_page, pte, true, false);
 	pte_unmap_unlock(pte, ptl);
 
 	if (set_page_dirty(fault_page))