Commit 369ea824 authored by Jérôme Glisse, committed by Linus Torvalds

mm/rmap: update to new mmu_notifier semantic v2

Replace all mmu_notifier_invalidate_page() calls by *_invalidate_range()
and make sure it is bracketed by calls to *_invalidate_range_start()/end().

Note that because we cannot presume the pmd value or pte value, we have
to assume the worst and unconditionally report an invalidation as
happening.

Changed since v2:
  - try_to_unmap_one() only one call to mmu_notifier_invalidate_range()
  - compute end with PAGE_SIZE << compound_order(page)
  - fix PageHuge() case in try_to_unmap_one()
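
To make the new semantic concrete, here is a minimal userspace sketch of
the start/range/end bracketing pattern described above. It is an
illustration only: the three invalidate_* functions are printf stand-ins
for the kernel's mmu_notifier callbacks, and the compound order of 9
(a 2 MiB THP) is an assumed example.

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE (1UL << PAGE_SHIFT)

/* Stand-in for mmu_notifier_invalidate_range_start() */
static void invalidate_range_start(unsigned long start, unsigned long end)
{
	printf("start [%#lx, %#lx)\n", start, end);
}

/* Stand-in for mmu_notifier_invalidate_range() */
static void invalidate_range(unsigned long start, unsigned long end)
{
	printf("range [%#lx, %#lx)\n", start, end);
}

/* Stand-in for mmu_notifier_invalidate_range_end() */
static void invalidate_range_end(unsigned long start, unsigned long end)
{
	printf("end   [%#lx, %#lx)\n", start, end);
}

int main(void)
{
	unsigned long start = 0x7f0000000000UL;	/* arbitrary mapped address */
	unsigned int order = 9;			/* assume a 2 MiB THP */
	unsigned long end = start + (PAGE_SIZE << order);

	/* Bracket the worst-case (whole compound page) range up front... */
	invalidate_range_start(start, end);

	/* ...report the range actually touched during the page-table walk... */
	invalidate_range(start, start + PAGE_SIZE);

	/* ...and close the bracket once the walk is done. */
	invalidate_range_end(start, end);
	return 0;
}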
Signed-off-by: Jérôme Glisse <jglisse@redhat.com>
Reviewed-by: Andrea Arcangeli <aarcange@redhat.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Ross Zwisler <ross.zwisler@linux.intel.com>
Cc: Bernhard Held <berny156@gmx.de>
Cc: Adam Borowski <kilobyte@angband.pl>
Cc: Radim Krčmář <rkrcmar@redhat.com>
Cc: Wanpeng Li <kernellwp@gmail.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Takashi Iwai <tiwai@suse.de>
Cc: Nadav Amit <nadav.amit@gmail.com>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: axie <axie@amd.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent a4d1a885
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -887,11 +887,21 @@ static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma,
 		.address = address,
 		.flags = PVMW_SYNC,
 	};
+	unsigned long start = address, end;
 	int *cleaned = arg;
 
+	/*
+	 * We have to assume the worse case ie pmd for invalidation. Note that
+	 * the page can not be free from this function.
+	 */
+	end = min(vma->vm_end, start + (PAGE_SIZE << compound_order(page)));
+	mmu_notifier_invalidate_range_start(vma->vm_mm, start, end);
+
 	while (page_vma_mapped_walk(&pvmw)) {
+		unsigned long cstart, cend;
 		int ret = 0;
-		address = pvmw.address;
+
+		cstart = address = pvmw.address;
 		if (pvmw.pte) {
 			pte_t entry;
 			pte_t *pte = pvmw.pte;
@@ -904,6 +914,7 @@ static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma,
 			entry = pte_wrprotect(entry);
 			entry = pte_mkclean(entry);
 			set_pte_at(vma->vm_mm, address, pte, entry);
+			cend = cstart + PAGE_SIZE;
 			ret = 1;
 		} else {
 #ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
@@ -918,6 +929,8 @@ static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma,
 			entry = pmd_wrprotect(entry);
 			entry = pmd_mkclean(entry);
 			set_pmd_at(vma->vm_mm, address, pmd, entry);
+			cstart &= PMD_MASK;
+			cend = cstart + PMD_SIZE;
 			ret = 1;
 #else
 			/* unexpected pmd-mapped page? */
@@ -926,11 +939,13 @@ static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma,
 		}
 
 		if (ret) {
-			mmu_notifier_invalidate_page(vma->vm_mm, address);
+			mmu_notifier_invalidate_range(vma->vm_mm, cstart, cend);
 			(*cleaned)++;
 		}
 	}
 
+	mmu_notifier_invalidate_range_end(vma->vm_mm, start, end);
+
 	return true;
 }
 
@@ -1324,6 +1339,7 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 	pte_t pteval;
 	struct page *subpage;
 	bool ret = true;
+	unsigned long start = address, end;
 	enum ttu_flags flags = (enum ttu_flags)arg;
 
 	/* munlock has nothing to gain from examining un-locked vmas */
@@ -1335,6 +1351,14 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 				flags & TTU_MIGRATION, page);
 	}
 
+	/*
+	 * We have to assume the worse case ie pmd for invalidation. Note that
+	 * the page can not be free in this function as call of try_to_unmap()
+	 * must hold a reference on the page.
+	 */
+	end = min(vma->vm_end, start + (PAGE_SIZE << compound_order(page)));
+	mmu_notifier_invalidate_range_start(vma->vm_mm, start, end);
+
 	while (page_vma_mapped_walk(&pvmw)) {
 		/*
 		 * If the page is mlock()d, we cannot swap it out.
@@ -1445,6 +1469,7 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 			if (unlikely(PageSwapBacked(page) != PageSwapCache(page))) {
 				WARN_ON_ONCE(1);
 				ret = false;
+				/* We have to invalidate as we cleared the pte */
 				page_vma_mapped_walk_done(&pvmw);
 				break;
 			}
@@ -1490,8 +1515,12 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 discard:
 		page_remove_rmap(subpage, PageHuge(page));
 		put_page(page);
-		mmu_notifier_invalidate_page(mm, address);
+		mmu_notifier_invalidate_range(mm, address,
+					      address + PAGE_SIZE);
 	}
 
+	mmu_notifier_invalidate_range_end(vma->vm_mm, start, end);
+
 	return ret;
 }
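
The pmd branch of page_mkclean_one() above rounds the reported range down
to a pmd boundary (cstart &= PMD_MASK) so the whole huge-page mapping is
invalidated, not just one 4 KiB page. A standalone sketch of that
arithmetic follows; the constants are assumptions for x86-64, not values
taken from the kernel headers:

#include <stdio.h>

#define PMD_SHIFT 21			/* assume x86-64: 2 MiB per pmd */
#define PMD_SIZE (1UL << PMD_SHIFT)
#define PMD_MASK (~(PMD_SIZE - 1))

int main(void)
{
	unsigned long cstart = 0x7f1234567000UL; /* arbitrary mapped address */
	unsigned long cend;

	cstart &= PMD_MASK;		/* round down to the pmd boundary */
	cend = cstart + PMD_SIZE;	/* cover the full 2 MiB mapping */

	/* prints: invalidate [0x7f1234400000, 0x7f1234600000) */
	printf("invalidate [%#lx, %#lx)\n", cstart, cend);
	return 0;
}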