Commit edc315fd authored by Hugh Dickins, committed by Linus Torvalds

badpage: remove vma from page_remove_rmap

Remove page_remove_rmap()'s vma arg, which was only for the Eeek message.
And remove the BUG_ON(page_mapcount(page) == 0) from CONFIG_DEBUG_VM's
page_dup_rmap(): we're trying to be more resilient about that than BUGs.
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Cc: Christoph Lameter <cl@linux-foundation.org>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 2509ef26
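
The interface change, as a call site sees it, is just the dropped vma argument; callers that want bad-mapcount reporting now do it themselves. A schematic before/after fragment, condensed from the zap_pte_range() hunk below (illustrative only, not a standalone buildable example):

/* Before: vma was passed only so the debug "Eeek" report could
 * describe the mapping being torn down. */
page_remove_rmap(page, vma);

/* After: the page alone is enough for the rmap accounting; the
 * caller reports a bad mapcount itself, e.g. via print_bad_pte(). */
page_remove_rmap(page);
if (unlikely(page_mapcount(page) < 0))
	print_bad_pte(vma, addr, ptent, page);
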
@@ -69,7 +69,7 @@ void __anon_vma_link(struct vm_area_struct *);
 void page_add_anon_rmap(struct page *, struct vm_area_struct *, unsigned long);
 void page_add_new_anon_rmap(struct page *, struct vm_area_struct *, unsigned long);
 void page_add_file_rmap(struct page *);
-void page_remove_rmap(struct page *, struct vm_area_struct *);
+void page_remove_rmap(struct page *);
 #ifdef CONFIG_DEBUG_VM
 void page_dup_rmap(struct page *page, struct vm_area_struct *vma, unsigned long address);
...
@@ -193,7 +193,7 @@ __xip_unmap (struct address_space * mapping,
 /* Nuke the page table entry. */
 flush_cache_page(vma, address, pte_pfn(*pte));
 pteval = ptep_clear_flush_notify(vma, address, pte);
-page_remove_rmap(page, vma);
+page_remove_rmap(page);
 dec_mm_counter(mm, file_rss);
 BUG_ON(pte_dirty(pteval));
 pte_unmap_unlock(pte, ptl);
...
@@ -37,7 +37,7 @@ static void zap_pte(struct mm_struct *mm, struct vm_area_struct *vma,
 if (page) {
 if (pte_dirty(pte))
 set_page_dirty(page);
-page_remove_rmap(page, vma);
+page_remove_rmap(page);
 page_cache_release(page);
 update_hiwater_rss(mm);
 dec_mm_counter(mm, file_rss);
...
@@ -798,7 +798,7 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
 mark_page_accessed(page);
 file_rss--;
 }
-page_remove_rmap(page, vma);
+page_remove_rmap(page);
 if (unlikely(page_mapcount(page) < 0))
 print_bad_pte(vma, addr, ptent, page);
 tlb_remove_page(tlb, page);
@@ -2023,7 +2023,7 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
 * mapcount is visible. So transitively, TLBs to
 * old page will be flushed before it can be reused.
 */
-page_remove_rmap(old_page, vma);
+page_remove_rmap(old_page);
 }
 /* Free the old page.. */
...
@@ -707,7 +707,6 @@ void page_add_file_rmap(struct page *page)
 */
 void page_dup_rmap(struct page *page, struct vm_area_struct *vma, unsigned long address)
 {
-BUG_ON(page_mapcount(page) == 0);
 if (PageAnon(page))
 __page_check_anon_rmap(page, vma, address);
 atomic_inc(&page->_mapcount);
@@ -717,11 +716,10 @@ void page_dup_rmap(struct page *page, struct vm_area_struct *vma, unsigned long
 /**
 * page_remove_rmap - take down pte mapping from a page
 * @page: page to remove mapping from
-* @vma: the vm area in which the mapping is removed
 *
 * The caller needs to hold the pte lock.
 */
-void page_remove_rmap(struct page *page, struct vm_area_struct *vma)
+void page_remove_rmap(struct page *page)
 {
 if (atomic_add_negative(-1, &page->_mapcount)) {
 /*
@@ -837,7 +835,7 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 dec_mm_counter(mm, file_rss);
-page_remove_rmap(page, vma);
+page_remove_rmap(page);
 page_cache_release(page);
 out_unmap:
@@ -952,7 +950,7 @@ static int try_to_unmap_cluster(unsigned long cursor, unsigned int *mapcount,
 if (pte_dirty(pteval))
 set_page_dirty(page);
-page_remove_rmap(page, vma);
+page_remove_rmap(page);
 page_cache_release(page);
 dec_mm_counter(mm, file_rss);
 (*mapcount)--;
...