Commit 69929041 authored by Hugh Dickins, committed by Linus Torvalds

[PATCH] rmaplock: swapoff use anon_vma

Swapoff can make good use of a page's anon_vma and index, while it's still
left in swapcache, or once it's brought back in and the first pte mapped back:
unuse_vma can go directly to just one page of only those vmas with the same
anon_vma.  And unuse_process can skip any vmas without an anon_vma (extending
the hugetlb check: hugetlb vmas have no anon_vma).

This just hacks in on top of the existing procedure, still going through all
the vmas of all the mms in mmlist.  A more elegant procedure might replace
mmlist by a list of anon_vmas: but that would be more work to implement, with
apparently more overhead in the common paths.
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 9d9ae43b
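For context on what "a page's anon_vma" means here: an anonymous page stores its anon_vma pointer in page->mapping with the low PAGE_MAPPING_ANON bit set, and that tagged-pointer encoding is exactly what the new page_address_in_vma() below compares against vma->anon_vma. A minimal user-space sketch of the convention (the struct definitions are simplified stand-ins, not the kernel's; it assumes the anon_vma is at least 2-byte aligned so the low bit is free):

#include <stdio.h>
#include <stdint.h>

/* Simplified stand-ins for the kernel structures; illustration only. */
#define PAGE_MAPPING_ANON 1	/* low bit tags page->mapping as an anon_vma */

struct anon_vma { int dummy; };

struct page {
	void *mapping;	/* anon_vma | PAGE_MAPPING_ANON, or a file's mapping */
};

/* Tag the anon_vma pointer into page->mapping, as anon rmap setup does. */
static void page_set_anon(struct page *page, struct anon_vma *anon_vma)
{
	page->mapping = (void *)((uintptr_t)anon_vma | PAGE_MAPPING_ANON);
}

static int PageAnon(struct page *page)
{
	return ((uintptr_t)page->mapping & PAGE_MAPPING_ANON) != 0;
}

/* Undo the tag: the same subtraction page_address_in_vma() performs. */
static struct anon_vma *page_anon_vma(struct page *page)
{
	return (struct anon_vma *)((char *)page->mapping - PAGE_MAPPING_ANON);
}

int main(void)
{
	struct anon_vma av;
	struct page page;

	page_set_anon(&page, &av);
	printf("PageAnon=%d, anon_vma recovered=%d\n",
	       PageAnon(&page), page_anon_vma(&page) == &av);
	return 0;
}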
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -91,6 +91,11 @@ static inline void page_dup_rmap(struct page *page)
 int page_referenced(struct page *, int is_locked);
 int try_to_unmap(struct page *);
 
+/*
+ * Used by swapoff to help locate where page is expected in vma.
+ */
+unsigned long page_address_in_vma(struct page *, struct vm_area_struct *);
+
 #else	/* !CONFIG_MMU */
 
 #define anon_vma_init()		do {} while (0)
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -230,6 +230,24 @@ vma_address(struct page *page, struct vm_area_struct *vma)
 	return address;
 }
 
+/*
+ * At what user virtual address is page expected in vma? checking that the
+ * page matches the vma: currently only used by unuse_process, on anon pages.
+ */
+unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
+{
+	if (PageAnon(page)) {
+		if ((void *)vma->anon_vma !=
+		    (void *)page->mapping - PAGE_MAPPING_ANON)
+			return -EFAULT;
+	} else if (page->mapping && !(vma->vm_flags & VM_NONLINEAR)) {
+		if (vma->vm_file->f_mapping != page->mapping)
+			return -EFAULT;
+	} else
+		return -EFAULT;
+	return vma_address(page, vma);
+}
+
 /*
  * Subfunctions of page_referenced: page_referenced_one called
  * repeatedly from either page_referenced_anon or page_referenced_file.
@@ -459,6 +477,8 @@ void page_remove_rmap(struct page *page)
 		 * which increments mapcount after us but sets mapping
 		 * before us: so leave the reset to free_hot_cold_page,
 		 * and remember that it's only reliable while mapped.
+		 * Leaving it set also helps swapoff to reinstate ptes
+		 * faster for those pages still in swapcache.
 		 */
 		if (page_test_and_clear_dirty(page))
 			set_page_dirty(page);
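page_address_in_vma() only validates that the page belongs to the vma; the address itself comes from the existing vma_address() helper, whose tail is visible at the top of the first mm/rmap.c hunk. It maps the page's object index through vm_pgoff into the vma's virtual range. A simplified, runnable sketch of that arithmetic (assumes PAGE_CACHE_SHIFT == PAGE_SHIFT and paraphrases the kernel types):

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

struct vm_area_struct {
	unsigned long vm_start;	/* first virtual address of the mapping */
	unsigned long vm_end;	/* one past the last */
	unsigned long vm_pgoff;	/* offset of vm_start within the object, in pages */
};

/* Where in this vma would a page with the given object index live? */
static unsigned long vma_address(unsigned long pgoff, struct vm_area_struct *vma)
{
	unsigned long address;

	address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
	if (address < vma->vm_start || address >= vma->vm_end)
		return -EFAULT;	/* page's index falls outside this vma */
	return address;
}

int main(void)
{
	/* vma covering indices 3.. of its object, starting at 0x8000000 */
	struct vm_area_struct vma = { 0x8000000, 0x8010000, 3 };

	/* page index 5 -> two pages past vm_start: prints 0x8002000 */
	printf("0x%lx\n", vma_address(5, &vma));
	return 0;
}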
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -520,14 +520,24 @@ static unsigned long unuse_pgd(struct vm_area_struct * vma, pgd_t *dir,
 }
 
 /* vma->vm_mm->page_table_lock is held */
-static unsigned long unuse_vma(struct vm_area_struct * vma, pgd_t *pgdir,
+static unsigned long unuse_vma(struct vm_area_struct * vma,
 				swp_entry_t entry, struct page *page)
 {
-	unsigned long start = vma->vm_start, end = vma->vm_end;
+	pgd_t *pgdir;
+	unsigned long start, end;
 	unsigned long foundaddr;
 
-	if (start >= end)
-		BUG();
+	if (page->mapping) {
+		start = page_address_in_vma(page, vma);
+		if (start == -EFAULT)
+			return 0;
+		else
+			end = start + PAGE_SIZE;
+	} else {
+		start = vma->vm_start;
+		end = vma->vm_end;
+	}
+	pgdir = pgd_offset(vma->vm_mm, start);
 	do {
 		foundaddr = unuse_pgd(vma, pgdir, start, end - start,
 					entry, page);
@@ -559,9 +569,8 @@ static int unuse_process(struct mm_struct * mm,
 	}
 	spin_lock(&mm->page_table_lock);
 	for (vma = mm->mmap; vma; vma = vma->vm_next) {
-		if (!is_vm_hugetlb_page(vma)) {
-			pgd_t * pgd = pgd_offset(mm, vma->vm_start);
-			foundaddr = unuse_vma(vma, pgd, entry, page);
+		if (vma->anon_vma) {
+			foundaddr = unuse_vma(vma, entry, page);
 			if (foundaddr)
 				break;
 		}
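Putting the mm/swapfile.c changes together: every mm on mmlist is still walked, but vmas without an anon_vma are skipped outright, and when the swapcache page still has its mapping set, unuse_vma() probes a single page instead of scanning vm_start..vm_end (the real function keeps a full-range fallback for pages whose mapping is already clear, which this mock omits). A user-space sketch of just that control flow, under stated assumptions: all structures, helpers, and addresses below are illustrative stand-ins, including the convention of carrying -EFAULT in an unsigned long that the real helper uses:

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define EFAULT 14

/* Illustrative stand-in for the kernel's vma; fields reduced to essentials. */
struct vm_area_struct {
	struct vm_area_struct *vm_next;
	void *anon_vma;			/* NULL: swapoff can now skip this vma */
	unsigned long vm_start, vm_end;
};

/* Stand-in for page_address_in_vma(): pretend the page would sit one page
 * past vm_start; -EFAULT travels through an unsigned long, as in the real
 * helper, and never collides with a page-aligned address. */
static unsigned long page_address_in_vma(struct vm_area_struct *vma)
{
	if (!vma->anon_vma)
		return -EFAULT;
	return vma->vm_start + PAGE_SIZE;
}

/* After the patch: probe one page instead of the whole vma range. */
static unsigned long unuse_vma(struct vm_area_struct *vma)
{
	unsigned long start = page_address_in_vma(vma);

	if (start == -EFAULT)
		return 0;
	printf("probe [%#lx, %#lx)\n", start, start + PAGE_SIZE);
	return start;	/* nonzero "foundaddr" stops the walk */
}

int main(void)
{
	int anon_marker;
	struct vm_area_struct anon_mapped = { NULL, &anon_marker,
					      0x600000, 0x700000 };
	struct vm_area_struct file_backed = { &anon_mapped, NULL,
					      0x400000, 0x500000 };

	/* unuse_process()-shaped loop: skip vmas without an anon_vma. */
	for (struct vm_area_struct *vma = &file_backed; vma; vma = vma->vm_next)
		if (vma->anon_vma && unuse_vma(vma))
			break;
	return 0;
}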