Commit 4969c119 authored by Andrea Arcangeli, committed by Linus Torvalds

mm: fix swapin race condition

The pte_same check is reliable only if the swap entry remains pinned (by
the page lock on swapcache).  We also have to ensure the swapcache isn't
removed before we take the lock, as try_to_free_swap won't care about
the page pin.

One of the possible impacts of this patch is that a KSM-shared page can
point to the anon_vma of another process, which could exit before the page
is freed.

This can leave a page with a pointer to a recycled anon_vma object, or
worse, a pointer to something that is no longer an anon_vma.
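
For the KSM case, the patch keeps the original swapcache page locked and
pinned in a separate "swapcache" local while the copy is installed, and
only drops it after the PT lock has been taken for the pte_same() check
and swap_free() has run.  A condensed sketch assembled from the
mm/memory.c hunks below (the elided middle is where do_swap_page takes
the PT lock for the pte_same() check and calls swap_free()):

	if (ksm_might_need_to_copy(page, vma, address)) {
		swapcache = page;	/* keep the locked swapcache page pinned */
		page = ksm_does_need_to_copy(page, vma, address);

		if (unlikely(!page)) {
			ret = VM_FAULT_OOM;
			page = swapcache;
			swapcache = NULL;
			goto out_page;
		}
	}

	/* ... pte_same() check, pte setup and swap_free() ... */

	unlock_page(page);
	if (swapcache) {
		/* released only now, so the swap entry can't be reused earlier */
		unlock_page(swapcache);
		page_cache_release(swapcache);
	}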

[riel@redhat.com: changelog help]
Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
Acked-by: Hugh Dickins <hughd@google.com>
Reviewed-by: Rik van Riel <riel@redhat.com>
Cc: <stable@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 7c5367f2
--- a/include/linux/ksm.h
+++ b/include/linux/ksm.h
@@ -16,6 +16,9 @@
 struct stable_node;
 struct mem_cgroup;
 
+struct page *ksm_does_need_to_copy(struct page *page,
+			struct vm_area_struct *vma, unsigned long address);
+
 #ifdef CONFIG_KSM
 int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
 		unsigned long end, int advice, unsigned long *vm_flags);
@@ -70,19 +73,14 @@ static inline void set_page_stable_node(struct page *page,
  * We'd like to make this conditional on vma->vm_flags & VM_MERGEABLE,
  * but what if the vma was unmerged while the page was swapped out?
  */
-struct page *ksm_does_need_to_copy(struct page *page,
-			struct vm_area_struct *vma, unsigned long address);
-static inline struct page *ksm_might_need_to_copy(struct page *page,
+static inline int ksm_might_need_to_copy(struct page *page,
 			struct vm_area_struct *vma, unsigned long address)
 {
 	struct anon_vma *anon_vma = page_anon_vma(page);
 
-	if (!anon_vma ||
-	    (anon_vma->root == vma->anon_vma->root &&
-	     page->index == linear_page_index(vma, address)))
-		return page;
-
-	return ksm_does_need_to_copy(page, vma, address);
+	return anon_vma &&
+		(anon_vma->root != vma->anon_vma->root ||
+		 page->index != linear_page_index(vma, address));
 }
 
 int page_referenced_ksm(struct page *page,
@@ -115,10 +113,10 @@ static inline int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
 	return 0;
 }
 
-static inline struct page *ksm_might_need_to_copy(struct page *page,
+static inline int ksm_might_need_to_copy(struct page *page,
 			struct vm_area_struct *vma, unsigned long address)
 {
-	return page;
+	return 0;
 }
 
 static inline int page_referenced_ksm(struct page *page,
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -1504,8 +1504,6 @@ struct page *ksm_does_need_to_copy(struct page *page,
 {
 	struct page *new_page;
 
-	unlock_page(page);	/* any racers will COW it, not modify it */
-
 	new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
 	if (new_page) {
 		copy_user_highpage(new_page, page, address, vma);
@@ -1521,7 +1519,6 @@ struct page *ksm_does_need_to_copy(struct page *page,
 		add_page_to_unevictable_list(new_page);
 	}
 
-	page_cache_release(page);
 	return new_page;
 }
 
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2623,7 +2623,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		unsigned int flags, pte_t orig_pte)
 {
 	spinlock_t *ptl;
-	struct page *page;
+	struct page *page, *swapcache = NULL;
 	swp_entry_t entry;
 	pte_t pte;
 	struct mem_cgroup *ptr = NULL;
@@ -2679,10 +2679,23 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	lock_page(page);
 	delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
 
-	page = ksm_might_need_to_copy(page, vma, address);
-	if (!page) {
-		ret = VM_FAULT_OOM;
-		goto out;
+	/*
+	 * Make sure try_to_free_swap didn't release the swapcache
+	 * from under us. The page pin isn't enough to prevent that.
+	 */
+	if (unlikely(!PageSwapCache(page)))
+		goto out_page;
+
+	if (ksm_might_need_to_copy(page, vma, address)) {
+		swapcache = page;
+		page = ksm_does_need_to_copy(page, vma, address);
+
+		if (unlikely(!page)) {
+			ret = VM_FAULT_OOM;
+			page = swapcache;
+			swapcache = NULL;
+			goto out_page;
+		}
 	}
 
 	if (mem_cgroup_try_charge_swapin(mm, page, GFP_KERNEL, &ptr)) {
@@ -2735,6 +2748,18 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
 		try_to_free_swap(page);
 	unlock_page(page);
+	if (swapcache) {
+		/*
+		 * Hold the lock to avoid the swap entry to be reused
+		 * until we take the PT lock for the pte_same() check
+		 * (to avoid false positives from pte_same). For
+		 * further safety release the lock after the swap_free
+		 * so that the swap count won't change under a
+		 * parallel locked swapcache.
+		 */
+		unlock_page(swapcache);
+		page_cache_release(swapcache);
+	}
 
 	if (flags & FAULT_FLAG_WRITE) {
 		ret |= do_wp_page(mm, vma, address, page_table, pmd, ptl, pte);
@@ -2756,6 +2781,10 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	unlock_page(page);
 out_release:
 	page_cache_release(page);
+	if (swapcache) {
+		unlock_page(swapcache);
+		page_cache_release(swapcache);
+	}
 	return ret;
 }