Commit e8569dd2 authored by Andreas Sandberg, committed by Linus Torvalds

mm/hugetlb.c: call MMU notifiers when copying a hugetlb page range

When copy_hugetlb_page_range() is called to copy a range of hugetlb
mappings, the secondary MMUs are not notified of the protection
downgrade (write-protecting the parent's PTEs for COW), which breaks
COW semantics in KVM.

This patch adds the necessary MMU notifier calls.
Signed-off-by: Andreas Sandberg <andreas@sandberg.pp.se>
Acked-by: Steve Capper <steve.capper@linaro.org>
Acked-by: Marc Zyngier <marc.zyngier@arm.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Rik van Riel <riel@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 549543df
mm/hugetlb.c
@@ -2346,17 +2346,27 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
 	int cow;
 	struct hstate *h = hstate_vma(vma);
 	unsigned long sz = huge_page_size(h);
+	unsigned long mmun_start;	/* For mmu_notifiers */
+	unsigned long mmun_end;		/* For mmu_notifiers */
+	int ret = 0;
 
 	cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
 
+	mmun_start = vma->vm_start;
+	mmun_end = vma->vm_end;
+	if (cow)
+		mmu_notifier_invalidate_range_start(src, mmun_start, mmun_end);
+
 	for (addr = vma->vm_start; addr < vma->vm_end; addr += sz) {
 		spinlock_t *src_ptl, *dst_ptl;
 		src_pte = huge_pte_offset(src, addr);
 		if (!src_pte)
 			continue;
 		dst_pte = huge_pte_alloc(dst, addr, sz);
-		if (!dst_pte)
-			goto nomem;
+		if (!dst_pte) {
+			ret = -ENOMEM;
+			break;
+		}
 
 		/* If the pagetables are shared don't copy or take references */
 		if (dst_pte == src_pte)
@@ -2377,10 +2387,11 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
 		spin_unlock(src_ptl);
 		spin_unlock(dst_ptl);
 	}
-	return 0;
 
-nomem:
-	return -ENOMEM;
+	if (cow)
+		mmu_notifier_invalidate_range_end(src, mmun_start, mmun_end);
+
+	return ret;
 }
 
 static int is_hugetlb_entry_migration(pte_t pte)
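For context, the bracketing pattern the patch applies can be sketched on its own. The fragment below is illustrative only and is not part of the commit: the mmu_notifier_invalidate_range_start()/_end() calls are the kernel API as it existed at the time of this patch (taking an mm plus a start/end range; later kernels changed the signature), while the surrounding write_protect_example() function is a hypothetical stand-in for whatever code downgrades the primary page table entries.

#include <linux/mm.h>
#include <linux/mmu_notifier.h>

/*
 * Sketch only: bracket a protection downgrade with MMU notifier calls so
 * that secondary MMUs (e.g. KVM's stage-2 page tables) drop their stale
 * writable mappings and re-fault with the new permissions.
 */
static void write_protect_example(struct mm_struct *mm,
				  unsigned long start, unsigned long end)
{
	/* Notify registered listeners before the primary PTEs change. */
	mmu_notifier_invalidate_range_start(mm, start, end);

	/*
	 * ... write-protect the primary page table entries covering
	 * [start, end) here (elided) ...
	 */

	/* Notify listeners that the range update is complete. */
	mmu_notifier_invalidate_range_end(mm, start, end);
}

Without the start/end pair around the copy loop, KVM never sees the write-protection of the parent's hugetlb PTEs, so its stage-2 mappings stay writable and guest writes bypass COW.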