Commit db110a99 authored by Aneesh Kumar K.V, committed by Linus Torvalds

mm/hugetlb: fix kernel crash with hugetlb mremap

This fixes the below crash:

  kernel BUG at include/linux/mm.h:2373!
  cpu 0x5d: Vector: 700 (Program Check) at [c00000003c6e76e0]
      pc: c000000000581a54: pmd_to_page+0x54/0x80
      lr: c00000000058d184: move_hugetlb_page_tables+0x4e4/0x5b0
      sp: c00000003c6e7980
     msr: 9000000000029033
    current = 0xc00000003bd8d980
    paca    = 0xc000200fff610100   irqmask: 0x03   irq_happened: 0x01
      pid   = 9349, comm = hugepage-mremap
  kernel BUG at include/linux/mm.h:2373!
    move_hugetlb_page_tables+0x4e4/0x5b0 (link register)
    move_hugetlb_page_tables+0x22c/0x5b0 (unreliable)
    move_page_tables+0xdbc/0x1010
    move_vma+0x254/0x5f0
    sys_mremap+0x7c0/0x900
    system_call_exception+0x160/0x2c0

The kernel can't use huge_pte_offset before it sets the pte entry, because
the page table lookup checks for the huge PTE bit in the page table entry
to differentiate between a huge pte entry and a pointer to a pte page.
huge_pte_alloc won't mark the page table entry as huge, and hence the
kernel should not use huge_pte_offset after a huge_pte_alloc.
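
Concretely, the caller (move_hugetlb_page_tables) already holds the
destination entry it got back from huge_pte_alloc, so the fix is to pass
that pointer down to move_huge_pte instead of re-walking the page table
with huge_pte_offset.  An abbreviated sketch of the caller's loop body,
not the complete kernel function; local names such as mm, new_vma and sz
follow the surrounding kernel code and are shortened here:

	/* allocate the destination entry for new_addr */
	dst_pte = huge_pte_alloc(mm, new_vma, new_addr, sz);
	if (!dst_pte)
		break;

	/*
	 * Old code: move_huge_pte() called
	 *	huge_pte_offset(mm, new_addr, huge_page_size(h))
	 * to find dst_pte again.  huge_pte_alloc() has not marked the new
	 * entry as a huge PTE yet, so looking it up again this way leads
	 * to the pmd_to_page() BUG shown in the crash above.
	 *
	 * New code: hand the already-allocated dst_pte straight down.
	 */
	move_huge_pte(vma, old_addr, new_addr, src_pte, dst_pte);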

Link: https://lkml.kernel.org/r/20220211063221.99293-1-aneesh.kumar@linux.ibm.com
Fixes: 550a7d60 ("mm, hugepages: add mremap() support for hugepage backed vma")
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>
Reviewed-by: Mina Almasry <almasrymina@google.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent bbcf7b0e
mm/hugetlb.c
@@ -4851,14 +4851,13 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
 }
 
 static void move_huge_pte(struct vm_area_struct *vma, unsigned long old_addr,
-			  unsigned long new_addr, pte_t *src_pte)
+			  unsigned long new_addr, pte_t *src_pte, pte_t *dst_pte)
 {
 	struct hstate *h = hstate_vma(vma);
 	struct mm_struct *mm = vma->vm_mm;
-	pte_t *dst_pte, pte;
 	spinlock_t *src_ptl, *dst_ptl;
+	pte_t pte;
 
-	dst_pte = huge_pte_offset(mm, new_addr, huge_page_size(h));
 	dst_ptl = huge_pte_lock(h, mm, dst_pte);
 	src_ptl = huge_pte_lockptr(h, mm, src_pte);
 
@@ -4917,7 +4916,7 @@ int move_hugetlb_page_tables(struct vm_area_struct *vma,
 		if (!dst_pte)
 			break;
 
-		move_huge_pte(vma, old_addr, new_addr, src_pte);
+		move_huge_pte(vma, old_addr, new_addr, src_pte, dst_pte);
 	}
 	flush_tlb_range(vma, old_end - len, old_end);
 	mmu_notifier_invalidate_range_end(&range);