Commit fac35ba7 authored by Baolin Wang, committed by Andrew Morton

mm/hugetlb: fix races when looking up a CONT-PTE/PMD size hugetlb page

Some architectures (like ARM64) support CONT-PTE/PMD size hugetlb, which means
they can support not only PMD/PUD size hugetlb pages (2M and 1G), but also
CONT-PTE/PMD size pages (64K and 32M) when a 4K page size is specified.
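
For context (not part of the original changelog), a minimal sketch of where
those sizes come from, assuming an ARM64 4K granule where the contiguous bit
spans 16 entries at both the PTE and PMD level; the constant names are
illustrative, not the kernel's CONT_* macros:

	/* Illustrative names only, not the kernel's CONT_* macros. */
	#define EXAMPLE_PTE_SIZE	(4UL << 10)	/* 4K base page */
	#define EXAMPLE_PMD_SIZE	(2UL << 20)	/* 2M PMD-level page */
	#define EXAMPLE_CONT_ENTRIES	16UL		/* entries under one contiguous bit */

	#define EXAMPLE_CONT_PTE_SIZE	(EXAMPLE_CONT_ENTRIES * EXAMPLE_PTE_SIZE)	/* 64K */
	#define EXAMPLE_CONT_PMD_SIZE	(EXAMPLE_CONT_ENTRIES * EXAMPLE_PMD_SIZE)	/* 32M */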

So when looking up a CONT-PTE size hugetlb page via follow_page(),
follow_page_pte() will use pte_offset_map_lock() to take the pte entry lock
for the CONT-PTE size hugetlb.  However, this pte entry lock is incorrect for
the CONT-PTE size hugetlb: we should use huge_pte_lock() to take the correct
lock, which is mm->page_table_lock.
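
The difference matters because huge_pte_lock() routes through
huge_pte_lockptr(), which hands back the split PMD lock only when the hugetlb
size is exactly PMD_SIZE and otherwise falls back to mm->page_table_lock.
Roughly (a paraphrased sketch of the helper in include/linux/hugetlb.h from
this era, not a verbatim copy):

	static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
						   struct mm_struct *mm, pte_t *pte)
	{
		/* Only a plain PMD-sized hugetlb uses the split pmd lock. */
		if (huge_page_size(h) == PMD_SIZE)
			return pmd_lockptr(mm, (pmd_t *)pte);
		/* CONT-PTE, CONT-PMD, PUD, ... all serialize on this lock. */
		return &mm->page_table_lock;
	}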

That means the pte entry of the CONT-PTE size hugetlb is unstable under the
lock taken in follow_page_pte(): another path can still migrate or poison the
pte entry of the CONT-PTE size hugetlb concurrently, which can lead to races
even though the lookup nominally holds the 'pte lock'.

For example, suppose thread A looks up a CONT-PTE size hugetlb page via the
move_pages() syscall under this lock, while another thread B migrates the
CONT-PTE hugetlb page at the same time.  Thread A then gets an incorrect page,
and if thread A goes on to migrate that page as well, a data inconsistency
results.
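
In other words, the two sides serialize on different locks, so the lock held
on the lookup side excludes nothing.  A schematic interleaving of the
pre-patch behaviour (timeline illustrative, function names as in this patch):

	/*
	 * Thread A: move_pages() lookup          Thread B: hugetlb migration
	 * -----------------------------          ---------------------------
	 * follow_page_pte()
	 *   pte_offset_map_lock()
	 *     -> split PTE-page lock                huge_pte_lock()
	 *                                             -> mm->page_table_lock (a different lock)
	 *   reads the CONT-PTE entry                 replaces it with a migration entry
	 *   returns a page that is already
	 *   under migration -> data inconsistency
	 */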

Moreover, we have the same issue for CONT-PMD size hugetlb in
follow_huge_pmd().

To fix the above issues, rename follow_huge_pmd() to follow_huge_pmd_pte(),
make it handle both PMD and PTE level hugetlb, and use huge_pte_lock() to take
the correct pte entry lock so that the pte entry stays stable.
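
Condensed, the lookup pattern the patch switches to looks roughly like the
following (a trimmed-down restatement of the follow_huge_pmd_pte() hunk in
mm/hugetlb.c below, with the migration-wait and reference-grabbing paths
omitted):

	ptep = huge_pte_offset(mm, address, huge_page_size(h));	/* any hugetlb size */
	if (!ptep)
		return NULL;
	ptl = huge_pte_lock(h, mm, ptep);	/* mm->page_table_lock for CONT-PTE/PMD */
	pte = huge_ptep_get(ptep);
	if (pte_present(pte))
		page = pte_page(pte) + ((address & ~huge_page_mask(h)) >> PAGE_SHIFT);
	spin_unlock(ptl);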

Mike said:

Support for CONT_PMD/_PTE was added with commit bb9dd3df ("arm64: hugetlb:
refactor find_num_contig()") in the patch series "Support for contiguous pte
hugepages", v4.  However, I do not believe these code paths were executed
until migration support was added with commit 5480280d ("arm64/mm: enable
HugeTLB migration for contiguous bit HugeTLB pages").  I would go with
5480280d for the Fixes: target.

Link: https://lkml.kernel.org/r/635f43bdd85ac2615a58405da82b4d33c6e5eb05.1662017562.git.baolin.wang@linux.alibaba.com
Fixes: 5480280d ("arm64/mm: enable HugeTLB migration for contiguous bit HugeTLB pages")
Signed-off-by: Baolin Wang <baolin.wang@linux.alibaba.com>
Suggested-by: Mike Kravetz <mike.kravetz@oracle.com>
Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Muchun Song <songmuchun@bytedance.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 1c8e2349
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -207,8 +207,8 @@ struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
 struct page *follow_huge_pd(struct vm_area_struct *vma,
			    unsigned long address, hugepd_t hpd,
			    int flags, int pdshift);
-struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
-				pmd_t *pmd, int flags);
+struct page *follow_huge_pmd_pte(struct vm_area_struct *vma, unsigned long address,
+				int flags);
 struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address,
				pud_t *pud, int flags);
 struct page *follow_huge_pgd(struct mm_struct *mm, unsigned long address,
@@ -312,8 +312,8 @@ static inline struct page *follow_huge_pd(struct vm_area_struct *vma,
	return NULL;
 }

-static inline struct page *follow_huge_pmd(struct mm_struct *mm,
-				unsigned long address, pmd_t *pmd, int flags)
+static inline struct page *follow_huge_pmd_pte(struct vm_area_struct *vma,
+				unsigned long address, int flags)
 {
	return NULL;
 }
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -530,6 +530,18 @@ static struct page *follow_page_pte(struct vm_area_struct *vma,
	if (WARN_ON_ONCE((flags & (FOLL_PIN | FOLL_GET)) ==
			 (FOLL_PIN | FOLL_GET)))
		return ERR_PTR(-EINVAL);
+
+	/*
+	 * Considering PTE level hugetlb, like continuous-PTE hugetlb on
+	 * ARM64 architecture.
+	 */
+	if (is_vm_hugetlb_page(vma)) {
+		page = follow_huge_pmd_pte(vma, address, flags);
+		if (page)
+			return page;
+		return no_page_table(vma, flags);
+	}
+
 retry:
	if (unlikely(pmd_bad(*pmd)))
		return no_page_table(vma, flags);
@@ -662,7 +674,7 @@ static struct page *follow_pmd_mask(struct vm_area_struct *vma,
	if (pmd_none(pmdval))
		return no_page_table(vma, flags);
	if (pmd_huge(pmdval) && is_vm_hugetlb_page(vma)) {
-		page = follow_huge_pmd(mm, address, pmd, flags);
+		page = follow_huge_pmd_pte(vma, address, flags);
		if (page)
			return page;
		return no_page_table(vma, flags);
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -6946,12 +6946,13 @@ follow_huge_pd(struct vm_area_struct *vma,
 }

 struct page * __weak
-follow_huge_pmd(struct mm_struct *mm, unsigned long address,
-		pmd_t *pmd, int flags)
+follow_huge_pmd_pte(struct vm_area_struct *vma, unsigned long address, int flags)
 {
+	struct hstate *h = hstate_vma(vma);
+	struct mm_struct *mm = vma->vm_mm;
	struct page *page = NULL;
	spinlock_t *ptl;
-	pte_t pte;
+	pte_t *ptep, pte;

	/*
	 * FOLL_PIN is not supported for follow_page(). Ordinary GUP goes via
@@ -6961,17 +6962,15 @@ follow_huge_pmd(struct mm_struct *mm, unsigned long address,
		return NULL;

 retry:
-	ptl = pmd_lockptr(mm, pmd);
-	spin_lock(ptl);
-	/*
-	 * make sure that the address range covered by this pmd is not
-	 * unmapped from other threads.
-	 */
-	if (!pmd_huge(*pmd))
-		goto out;
-	pte = huge_ptep_get((pte_t *)pmd);
+	ptep = huge_pte_offset(mm, address, huge_page_size(h));
+	if (!ptep)
+		return NULL;
+
+	ptl = huge_pte_lock(h, mm, ptep);
+	pte = huge_ptep_get(ptep);
	if (pte_present(pte)) {
-		page = pmd_page(*pmd) + ((address & ~PMD_MASK) >> PAGE_SHIFT);
+		page = pte_page(pte) +
+			((address & ~huge_page_mask(h)) >> PAGE_SHIFT);
		/*
		 * try_grab_page() should always succeed here, because: a) we
		 * hold the pmd (ptl) lock, and b) we've just checked that the
@@ -6987,7 +6986,7 @@ follow_huge_pmd(struct mm_struct *mm, unsigned long address,
	} else {
		if (is_hugetlb_entry_migration(pte)) {
			spin_unlock(ptl);
-			__migration_entry_wait_huge((pte_t *)pmd, ptl);
+			__migration_entry_wait_huge(ptep, ptl);
			goto retry;
		}
		/*