Commit bbff39cc authored by Mike Kravetz, committed by Andrew Morton

hugetlb: allocate vma lock for all sharable vmas

The hugetlb vma lock was originally designed to synchronize pmd sharing. 
As such, it was only necessary to allocate the lock for vmas that were
capable of pmd sharing.  Later in the development cycle, it was discovered
that it could also be used to simplify fault/truncation races as described
in [1].  However, a subsequent change to allocate the lock for all vmas
that use the page cache was never made.  A fault/truncation race could
leave pages in a file past i_size until the file is removed.

Remove the previous restriction and allocate lock for all VM_MAYSHARE
vmas.  Warn in the unlikely event of allocation failure.

[1] https://lore.kernel.org/lkml/Yxiv0SkMkZ0JWGGp@monkey/#t

Link: https://lkml.kernel.org/r/20221005011707.514612-4-mike.kravetz@oracle.com
Fixes: "hugetlb: clean up code checking for fault/truncation races"
Signed-off-by: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: "Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com>
Cc: Axel Rasmussen <axelrasmussen@google.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Davidlohr Bueso <dave@stgolabs.net>
Cc: James Houghton <jthoughton@google.com>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Miaohe Lin <linmiaohe@huawei.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Mina Almasry <almasrymina@google.com>
Cc: Muchun Song <songmuchun@bytedance.com>
Cc: Naoya Horiguchi <naoya.horiguchi@linux.dev>
Cc: Pasha Tatashin <pasha.tatashin@soleen.com>
Cc: Peter Xu <peterx@redhat.com>
Cc: Prakash Sangappa <prakash.sangappa@oracle.com>
Cc: Sven Schnelle <svens@linux.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent ecfbd733
@@ -6687,10 +6687,11 @@ static unsigned long page_table_shareable(struct vm_area_struct *svma,
 	return saddr;
 }
 
-static bool __vma_aligned_range_pmd_shareable(struct vm_area_struct *vma,
-				unsigned long start, unsigned long end,
-				bool check_vma_lock)
+bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr)
 {
+	unsigned long start = addr & PUD_MASK;
+	unsigned long end = start + PUD_SIZE;
+
 #ifdef CONFIG_USERFAULTFD
 	if (uffd_disable_huge_pmd_share(vma))
 		return false;
@@ -6700,38 +6701,13 @@ static bool __vma_aligned_range_pmd_shareable(struct vm_area_struct *vma,
 	 */
 	if (!(vma->vm_flags & VM_MAYSHARE))
 		return false;
-	if (check_vma_lock && !vma->vm_private_data)
+	if (!vma->vm_private_data) /* vma lock required for sharing */
 		return false;
 	if (!range_in_vma(vma, start, end))
 		return false;
 	return true;
 }
 
-static bool vma_pmd_shareable(struct vm_area_struct *vma)
-{
-	unsigned long start = ALIGN(vma->vm_start, PUD_SIZE),
-		      end = ALIGN_DOWN(vma->vm_end, PUD_SIZE);
-
-	if (start >= end)
-		return false;
-
-	return __vma_aligned_range_pmd_shareable(vma, start, end, false);
-}
-
-static bool vma_addr_pmd_shareable(struct vm_area_struct *vma,
-				unsigned long addr)
-{
-	unsigned long start = addr & PUD_MASK;
-	unsigned long end = start + PUD_SIZE;
-
-	return __vma_aligned_range_pmd_shareable(vma, start, end, true);
-}
-
-bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr)
-{
-	return vma_addr_pmd_shareable(vma, addr);
-}
-
 /*
  * Determine if start,end range within vma could be mapped by shared pmd.
  * If yes, adjust start and end to cover range associated with possible
@@ -6880,17 +6856,21 @@ static void hugetlb_vma_lock_alloc(struct vm_area_struct *vma)
 	if (vma->vm_private_data)
 		return;
 
-	/* Check size/alignment for pmd sharing possible */
-	if (!vma_pmd_shareable(vma))
-		return;
-
 	vma_lock = kmalloc(sizeof(*vma_lock), GFP_KERNEL);
-	if (!vma_lock)
+	if (!vma_lock) {
 		/*
 		 * If we can not allocate structure, then vma can not
-		 * participate in pmd sharing.
+		 * participate in pmd sharing. This is only a possible
+		 * performance enhancement and memory saving issue.
+		 * However, the lock is also used to synchronize page
+		 * faults with truncation. If the lock is not present,
+		 * unlikely races could leave pages in a file past i_size
+		 * until the file is removed. Warn in the unlikely case of
+		 * allocation failure.
 		 */
+		pr_warn_once("HugeTLB: unable to allocate vma specific lock\n");
 		return;
+	}
 
 	kref_init(&vma_lock->refs);
 	init_rwsem(&vma_lock->rw_sema);
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment