Commit 04dee9e8 authored by Hugh Dickins, committed by Andrew Morton

mm/various: give up if pte_offset_map[_lock]() fails

Following the examples of nearby code, various functions can just give up
if pte_offset_map() or pte_offset_map_lock() fails.  And there's no need
for a preliminary pmd_trans_unstable() or other such check, since such
cases are now safely handled inside.
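
For illustration only (not part of the commit itself), a minimal sketch of the pattern the converted call sites follow, assuming mm, pmd and addr have already been resolved by the caller; the helper name walk_one_pte() is hypothetical:

static int walk_one_pte(struct mm_struct *mm, pmd_t *pmd, unsigned long addr)
{
	spinlock_t *ptl;
	pte_t *pte;

	/*
	 * No preliminary pmd_trans_unstable()/pmd_bad() check: when there is
	 * no page table to map, pte_offset_map_lock() itself returns NULL,
	 * and the caller simply gives up.
	 */
	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
	if (!pte)
		return 0;

	/* ... inspect or modify *pte here ... */

	pte_unmap_unlock(pte, ptl);
	return 0;
}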

Link: https://lkml.kernel.org/r/7b9bd85d-1652-cbf2-159d-f503b45e5b@google.com
Signed-off-by: Hugh Dickins <hughd@google.com>
Cc: Alistair Popple <apopple@nvidia.com>
Cc: Anshuman Khandual <anshuman.khandual@arm.com>
Cc: Axel Rasmussen <axelrasmussen@google.com>
Cc: Christophe Leroy <christophe.leroy@csgroup.eu>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: David Hildenbrand <david@redhat.com>
Cc: "Huang, Ying" <ying.huang@intel.com>
Cc: Ira Weiny <ira.weiny@intel.com>
Cc: Jason Gunthorpe <jgg@ziepe.ca>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Lorenzo Stoakes <lstoakes@gmail.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Miaohe Lin <linmiaohe@huawei.com>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Mike Rapoport (IBM) <rppt@kernel.org>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Naoya Horiguchi <naoya.horiguchi@nec.com>
Cc: Pavel Tatashin <pasha.tatashin@soleen.com>
Cc: Peter Xu <peterx@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Qi Zheng <zhengqi.arch@bytedance.com>
Cc: Ralph Campbell <rcampbell@nvidia.com>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Cc: SeongJae Park <sj@kernel.org>
Cc: Song Liu <song@kernel.org>
Cc: Steven Price <steven.price@arm.com>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Cc: Will Deacon <will@kernel.org>
Cc: Yang Shi <shy828301@gmail.com>
Cc: Yu Zhao <yuzhao@google.com>
Cc: Zack Rusin <zackr@vmware.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 9f2bad09
@@ -545,10 +545,10 @@ static struct page *follow_page_pte(struct vm_area_struct *vma,
 	if (WARN_ON_ONCE((flags & (FOLL_PIN | FOLL_GET)) ==
 			 (FOLL_PIN | FOLL_GET)))
 		return ERR_PTR(-EINVAL);
-	if (unlikely(pmd_bad(*pmd)))
-		return no_page_table(vma, flags);
 
 	ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
+	if (!ptep)
+		return no_page_table(vma, flags);
 	pte = *ptep;
 	if (!pte_present(pte))
 		goto no_page;
@@ -852,8 +852,9 @@ static int get_gate_page(struct mm_struct *mm, unsigned long address,
 	pmd = pmd_offset(pud, address);
 	if (!pmd_present(*pmd))
 		return -EFAULT;
-	VM_BUG_ON(pmd_trans_huge(*pmd));
 	pte = pte_offset_map(pmd, address);
+	if (!pte)
+		return -EFAULT;
 	if (pte_none(*pte))
 		goto unmap;
 	*vma = get_gate_vma(mm);
@@ -2468,6 +2469,8 @@ static int gup_pte_range(pmd_t pmd, pmd_t *pmdp, unsigned long addr,
 	pte_t *ptep, *ptem;
 
 	ptem = ptep = pte_offset_map(&pmd, addr);
+	if (!ptep)
+		return 0;
 	do {
 		pte_t pte = ptep_get_lockless(ptep);
 		struct page *page;
...
@@ -431,10 +431,9 @@ static int break_ksm_pmd_entry(pmd_t *pmd, unsigned long addr, unsigned long nex
 	pte_t *pte;
 	int ret;
 
-	if (pmd_leaf(*pmd) || !pmd_present(*pmd))
-		return 0;
 	pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
+	if (!pte)
+		return 0;
 	if (pte_present(*pte)) {
 		page = vm_normal_page(walk->vma, addr, *pte);
 	} else if (!pte_none(*pte)) {
@@ -1203,6 +1202,8 @@ static int replace_page(struct vm_area_struct *vma, struct page *page,
 	mmu_notifier_invalidate_range_start(&range);
 
 	ptep = pte_offset_map_lock(mm, pmd, addr, &ptl);
+	if (!ptep)
+		goto out_mn;
 	if (!pte_same(*ptep, orig_pte)) {
 		pte_unmap_unlock(ptep, ptl);
 		goto out_mn;
...
@@ -6021,9 +6021,9 @@ static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
 		return 0;
 	}
 
-	if (pmd_trans_unstable(pmd))
-		return 0;
 	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
+	if (!pte)
+		return 0;
 	for (; addr != end; pte++, addr += PAGE_SIZE)
 		if (get_mctgt_type(vma, addr, *pte, NULL))
 			mc.precharge++;	/* increment precharge temporarily */
@@ -6241,10 +6241,10 @@ static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
 		return 0;
 	}
 
-	if (pmd_trans_unstable(pmd))
-		return 0;
 retry:
 	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
+	if (!pte)
+		return 0;
 	for (; addr != end; addr += PAGE_SIZE) {
 		pte_t ptent = *(pte++);
 		bool device = false;
...
@@ -405,6 +405,8 @@ static unsigned long dev_pagemap_mapping_shift(struct vm_area_struct *vma,
 	if (pmd_devmap(*pmd))
 		return PMD_SHIFT;
 	pte = pte_offset_map(pmd, address);
+	if (!pte)
+		return 0;
 	if (pte_present(*pte) && pte_devmap(*pte))
 		ret = PAGE_SHIFT;
 	pte_unmap(pte);
@@ -791,11 +793,11 @@ static int hwpoison_pte_range(pmd_t *pmdp, unsigned long addr,
 		goto out;
 	}
 
-	if (pmd_trans_unstable(pmdp))
-		goto out;
 	mapped_pte = ptep = pte_offset_map_lock(walk->vma->vm_mm, pmdp,
 						addr, &ptl);
+	if (!ptep)
+		goto out;
 	for (; addr != end; ptep++, addr += PAGE_SIZE) {
 		ret = check_hwpoisoned_entry(*ptep, addr, PAGE_SHIFT,
 					     hwp->pfn, &hwp->tk);
...
@@ -305,6 +305,9 @@ void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
 	swp_entry_t entry;
 
 	ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
+	if (!ptep)
+		return;
+
 	pte = *ptep;
 	pte_unmap(ptep);
...