Commit 1440f576 authored by Linus Torvalds

Merge tag 'mm-hotfixes-stable-2022-10-11' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

Pull misc hotfixes from Andrew Morton:
 "Five hotfixes - three for nilfs2, two for MM. For are cc:stable, one
  is not"

* tag 'mm-hotfixes-stable-2022-10-11' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm:
  nilfs2: fix leak of nilfs_root in case of writer thread creation failure
  nilfs2: fix NULL pointer dereference at nilfs_bmap_lookup_at_level()
  nilfs2: fix use-after-free bug of struct nilfs_root
  mm/damon/core: initialize damon_target->list in damon_new_target()
  mm/hugetlb: fix races when looking up a CONT-PTE/PMD size hugetlb page
parents 676cb495 d0d51a97
--- a/fs/nilfs2/inode.c
+++ b/fs/nilfs2/inode.c
@@ -328,6 +328,7 @@ struct inode *nilfs_new_inode(struct inode *dir, umode_t mode)
 	struct inode *inode;
 	struct nilfs_inode_info *ii;
 	struct nilfs_root *root;
+	struct buffer_head *bh;
 	int err = -ENOMEM;
 	ino_t ino;
@@ -343,11 +344,25 @@ struct inode *nilfs_new_inode(struct inode *dir, umode_t mode)
 	ii->i_state = BIT(NILFS_I_NEW);
 	ii->i_root = root;
 
-	err = nilfs_ifile_create_inode(root->ifile, &ino, &ii->i_bh);
+	err = nilfs_ifile_create_inode(root->ifile, &ino, &bh);
 	if (unlikely(err))
 		goto failed_ifile_create_inode;
 	/* reference count of i_bh inherits from nilfs_mdt_read_block() */
 
+	if (unlikely(ino < NILFS_USER_INO)) {
+		nilfs_warn(sb,
+			   "inode bitmap is inconsistent for reserved inodes");
+		do {
+			brelse(bh);
+			err = nilfs_ifile_create_inode(root->ifile, &ino, &bh);
+			if (unlikely(err))
+				goto failed_ifile_create_inode;
+		} while (ino < NILFS_USER_INO);
+
+		nilfs_info(sb, "repaired inode bitmap for reserved inodes");
+	}
+	ii->i_bh = bh;
+
 	atomic64_inc(&root->inodes_count);
 	inode_init_owner(&init_user_ns, inode, dir, mode);
 	inode->i_ino = ino;
@@ -440,6 +455,8 @@ int nilfs_read_inode_common(struct inode *inode,
 	inode->i_atime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec);
 	inode->i_ctime.tv_nsec = le32_to_cpu(raw_inode->i_ctime_nsec);
 	inode->i_mtime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec);
+	if (nilfs_is_metadata_file_inode(inode) && !S_ISREG(inode->i_mode))
+		return -EIO; /* this inode is for metadata and corrupted */
 	if (inode->i_nlink == 0)
 		return -ESTALE; /* this inode is deleted */
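
The core of this fix is the repair loop in nilfs_new_inode(): if the ifile allocator hands back an inode number below NILFS_USER_INO, the on-disk bitmap has lost its reserved bits, and the loop keeps allocating until a legitimate number appears, re-marking the reserved slots along the way. A minimal userspace sketch of that pattern, assuming a toy lowest-free-bit allocator in place of nilfs_ifile_create_inode() (all names below are illustrative, not kernel API):

#include <stdio.h>

#define NILFS_USER_INO	11	/* first non-reserved inode number */
#define NBITS		64

static unsigned long bitmap;	/* bit n set => inode n allocated */

/* toy stand-in for nilfs_ifile_create_inode(): grab the lowest free ino */
static int create_inode(unsigned long *ino)
{
	for (unsigned long i = 0; i < NBITS; i++) {
		if (!(bitmap & (1UL << i))) {
			bitmap |= 1UL << i;
			*ino = i;
			return 0;
		}
	}
	return -1;		/* -ENOSPC in the real code */
}

int main(void)
{
	unsigned long ino;

	/* simulate corruption: reserved inodes 0 and 3..10 look "free" */
	bitmap = 1UL << 1 | 1UL << 2;

	if (create_inode(&ino))
		return 1;
	if (ino < NILFS_USER_INO) {
		fprintf(stderr, "inode bitmap is inconsistent for reserved inodes\n");
		do {
			/* the real code brelse()s the buffer before retrying */
			if (create_inode(&ino))
				return 1;
		} while (ino < NILFS_USER_INO);
		fprintf(stderr, "repaired inode bitmap for reserved inodes\n");
	}
	printf("allocated ino %lu\n", ino);	/* prints 11 */
	return 0;
}

Each retry both discards the bogus number and sets its bit, so by the time a user inode comes back, every reserved slot is marked allocated again.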
--- a/fs/nilfs2/segment.c
+++ b/fs/nilfs2/segment.c
@@ -2790,10 +2790,9 @@ int nilfs_attach_log_writer(struct super_block *sb, struct nilfs_root *root)
 	inode_attach_wb(nilfs->ns_bdev->bd_inode, NULL);
 
 	err = nilfs_segctor_start_thread(nilfs->ns_writer);
-	if (err) {
-		kfree(nilfs->ns_writer);
-		nilfs->ns_writer = NULL;
-	}
+	if (unlikely(err))
+		nilfs_detach_log_writer(sb);
+
 	return err;
 }
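
The old error path kfree()d the segment constructor directly, but the constructor holds a reference on struct nilfs_root that only the full teardown in nilfs_detach_log_writer() releases, so the root (and everything it pins) leaked when thread creation failed. A minimal userspace sketch of that ownership model, with hypothetical names standing in for the kernel structures:

#include <stdio.h>
#include <stdlib.h>

struct root {
	int refcount;
};

struct log_writer {
	struct root *root;	/* reference owned by the writer */
};

static struct log_writer *writer_new(struct root *root)
{
	struct log_writer *w = malloc(sizeof(*w));

	root->refcount++;	/* constructor takes a root reference */
	w->root = root;
	return w;
}

static void writer_destroy(struct log_writer *w)
{
	w->root->refcount--;	/* only the full teardown drops it */
	free(w);
}

int main(void)
{
	struct root root = { .refcount = 1 };
	struct log_writer *w = writer_new(&root);

	/* simulate nilfs_segctor_start_thread() failing... */

	/* buggy cleanup: free(w) alone leaves refcount at 2 => root leaks */

	/* fixed cleanup: run the full detach path instead */
	writer_destroy(w);
	printf("root refcount after cleanup: %d\n", root.refcount);	/* 1 */
	return 0;
}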
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -214,8 +214,8 @@ struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
 struct page *follow_huge_pd(struct vm_area_struct *vma,
 			    unsigned long address, hugepd_t hpd,
 			    int flags, int pdshift);
-struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
-			     pmd_t *pmd, int flags);
+struct page *follow_huge_pmd_pte(struct vm_area_struct *vma, unsigned long address,
+				 int flags);
 struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address,
 			     pud_t *pud, int flags);
 struct page *follow_huge_pgd(struct mm_struct *mm, unsigned long address,
@@ -327,8 +327,8 @@ static inline struct page *follow_huge_pd(struct vm_area_struct *vma,
 	return NULL;
 }
 
-static inline struct page *follow_huge_pmd(struct mm_struct *mm,
-	unsigned long address, pmd_t *pmd, int flags)
+static inline struct page *follow_huge_pmd_pte(struct vm_area_struct *vma,
+	unsigned long address, int flags)
 {
 	return NULL;
 }
--- a/mm/damon/core.c
+++ b/mm/damon/core.c
@@ -335,6 +335,7 @@ struct damon_target *damon_new_target(void)
 	t->pid = NULL;
 	t->nr_regions = 0;
 	INIT_LIST_HEAD(&t->regions_list);
+	INIT_LIST_HEAD(&t->list);
 
 	return t;
 }
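
This one-line fix matters because list_del() unconditionally dereferences the node's prev/next pointers: a damon_target destroyed without ever being added to a context's target list has garbage in those fields, while a self-linked (initialized) node deletes harmlessly. A minimal userspace re-implementation of the list_head idiom showing the difference:

#include <stdio.h>

struct list_head {
	struct list_head *next, *prev;
};

static void INIT_LIST_HEAD(struct list_head *h)
{
	h->next = h;	/* an empty list points at itself */
	h->prev = h;
}

static void list_del(struct list_head *e)
{
	/* follows wild pointers if e was never initialized or linked */
	e->next->prev = e->prev;
	e->prev->next = e->next;
}

struct target {
	struct list_head list;	/* like damon_target->list */
};

int main(void)
{
	struct target t;

	INIT_LIST_HEAD(&t.list);	/* the fix: without this line,
					 * list_del() below is undefined
					 * behavior on stack garbage */
	list_del(&t.list);		/* destroy a never-linked target */
	puts("deleted unlinked target safely");
	return 0;
}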
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -537,6 +537,18 @@ static struct page *follow_page_pte(struct vm_area_struct *vma,
 	if (WARN_ON_ONCE((flags & (FOLL_PIN | FOLL_GET)) ==
 			 (FOLL_PIN | FOLL_GET)))
 		return ERR_PTR(-EINVAL);
+
+	/*
+	 * Considering PTE level hugetlb, like continuous-PTE hugetlb on
+	 * ARM64 architecture.
+	 */
+	if (is_vm_hugetlb_page(vma)) {
+		page = follow_huge_pmd_pte(vma, address, flags);
+		if (page)
+			return page;
+		return no_page_table(vma, flags);
+	}
+
 retry:
 	if (unlikely(pmd_bad(*pmd)))
 		return no_page_table(vma, flags);
@@ -669,7 +681,7 @@ static struct page *follow_pmd_mask(struct vm_area_struct *vma,
 	if (pmd_none(pmdval))
 		return no_page_table(vma, flags);
 	if (pmd_huge(pmdval) && is_vm_hugetlb_page(vma)) {
-		page = follow_huge_pmd(mm, address, pmd, flags);
+		page = follow_huge_pmd_pte(vma, address, flags);
 		if (page)
 			return page;
 		return no_page_table(vma, flags);
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -7150,12 +7150,13 @@ follow_huge_pd(struct vm_area_struct *vma,
 }
 
 struct page * __weak
-follow_huge_pmd(struct mm_struct *mm, unsigned long address,
-		pmd_t *pmd, int flags)
+follow_huge_pmd_pte(struct vm_area_struct *vma, unsigned long address, int flags)
 {
+	struct hstate *h = hstate_vma(vma);
+	struct mm_struct *mm = vma->vm_mm;
 	struct page *page = NULL;
 	spinlock_t *ptl;
-	pte_t pte;
+	pte_t *ptep, pte;
 
 	/*
 	 * FOLL_PIN is not supported for follow_page(). Ordinary GUP goes via
@@ -7165,17 +7166,15 @@ follow_huge_pmd(struct mm_struct *mm, unsigned long address,
 		return NULL;
 
 retry:
-	ptl = pmd_lockptr(mm, pmd);
-	spin_lock(ptl);
-	/*
-	 * make sure that the address range covered by this pmd is not
-	 * unmapped from other threads.
-	 */
-	if (!pmd_huge(*pmd))
-		goto out;
-	pte = huge_ptep_get((pte_t *)pmd);
+	ptep = huge_pte_offset(mm, address, huge_page_size(h));
+	if (!ptep)
+		return NULL;
+
+	ptl = huge_pte_lock(h, mm, ptep);
+	pte = huge_ptep_get(ptep);
 	if (pte_present(pte)) {
-		page = pmd_page(*pmd) + ((address & ~PMD_MASK) >> PAGE_SHIFT);
+		page = pte_page(pte) +
+			((address & ~huge_page_mask(h)) >> PAGE_SHIFT);
 		/*
 		 * try_grab_page() should always succeed here, because: a) we
 		 * hold the pmd (ptl) lock, and b) we've just checked that the
@@ -7191,7 +7190,7 @@ follow_huge_pmd(struct mm_struct *mm, unsigned long address,
 	} else {
 		if (is_hugetlb_entry_migration(pte)) {
 			spin_unlock(ptl);
-			__migration_entry_wait_huge(ptep, ptl);
+			__migration_entry_wait_huge(ptep, ptl);
 			goto retry;
 		}
 		/*
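
The rework replaces "take the PMD lock, then read through the pmd" with the level-correct sequence: find the entry with huge_pte_offset(), lock that entry's own lock with huge_pte_lock(), read it with huge_ptep_get(), and on a migration entry drop the lock, wait, and retry. For CONT-PTE hugetlb the entry lives at PTE level, so the old PMD lock was simply the wrong lock for it, which is the race the patch title names. A rough userspace model of the lock-then-read-then-retry discipline (stand-in types, not the kernel API):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* stand-in: an entry is absent, under migration, or present */
enum pte_state { PTE_NONE, PTE_MIGRATION, PTE_PRESENT };

struct pte {
	pthread_mutex_t lock;	/* the per-entry lock, like huge_pte_lock() */
	enum pte_state state;
};

static void wait_for_migration(struct pte *p)
{
	/* the real code sleeps in __migration_entry_wait_huge(); here
	 * the "migration" simply completes immediately */
	pthread_mutex_lock(&p->lock);
	p->state = PTE_PRESENT;
	pthread_mutex_unlock(&p->lock);
}

static bool lookup(struct pte *p)
{
retry:
	pthread_mutex_lock(&p->lock);	/* lock the entry's own lock... */
	switch (p->state) {		/* ...then read it: it cannot
					 * change underneath us */
	case PTE_PRESENT:
		pthread_mutex_unlock(&p->lock);
		return true;
	case PTE_MIGRATION:
		pthread_mutex_unlock(&p->lock);
		wait_for_migration(p);	/* drop the lock, wait, retry */
		goto retry;
	default:
		pthread_mutex_unlock(&p->lock);
		return false;
	}
}

int main(void)
{
	struct pte p = { .lock = PTHREAD_MUTEX_INITIALIZER,
			 .state = PTE_MIGRATION };

	printf("page %s\n", lookup(&p) ? "found" : "absent");	/* found */
	return 0;
}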