Commit 38f8ccde authored by Linus Torvalds's avatar Linus Torvalds

Merge tag 'mm-hotfixes-stable-2023-02-17-15-16-2' of...

Merge tag 'mm-hotfixes-stable-2023-02-17-15-16-2' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

Pull misc fixes from Andrew Morton:
 "Six hotfixes. Five are cc:stable: four for MM, one for nilfs2.

  Also a MAINTAINERS update"

* tag 'mm-hotfixes-stable-2023-02-17-15-16-2' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm:
  nilfs2: fix underflow in second superblock position calculations
  hugetlb: check for undefined shift on 32 bit architectures
  mm/migrate: fix wrongly apply write bit after mkdirty on sparc64
  MAINTAINERS: update FPU EMULATOR web page
  mm/MADV_COLLAPSE: set EAGAIN on unexpected page refcount
  mm/filemap: fix page end in filemap_get_read_batch
parents dbeed98d 99b9402a
...@@ -8202,7 +8202,7 @@ F: drivers/fpga/microchip-spi.c ...@@ -8202,7 +8202,7 @@ F: drivers/fpga/microchip-spi.c
FPU EMULATOR FPU EMULATOR
M: Bill Metzenthen <billm@melbpc.org.au> M: Bill Metzenthen <billm@melbpc.org.au>
S: Maintained S: Maintained
W: http://floatingpoint.sourceforge.net/emulator/index.html W: https://floatingpoint.billm.au/
F: arch/x86/math-emu/ F: arch/x86/math-emu/
FRAMEBUFFER CORE FRAMEBUFFER CORE
......
...@@ -1114,7 +1114,14 @@ static int nilfs_ioctl_set_alloc_range(struct inode *inode, void __user *argp) ...@@ -1114,7 +1114,14 @@ static int nilfs_ioctl_set_alloc_range(struct inode *inode, void __user *argp)
minseg = range[0] + segbytes - 1; minseg = range[0] + segbytes - 1;
do_div(minseg, segbytes); do_div(minseg, segbytes);
if (range[1] < 4096)
goto out;
maxseg = NILFS_SB2_OFFSET_BYTES(range[1]); maxseg = NILFS_SB2_OFFSET_BYTES(range[1]);
if (maxseg < segbytes)
goto out;
do_div(maxseg, segbytes); do_div(maxseg, segbytes);
maxseg--; maxseg--;
......
...@@ -408,6 +408,15 @@ int nilfs_resize_fs(struct super_block *sb, __u64 newsize) ...@@ -408,6 +408,15 @@ int nilfs_resize_fs(struct super_block *sb, __u64 newsize)
if (newsize > devsize) if (newsize > devsize)
goto out; goto out;
/*
* Prevent underflow in second superblock position calculation.
* The exact minimum size check is done in nilfs_sufile_resize().
*/
if (newsize < 4096) {
ret = -ENOSPC;
goto out;
}
/* /*
* Write lock is required to protect some functions depending * Write lock is required to protect some functions depending
* on the number of segments, the number of reserved segments, * on the number of segments, the number of reserved segments,
......
...@@ -544,9 +544,15 @@ static int nilfs_load_super_block(struct the_nilfs *nilfs, ...@@ -544,9 +544,15 @@ static int nilfs_load_super_block(struct the_nilfs *nilfs,
{ {
struct nilfs_super_block **sbp = nilfs->ns_sbp; struct nilfs_super_block **sbp = nilfs->ns_sbp;
struct buffer_head **sbh = nilfs->ns_sbh; struct buffer_head **sbh = nilfs->ns_sbh;
u64 sb2off = NILFS_SB2_OFFSET_BYTES(bdev_nr_bytes(nilfs->ns_bdev)); u64 sb2off, devsize = bdev_nr_bytes(nilfs->ns_bdev);
int valid[2], swp = 0; int valid[2], swp = 0;
if (devsize < NILFS_SEG_MIN_BLOCKS * NILFS_MIN_BLOCK_SIZE + 4096) {
nilfs_err(sb, "device size too small");
return -EINVAL;
}
sb2off = NILFS_SB2_OFFSET_BYTES(devsize);
sbp[0] = nilfs_read_super_block(sb, NILFS_SB_OFFSET_BYTES, blocksize, sbp[0] = nilfs_read_super_block(sb, NILFS_SB_OFFSET_BYTES, blocksize,
&sbh[0]); &sbh[0]);
sbp[1] = nilfs_read_super_block(sb, sb2off, blocksize, &sbh[1]); sbp[1] = nilfs_read_super_block(sb, sb2off, blocksize, &sbh[1]);
......
...@@ -743,7 +743,10 @@ static inline struct hstate *hstate_sizelog(int page_size_log) ...@@ -743,7 +743,10 @@ static inline struct hstate *hstate_sizelog(int page_size_log)
if (!page_size_log) if (!page_size_log)
return &default_hstate; return &default_hstate;
return size_to_hstate(1UL << page_size_log); if (page_size_log < BITS_PER_LONG)
return size_to_hstate(1UL << page_size_log);
return NULL;
} }
static inline struct hstate *hstate_vma(struct vm_area_struct *vma) static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
......
...@@ -2588,18 +2588,19 @@ static int filemap_get_pages(struct kiocb *iocb, struct iov_iter *iter, ...@@ -2588,18 +2588,19 @@ static int filemap_get_pages(struct kiocb *iocb, struct iov_iter *iter,
struct folio *folio; struct folio *folio;
int err = 0; int err = 0;
/* "last_index" is the index of the page beyond the end of the read */
last_index = DIV_ROUND_UP(iocb->ki_pos + iter->count, PAGE_SIZE); last_index = DIV_ROUND_UP(iocb->ki_pos + iter->count, PAGE_SIZE);
retry: retry:
if (fatal_signal_pending(current)) if (fatal_signal_pending(current))
return -EINTR; return -EINTR;
filemap_get_read_batch(mapping, index, last_index, fbatch); filemap_get_read_batch(mapping, index, last_index - 1, fbatch);
if (!folio_batch_count(fbatch)) { if (!folio_batch_count(fbatch)) {
if (iocb->ki_flags & IOCB_NOIO) if (iocb->ki_flags & IOCB_NOIO)
return -EAGAIN; return -EAGAIN;
page_cache_sync_readahead(mapping, ra, filp, index, page_cache_sync_readahead(mapping, ra, filp, index,
last_index - index); last_index - index);
filemap_get_read_batch(mapping, index, last_index, fbatch); filemap_get_read_batch(mapping, index, last_index - 1, fbatch);
} }
if (!folio_batch_count(fbatch)) { if (!folio_batch_count(fbatch)) {
if (iocb->ki_flags & (IOCB_NOWAIT | IOCB_WAITQ)) if (iocb->ki_flags & (IOCB_NOWAIT | IOCB_WAITQ))
......
...@@ -3272,8 +3272,6 @@ void remove_migration_pmd(struct page_vma_mapped_walk *pvmw, struct page *new) ...@@ -3272,8 +3272,6 @@ void remove_migration_pmd(struct page_vma_mapped_walk *pvmw, struct page *new)
pmde = mk_huge_pmd(new, READ_ONCE(vma->vm_page_prot)); pmde = mk_huge_pmd(new, READ_ONCE(vma->vm_page_prot));
if (pmd_swp_soft_dirty(*pvmw->pmd)) if (pmd_swp_soft_dirty(*pvmw->pmd))
pmde = pmd_mksoft_dirty(pmde); pmde = pmd_mksoft_dirty(pmde);
if (is_writable_migration_entry(entry))
pmde = maybe_pmd_mkwrite(pmde, vma);
if (pmd_swp_uffd_wp(*pvmw->pmd)) if (pmd_swp_uffd_wp(*pvmw->pmd))
pmde = pmd_wrprotect(pmd_mkuffd_wp(pmde)); pmde = pmd_wrprotect(pmd_mkuffd_wp(pmde));
if (!is_migration_entry_young(entry)) if (!is_migration_entry_young(entry))
...@@ -3281,6 +3279,10 @@ void remove_migration_pmd(struct page_vma_mapped_walk *pvmw, struct page *new) ...@@ -3281,6 +3279,10 @@ void remove_migration_pmd(struct page_vma_mapped_walk *pvmw, struct page *new)
/* NOTE: this may contain setting soft-dirty on some archs */ /* NOTE: this may contain setting soft-dirty on some archs */
if (PageDirty(new) && is_migration_entry_dirty(entry)) if (PageDirty(new) && is_migration_entry_dirty(entry))
pmde = pmd_mkdirty(pmde); pmde = pmd_mkdirty(pmde);
if (is_writable_migration_entry(entry))
pmde = maybe_pmd_mkwrite(pmde, vma);
else
pmde = pmd_wrprotect(pmde);
if (PageAnon(new)) { if (PageAnon(new)) {
rmap_t rmap_flags = RMAP_COMPOUND; rmap_t rmap_flags = RMAP_COMPOUND;
......
...@@ -2611,6 +2611,7 @@ static int madvise_collapse_errno(enum scan_result r) ...@@ -2611,6 +2611,7 @@ static int madvise_collapse_errno(enum scan_result r)
case SCAN_CGROUP_CHARGE_FAIL: case SCAN_CGROUP_CHARGE_FAIL:
return -EBUSY; return -EBUSY;
/* Resource temporary unavailable - trying again might succeed */ /* Resource temporary unavailable - trying again might succeed */
case SCAN_PAGE_COUNT:
case SCAN_PAGE_LOCK: case SCAN_PAGE_LOCK:
case SCAN_PAGE_LRU: case SCAN_PAGE_LRU:
case SCAN_DEL_PAGE_LRU: case SCAN_DEL_PAGE_LRU:
......
...@@ -224,6 +224,8 @@ static bool remove_migration_pte(struct folio *folio, ...@@ -224,6 +224,8 @@ static bool remove_migration_pte(struct folio *folio,
pte = maybe_mkwrite(pte, vma); pte = maybe_mkwrite(pte, vma);
else if (pte_swp_uffd_wp(*pvmw.pte)) else if (pte_swp_uffd_wp(*pvmw.pte))
pte = pte_mkuffd_wp(pte); pte = pte_mkuffd_wp(pte);
else
pte = pte_wrprotect(pte);
if (folio_test_anon(folio) && !is_readable_migration_entry(entry)) if (folio_test_anon(folio) && !is_readable_migration_entry(entry))
rmap_flags |= RMAP_EXCLUSIVE; rmap_flags |= RMAP_EXCLUSIVE;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment