Commit 6f3d7d5c authored by Linus Torvalds

Merge tag 'mm-hotfixes-stable-2024-01-28-23-21' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

Pull misc fixes from Andrew Morton:
 "22 hotfixes. 11 are cc:stable and the remainder address post-6.7
  issues or aren't considered appropriate for backporting"

* tag 'mm-hotfixes-stable-2024-01-28-23-21' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm: (22 commits)
  mm: thp_get_unmapped_area must honour topdown preference
  mm: huge_memory: don't force huge page alignment on 32 bit
  userfaultfd: fix mmap_changing checking in mfill_atomic_hugetlb
  selftests/mm: ksm_tests should only MADV_HUGEPAGE valid memory
  scs: add CONFIG_MMU dependency for vfree_atomic()
  mm/memory: fix folio_set_dirty() vs. folio_mark_dirty() in zap_pte_range()
  mm/huge_memory: fix folio_set_dirty() vs. folio_mark_dirty()
  selftests/mm: Update va_high_addr_switch.sh to check CPU for la57 flag
  selftests: mm: fix map_hugetlb failure on 64K page size systems
  MAINTAINERS: supplement of zswap maintainers update
  stackdepot: make fast paths lock-less again
  stackdepot: add stats counters exported via debugfs
  mm, kmsan: fix infinite recursion due to RCU critical section
  mm/writeback: fix possible divide-by-zero in wb_dirty_limits(), again
  selftests/mm: switch to bash from sh
  MAINTAINERS: add man-pages git trees
  mm: memcontrol: don't throttle dying tasks on memory.high
  mm: mmap: map MAP_STACK to VM_NOHUGEPAGE
  uprobes: use pagesize-aligned virtual address when replacing pages
  selftests/mm: mremap_test: fix build warning
  ...
parents 41bccc98 96204e15
@@ -2161,6 +2161,19 @@ N: Mike Kravetz
E: mike.kravetz@oracle.com
D: Maintenance and development of the hugetlb subsystem

N: Seth Jennings
E: sjenning@redhat.com
D: Creation and maintenance of zswap

N: Dan Streetman
E: ddstreet@ieee.org
D: Maintenance and development of zswap
D: Creation and maintenance of the zpool API

N: Vitaly Wool
E: vitaly.wool@konsulko.com
D: Maintenance and development of zswap

N: Andreas S. Krebs
E: akrebs@altavista.net
D: CYPRESS CY82C693 chipset IDE, Digital's PC-Alpha 164SX boards
@@ -12903,6 +12903,8 @@ M: Alejandro Colomar <alx@kernel.org>
L: linux-man@vger.kernel.org
S: Maintained
W: http://www.kernel.org/doc/man-pages
T: git git://git.kernel.org/pub/scm/docs/man-pages/man-pages.git
T: git git://www.alejandro-colomar.es/src/alx/linux/man-pages/man-pages.git
MANAGEMENT COMPONENT TRANSPORT PROTOCOL (MCTP)
M: Jeremy Kerr <jk@codeconstruct.com.au>
@@ -24341,13 +24343,6 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/dlemoal/zonefs.git
F: Documentation/filesystems/zonefs.rst
F: fs/zonefs/
ZPOOL COMPRESSED PAGE STORAGE API
M: Dan Streetman <ddstreet@ieee.org>
L: linux-mm@kvack.org
S: Maintained
F: include/linux/zpool.h
F: mm/zpool.c
ZR36067 VIDEO FOR LINUX DRIVER
M: Corentin Labbe <clabbe@baylibre.com>
L: mjpeg-users@lists.sourceforge.net
@@ -24399,7 +24394,9 @@ M: Nhat Pham <nphamcs@gmail.com>
L: linux-mm@kvack.org
S: Maintained
F: Documentation/admin-guide/mm/zswap.rst
F: include/linux/zpool.h
F: include/linux/zswap.h
F: mm/zpool.c
F: mm/zswap.c
THE REST
@@ -673,6 +673,7 @@ config SHADOW_CALL_STACK
bool "Shadow Call Stack"
depends on ARCH_SUPPORTS_SHADOW_CALL_STACK
depends on DYNAMIC_FTRACE_WITH_ARGS || DYNAMIC_FTRACE_WITH_REGS || !FUNCTION_GRAPH_TRACER
depends on MMU
help
This option enables the compiler's Shadow Call Stack, which
uses a shadow stack to protect function return addresses from
@@ -64,6 +64,7 @@ static inline bool kmsan_virt_addr_valid(void *addr)
{
unsigned long x = (unsigned long)addr;
unsigned long y = x - __START_KERNEL_map;
bool ret;
/* use the carry flag to determine if x was < __START_KERNEL_map */
if (unlikely(x > y)) {
@@ -79,7 +80,21 @@ static inline bool kmsan_virt_addr_valid(void *addr)
return false;
}
return pfn_valid(x >> PAGE_SHIFT);
/*
* pfn_valid() relies on RCU, and may call into the scheduler on exiting
* the critical section. However, this would result in recursion with
* KMSAN. Therefore, disable preemption here, and re-enable preemption
* below while suppressing reschedules to avoid recursion.
*
* Note, this sacrifices occasionally breaking scheduling guarantees.
* Although, a kernel compiled with KMSAN has already given up on any
* performance guarantees due to being heavily instrumented.
*/
preempt_disable();
ret = pfn_valid(x >> PAGE_SHIFT);
preempt_enable_no_resched();
return ret;
}
#endif /* !MODULE */
@@ -340,7 +340,7 @@ static ssize_t hugetlbfs_read_iter(struct kiocb *iocb, struct iov_iter *to)
} else {
folio_unlock(folio);
if (!folio_test_has_hwpoisoned(folio))
if (!folio_test_hwpoison(folio))
want = nr;
else {
/*
@@ -156,6 +156,7 @@ calc_vm_flag_bits(unsigned long flags)
return _calc_vm_trans(flags, MAP_GROWSDOWN, VM_GROWSDOWN ) |
_calc_vm_trans(flags, MAP_LOCKED, VM_LOCKED ) |
_calc_vm_trans(flags, MAP_SYNC, VM_SYNC ) |
_calc_vm_trans(flags, MAP_STACK, VM_NOHUGEPAGE) |
arch_calc_vm_flag_bits(flags);
}
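For illustration only, not part of the patch: a minimal user-space sketch of how a MAP_* bit gets translated into a VM_* bit, mirroring what _calc_vm_trans() does by scaling the matched bit between the two power-of-two positions. The flag values below are invented stand-ins, not the real MAP_STACK/VM_NOHUGEPAGE constants.

#include <stdio.h>

#define FAKE_MAP_STACK      0x20000UL     /* stand-in value, not the real MAP_STACK */
#define FAKE_VM_NOHUGEPAGE  0x40000000UL  /* stand-in value, not the real VM_NOHUGEPAGE */

/* Scale the matched source bit to the target bit position, the same way the
 * kernel's _calc_vm_trans() macro does for power-of-two flag values. */
static unsigned long calc_trans(unsigned long flags, unsigned long from, unsigned long to)
{
	if (!from || !to)
		return 0;
	return from <= to ? (flags & from) * (to / from)
			  : (flags & from) / (from / to);
}

int main(void)
{
	unsigned long vm_flags = calc_trans(FAKE_MAP_STACK, FAKE_MAP_STACK, FAKE_VM_NOHUGEPAGE);

	/* With the new table entry, a MAP_STACK mapping picks up VM_NOHUGEPAGE. */
	printf("vm_flags = %#lx\n", vm_flags);
	return 0;
}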
@@ -2013,9 +2013,9 @@ static inline int pfn_valid(unsigned long pfn)
if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
return 0;
ms = __pfn_to_section(pfn);
rcu_read_lock();
rcu_read_lock_sched();
if (!valid_section(ms)) {
rcu_read_unlock();
rcu_read_unlock_sched();
return 0;
}
/*
@@ -2023,7 +2023,7 @@ static inline int pfn_valid(unsigned long pfn)
* the entire section-sized span.
*/
ret = early_section(ms) || pfn_section_valid(ms, pfn);
rcu_read_unlock();
rcu_read_unlock_sched();
return ret;
}
@@ -537,7 +537,7 @@ int uprobe_write_opcode(struct arch_uprobe *auprobe, struct mm_struct *mm,
}
}
ret = __replace_page(vma, vaddr, old_page, new_page);
ret = __replace_page(vma, vaddr & PAGE_MASK, old_page, new_page);
if (new_page)
put_page(new_page);
put_old:
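For illustration only, not part of the patch: the arithmetic of the alignment fix, assuming 4 KiB pages. Masking the probed virtual address with PAGE_MASK yields the start of the page that actually gets replaced.

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long vaddr = 0x7f3a12345677UL;	/* arbitrary address inside a page */

	printf("probed vaddr    = %#lx\n", vaddr);
	printf("page to replace = %#lx\n", vaddr & PAGE_MASK);	/* 0x7f3a12345000 */
	return 0;
}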
This diff is collapsed.
@@ -37,6 +37,7 @@
#include <linux/page_owner.h>
#include <linux/sched/sysctl.h>
#include <linux/memory-tiers.h>
#include <linux/compat.h>
#include <asm/tlb.h>
#include <asm/pgalloc.h>
@@ -809,7 +810,10 @@ static unsigned long __thp_get_unmapped_area(struct file *filp,
{
loff_t off_end = off + len;
loff_t off_align = round_up(off, size);
unsigned long len_pad, ret;
unsigned long len_pad, ret, off_sub;
if (IS_ENABLED(CONFIG_32BIT) || in_compat_syscall())
return 0;
if (off_end <= off_align || (off_end - off_align) < size)
return 0;
@@ -835,7 +839,13 @@ static unsigned long __thp_get_unmapped_area(struct file *filp,
if (ret == addr)
return addr;
ret += (off - ret) & (size - 1);
off_sub = (off - ret) & (size - 1);
if (current->mm->get_unmapped_area == arch_get_unmapped_area_topdown &&
!off_sub)
return ret + size;
ret += off_sub;
return ret;
}
@@ -2437,7 +2447,7 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
page = pmd_page(old_pmd);
folio = page_folio(page);
if (!folio_test_dirty(folio) && pmd_dirty(old_pmd))
folio_set_dirty(folio);
folio_mark_dirty(folio);
if (!folio_test_referenced(folio) && pmd_young(old_pmd))
folio_set_referenced(folio);
folio_remove_rmap_pmd(folio, page, vma);
@@ -3563,7 +3573,7 @@ int set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
}
if (pmd_dirty(pmdval))
folio_set_dirty(folio);
folio_mark_dirty(folio);
if (pmd_write(pmdval))
entry = make_writable_migration_entry(page_to_pfn(page));
else if (anon_exclusive)
@@ -2623,8 +2623,9 @@ static unsigned long calculate_high_delay(struct mem_cgroup *memcg,
}
/*
* Scheduled by try_charge() to be executed from the userland return path
* and reclaims memory over the high limit.
* Reclaims memory over the high limit. Called directly from
* try_charge() (context permitting), as well as from the userland
* return path where reclaim is always able to block.
*/
void mem_cgroup_handle_over_high(gfp_t gfp_mask)
{
@@ -2643,6 +2644,17 @@ void mem_cgroup_handle_over_high(gfp_t gfp_mask)
current->memcg_nr_pages_over_high = 0;
retry_reclaim:
/*
* Bail if the task is already exiting. Unlike memory.max,
* memory.high enforcement isn't as strict, and there is no
* OOM killer involved, which means the excess could already
* be much bigger (and still growing) than it could for
* memory.max; the dying task could get stuck in fruitless
* reclaim for a long time, which isn't desirable.
*/
if (task_is_dying())
goto out;
/*
* The allocating task should reclaim at least the batch size, but for
* subsequent retries we only want to do what's necessary to prevent oom
@@ -2693,6 +2705,9 @@ void mem_cgroup_handle_over_high(gfp_t gfp_mask)
}
/*
* Reclaim didn't manage to push usage below the limit, slow
* this allocating task down.
*
* If we exit early, we're guaranteed to die (since
* schedule_timeout_killable sets TASK_KILLABLE). This means we don't
* need to account for any ill-begotten jiffies to pay them off later.
@@ -2887,11 +2902,17 @@ static int try_charge_memcg(struct mem_cgroup *memcg, gfp_t gfp_mask,
}
} while ((memcg = parent_mem_cgroup(memcg)));
/*
* Reclaim is set up above to be called from the userland
* return path. But also attempt synchronous reclaim to avoid
* excessive overrun while the task is still inside the
* kernel. If this is successful, the return path will see it
* when it rechecks the overage and simply bail out.
*/
if (current->memcg_nr_pages_over_high > MEMCG_CHARGE_BATCH &&
!(current->flags & PF_MEMALLOC) &&
gfpflags_allow_blocking(gfp_mask)) {
gfpflags_allow_blocking(gfp_mask))
mem_cgroup_handle_over_high(gfp_mask);
}
return 0;
}
@@ -982,7 +982,7 @@ static bool has_extra_refcount(struct page_state *ps, struct page *p,
int count = page_count(p) - 1;
if (extra_pins)
count -= 1;
count -= folio_nr_pages(page_folio(p));
if (count > 0) {
pr_err("%#lx: %s still referenced by %d users\n",
@@ -1464,7 +1464,7 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
delay_rmap = 0;
if (!folio_test_anon(folio)) {
if (pte_dirty(ptent)) {
folio_set_dirty(folio);
folio_mark_dirty(folio);
if (tlb_delay_rmap(tlb)) {
delay_rmap = 1;
force_flush = 1;
@@ -1825,15 +1825,17 @@ get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
/*
* mmap_region() will call shmem_zero_setup() to create a file,
* so use shmem's get_unmapped_area in case it can be huge.
* do_mmap() will clear pgoff, so match alignment.
*/
pgoff = 0;
get_area = shmem_get_unmapped_area;
} else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
/* Ensures that larger anonymous mappings are THP aligned. */
get_area = thp_get_unmapped_area;
}
/* Always treat pgoff as zero for anonymous memory. */
if (!file)
pgoff = 0;
addr = get_area(file, addr, len, pgoff, flags);
if (IS_ERR_VALUE(addr))
return addr;
@@ -1638,7 +1638,7 @@ static inline void wb_dirty_limits(struct dirty_throttle_control *dtc)
*/
dtc->wb_thresh = __wb_calc_thresh(dtc);
dtc->wb_bg_thresh = dtc->thresh ?
div_u64((u64)dtc->wb_thresh * dtc->bg_thresh, dtc->thresh) : 0;
div64_u64(dtc->wb_thresh * dtc->bg_thresh, dtc->thresh) : 0;
/*
* In order to avoid the stacked BDI deadlock we need
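For illustration only, not part of the patch: why the full-width divisor matters. div_u64() takes a 32-bit divisor, so a thresh value whose low 32 bits happen to be zero is truncated to 0 and the division faults; div64_u64() keeps the whole 64-bit divisor. The numbers below are made up.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t thresh = 1ULL << 32;	/* divisor with nothing in the low 32 bits */

	/* div_u64(x, divisor) only sees the truncated low 32 bits ... */
	printf("divisor as seen by div_u64:   %u\n", (uint32_t)thresh);

	/* ... while div64_u64(x, divisor) divides by the full 64-bit value. */
	printf("divisor as seen by div64_u64: %llu\n", (unsigned long long)thresh);
	return 0;
}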
@@ -469,7 +469,7 @@ static inline int ra_alloc_folio(struct readahead_control *ractl, pgoff_t index,
if (!folio)
return -ENOMEM;
mark = round_up(mark, 1UL << order);
mark = round_down(mark, 1UL << order);
if (index == mark)
folio_set_readahead(folio);
err = filemap_add_folio(ractl->mapping, folio, index, gfp);
@@ -575,7 +575,7 @@ static void ondemand_readahead(struct readahead_control *ractl,
* It's the expected callback index, assume sequential access.
* Ramp up sizes, and push forward the readahead window.
*/
expected = round_up(ra->start + ra->size - ra->async_size,
expected = round_down(ra->start + ra->size - ra->async_size,
1UL << order);
if (index == expected || index == (ra->start + ra->size)) {
ra->start += ra->size;
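For illustration only, not part of the patch: the alignment arithmetic behind the round_down change, with made-up numbers. Folios of a given order start at indices that are multiples of their page count, so only a mark rounded down to that boundary can equal the index of the folio containing the original mark page.

#include <stdio.h>

int main(void)
{
	unsigned long mark = 5, order = 2;
	unsigned long nr = 1UL << order;			/* 4 pages per folio */
	unsigned long up = (mark + nr - 1) & ~(nr - 1);		/* round_up(5, 4)   == 8 */
	unsigned long down = mark & ~(nr - 1);			/* round_down(5, 4) == 4 */

	/* Page 5 lives in the folio starting at index 4, so the readahead flag
	 * should be set when index == 4, which only the rounded-down mark hits. */
	printf("round_up = %lu, round_down = %lu\n", up, down);
	return 0;
}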
@@ -357,6 +357,7 @@ static __always_inline ssize_t mfill_atomic_hugetlb(
unsigned long dst_start,
unsigned long src_start,
unsigned long len,
atomic_t *mmap_changing,
uffd_flags_t flags)
{
struct mm_struct *dst_mm = dst_vma->vm_mm;
@@ -472,6 +473,15 @@ static __always_inline ssize_t mfill_atomic_hugetlb(
goto out;
}
mmap_read_lock(dst_mm);
/*
* If memory mappings are changing because of non-cooperative
* operation (e.g. mremap) running in parallel, bail out and
* request the user to retry later
*/
if (mmap_changing && atomic_read(mmap_changing)) {
err = -EAGAIN;
break;
}
dst_vma = NULL;
goto retry;
@@ -506,6 +516,7 @@ extern ssize_t mfill_atomic_hugetlb(struct vm_area_struct *dst_vma,
unsigned long dst_start,
unsigned long src_start,
unsigned long len,
atomic_t *mmap_changing,
uffd_flags_t flags);
#endif /* CONFIG_HUGETLB_PAGE */
@@ -622,8 +633,8 @@ static __always_inline ssize_t mfill_atomic(struct mm_struct *dst_mm,
* If this is a HUGETLB vma, pass off to appropriate routine
*/
if (is_vm_hugetlb_page(dst_vma))
return mfill_atomic_hugetlb(dst_vma, dst_start,
src_start, len, flags);
return mfill_atomic_hugetlb(dst_vma, dst_start, src_start,
len, mmap_changing, flags);
if (!vma_is_anonymous(dst_vma) && !vma_is_shmem(dst_vma))
goto out_unlock;
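For illustration only, not part of the patch: a user-space model of the pattern the fix restores, re-checking a "mappings are changing" flag after re-taking the lock on the retry path and bailing out with EAGAIN instead of working on a stale view. All names here are invented for the sketch.

#include <errno.h>
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static pthread_mutex_t map_lock = PTHREAD_MUTEX_INITIALIZER;
static atomic_int mmap_changing;	/* set by a concurrent "non-cooperative" operation */

/* Placeholder for the copy work that must only run on a stable view. */
static int fill_range_locked(void)
{
	return 0;
}

static int fill_range(void)
{
	int err;

	pthread_mutex_lock(&map_lock);
	/* Mirror of the mmap_changing check: if the mappings changed while the
	 * lock was dropped, give up and let the caller retry later. */
	if (atomic_load(&mmap_changing)) {
		pthread_mutex_unlock(&map_lock);
		return -EAGAIN;
	}
	err = fill_range_locked();
	pthread_mutex_unlock(&map_lock);
	return err;
}

int main(void)
{
	atomic_store(&mmap_changing, 1);
	printf("fill_range() = %d (expected -EAGAIN = %d)\n", fill_range(), -EAGAIN);
	return 0;
}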
#!/bin/sh
#!/bin/bash
# SPDX-License-Identifier: GPL-2.0
# Kselftest framework requirement - SKIP code is 4.
@@ -566,7 +566,7 @@ static int ksm_merge_hugepages_time(int merge_type, int mapping, int prot,
if (map_ptr_orig == MAP_FAILED)
err(2, "initial mmap");
if (madvise(map_ptr, len + HPAGE_SIZE, MADV_HUGEPAGE))
if (madvise(map_ptr, len, MADV_HUGEPAGE))
err(2, "MADV_HUGEPAGE");
pagemap_fd = open("/proc/self/pagemap", O_RDONLY);
@@ -15,6 +15,7 @@
#include <unistd.h>
#include <sys/mman.h>
#include <fcntl.h>
#include "vm_util.h"
#define LENGTH (256UL*1024*1024)
#define PROTECTION (PROT_READ | PROT_WRITE)
@@ -58,10 +59,16 @@ int main(int argc, char **argv)
{
void *addr;
int ret;
size_t hugepage_size;
size_t length = LENGTH;
int flags = FLAGS;
int shift = 0;
hugepage_size = default_huge_page_size();
/* munmap will fail if the length is not page aligned */
if (hugepage_size > length)
length = hugepage_size;
if (argc > 1)
length = atol(argv[1]) << 20;
if (argc > 2) {
@@ -360,7 +360,8 @@ static long long remap_region(struct config c, unsigned int threshold_mb,
char pattern_seed)
{
void *addr, *src_addr, *dest_addr, *dest_preamble_addr;
unsigned long long i;
int d;
unsigned long long t;
struct timespec t_start = {0, 0}, t_end = {0, 0};
long long start_ns, end_ns, align_mask, ret, offset;
unsigned long long threshold;
@@ -378,8 +379,8 @@ static long long remap_region(struct config c, unsigned int threshold_mb,
/* Set byte pattern for source block. */
srand(pattern_seed);
for (i = 0; i < threshold; i++)
memset((char *) src_addr + i, (char) rand(), 1);
for (t = 0; t < threshold; t++)
memset((char *) src_addr + t, (char) rand(), 1);
/* Mask to zero out lower bits of address for alignment */
align_mask = ~(c.dest_alignment - 1);
@@ -420,8 +421,8 @@ static long long remap_region(struct config c, unsigned int threshold_mb,
/* Set byte pattern for the dest preamble block. */
srand(pattern_seed);
for (i = 0; i < c.dest_preamble_size; i++)
memset((char *) dest_preamble_addr + i, (char) rand(), 1);
for (d = 0; d < c.dest_preamble_size; d++)
memset((char *) dest_preamble_addr + d, (char) rand(), 1);
}
clock_gettime(CLOCK_MONOTONIC, &t_start);
@@ -437,14 +438,14 @@ static long long remap_region(struct config c, unsigned int threshold_mb,
/* Verify byte pattern after remapping */
srand(pattern_seed);
for (i = 0; i < threshold; i++) {
for (t = 0; t < threshold; t++) {
char c = (char) rand();
if (((char *) dest_addr)[i] != c) {
if (((char *) dest_addr)[t] != c) {
ksft_print_msg("Data after remap doesn't match at offset %llu\n",
i);
t);
ksft_print_msg("Expected: %#x\t Got: %#x\n", c & 0xff,
((char *) dest_addr)[i] & 0xff);
((char *) dest_addr)[t] & 0xff);
ret = -1;
goto clean_up_dest;
}
@@ -453,14 +454,14 @@ static long long remap_region(struct config c, unsigned int threshold_mb,
/* Verify the dest preamble byte pattern after remapping */
if (c.dest_preamble_size) {
srand(pattern_seed);
for (i = 0; i < c.dest_preamble_size; i++) {
for (d = 0; d < c.dest_preamble_size; d++) {
char c = (char) rand();
if (((char *) dest_preamble_addr)[i] != c) {
if (((char *) dest_preamble_addr)[d] != c) {
ksft_print_msg("Preamble data after remap doesn't match at offset %d\n",
i);
d);
ksft_print_msg("Expected: %#x\t Got: %#x\n", c & 0xff,
((char *) dest_preamble_addr)[i] & 0xff);
((char *) dest_preamble_addr)[d] & 0xff);
ret = -1;
goto clean_up_dest;
}
@@ -29,9 +29,15 @@ check_supported_x86_64()
# See man 1 gzip under '-f'.
local pg_table_levels=$(gzip -dcfq "${config}" | grep PGTABLE_LEVELS | cut -d'=' -f 2)
local cpu_supports_pl5=$(awk '/^flags/ {if (/la57/) {print 0;}
else {print 1}; exit}' /proc/cpuinfo 2>/dev/null)
if [[ "${pg_table_levels}" -lt 5 ]]; then
echo "$0: PGTABLE_LEVELS=${pg_table_levels}, must be >= 5 to run this test"
exit $ksft_skip
elif [[ "${cpu_supports_pl5}" -ne 0 ]]; then
echo "$0: CPU does not have the necessary la57 flag to support page table level 5"
exit $ksft_skip
fi
}
#!/bin/sh
#!/bin/bash
# SPDX-License-Identifier: GPL-2.0
set -e