Commit 3aaa8ce7 authored by Linus Torvalds

Merge tag 'mm-hotfixes-stable-2024-03-07-16-17' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

Pull misc fixes from Andrew Morton:
 "6 hotfixes. 4 are cc:stable and the remainder pertain to post-6.7
  issues or aren't considered to be needed in earlier kernel versions"

* tag 'mm-hotfixes-stable-2024-03-07-16-17' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm:
  scripts/gdb/symbols: fix invalid escape sequence warning
  mailmap: fix Kishon's email
  init/Kconfig: lower GCC version check for -Warray-bounds
  mm, mmap: fix vma_merge() case 7 with vma_ops->close
  mm: userfaultfd: fix unexpected change to src_folio when UFFDIO_MOVE fails
  mm, vmscan: prevent infinite loop for costly GFP_NOIO | __GFP_RETRY_MAYFAIL allocations
parents c381c89d ded79af4
@@ -325,6 +325,7 @@ Kenneth W Chen <kenneth.w.chen@intel.com>
 Kenneth Westfield <quic_kwestfie@quicinc.com> <kwestfie@codeaurora.org>
 Kiran Gunda <quic_kgunda@quicinc.com> <kgunda@codeaurora.org>
 Kirill Tkhai <tkhai@ya.ru> <ktkhai@virtuozzo.com>
+Kishon Vijay Abraham I <kishon@kernel.org> <kishon@ti.com>
 Konstantin Khlebnikov <koct9i@gmail.com> <khlebnikov@yandex-team.ru>
 Konstantin Khlebnikov <koct9i@gmail.com> <k.khlebnikov@samsung.com>
 Koushik <raghavendra.koushik@neterion.com>
......
@@ -353,6 +353,15 @@ static inline bool gfp_has_io_fs(gfp_t gfp)
 	return (gfp & (__GFP_IO | __GFP_FS)) == (__GFP_IO | __GFP_FS);
 }
 
+/*
+ * Check if the gfp flags allow compaction - GFP_NOIO is a really
+ * tricky context because the migration might require IO.
+ */
+static inline bool gfp_compaction_allowed(gfp_t gfp_mask)
+{
+	return IS_ENABLED(CONFIG_COMPACTION) && (gfp_mask & __GFP_IO);
+}
+
 extern gfp_t vma_thp_gfp_mask(struct vm_area_struct *vma);
 
 #ifdef CONFIG_CONTIG_ALLOC
......
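The new gfp_compaction_allowed() helper above simply refuses compaction whenever the caller's gfp mask does not allow IO, since compaction may have to migrate pages and migration can require IO. A minimal userspace sketch of that check is below; the FAKE_GFP_* values and the compaction_configured flag are illustrative stand-ins, not the kernel's real flag definitions.

#include <stdbool.h>
#include <stdio.h>

typedef unsigned int gfp_t;

/* Illustrative bit assignments, not the kernel's real values. */
#define FAKE_GFP_IO      0x1u                        /* stands in for __GFP_IO */
#define FAKE_GFP_FS      0x2u                        /* stands in for __GFP_FS */
#define FAKE_GFP_KERNEL  (FAKE_GFP_IO | FAKE_GFP_FS) /* IO and FS allowed */
#define FAKE_GFP_NOIO    0x0u                        /* neither IO nor FS allowed */

/* Stands in for IS_ENABLED(CONFIG_COMPACTION). */
static const bool compaction_configured = true;

/* Mirrors the logic of the new gfp_compaction_allowed() helper. */
static bool gfp_compaction_allowed_model(gfp_t gfp_mask)
{
	return compaction_configured && (gfp_mask & FAKE_GFP_IO);
}

int main(void)
{
	/* GFP_KERNEL-like mask: IO allowed, compaction may run (prints 1). */
	printf("%d\n", gfp_compaction_allowed_model(FAKE_GFP_KERNEL));
	/* GFP_NOIO-like mask: migration could need IO, compaction is skipped (prints 0). */
	printf("%d\n", gfp_compaction_allowed_model(FAKE_GFP_NOIO));
	return 0;
}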
@@ -876,14 +876,14 @@ config CC_IMPLICIT_FALLTHROUGH
 	default "-Wimplicit-fallthrough=5" if CC_IS_GCC && $(cc-option,-Wimplicit-fallthrough=5)
 	default "-Wimplicit-fallthrough" if CC_IS_CLANG && $(cc-option,-Wunreachable-code-fallthrough)
 
-# Currently, disable gcc-11+ array-bounds globally.
+# Currently, disable gcc-10+ array-bounds globally.
 # It's still broken in gcc-13, so no upper bound yet.
-config GCC11_NO_ARRAY_BOUNDS
+config GCC10_NO_ARRAY_BOUNDS
 	def_bool y
 
 config CC_NO_ARRAY_BOUNDS
 	bool
-	default y if CC_IS_GCC && GCC_VERSION >= 110000 && GCC11_NO_ARRAY_BOUNDS
+	default y if CC_IS_GCC && GCC_VERSION >= 100000 && GCC10_NO_ARRAY_BOUNDS
 
 # Currently, disable -Wstringop-overflow for GCC globally.
 config GCC_NO_STRINGOP_OVERFLOW
......
@@ -2723,16 +2723,11 @@ enum compact_result try_to_compact_pages(gfp_t gfp_mask, unsigned int order,
 		unsigned int alloc_flags, const struct alloc_context *ac,
 		enum compact_priority prio, struct page **capture)
 {
-	int may_perform_io = (__force int)(gfp_mask & __GFP_IO);
 	struct zoneref *z;
 	struct zone *zone;
 	enum compact_result rc = COMPACT_SKIPPED;
 
-	/*
-	 * Check if the GFP flags allow compaction - GFP_NOIO is really
-	 * tricky context because the migration might require IO
-	 */
-	if (!may_perform_io)
+	if (!gfp_compaction_allowed(gfp_mask))
 		return COMPACT_SKIPPED;
 
 	trace_mm_compaction_try_to_compact_pages(order, gfp_mask, prio);
......
@@ -954,13 +954,21 @@ static struct vm_area_struct
 	} else if (merge_prev) {		/* case 2 */
 		if (curr) {
 			vma_start_write(curr);
-			err = dup_anon_vma(prev, curr, &anon_dup);
 			if (end == curr->vm_end) {	/* case 7 */
+				/*
+				 * can_vma_merge_after() assumed we would not be
+				 * removing prev vma, so it skipped the check
+				 * for vm_ops->close, but we are removing curr
+				 */
+				if (curr->vm_ops && curr->vm_ops->close)
+					err = -EINVAL;
 				remove = curr;
 			} else {		/* case 5 */
 				adjust = curr;
 				adj_start = (end - curr->vm_start);
 			}
+			if (!err)
+				err = dup_anon_vma(prev, curr, &anon_dup);
 		}
 	} else { /* merge_next */
 		vma_start_write(next);
......
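The vma_merge() change above boils down to an ordering rule: decide whether the merge must be rejected (because the vma being removed has a vm_ops->close handler) before doing the side-effectful anon_vma duplication. A toy userspace sketch of that ordering follows; struct toy_vma, toy_dup_anon_vma() and toy_merge_case7() are hypothetical stand-ins invented for illustration, not the kernel's API.

#include <errno.h>

struct toy_vma {
	void (*close)(struct toy_vma *vma);	/* stands in for vm_ops->close */
	int anon_shared;			/* stands in for dup_anon_vma()'s side effect */
};

static int toy_dup_anon_vma(struct toy_vma *dst, struct toy_vma *src)
{
	(void)src;			/* the real dup_anon_vma() links dst to src's anon_vma */
	dst->anon_shared = 1;		/* irreversible work, so it must come last */
	return 0;
}

/* Model of case 7: prev absorbs curr, so curr is removed. */
static int toy_merge_case7(struct toy_vma *prev, struct toy_vma *curr)
{
	int err = 0;

	/* curr is being removed: a ->close handler means the merge must be refused */
	if (curr->close)
		err = -EINVAL;
	/* only mutate state once we know the merge can proceed */
	if (!err)
		err = toy_dup_anon_vma(prev, curr);
	return err;
}

static void toy_close(struct toy_vma *vma) { (void)vma; }

int main(void)
{
	struct toy_vma prev = {0}, plain = {0};
	struct toy_vma with_close = { .close = toy_close };

	/* a plain vma merges fine; one with a ->close handler is refused before any state changes */
	return (toy_merge_case7(&prev, &plain) == 0 &&
		toy_merge_case7(&prev, &with_close) == -EINVAL) ? 0 : 1;
}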
@@ -4041,6 +4041,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 						struct alloc_context *ac)
 {
 	bool can_direct_reclaim = gfp_mask & __GFP_DIRECT_RECLAIM;
+	bool can_compact = gfp_compaction_allowed(gfp_mask);
 	const bool costly_order = order > PAGE_ALLOC_COSTLY_ORDER;
 	struct page *page = NULL;
 	unsigned int alloc_flags;
@@ -4111,7 +4112,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 	 * Don't try this for allocations that are allowed to ignore
 	 * watermarks, as the ALLOC_NO_WATERMARKS attempt didn't yet happen.
 	 */
-	if (can_direct_reclaim &&
+	if (can_direct_reclaim && can_compact &&
 			(costly_order ||
 			   (order > 0 && ac->migratetype != MIGRATE_MOVABLE))
 			&& !gfp_pfmemalloc_allowed(gfp_mask)) {
@@ -4209,9 +4210,10 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 	/*
 	 * Do not retry costly high order allocations unless they are
-	 * __GFP_RETRY_MAYFAIL
+	 * __GFP_RETRY_MAYFAIL and we can compact
 	 */
-	if (costly_order && !(gfp_mask & __GFP_RETRY_MAYFAIL))
+	if (costly_order && (!can_compact ||
+			     !(gfp_mask & __GFP_RETRY_MAYFAIL)))
 		goto nopage;
 
 	if (should_reclaim_retry(gfp_mask, order, ac, alloc_flags,
@@ -4224,7 +4226,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 	 * implementation of the compaction depends on the sufficient amount
 	 * of free memory (see __compaction_suitable)
 	 */
-	if (did_some_progress > 0 &&
+	if (did_some_progress > 0 && can_compact &&
 			should_compact_retry(ac, order, alloc_flags,
 				compact_result, &compact_priority,
 				&compaction_retries))
......
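The page allocator changes above thread the new can_compact flag through the costly-order retry logic. Before this, a costly GFP_NOIO | __GFP_RETRY_MAYFAIL allocation could retry forever: compaction was being skipped because IO is not allowed, yet the retry path never gave up because __GFP_RETRY_MAYFAIL was set. A hedged userspace model of the corrected decision is below; costly_alloc_should_fail() and its boolean parameters are stand-ins for the kernel variables of the same meaning, not actual kernel code.

#include <stdbool.h>

/*
 * Models the check:
 *   if (costly_order && (!can_compact || !(gfp_mask & __GFP_RETRY_MAYFAIL)))
 *           goto nopage;
 * Returns true when the allocator should give up instead of retrying.
 */
static bool costly_alloc_should_fail(bool costly_order, bool can_compact,
				     bool retry_mayfail)
{
	return costly_order && (!can_compact || !retry_mayfail);
}

int main(void)
{
	/* costly GFP_NOIO | __GFP_RETRY_MAYFAIL: compaction is skipped, so fail fast now */
	bool noio = costly_alloc_should_fail(true, false, true);

	/* costly GFP_KERNEL | __GFP_RETRY_MAYFAIL: compaction is allowed, keep retrying */
	bool kernel = costly_alloc_should_fail(true, true, true);

	return (noio && !kernel) ? 0 : 1;
}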
@@ -914,9 +914,6 @@ static int move_present_pte(struct mm_struct *mm,
 		goto out;
 	}
 
-	folio_move_anon_rmap(src_folio, dst_vma);
-	WRITE_ONCE(src_folio->index, linear_page_index(dst_vma, dst_addr));
-
 	orig_src_pte = ptep_clear_flush(src_vma, src_addr, src_pte);
 	/* Folio got pinned from under us. Put it back and fail the move. */
 	if (folio_maybe_dma_pinned(src_folio)) {
@@ -925,6 +922,9 @@ static int move_present_pte(struct mm_struct *mm,
 		goto out;
 	}
 
+	folio_move_anon_rmap(src_folio, dst_vma);
+	WRITE_ONCE(src_folio->index, linear_page_index(dst_vma, dst_addr));
+
 	orig_dst_pte = mk_pte(&src_folio->page, dst_vma->vm_page_prot);
 	/* Follow mremap() behavior and treat the entry dirty after the move */
 	orig_dst_pte = pte_mkwrite(pte_mkdirty(orig_dst_pte), dst_vma);
......
@@ -5753,7 +5753,7 @@ static void shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
 /* Use reclaim/compaction for costly allocs or under memory pressure */
 static bool in_reclaim_compaction(struct scan_control *sc)
 {
-	if (IS_ENABLED(CONFIG_COMPACTION) && sc->order &&
+	if (gfp_compaction_allowed(sc->gfp_mask) && sc->order &&
 			(sc->order > PAGE_ALLOC_COSTLY_ORDER ||
 			 sc->priority < DEF_PRIORITY - 2))
 		return true;
@@ -5998,6 +5998,9 @@ static inline bool compaction_ready(struct zone *zone, struct scan_control *sc)
 {
 	unsigned long watermark;
 
+	if (!gfp_compaction_allowed(sc->gfp_mask))
+		return false;
+
 	/* Allocation can already succeed, nothing to do */
 	if (zone_watermark_ok(zone, sc->order, min_wmark_pages(zone),
 				    sc->reclaim_idx, 0))
......
@@ -82,7 +82,7 @@ lx-symbols command."""
         self.module_files_updated = True
 
     def _get_module_file(self, module_name):
-        module_pattern = ".*/{0}\.ko(?:.debug)?$".format(
+        module_pattern = r".*/{0}\.ko(?:.debug)?$".format(
             module_name.replace("_", r"[_\-]"))
         for name in self.module_files:
             if re.match(module_pattern, name) and os.path.exists(name):
......