Commit 5da226db authored by Zhaoyang Huang, committed by Andrew Morton

mm: skip CMA pages when they are not available

This patch fixes unproductive reclaim of CMA pages by skipping them when
they are not available to the current allocation context.  It was observed
as the OOM issue below, which was caused by a large proportion of
MIGRATE_CMA pages among the free pages.

[   36.172486] [03-19 10:05:52.172] ActivityManager: page allocation failure: order:0, mode:0xc00(GFP_NOIO), nodemask=(null),cpuset=foreground,mems_allowed=0
[   36.189447] [03-19 10:05:52.189] DMA32: 0*4kB 447*8kB (C) 217*16kB (C) 124*32kB (C) 136*64kB (C) 70*128kB (C) 22*256kB (C) 3*512kB (C) 0*1024kB 0*2048kB 0*4096kB = 35848kB
[   36.193125] [03-19 10:05:52.193] Normal: 231*4kB (UMEH) 49*8kB (MEH) 14*16kB (H) 13*32kB (H) 8*64kB (H) 2*128kB (H) 0*256kB 1*512kB (H) 0*1024kB 0*2048kB 0*4096kB = 3236kB
...
[   36.234447] [03-19 10:05:52.234] SLUB: Unable to allocate memory on node -1, gfp=0xa20(GFP_ATOMIC)
[   36.234455] [03-19 10:05:52.234] cache: ext4_io_end, object size: 64, buffer size: 64, default order: 0, min order: 0
[   36.234459] [03-19 10:05:52.234] node 0: slabs: 53,objs: 3392, free: 0

In the dump above, nearly all of the free memory in DMA32 sits in
MIGRATE_CMA pageblocks (the "(C)" annotations), which the failing GFP_NOIO
allocation can never use, so direct reclaim was freeing pages that could
not satisfy it.  This change further decreases the chance of spurious OOMs
in the presence of a lot of CMA memory.
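
For context (not part of the patch): gfp_migratetype() derives an
allocation's migratetype from the __GFP_MOVABLE and __GFP_RECLAIMABLE bits
of its gfp mask.  Masks such as GFP_NOIO (the failing allocation above),
GFP_KERNEL and GFP_ATOMIC carry neither bit and map to MIGRATE_UNMOVABLE,
so they can never be served from MIGRATE_CMA pageblocks, while
GFP_HIGHUSER_MOVABLE carries __GFP_MOVABLE and can.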

[david@redhat.com: changelog addition]
Link: https://lkml.kernel.org/r/1685501461-19290-1-git-send-email-zhaoyang.huang@unisoc.com
Signed-off-by: Zhaoyang Huang <zhaoyang.huang@unisoc.com>
Acked-by: David Hildenbrand <david@redhat.com>
Cc: ke.wang <ke.wang@unisoc.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Suren Baghdasaryan <surenb@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent ce5df776
@@ -2271,6 +2271,25 @@ static __always_inline void update_lru_sizes(struct lruvec *lruvec,
 }
 
+#ifdef CONFIG_CMA
+/*
+ * It is a waste of effort to scan and reclaim CMA pages if they are not
+ * available to the current allocation context. Kswapd cannot be enrolled
+ * here, as its sc->gfp_mask = GFP_KERNEL cannot distinguish this scenario.
+ */
+static bool skip_cma(struct folio *folio, struct scan_control *sc)
+{
+	return !current_is_kswapd() &&
+			gfp_migratetype(sc->gfp_mask) != MIGRATE_MOVABLE &&
+			get_pageblock_migratetype(&folio->page) == MIGRATE_CMA;
+}
+#else
+static bool skip_cma(struct folio *folio, struct scan_control *sc)
+{
+	return false;
+}
+#endif
+
 /*
  * Isolating page from the lruvec to fill in @dst list by nr_to_scan times.
  *
@@ -2317,7 +2336,8 @@ static unsigned long isolate_lru_folios(unsigned long nr_to_scan,
 		nr_pages = folio_nr_pages(folio);
 		total_scan += nr_pages;
 
-		if (folio_zonenum(folio) > sc->reclaim_idx) {
+		if (folio_zonenum(folio) > sc->reclaim_idx ||
+				skip_cma(folio, sc)) {
 			nr_skipped[folio_zonenum(folio)] += nr_pages;
 			move_to = &folios_skipped;
 			goto move;
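
To make the predicate concrete, here is a minimal user-space model of
skip_cma().  struct alloc_context and its fields are hypothetical stand-ins
for the kernel's scan_control, current_is_kswapd(), gfp_migratetype() and
get_pageblock_migratetype(), not the real API:

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-in for the kernel's migratetype enum. */
enum migratetype { MIGRATE_UNMOVABLE, MIGRATE_MOVABLE, MIGRATE_CMA };

/* Hypothetical stand-in for scan_control plus current_is_kswapd(). */
struct alloc_context {
	bool is_kswapd;          /* is reclaim driven by kswapd? */
	enum migratetype gfp_mt; /* migratetype implied by the gfp mask */
};

/*
 * Mirrors the patch's skip_cma(): a CMA folio is only worth reclaiming
 * if the allocation behind this reclaim can actually be placed in a
 * MIGRATE_CMA pageblock, i.e. if it is a movable allocation. Kswapd is
 * exempt because its GFP_KERNEL mask says nothing about the consumer.
 */
static bool skip_cma(const struct alloc_context *ctx, enum migratetype folio_mt)
{
	return !ctx->is_kswapd &&
	       ctx->gfp_mt != MIGRATE_MOVABLE &&
	       folio_mt == MIGRATE_CMA;
}

int main(void)
{
	struct alloc_context atomic_ctx  = { false, MIGRATE_UNMOVABLE };
	struct alloc_context movable_ctx = { false, MIGRATE_MOVABLE };
	struct alloc_context kswapd_ctx  = { true,  MIGRATE_UNMOVABLE };

	printf("unmovable direct reclaim, CMA folio: skip=%d\n",
	       skip_cma(&atomic_ctx, MIGRATE_CMA));
	printf("movable direct reclaim,   CMA folio: skip=%d\n",
	       skip_cma(&movable_ctx, MIGRATE_CMA));
	printf("kswapd reclaim,           CMA folio: skip=%d\n",
	       skip_cma(&kswapd_ctx, MIGRATE_CMA));
	return 0;
}

Compiled standalone, it prints skip=1 for the unmovable direct-reclaim
context and skip=0 for the other two, mirroring which LRU folios
isolate_lru_folios() now diverts to folios_skipped.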