Commit 7c76d922 authored by Matthew Wilcox (Oracle), committed by Andrew Morton

mm: convert free_unref_page_list() to use folios

Most of its callees are not yet ready to accept a folio, but we know all
of the pages passed in are actually folios because they're linked through
->lru.

Link: https://lkml.kernel.org/r/20240227174254.710559-3-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Ryan Roberts <ryan.roberts@arm.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Mel Gorman <mgorman@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 99fbb6bf
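
Why walking the list as folios is sound: struct folio begins with an embedded head struct page, and folio->lru occupies the same bytes as page->lru, so a list threaded through the pages' lru members can be reinterpreted as a list of folios without touching the list itself. Below is a minimal, self-contained userspace sketch of that aliasing; it is not part of the patch, and the struct layouts are simplified stand-ins for the kernel's real struct page and struct folio.

/*
 * Sketch only: simplified stand-ins for the kernel's types.
 * The point is the layout, not the field set.
 */
#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

struct page {
	struct list_head lru;
	unsigned long pfn;
};

struct folio {
	union {
		struct {
			struct list_head lru;	/* same offset as page.lru */
			unsigned long pfn;
		};
		struct page page;		/* the embedded head page */
	};
};

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static void list_add_tail(struct list_head *new, struct list_head *head)
{
	new->prev = head->prev;
	new->next = head;
	head->prev->next = new;
	head->prev = new;
}

int main(void)
{
	struct list_head list = { &list, &list };
	struct folio folios[2] = { { .pfn = 1 }, { .pfn = 2 } };

	/* Callers link entries through the page's lru member... */
	for (int i = 0; i < 2; i++)
		list_add_tail(&folios[i].page.lru, &list);

	/*
	 * ...but the same list can be walked as folios, because
	 * &folio->lru and &folio->page.lru are the same address.
	 */
	for (struct list_head *pos = list.next; pos != &list; pos = pos->next) {
		struct folio *folio = container_of(pos, struct folio, lru);
		printf("folio pfn %lu (page pfn %lu)\n", folio->pfn, folio->page.pfn);
	}
	return 0;
}

The converted loops in the patch rely on exactly this: list_for_each_entry_safe(folio, next, list, lru) recovers a folio from the same list_head the callers linked through page->lru, and &folio->page hands the head page to the helpers that still take a struct page.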
@@ -2520,17 +2520,17 @@ void free_unref_page(struct page *page, unsigned int order)
 void free_unref_page_list(struct list_head *list)
 {
 	unsigned long __maybe_unused UP_flags;
-	struct page *page, *next;
+	struct folio *folio, *next;
 	struct per_cpu_pages *pcp = NULL;
 	struct zone *locked_zone = NULL;
 	int batch_count = 0;
 	int migratetype;
 
 	/* Prepare pages for freeing */
-	list_for_each_entry_safe(page, next, list, lru) {
-		unsigned long pfn = page_to_pfn(page);
-		if (!free_unref_page_prepare(page, pfn, 0)) {
-			list_del(&page->lru);
+	list_for_each_entry_safe(folio, next, list, lru) {
+		unsigned long pfn = folio_pfn(folio);
+		if (!free_unref_page_prepare(&folio->page, pfn, 0)) {
+			list_del(&folio->lru);
 			continue;
 		}
 
@@ -2538,24 +2538,25 @@ void free_unref_page_list(struct list_head *list)
 		 * Free isolated pages directly to the allocator, see
 		 * comment in free_unref_page.
 		 */
-		migratetype = get_pcppage_migratetype(page);
+		migratetype = get_pcppage_migratetype(&folio->page);
 		if (unlikely(is_migrate_isolate(migratetype))) {
-			list_del(&page->lru);
-			free_one_page(page_zone(page), page, pfn, 0, migratetype, FPI_NONE);
+			list_del(&folio->lru);
+			free_one_page(folio_zone(folio), &folio->page, pfn,
+					0, migratetype, FPI_NONE);
 			continue;
 		}
 	}
 
-	list_for_each_entry_safe(page, next, list, lru) {
-		struct zone *zone = page_zone(page);
+	list_for_each_entry_safe(folio, next, list, lru) {
+		struct zone *zone = folio_zone(folio);
 
-		list_del(&page->lru);
-		migratetype = get_pcppage_migratetype(page);
+		list_del(&folio->lru);
+		migratetype = get_pcppage_migratetype(&folio->page);
 
 		/*
 		 * Either different zone requiring a different pcp lock or
 		 * excessive lock hold times when freeing a large list of
-		 * pages.
+		 * folios.
 		 */
 		if (zone != locked_zone || batch_count == SWAP_CLUSTER_MAX) {
 			if (pcp) {
@@ -2566,15 +2567,16 @@ void free_unref_page_list(struct list_head *list)
 			batch_count = 0;
 
 			/*
-			 * trylock is necessary as pages may be getting freed
+			 * trylock is necessary as folios may be getting freed
 			 * from IRQ or SoftIRQ context after an IO completion.
 			 */
 			pcp_trylock_prepare(UP_flags);
 			pcp = pcp_spin_trylock(zone->per_cpu_pageset);
 			if (unlikely(!pcp)) {
 				pcp_trylock_finish(UP_flags);
-				free_one_page(zone, page, page_to_pfn(page),
-					      0, migratetype, FPI_NONE);
+				free_one_page(zone, &folio->page,
+						folio_pfn(folio), 0,
+						migratetype, FPI_NONE);
 				locked_zone = NULL;
 				continue;
 			}
@@ -2588,8 +2590,8 @@ void free_unref_page_list(struct list_head *list)
 		if (unlikely(migratetype >= MIGRATE_PCPTYPES))
 			migratetype = MIGRATE_MOVABLE;
 
-		trace_mm_page_free_batched(page);
-		free_unref_page_commit(zone, pcp, page, migratetype, 0);
+		trace_mm_page_free_batched(&folio->page);
+		free_unref_page_commit(zone, pcp, &folio->page, migratetype, 0);
 		batch_count++;
 	}
 