Commit 52881539 authored by Kefeng Wang, committed by Andrew Morton

mm: migrate: split folio_migrate_mapping()

Split folio_migrate_mapping() into a new static __folio_migrate_mapping() and a
wrapper that performs the folio refcount check up front, so the check is shared
by both the !mapping and mapping cases. Also update the comments of
folio_migrate_mapping() from page to folio.

No functional change intended.

Link: https://lkml.kernel.org/r/20240626085328.608006-4-wangkefeng.wang@huawei.com
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Cc: Alistair Popple <apopple@nvidia.com>
Cc: Benjamin LaHaise <bcrl@kvack.org>
Cc: David Hildenbrand <david@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Jane Chu <jane.chu@oracle.com>
Cc: Jérôme Glisse <jglisse@redhat.com>
Cc: Jiaqi Yan <jiaqiyan@google.com>
Cc: Lance Yang <ioworker0@gmail.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Miaohe Lin <linmiaohe@huawei.com>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Naoya Horiguchi <nao.horiguchi@gmail.com>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Vishal Moola (Oracle) <vishal.moola@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 02f4ee5a
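
For context, the expected reference count used by the new wrapper comes from
folio_expected_refs(), which is visible in the hunk header below but whose body
is not part of this diff. At the time of this series it reads roughly as
follows (quoted from mm/migrate.c for reference only; not part of the patch):

	static int folio_expected_refs(struct address_space *mapping,
			struct folio *folio)
	{
		int refs = 1;

		if (!mapping)
			return refs;

		refs += folio_nr_pages(folio);
		if (folio_test_private(folio))
			refs++;

		return refs;
	}

This is the source of the 1/2/3 rule in the comment the patch keeps: one
baseline reference, plus the page cache references for a folio with a mapping,
plus one more when private data is attached.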
mm/migrate.c
@@ -393,28 +393,23 @@ static int folio_expected_refs(struct address_space *mapping,
 }
 
 /*
- * Replace the page in the mapping.
+ * Replace the folio in the mapping.
  *
  * The number of remaining references must be:
- * 1 for anonymous pages without a mapping
- * 2 for pages with a mapping
- * 3 for pages with a mapping and PagePrivate/PagePrivate2 set.
+ * 1 for anonymous folios without a mapping
+ * 2 for folios with a mapping
+ * 3 for folios with a mapping and PagePrivate/PagePrivate2 set.
  */
-int folio_migrate_mapping(struct address_space *mapping,
-		struct folio *newfolio, struct folio *folio, int extra_count)
+static int __folio_migrate_mapping(struct address_space *mapping,
+		struct folio *newfolio, struct folio *folio, int expected_count)
 {
 	XA_STATE(xas, &mapping->i_pages, folio_index(folio));
 	struct zone *oldzone, *newzone;
 	int dirty;
-	int expected_count = folio_expected_refs(mapping, folio) + extra_count;
 	long nr = folio_nr_pages(folio);
 	long entries, i;
 
 	if (!mapping) {
-		/* Anonymous page without mapping */
-		if (folio_ref_count(folio) != expected_count)
-			return -EAGAIN;
-
 		/* Take off deferred split queue while frozen and memcg set */
 		if (folio_test_large(folio) &&
 		    folio_test_large_rmappable(folio)) {
@@ -465,7 +460,7 @@ int folio_migrate_mapping(struct address_space *mapping,
 		entries = 1;
 	}
 
-	/* Move dirty while page refs frozen and newpage not yet exposed */
+	/* Move dirty while folio refs frozen and newfolio not yet exposed */
 	dirty = folio_test_dirty(folio);
 	if (dirty) {
 		folio_clear_dirty(folio);
@@ -479,7 +474,7 @@ int folio_migrate_mapping(struct address_space *mapping,
 	}
 
 	/*
-	 * Drop cache reference from old page by unfreezing
+	 * Drop cache reference from old folio by unfreezing
	 * to one less reference.
 	 * We know this isn't the last reference.
 	 */
@@ -490,11 +485,11 @@ int folio_migrate_mapping(struct address_space *mapping,
 
 	/*
 	 * If moved to a different zone then also account
-	 * the page for that zone. Other VM counters will be
+	 * the folio for that zone. Other VM counters will be
 	 * taken care of when we establish references to the
-	 * new page and drop references to the old page.
+	 * new folio and drop references to the old folio.
 	 *
-	 * Note that anonymous pages are accounted for
+	 * Note that anonymous folios are accounted for
 	 * via NR_FILE_PAGES and NR_ANON_MAPPED if they
 	 * are mapped to swap space.
 	 */
@@ -534,6 +529,17 @@ int folio_migrate_mapping(struct address_space *mapping,
 	return MIGRATEPAGE_SUCCESS;
 }
+
+int folio_migrate_mapping(struct address_space *mapping,
+		struct folio *newfolio, struct folio *folio, int extra_count)
+{
+	int expected_count = folio_expected_refs(mapping, folio) + extra_count;
+
+	if (folio_ref_count(folio) != expected_count)
+		return -EAGAIN;
+
+	return __folio_migrate_mapping(mapping, newfolio, folio, expected_count);
+}
 EXPORT_SYMBOL(folio_migrate_mapping);
 
 /*
...
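
To make the hoisted check concrete, here is a minimal user-space model of the
wrapper's decision, assuming stand-in types; every _model name below is
hypothetical and invented for illustration, not a kernel symbol:

	#include <stdio.h>

	/* Stand-in for struct folio: just the fields the check needs. */
	struct folio_model {
		int refcount;		/* models folio_ref_count() */
		long nr_pages;		/* models folio_nr_pages() */
		int has_private;	/* models folio_test_private() */
	};

	#define EAGAIN			11
	#define MIGRATEPAGE_SUCCESS	0

	/* Mirrors the comment kept by the patch: 1 for an anonymous folio
	 * without a mapping, 1 + nr_pages with a mapping, one more when
	 * private data is attached. */
	static int expected_refs_model(int has_mapping, const struct folio_model *f)
	{
		int refs = 1;

		if (!has_mapping)
			return refs;
		refs += f->nr_pages;
		if (f->has_private)
			refs++;
		return refs;
	}

	static int migrate_mapping_model(int has_mapping, struct folio_model *f,
					 int extra_count)
	{
		int expected_count = expected_refs_model(has_mapping, f) + extra_count;

		/* The check the patch hoists into the wrapper: any reference
		 * beyond the expected ones aborts the migration attempt. */
		if (f->refcount != expected_count)
			return -EAGAIN;

		/* __folio_migrate_mapping() would now freeze the count and
		 * replace the mapping entries; elided in this model. */
		return MIGRATEPAGE_SUCCESS;
	}

	int main(void)
	{
		struct folio_model f = { .refcount = 2, .nr_pages = 1, .has_private = 0 };

		/* 1 (baseline) + 1 (page cache) == 2: migration may proceed. */
		printf("quiescent: %d\n", migrate_mapping_model(1, &f, 0));

		f.refcount = 3;	/* a transient pin held elsewhere */
		printf("pinned:    %d\n", migrate_mapping_model(1, &f, 0));
		return 0;
	}

The wrapper keeps the exported API and its -EAGAIN semantics unchanged, while
__folio_migrate_mapping() can now assume a caller-verified expected_count for
both the !mapping and mapping paths.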