Commit 91e79d22 authored by Matthew Wilcox (Oracle), committed by Andrew Morton

mm: convert DAX lock/unlock page to lock/unlock folio

The one caller of DAX lock/unlock page already calls compound_head(), so
use page_folio() instead, then use a folio throughout the DAX code to
remove uses of page->mapping and page->index.
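
As a sketch, the call-site change described above amounts to the following (condensed from the mm/memory-failure.c hunk below; not the literal patch text):

	/* Before: look up the page, then normalize to the head page. */
	struct page *page = pfn_to_page(pfn);
	page = compound_head(page);
	cookie = dax_lock_page(page);
	...
	dax_unlock_page(page, cookie);

	/* After: pfn_folio() does the head lookup and returns a folio. */
	struct folio *folio = pfn_folio(pfn);
	cookie = dax_lock_folio(folio);
	...
	dax_unlock_folio(folio, cookie);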

[jane.chu@oracle.com: add comment to mf_generic_kill_procs(), simplify mf_generic_kill_procs() folio initialization]
  Link: https://lkml.kernel.org/r/20230908222336.186313-1-jane.chu@oracle.com
Link: https://lkml.kernel.org/r/20230822231314.349200-1-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Jane Chu <jane.chu@oracle.com>
Acked-by: Naoya Horiguchi <naoya.horiguchi@nec.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Jane Chu <jane.chu@oracle.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent bc0c3357
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -412,23 +412,23 @@ static struct page *dax_busy_page(void *entry)
 	return NULL;
 }
 
-/*
- * dax_lock_page - Lock the DAX entry corresponding to a page
- * @page: The page whose entry we want to lock
+/**
+ * dax_lock_folio - Lock the DAX entry corresponding to a folio
+ * @folio: The folio whose entry we want to lock
  *
  * Context: Process context.
- * Return: A cookie to pass to dax_unlock_page() or 0 if the entry could
+ * Return: A cookie to pass to dax_unlock_folio() or 0 if the entry could
  * not be locked.
  */
-dax_entry_t dax_lock_page(struct page *page)
+dax_entry_t dax_lock_folio(struct folio *folio)
 {
 	XA_STATE(xas, NULL, 0);
 	void *entry;
 
-	/* Ensure page->mapping isn't freed while we look at it */
+	/* Ensure folio->mapping isn't freed while we look at it */
 	rcu_read_lock();
 	for (;;) {
-		struct address_space *mapping = READ_ONCE(page->mapping);
+		struct address_space *mapping = READ_ONCE(folio->mapping);
 
 		entry = NULL;
 		if (!mapping || !dax_mapping(mapping))
@@ -447,11 +447,11 @@ dax_entry_t dax_lock_page(struct page *page)
 		xas.xa = &mapping->i_pages;
 		xas_lock_irq(&xas);
-		if (mapping != page->mapping) {
+		if (mapping != folio->mapping) {
 			xas_unlock_irq(&xas);
 			continue;
 		}
-		xas_set(&xas, page->index);
+		xas_set(&xas, folio->index);
 		entry = xas_load(&xas);
 		if (dax_is_locked(entry)) {
 			rcu_read_unlock();
@@ -467,10 +467,10 @@ dax_entry_t dax_lock_page(struct page *page)
 	return (dax_entry_t)entry;
 }
 
-void dax_unlock_page(struct page *page, dax_entry_t cookie)
+void dax_unlock_folio(struct folio *folio, dax_entry_t cookie)
 {
-	struct address_space *mapping = page->mapping;
-	XA_STATE(xas, &mapping->i_pages, page->index);
+	struct address_space *mapping = folio->mapping;
+	XA_STATE(xas, &mapping->i_pages, folio->index);
 
 	if (S_ISCHR(mapping->host->i_mode))
 		return;
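
The locking contract is unchanged by the conversion: the caller holds the returned cookie for as long as it interrogates folio->mapping and folio->index. Using struct folio also encodes in the type that ->mapping and ->index are head-page fields, which tail pages do not carry. A minimal caller sketch (hypothetical, assuming a folio backed by a DAX mapping):

	dax_entry_t cookie = dax_lock_folio(folio);
	if (!cookie)
		return -EBUSY;	/* entry could not be locked */
	/* folio->mapping and folio->index are stable here */
	dax_unlock_folio(folio, cookie);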
--- a/include/linux/dax.h
+++ b/include/linux/dax.h
@@ -159,8 +159,8 @@ int dax_writeback_mapping_range(struct address_space *mapping,
 struct page *dax_layout_busy_page(struct address_space *mapping);
 struct page *dax_layout_busy_page_range(struct address_space *mapping, loff_t start, loff_t end);
-dax_entry_t dax_lock_page(struct page *page);
-void dax_unlock_page(struct page *page, dax_entry_t cookie);
+dax_entry_t dax_lock_folio(struct folio *folio);
+void dax_unlock_folio(struct folio *folio, dax_entry_t cookie);
 dax_entry_t dax_lock_mapping_entry(struct address_space *mapping,
 		unsigned long index, struct page **page);
 void dax_unlock_mapping_entry(struct address_space *mapping,
@@ -182,14 +182,14 @@ static inline int dax_writeback_mapping_range(struct address_space *mapping,
 	return -EOPNOTSUPP;
 }
 
-static inline dax_entry_t dax_lock_page(struct page *page)
+static inline dax_entry_t dax_lock_folio(struct folio *folio)
 {
-	if (IS_DAX(page->mapping->host))
+	if (IS_DAX(folio->mapping->host))
 		return ~0UL;
 	return 0;
 }
 
-static inline void dax_unlock_page(struct page *page, dax_entry_t cookie)
+static inline void dax_unlock_folio(struct folio *folio, dax_entry_t cookie)
 {
 }
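
Note that the !CONFIG_FS_DAX stub preserves the cookie convention: a folio whose host inode is DAX still yields a non-zero cookie (~0UL), so callers such as mf_generic_kill_procs() need no ifdefs. A hypothetical illustration (not part of the patch):

	cookie = dax_lock_folio(folio);		/* ~0UL under the stub */
	if (!cookie)
		return -EBUSY;
	...
	dax_unlock_folio(folio, cookie);	/* no-op under the stub */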
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -1713,20 +1713,23 @@ static void unmap_and_kill(struct list_head *to_kill, unsigned long pfn,
 	kill_procs(to_kill, flags & MF_MUST_KILL, false, pfn, flags);
 }
 
+/*
+ * Only dev_pagemap pages get here, such as fsdax when the filesystem
+ * either does not claim or fails to claim a hwpoison event, or devdax.
+ * The fsdax pages are initialized per base page, and the devdax pages
+ * could be initialized either as base pages, or as compound pages with
+ * vmemmap optimization enabled. Devdax is simplistic in its dealing with
+ * hwpoison: if a subpage of a compound page is poisoned, simply marking
+ * the compound head page is sufficient.
+ */
 static int mf_generic_kill_procs(unsigned long long pfn, int flags,
 		struct dev_pagemap *pgmap)
 {
-	struct page *page = pfn_to_page(pfn);
+	struct folio *folio = pfn_folio(pfn);
 	LIST_HEAD(to_kill);
 	dax_entry_t cookie;
 	int rc = 0;
 
-	/*
-	 * Pages instantiated by device-dax (not filesystem-dax)
-	 * may be compound pages.
-	 */
-	page = compound_head(page);
-
 	/*
 	 * Prevent the inode from being freed while we are interrogating
 	 * the address_space, typically this would be handled by
@@ -1734,11 +1737,11 @@ static int mf_generic_kill_procs(unsigned long long pfn, int flags,
 	 * also prevents changes to the mapping of this pfn until
 	 * poison signaling is complete.
 	 */
-	cookie = dax_lock_page(page);
+	cookie = dax_lock_folio(folio);
 	if (!cookie)
 		return -EBUSY;
 
-	if (hwpoison_filter(page)) {
+	if (hwpoison_filter(&folio->page)) {
 		rc = -EOPNOTSUPP;
 		goto unlock;
 	}
@@ -1760,7 +1763,7 @@ static int mf_generic_kill_procs(unsigned long long pfn, int flags,
 	 * Use this flag as an indication that the dax page has been
 	 * remapped UC to prevent speculative consumption of poison.
 	 */
-	SetPageHWPoison(page);
+	SetPageHWPoison(&folio->page);
 
 	/*
 	 * Unlike System-RAM there is no possibility to swap in a
@@ -1769,11 +1772,11 @@ static int mf_generic_kill_procs(unsigned long long pfn, int flags,
 	 * SIGBUS (i.e. MF_MUST_KILL)
 	 */
 	flags |= MF_ACTION_REQUIRED | MF_MUST_KILL;
-	collect_procs(page, &to_kill, true);
+	collect_procs(&folio->page, &to_kill, true);
 
-	unmap_and_kill(&to_kill, pfn, page->mapping, page->index, flags);
+	unmap_and_kill(&to_kill, pfn, folio->mapping, folio->index, flags);
 unlock:
-	dax_unlock_page(page, cookie);
+	dax_unlock_folio(folio, cookie);
 	return rc;
 }
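
For reference, pfn_folio() subsumes the pfn_to_page()/compound_head() pair removed above; at the time of this commit it is defined in include/linux/mm.h (quoted for context, not part of this patch) as:

	#define pfn_folio(pfn) (page_folio(pfn_to_page(pfn)))

Because page_folio() resolves a tail page to its compound head, the devdax compound-page case described in the new comment needs no explicit compound_head() call, and SetPageHWPoison(&folio->page) marks the head page exactly as before.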