Commit c5bbd451 authored by Matthew Wilcox

dax: Reinstate RCU protection of inode

For the device-dax case, it is possible that the inode can go away
underneath us.  The rcu_read_lock() was there to prevent it from
being freed, and not (as I thought) to protect the tree.  Bring back
the rcu_read_lock() protection.  Also add a little kernel-doc; while
this function is not exported to modules, it is used from outside dax.c
Reported-by: Dan Williams <dan.j.williams@intel.com>
Fixes: 9f32d221 ("dax: Convert dax_lock_mapping_entry to XArray")
Signed-off-by: Matthew Wilcox <willy@infradead.org>
parent 7ae2ea7d
...@@ -353,16 +353,27 @@ static struct page *dax_busy_page(void *entry) ...@@ -353,16 +353,27 @@ static struct page *dax_busy_page(void *entry)
return NULL; return NULL;
} }
/*
* dax_lock_mapping_entry - Lock the DAX entry corresponding to a page
* @page: The page whose entry we want to lock
*
* Context: Process context.
* Return: %true if the entry was locked or does not need to be locked.
*/
bool dax_lock_mapping_entry(struct page *page) bool dax_lock_mapping_entry(struct page *page)
{ {
XA_STATE(xas, NULL, 0); XA_STATE(xas, NULL, 0);
void *entry; void *entry;
bool locked;
/* Ensure page->mapping isn't freed while we look at it */
rcu_read_lock();
for (;;) { for (;;) {
struct address_space *mapping = READ_ONCE(page->mapping); struct address_space *mapping = READ_ONCE(page->mapping);
locked = false;
if (!dax_mapping(mapping)) if (!dax_mapping(mapping))
return false; break;
/* /*
* In the device-dax case there's no need to lock, a * In the device-dax case there's no need to lock, a
...@@ -371,8 +382,9 @@ bool dax_lock_mapping_entry(struct page *page) ...@@ -371,8 +382,9 @@ bool dax_lock_mapping_entry(struct page *page)
* otherwise we would not have a valid pfn_to_page() * otherwise we would not have a valid pfn_to_page()
* translation. * translation.
*/ */
locked = true;
if (S_ISCHR(mapping->host->i_mode)) if (S_ISCHR(mapping->host->i_mode))
return true; break;
xas.xa = &mapping->i_pages; xas.xa = &mapping->i_pages;
xas_lock_irq(&xas); xas_lock_irq(&xas);
...@@ -383,14 +395,18 @@ bool dax_lock_mapping_entry(struct page *page) ...@@ -383,14 +395,18 @@ bool dax_lock_mapping_entry(struct page *page)
xas_set(&xas, page->index); xas_set(&xas, page->index);
entry = xas_load(&xas); entry = xas_load(&xas);
if (dax_is_locked(entry)) { if (dax_is_locked(entry)) {
rcu_read_unlock();
entry = get_unlocked_entry(&xas); entry = get_unlocked_entry(&xas);
xas_unlock_irq(&xas); xas_unlock_irq(&xas);
rcu_read_lock();
continue; continue;
} }
dax_lock_entry(&xas, entry); dax_lock_entry(&xas, entry);
xas_unlock_irq(&xas); xas_unlock_irq(&xas);
return true; break;
} }
rcu_read_unlock();
return locked;
} }
void dax_unlock_mapping_entry(struct page *page) void dax_unlock_mapping_entry(struct page *page)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment