Commit fa82dcbf authored by Linus Torvalds

Merge tag 'dax-fixes-4.20-rc6' of git://git.kernel.org/pub/scm/linux/kernel/git/nvdimm/nvdimm

Pull dax fixes from Dan Williams:
 "The last of the known regression fixes and fallout from the Xarray
  conversion of the filesystem-dax implementation.

   While debugging why the dax memory-failure injection test started
   failing after the Xarray conversion, a couple more fixes for
   dax_lock_mapping_entry(), now called dax_lock_page(), surfaced.
   Those, plus the bug that started the hunt, are now addressed. These
   patches have appeared in a -next release with no issues reported.

  Note the touches to mm/memory-failure.c are just the conversion to the
  new function signature for dax_lock_page().

  Summary:

   - Fix the Xarray conversion of fsdax to properly handle
     dax_lock_mapping_entry() in the presence of pmd entries

   - Fix inode destruction racing a new lock request"

* tag 'dax-fixes-4.20-rc6' of git://git.kernel.org/pub/scm/linux/kernel/git/nvdimm/nvdimm:
  dax: Fix unlock mismatch with updated API
  dax: Don't access a freed inode
  dax: Check page->mapping isn't NULL
parents bd799eb6 27359fd6
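
For reference, the new calling convention at a glance: dax_lock_page()
returns an opaque dax_entry_t cookie (0 if the entry could not be
locked) that the caller hands back to dax_unlock_page(). A condensed
sketch of a caller, drawn from the mm/memory-failure.c conversion in
the diff below (kernel context, not a standalone program):

	dax_entry_t cookie;

	cookie = dax_lock_page(page);
	if (!cookie)
		goto out;		/* entry could not be locked */
	/* ... the pfn's mapping is now stable; interrogate it ... */
	dax_unlock_page(page, cookie);	/* hand the same cookie back */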
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -232,6 +232,34 @@ static void *get_unlocked_entry(struct xa_state *xas)
 	}
 }
 
+/*
+ * The only thing keeping the address space around is the i_pages lock
+ * (it's cycled in clear_inode() after removing the entries from i_pages)
+ * After we call xas_unlock_irq(), we cannot touch xas->xa.
+ */
+static void wait_entry_unlocked(struct xa_state *xas, void *entry)
+{
+	struct wait_exceptional_entry_queue ewait;
+	wait_queue_head_t *wq;
+
+	init_wait(&ewait.wait);
+	ewait.wait.func = wake_exceptional_entry_func;
+
+	wq = dax_entry_waitqueue(xas, entry, &ewait.key);
+	prepare_to_wait_exclusive(wq, &ewait.wait, TASK_UNINTERRUPTIBLE);
+	xas_unlock_irq(xas);
+	schedule();
+	finish_wait(wq, &ewait.wait);
+
+	/*
+	 * Entry lock waits are exclusive. Wake up the next waiter since
+	 * we aren't sure we will acquire the entry lock and thus wake
+	 * the next waiter up on unlock.
+	 */
+	if (waitqueue_active(wq))
+		__wake_up(wq, TASK_NORMAL, 1, &ewait.key);
+}
+
 static void put_unlocked_entry(struct xa_state *xas, void *entry)
 {
 	/* If we were the only waiter woken, wake the next one */
@@ -351,21 +379,21 @@ static struct page *dax_busy_page(void *entry)
  * @page: The page whose entry we want to lock
  *
  * Context: Process context.
- * Return: %true if the entry was locked or does not need to be locked.
+ * Return: A cookie to pass to dax_unlock_page() or 0 if the entry could
+ * not be locked.
  */
-bool dax_lock_mapping_entry(struct page *page)
+dax_entry_t dax_lock_page(struct page *page)
 {
 	XA_STATE(xas, NULL, 0);
 	void *entry;
-	bool locked;
 
 	/* Ensure page->mapping isn't freed while we look at it */
 	rcu_read_lock();
 	for (;;) {
 		struct address_space *mapping = READ_ONCE(page->mapping);
 
-		locked = false;
-		if (!dax_mapping(mapping))
+		entry = NULL;
+		if (!mapping || !dax_mapping(mapping))
 			break;
 
 		/*
@@ -375,7 +403,7 @@ bool dax_lock_mapping_entry(struct page *page)
 		 * otherwise we would not have a valid pfn_to_page()
 		 * translation.
 		 */
-		locked = true;
+		entry = (void *)~0UL;
 		if (S_ISCHR(mapping->host->i_mode))
 			break;
 
@@ -389,9 +417,7 @@ bool dax_lock_mapping_entry(struct page *page)
 		entry = xas_load(&xas);
 		if (dax_is_locked(entry)) {
 			rcu_read_unlock();
-			entry = get_unlocked_entry(&xas);
-			xas_unlock_irq(&xas);
-			put_unlocked_entry(&xas, entry);
+			wait_entry_unlocked(&xas, entry);
 			rcu_read_lock();
 			continue;
 		}
@@ -400,23 +426,18 @@ bool dax_lock_mapping_entry(struct page *page)
 		break;
 	}
 	rcu_read_unlock();
-	return locked;
+	return (dax_entry_t)entry;
 }
 
-void dax_unlock_mapping_entry(struct page *page)
+void dax_unlock_page(struct page *page, dax_entry_t cookie)
 {
 	struct address_space *mapping = page->mapping;
 	XA_STATE(xas, &mapping->i_pages, page->index);
-	void *entry;
 
 	if (S_ISCHR(mapping->host->i_mode))
 		return;
 
-	rcu_read_lock();
-	entry = xas_load(&xas);
-	rcu_read_unlock();
-	entry = dax_make_entry(page_to_pfn_t(page), dax_is_pmd_entry(entry));
-	dax_unlock_entry(&xas, entry);
+	dax_unlock_entry(&xas, (void *)cookie);
 }
 
 /*
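
The comment block in wait_entry_unlocked() above captures the subtle
part: entry lock waits are exclusive, so an unlock wakes exactly one
sleeper, and a sleeper that wakes without going on to take the entry
lock must forward the wakeup or later sleepers could stall forever.
A minimal userspace analogue of that hand-off, using pthreads (the
names and the entry_locked flag are illustrative stand-ins, not
kernel API):

	#include <pthread.h>
	#include <stdbool.h>

	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	static pthread_cond_t waitq = PTHREAD_COND_INITIALIZER;
	static bool entry_locked;	/* stands in for a locked Xarray entry */

	/* Rough analogue of wait_entry_unlocked(). */
	static void wait_entry_unlocked_analogue(void)
	{
		pthread_mutex_lock(&lock);
		while (entry_locked)
			pthread_cond_wait(&waitq, &lock); /* one waiter wakes per signal */
		pthread_mutex_unlock(&lock);
		/*
		 * We consumed a wakeup but are not taking the entry lock
		 * ourselves (the real caller retries from scratch), so pass
		 * the wakeup along, mirroring the __wake_up() in the patch.
		 */
		pthread_cond_signal(&waitq);
	}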
--- a/include/linux/dax.h
+++ b/include/linux/dax.h
@@ -7,6 +7,8 @@
 #include <linux/radix-tree.h>
 #include <asm/pgtable.h>
 
+typedef unsigned long dax_entry_t;
+
 struct iomap_ops;
 struct dax_device;
 struct dax_operations {
@@ -88,8 +90,8 @@ int dax_writeback_mapping_range(struct address_space *mapping,
 		struct block_device *bdev, struct writeback_control *wbc);
 struct page *dax_layout_busy_page(struct address_space *mapping);
-bool dax_lock_mapping_entry(struct page *page);
-void dax_unlock_mapping_entry(struct page *page);
+dax_entry_t dax_lock_page(struct page *page);
+void dax_unlock_page(struct page *page, dax_entry_t cookie);
 #else
 static inline bool bdev_dax_supported(struct block_device *bdev,
 		int blocksize)
@@ -122,14 +124,14 @@ static inline int dax_writeback_mapping_range(struct address_space *mapping,
 	return -EOPNOTSUPP;
 }
 
-static inline bool dax_lock_mapping_entry(struct page *page)
+static inline dax_entry_t dax_lock_page(struct page *page)
 {
 	if (IS_DAX(page->mapping->host))
-		return true;
-	return false;
+		return ~0UL;
+	return 0;
 }
 
-static inline void dax_unlock_mapping_entry(struct page *page)
+static inline void dax_unlock_page(struct page *page, dax_entry_t cookie)
 {
 }
 #endif
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -1161,6 +1161,7 @@ static int memory_failure_dev_pagemap(unsigned long pfn, int flags,
 	LIST_HEAD(tokill);
 	int rc = -EBUSY;
 	loff_t start;
+	dax_entry_t cookie;
 
 	/*
 	 * Prevent the inode from being freed while we are interrogating
@@ -1169,7 +1170,8 @@ static int memory_failure_dev_pagemap(unsigned long pfn, int flags,
 	 * also prevents changes to the mapping of this pfn until
 	 * poison signaling is complete.
 	 */
-	if (!dax_lock_mapping_entry(page))
+	cookie = dax_lock_page(page);
+	if (!cookie)
 		goto out;
 
 	if (hwpoison_filter(page)) {
@@ -1220,7 +1222,7 @@ static int memory_failure_dev_pagemap(unsigned long pfn, int flags,
 	kill_procs(&tokill, flags & MF_MUST_KILL, !unmap_success, pfn, flags);
 	rc = 0;
 unlock:
-	dax_unlock_mapping_entry(page);
+	dax_unlock_page(page, cookie);
 out:
 	/* drop pgmap ref acquired in caller */
 	put_dev_pagemap(pgmap);