Commit 2990c89d authored by Linus Torvalds

Merge tag 'netfs-fixes-20211207' of git://git.kernel.org/pub/scm/linux/kernel/git/dhowells/linux-fs

Pull netfslib fixes from David Howells:

 - Fix a lockdep warning and potential deadlock. This takes the simple
   approach of offloading the write-to-cache done from within a
   network filesystem read to a worker thread to avoid taking the
   sb_writer lock from the cache backing filesystem whilst holding the
   mmap lock on an inode from the network filesystem (a hedged sketch
   of this deferral pattern follows the reference links below).

   Jan Kara posits a scenario whereby this can cause deadlock[1], though
   it's quite complex and I think requires someone in userspace to
   actually do I/O on the cache files. Matthew Wilcox isn't so certain,
   though[2].

   An alternative way to fix this, suggested by Darrick Wong, might be
   to allow cachefiles to prevent userspace from performing I/O upon the
   file - something like an exclusive open - but that's beyond the scope
   of a fix here, even if we do want to add such a facility in the
   future.

 - In some of the error handling paths where netfs_ops->cleanup() is
   called, the arguments are transposed[3]. gcc doesn't complain because
   one of the parameters is a void * and one of the values is a void *,
   so both swapped arguments convert implicitly (the small example below
   shows why).
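
   A minimal standalone illustration of why the compiler stays silent -
   plain userspace C with made-up names, not the kernel code itself: a
   void * converts implicitly to any object pointer type and vice versa,
   so swapping a void * value with a typed pointer produces no diagnostic.

	#include <stdio.h>

	struct address_space { int dummy; };

	/* Same shape as netfs_ops->cleanup(): one typed pointer, one void *. */
	static void cleanup(struct address_space *mapping, void *netfs_priv)
	{
		printf("mapping=%p netfs_priv=%p\n", (void *)mapping, netfs_priv);
	}

	int main(void)
	{
		struct address_space mapping = { 0 };
		int priv_data = 42;
		void *netfs_priv = &priv_data;

		cleanup(&mapping, netfs_priv);	/* intended argument order */
		cleanup(netfs_priv, &mapping);	/* transposed, yet it compiles
						 * cleanly: the void * converts
						 * implicitly to
						 * struct address_space *, and
						 * &mapping converts implicitly
						 * to void * */
		return 0;
	}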

Link: https://lore.kernel.org/r/20210922110420.GA21576@quack2.suse.cz/ [1]
Link: https://lore.kernel.org/r/Ya9eDiFCE2fO7K/S@casper.infradead.org/ [2]
Link: https://lore.kernel.org/r/20211207031449.100510-1-jefflexu@linux.alibaba.com/ [3]
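
A hedged sketch of the deferral pattern behind the first fix, written as a
toy, self-contained kernel module rather than the real netfs code (all
demo_* names are made up): the step that may need sb_writers is never run
inline, it is always queued on system_unbound_wq, so it executes in
worker-thread context where the original caller's mmap lock is not held.

	#include <linux/module.h>
	#include <linux/workqueue.h>

	/* Runs in a kworker: in the real patch this is where
	 * netfs_rreq_do_write_to_cache() happens, so any sb_writers
	 * acquisition by the backing filesystem does not nest inside
	 * locks held by the original reader. */
	static void demo_write_to_cache_work(struct work_struct *work)
	{
		pr_info("deferred cache write running in worker context\n");
	}

	static DECLARE_WORK(demo_work, demo_write_to_cache_work);

	static int __init demo_init(void)
	{
		/* Mirror the patch: always defer, never write to the cache inline. */
		if (!queue_work(system_unbound_wq, &demo_work))
			pr_warn("demo work was already queued\n");
		return 0;
	}

	static void __exit demo_exit(void)
	{
		flush_work(&demo_work);
	}

	module_init(demo_init);
	module_exit(demo_exit);
	MODULE_LICENSE("GPL");
	MODULE_DESCRIPTION("toy sketch of deferring work to system_unbound_wq");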

* tag 'netfs-fixes-20211207' of git://git.kernel.org/pub/scm/linux/kernel/git/dhowells/linux-fs:
  netfs: fix parameter of cleanup()
  netfs: Fix lockdep warning from taking sb_writers whilst holding mmap_lock
parents 3a49cc22 3cfef1b6
@@ -354,16 +354,11 @@ static void netfs_rreq_write_to_cache_work(struct work_struct *work)
 	netfs_rreq_do_write_to_cache(rreq);
 }
 
-static void netfs_rreq_write_to_cache(struct netfs_read_request *rreq,
-				      bool was_async)
+static void netfs_rreq_write_to_cache(struct netfs_read_request *rreq)
 {
-	if (was_async) {
-		rreq->work.func = netfs_rreq_write_to_cache_work;
-		if (!queue_work(system_unbound_wq, &rreq->work))
-			BUG();
-	} else {
-		netfs_rreq_do_write_to_cache(rreq);
-	}
+	rreq->work.func = netfs_rreq_write_to_cache_work;
+	if (!queue_work(system_unbound_wq, &rreq->work))
+		BUG();
 }
 
 /*
@@ -558,7 +553,7 @@ static void netfs_rreq_assess(struct netfs_read_request *rreq, bool was_async)
 	wake_up_bit(&rreq->flags, NETFS_RREQ_IN_PROGRESS);
 
 	if (test_bit(NETFS_RREQ_WRITE_TO_CACHE, &rreq->flags))
-		return netfs_rreq_write_to_cache(rreq, was_async);
+		return netfs_rreq_write_to_cache(rreq);
 
 	netfs_rreq_completed(rreq, was_async);
 }
@@ -960,7 +955,7 @@ int netfs_readpage(struct file *file,
 	rreq = netfs_alloc_read_request(ops, netfs_priv, file);
 	if (!rreq) {
 		if (netfs_priv)
-			ops->cleanup(netfs_priv, folio_file_mapping(folio));
+			ops->cleanup(folio_file_mapping(folio), netfs_priv);
 		folio_unlock(folio);
 		return -ENOMEM;
 	}
@@ -1191,7 +1186,7 @@ int netfs_write_begin(struct file *file, struct address_space *mapping,
 		goto error;
 have_folio_no_wait:
 	if (netfs_priv)
-		ops->cleanup(netfs_priv, mapping);
+		ops->cleanup(mapping, netfs_priv);
 	*_folio = folio;
 	_leave(" = 0");
 	return 0;
@@ -1202,7 +1197,7 @@ int netfs_write_begin(struct file *file, struct address_space *mapping,
 	folio_unlock(folio);
 	folio_put(folio);
 	if (netfs_priv)
-		ops->cleanup(netfs_priv, mapping);
+		ops->cleanup(mapping, netfs_priv);
 	_leave(" = %d", ret);
 	return ret;
 }