Commit 7c4310ff authored by NeilBrown, committed by Trond Myklebust

SUNRPC: defer slow parts of rpc_free_client() to a workqueue.

The rpciod workqueue is on the write-out path for freeing dirty memory,
so it is important that it never block waiting for memory to be
allocated - this can lead to a deadlock.

rpc_execute() - which is often called by an rpciod work item - calls
rpc_task_release_client() which can lead to rpc_free_client().

rpc_free_client() makes two calls which could potentially block waiting
for memory allocation.

rpc_clnt_debugfs_unregister() calls into debugfs and will block while
any of the debugfs files are being accessed.  In particular it can block
while any of the 'open' methods are being called, and all of these use
malloc for one thing or another.  So this can deadlock if the memory
allocation waits for NFS to complete some writes via rpciod.

rpc_clnt_remove_pipedir() can take the inode_lock() and, while it isn't
obvious that memory allocations can happen while the lock is held, it is
safer to assume they might and to not let rpciod call
rpc_clnt_remove_pipedir().

So this patch moves these two calls (together with the final kfree() and
rpciod_down()) into a work-item to be run from the system work-queue.
rpciod can continue its important work, and the final stages of the free
can happen whenever they happen.
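
In outline, the change looks like this (a condensed sketch of the new
code; the complete hunks follow in the diff below):

    static void rpc_free_client_work(struct work_struct *work)
    {
            struct rpc_clnt *clnt = container_of(work, struct rpc_clnt, cl_work);

            /* these may block waiting for memory, so must not run on rpciod */
            rpc_clnt_debugfs_unregister(clnt);
            rpc_clnt_remove_pipedir(clnt);
            kfree(clnt);
            rpciod_down();
    }

and rpc_free_client() now ends by queueing that work item instead of
calling these functions directly:

            INIT_WORK(&clnt->cl_work, rpc_free_client_work);
            schedule_work(&clnt->cl_work);

Note that rpciod_down() is deferred as well, so rpciod stays up until
the work item has run.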

I have seen this deadlock on a 4.12-based kernel where debugfs used
synchronize_srcu() when removing objects.  synchronize_srcu() requires a
workqueue, and there were no free worker threads and none could be
allocated.  While debugfs no longer uses SRCU, I believe the deadlock
is still possible.
Signed-off-by: NeilBrown <neilb@suse.de>
Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
parent 6e47666e
@@ -71,7 +71,13 @@ struct rpc_clnt {
 #if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
 	struct dentry		*cl_debugfs;	/* debugfs directory */
 #endif
-	struct rpc_xprt_iter	cl_xpi;
+	/* cl_work is only needed after cl_xpi is no longer used,
+	 * and they are of similar size.
+	 */
+	union {
+		struct rpc_xprt_iter	cl_xpi;
+		struct work_struct	cl_work;
+	};
 	const struct cred	*cl_cred;
 };
@@ -880,6 +880,20 @@ EXPORT_SYMBOL_GPL(rpc_shutdown_client);
 /*
  * Free an RPC client
  */
+static void rpc_free_client_work(struct work_struct *work)
+{
+	struct rpc_clnt *clnt = container_of(work, struct rpc_clnt, cl_work);
+
+	/* These might block on processes that might allocate memory,
+	 * so they cannot be called in rpciod; they are handled separately
+	 * here.
+	 */
+	rpc_clnt_debugfs_unregister(clnt);
+	rpc_clnt_remove_pipedir(clnt);
+	kfree(clnt);
+	rpciod_down();
+}
+
 static struct rpc_clnt *
 rpc_free_client(struct rpc_clnt *clnt)
 {
@@ -890,17 +904,16 @@ rpc_free_client(struct rpc_clnt *clnt)
 		rcu_dereference(clnt->cl_xprt)->servername);
 	if (clnt->cl_parent != clnt)
 		parent = clnt->cl_parent;
-	rpc_clnt_debugfs_unregister(clnt);
-	rpc_clnt_remove_pipedir(clnt);
 	rpc_unregister_client(clnt);
 	rpc_free_iostats(clnt->cl_metrics);
 	clnt->cl_metrics = NULL;
 	xprt_put(rcu_dereference_raw(clnt->cl_xprt));
 	xprt_iter_destroy(&clnt->cl_xpi);
-	rpciod_down();
 	put_cred(clnt->cl_cred);
 	rpc_free_clid(clnt);
-	kfree(clnt);
+
+	INIT_WORK(&clnt->cl_work, rpc_free_client_work);
+	schedule_work(&clnt->cl_work);
 	return parent;
 }