Commit 66daeec3 authored by Trond Myklebust, committed by Greg Kroah-Hartman

NFSv4: Fix a potential sleep while atomic in nfs4_do_reclaim()

[ Upstream commit c77e2283 ]

John Hubbard reports seeing the following stack trace:

nfs4_do_reclaim
   rcu_read_lock /* we are now in_atomic() and must not sleep */
       nfs4_purge_state_owners
           nfs4_free_state_owner
               nfs4_destroy_seqid_counter
                   rpc_destroy_wait_queue
                       cancel_delayed_work_sync
                           __cancel_work_timer
                               __flush_work
                                   start_flush_work
                                       might_sleep:
                                        (kernel/workqueue.c:2975: BUG)

The solution is to separate out the freeing of the state owners
from nfs4_purge_state_owners(), and perform that outside the atomic
context.
Reported-by: John Hubbard <jhubbard@nvidia.com>
Fixes: 0aaaf5c4 ("NFS: Cache state owners after files are closed")
Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
parent d1ba0b81
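
For readers unfamiliar with the idiom, the patch applies the common defer-the-free pattern: while the lock is held (here, inside rcu_read_lock()), entries are only unlinked and collected onto a caller-supplied list, and the freeing that may sleep happens after the lock is dropped. Below is a simplified, hypothetical userspace sketch of that pattern only; struct owner, purge_owners() and free_owners() are made-up names, and a pthread mutex stands in for the kernel's RCU read lock and spinlock. The actual NFS change is in the diff that follows.

/*
 * Sketch of the collect-then-free pattern: unlink under the lock,
 * free outside it.  Hypothetical names; not the NFS implementation.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct owner {
	int id;
	struct owner *next;
};

static pthread_mutex_t cache_lock = PTHREAD_MUTEX_INITIALIZER;
static struct owner *cache;	/* cached entries, protected by cache_lock */

/* Unlink every cached entry onto *head; no free() here, so nothing blocks
 * while the lock (standing in for the atomic context) is held. */
static void purge_owners(struct owner **head)
{
	pthread_mutex_lock(&cache_lock);
	*head = cache;
	cache = NULL;
	pthread_mutex_unlock(&cache_lock);
}

/* Free a list produced by purge_owners(); safe in a blocking context. */
static void free_owners(struct owner *head)
{
	while (head) {
		struct owner *next = head->next;
		printf("freeing owner %d\n", head->id);
		free(head);
		head = next;
	}
}

int main(void)
{
	for (int i = 0; i < 3; i++) {
		struct owner *o = malloc(sizeof(*o));
		o->id = i;
		o->next = cache;
		cache = o;
	}

	struct owner *freeme = NULL;
	purge_owners(&freeme);	/* "atomic" part: unlink only */
	free_owners(freeme);	/* blocking part: free after the lock is dropped */
	return 0;
}
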
fs/nfs/nfs4_fs.h
@@ -469,7 +469,8 @@ static inline void nfs4_schedule_session_recovery(struct nfs4_session *session,
 
 extern struct nfs4_state_owner *nfs4_get_state_owner(struct nfs_server *, struct rpc_cred *, gfp_t);
 extern void nfs4_put_state_owner(struct nfs4_state_owner *);
-extern void nfs4_purge_state_owners(struct nfs_server *);
+extern void nfs4_purge_state_owners(struct nfs_server *, struct list_head *);
+extern void nfs4_free_state_owners(struct list_head *head);
 extern struct nfs4_state * nfs4_get_open_state(struct inode *, struct nfs4_state_owner *);
 extern void nfs4_put_open_state(struct nfs4_state *);
 extern void nfs4_close_state(struct nfs4_state *, fmode_t);
fs/nfs/nfs4client.c
@@ -754,9 +754,12 @@ int nfs41_walk_client_list(struct nfs_client *new,
 
 static void nfs4_destroy_server(struct nfs_server *server)
 {
+	LIST_HEAD(freeme);
+
 	nfs_server_return_all_delegations(server);
 	unset_pnfs_layoutdriver(server);
-	nfs4_purge_state_owners(server);
+	nfs4_purge_state_owners(server, &freeme);
+	nfs4_free_state_owners(&freeme);
 }
 
 /*
fs/nfs/nfs4state.c
@@ -628,24 +628,39 @@ void nfs4_put_state_owner(struct nfs4_state_owner *sp)
 /**
  * nfs4_purge_state_owners - Release all cached state owners
  * @server: nfs_server with cached state owners to release
+ * @head: resulting list of state owners
  *
  * Called at umount time. Remaining state owners will be on
  * the LRU with ref count of zero.
+ * Note that the state owners are not freed, but are added
+ * to the list @head, which can later be used as an argument
+ * to nfs4_free_state_owners.
  */
-void nfs4_purge_state_owners(struct nfs_server *server)
+void nfs4_purge_state_owners(struct nfs_server *server, struct list_head *head)
 {
 	struct nfs_client *clp = server->nfs_client;
 	struct nfs4_state_owner *sp, *tmp;
-	LIST_HEAD(doomed);
 
 	spin_lock(&clp->cl_lock);
 	list_for_each_entry_safe(sp, tmp, &server->state_owners_lru, so_lru) {
-		list_move(&sp->so_lru, &doomed);
+		list_move(&sp->so_lru, head);
 		nfs4_remove_state_owner_locked(sp);
 	}
	spin_unlock(&clp->cl_lock);
+}
 
-	list_for_each_entry_safe(sp, tmp, &doomed, so_lru) {
+/**
+ * nfs4_purge_state_owners - Release all cached state owners
+ * @head: resulting list of state owners
+ *
+ * Frees a list of state owners that was generated by
+ * nfs4_purge_state_owners
+ */
+void nfs4_free_state_owners(struct list_head *head)
+{
+	struct nfs4_state_owner *sp, *tmp;
+
+	list_for_each_entry_safe(sp, tmp, head, so_lru) {
 		list_del(&sp->so_lru);
 		nfs4_free_state_owner(sp);
 	}
@@ -1843,12 +1858,13 @@ static int nfs4_do_reclaim(struct nfs_client *clp, const struct nfs4_state_recov
 	struct nfs4_state_owner *sp;
 	struct nfs_server *server;
 	struct rb_node *pos;
+	LIST_HEAD(freeme);
 	int status = 0;
 
 restart:
 	rcu_read_lock();
 	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
-		nfs4_purge_state_owners(server);
+		nfs4_purge_state_owners(server, &freeme);
 		spin_lock(&clp->cl_lock);
 		for (pos = rb_first(&server->state_owners);
 		     pos != NULL;
@@ -1877,6 +1893,7 @@ static int nfs4_do_reclaim(struct nfs_client *clp, const struct nfs4_state_recov
 		spin_unlock(&clp->cl_lock);
 	}
 	rcu_read_unlock();
+	nfs4_free_state_owners(&freeme);
 	return 0;
 }
 