Commit 60034d3d authored by David Howells

rxrpc: Fix potential deadlock

There is a potential deadlock in rxrpc_peer_keepalive_dispatch() whereby
rxrpc_put_peer() is called with the peer_hash_lock held, but if it reduces
the peer's refcount to 0, rxrpc_put_peer() calls __rxrpc_put_peer() - which
then tries to take the already held lock.

Fix this by providing a version of rxrpc_put_peer() that can be called in
situations where the lock is already held.
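
For illustration only, here is a minimal userspace sketch of the locking pattern involved (assumed names such as peer_put()/peer_put_locked(); this is not the rxrpc code, and the kernel's atomic refcounts, RCU-deferred freeing and spin_lock_bh() are replaced by a plain int and a pthread mutex). The point is that a put routine which takes the hash lock itself must never be called while that lock is held, so a *_locked variant is needed for callers that already hold it:

	/*
	 * Minimal sketch of the locking pattern only -- assumed names, not the
	 * rxrpc implementation.  The kernel code uses atomic refcounts, RCU and
	 * spin_lock_bh(); plain ints and a pthread mutex stand in for them here.
	 */
	#include <pthread.h>
	#include <stdlib.h>

	struct peer {
		int usage;			/* simplified, non-atomic refcount */
	};

	static pthread_mutex_t peer_hash_lock = PTHREAD_MUTEX_INITIALIZER;

	static void peer_unhash_and_free(struct peer *peer)
	{
		/* would remove the peer from the hash table and keepalive list */
		free(peer);
	}

	/* Takes peer_hash_lock itself, so must not be called with it held. */
	static void peer_put(struct peer *peer)
	{
		if (--peer->usage == 0) {
			pthread_mutex_lock(&peer_hash_lock);	/* deadlocks if the caller holds it */
			peer_unhash_and_free(peer);
			pthread_mutex_unlock(&peer_hash_lock);
		}
	}

	/* Variant for callers that already hold peer_hash_lock. */
	static void peer_put_locked(struct peer *peer)
	{
		if (--peer->usage == 0)
			peer_unhash_and_free(peer);
	}

	static void keepalive_dispatch(struct peer *peer)
	{
		pthread_mutex_lock(&peer_hash_lock);
		/* ... requeue the peer on a keepalive bucket ... */
		peer_put_locked(peer);		/* peer_put() here would self-deadlock */
		pthread_mutex_unlock(&peer_hash_lock);
	}

	int main(void)
	{
		struct peer *peer = calloc(1, sizeof(*peer));

		peer->usage = 1;
		keepalive_dispatch(peer);	/* drops the last ref under the lock safely */
		return 0;
	}

In the actual patch below, rxrpc_peer_keepalive_dispatch() switches to the new rxrpc_put_peer_locked(), which drops the reference and unhashes the peer without retaking peer_hash_lock; freeing is deferred via kfree_rcu().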

The bug may produce the following lockdep report:

============================================
WARNING: possible recursive locking detected
5.2.0-next-20190718 #41 Not tainted
--------------------------------------------
kworker/0:3/21678 is trying to acquire lock:
00000000aa5eecdf (&(&rxnet->peer_hash_lock)->rlock){+.-.}, at: spin_lock_bh
/./include/linux/spinlock.h:343 [inline]
00000000aa5eecdf (&(&rxnet->peer_hash_lock)->rlock){+.-.}, at:
__rxrpc_put_peer /net/rxrpc/peer_object.c:415 [inline]
00000000aa5eecdf (&(&rxnet->peer_hash_lock)->rlock){+.-.}, at:
rxrpc_put_peer+0x2d3/0x6a0 /net/rxrpc/peer_object.c:435

but task is already holding lock:
00000000aa5eecdf (&(&rxnet->peer_hash_lock)->rlock){+.-.}, at: spin_lock_bh
/./include/linux/spinlock.h:343 [inline]
00000000aa5eecdf (&(&rxnet->peer_hash_lock)->rlock){+.-.}, at:
rxrpc_peer_keepalive_dispatch /net/rxrpc/peer_event.c:378 [inline]
00000000aa5eecdf (&(&rxnet->peer_hash_lock)->rlock){+.-.}, at:
rxrpc_peer_keepalive_worker+0x6b3/0xd02 /net/rxrpc/peer_event.c:430

Fixes: 330bdcfa ("rxrpc: Fix the keepalive generator [ver #2]")
Reported-by: syzbot+72af434e4b3417318f84@syzkaller.appspotmail.com
Signed-off-by: David Howells <dhowells@redhat.com>
Reviewed-by: Marc Dionne <marc.dionne@auristor.com>
Reviewed-by: Jeffrey Altman <jaltman@auristor.com>
parent a20961cc
@@ -1061,6 +1061,7 @@ void rxrpc_destroy_all_peers(struct rxrpc_net *);
 struct rxrpc_peer *rxrpc_get_peer(struct rxrpc_peer *);
 struct rxrpc_peer *rxrpc_get_peer_maybe(struct rxrpc_peer *);
 void rxrpc_put_peer(struct rxrpc_peer *);
+void rxrpc_put_peer_locked(struct rxrpc_peer *);

 /*
  * proc.c
...@@ -378,7 +378,7 @@ static void rxrpc_peer_keepalive_dispatch(struct rxrpc_net *rxnet, ...@@ -378,7 +378,7 @@ static void rxrpc_peer_keepalive_dispatch(struct rxrpc_net *rxnet,
spin_lock_bh(&rxnet->peer_hash_lock); spin_lock_bh(&rxnet->peer_hash_lock);
list_add_tail(&peer->keepalive_link, list_add_tail(&peer->keepalive_link,
&rxnet->peer_keepalive[slot & mask]); &rxnet->peer_keepalive[slot & mask]);
rxrpc_put_peer(peer); rxrpc_put_peer_locked(peer);
} }
spin_unlock_bh(&rxnet->peer_hash_lock); spin_unlock_bh(&rxnet->peer_hash_lock);
......
@@ -436,6 +436,24 @@ void rxrpc_put_peer(struct rxrpc_peer *peer)
 	}
 }

+/*
+ * Drop a ref on a peer record where the caller already holds the
+ * peer_hash_lock.
+ */
+void rxrpc_put_peer_locked(struct rxrpc_peer *peer)
+{
+	const void *here = __builtin_return_address(0);
+	int n;
+
+	n = atomic_dec_return(&peer->usage);
+	trace_rxrpc_peer(peer, rxrpc_peer_put, n, here);
+	if (n == 0) {
+		hash_del_rcu(&peer->hash_link);
+		list_del_init(&peer->keepalive_link);
+		kfree_rcu(peer, rcu);
+	}
+}
+
 /*
  * Make sure all peer records have been discarded.
  */