Commit d3be4d24 authored by David Howells

rxrpc: Fix potential call vs socket/net destruction race

rxrpc_call structs don't pin sockets or network namespaces, but may attempt
to access both after their refcount reaches 0 so that they can detach
themselves from the network namespace.  However, there's no guarantee that
the socket still exists at this point (so sock_net(&call->socket->sk) may
be invalid) and the namespace may have gone away if the call isn't pinning
a peer.

Fix this by (a) carrying a net pointer in the rxrpc_call struct and (b)
waiting for all calls to be destroyed when the network namespace goes away.
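
To illustrate point (b), here is a minimal userspace sketch of the counting rule, using C11 atomics and a pthread condition variable in place of the kernel's atomic_t/wait_on_atomic_t machinery; the fake_net/fake_call_* names are purely illustrative and are not rxrpc APIs. The namespace seeds the counter to 1, each call accounts for itself, and namespace teardown drops the initial count and sleeps until the counter reaches zero:

    #include <pthread.h>
    #include <stdatomic.h>

    struct fake_net {
    	atomic_int nr_calls;		/* biased to 1 for as long as the net exists */
    	pthread_mutex_t lock;
    	pthread_cond_t zero;		/* signalled when nr_calls drops to 0 */
    };

    static void fake_net_init(struct fake_net *net)
    {
    	atomic_init(&net->nr_calls, 1);		/* like atomic_set(&rxnet->nr_calls, 1) */
    	pthread_mutex_init(&net->lock, NULL);
    	pthread_cond_init(&net->zero, NULL);
    }

    static void fake_call_alloc(struct fake_net *net)
    {
    	atomic_fetch_add(&net->nr_calls, 1);	/* like atomic_inc(&rxnet->nr_calls) */
    }

    static void fake_call_destroy(struct fake_net *net)
    {
    	/* like atomic_dec_and_test() followed by a wake-up of the waiter */
    	if (atomic_fetch_sub(&net->nr_calls, 1) == 1) {
    		pthread_mutex_lock(&net->lock);
    		pthread_cond_broadcast(&net->zero);
    		pthread_mutex_unlock(&net->lock);
    	}
    }

    static void fake_net_exit(struct fake_net *net)
    {
    	/* drop the initial count, then wait for the last call to go away */
    	fake_call_destroy(net);
    	pthread_mutex_lock(&net->lock);
    	while (atomic_load(&net->nr_calls) != 0)
    		pthread_cond_wait(&net->zero, &net->lock);
    	pthread_mutex_unlock(&net->lock);
    }

    int main(void)
    {
    	struct fake_net net;

    	fake_net_init(&net);
    	fake_call_alloc(&net);		/* a call is created... */
    	fake_call_destroy(&net);	/* ...and later destroyed */
    	fake_net_exit(&net);		/* returns only once nr_calls has hit 0 */
    	return 0;
    }

Seeding the counter to 1 rather than 0 means it cannot reach zero while calls are still being created and destroyed; only the teardown path, after dropping its own count, can observe zero and proceed.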

This was detected by a checker warning:

net/rxrpc/call_object.c:634:57: warning: incorrect type in argument 1 (different address spaces)
net/rxrpc/call_object.c:634:57:    expected struct sock const *sk
net/rxrpc/call_object.c:634:57:    got struct sock [noderef] <asn:4>*<noident>

Fixes: 2baec2c3 ("rxrpc: Support network namespacing")
Signed-off-by: David Howells <dhowells@redhat.com>
parent 88f2a825
--- a/net/rxrpc/ar-internal.h
+++ b/net/rxrpc/ar-internal.h
@@ -75,6 +75,7 @@ struct rxrpc_net {
 	u32			epoch;		/* Local epoch for detecting local-end reset */
 	struct list_head	calls;		/* List of calls active in this namespace */
 	rwlock_t		call_lock;	/* Lock for ->calls */
+	atomic_t		nr_calls;	/* Count of allocated calls */
 
 	struct list_head	conn_proc_list;	/* List of conns in this namespace for proc */
 	struct list_head	service_conns;	/* Service conns in this namespace */
@@ -528,6 +529,7 @@ struct rxrpc_call {
 	struct rxrpc_connection	*conn;		/* connection carrying call */
 	struct rxrpc_peer	*peer;		/* Peer record for remote address */
 	struct rxrpc_sock __rcu	*socket;	/* socket responsible */
+	struct rxrpc_net	*rxnet;		/* Network namespace to which call belongs */
 	struct mutex		user_mutex;	/* User access mutex */
 	unsigned long		ack_at;		/* When deferred ACK needs to happen */
 	unsigned long		ack_lost_at;	/* When ACK is figured as lost */
--- a/net/rxrpc/call_accept.c
+++ b/net/rxrpc/call_accept.c
@@ -138,6 +138,7 @@ static int rxrpc_service_prealloc_one(struct rxrpc_sock *rx,
 	write_unlock(&rx->call_lock);
 
+	rxnet = call->rxnet;
 	write_lock(&rxnet->call_lock);
 	list_add_tail(&call->link, &rxnet->calls);
 	write_unlock(&rxnet->call_lock);
--- a/net/rxrpc/call_object.c
+++ b/net/rxrpc/call_object.c
@@ -103,6 +103,7 @@ struct rxrpc_call *rxrpc_alloc_call(struct rxrpc_sock *rx, gfp_t gfp,
 				    unsigned int debug_id)
 {
 	struct rxrpc_call *call;
+	struct rxrpc_net *rxnet = rxrpc_net(sock_net(&rx->sk));
 
 	call = kmem_cache_zalloc(rxrpc_call_jar, gfp);
 	if (!call)
@@ -153,6 +154,9 @@ struct rxrpc_call *rxrpc_alloc_call(struct rxrpc_sock *rx, gfp_t gfp,
 	call->cong_cwnd = 2;
 	call->cong_ssthresh = RXRPC_RXTX_BUFF_SIZE - 1;
+
+	call->rxnet = rxnet;
+	atomic_inc(&rxnet->nr_calls);
 	return call;
 
 nomem_2:
@@ -222,7 +226,7 @@ struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx,
 	__acquires(&call->user_mutex)
 {
 	struct rxrpc_call *call, *xcall;
-	struct rxrpc_net *rxnet = rxrpc_net(sock_net(&rx->sk));
+	struct rxrpc_net *rxnet;
 	struct rb_node *parent, **pp;
 	const void *here = __builtin_return_address(0);
 	int ret;
@@ -272,6 +276,7 @@ struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx,
 	write_unlock(&rx->call_lock);
 
+	rxnet = call->rxnet;
 	write_lock(&rxnet->call_lock);
 	list_add_tail(&call->link, &rxnet->calls);
 	write_unlock(&rxnet->call_lock);
@@ -617,7 +622,7 @@ void rxrpc_release_calls_on_socket(struct rxrpc_sock *rx)
  */
 void rxrpc_put_call(struct rxrpc_call *call, enum rxrpc_call_trace op)
 {
-	struct rxrpc_net *rxnet;
+	struct rxrpc_net *rxnet = call->rxnet;
 	const void *here = __builtin_return_address(0);
 	int n;
@@ -631,7 +636,6 @@ void rxrpc_put_call(struct rxrpc_call *call, enum rxrpc_call_trace op)
 		ASSERTCMP(call->state, ==, RXRPC_CALL_COMPLETE);
 
 		if (!list_empty(&call->link)) {
-			rxnet = rxrpc_net(sock_net(&call->socket->sk));
 			write_lock(&rxnet->call_lock);
 			list_del_init(&call->link);
 			write_unlock(&rxnet->call_lock);
@@ -647,11 +651,14 @@ void rxrpc_put_call(struct rxrpc_call *call, enum rxrpc_call_trace op)
 static void rxrpc_rcu_destroy_call(struct rcu_head *rcu)
 {
 	struct rxrpc_call *call = container_of(rcu, struct rxrpc_call, rcu);
+	struct rxrpc_net *rxnet = call->rxnet;
 
 	rxrpc_put_peer(call->peer);
 	kfree(call->rxtx_buffer);
 	kfree(call->rxtx_annotations);
 	kmem_cache_free(rxrpc_call_jar, call);
+	if (atomic_dec_and_test(&rxnet->nr_calls))
+		wake_up_atomic_t(&rxnet->nr_calls);
 }
 
 /*
@@ -716,4 +723,7 @@ void rxrpc_destroy_all_calls(struct rxrpc_net *rxnet)
 	}
 
 	write_unlock(&rxnet->call_lock);
+
+	atomic_dec(&rxnet->nr_calls);
+	wait_on_atomic_t(&rxnet->nr_calls, atomic_t_wait, TASK_UNINTERRUPTIBLE);
 }
--- a/net/rxrpc/net_ns.c
+++ b/net/rxrpc/net_ns.c
@@ -55,6 +55,7 @@ static __net_init int rxrpc_init_net(struct net *net)
 	INIT_LIST_HEAD(&rxnet->calls);
 	rwlock_init(&rxnet->call_lock);
+	atomic_set(&rxnet->nr_calls, 1);
 
 	INIT_LIST_HEAD(&rxnet->conn_proc_list);
 	INIT_LIST_HEAD(&rxnet->service_conns);