Commit 53d0e83f authored by Sowmini Varadhan, committed by David S. Miller

rds: tcp: must use spin_lock_irq* and not spin_lock_bh with rds_tcp_conn_lock

rds_tcp_connection allocation/free management has the potential to be
called from __rds_conn_create after IRQs have been disabled, so
spin_[un]lock_bh cannot be used with rds_tcp_conn_lock.

Bottom-halves that need to synchronize for critical sections protected
by rds_tcp_conn_lock should instead use rds_destroy_pending() correctly.

Reported-by: syzbot+c68e51bb5e699d3f8d91@syzkaller.appspotmail.com
Fixes: ebeeb1ad ("rds: tcp: use rds_destroy_pending() to synchronize
       netns/module teardown and rds connection/workq management")
Signed-off-by: Sowmini Varadhan <sowmini.varadhan@oracle.com>
Acked-by: Santosh Shilimkar <santosh.shilimkar@oracle.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 3008ba5f
...@@ -272,13 +272,14 @@ static int rds_tcp_laddr_check(struct net *net, __be32 addr) ...@@ -272,13 +272,14 @@ static int rds_tcp_laddr_check(struct net *net, __be32 addr)
static void rds_tcp_conn_free(void *arg) static void rds_tcp_conn_free(void *arg)
{ {
struct rds_tcp_connection *tc = arg; struct rds_tcp_connection *tc = arg;
unsigned long flags;
rdsdebug("freeing tc %p\n", tc); rdsdebug("freeing tc %p\n", tc);
spin_lock_bh(&rds_tcp_conn_lock); spin_lock_irqsave(&rds_tcp_conn_lock, flags);
if (!tc->t_tcp_node_detached) if (!tc->t_tcp_node_detached)
list_del(&tc->t_tcp_node); list_del(&tc->t_tcp_node);
spin_unlock_bh(&rds_tcp_conn_lock); spin_unlock_irqrestore(&rds_tcp_conn_lock, flags);
kmem_cache_free(rds_tcp_conn_slab, tc); kmem_cache_free(rds_tcp_conn_slab, tc);
} }
...@@ -308,13 +309,13 @@ static int rds_tcp_conn_alloc(struct rds_connection *conn, gfp_t gfp) ...@@ -308,13 +309,13 @@ static int rds_tcp_conn_alloc(struct rds_connection *conn, gfp_t gfp)
rdsdebug("rds_conn_path [%d] tc %p\n", i, rdsdebug("rds_conn_path [%d] tc %p\n", i,
conn->c_path[i].cp_transport_data); conn->c_path[i].cp_transport_data);
} }
spin_lock_bh(&rds_tcp_conn_lock); spin_lock_irq(&rds_tcp_conn_lock);
for (i = 0; i < RDS_MPATH_WORKERS; i++) { for (i = 0; i < RDS_MPATH_WORKERS; i++) {
tc = conn->c_path[i].cp_transport_data; tc = conn->c_path[i].cp_transport_data;
tc->t_tcp_node_detached = false; tc->t_tcp_node_detached = false;
list_add_tail(&tc->t_tcp_node, &rds_tcp_conn_list); list_add_tail(&tc->t_tcp_node, &rds_tcp_conn_list);
} }
spin_unlock_bh(&rds_tcp_conn_lock); spin_unlock_irq(&rds_tcp_conn_lock);
fail: fail:
if (ret) { if (ret) {
for (j = 0; j < i; j++) for (j = 0; j < i; j++)
...@@ -527,7 +528,7 @@ static void rds_tcp_kill_sock(struct net *net) ...@@ -527,7 +528,7 @@ static void rds_tcp_kill_sock(struct net *net)
rtn->rds_tcp_listen_sock = NULL; rtn->rds_tcp_listen_sock = NULL;
rds_tcp_listen_stop(lsock, &rtn->rds_tcp_accept_w); rds_tcp_listen_stop(lsock, &rtn->rds_tcp_accept_w);
spin_lock_bh(&rds_tcp_conn_lock); spin_lock_irq(&rds_tcp_conn_lock);
list_for_each_entry_safe(tc, _tc, &rds_tcp_conn_list, t_tcp_node) { list_for_each_entry_safe(tc, _tc, &rds_tcp_conn_list, t_tcp_node) {
struct net *c_net = read_pnet(&tc->t_cpath->cp_conn->c_net); struct net *c_net = read_pnet(&tc->t_cpath->cp_conn->c_net);
...@@ -540,7 +541,7 @@ static void rds_tcp_kill_sock(struct net *net) ...@@ -540,7 +541,7 @@ static void rds_tcp_kill_sock(struct net *net)
tc->t_tcp_node_detached = true; tc->t_tcp_node_detached = true;
} }
} }
spin_unlock_bh(&rds_tcp_conn_lock); spin_unlock_irq(&rds_tcp_conn_lock);
list_for_each_entry_safe(tc, _tc, &tmp_list, t_tcp_node) list_for_each_entry_safe(tc, _tc, &tmp_list, t_tcp_node)
rds_conn_destroy(tc->t_cpath->cp_conn); rds_conn_destroy(tc->t_cpath->cp_conn);
} }
...@@ -588,7 +589,7 @@ static void rds_tcp_sysctl_reset(struct net *net) ...@@ -588,7 +589,7 @@ static void rds_tcp_sysctl_reset(struct net *net)
{ {
struct rds_tcp_connection *tc, *_tc; struct rds_tcp_connection *tc, *_tc;
spin_lock_bh(&rds_tcp_conn_lock); spin_lock_irq(&rds_tcp_conn_lock);
list_for_each_entry_safe(tc, _tc, &rds_tcp_conn_list, t_tcp_node) { list_for_each_entry_safe(tc, _tc, &rds_tcp_conn_list, t_tcp_node) {
struct net *c_net = read_pnet(&tc->t_cpath->cp_conn->c_net); struct net *c_net = read_pnet(&tc->t_cpath->cp_conn->c_net);
...@@ -598,7 +599,7 @@ static void rds_tcp_sysctl_reset(struct net *net) ...@@ -598,7 +599,7 @@ static void rds_tcp_sysctl_reset(struct net *net)
/* reconnect with new parameters */ /* reconnect with new parameters */
rds_conn_path_drop(tc->t_cpath, false); rds_conn_path_drop(tc->t_cpath, false);
} }
spin_unlock_bh(&rds_tcp_conn_lock); spin_unlock_irq(&rds_tcp_conn_lock);
} }
static int rds_tcp_skbuf_handler(struct ctl_table *ctl, int write, static int rds_tcp_skbuf_handler(struct ctl_table *ctl, int write,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment