Commit e93abb84 authored by Eric Dumazet, committed by Jakub Kicinski

net/tcp_fastopen: remove tcp_fastopen_ctx_lock

Remove the (per netns) spinlock in favor of xchg() atomic operations.
Signed-off-by: Eric Dumazet <edumazet@google.com>
Cc: Yuchung Cheng <ycheng@google.com>
Cc: Neal Cardwell <ncardwell@google.com>
Acked-by: Wei Wang <weiwan@google.com>
Link: https://lore.kernel.org/r/20210719101107.3203943-1-eric.dumazet@gmail.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parent fef773fc
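As a rough mental model of the change, here is a minimal userspace C11 sketch of the same pattern (the names fastopen_ctx, g_ctx and ctx_swap are illustrative, not the kernel's; the direct free() stands in for call_rcu(), which in the kernel defers reclamation until after an RCU grace period):

/*
 * Userspace analogue of the change: the pointer update no longer needs
 * a spinlock because a single atomic exchange both publishes the new
 * context and returns the old one. In the kernel the old pointer goes
 * to call_rcu(); this single-threaded sketch frees it directly.
 */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct fastopen_ctx {			/* stand-in for tcp_fastopen_context */
	char key[16];
};

/* stand-in for net->ipv4.tcp_fastopen_ctx */
static _Atomic(struct fastopen_ctx *) g_ctx;

/* Install a new context (or NULL to destroy) and return the old one. */
static struct fastopen_ctx *ctx_swap(struct fastopen_ctx *new_ctx)
{
	return atomic_exchange(&g_ctx, new_ctx);
}

int main(void)
{
	struct fastopen_ctx *ctx = calloc(1, sizeof(*ctx));
	struct fastopen_ctx *old;

	strcpy(ctx->key, "key-1");
	old = ctx_swap(ctx);		/* reset_cipher path: NULL on first install */
	free(old);

	old = ctx_swap(NULL);		/* ctx_destroy path: detach, then reclaim */
	printf("detached %s\n", old ? old->key : "(none)");
	free(old);
	return 0;
}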
include/net/netns/ipv4.h
@@ -174,7 +174,6 @@ struct netns_ipv4 {
 	int sysctl_tcp_fastopen;
 	const struct tcp_congestion_ops __rcu *tcp_congestion_control;
 	struct tcp_fastopen_context __rcu *tcp_fastopen_ctx;
-	spinlock_t tcp_fastopen_ctx_lock;
 	unsigned int sysctl_tcp_fastopen_blackhole_timeout;
 	atomic_t tfo_active_disable_times;
 	unsigned long tfo_active_disable_stamp;
net/ipv4/tcp_fastopen.c
@@ -55,12 +55,7 @@ void tcp_fastopen_ctx_destroy(struct net *net)
 {
 	struct tcp_fastopen_context *ctxt;
 
-	spin_lock(&net->ipv4.tcp_fastopen_ctx_lock);
-
-	ctxt = rcu_dereference_protected(net->ipv4.tcp_fastopen_ctx,
-			lockdep_is_held(&net->ipv4.tcp_fastopen_ctx_lock));
-	rcu_assign_pointer(net->ipv4.tcp_fastopen_ctx, NULL);
-	spin_unlock(&net->ipv4.tcp_fastopen_ctx_lock);
+	ctxt = xchg((__force struct tcp_fastopen_context **)&net->ipv4.tcp_fastopen_ctx, NULL);
 
 	if (ctxt)
 		call_rcu(&ctxt->rcu, tcp_fastopen_ctx_free);
@@ -89,18 +84,12 @@ int tcp_fastopen_reset_cipher(struct net *net, struct sock *sk,
 		ctx->num = 1;
 	}
 
-	spin_lock(&net->ipv4.tcp_fastopen_ctx_lock);
 	if (sk) {
 		q = &inet_csk(sk)->icsk_accept_queue.fastopenq;
-		octx = rcu_dereference_protected(q->ctx,
-			lockdep_is_held(&net->ipv4.tcp_fastopen_ctx_lock));
-		rcu_assign_pointer(q->ctx, ctx);
+		octx = xchg((__force struct tcp_fastopen_context **)&q->ctx, ctx);
 	} else {
-		octx = rcu_dereference_protected(net->ipv4.tcp_fastopen_ctx,
-			lockdep_is_held(&net->ipv4.tcp_fastopen_ctx_lock));
-		rcu_assign_pointer(net->ipv4.tcp_fastopen_ctx, ctx);
+		octx = xchg((__force struct tcp_fastopen_context **)&net->ipv4.tcp_fastopen_ctx, ctx);
 	}
-	spin_unlock(&net->ipv4.tcp_fastopen_ctx_lock);
 
 	if (octx)
 		call_rcu(&octx->rcu, tcp_fastopen_ctx_free);
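A note on why the unlocked xchg() in tcp_fastopen_reset_cipher() is safe against concurrent writers: each exchange returns a distinct previous pointer, so every displaced context is handed to call_rcu() exactly once, with no leak and no double free. A hypothetical pthreads demo of that exactly-once property (slot, writer and NR_WRITERS are made-up names for illustration):

/*
 * Racing writers each get a distinct previous pointer back from the
 * exchange, so every displaced object is reclaimed exactly once
 * (in the kernel, via call_rcu() rather than an immediate free()).
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

#define NR_WRITERS 8

static _Atomic(int *) slot;		/* the shared context pointer */
static atomic_int reclaimed;		/* how many old contexts were freed */

static void *writer(void *arg)
{
	int *mine = malloc(sizeof(*mine));
	int *old = atomic_exchange(&slot, mine);	/* install, get old */

	(void)arg;
	if (old) {			/* each old pointer reaches one writer */
		free(old);
		atomic_fetch_add(&reclaimed, 1);
	}
	return NULL;
}

int main(void)
{
	pthread_t t[NR_WRITERS];
	int i;

	for (i = 0; i < NR_WRITERS; i++)
		pthread_create(&t[i], NULL, writer, NULL);
	for (i = 0; i < NR_WRITERS; i++)
		pthread_join(t[i], NULL);

	/* expect NR_WRITERS - 1 reclaimed; one context is still installed */
	printf("writers reclaimed %d contexts\n", atomic_load(&reclaimed));
	free(atomic_exchange(&slot, NULL));	/* destroy path: detach survivor */
	return 0;
}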
net/ipv4/tcp_ipv4.c
@@ -2964,7 +2964,6 @@ static int __net_init tcp_sk_init(struct net *net)
 	net->ipv4.sysctl_tcp_comp_sack_slack_ns = 100 * NSEC_PER_USEC;
 	net->ipv4.sysctl_tcp_comp_sack_nr = 44;
 	net->ipv4.sysctl_tcp_fastopen = TFO_CLIENT_ENABLE;
-	spin_lock_init(&net->ipv4.tcp_fastopen_ctx_lock);
 	net->ipv4.sysctl_tcp_fastopen_blackhole_timeout = 60 * 60;
 	atomic_set(&net->ipv4.tfo_active_disable_times, 0);