Commit 459837b5 authored by Dmitry Safonov, committed by Jakub Kicinski

net/tcp: Disable TCP-MD5 static key on tcp_md5sig_info destruction

To do that, separate two scenarios:
- where it's the first MD5 key on the system, which means that enabling
  the static key may need to sleep;
- copying an existing key from a listening socket to the request socket
  upon receiving a signed TCP segment, where the static key was already
  enabled (when the key was added to the listening socket).

Now the lifetime of the static branch for TCP-MD5 is until:
- the last tcp_md5sig_info is destroyed;
- the last socket in time-wait state with an MD5 key is closed.

This means that after all sockets with TCP-MD5 keys are gone, the
system gets back the performance of the disabled md5-key static branch.

While at it, provide a static_key_fast_inc() helper that does the ref
counter increment in an atomic fashion (without grabbing cpus_read_lock()
on CONFIG_JUMP_LABEL=y). This is needed to add a new user for
a static_key when the caller controls the lifetime of another user.
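
The resulting usage pattern looks roughly like the sketch below (not part
of the patch; the example_* names are hypothetical, and it assumes the
bool-returning static_branch_inc()/static_key_fast_inc_not_disabled()
helpers introduced earlier in this series):

#include <linux/jump_label.h>
#include <linux/jump_label_ratelimit.h>

/* Deferred static key: disabling is rate-limited (here by HZ), mirroring
 * DEFINE_STATIC_KEY_DEFERRED_FALSE(tcp_md5_needed, HZ) in the patch.
 */
static DEFINE_STATIC_KEY_DEFERRED_FALSE(example_needed, HZ);

/* First key on the system, process context: enabling the branch may sleep. */
static int example_add_first_key(void)
{
	if (!static_branch_inc(&example_needed.key))
		return -EUSERS;		/* refcount saturated */
	return 0;
}

/* Copying a key to a request/timewait socket, atomic context: the branch is
 * already enabled, so only the refcount is bumped, without cpus_read_lock().
 */
static int example_copy_key(void)
{
	if (!static_key_fast_inc_not_disabled(&example_needed.key.key))
		return -EUSERS;
	return 0;
}

/* Last user gone: drop the reference and let the branch be patched back out,
 * restoring the md5-disabled fast path.
 */
static void example_put_key(void)
{
	static_branch_slow_dec_deferred(&example_needed);
}

This mirrors what tcp_md5_do_add(), tcp_md5_key_copy(), tcp_v4_destroy_sock()
and tcp_twsk_destructor() do in the diff below.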
Signed-off-by: Dmitry Safonov <dima@arista.com>
Acked-by: Jakub Kicinski <kuba@kernel.org>
Reviewed-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parent f62c7517
@@ -1675,7 +1675,11 @@ int tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key,
 			const struct sock *sk, const struct sk_buff *skb);
 int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
 		   int family, u8 prefixlen, int l3index, u8 flags,
-		   const u8 *newkey, u8 newkeylen, gfp_t gfp);
+		   const u8 *newkey, u8 newkeylen);
+int tcp_md5_key_copy(struct sock *sk, const union tcp_md5_addr *addr,
+		     int family, u8 prefixlen, int l3index,
+		     struct tcp_md5sig_key *key);
 int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr,
 		   int family, u8 prefixlen, int l3index, u8 flags);
 struct tcp_md5sig_key *tcp_v4_md5_lookup(const struct sock *sk,
@@ -1683,7 +1687,7 @@ struct tcp_md5sig_key *tcp_v4_md5_lookup(const struct sock *sk,
 #ifdef CONFIG_TCP_MD5SIG
 #include <linux/jump_label.h>
-extern struct static_key_false tcp_md5_needed;
+extern struct static_key_false_deferred tcp_md5_needed;
 struct tcp_md5sig_key *__tcp_md5_do_lookup(const struct sock *sk, int l3index,
 					   const union tcp_md5_addr *addr,
 					   int family);
@@ -1691,7 +1695,7 @@ static inline struct tcp_md5sig_key *
 tcp_md5_do_lookup(const struct sock *sk, int l3index,
 		  const union tcp_md5_addr *addr, int family)
 {
-	if (!static_branch_unlikely(&tcp_md5_needed))
+	if (!static_branch_unlikely(&tcp_md5_needed.key))
 		return NULL;
 	return __tcp_md5_do_lookup(sk, l3index, addr, family);
 }
@@ -4464,11 +4464,8 @@ bool tcp_alloc_md5sig_pool(void)
 	if (unlikely(!READ_ONCE(tcp_md5sig_pool_populated))) {
 		mutex_lock(&tcp_md5sig_mutex);
 
-		if (!tcp_md5sig_pool_populated) {
+		if (!tcp_md5sig_pool_populated)
 			__tcp_alloc_md5sig_pool();
 
-			if (tcp_md5sig_pool_populated)
-				static_branch_inc(&tcp_md5_needed);
-		}
 		mutex_unlock(&tcp_md5sig_mutex);
 	}
@@ -1053,7 +1053,7 @@ static void tcp_v4_reqsk_destructor(struct request_sock *req)
  * We need to maintain these in the sk structure.
  */
 
-DEFINE_STATIC_KEY_FALSE(tcp_md5_needed);
+DEFINE_STATIC_KEY_DEFERRED_FALSE(tcp_md5_needed, HZ);
 EXPORT_SYMBOL(tcp_md5_needed);
 
 static bool better_md5_match(struct tcp_md5sig_key *old, struct tcp_md5sig_key *new)
@@ -1166,9 +1166,6 @@ static int tcp_md5sig_info_add(struct sock *sk, gfp_t gfp)
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct tcp_md5sig_info *md5sig;
 
-	if (rcu_dereference_protected(tp->md5sig_info, lockdep_sock_is_held(sk)))
-		return 0;
-
 	md5sig = kmalloc(sizeof(*md5sig), gfp);
 	if (!md5sig)
 		return -ENOMEM;
@@ -1180,9 +1177,9 @@ static int tcp_md5sig_info_add(struct sock *sk, gfp_t gfp)
 }
 
 /* This can be called on a newly created socket, from other files */
-int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
-		   int family, u8 prefixlen, int l3index, u8 flags,
-		   const u8 *newkey, u8 newkeylen, gfp_t gfp)
+static int __tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
+			    int family, u8 prefixlen, int l3index, u8 flags,
+			    const u8 *newkey, u8 newkeylen, gfp_t gfp)
 {
 	/* Add Key to the list */
 	struct tcp_md5sig_key *key;
@@ -1209,9 +1206,6 @@ int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
 		return 0;
 	}
 
-	if (tcp_md5sig_info_add(sk, gfp))
-		return -ENOMEM;
-
 	md5sig = rcu_dereference_protected(tp->md5sig_info,
 					   lockdep_sock_is_held(sk));
@@ -1235,8 +1229,59 @@ int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
 	hlist_add_head_rcu(&key->node, &md5sig->head);
 	return 0;
 }
+
+int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
+		   int family, u8 prefixlen, int l3index, u8 flags,
+		   const u8 *newkey, u8 newkeylen)
+{
+	struct tcp_sock *tp = tcp_sk(sk);
+
+	if (!rcu_dereference_protected(tp->md5sig_info, lockdep_sock_is_held(sk))) {
+		if (tcp_md5sig_info_add(sk, GFP_KERNEL))
+			return -ENOMEM;
+
+		if (!static_branch_inc(&tcp_md5_needed.key)) {
+			struct tcp_md5sig_info *md5sig;
+
+			md5sig = rcu_dereference_protected(tp->md5sig_info, lockdep_sock_is_held(sk));
+			rcu_assign_pointer(tp->md5sig_info, NULL);
+			kfree_rcu(md5sig);
+			return -EUSERS;
+		}
+	}
+
+	return __tcp_md5_do_add(sk, addr, family, prefixlen, l3index, flags,
+				newkey, newkeylen, GFP_KERNEL);
+}
 EXPORT_SYMBOL(tcp_md5_do_add);
+
+int tcp_md5_key_copy(struct sock *sk, const union tcp_md5_addr *addr,
+		     int family, u8 prefixlen, int l3index,
+		     struct tcp_md5sig_key *key)
+{
+	struct tcp_sock *tp = tcp_sk(sk);
+
+	if (!rcu_dereference_protected(tp->md5sig_info, lockdep_sock_is_held(sk))) {
+		if (tcp_md5sig_info_add(sk, sk_gfp_mask(sk, GFP_ATOMIC)))
+			return -ENOMEM;
+
+		if (!static_key_fast_inc_not_disabled(&tcp_md5_needed.key.key)) {
+			struct tcp_md5sig_info *md5sig;
+
+			md5sig = rcu_dereference_protected(tp->md5sig_info, lockdep_sock_is_held(sk));
+			net_warn_ratelimited("Too many TCP-MD5 keys in the system\n");
+			rcu_assign_pointer(tp->md5sig_info, NULL);
+			kfree_rcu(md5sig);
+			return -EUSERS;
+		}
+	}
+
+	return __tcp_md5_do_add(sk, addr, family, prefixlen, l3index,
+				key->flags, key->key, key->keylen,
+				sk_gfp_mask(sk, GFP_ATOMIC));
+}
+EXPORT_SYMBOL(tcp_md5_key_copy);
+
 int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family,
 		   u8 prefixlen, int l3index, u8 flags)
 {
@@ -1323,7 +1368,7 @@ static int tcp_v4_parse_md5_keys(struct sock *sk, int optname,
 		return -EINVAL;
 
 	return tcp_md5_do_add(sk, addr, AF_INET, prefixlen, l3index, flags,
-			      cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
+			      cmd.tcpm_key, cmd.tcpm_keylen);
 }
 
 static int tcp_v4_md5_hash_headers(struct tcp_md5sig_pool *hp,
@@ -1580,8 +1625,7 @@ struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
 		 * memory, then we end up not copying the key
 		 * across. Shucks.
 		 */
-		tcp_md5_do_add(newsk, addr, AF_INET, 32, l3index, key->flags,
-			       key->key, key->keylen, GFP_ATOMIC);
+		tcp_md5_key_copy(newsk, addr, AF_INET, 32, l3index, key);
 		sk_gso_disable(newsk);
 	}
 #endif
@@ -2273,6 +2317,7 @@ void tcp_v4_destroy_sock(struct sock *sk)
 		tcp_clear_md5_list(sk);
 		kfree_rcu(rcu_dereference_protected(tp->md5sig_info, 1), rcu);
 		tp->md5sig_info = NULL;
+		static_branch_slow_dec_deferred(&tcp_md5_needed);
 	}
 #endif
@@ -291,13 +291,19 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
 		 */
 		do {
 			tcptw->tw_md5_key = NULL;
-			if (static_branch_unlikely(&tcp_md5_needed)) {
+			if (static_branch_unlikely(&tcp_md5_needed.key)) {
 				struct tcp_md5sig_key *key;
 
 				key = tp->af_specific->md5_lookup(sk, sk);
 				if (key) {
 					tcptw->tw_md5_key = kmemdup(key, sizeof(*key), GFP_ATOMIC);
-					BUG_ON(tcptw->tw_md5_key && !tcp_alloc_md5sig_pool());
+					if (!tcptw->tw_md5_key)
+						break;
+					BUG_ON(!tcp_alloc_md5sig_pool());
+					if (!static_key_fast_inc_not_disabled(&tcp_md5_needed.key.key)) {
+						kfree(tcptw->tw_md5_key);
+						tcptw->tw_md5_key = NULL;
+					}
 				}
 			}
 		} while (0);
@@ -337,11 +343,13 @@ EXPORT_SYMBOL(tcp_time_wait);
 void tcp_twsk_destructor(struct sock *sk)
 {
 #ifdef CONFIG_TCP_MD5SIG
-	if (static_branch_unlikely(&tcp_md5_needed)) {
+	if (static_branch_unlikely(&tcp_md5_needed.key)) {
 		struct tcp_timewait_sock *twsk = tcp_twsk(sk);
 
-		if (twsk->tw_md5_key)
+		if (twsk->tw_md5_key) {
 			kfree_rcu(twsk->tw_md5_key, rcu);
+			static_branch_slow_dec_deferred(&tcp_md5_needed);
+		}
 	}
 #endif
 }
@@ -766,7 +766,7 @@ static unsigned int tcp_syn_options(struct sock *sk, struct sk_buff *skb,
 	*md5 = NULL;
 #ifdef CONFIG_TCP_MD5SIG
-	if (static_branch_unlikely(&tcp_md5_needed) &&
+	if (static_branch_unlikely(&tcp_md5_needed.key) &&
 	    rcu_access_pointer(tp->md5sig_info)) {
 		*md5 = tp->af_specific->md5_lookup(sk, sk);
 		if (*md5) {
@@ -922,7 +922,7 @@ static unsigned int tcp_established_options(struct sock *sk, struct sk_buff *skb
 	*md5 = NULL;
 #ifdef CONFIG_TCP_MD5SIG
-	if (static_branch_unlikely(&tcp_md5_needed) &&
+	if (static_branch_unlikely(&tcp_md5_needed.key) &&
 	    rcu_access_pointer(tp->md5sig_info)) {
 		*md5 = tp->af_specific->md5_lookup(sk, sk);
 		if (*md5) {
@@ -665,12 +665,11 @@ static int tcp_v6_parse_md5_keys(struct sock *sk, int optname,
 	if (ipv6_addr_v4mapped(&sin6->sin6_addr))
 		return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
 				      AF_INET, prefixlen, l3index, flags,
-				      cmd.tcpm_key, cmd.tcpm_keylen,
-				      GFP_KERNEL);
+				      cmd.tcpm_key, cmd.tcpm_keylen);
 
 	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
 			      AF_INET6, prefixlen, l3index, flags,
-			      cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
+			      cmd.tcpm_key, cmd.tcpm_keylen);
 }
 
 static int tcp_v6_md5_hash_headers(struct tcp_md5sig_pool *hp,
@@ -1370,9 +1369,8 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *
 		 * memory, then we end up not copying the key
 		 * across. Shucks.
 		 */
-		tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newsk->sk_v6_daddr,
-			       AF_INET6, 128, l3index, key->flags, key->key, key->keylen,
-			       sk_gfp_mask(sk, GFP_ATOMIC));
+		tcp_md5_key_copy(newsk, (union tcp_md5_addr *)&newsk->sk_v6_daddr,
+				 AF_INET6, 128, l3index, key);
 	}
 #endif