Commit 7b2464d9 authored by David S. Miller

Merge branch 'tcp-cleanups'

Eric Dumazet says:

====================
tcp: cleanups for linux-5.1

This small patch series cleans up a few things, and adds a small
timewait optimization for hosts not using MD5.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents a110ae70 56483341
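
For readers unfamiliar with the API this series converts to: tcp_md5_needed becomes a jump label (static key), so the MD5 checks in the hunks below compile down to a straight-line NOP until the first MD5 key is installed. The following is a minimal illustrative sketch of that pattern, using the real <linux/jump_label.h> API but a hypothetical example_needed key and example functions; it is not code from this series:

	#include <linux/jump_label.h>

	/* Key starts false: the guarded branch is patched out of the
	 * instruction stream until the key is enabled at runtime.
	 */
	static DEFINE_STATIC_KEY_FALSE(example_needed);

	static void example_slow_feature(void)
	{
		/* rarely-needed feature work */
	}

	static void example_fast_path(void)
	{
		/* Costs (almost) nothing while example_needed is off. */
		if (static_branch_unlikely(&example_needed))
			example_slow_feature();
	}

	static void example_enable(void)
	{
		/* Reference-counted enable; patches the branch back in. */
		static_branch_inc(&example_needed);
	}

Since the key is off until someone installs an MD5 key, hosts that never use TCP_MD5SIG skip the md5sig_info lookups entirely; the tcp_minisocks.c hunks below exploit exactly this on the timewait paths.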
include/net/tcp.h:

@@ -1558,7 +1558,7 @@ struct tcp_md5sig_key *tcp_v4_md5_lookup(const struct sock *sk,
 #ifdef CONFIG_TCP_MD5SIG
 #include <linux/jump_label.h>
-extern struct static_key tcp_md5_needed;
+extern struct static_key_false tcp_md5_needed;
 struct tcp_md5sig_key *__tcp_md5_do_lookup(const struct sock *sk,
 					   const union tcp_md5_addr *addr,
 					   int family);
@@ -1567,7 +1567,7 @@ tcp_md5_do_lookup(const struct sock *sk,
 		  const union tcp_md5_addr *addr,
 		  int family)
 {
-	if (!static_key_false(&tcp_md5_needed))
+	if (!static_branch_unlikely(&tcp_md5_needed))
 		return NULL;
 	return __tcp_md5_do_lookup(sk, addr, family);
 }
@@ -1716,20 +1716,9 @@ static inline bool tcp_rtx_and_write_queues_empty(const struct sock *sk)
 	return tcp_rtx_queue_empty(sk) && tcp_write_queue_empty(sk);
 }
 
-static inline void tcp_check_send_head(struct sock *sk, struct sk_buff *skb_unlinked)
-{
-	if (tcp_write_queue_empty(sk))
-		tcp_chrono_stop(sk, TCP_CHRONO_BUSY);
-}
-
-static inline void __tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb)
-{
-	__skb_queue_tail(&sk->sk_write_queue, skb);
-}
-
 static inline void tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb)
 {
-	__tcp_add_write_queue_tail(sk, skb);
+	__skb_queue_tail(&sk->sk_write_queue, skb);
 
 	/* Queue it, remembering where we must start sending. */
 	if (sk->sk_write_queue.next == skb)
net/ipv4/tcp.c:

@@ -1412,7 +1412,8 @@ int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size)
 		/* It is the one place in all of TCP, except connection
 		 * reset, where we can be unlinking the send_head.
 		 */
-		tcp_check_send_head(sk, skb);
+		if (tcp_write_queue_empty(sk))
+			tcp_chrono_stop(sk, TCP_CHRONO_BUSY);
 		sk_wmem_free_skb(sk, skb);
 	}
@@ -3698,7 +3699,7 @@ bool tcp_alloc_md5sig_pool(void)
 	if (!tcp_md5sig_pool_populated) {
 		__tcp_alloc_md5sig_pool();
 		if (tcp_md5sig_pool_populated)
-			static_key_slow_inc(&tcp_md5_needed);
+			static_branch_inc(&tcp_md5_needed);
 	}
 	mutex_unlock(&tcp_md5sig_mutex);
net/ipv4/tcp_ipv4.c:

@@ -973,7 +973,7 @@ static void tcp_v4_reqsk_destructor(struct request_sock *req)
  * We need to maintain these in the sk structure.
  */
-struct static_key tcp_md5_needed __read_mostly;
+DEFINE_STATIC_KEY_FALSE(tcp_md5_needed);
 EXPORT_SYMBOL(tcp_md5_needed);
 
 /* Find the Key structure for an address. */
net/ipv4/tcp_minisocks.c:

@@ -294,12 +294,15 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
 		 * so the timewait ack generating code has the key.
 		 */
 		do {
-			struct tcp_md5sig_key *key;
-
 			tcptw->tw_md5_key = NULL;
-			key = tp->af_specific->md5_lookup(sk, sk);
-			if (key) {
-				tcptw->tw_md5_key = kmemdup(key, sizeof(*key), GFP_ATOMIC);
-				BUG_ON(tcptw->tw_md5_key && !tcp_alloc_md5sig_pool());
+			if (static_branch_unlikely(&tcp_md5_needed)) {
+				struct tcp_md5sig_key *key;
+
+				key = tp->af_specific->md5_lookup(sk, sk);
+				if (key) {
+					tcptw->tw_md5_key = kmemdup(key, sizeof(*key), GFP_ATOMIC);
+					BUG_ON(tcptw->tw_md5_key && !tcp_alloc_md5sig_pool());
+				}
 			}
 		} while (0);
 #endif
@@ -338,10 +341,12 @@ EXPORT_SYMBOL(tcp_time_wait);
 void tcp_twsk_destructor(struct sock *sk)
 {
 #ifdef CONFIG_TCP_MD5SIG
-	struct tcp_timewait_sock *twsk = tcp_twsk(sk);
+	if (static_branch_unlikely(&tcp_md5_needed)) {
+		struct tcp_timewait_sock *twsk = tcp_twsk(sk);
 
-	if (twsk->tw_md5_key)
-		kfree_rcu(twsk->tw_md5_key, rcu);
+		if (twsk->tw_md5_key)
+			kfree_rcu(twsk->tw_md5_key, rcu);
+	}
 #endif
 }
 EXPORT_SYMBOL_GPL(tcp_twsk_destructor);
net/ipv4/tcp_output.c:

@@ -597,7 +597,7 @@ static unsigned int tcp_syn_options(struct sock *sk, struct sk_buff *skb,
 	*md5 = NULL;
 #ifdef CONFIG_TCP_MD5SIG
-	if (static_key_false(&tcp_md5_needed) &&
+	if (static_branch_unlikely(&tcp_md5_needed) &&
 	    rcu_access_pointer(tp->md5sig_info)) {
 		*md5 = tp->af_specific->md5_lookup(sk, sk);
 		if (*md5) {
@@ -734,7 +734,7 @@ static unsigned int tcp_established_options(struct sock *sk, struct sk_buff *skb
 	*md5 = NULL;
 #ifdef CONFIG_TCP_MD5SIG
-	if (static_key_false(&tcp_md5_needed) &&
+	if (static_branch_unlikely(&tcp_md5_needed) &&
 	    rcu_access_pointer(tp->md5sig_info)) {
 		*md5 = tp->af_specific->md5_lookup(sk, sk);
 		if (*md5) {
@@ -1846,17 +1846,17 @@ static bool tcp_snd_wnd_test(const struct tcp_sock *tp,
  * know that all the data is in scatter-gather pages, and that the
  * packet has never been sent out before (and thus is not cloned).
  */
-static int tso_fragment(struct sock *sk, enum tcp_queue tcp_queue,
-			struct sk_buff *skb, unsigned int len,
+static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len,
 			unsigned int mss_now, gfp_t gfp)
 {
-	struct sk_buff *buff;
 	int nlen = skb->len - len;
+	struct sk_buff *buff;
 	u8 flags;
 
 	/* All of a TSO frame must be composed of paged data. */
 	if (skb->len != skb->data_len)
-		return tcp_fragment(sk, tcp_queue, skb, len, mss_now, gfp);
+		return tcp_fragment(sk, TCP_FRAG_IN_WRITE_QUEUE,
+				    skb, len, mss_now, gfp);
 
 	buff = sk_stream_alloc_skb(sk, 0, gfp, true);
 	if (unlikely(!buff))
@@ -1892,7 +1892,7 @@ static int tso_fragment(struct sock *sk, enum tcp_queue tcp_queue,
 	/* Link BUFF into the send queue. */
 	__skb_header_release(buff);
-	tcp_insert_write_queue_after(skb, buff, sk, tcp_queue);
+	tcp_insert_write_queue_after(skb, buff, sk, TCP_FRAG_IN_WRITE_QUEUE);
 
 	return 0;
 }
@@ -2391,8 +2391,7 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
 					  nonagle);
 
 		if (skb->len > limit &&
-		    unlikely(tso_fragment(sk, TCP_FRAG_IN_WRITE_QUEUE,
-					  skb, limit, mss_now, gfp)))
+		    unlikely(tso_fragment(sk, skb, limit, mss_now, gfp)))
 			break;
 
 		if (tcp_small_queue_check(sk, skb, 0))
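
A side note on the tso_fragment() hunks: tso_fragment() is only ever called on skbs still in the write queue (retransmissions go through tcp_fragment() directly), so every caller passed the constant TCP_FRAG_IN_WRITE_QUEUE. Dropping a parameter that is identical at all call sites and folding it into the callee is a common refactor; here is a self-contained sketch of that pattern with hypothetical names, not kernel code:

	#include <stdio.h>

	enum queue_kind { IN_WRITE_QUEUE, IN_RTX_QUEUE };	/* hypothetical */

	/* Before: callers had to name the queue, though only one was legal. */
	static int frag_before(enum queue_kind q, unsigned int len)
	{
		if (q != IN_WRITE_QUEUE)
			return -1;	/* never happened in practice */
		return (int)len;
	}

	/* After: the constant is folded into the callee; call sites shrink. */
	static int frag_after(unsigned int len)
	{
		return frag_before(IN_WRITE_QUEUE, len);
	}

	int main(void)
	{
		printf("%d\n", frag_after(1400));	/* prints 1400 */
		return 0;
	}

Besides shortening the tcp_write_xmit() call site, this removes the temptation to pass TCP_FRAG_IN_RTX_QUEUE to a function whose header comment promises the skb has never been sent.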