Commit f4a775d1 authored by Eric Dumazet, committed by David S. Miller

net: introduce __skb_header_release()

While profiling the TCP stack, I noticed one useless atomic operation
in tcp_sendmsg(), caused by skb_header_release().

It turns out all current skb_header_release() users have a fresh skb
that no other user can see, so we can avoid one atomic operation.

Introduce __skb_header_release() to clearly document this.

This gave me a 1.5% improvement on a TCP_RR workload.
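The saving comes from replacing a locked read-modify-write with a plain
store. As a rough sketch (illustration only, not part of the patch), the
two paths compare as follows for a freshly allocated skb, whose dataref
starts at 1:

	/* skb_header_release(): skb may be visible to other users, so
	 * the payload reference must be taken with an atomic RMW
	 * (lock-prefixed on x86).
	 */
	skb->nohdr = 1;
	atomic_add(1 << SKB_DATAREF_SHIFT, &skb_shinfo(skb)->dataref);

	/* __skb_header_release(): skb is private to the caller, so the
	 * known final value (initial reference plus one payload
	 * reference) can simply be stored.
	 */
	skb->nohdr = 1;
	atomic_set(&skb_shinfo(skb)->dataref, 1 + (1 << SKB_DATAREF_SHIFT));

Both leave dataref at 1 + (1 << SKB_DATAREF_SHIFT) when it was 1, but
atomic_set() is a plain store.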
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent aebac744
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -1083,6 +1083,7 @@ static inline int skb_header_cloned(const struct sk_buff *skb)
  *	Drop a reference to the header part of the buffer.  This is done
  *	by acquiring a payload reference.  You must not read from the header
  *	part of skb->data after this.
+ *	Note : Check if you can use __skb_header_release() instead.
  */
 static inline void skb_header_release(struct sk_buff *skb)
 {
@@ -1091,6 +1092,20 @@ static inline void skb_header_release(struct sk_buff *skb)
 	atomic_add(1 << SKB_DATAREF_SHIFT, &skb_shinfo(skb)->dataref);
 }
 
+/**
+ * __skb_header_release - release reference to header
+ * @skb: buffer to operate on
+ *
+ * Variant of skb_header_release() assuming skb is private to caller.
+ * We can avoid one atomic operation.
+ */
+static inline void __skb_header_release(struct sk_buff *skb)
+{
+	skb->nohdr = 1;
+	atomic_set(&skb_shinfo(skb)->dataref, 1 + (1 << SKB_DATAREF_SHIFT));
+}
+
 /**
  * skb_shared - is the buffer shared
  * @skb: buffer to check
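Why 1 + (1 << SKB_DATAREF_SHIFT)? dataref packs two counters into one
word: the low SKB_DATAREF_SHIFT bits count references to the entire
skb->data, the high bits count payload-only references, and
skb_header_cloned() derives the header reference count as the
difference. A small userspace sketch of that arithmetic
(SKB_DATAREF_SHIFT is 16 in mainline; the helper below paraphrases
skb_header_cloned() and is not kernel code):

#include <stdio.h>

#define SKB_DATAREF_SHIFT 16
#define SKB_DATAREF_MASK  ((1 << SKB_DATAREF_SHIFT) - 1)

/* Header references = total references - payload-only references. */
static int header_refs(unsigned int dataref)
{
	return (dataref & SKB_DATAREF_MASK) - (dataref >> SKB_DATAREF_SHIFT);
}

int main(void)
{
	/* The value stored by __skb_header_release(): one total
	 * reference and one payload-only reference, i.e. the header
	 * part is no longer referenced.
	 */
	unsigned int dataref = 1 + (1 << SKB_DATAREF_SHIFT);

	printf("total=%u payload=%u header=%d\n",
	       dataref & SKB_DATAREF_MASK,	/* 1 */
	       dataref >> SKB_DATAREF_SHIFT,	/* 1 */
	       header_refs(dataref));		/* 0 */
	return 0;
}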
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -3179,7 +3179,7 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
 		skb_shinfo(nskb)->frag_list = p;
 		skb_shinfo(nskb)->gso_size = pinfo->gso_size;
 		pinfo->gso_size = 0;
-		skb_header_release(p);
+		__skb_header_release(p);
 		NAPI_GRO_CB(nskb)->last = p;
 
 		nskb->data_len += p->len;
@@ -3211,7 +3211,7 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
 	else
 		NAPI_GRO_CB(p)->last->next = skb;
 	NAPI_GRO_CB(p)->last = skb;
-	skb_header_release(skb);
+	__skb_header_release(skb);
 	lp = p;
 
 done:
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -609,7 +609,7 @@ static inline bool forced_push(const struct tcp_sock *tp)
 	return after(tp->write_seq, tp->pushed_seq + (tp->max_window >> 1));
 }
 
-static inline void skb_entail(struct sock *sk, struct sk_buff *skb)
+static void skb_entail(struct sock *sk, struct sk_buff *skb)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
@@ -618,7 +618,7 @@ static inline void skb_entail(struct sock *sk, struct sk_buff *skb)
 	tcb->seq = tcb->end_seq = tp->write_seq;
 	tcb->tcp_flags = TCPHDR_ACK;
 	tcb->sacked = 0;
-	skb_header_release(skb);
+	__skb_header_release(skb);
 	tcp_add_write_queue_tail(sk, skb);
 	sk->sk_wmem_queued += skb->truesize;
 	sk_mem_charge(sk, skb->truesize);
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -995,7 +995,7 @@ static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb)
 	/* Advance write_seq and place onto the write_queue. */
 	tp->write_seq = TCP_SKB_CB(skb)->end_seq;
-	skb_header_release(skb);
+	__skb_header_release(skb);
 	tcp_add_write_queue_tail(sk, skb);
 	sk->sk_wmem_queued += skb->truesize;
 	sk_mem_charge(sk, skb->truesize);
@@ -1167,7 +1167,7 @@ int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len,
 	}
 
 	/* Link BUFF into the send queue. */
-	skb_header_release(buff);
+	__skb_header_release(buff);
 	tcp_insert_write_queue_after(skb, buff, sk);
 
 	return 0;
@@ -1671,7 +1671,7 @@ static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len,
 	tcp_set_skb_tso_segs(sk, buff, mss_now);
 
 	/* Link BUFF into the send queue. */
-	skb_header_release(buff);
+	__skb_header_release(buff);
 	tcp_insert_write_queue_after(skb, buff, sk);
 
 	return 0;
@@ -2772,7 +2772,7 @@ int tcp_send_synack(struct sock *sk)
 		if (nskb == NULL)
 			return -ENOMEM;
 		tcp_unlink_write_queue(skb, sk);
-		skb_header_release(nskb);
+		__skb_header_release(nskb);
 		__tcp_add_write_queue_head(sk, nskb);
 		sk_wmem_free_skb(sk, skb);
 		sk->sk_wmem_queued += nskb->truesize;
@@ -2947,7 +2947,7 @@ static void tcp_connect_queue_skb(struct sock *sk, struct sk_buff *skb)
 	struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
 
 	tcb->end_seq += skb->len;
-	skb_header_release(skb);
+	__skb_header_release(skb);
 	__tcp_add_write_queue_tail(sk, skb);
 	sk->sk_wmem_queued += skb->truesize;
 	sk_mem_charge(sk, skb->truesize);
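The precondition at every converted call site is the same: the skb was
just allocated (or just split off) by the caller and is not yet linked
anywhere another CPU could see it. A simplified sketch of the pattern in
the tcp_sendmsg() path (error handling reduced; in the real code an
allocation failure jumps to wait_for_memory):

	struct sk_buff *skb;

	skb = sk_stream_alloc_skb(sk, size, sk->sk_allocation);
	if (!skb)
		return -ENOMEM;

	/* skb is private here: no clone, not on any queue, no other
	 * reference.  skb_entail() can therefore use
	 * __skb_header_release() and skip the locked RMW.
	 */
	skb_entail(sk, skb);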