Commit c8753d55 authored by Vijay Subramanian, committed by David S. Miller

net: Cleanup skb cloning by adding SKB_FCLONE_FREE

SKB_FCLONE_UNAVAILABLE has an overloaded meaning depending on the type of skb:
1. If the skb is allocated from head_cache, it indicates that no fclone is available.
2. If the skb is a companion fclone skb (allocated from fclone_cache), it indicates
that the skb is available to be used.

To avoid confusion in case 2 above, this patch replaces
SKB_FCLONE_UNAVAILABLE with SKB_FCLONE_FREE where appropriate. For fclone
companion skbs, this state indicates that the skb is free for use.

SKB_FCLONE_UNAVAILABLE now simply indicates that the skb comes from head_cache
and cannot/will not have a companion fclone.
Signed-off-by: Vijay Subramanian <subramanian.vijay@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 9fab426d
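
Before the diff, the state lifecycle this patch establishes can be summarized
as follows (an illustrative diagram derived from the commit message and the
hunks below, not code from the patch):

	/* Lifecycle of an fclone pair after this patch:
	 *
	 *   __alloc_skb(..., SKB_ALLOC_FCLONE)
	 *       skb1.fclone = SKB_FCLONE_ORIG, skb2.fclone = SKB_FCLONE_FREE, ref = 1
	 *   skb_clone(skb1)
	 *       skb2.fclone = SKB_FCLONE_FREE -> SKB_FCLONE_CLONE, ref = 2
	 *   kfree_skb(skb2) while skb1 is still live
	 *       skb2.fclone = SKB_FCLONE_CLONE -> SKB_FCLONE_FREE, ref = 1
	 *
	 * SKB_FCLONE_UNAVAILABLE is now reserved for skbs allocated from
	 * skbuff_head_cache, which never have a companion fclone.
	 */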
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -339,9 +339,10 @@ struct skb_shared_info {
 
 
 enum {
-	SKB_FCLONE_UNAVAILABLE,
-	SKB_FCLONE_ORIG,
-	SKB_FCLONE_CLONE,
+	SKB_FCLONE_UNAVAILABLE,	/* skb has no fclone (from head_cache) */
+	SKB_FCLONE_ORIG,	/* orig skb (from fclone_cache) */
+	SKB_FCLONE_CLONE,	/* companion fclone skb (from fclone_cache) */
+	SKB_FCLONE_FREE,	/* this companion fclone skb is available */
 };
 
 enum {
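
The net/core/skbuff.c hunks below rely on the fclone pairing structure;
roughly (a sketch of struct sk_buff_fclones as it existed around this commit,
for orientation only):

	struct sk_buff_fclones {
		struct sk_buff	skb1;		/* the SKB_FCLONE_ORIG skb */
		struct sk_buff	skb2;		/* its companion fclone */
		atomic_t	fclone_ref;	/* 1 while only skb1 is live,
						 * 2 once skb2 has been handed out
						 */
	};

Because skb1 and skb2 live at fixed offsets inside a single fclone_cache
object, container_of() can recover the pair (and its refcount) from either
skb, which is exactly what kfree_skbmem() and skb_clone() do below.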
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -265,7 +265,7 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
 		skb->fclone = SKB_FCLONE_ORIG;
 		atomic_set(&fclones->fclone_ref, 1);
 
-		fclones->skb2.fclone = SKB_FCLONE_UNAVAILABLE;
+		fclones->skb2.fclone = SKB_FCLONE_FREE;
 		fclones->skb2.pfmemalloc = pfmemalloc;
 	}
 out:
@@ -542,7 +542,7 @@ static void kfree_skbmem(struct sk_buff *skb)
 		fclones = container_of(skb, struct sk_buff_fclones, skb2);
 
 		/* Warning : We must perform the atomic_dec_and_test() before
-		 * setting skb->fclone back to SKB_FCLONE_UNAVAILABLE, otherwise
+		 * setting skb->fclone back to SKB_FCLONE_FREE, otherwise
 		 * skb_clone() could set clone_ref to 2 before our decrement.
 		 * Anyway, if we are going to free the structure, no need to
 		 * rewrite skb->fclone.
@@ -553,7 +553,7 @@ static void kfree_skbmem(struct sk_buff *skb)
 			/* The clone portion is available for
 			 * fast-cloning again.
 			 */
-			skb->fclone = SKB_FCLONE_UNAVAILABLE;
+			skb->fclone = SKB_FCLONE_FREE;
 		}
 		break;
 	}
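
The ordering the warning comment insists on can be motivated by the
interleaving it rules out (a hypothetical trace, not code from the patch):

	/* Buggy order: store SKB_FCLONE_FREE first, decrement fclone_ref after.
	 *
	 *   CPU0: kfree_skbmem(skb2)            CPU1: skb_clone(skb1)
	 *   skb2->fclone = SKB_FCLONE_FREE;
	 *                                       sees SKB_FCLONE_FREE, hands
	 *                                       skb2 out again and does
	 *                                       atomic_set(&fclone_ref, 2)
	 *   atomic_dec_and_test() -> ref == 1,
	 *   although both skbs are now live; the
	 *   next free drops ref to 0 and recycles
	 *   the pair while it is still in use.
	 *
	 * Performing the atomic_dec_and_test() first closes the window:
	 * skb_clone() can only observe SKB_FCLONE_FREE once the companion
	 * really is free.
	 */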
@@ -874,7 +874,7 @@ struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
 		return NULL;
 
 	if (skb->fclone == SKB_FCLONE_ORIG &&
-	    n->fclone == SKB_FCLONE_UNAVAILABLE) {
+	    n->fclone == SKB_FCLONE_FREE) {
 		n->fclone = SKB_FCLONE_CLONE;
 		/* As our fastclone was free, clone_ref must be 1 at this point.
 		 * We could use atomic_inc() here, but it is faster
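
Putting the pieces together, a typical fast-clone round trip now walks the
states like this (illustrative usage; alloc_skb_fclone() is the existing
wrapper that passes SKB_ALLOC_FCLONE down to __alloc_skb(), and size stands
in for whatever length the caller needs):

	struct sk_buff *skb, *clone;

	skb = alloc_skb_fclone(size, GFP_ATOMIC);  /* skb1: ORIG, skb2: FREE, ref 1 */
	clone = skb_clone(skb, GFP_ATOMIC);        /* skb2: FREE -> CLONE, ref 2 */
	kfree_skb(clone);                          /* skb2: CLONE -> FREE, ref 1 */
	kfree_skb(skb);                            /* ref 0: pair back to fclone_cache */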