Commit b07a2d97 authored by Jakub Kicinski

net: skb: plumb napi state thru skb freeing paths

We maintain a NAPI-local cache of skbs which is fed by napi_consume_skb().
Going forward we will also try to cache head and data pages.
Plumb the "are we in a normal NAPI context" information thru
deeper into the freeing path, up to skb_release_data() and
skb_free_head()/skb_pp_recycle(). The "not normal NAPI context"
comes from netpoll which passes budget of 0 to try to reap
the Tx completions but not perform any Rx.
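
For context, a minimal sketch of the driver side of this contract; the
names example_poll() and example_tx_done() are hypothetical, but the
calling convention (a NAPI poll handler receiving the budget and
forwarding it to napi_consume_skb()) is the standard one:

/* Hypothetical NAPI poll handler. The core calls it with a non-zero
 * budget during normal polling; netpoll calls it with budget == 0 to
 * reap Tx completions without doing any Rx. Forwarding the budget to
 * napi_consume_skb() is what tells the freeing path whether the
 * NAPI-local skb cache may be used (budget != 0) or not (budget == 0).
 */
static int example_poll(struct napi_struct *napi, int budget)
{
	struct sk_buff *skb;
	int work_done = 0;

	while ((skb = example_tx_done(napi)) != NULL)
		napi_consume_skb(skb, budget);

	/* Rx processing, bounded by budget, would go here. */
	return work_done;
}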

Use "bool napi_safe" rather than bare "int budget",
the further we get from NAPI the more confusing the budget
argument may seem (particularly whether 0 or MAX is the
correct value to pass in when not in NAPI).
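
With this change the int-to-bool conversion happens exactly once, at
the NAPI boundary in napi_consume_skb() (see the corresponding hunk in
the diff below); everything deeper in the freeing path sees only the
bool:

	/* In napi_consume_skb(): budget == 0 (netpoll) => napi_safe == false */
	skb_release_all(skb, SKB_CONSUMED, !!budget);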
Reviewed-by: Tariq Toukan <tariqt@nvidia.com>
Tested-by: Dragos Tatulea <dtatulea@nvidia.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parent c11d2e71
@@ -839,7 +839,7 @@ static void skb_clone_fraglist(struct sk_buff *skb)
 		skb_get(list);
 }
 
-static bool skb_pp_recycle(struct sk_buff *skb, void *data)
+static bool skb_pp_recycle(struct sk_buff *skb, void *data, bool napi_safe)
 {
 	if (!IS_ENABLED(CONFIG_PAGE_POOL) || !skb->pp_recycle)
 		return false;
@@ -856,12 +856,12 @@ static void skb_kfree_head(void *head, unsigned int end_offset)
 		kfree(head);
 }
 
-static void skb_free_head(struct sk_buff *skb)
+static void skb_free_head(struct sk_buff *skb, bool napi_safe)
 {
 	unsigned char *head = skb->head;
 
 	if (skb->head_frag) {
-		if (skb_pp_recycle(skb, head))
+		if (skb_pp_recycle(skb, head, napi_safe))
 			return;
 		skb_free_frag(head);
 	} else {
@@ -869,7 +869,8 @@ static void skb_free_head(struct sk_buff *skb)
 	}
 }
 
-static void skb_release_data(struct sk_buff *skb, enum skb_drop_reason reason)
+static void skb_release_data(struct sk_buff *skb, enum skb_drop_reason reason,
+			     bool napi_safe)
 {
 	struct skb_shared_info *shinfo = skb_shinfo(skb);
 	int i;
@@ -894,7 +895,7 @@ static void skb_release_data(struct sk_buff *skb, enum skb_drop_reason reason)
 	if (shinfo->frag_list)
 		kfree_skb_list_reason(shinfo->frag_list, reason);
 
-	skb_free_head(skb);
+	skb_free_head(skb, napi_safe);
 exit:
 	/* When we clone an SKB we copy the reycling bit. The pp_recycle
 	 * bit is only set on the head though, so in order to avoid races
@@ -955,11 +956,12 @@ void skb_release_head_state(struct sk_buff *skb)
 }
 
 /* Free everything but the sk_buff shell. */
-static void skb_release_all(struct sk_buff *skb, enum skb_drop_reason reason)
+static void skb_release_all(struct sk_buff *skb, enum skb_drop_reason reason,
+			    bool napi_safe)
 {
 	skb_release_head_state(skb);
 	if (likely(skb->head))
-		skb_release_data(skb, reason);
+		skb_release_data(skb, reason, napi_safe);
 }
 
 /**
@@ -973,7 +975,7 @@ static void skb_release_all(struct sk_buff *skb, enum skb_drop_reason reason)
 
 void __kfree_skb(struct sk_buff *skb)
 {
-	skb_release_all(skb, SKB_DROP_REASON_NOT_SPECIFIED);
+	skb_release_all(skb, SKB_DROP_REASON_NOT_SPECIFIED, false);
 	kfree_skbmem(skb);
 }
 EXPORT_SYMBOL(__kfree_skb);
@@ -1027,7 +1029,7 @@ static void kfree_skb_add_bulk(struct sk_buff *skb,
 		return;
 	}
 
-	skb_release_all(skb, reason);
+	skb_release_all(skb, reason, false);
 	sa->skb_array[sa->skb_count++] = skb;
 
 	if (unlikely(sa->skb_count == KFREE_SKB_BULK_SIZE)) {
@@ -1201,7 +1203,7 @@ EXPORT_SYMBOL(consume_skb);
 void __consume_stateless_skb(struct sk_buff *skb)
 {
 	trace_consume_skb(skb, __builtin_return_address(0));
-	skb_release_data(skb, SKB_CONSUMED);
+	skb_release_data(skb, SKB_CONSUMED, false);
 	kfree_skbmem(skb);
 }
 
@@ -1226,7 +1228,7 @@ static void napi_skb_cache_put(struct sk_buff *skb)
 
 void __kfree_skb_defer(struct sk_buff *skb)
 {
-	skb_release_all(skb, SKB_DROP_REASON_NOT_SPECIFIED);
+	skb_release_all(skb, SKB_DROP_REASON_NOT_SPECIFIED, true);
 	napi_skb_cache_put(skb);
 }
 
@@ -1264,7 +1266,7 @@ void napi_consume_skb(struct sk_buff *skb, int budget)
 		return;
 	}
 
-	skb_release_all(skb, SKB_CONSUMED);
+	skb_release_all(skb, SKB_CONSUMED, !!budget);
 	napi_skb_cache_put(skb);
 }
 EXPORT_SYMBOL(napi_consume_skb);
@@ -1395,7 +1397,7 @@ EXPORT_SYMBOL_GPL(alloc_skb_for_msg);
  */
 struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src)
 {
-	skb_release_all(dst, SKB_CONSUMED);
+	skb_release_all(dst, SKB_CONSUMED, false);
 	return __skb_clone(dst, src);
 }
 EXPORT_SYMBOL_GPL(skb_morph);
@@ -2018,9 +2020,9 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
 		if (skb_has_frag_list(skb))
 			skb_clone_fraglist(skb);
 
-		skb_release_data(skb, SKB_CONSUMED);
+		skb_release_data(skb, SKB_CONSUMED, false);
 	} else {
-		skb_free_head(skb);
+		skb_free_head(skb, false);
 	}
 	off = (data + nhead) - skb->head;
 
@@ -6389,12 +6391,12 @@ static int pskb_carve_inside_header(struct sk_buff *skb, const u32 off,
 			skb_frag_ref(skb, i);
 		if (skb_has_frag_list(skb))
 			skb_clone_fraglist(skb);
-		skb_release_data(skb, SKB_CONSUMED);
+		skb_release_data(skb, SKB_CONSUMED, false);
 	} else {
 		/* we can reuse existing recount- all we did was
 		 * relocate values
 		 */
-		skb_free_head(skb);
+		skb_free_head(skb, false);
 	}
 
 	skb->head = data;
@@ -6529,7 +6531,7 @@ static int pskb_carve_inside_nonlinear(struct sk_buff *skb, const u32 off,
 		skb_kfree_head(data, size);
 		return -ENOMEM;
 	}
-	skb_release_data(skb, SKB_CONSUMED);
+	skb_release_data(skb, SKB_CONSUMED, false);
 
 	skb->head = data;
 	skb->head_frag = 0;
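
For reference, the "not normal NAPI context" case originates in
netpoll, which is not part of this diff: its polling helper invokes
the driver's handler with a zero budget, roughly as below (a sketch
paraphrasing net/core/netpoll.c, not taken verbatim from it):

	/* netpoll reaps Tx completions outside normal NAPI context, so it
	 * polls with budget == 0; after this commit that becomes
	 * napi_safe == false all the way down to skb_pp_recycle().
	 */
	work = napi->poll(napi, 0);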