Commit 21dc3301 authored by David S. Miller

net: Rename skb_has_frags to skb_has_frag_list

SKBs can be "fragmented" in two ways, via a page array (called
skb_shinfo(skb)->frags[]) and via a list of SKBs (called
skb_shinfo(skb)->frag_list).

Since skb_has_frags() tests the latter, its name is confusing:
it sounds more like it is testing the former.
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 2d4833aa
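
For context (not part of the commit), here is a minimal sketch of the two checks the message contrasts. skb_is_paged() and skb_is_chained() are hypothetical helper names used only for illustration; skb_has_frag_list() is the helper introduced by the rename in the diff below.

#include <linux/skbuff.h>

/* Paged fragments: data held in the skb_shinfo(skb)->frags[] page array. */
static inline bool skb_is_paged(const struct sk_buff *skb)	/* hypothetical */
{
	return skb_shinfo(skb)->nr_frags != 0;
}

/* Fragment list: additional whole SKBs chained on skb_shinfo(skb)->frag_list,
 * which is what the renamed skb_has_frag_list() reports. */
static inline bool skb_is_chained(const struct sk_buff *skb)	/* hypothetical */
{
	return skb_has_frag_list(skb);
}
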
@@ -2191,7 +2191,7 @@ static inline int net_gso_ok(int features, int gso_type)
 static inline int skb_gso_ok(struct sk_buff *skb, int features)
 {
 	return net_gso_ok(features, skb_shinfo(skb)->gso_type) &&
-	       (!skb_has_frags(skb) || (features & NETIF_F_FRAGLIST));
+	       (!skb_has_frag_list(skb) || (features & NETIF_F_FRAGLIST));
 }

 static inline int netif_needs_gso(struct net_device *dev, struct sk_buff *skb)
...
@@ -1120,7 +1120,7 @@ extern void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page,
 			    int off, int size);

 #define SKB_PAGE_ASSERT(skb)	BUG_ON(skb_shinfo(skb)->nr_frags)
-#define SKB_FRAG_ASSERT(skb)	BUG_ON(skb_has_frags(skb))
+#define SKB_FRAG_ASSERT(skb)	BUG_ON(skb_has_frag_list(skb))
 #define SKB_LINEAR_ASSERT(skb)	BUG_ON(skb_is_nonlinear(skb))

 #ifdef NET_SKBUFF_DATA_USES_OFFSET
@@ -1784,7 +1784,7 @@ static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len)
 	     skb = skb->prev)

-static inline bool skb_has_frags(const struct sk_buff *skb)
+static inline bool skb_has_frag_list(const struct sk_buff *skb)
 {
 	return skb_shinfo(skb)->frag_list != NULL;
 }
...
@@ -1930,7 +1930,7 @@ static inline int skb_needs_linearize(struct sk_buff *skb,
 				      struct net_device *dev)
 {
 	return skb_is_nonlinear(skb) &&
-	       ((skb_has_frags(skb) && !(dev->features & NETIF_F_FRAGLIST)) ||
+	       ((skb_has_frag_list(skb) && !(dev->features & NETIF_F_FRAGLIST)) ||
		(skb_shinfo(skb)->nr_frags && (!(dev->features & NETIF_F_SG) ||
					      illegal_highdma(dev, skb))));
 }
@@ -3090,7 +3090,7 @@ enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
 	if (!(skb->dev->features & NETIF_F_GRO) || netpoll_rx_on(skb))
 		goto normal;

-	if (skb_is_gso(skb) || skb_has_frags(skb))
+	if (skb_is_gso(skb) || skb_has_frag_list(skb))
 		goto normal;

 	rcu_read_lock();
...
@@ -340,7 +340,7 @@ static void skb_release_data(struct sk_buff *skb)
 			put_page(skb_shinfo(skb)->frags[i].page);
 		}

-		if (skb_has_frags(skb))
+		if (skb_has_frag_list(skb))
 			skb_drop_fraglist(skb);

 		kfree(skb->head);
@@ -759,7 +759,7 @@ struct sk_buff *pskb_copy(struct sk_buff *skb, gfp_t gfp_mask)
 		skb_shinfo(n)->nr_frags = i;
 	}

-	if (skb_has_frags(skb)) {
+	if (skb_has_frag_list(skb)) {
 		skb_shinfo(n)->frag_list = skb_shinfo(skb)->frag_list;
 		skb_clone_fraglist(n);
 	}
@@ -822,7 +822,7 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
 		get_page(skb_shinfo(skb)->frags[i].page);

-	if (skb_has_frags(skb))
+	if (skb_has_frag_list(skb))
 		skb_clone_fraglist(skb);

 	skb_release_data(skb);
@@ -1099,7 +1099,7 @@ int ___pskb_trim(struct sk_buff *skb, unsigned int len)
 		for (; i < nfrags; i++)
 			put_page(skb_shinfo(skb)->frags[i].page);

-		if (skb_has_frags(skb))
+		if (skb_has_frag_list(skb))
 			skb_drop_fraglist(skb);
 		goto done;
 	}
@@ -1194,7 +1194,7 @@ unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta)
 	/* Optimization: no fragments, no reasons to preestimate
 	 * size of pulled pages. Superb.
 	 */
-	if (!skb_has_frags(skb))
+	if (!skb_has_frag_list(skb))
 		goto pull_pages;

 	/* Estimate size of pulled pages. */
@@ -2323,7 +2323,7 @@ unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
 		st->frag_data = NULL;
 	}

-	if (st->root_skb == st->cur_skb && skb_has_frags(st->root_skb)) {
+	if (st->root_skb == st->cur_skb && skb_has_frag_list(st->root_skb)) {
 		st->cur_skb = skb_shinfo(st->root_skb)->frag_list;
 		st->frag_idx = 0;
 		goto next_skb;
@@ -2889,7 +2889,7 @@ int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer)
 		return -ENOMEM;

 	/* Easy case. Most of packets will go this way. */
-	if (!skb_has_frags(skb)) {
+	if (!skb_has_frag_list(skb)) {
 		/* A little of trouble, not enough of space for trailer.
 		 * This should not happen, when stack is tuned to generate
 		 * good frames. OK, on miss we reallocate and reserve even more
@@ -2924,7 +2924,7 @@ int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer)
 		if (skb1->next == NULL && tailbits) {
 			if (skb_shinfo(skb1)->nr_frags ||
-			    skb_has_frags(skb1) ||
+			    skb_has_frag_list(skb1) ||
 			    skb_tailroom(skb1) < tailbits)
 				ntail = tailbits + 128;
 		}
@@ -2933,7 +2933,7 @@ int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer)
 		    skb_cloned(skb1) ||
 		    ntail ||
 		    skb_shinfo(skb1)->nr_frags ||
-		    skb_has_frags(skb1)) {
+		    skb_has_frag_list(skb1)) {
 			struct sk_buff *skb2;

 			/* Fuck, we are miserable poor guys... */
...
@@ -542,7 +542,7 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
 	/* If the first fragment is fragmented itself, we split
 	 * it to two chunks: the first with data and paged part
 	 * and the second, holding only fragments. */
-	if (skb_has_frags(head)) {
+	if (skb_has_frag_list(head)) {
 		struct sk_buff *clone;
 		int i, plen = 0;
...
@@ -487,7 +487,7 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
 	 * LATER: this step can be merged to real generation of fragments,
 	 * we can switch to copy when see the first bad fragment.
 	 */
-	if (skb_has_frags(skb)) {
+	if (skb_has_frag_list(skb)) {
 		struct sk_buff *frag;
 		int first_len = skb_pagelen(skb);
 		int truesizes = 0;
...
@@ -637,7 +637,7 @@ static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
 	}
 	mtu -= hlen + sizeof(struct frag_hdr);

-	if (skb_has_frags(skb)) {
+	if (skb_has_frag_list(skb)) {
 		int first_len = skb_pagelen(skb);
 		int truesizes = 0;
...
@@ -413,7 +413,7 @@ nf_ct_frag6_reasm(struct nf_ct_frag6_queue *fq, struct net_device *dev)
 	/* If the first fragment is fragmented itself, we split
 	 * it to two chunks: the first with data and paged part
 	 * and the second, holding only fragments. */
-	if (skb_has_frags(head)) {
+	if (skb_has_frag_list(head)) {
 		struct sk_buff *clone;
 		int i, plen = 0;
...
@@ -499,7 +499,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
 	/* If the first fragment is fragmented itself, we split
 	 * it to two chunks: the first with data and paged part
 	 * and the second, holding only fragments. */
-	if (skb_has_frags(head)) {
+	if (skb_has_frag_list(head)) {
 		struct sk_buff *clone;
 		int i, plen = 0;
...