Commit 06b4feb3 authored by Jonathan Lemon, committed by Jakub Kicinski

net: group skb_shinfo zerocopy related bits together.

In preparation for expanded zerocopy (TX and RX), move
the zerocopy related bits out of tx_flags into their own
flag word.
Signed-off-by: Jonathan Lemon <jonathan.lemon@gmail.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parent 8c793822
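
For orientation before the per-file hunks below, the net effect on call sites is this before/after pattern (a condensed sketch of the change repeated throughout the diff, not a quote of any single file):

	/* Before: zerocopy state kept in tx_flags alongside the timestamping bits. */
	skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
	skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;

	/* After: zerocopy state lives in the new skb_shinfo(skb)->flags byte;
	 * SKBFL_ZEROCOPY_FRAG combines both bits.
	 */
	skb_shinfo(skb)->flags |= SKBFL_ZEROCOPY_FRAG;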
@@ -723,8 +723,7 @@ static ssize_t tap_get_user(struct tap_queue *q, void *msg_control,
 	/* copy skb_ubuf_info for callback when skb has no error */
 	if (zerocopy) {
 		skb_shinfo(skb)->destructor_arg = msg_control;
-		skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
-		skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
+		skb_shinfo(skb)->flags |= SKBFL_ZEROCOPY_FRAG;
 	} else if (msg_control) {
 		struct ubuf_info *uarg = msg_control;
 		uarg->callback(NULL, uarg, false);
@@ -1815,8 +1815,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
 	/* copy skb_ubuf_info for callback when skb has no error */
 	if (zerocopy) {
 		skb_shinfo(skb)->destructor_arg = msg_control;
-		skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
-		skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
+		skb_shinfo(skb)->flags |= SKBFL_ZEROCOPY_FRAG;
 	} else if (msg_control) {
 		struct ubuf_info *uarg = msg_control;
 		uarg->callback(NULL, uarg, false);
@@ -47,7 +47,7 @@
 /* Number of bytes allowed on the internal guest Rx queue. */
 #define XENVIF_RX_QUEUE_BYTES (XEN_NETIF_RX_RING_SIZE/2 * PAGE_SIZE)

-/* This function is used to set SKBTX_DEV_ZEROCOPY as well as
+/* This function is used to set SKBFL_ZEROCOPY_ENABLE as well as
  * increasing the inflight counter. We need to increase the inflight
  * counter because core driver calls into xenvif_zerocopy_callback
  * which calls xenvif_skb_zerocopy_complete.
@@ -55,7 +55,7 @@
 void xenvif_skb_zerocopy_prepare(struct xenvif_queue *queue,
 				 struct sk_buff *skb)
 {
-	skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
+	skb_shinfo(skb)->flags |= SKBFL_ZEROCOPY_ENABLE;
 	atomic_inc(&queue->inflight_packets);
 }
@@ -430,28 +430,32 @@ enum {
 	/* device driver is going to provide hardware time stamp */
 	SKBTX_IN_PROGRESS = 1 << 2,

-	/* device driver supports TX zero-copy buffers */
-	SKBTX_DEV_ZEROCOPY = 1 << 3,
-
 	/* generate wifi status information (where possible) */
 	SKBTX_WIFI_STATUS = 1 << 4,

-	/* This indicates at least one fragment might be overwritten
-	 * (as in vmsplice(), sendfile() ...)
-	 * If we need to compute a TX checksum, we'll need to copy
-	 * all frags to avoid possible bad checksum
-	 */
-	SKBTX_SHARED_FRAG = 1 << 5,
-
 	/* generate software time stamp when entering packet scheduling */
 	SKBTX_SCHED_TSTAMP = 1 << 6,
 };

-#define SKBTX_ZEROCOPY_FRAG	(SKBTX_DEV_ZEROCOPY | SKBTX_SHARED_FRAG)
 #define SKBTX_ANY_SW_TSTAMP	(SKBTX_SW_TSTAMP | \
 				 SKBTX_SCHED_TSTAMP)
 #define SKBTX_ANY_TSTAMP	(SKBTX_HW_TSTAMP | SKBTX_ANY_SW_TSTAMP)

+/* Definitions for flags in struct skb_shared_info */
+enum {
+	/* use zcopy routines */
+	SKBFL_ZEROCOPY_ENABLE = BIT(0),
+
+	/* This indicates at least one fragment might be overwritten
+	 * (as in vmsplice(), sendfile() ...)
+	 * If we need to compute a TX checksum, we'll need to copy
+	 * all frags to avoid possible bad checksum
+	 */
+	SKBFL_SHARED_FRAG = BIT(1),
+};
+
+#define SKBFL_ZEROCOPY_FRAG	(SKBFL_ZEROCOPY_ENABLE | SKBFL_SHARED_FRAG)
+
 /*
  * The callback notifies userspace to release buffers when skb DMA is done in
  * lower device, the skb last reference should be 0 when calling this.
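
Since BIT(n) expands to (1UL << (n)), the new flag word values work out to SKBFL_ZEROCOPY_ENABLE = 0x1, SKBFL_SHARED_FRAG = 0x2, and SKBFL_ZEROCOPY_FRAG = 0x3, which fit comfortably in the 8-bit flags field added to skb_shared_info in the next hunk. An illustrative check of the two bits independently (not part of this commit; both helpers are hypothetical):

	if (skb_shinfo(skb)->flags & SKBFL_ZEROCOPY_ENABLE)
		complete_via_ubuf_info(skb);	/* hypothetical: run the uarg completion path */
	if (skb_shinfo(skb)->flags & SKBFL_SHARED_FRAG)
		copy_frags_before_csum(skb);	/* hypothetical: copy frags before checksumming */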
@@ -506,7 +510,7 @@ int skb_zerocopy_iter_stream(struct sock *sk, struct sk_buff *skb,
  * the end of the header data, ie. at skb->end.
  */
 struct skb_shared_info {
-	__u8		__unused;
+	__u8		flags;
 	__u8		meta_len;
 	__u8		nr_frags;
 	__u8		tx_flags;
@@ -1433,7 +1437,7 @@ static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb)
 static inline struct ubuf_info *skb_zcopy(struct sk_buff *skb)
 {
-	bool is_zcopy = skb && skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY;
+	bool is_zcopy = skb && skb_shinfo(skb)->flags & SKBFL_ZEROCOPY_ENABLE;

 	return is_zcopy ? skb_uarg(skb) : NULL;
 }
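
skb_uarg() is not touched by this diff; at the time of this change it is simply a cast of destructor_arg, roughly the following (defined elsewhere in skbuff.h, quoted from memory rather than from this commit):

	#define skb_uarg(SKB)	((struct ubuf_info *)(skb_shinfo(SKB)->destructor_arg))

So skb_zcopy() only returns a meaningful pointer when SKBFL_ZEROCOPY_ENABLE guarantees that destructor_arg actually holds a ubuf_info.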
@@ -1452,14 +1456,14 @@ static inline void skb_zcopy_set(struct sk_buff *skb, struct ubuf_info *uarg,
 		else
 			skb_zcopy_get(uarg);
 		skb_shinfo(skb)->destructor_arg = uarg;
-		skb_shinfo(skb)->tx_flags |= SKBTX_ZEROCOPY_FRAG;
+		skb_shinfo(skb)->flags |= SKBFL_ZEROCOPY_FRAG;
 	}
 }

 static inline void skb_zcopy_set_nouarg(struct sk_buff *skb, void *val)
 {
 	skb_shinfo(skb)->destructor_arg = (void *)((uintptr_t) val | 0x1UL);
-	skb_shinfo(skb)->tx_flags |= SKBTX_ZEROCOPY_FRAG;
+	skb_shinfo(skb)->flags |= SKBFL_ZEROCOPY_FRAG;
 }

 static inline bool skb_zcopy_is_nouarg(struct sk_buff *skb)
@@ -1497,7 +1501,7 @@ static inline void skb_zcopy_clear(struct sk_buff *skb, bool zerocopy_success)
 		if (!skb_zcopy_is_nouarg(skb))
 			uarg->callback(skb, uarg, zerocopy_success);

-		skb_shinfo(skb)->tx_flags &= ~SKBTX_ZEROCOPY_FRAG;
+		skb_shinfo(skb)->flags &= ~SKBFL_ZEROCOPY_FRAG;
 	}
 }
@@ -3323,7 +3327,7 @@ static inline int skb_linearize(struct sk_buff *skb)
 static inline bool skb_has_shared_frag(const struct sk_buff *skb)
 {
 	return skb_is_nonlinear(skb) &&
-	       skb_shinfo(skb)->tx_flags & SKBTX_SHARED_FRAG;
+	       skb_shinfo(skb)->flags & SKBFL_SHARED_FRAG;
 }

 /**
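
The usual consumer of skb_has_shared_frag() is checksum code, which must not compute a checksum over frags that userspace might still rewrite. A caller pattern along these lines (illustrative only, not part of this diff) copies shared frags into the kernel first:

	if (skb_has_shared_frag(skb)) {
		ret = __skb_linearize(skb);	/* pulls all frags into the linear area */
		if (ret)
			goto out;
	}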
@@ -1327,7 +1327,7 @@ static int skb_zerocopy_clone(struct sk_buff *nskb, struct sk_buff *orig,
  * @skb: the skb to modify
  * @gfp_mask: allocation priority
  *
- * This must be called on SKBTX_DEV_ZEROCOPY skb.
+ * This must be called on skb with SKBFL_ZEROCOPY_ENABLE.
  * It will copy all frags into kernel and drop the reference
  * to userspace pages.
  *
@@ -3264,8 +3264,7 @@ void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len)
 {
 	int pos = skb_headlen(skb);

-	skb_shinfo(skb1)->tx_flags |= skb_shinfo(skb)->tx_flags &
-				      SKBTX_SHARED_FRAG;
+	skb_shinfo(skb1)->flags |= skb_shinfo(skb)->flags & SKBFL_SHARED_FRAG;
 	skb_zerocopy_clone(skb1, skb, 0);
 	if (len < pos)	/* Split line is inside header. */
 		skb_split_inside_header(skb, skb1, len, pos);
@@ -3954,8 +3953,8 @@ struct sk_buff *skb_segment(struct sk_buff *head_skb,
 			skb_copy_from_linear_data_offset(head_skb, offset,
 							 skb_put(nskb, hsize), hsize);

-			skb_shinfo(nskb)->tx_flags |= skb_shinfo(head_skb)->tx_flags &
-						      SKBTX_SHARED_FRAG;
+			skb_shinfo(nskb)->flags |= skb_shinfo(head_skb)->flags &
+						   SKBFL_SHARED_FRAG;

 			if (skb_orphan_frags(frag_skb, GFP_ATOMIC) ||
 			    skb_zerocopy_clone(nskb, frag_skb, GFP_ATOMIC))
@@ -1010,7 +1010,7 @@ struct sk_buff *tcp_build_frag(struct sock *sk, int size_goal, int flags,
 	}

 	if (!(flags & MSG_NO_SHARED_FRAGS))
-		skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
+		skb_shinfo(skb)->flags |= SKBFL_SHARED_FRAG;

 	skb->len += copy;
 	skb->data_len += copy;
@@ -786,7 +786,7 @@ static ssize_t kcm_sendpage(struct socket *sock, struct page *page,
 	if (skb_can_coalesce(skb, i, page, offset)) {
 		skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], size);
-		skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
+		skb_shinfo(skb)->flags |= SKBFL_SHARED_FRAG;
 		goto coalesced;
 	}

@@ -834,7 +834,7 @@ static ssize_t kcm_sendpage(struct socket *sock, struct page *page,
 	get_page(page);
 	skb_fill_page_desc(skb, i, page, offset, size);
-	skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
+	skb_shinfo(skb)->flags |= SKBFL_SHARED_FRAG;

 coalesced:
 	skb->len += size;