Commit af046fd1 authored by Jakub Kicinski

Merge branch 'for-uring-ubufops' into HEAD

Pavel Begunkov says:

====================
implement io_uring notification (ubuf_info) stacking (net part)

To provide per-request buffer notifications, each zerocopy io_uring
send request allocates a new ubuf_info. However, since an skb can
carry only one uarg, this may force the stack to create many small
skbs, hurting performance in many ways.

The patchset implements notification (i.e. io_uring's ubuf_info
extension) stacking: it links ubuf_info's into a list, allowing an
skb to carry multiple of them.

liburing/examples/send-zerocopy shows up to a 6x performance improvement
for TCP at 4KB per send, bringing it level with MSG_ZEROCOPY. Without
the patchset, much larger sends are needed to utilise the full potential.

bytes  | before (Kqps) | after (Kqps)
1200   | 195           | 1023
4000   | 193           | 1386
8000   | 154           | 1058
====================

Link: https://lore.kernel.org/all/cover.1713369317.git.asml.silence@gmail.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents 65f1df11 65bada80
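
The mechanical core of the net part, condensed into one sketch (every
identifier below appears in the hunks that follow; this summarises the
diff rather than adding anything new):

	/* a const ops table replaces the bare function pointer ... */
	struct ubuf_info_ops {
		void (*complete)(struct sk_buff *, struct ubuf_info *,
				 bool zerocopy_success);
		/* has to be compatible with skb_zcopy_set() */
		int (*link_skb)(struct sk_buff *skb, struct ubuf_info *uarg);
	};

	/* ... each provider publishes one ... */
	const struct ubuf_info_ops msg_zerocopy_ubuf_ops = {
		.complete = msg_zerocopy_complete,
	};

	/* ... assigns it at setup time ... */
	uarg->ubuf.ops = &msg_zerocopy_ubuf_ops;

	/* ... and every completion call site is converted */
	uarg->ops->complete(skb, uarg, zerocopy_success);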
@@ -754,7 +754,7 @@ static ssize_t tap_get_user(struct tap_queue *q, void *msg_control,
 		skb_zcopy_init(skb, msg_control);
 	} else if (msg_control) {
 		struct ubuf_info *uarg = msg_control;
-		uarg->callback(NULL, uarg, false);
+		uarg->ops->complete(NULL, uarg, false);
 	}
 	dev_queue_xmit(skb);
...
@@ -1906,7 +1906,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
 		skb_zcopy_init(skb, msg_control);
 	} else if (msg_control) {
 		struct ubuf_info *uarg = msg_control;
-		uarg->callback(NULL, uarg, false);
+		uarg->ops->complete(NULL, uarg, false);
 	}
 	skb_reset_network_header(skb);
...
@@ -390,9 +390,8 @@ bool xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb);
 void xenvif_carrier_on(struct xenvif *vif);
 
-/* Callback from stack when TX packet can be released */
-void xenvif_zerocopy_callback(struct sk_buff *skb, struct ubuf_info *ubuf,
-			      bool zerocopy_success);
+/* Callbacks from stack when TX packet can be released */
+extern const struct ubuf_info_ops xenvif_ubuf_ops;
 
 static inline pending_ring_idx_t nr_pending_reqs(struct xenvif_queue *queue)
 {
...
@@ -593,7 +593,7 @@ int xenvif_init_queue(struct xenvif_queue *queue)
 	for (i = 0; i < MAX_PENDING_REQS; i++) {
 		queue->pending_tx_info[i].callback_struct = (struct ubuf_info_msgzc)
-			{ { .callback = xenvif_zerocopy_callback },
+			{ { .ops = &xenvif_ubuf_ops },
 			  { { .ctx = NULL,
 			      .desc = i } } };
 		queue->grant_tx_handle[i] = NETBACK_INVALID_HANDLE;
...
@@ -1157,7 +1157,7 @@ static int xenvif_handle_frag_list(struct xenvif_queue *queue, struct sk_buff *s
 	uarg = skb_shinfo(skb)->destructor_arg;
 	/* increase inflight counter to offset decrement in callback */
 	atomic_inc(&queue->inflight_packets);
-	uarg->callback(NULL, uarg, true);
+	uarg->ops->complete(NULL, uarg, true);
 	skb_shinfo(skb)->destructor_arg = NULL;
 
 	/* Fill the skb with the new (local) frags. */
@@ -1279,8 +1279,9 @@ static int xenvif_tx_submit(struct xenvif_queue *queue)
 	return work_done;
 }
 
-void xenvif_zerocopy_callback(struct sk_buff *skb, struct ubuf_info *ubuf_base,
-			      bool zerocopy_success)
+static void xenvif_zerocopy_callback(struct sk_buff *skb,
+				     struct ubuf_info *ubuf_base,
+				     bool zerocopy_success)
 {
 	unsigned long flags;
 	pending_ring_idx_t index;
@@ -1313,6 +1314,10 @@ void xenvif_zerocopy_callback(struct sk_buff *skb, struct ubuf_info *ubuf_base,
 	xenvif_skb_zerocopy_complete(queue);
 }
 
+const struct ubuf_info_ops xenvif_ubuf_ops = {
+	.complete = xenvif_zerocopy_callback,
+};
+
 static inline void xenvif_tx_dealloc_action(struct xenvif_queue *queue)
 {
 	struct gnttab_unmap_grant_ref *gop;
...
@@ -380,7 +380,7 @@ static void vhost_zerocopy_signal_used(struct vhost_net *net,
 	}
 }
 
-static void vhost_zerocopy_callback(struct sk_buff *skb,
+static void vhost_zerocopy_complete(struct sk_buff *skb,
 				    struct ubuf_info *ubuf_base, bool success)
 {
 	struct ubuf_info_msgzc *ubuf = uarg_to_msgzc(ubuf_base);
@@ -408,6 +408,10 @@ static void vhost_zerocopy_callback(struct sk_buff *skb,
 	rcu_read_unlock_bh();
 }
 
+static const struct ubuf_info_ops vhost_ubuf_ops = {
+	.complete = vhost_zerocopy_complete,
+};
+
 static inline unsigned long busy_clock(void)
 {
 	return local_clock() >> 10;
@@ -879,7 +883,7 @@ static void handle_tx_zerocopy(struct vhost_net *net, struct socket *sock)
 			vq->heads[nvq->upend_idx].len = VHOST_DMA_IN_PROGRESS;
 			ubuf->ctx = nvq->ubufs;
 			ubuf->desc = nvq->upend_idx;
-			ubuf->ubuf.callback = vhost_zerocopy_callback;
+			ubuf->ubuf.ops = &vhost_ubuf_ops;
 			ubuf->ubuf.flags = SKBFL_ZEROCOPY_FRAG;
 			refcount_set(&ubuf->ubuf.refcnt, 1);
 			msg.msg_control = &ctl;
...
@@ -527,6 +527,13 @@ enum {
 #define SKBFL_ALL_ZEROCOPY	(SKBFL_ZEROCOPY_FRAG | SKBFL_PURE_ZEROCOPY | \
 				 SKBFL_DONT_ORPHAN | SKBFL_MANAGED_FRAG_REFS)
 
+struct ubuf_info_ops {
+	void (*complete)(struct sk_buff *, struct ubuf_info *,
+			 bool zerocopy_success);
+	/* has to be compatible with skb_zcopy_set() */
+	int (*link_skb)(struct sk_buff *skb, struct ubuf_info *uarg);
+};
+
 /*
  * The callback notifies userspace to release buffers when skb DMA is done in
  * lower device, the skb last reference should be 0 when calling this.
@@ -536,8 +543,7 @@ enum {
  * The desc field is used to track userspace buffer index.
  */
 struct ubuf_info {
-	void (*callback)(struct sk_buff *, struct ubuf_info *,
-			 bool zerocopy_success);
+	const struct ubuf_info_ops *ops;
 	refcount_t refcnt;
 	u8 flags;
 };
@@ -1671,14 +1677,13 @@ static inline void skb_set_end_offset(struct sk_buff *skb, unsigned int offset)
 }
 #endif
 
+extern const struct ubuf_info_ops msg_zerocopy_ubuf_ops;
+
 struct ubuf_info *msg_zerocopy_realloc(struct sock *sk, size_t size,
 				       struct ubuf_info *uarg);
 void msg_zerocopy_put_abort(struct ubuf_info *uarg, bool have_uref);
-void msg_zerocopy_callback(struct sk_buff *skb, struct ubuf_info *uarg,
-			   bool success);
 
 int __zerocopy_sg_from_iter(struct msghdr *msg, struct sock *sk,
 			    struct sk_buff *skb, struct iov_iter *from,
 			    size_t length);
@@ -1766,13 +1771,13 @@ static inline void *skb_zcopy_get_nouarg(struct sk_buff *skb)
 static inline void net_zcopy_put(struct ubuf_info *uarg)
 {
 	if (uarg)
-		uarg->callback(NULL, uarg, true);
+		uarg->ops->complete(NULL, uarg, true);
 }
 
 static inline void net_zcopy_put_abort(struct ubuf_info *uarg, bool have_uref)
 {
 	if (uarg) {
-		if (uarg->callback == msg_zerocopy_callback)
+		if (uarg->ops == &msg_zerocopy_ubuf_ops)
 			msg_zerocopy_put_abort(uarg, have_uref);
 		else if (have_uref)
 			net_zcopy_put(uarg);
@@ -1786,7 +1791,7 @@ static inline void skb_zcopy_clear(struct sk_buff *skb, bool zerocopy_success)
 	if (uarg) {
 		if (!skb_zcopy_is_nouarg(skb))
-			uarg->callback(skb, uarg, zerocopy_success);
+			uarg->ops->complete(skb, uarg, zerocopy_success);
 		skb_shinfo(skb)->flags &= ~SKBFL_ALL_ZEROCOPY;
 	}
...
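
With the function pointer gone, "which provider owns this uarg?" is
answered by comparing ops-table addresses, as net_zcopy_put_abort()
above now does; exporting msg_zerocopy_ubuf_ops keeps that working
from modules. A minimal usage sketch (the wrapper helper is
hypothetical, the comparison itself is from the hunk above):

	/* hypothetical helper, illustrating the identity idiom */
	static bool uarg_is_msg_zerocopy(const struct ubuf_info *uarg)
	{
		/* the ops table address identifies the provider,
		 * replacing uarg->callback == msg_zerocopy_callback */
		return uarg->ops == &msg_zerocopy_ubuf_ops;
	}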
@@ -24,7 +24,7 @@ static void io_notif_complete_tw_ext(struct io_kiocb *notif, struct io_tw_state
 	io_req_task_complete(notif, ts);
 }
 
-static void io_tx_ubuf_callback(struct sk_buff *skb, struct ubuf_info *uarg,
+static void io_tx_ubuf_complete(struct sk_buff *skb, struct ubuf_info *uarg,
 				bool success)
 {
 	struct io_notif_data *nd = container_of(uarg, struct io_notif_data, uarg);
@@ -45,19 +45,27 @@ static void io_tx_ubuf_callback_ext(struct sk_buff *skb, struct ubuf_info *uarg,
 		else if (!success && !nd->zc_copied)
 			WRITE_ONCE(nd->zc_copied, true);
 	}
-	io_tx_ubuf_callback(skb, uarg, success);
+	io_tx_ubuf_complete(skb, uarg, success);
 }
 
+static const struct ubuf_info_ops io_ubuf_ops = {
+	.complete = io_tx_ubuf_complete,
+};
+
+static const struct ubuf_info_ops io_ubuf_ops_ext = {
+	.complete = io_tx_ubuf_callback_ext,
+};
+
 void io_notif_set_extended(struct io_kiocb *notif)
 {
 	struct io_notif_data *nd = io_notif_to_data(notif);
 
-	if (nd->uarg.callback != io_tx_ubuf_callback_ext) {
+	if (nd->uarg.ops != &io_ubuf_ops_ext) {
 		nd->account_pages = 0;
 		nd->zc_report = false;
 		nd->zc_used = false;
 		nd->zc_copied = false;
-		nd->uarg.callback = io_tx_ubuf_callback_ext;
+		nd->uarg.ops = &io_ubuf_ops_ext;
 		notif->io_task_work.func = io_notif_complete_tw_ext;
 	}
 }
@@ -80,7 +88,7 @@ struct io_kiocb *io_alloc_notif(struct io_ring_ctx *ctx)
 	nd = io_notif_to_data(notif);
 	nd->uarg.flags = IO_NOTIF_UBUF_FLAGS;
-	nd->uarg.callback = io_tx_ubuf_callback;
+	nd->uarg.ops = &io_ubuf_ops;
 	refcount_set(&nd->uarg.refcnt, 1);
 	return notif;
 }
@@ -1652,7 +1652,7 @@ static struct ubuf_info *msg_zerocopy_alloc(struct sock *sk, size_t size)
 		return NULL;
 	}
 
-	uarg->ubuf.callback = msg_zerocopy_callback;
+	uarg->ubuf.ops = &msg_zerocopy_ubuf_ops;
 	uarg->id = ((u32)atomic_inc_return(&sk->sk_zckey)) - 1;
 	uarg->len = 1;
 	uarg->bytelen = size;
@@ -1678,7 +1678,7 @@ struct ubuf_info *msg_zerocopy_realloc(struct sock *sk, size_t size,
 	u32 bytelen, next;
 
 	/* there might be non MSG_ZEROCOPY users */
-	if (uarg->callback != msg_zerocopy_callback)
+	if (uarg->ops != &msg_zerocopy_ubuf_ops)
 		return NULL;
 
 	/* realloc only when socket is locked (TCP, UDP cork),
@@ -1789,8 +1789,8 @@ static void __msg_zerocopy_callback(struct ubuf_info_msgzc *uarg)
 	sock_put(sk);
 }
 
-void msg_zerocopy_callback(struct sk_buff *skb, struct ubuf_info *uarg,
-			   bool success)
+static void msg_zerocopy_complete(struct sk_buff *skb, struct ubuf_info *uarg,
+				  bool success)
 {
 	struct ubuf_info_msgzc *uarg_zc = uarg_to_msgzc(uarg);
@@ -1799,7 +1799,6 @@ void msg_zerocopy_callback(struct sk_buff *skb, struct ubuf_info *uarg,
 	if (refcount_dec_and_test(&uarg->refcnt))
 		__msg_zerocopy_callback(uarg_zc);
 }
-EXPORT_SYMBOL_GPL(msg_zerocopy_callback);
 
 void msg_zerocopy_put_abort(struct ubuf_info *uarg, bool have_uref)
 {
@@ -1809,10 +1808,15 @@ void msg_zerocopy_put_abort(struct ubuf_info *uarg, bool have_uref)
 	uarg_to_msgzc(uarg)->len--;
 
 	if (have_uref)
-		msg_zerocopy_callback(NULL, uarg, true);
+		msg_zerocopy_complete(NULL, uarg, true);
 }
 EXPORT_SYMBOL_GPL(msg_zerocopy_put_abort);
 
+const struct ubuf_info_ops msg_zerocopy_ubuf_ops = {
+	.complete = msg_zerocopy_complete,
+};
+EXPORT_SYMBOL_GPL(msg_zerocopy_ubuf_ops);
+
 int skb_zerocopy_iter_stream(struct sock *sk, struct sk_buff *skb,
 			     struct msghdr *msg, int len,
 			     struct ubuf_info *uarg)
@@ -1820,11 +1824,18 @@ int skb_zerocopy_iter_stream(struct sock *sk, struct sk_buff *skb,
 	struct ubuf_info *orig_uarg = skb_zcopy(skb);
 	int err, orig_len = skb->len;
 
-	/* An skb can only point to one uarg. This edge case happens when
-	 * TCP appends to an skb, but zerocopy_realloc triggered a new alloc.
-	 */
-	if (orig_uarg && uarg != orig_uarg)
-		return -EEXIST;
+	if (uarg->ops->link_skb) {
+		err = uarg->ops->link_skb(skb, uarg);
+		if (err)
+			return err;
+	} else {
+		/* An skb can only point to one uarg. This edge case happens
+		 * when TCP appends to an skb, but zerocopy_realloc triggered
+		 * a new alloc.
+		 */
+		if (orig_uarg && uarg != orig_uarg)
+			return -EEXIST;
+	}
 
 	err = __zerocopy_sg_from_iter(msg, sk, skb, &msg->msg_iter, len);
 	if (err == -EFAULT || (err == -EMSGSIZE && skb->len == orig_len)) {
@@ -1838,7 +1849,8 @@ int skb_zerocopy_iter_stream(struct sock *sk, struct sk_buff *skb,
 		return err;
 	}
 
-	skb_zcopy_set(skb, uarg, NULL);
+	if (!uarg->ops->link_skb)
+		skb_zcopy_set(skb, uarg, NULL);
 	return skb->len - orig_len;
 }
 EXPORT_SYMBOL_GPL(skb_zerocopy_iter_stream);
...
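
The net part only adds the link_skb hook; the implementation that
actually stacks io_uring notifications lands with the io_uring half of
the series. A hypothetical sketch of the contract a link_skb must
honour, given the "has to be compatible with skb_zcopy_set()" comment
and the bypass in skb_zerocopy_iter_stream() above (the example_*
names are invented; skb_zcopy(), skb_zcopy_get() and skb_zcopy_init()
are existing helpers):

	/* sketch only -- not the io_uring implementation */
	static int example_link_skb(struct sk_buff *skb, struct ubuf_info *uarg)
	{
		struct ubuf_info *orig = skb_zcopy(skb);

		if (orig && orig != uarg)
			/* the stacking policy lives here: chain uarg
			 * behind orig, or -EEXIST when it cannot */
			return example_try_chain(orig, uarg);	/* invented */

		if (!orig) {
			/* leave the skb as skb_zcopy_set() would:
			 * take a reference, then attach the uarg */
			skb_zcopy_get(uarg);
			skb_zcopy_init(skb, uarg);
		}
		return 0;
	}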