Commit f5bd9138 authored by Jakub Kicinski, committed by Jeff Kirsher

net: xsk: add a simple buffer reuse queue

XSK UMEM is strongly single producer single consumer so reuse of
frames is challenging.  Add a simple "stash" of FILL packets to
reuse for drivers to optionally make use of.  This is useful
when driver has to free (ndo_stop) or resize a ring with an active
AF_XDP ZC socket.
Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
Tested-by: Andrew Bowers <andrewx.bowers@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
parent 9dbb1370
...@@ -21,6 +21,12 @@ struct xdp_umem_page { ...@@ -21,6 +21,12 @@ struct xdp_umem_page {
dma_addr_t dma; dma_addr_t dma;
}; };
/* Stash of FILL-ring frame addresses that a driver can hand back for
 * reuse (e.g. across ndo_stop or a ring resize) without going through
 * the single-producer/single-consumer FILL queue.
 */
struct xdp_umem_fq_reuse {
u32 nentries;	/* capacity of handles[]; see xsk_reuseq_prepare() */
u32 length;	/* number of currently stashed addresses */
u64 handles[];	/* flexible array of UMEM frame addresses */
};
struct xdp_umem { struct xdp_umem {
struct xsk_queue *fq; struct xsk_queue *fq;
struct xsk_queue *cq; struct xsk_queue *cq;
...@@ -37,6 +43,7 @@ struct xdp_umem { ...@@ -37,6 +43,7 @@ struct xdp_umem {
struct page **pgs; struct page **pgs;
u32 npgs; u32 npgs;
struct net_device *dev; struct net_device *dev;
struct xdp_umem_fq_reuse *fq_reuse;
u16 queue_id; u16 queue_id;
bool zc; bool zc;
spinlock_t xsk_list_lock; spinlock_t xsk_list_lock;
...@@ -75,6 +82,10 @@ void xsk_umem_discard_addr(struct xdp_umem *umem); ...@@ -75,6 +82,10 @@ void xsk_umem_discard_addr(struct xdp_umem *umem);
void xsk_umem_complete_tx(struct xdp_umem *umem, u32 nb_entries); void xsk_umem_complete_tx(struct xdp_umem *umem, u32 nb_entries);
bool xsk_umem_consume_tx(struct xdp_umem *umem, dma_addr_t *dma, u32 *len); bool xsk_umem_consume_tx(struct xdp_umem *umem, dma_addr_t *dma, u32 *len);
void xsk_umem_consume_tx_done(struct xdp_umem *umem); void xsk_umem_consume_tx_done(struct xdp_umem *umem);
/* Allocate a reuse queue with room for at least @nentries addresses;
 * returns NULL on overflow or allocation failure.
 */
struct xdp_umem_fq_reuse *xsk_reuseq_prepare(u32 nentries);
/* Install @newq on @umem, migrating any stashed addresses from the old
 * queue. Returns the queue the caller must free with xsk_reuseq_free()
 * (the old one, or @newq itself if it was too small), or NULL.
 */
struct xdp_umem_fq_reuse *xsk_reuseq_swap(struct xdp_umem *umem,
struct xdp_umem_fq_reuse *newq);
/* Release a queue previously returned by prepare/swap. */
void xsk_reuseq_free(struct xdp_umem_fq_reuse *rq);
static inline char *xdp_umem_get_data(struct xdp_umem *umem, u64 addr) static inline char *xdp_umem_get_data(struct xdp_umem *umem, u64 addr)
{ {
...@@ -85,6 +96,35 @@ static inline dma_addr_t xdp_umem_get_dma(struct xdp_umem *umem, u64 addr) ...@@ -85,6 +96,35 @@ static inline dma_addr_t xdp_umem_get_dma(struct xdp_umem *umem, u64 addr)
{ {
return umem->pages[addr >> PAGE_SHIFT].dma + (addr & (PAGE_SIZE - 1)); return umem->pages[addr >> PAGE_SHIFT].dma + (addr & (PAGE_SIZE - 1));
} }
/* Reuse-queue aware version of FILL queue helpers */
static inline u64 *xsk_umem_peek_addr_rq(struct xdp_umem *umem, u64 *addr)
{
	struct xdp_umem_fq_reuse *rq = umem->fq_reuse;

	/* Serve from the reuse stash first; fall back to the FILL queue
	 * only when the stash is empty.
	 */
	if (rq->length) {
		*addr = rq->handles[rq->length - 1];
		return addr;
	}

	return xsk_umem_peek_addr(umem, addr);
}
/* Consume the address returned by xsk_umem_peek_addr_rq(): pop it from
 * the reuse stash if that is where it came from, otherwise advance the
 * FILL queue.
 */
static inline void xsk_umem_discard_addr_rq(struct xdp_umem *umem)
{
	struct xdp_umem_fq_reuse *rq = umem->fq_reuse;

	if (rq->length)
		rq->length--;
	else
		xsk_umem_discard_addr(umem);
}
/* Stash @addr for later reuse instead of returning it via the FILL ring.
 * NOTE(review): no bounds check against rq->nentries — presumably the
 * driver sizes the queue to the ring so it cannot overflow; confirm at
 * call sites.
 */
static inline void xsk_umem_fq_reuse(struct xdp_umem *umem, u64 addr)
{
struct xdp_umem_fq_reuse *rq = umem->fq_reuse;
rq->handles[rq->length++] = addr;
}
#else #else
static inline int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp) static inline int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{ {
...@@ -128,6 +168,21 @@ static inline void xsk_umem_consume_tx_done(struct xdp_umem *umem) ...@@ -128,6 +168,21 @@ static inline void xsk_umem_consume_tx_done(struct xdp_umem *umem)
{ {
} }
/* Stub for !CONFIG_XDP_SOCKETS builds. */
static inline struct xdp_umem_fq_reuse *xsk_reuseq_prepare(u32 nentries)
{
return NULL;
}
/* Stub for !CONFIG_XDP_SOCKETS builds. */
static inline struct xdp_umem_fq_reuse *xsk_reuseq_swap(
struct xdp_umem *umem,
struct xdp_umem_fq_reuse *newq)
{
return NULL;
}
/* Stub for !CONFIG_XDP_SOCKETS builds. */
static inline void xsk_reuseq_free(struct xdp_umem_fq_reuse *rq)
{
}
static inline char *xdp_umem_get_data(struct xdp_umem *umem, u64 addr) static inline char *xdp_umem_get_data(struct xdp_umem *umem, u64 addr)
{ {
return NULL; return NULL;
...@@ -137,6 +192,20 @@ static inline dma_addr_t xdp_umem_get_dma(struct xdp_umem *umem, u64 addr) ...@@ -137,6 +192,20 @@ static inline dma_addr_t xdp_umem_get_dma(struct xdp_umem *umem, u64 addr)
{ {
return 0; return 0;
} }
/* Stub for !CONFIG_XDP_SOCKETS builds. */
static inline u64 *xsk_umem_peek_addr_rq(struct xdp_umem *umem, u64 *addr)
{
return NULL;
}
/* Stub for !CONFIG_XDP_SOCKETS builds. */
static inline void xsk_umem_discard_addr_rq(struct xdp_umem *umem)
{
}
/* Stub for !CONFIG_XDP_SOCKETS builds. */
static inline void xsk_umem_fq_reuse(struct xdp_umem *umem, u64 addr)
{
}
#endif /* CONFIG_XDP_SOCKETS */ #endif /* CONFIG_XDP_SOCKETS */
#endif /* _LINUX_XDP_SOCK_H */ #endif /* _LINUX_XDP_SOCK_H */
...@@ -165,6 +165,8 @@ static void xdp_umem_release(struct xdp_umem *umem) ...@@ -165,6 +165,8 @@ static void xdp_umem_release(struct xdp_umem *umem)
umem->cq = NULL; umem->cq = NULL;
} }
xsk_reuseq_destroy(umem);
xdp_umem_unpin_pages(umem); xdp_umem_unpin_pages(umem);
task = get_pid_task(umem->pid, PIDTYPE_PID); task = get_pid_task(umem->pid, PIDTYPE_PID);
......
...@@ -3,7 +3,9 @@ ...@@ -3,7 +3,9 @@
* Copyright(c) 2018 Intel Corporation. * Copyright(c) 2018 Intel Corporation.
*/ */
#include <linux/log2.h>
#include <linux/slab.h> #include <linux/slab.h>
#include <linux/overflow.h>
#include "xsk_queue.h" #include "xsk_queue.h"
...@@ -62,3 +64,56 @@ void xskq_destroy(struct xsk_queue *q) ...@@ -62,3 +64,56 @@ void xskq_destroy(struct xsk_queue *q)
page_frag_free(q->ring); page_frag_free(q->ring);
kfree(q); kfree(q);
} }
/* Allocate a frame-reuse queue able to hold at least @nentries addresses.
 * The capacity is rounded up to a power of two. Returns NULL when
 * @nentries is zero, when rounding up would overflow u32, or when the
 * allocation fails. The caller owns the result and eventually frees it
 * with xsk_reuseq_free() (directly, or via the value xsk_reuseq_swap()
 * hands back).
 */
struct xdp_umem_fq_reuse *xsk_reuseq_prepare(u32 nentries)
{
	struct xdp_umem_fq_reuse *newq;

	/* roundup_pow_of_two(0) is undefined (shift by the word width in
	 * log2.h), so reject an empty request up front.
	 */
	if (!nentries)
		return NULL;

	/* Check for overflow */
	if (nentries > (u32)roundup_pow_of_two(nentries))
		return NULL;
	nentries = roundup_pow_of_two(nentries);

	/* struct_size() saturates on overflow, making kvmalloc() fail. */
	newq = kvmalloc(struct_size(newq, handles, nentries), GFP_KERNEL);
	if (!newq)
		return NULL;
	/* Zero only the header; handles[] is filled before it is read. */
	memset(newq, 0, offsetof(typeof(*newq), handles));

	newq->nentries = nentries;
	return newq;
}
EXPORT_SYMBOL_GPL(xsk_reuseq_prepare);
/* Install @newq as @umem's reuse queue, carrying over any addresses
 * stashed in the old queue. Ownership: the returned pointer is what the
 * caller must free with xsk_reuseq_free() — the displaced old queue on
 * success, @newq itself if it is too small to hold the old contents, or
 * NULL if there was no old queue.
 * NOTE(review): no locking here — presumably callers serialize against
 * the datapath (e.g. the ring is quiesced); confirm at call sites.
 */
struct xdp_umem_fq_reuse *xsk_reuseq_swap(struct xdp_umem *umem,
struct xdp_umem_fq_reuse *newq)
{
struct xdp_umem_fq_reuse *oldq = umem->fq_reuse;
if (!oldq) {
/* Nothing to migrate; adopt @newq directly. */
umem->fq_reuse = newq;
return NULL;
}
if (newq->nentries < oldq->length)
/* @newq cannot hold the stashed entries; reject it. */
return newq;
/* Copy the stashed addresses into the replacement queue. */
memcpy(newq->handles, oldq->handles,
array_size(oldq->length, sizeof(u64)));
newq->length = oldq->length;
umem->fq_reuse = newq;
return oldq;
}
EXPORT_SYMBOL_GPL(xsk_reuseq_swap);
/* Free a reuse queue obtained from xsk_reuseq_prepare()/xsk_reuseq_swap().
 * kvfree(NULL) is a no-op, so a NULL @rq is safe.
 */
void xsk_reuseq_free(struct xdp_umem_fq_reuse *rq)
{
kvfree(rq);
}
EXPORT_SYMBOL_GPL(xsk_reuseq_free);
/* Executed by the core when the entire UMEM gets freed: release the
 * reuse queue, if any, and clear the umem's reference to it.
 */
void xsk_reuseq_destroy(struct xdp_umem *umem)
{
	struct xdp_umem_fq_reuse *rq = umem->fq_reuse;

	umem->fq_reuse = NULL;
	xsk_reuseq_free(rq);
}
...@@ -258,4 +258,7 @@ void xskq_set_umem(struct xsk_queue *q, u64 size, u64 chunk_mask); ...@@ -258,4 +258,7 @@ void xskq_set_umem(struct xsk_queue *q, u64 size, u64 chunk_mask);
struct xsk_queue *xskq_create(u32 nentries, bool umem_queue); struct xsk_queue *xskq_create(u32 nentries, bool umem_queue);
void xskq_destroy(struct xsk_queue *q_ops); void xskq_destroy(struct xsk_queue *q_ops);
/* Executed by the core when the entire UMEM gets freed */
void xsk_reuseq_destroy(struct xdp_umem *umem);
#endif /* _LINUX_XSK_QUEUE_H */ #endif /* _LINUX_XSK_QUEUE_H */
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment.