Commit fc9df2a0 authored by Andre Guedes, committed by Tony Nguyen

igc: Enable RX via AF_XDP zero-copy

Add support for receiving packets via AF_XDP zero-copy mechanism.

Add a new flag to 'enum igc_ring_flags_t' to indicate the ring has
AF_XDP zero-copy enabled so proper ring setup is carried out during ring
configuration in igc_configure_rx_ring().
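
The ring configuration itself is in the collapsed part of this change, so the following is only a minimal sketch of the idea, assuming a hypothetical helper that runs right after the ring's xdp_rxq has been registered: when the flag is set and a pool is attached, the rxq is registered with MEM_TYPE_XSK_BUFF_POOL, otherwise it keeps the default MEM_TYPE_PAGE_SHARED registration.

	/* Illustrative only: igc_setup_rx_mem_model() is not part of this patch. */
	static void igc_setup_rx_mem_model(struct igc_ring *ring,
					   struct xsk_buff_pool *pool)
	{
		if (test_bit(IGC_RING_FLAG_AF_XDP_ZC, &ring->flags) && pool) {
			/* Zero-copy: RX buffers come straight from the xsk pool. */
			ring->xsk_pool = pool;
			WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
							   MEM_TYPE_XSK_BUFF_POOL,
							   NULL));
			xsk_pool_set_rxq_info(pool, &ring->xdp_rxq);
		} else {
			/* Default: shared pages allocated by the driver. */
			ring->xsk_pool = NULL;
			WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
							   MEM_TYPE_PAGE_SHARED,
							   NULL));
		}
	}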

RX buffers can now be allocated via the shared pages mechanism (default
behavior of the driver) or via xsk pool (when AF_XDP zero-copy is
enabled) so a union is added to the 'struct igc_rx_buffer' to cover both
cases.

When AF_XDP zero-copy is enabled, RX buffers are allocated from the xsk
pool using the new helper igc_alloc_rx_buffers_zc(), the counterpart of
igc_alloc_rx_buffers().
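
igc_alloc_rx_buffers_zc() itself lives in the collapsed part of this change; the sketch below is a simplified, illustrative version only (name suffixed with _sketch, error accounting trimmed), showing how the zero-copy path fills the xdp member of the union instead of dma/page and programs the descriptor with the xsk buffer's DMA address.

	/* Illustrative sketch, not the patch's implementation. */
	static bool igc_alloc_rx_buffers_zc_sketch(struct igc_ring *ring, u16 count)
	{
		u16 i = ring->next_to_use;
		bool ok = true;

		while (count--) {
			union igc_adv_rx_desc *desc = IGC_RX_DESC(ring, i);
			struct igc_rx_buffer *bi = &ring->rx_buffer_info[i];

			/* Buffer comes from the xsk pool, not from a shared page. */
			bi->xdp = xsk_buff_alloc(ring->xsk_pool);
			if (!bi->xdp) {
				ok = false;
				break;
			}

			desc->read.pkt_addr = cpu_to_le64(xsk_buff_xdp_get_dma(bi->xdp));
			/* Clear the length so the cleaner sees a fresh slot. */
			desc->wb.upper.length = 0;

			if (++i == ring->count)
				i = 0;
		}

		if (i != ring->next_to_use) {
			ring->next_to_use = i;
			/* Descriptor writes must be visible before the tail bump. */
			wmb();
			writel(i, ring->tail);
		}

		return ok;
	}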

As in other Intel drivers that support AF_XDP zero-copy, igc has a
dedicated path for cleaning up RX IRQs when zero-copy is enabled. This
avoids adding too many checks to igc_clean_rx_irq(), resulting in more
readable and efficient code, since that function sits in the driver's
hot path.
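
The dedicated cleaner is also in the collapsed part of this change; at NAPI poll time the dispatch boils down to something like the sketch below (the helper name igc_poll_rx and the cleaner name igc_clean_rx_irq_zc are used here for illustration).

	/* Illustrative sketch of the per-poll RX dispatch. */
	static int igc_poll_rx(struct igc_q_vector *q_vector, int budget)
	{
		struct igc_ring *ring = q_vector->rx.ring;

		/* Zero-copy rings get their own cleaner so the regular
		 * igc_clean_rx_irq() hot path stays free of AF_XDP checks.
		 */
		return ring->xsk_pool ? igc_clean_rx_irq_zc(q_vector, budget) :
					igc_clean_rx_irq(q_vector, budget);
	}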

Signed-off-by: Andre Guedes <andre.guedes@intel.com>
Signed-off-by: Vedang Patel <vedang.patel@intel.com>
Signed-off-by: Jithu Joseph <jithu.joseph@intel.com>
Reviewed-by: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
Tested-by: Dvora Fuxbrumer <dvorax.fuxbrumer@linux.intel.com>
Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
parent 859b4dfa
@@ -118,6 +118,7 @@ struct igc_ring {
 	};

 	struct xdp_rxq_info xdp_rxq;
+	struct xsk_buff_pool *xsk_pool;
 } ____cacheline_internodealigned_in_smp;

 /* Board specific private data structure */

@@ -255,6 +256,9 @@ bool igc_has_link(struct igc_adapter *adapter);
 void igc_reset(struct igc_adapter *adapter);
 int igc_set_spd_dplx(struct igc_adapter *adapter, u32 spd, u8 dplx);
 void igc_update_stats(struct igc_adapter *adapter);
+void igc_disable_rx_ring(struct igc_ring *ring);
+void igc_enable_rx_ring(struct igc_ring *ring);
+int igc_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags);

 /* igc_dump declarations */
 void igc_rings_dump(struct igc_adapter *adapter);
@@ -432,14 +436,19 @@ struct igc_tx_buffer {
 };

 struct igc_rx_buffer {
-	dma_addr_t dma;
-	struct page *page;
+	union {
+		struct {
+			dma_addr_t dma;
+			struct page *page;
 #if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
-	__u32 page_offset;
+			__u32 page_offset;
 #else
-	__u16 page_offset;
+			__u16 page_offset;
 #endif
-	__u16 pagecnt_bias;
+			__u16 pagecnt_bias;
+		};
+		struct xdp_buff *xdp;
+	};
 };

 struct igc_q_vector {
@@ -525,7 +534,8 @@ enum igc_ring_flags_t {
 	IGC_RING_FLAG_RX_SCTP_CSUM,
 	IGC_RING_FLAG_RX_LB_VLAN_BSWAP,
 	IGC_RING_FLAG_TX_CTX_IDX,
-	IGC_RING_FLAG_TX_DETECT_HANG
+	IGC_RING_FLAG_TX_DETECT_HANG,
+	IGC_RING_FLAG_AF_XDP_ZC,
 };

 #define ring_uses_large_buffer(ring) \
...
@@ -81,6 +81,7 @@ union igc_adv_rx_desc {
 /* Additional Receive Descriptor Control definitions */
 #define IGC_RXDCTL_QUEUE_ENABLE	0x02000000 /* Ena specific Rx Queue */
+#define IGC_RXDCTL_SWFLUSH	0x04000000 /* Receive Software Flush */

 /* SRRCTL bit definitions */
 #define IGC_SRRCTL_BSIZEPKT_SHIFT	10 /* Shift _right_ */
...
This diff is collapsed.
 // SPDX-License-Identifier: GPL-2.0
 /* Copyright (c) 2020, Intel Corporation. */

+#include <net/xdp_sock_drv.h>
+
 #include "igc.h"
 #include "igc_xdp.h"

@@ -31,3 +33,101 @@ int igc_xdp_set_prog(struct igc_adapter *adapter, struct bpf_prog *prog,
 	return 0;
 }
+
+static int igc_xdp_enable_pool(struct igc_adapter *adapter,
+			       struct xsk_buff_pool *pool, u16 queue_id)
+{
+	struct net_device *ndev = adapter->netdev;
+	struct device *dev = &adapter->pdev->dev;
+	struct igc_ring *rx_ring;
+	struct napi_struct *napi;
+	bool needs_reset;
+	u32 frame_size;
+	int err;
+
+	if (queue_id >= adapter->num_rx_queues)
+		return -EINVAL;
+
+	frame_size = xsk_pool_get_rx_frame_size(pool);
+	if (frame_size < ETH_FRAME_LEN + VLAN_HLEN * 2) {
+		/* When XDP is enabled, the driver doesn't support frames that
+		 * span over multiple buffers. To avoid that, we check if xsk
+		 * frame size is big enough to fit the max ethernet frame size
+		 * + vlan double tagging.
+		 */
+		return -EOPNOTSUPP;
+	}
+
+	err = xsk_pool_dma_map(pool, dev, IGC_RX_DMA_ATTR);
+	if (err) {
+		netdev_err(ndev, "Failed to map xsk pool\n");
+		return err;
+	}
+
+	needs_reset = netif_running(adapter->netdev) && igc_xdp_is_enabled(adapter);
+
+	rx_ring = adapter->rx_ring[queue_id];
+	napi = &rx_ring->q_vector->napi;
+
+	if (needs_reset) {
+		igc_disable_rx_ring(rx_ring);
+		napi_disable(napi);
+	}
+
+	set_bit(IGC_RING_FLAG_AF_XDP_ZC, &rx_ring->flags);
+
+	if (needs_reset) {
+		napi_enable(napi);
+		igc_enable_rx_ring(rx_ring);
+
+		err = igc_xsk_wakeup(ndev, queue_id, XDP_WAKEUP_RX);
+		if (err) {
+			xsk_pool_dma_unmap(pool, IGC_RX_DMA_ATTR);
+			return err;
+		}
+	}
+
+	return 0;
+}
+
+static int igc_xdp_disable_pool(struct igc_adapter *adapter, u16 queue_id)
+{
+	struct xsk_buff_pool *pool;
+	struct igc_ring *rx_ring;
+	struct napi_struct *napi;
+	bool needs_reset;
+
+	if (queue_id >= adapter->num_rx_queues)
+		return -EINVAL;
+
+	pool = xsk_get_pool_from_qid(adapter->netdev, queue_id);
+	if (!pool)
+		return -EINVAL;
+
+	needs_reset = netif_running(adapter->netdev) && igc_xdp_is_enabled(adapter);
+
+	rx_ring = adapter->rx_ring[queue_id];
+	napi = &rx_ring->q_vector->napi;
+
+	if (needs_reset) {
+		igc_disable_rx_ring(rx_ring);
+		napi_disable(napi);
+	}
+
+	xsk_pool_dma_unmap(pool, IGC_RX_DMA_ATTR);
+
+	clear_bit(IGC_RING_FLAG_AF_XDP_ZC, &rx_ring->flags);
+
+	if (needs_reset) {
+		napi_enable(napi);
+		igc_enable_rx_ring(rx_ring);
+	}
+
+	return 0;
+}
+
+int igc_xdp_setup_pool(struct igc_adapter *adapter, struct xsk_buff_pool *pool,
+		       u16 queue_id)
+{
+	return pool ? igc_xdp_enable_pool(adapter, pool, queue_id) :
+		      igc_xdp_disable_pool(adapter, queue_id);
+}
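
For context, igc_xdp_setup_pool() is reached from the driver's ndo_bpf handler when user space attaches or detaches an xsk pool on a queue; that wiring is part of the collapsed diff above. A sketch, assuming the handler follows the usual XDP_SETUP_XSK_POOL dispatch pattern (the function name igc_bpf is assumed here):

	/* Illustrative sketch of the ndo_bpf dispatch; not shown in this view. */
	static int igc_bpf(struct net_device *dev, struct netdev_bpf *bpf)
	{
		struct igc_adapter *adapter = netdev_priv(dev);

		switch (bpf->command) {
		case XDP_SETUP_PROG:
			return igc_xdp_set_prog(adapter, bpf->prog, bpf->extack);
		case XDP_SETUP_XSK_POOL:
			/* pool == NULL means "detach the pool from this queue". */
			return igc_xdp_setup_pool(adapter, bpf->xsk.pool,
						  bpf->xsk.queue_id);
		default:
			return -EOPNOTSUPP;
		}
	}
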
@@ -6,6 +6,8 @@
 int igc_xdp_set_prog(struct igc_adapter *adapter, struct bpf_prog *prog,
 		     struct netlink_ext_ack *extack);
+int igc_xdp_setup_pool(struct igc_adapter *adapter, struct xsk_buff_pool *pool,
+		       u16 queue_id);

 static inline bool igc_xdp_is_enabled(struct igc_adapter *adapter)
 {
...