Commit 3ca3f39c authored by David S. Miller

Merge branch '1GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/tnguy/next-queue

Tony Nguyen says:

====================
1GbE Intel Wired LAN Driver Updates 2021-03-29

This series contains updates to the igc driver only.

Andre Guedes says:

Add XDP support for the igc driver. The implementation follows the
approach taken in other Intel drivers as closely as possible, for the
sake of consistency.

The series is organized in two parts. In the first part (patches 1 to 4),
igc_main.c and igc_ptp.c are refactored in preparation for landing the XDP
support, which is introduced in the second part (patches 5 to 8).

As far as code organization is concerned, XDP-related helpers are
defined in a new file, igc_xdp.c, and are called by igc_main.c.

The features added by this series have been tested with the samples
provided in samples/bpf/: xdp1, xdp2, xdp_redirect_cpu, and
xdp_redirect_map.
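
For reference, beyond the samples' own loaders, a driver-mode XDP program
can be attached with iproute2 (illustrative command only; xdp_prog.o stands
for any compiled XDP object with a program section named "xdp"):

  # ip link set dev <iface> xdpdrv obj xdp_prog.o sec xdp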

An upcoming series will add support for the UMEM and zero-copy features
of AF_XDP.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 3af562a3 4ff32036
--- a/drivers/net/ethernet/intel/igc/Makefile
+++ b/drivers/net/ethernet/intel/igc/Makefile
@@ -8,4 +8,4 @@
obj-$(CONFIG_IGC) += igc.o
igc-objs := igc_main.o igc_mac.o igc_i225.o igc_base.o igc_nvm.o igc_phy.o \
igc_diag.o igc_ethtool.o igc_ptp.o igc_dump.o igc_tsn.o
igc_diag.o igc_ethtool.o igc_ptp.o igc_dump.o igc_tsn.o igc_xdp.o
--- a/drivers/net/ethernet/intel/igc/igc.h
+++ b/drivers/net/ethernet/intel/igc/igc.h
@@ -111,6 +111,8 @@ struct igc_ring {
struct sk_buff *skb;
};
};
struct xdp_rxq_info xdp_rxq;
} ____cacheline_internodealigned_in_smp;
/* Board specific private data structure */
@@ -219,6 +221,8 @@ struct igc_adapter {
ktime_t ptp_reset_start; /* Reset time in clock mono */
char fw_version[32];
struct bpf_prog *xdp_prog;
};
void igc_up(struct igc_adapter *adapter);
@@ -373,6 +377,8 @@ enum igc_tx_flags {
/* olinfo flags */
IGC_TX_FLAGS_IPV4 = 0x10,
IGC_TX_FLAGS_CSUM = 0x20,
IGC_TX_FLAGS_XDP = 0x100,
};
enum igc_boards {
@@ -395,7 +401,10 @@ enum igc_boards {
struct igc_tx_buffer {
union igc_adv_tx_desc *next_to_watch;
unsigned long time_stamp;
struct sk_buff *skb;
union {
struct sk_buff *skb;
struct xdp_frame *xdpf;
};
unsigned int bytecount;
u16 gso_segs;
__be16 protocol;
@@ -504,6 +513,10 @@ enum igc_ring_flags_t {
#define ring_uses_large_buffer(ring) \
test_bit(IGC_RING_FLAG_RX_3K_BUFFER, &(ring)->flags)
#define set_ring_uses_large_buffer(ring) \
set_bit(IGC_RING_FLAG_RX_3K_BUFFER, &(ring)->flags)
#define clear_ring_uses_large_buffer(ring) \
clear_bit(IGC_RING_FLAG_RX_3K_BUFFER, &(ring)->flags)
#define ring_uses_build_skb(ring) \
test_bit(IGC_RING_FLAG_RX_BUILD_SKB_ENABLED, &(ring)->flags)
@@ -547,8 +560,7 @@ void igc_ptp_init(struct igc_adapter *adapter);
void igc_ptp_reset(struct igc_adapter *adapter);
void igc_ptp_suspend(struct igc_adapter *adapter);
void igc_ptp_stop(struct igc_adapter *adapter);
void igc_ptp_rx_pktstamp(struct igc_q_vector *q_vector, __le32 *va,
struct sk_buff *skb);
ktime_t igc_ptp_rx_pktstamp(struct igc_adapter *adapter, __le32 *buf);
int igc_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr);
int igc_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr);
void igc_ptp_tx_hang(struct igc_adapter *adapter);
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -10,17 +10,24 @@
#include <linux/ip.h>
#include <linux/pm_runtime.h>
#include <net/pkt_sched.h>
#include <linux/bpf_trace.h>
#include <net/ipv6.h>
#include "igc.h"
#include "igc_hw.h"
#include "igc_tsn.h"
#include "igc_xdp.h"
#define DRV_SUMMARY "Intel(R) 2.5G Ethernet Linux Driver"
#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)
#define IGC_XDP_PASS 0
#define IGC_XDP_CONSUMED BIT(0)
#define IGC_XDP_TX BIT(1)
#define IGC_XDP_REDIRECT BIT(2)
static int debug = -1;
MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
@@ -176,8 +183,10 @@ static void igc_clean_tx_ring(struct igc_ring *tx_ring)
while (i != tx_ring->next_to_use) {
union igc_adv_tx_desc *eop_desc, *tx_desc;
/* Free all the Tx ring sk_buffs */
dev_kfree_skb_any(tx_buffer->skb);
if (tx_buffer->tx_flags & IGC_TX_FLAGS_XDP)
xdp_return_frame(tx_buffer->xdpf);
else
dev_kfree_skb_any(tx_buffer->skb);
/* unmap skb header data */
dma_unmap_single(tx_ring->dev,
@@ -375,6 +384,8 @@ static void igc_clean_rx_ring(struct igc_ring *rx_ring)
i = 0;
}
clear_ring_uses_large_buffer(rx_ring);
rx_ring->next_to_alloc = 0;
rx_ring->next_to_clean = 0;
rx_ring->next_to_use = 0;
@@ -403,6 +414,8 @@ void igc_free_rx_resources(struct igc_ring *rx_ring)
{
igc_clean_rx_ring(rx_ring);
igc_xdp_unregister_rxq_info(rx_ring);
vfree(rx_ring->rx_buffer_info);
rx_ring->rx_buffer_info = NULL;
@@ -440,7 +453,11 @@ int igc_setup_rx_resources(struct igc_ring *rx_ring)
{
struct net_device *ndev = rx_ring->netdev;
struct device *dev = rx_ring->dev;
int size, desc_len;
int size, desc_len, res;
res = igc_xdp_register_rxq_info(rx_ring);
if (res < 0)
return res;
size = sizeof(struct igc_rx_buffer) * rx_ring->count;
rx_ring->rx_buffer_info = vzalloc(size);
@@ -466,6 +483,7 @@ int igc_setup_rx_resources(struct igc_ring *rx_ring)
return 0;
err:
igc_xdp_unregister_rxq_info(rx_ring);
vfree(rx_ring->rx_buffer_info);
rx_ring->rx_buffer_info = NULL;
netdev_err(ndev, "Unable to allocate memory for Rx descriptor ring\n");
@@ -497,6 +515,11 @@ static int igc_setup_all_rx_resources(struct igc_adapter *adapter)
return err;
}
static bool igc_xdp_is_enabled(struct igc_adapter *adapter)
{
return !!adapter->xdp_prog;
}
/**
* igc_configure_rx_ring - Configure a receive ring after Reset
* @adapter: board private structure
@@ -513,6 +536,9 @@ static void igc_configure_rx_ring(struct igc_adapter *adapter,
u32 srrctl = 0, rxdctl = 0;
u64 rdba = ring->dma;
if (igc_xdp_is_enabled(adapter))
set_ring_uses_large_buffer(ring);
/* disable the queue */
wr32(IGC_RXDCTL(reg_idx), 0);
@@ -1029,7 +1055,7 @@ static inline int igc_maybe_stop_tx(struct igc_ring *tx_ring, const u16 size)
((u32)((_input) & (_flag)) * ((_result) / (_flag))) : \
((u32)((_input) & (_flag)) / ((_flag) / (_result))))
static u32 igc_tx_cmd_type(struct sk_buff *skb, u32 tx_flags)
static u32 igc_tx_cmd_type(u32 tx_flags)
{
/* set type for advanced descriptor with frame checksum insertion */
u32 cmd_type = IGC_ADVTXD_DTYP_DATA |
@@ -1078,7 +1104,7 @@ static int igc_tx_map(struct igc_ring *tx_ring,
u16 i = tx_ring->next_to_use;
unsigned int data_len, size;
dma_addr_t dma;
u32 cmd_type = igc_tx_cmd_type(skb, tx_flags);
u32 cmd_type = igc_tx_cmd_type(tx_flags);
tx_desc = IGC_TX_DESC(tx_ring, i);
@@ -1480,11 +1506,18 @@ static void igc_process_skb_fields(struct igc_ring *rx_ring,
}
static struct igc_rx_buffer *igc_get_rx_buffer(struct igc_ring *rx_ring,
const unsigned int size)
const unsigned int size,
int *rx_buffer_pgcnt)
{
struct igc_rx_buffer *rx_buffer;
rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
*rx_buffer_pgcnt =
#if (PAGE_SIZE < 8192)
page_count(rx_buffer->page);
#else
0;
#endif
prefetchw(rx_buffer->page);
/* we are reusing so sync this buffer for CPU use */
@@ -1499,6 +1532,32 @@ static struct igc_rx_buffer *igc_get_rx_buffer(struct igc_ring *rx_ring,
return rx_buffer;
}
static void igc_rx_buffer_flip(struct igc_rx_buffer *buffer,
unsigned int truesize)
{
#if (PAGE_SIZE < 8192)
buffer->page_offset ^= truesize;
#else
buffer->page_offset += truesize;
#endif
}
static unsigned int igc_get_rx_frame_truesize(struct igc_ring *ring,
unsigned int size)
{
unsigned int truesize;
#if (PAGE_SIZE < 8192)
truesize = igc_rx_pg_size(ring) / 2;
#else
truesize = ring_uses_build_skb(ring) ?
SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
SKB_DATA_ALIGN(IGC_SKB_PAD + size) :
SKB_DATA_ALIGN(size);
#endif
return truesize;
}
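
To make the recycling scheme above concrete, here is a worked example with
illustrative values (not part of the patch), assuming the default layout on
a 4 KiB-page system:

/* PAGE_SIZE == 4096, igc_rx_pg_size(ring) == 4096:
 *
 *   truesize = 4096 / 2 = 2048
 *   igc_rx_buffer_flip(): page_offset 0 -> 2048 -> 0 -> ...
 *
 * Each flip points the buffer at the other half of the page, so the driver
 * can refill one half while the stack still holds the other. On systems
 * with PAGE_SIZE >= 8192 there is no halving: page_offset simply advances
 * by the frame's aligned truesize.
 */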
/**
* igc_add_rx_frag - Add contents of Rx buffer to sk_buff
* @rx_ring: rx descriptor ring to transact packets on
@@ -1513,20 +1572,19 @@ static void igc_add_rx_frag(struct igc_ring *rx_ring,
struct sk_buff *skb,
unsigned int size)
{
#if (PAGE_SIZE < 8192)
unsigned int truesize = igc_rx_pg_size(rx_ring) / 2;
unsigned int truesize;
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
rx_buffer->page_offset, size, truesize);
rx_buffer->page_offset ^= truesize;
#if (PAGE_SIZE < 8192)
truesize = igc_rx_pg_size(rx_ring) / 2;
#else
unsigned int truesize = ring_uses_build_skb(rx_ring) ?
SKB_DATA_ALIGN(IGC_SKB_PAD + size) :
SKB_DATA_ALIGN(size);
truesize = ring_uses_build_skb(rx_ring) ?
SKB_DATA_ALIGN(IGC_SKB_PAD + size) :
SKB_DATA_ALIGN(size);
#endif
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
rx_buffer->page_offset, size, truesize);
rx_buffer->page_offset += truesize;
#endif
igc_rx_buffer_flip(rx_buffer, truesize);
}
static struct sk_buff *igc_build_skb(struct igc_ring *rx_ring,
@@ -1535,12 +1593,7 @@ static struct sk_buff *igc_build_skb(struct igc_ring *rx_ring,
unsigned int size)
{
void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
#if (PAGE_SIZE < 8192)
unsigned int truesize = igc_rx_pg_size(rx_ring) / 2;
#else
unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
SKB_DATA_ALIGN(IGC_SKB_PAD + size);
#endif
unsigned int truesize = igc_get_rx_frame_truesize(rx_ring, size);
struct sk_buff *skb;
/* prefetch first cache line of first page */
@@ -1555,27 +1608,18 @@ static struct sk_buff *igc_build_skb(struct igc_ring *rx_ring,
skb_reserve(skb, IGC_SKB_PAD);
__skb_put(skb, size);
/* update buffer offset */
#if (PAGE_SIZE < 8192)
rx_buffer->page_offset ^= truesize;
#else
rx_buffer->page_offset += truesize;
#endif
igc_rx_buffer_flip(rx_buffer, truesize);
return skb;
}
static struct sk_buff *igc_construct_skb(struct igc_ring *rx_ring,
struct igc_rx_buffer *rx_buffer,
union igc_adv_rx_desc *rx_desc,
unsigned int size)
struct xdp_buff *xdp,
ktime_t timestamp)
{
void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
#if (PAGE_SIZE < 8192)
unsigned int truesize = igc_rx_pg_size(rx_ring) / 2;
#else
unsigned int truesize = SKB_DATA_ALIGN(size);
#endif
unsigned int size = xdp->data_end - xdp->data;
unsigned int truesize = igc_get_rx_frame_truesize(rx_ring, size);
void *va = xdp->data;
unsigned int headlen;
struct sk_buff *skb;
@@ -1587,11 +1631,8 @@ static struct sk_buff *igc_construct_skb(struct igc_ring *rx_ring,
if (unlikely(!skb))
return NULL;
if (unlikely(igc_test_staterr(rx_desc, IGC_RXDADV_STAT_TSIP))) {
igc_ptp_rx_pktstamp(rx_ring->q_vector, va, skb);
va += IGC_TS_HDR_LEN;
size -= IGC_TS_HDR_LEN;
}
if (timestamp)
skb_hwtstamps(skb)->hwtstamp = timestamp;
/* Determine available headroom for copy */
headlen = size;
@@ -1607,11 +1648,7 @@ static struct sk_buff *igc_construct_skb(struct igc_ring *rx_ring,
skb_add_rx_frag(skb, 0, rx_buffer->page,
(va + headlen) - page_address(rx_buffer->page),
size, truesize);
#if (PAGE_SIZE < 8192)
rx_buffer->page_offset ^= truesize;
#else
rx_buffer->page_offset += truesize;
#endif
igc_rx_buffer_flip(rx_buffer, truesize);
} else {
rx_buffer->pagecnt_bias++;
}
@@ -1648,7 +1685,8 @@ static void igc_reuse_rx_page(struct igc_ring *rx_ring,
new_buff->pagecnt_bias = old_buff->pagecnt_bias;
}
static bool igc_can_reuse_rx_page(struct igc_rx_buffer *rx_buffer)
static bool igc_can_reuse_rx_page(struct igc_rx_buffer *rx_buffer,
int rx_buffer_pgcnt)
{
unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
struct page *page = rx_buffer->page;
@@ -1659,7 +1697,7 @@ static bool igc_can_reuse_rx_page(struct igc_rx_buffer *rx_buffer)
#if (PAGE_SIZE < 8192)
/* if we are only owner of page we can reuse it */
if (unlikely((page_ref_count(page) - pagecnt_bias) > 1))
if (unlikely((rx_buffer_pgcnt - pagecnt_bias) > 1))
return false;
#else
#define IGC_LAST_OFFSET \
@@ -1673,8 +1711,8 @@ static bool igc_can_reuse_rx_page(struct igc_rx_buffer *rx_buffer)
* the pagecnt_bias and page count so that we fully restock the
* number of references the driver holds.
*/
if (unlikely(!pagecnt_bias)) {
page_ref_add(page, USHRT_MAX);
if (unlikely(pagecnt_bias == 1)) {
page_ref_add(page, USHRT_MAX - 1);
rx_buffer->pagecnt_bias = USHRT_MAX;
}
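
The reference-count handling above deserves a note. Rather than taking a
page reference for every received frame (an atomic operation per packet),
the driver front-loads a large reference count at allocation time and tracks
its share in the non-atomic pagecnt_bias. A sketch of the accounting, using
the values from this patch:

/* At allocation (see igc_alloc_mapped_page below):
 *   page_ref_add(page, USHRT_MAX - 1);   page refcount = USHRT_MAX
 *   bi->pagecnt_bias = USHRT_MAX;
 *
 * Each half-buffer handed to the stack: pagecnt_bias--  (no atomics).
 * Reuse test (small-page case): sole owner iff page_count - bias <= 1.
 * When the bias drops to 1, re-arm with page_ref_add(page, USHRT_MAX - 1)
 * and reset the bias to USHRT_MAX, as done above.
 */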
@@ -1726,6 +1764,10 @@ static bool igc_cleanup_headers(struct igc_ring *rx_ring,
union igc_adv_rx_desc *rx_desc,
struct sk_buff *skb)
{
/* XDP packets use error pointer so abort at this point */
if (IS_ERR(skb))
return true;
if (unlikely(igc_test_staterr(rx_desc, IGC_RXDEXT_STATERR_RXE))) {
struct net_device *netdev = rx_ring->netdev;
@@ -1743,9 +1785,10 @@ static bool igc_cleanup_headers(struct igc_ring *rx_ring,
}
static void igc_put_rx_buffer(struct igc_ring *rx_ring,
struct igc_rx_buffer *rx_buffer)
struct igc_rx_buffer *rx_buffer,
int rx_buffer_pgcnt)
{
if (igc_can_reuse_rx_page(rx_buffer)) {
if (igc_can_reuse_rx_page(rx_buffer, rx_buffer_pgcnt)) {
/* hand second half of page back to the ring */
igc_reuse_rx_page(rx_ring, rx_buffer);
} else {
@@ -1765,7 +1808,14 @@ static void igc_put_rx_buffer(struct igc_ring *rx_ring,
static inline unsigned int igc_rx_offset(struct igc_ring *rx_ring)
{
return ring_uses_build_skb(rx_ring) ? IGC_SKB_PAD : 0;
struct igc_adapter *adapter = rx_ring->q_vector->adapter;
if (ring_uses_build_skb(rx_ring))
return IGC_SKB_PAD;
if (igc_xdp_is_enabled(adapter))
return XDP_PACKET_HEADROOM;
return 0;
}
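
With XDP enabled, igc_rx_offset() now reserves XDP_PACKET_HEADROOM (256
bytes) in front of the frame so a program can grow headers with
bpf_xdp_adjust_head(). The resulting buffer layout, sketched for the
non-build_skb case:

/*  |<- XDP_PACKET_HEADROOM ->|<- received frame ->|<- tailroom ->|
 *  ^ xdp.data_hard_start     ^ xdp.data           ^ xdp.data_end
 *
 * (matching the xdp_buff setup in igc_clean_rx_irq() below)
 */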
static bool igc_alloc_mapped_page(struct igc_ring *rx_ring,
@@ -1804,7 +1854,8 @@ static bool igc_alloc_mapped_page(struct igc_ring *rx_ring,
bi->dma = dma;
bi->page = page;
bi->page_offset = igc_rx_offset(rx_ring);
bi->pagecnt_bias = 1;
page_ref_add(page, USHRT_MAX - 1);
bi->pagecnt_bias = USHRT_MAX;
return true;
}
@@ -1879,17 +1930,196 @@ static void igc_alloc_rx_buffers(struct igc_ring *rx_ring, u16 cleaned_count)
}
}
static int igc_xdp_init_tx_buffer(struct igc_tx_buffer *buffer,
struct xdp_frame *xdpf,
struct igc_ring *ring)
{
dma_addr_t dma;
dma = dma_map_single(ring->dev, xdpf->data, xdpf->len, DMA_TO_DEVICE);
if (dma_mapping_error(ring->dev, dma)) {
netdev_err_once(ring->netdev, "Failed to map DMA for TX\n");
return -ENOMEM;
}
buffer->xdpf = xdpf;
buffer->tx_flags = IGC_TX_FLAGS_XDP;
buffer->protocol = 0;
buffer->bytecount = xdpf->len;
buffer->gso_segs = 1;
buffer->time_stamp = jiffies;
dma_unmap_len_set(buffer, len, xdpf->len);
dma_unmap_addr_set(buffer, dma, dma);
return 0;
}
/* This function requires __netif_tx_lock is held by the caller. */
static int igc_xdp_init_tx_descriptor(struct igc_ring *ring,
struct xdp_frame *xdpf)
{
struct igc_tx_buffer *buffer;
union igc_adv_tx_desc *desc;
u32 cmd_type, olinfo_status;
int err;
if (!igc_desc_unused(ring))
return -EBUSY;
buffer = &ring->tx_buffer_info[ring->next_to_use];
err = igc_xdp_init_tx_buffer(buffer, xdpf, ring);
if (err)
return err;
cmd_type = IGC_ADVTXD_DTYP_DATA | IGC_ADVTXD_DCMD_DEXT |
IGC_ADVTXD_DCMD_IFCS | IGC_TXD_DCMD |
buffer->bytecount;
olinfo_status = buffer->bytecount << IGC_ADVTXD_PAYLEN_SHIFT;
desc = IGC_TX_DESC(ring, ring->next_to_use);
desc->read.cmd_type_len = cpu_to_le32(cmd_type);
desc->read.olinfo_status = cpu_to_le32(olinfo_status);
desc->read.buffer_addr = cpu_to_le64(dma_unmap_addr(buffer, dma));
netdev_tx_sent_queue(txring_txq(ring), buffer->bytecount);
buffer->next_to_watch = desc;
ring->next_to_use++;
if (ring->next_to_use == ring->count)
ring->next_to_use = 0;
return 0;
}
static struct igc_ring *igc_xdp_get_tx_ring(struct igc_adapter *adapter,
int cpu)
{
int index = cpu;
if (unlikely(index < 0))
index = 0;
while (index >= adapter->num_tx_queues)
index -= adapter->num_tx_queues;
return adapter->tx_ring[index];
}
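
The while loop above is a cheap modulo that maps the running CPU onto a Tx
ring; for a non-negative cpu it is equivalent to:

	index = cpu % adapter->num_tx_queues;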
static int igc_xdp_xmit_back(struct igc_adapter *adapter, struct xdp_buff *xdp)
{
struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
int cpu = smp_processor_id();
struct netdev_queue *nq;
struct igc_ring *ring;
int res;
if (unlikely(!xdpf))
return -EFAULT;
ring = igc_xdp_get_tx_ring(adapter, cpu);
nq = txring_txq(ring);
__netif_tx_lock(nq, cpu);
res = igc_xdp_init_tx_descriptor(ring, xdpf);
__netif_tx_unlock(nq);
return res;
}
static struct sk_buff *igc_xdp_run_prog(struct igc_adapter *adapter,
struct xdp_buff *xdp)
{
struct bpf_prog *prog;
int res;
u32 act;
rcu_read_lock();
prog = READ_ONCE(adapter->xdp_prog);
if (!prog) {
res = IGC_XDP_PASS;
goto unlock;
}
act = bpf_prog_run_xdp(prog, xdp);
switch (act) {
case XDP_PASS:
res = IGC_XDP_PASS;
break;
case XDP_TX:
if (igc_xdp_xmit_back(adapter, xdp) < 0)
res = IGC_XDP_CONSUMED;
else
res = IGC_XDP_TX;
break;
case XDP_REDIRECT:
if (xdp_do_redirect(adapter->netdev, xdp, prog) < 0)
res = IGC_XDP_CONSUMED;
else
res = IGC_XDP_REDIRECT;
break;
default:
bpf_warn_invalid_xdp_action(act);
fallthrough;
case XDP_ABORTED:
trace_xdp_exception(adapter->netdev, prog, act);
fallthrough;
case XDP_DROP:
res = IGC_XDP_CONSUMED;
break;
}
unlock:
rcu_read_unlock();
return ERR_PTR(-res);
}
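
Note the return convention: the XDP verdict is folded into an error pointer
so the Rx path can keep a single sk_buff pointer. A sketch of the encoding:

/* IGC_XDP_PASS (0)          -> ERR_PTR(0) == NULL, so the normal skb
 *                              construction path runs.
 * TX/REDIRECT/CONSUMED (!0) -> IS_ERR(skb) is true; the caller recovers
 *                              the verdict as: xdp_res = -PTR_ERR(skb);
 */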
/* This function assumes __netif_tx_lock is held by the caller. */
static void igc_flush_tx_descriptors(struct igc_ring *ring)
{
/* Once tail pointer is updated, hardware can fetch the descriptors
* any time so we issue a write membar here to ensure all memory
* writes are complete before the tail pointer is updated.
*/
wmb();
writel(ring->next_to_use, ring->tail);
}
static void igc_finalize_xdp(struct igc_adapter *adapter, int status)
{
int cpu = smp_processor_id();
struct netdev_queue *nq;
struct igc_ring *ring;
if (status & IGC_XDP_TX) {
ring = igc_xdp_get_tx_ring(adapter, cpu);
nq = txring_txq(ring);
__netif_tx_lock(nq, cpu);
igc_flush_tx_descriptors(ring);
__netif_tx_unlock(nq);
}
if (status & IGC_XDP_REDIRECT)
xdp_do_flush();
}
static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget)
{
unsigned int total_bytes = 0, total_packets = 0;
struct igc_adapter *adapter = q_vector->adapter;
struct igc_ring *rx_ring = q_vector->rx.ring;
struct sk_buff *skb = rx_ring->skb;
u16 cleaned_count = igc_desc_unused(rx_ring);
int xdp_status = 0, rx_buffer_pgcnt;
while (likely(total_packets < budget)) {
union igc_adv_rx_desc *rx_desc;
struct igc_rx_buffer *rx_buffer;
unsigned int size;
unsigned int size, truesize;
ktime_t timestamp = 0;
struct xdp_buff xdp;
int pkt_offset = 0;
void *pktbuf;
/* return some buffers to hardware, one at a time is too slow */
if (cleaned_count >= IGC_RX_BUFFER_WRITE) {
@@ -1908,16 +2138,52 @@ static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget)
*/
dma_rmb();
rx_buffer = igc_get_rx_buffer(rx_ring, size);
rx_buffer = igc_get_rx_buffer(rx_ring, size, &rx_buffer_pgcnt);
truesize = igc_get_rx_frame_truesize(rx_ring, size);
pktbuf = page_address(rx_buffer->page) + rx_buffer->page_offset;
/* retrieve a buffer from the ring */
if (skb)
if (igc_test_staterr(rx_desc, IGC_RXDADV_STAT_TSIP)) {
timestamp = igc_ptp_rx_pktstamp(q_vector->adapter,
pktbuf);
pkt_offset = IGC_TS_HDR_LEN;
size -= IGC_TS_HDR_LEN;
}
if (!skb) {
xdp.data = pktbuf + pkt_offset;
xdp.data_end = xdp.data + size;
xdp.data_hard_start = pktbuf - igc_rx_offset(rx_ring);
xdp_set_data_meta_invalid(&xdp);
xdp.frame_sz = truesize;
xdp.rxq = &rx_ring->xdp_rxq;
skb = igc_xdp_run_prog(adapter, &xdp);
}
if (IS_ERR(skb)) {
unsigned int xdp_res = -PTR_ERR(skb);
switch (xdp_res) {
case IGC_XDP_CONSUMED:
rx_buffer->pagecnt_bias++;
break;
case IGC_XDP_TX:
case IGC_XDP_REDIRECT:
igc_rx_buffer_flip(rx_buffer, truesize);
xdp_status |= xdp_res;
break;
}
total_packets++;
total_bytes += size;
} else if (skb)
igc_add_rx_frag(rx_ring, rx_buffer, skb, size);
else if (ring_uses_build_skb(rx_ring))
skb = igc_build_skb(rx_ring, rx_buffer, rx_desc, size);
else
skb = igc_construct_skb(rx_ring, rx_buffer,
rx_desc, size);
skb = igc_construct_skb(rx_ring, rx_buffer, &xdp,
timestamp);
/* exit if we failed to retrieve a buffer */
if (!skb) {
@@ -1926,7 +2192,7 @@ static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget)
break;
}
igc_put_rx_buffer(rx_ring, rx_buffer);
igc_put_rx_buffer(rx_ring, rx_buffer, rx_buffer_pgcnt);
cleaned_count++;
/* fetch next buffer in frame if non-eop */
@@ -1954,6 +2220,9 @@ static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget)
total_packets++;
}
if (xdp_status)
igc_finalize_xdp(adapter, xdp_status);
/* place incomplete frames back on ring for completion */
rx_ring->skb = skb;
@@ -2015,8 +2284,10 @@ static bool igc_clean_tx_irq(struct igc_q_vector *q_vector, int napi_budget)
total_bytes += tx_buffer->bytecount;
total_packets += tx_buffer->gso_segs;
/* free the skb */
napi_consume_skb(tx_buffer->skb, napi_budget);
if (tx_buffer->tx_flags & IGC_TX_FLAGS_XDP)
xdp_return_frame(tx_buffer->xdpf);
else
napi_consume_skb(tx_buffer->skb, napi_budget);
/* unmap skb header data */
dma_unmap_single(tx_ring->dev,
@@ -3858,6 +4129,11 @@ static int igc_change_mtu(struct net_device *netdev, int new_mtu)
int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
struct igc_adapter *adapter = netdev_priv(netdev);
if (igc_xdp_is_enabled(adapter) && new_mtu > ETH_DATA_LEN) {
netdev_dbg(netdev, "Jumbo frames not supported with XDP");
return -EINVAL;
}
/* adjust max frame to be at least the size of a standard frame */
if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN))
max_frame = ETH_FRAME_LEN + ETH_FCS_LEN;
@@ -4844,6 +5120,58 @@ static int igc_setup_tc(struct net_device *dev, enum tc_setup_type type,
}
}
static int igc_bpf(struct net_device *dev, struct netdev_bpf *bpf)
{
struct igc_adapter *adapter = netdev_priv(dev);
switch (bpf->command) {
case XDP_SETUP_PROG:
return igc_xdp_set_prog(adapter, bpf->prog, bpf->extack);
default:
return -EOPNOTSUPP;
}
}
static int igc_xdp_xmit(struct net_device *dev, int num_frames,
struct xdp_frame **frames, u32 flags)
{
struct igc_adapter *adapter = netdev_priv(dev);
int cpu = smp_processor_id();
struct netdev_queue *nq;
struct igc_ring *ring;
int i, drops;
if (unlikely(test_bit(__IGC_DOWN, &adapter->state)))
return -ENETDOWN;
if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
return -EINVAL;
ring = igc_xdp_get_tx_ring(adapter, cpu);
nq = txring_txq(ring);
__netif_tx_lock(nq, cpu);
drops = 0;
for (i = 0; i < num_frames; i++) {
int err;
struct xdp_frame *xdpf = frames[i];
err = igc_xdp_init_tx_descriptor(ring, xdpf);
if (err) {
xdp_return_frame_rx_napi(xdpf);
drops++;
}
}
if (flags & XDP_XMIT_FLUSH)
igc_flush_tx_descriptors(ring);
__netif_tx_unlock(nq);
return num_frames - drops;
}
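
A brief note on the ndo_xdp_xmit contract implemented above: the return
value is the number of frames actually queued, and frames that could not be
queued have already been freed here. For example (illustrative values):

/* num_frames == 8, two descriptors unavailable:
 *   xdp_return_frame_rx_napi() is called twice, the function returns 6,
 *   and the caller must not touch the two dropped frames again.
 */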
static const struct net_device_ops igc_netdev_ops = {
.ndo_open = igc_open,
.ndo_stop = igc_close,
@@ -4857,6 +5185,8 @@ static const struct net_device_ops igc_netdev_ops = {
.ndo_features_check = igc_features_check,
.ndo_do_ioctl = igc_ioctl,
.ndo_setup_tc = igc_setup_tc,
.ndo_bpf = igc_bpf,
.ndo_xdp_xmit = igc_xdp_xmit,
};
/* PCIe configuration access */
--- a/drivers/net/ethernet/intel/igc/igc_ptp.c
+++ b/drivers/net/ethernet/intel/igc/igc_ptp.c
@@ -153,20 +153,20 @@ static void igc_ptp_systim_to_hwtstamp(struct igc_adapter *adapter,
/**
* igc_ptp_rx_pktstamp - Retrieve timestamp from Rx packet buffer
* @q_vector: Pointer to interrupt specific structure
* @va: Pointer to address containing Rx buffer
* @skb: Buffer containing timestamp and packet
* @adapter: Pointer to adapter the packet buffer belongs to
* @buf: Pointer to packet buffer
*
* This function retrieves the timestamp saved in the beginning of packet
* buffer. While two timestamps are available, one in timer0 reference and the
* other in timer1 reference, this function considers only the timestamp in
* timer0 reference.
*
* Returns timestamp value.
*/
void igc_ptp_rx_pktstamp(struct igc_q_vector *q_vector, __le32 *va,
struct sk_buff *skb)
ktime_t igc_ptp_rx_pktstamp(struct igc_adapter *adapter, __le32 *buf)
{
struct igc_adapter *adapter = q_vector->adapter;
u64 regval;
ktime_t timestamp;
u32 secs, nsecs;
int adjust;
/* Timestamps are saved in little endian at the beginning of the packet
@@ -178,9 +178,10 @@ void igc_ptp_rx_pktstamp(struct igc_q_vector *q_vector, __le32 *va,
* SYSTIML holds the nanoseconds part while SYSTIMH holds the seconds
* part of the timestamp.
*/
regval = le32_to_cpu(va[2]);
regval |= (u64)le32_to_cpu(va[3]) << 32;
igc_ptp_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), regval);
nsecs = le32_to_cpu(buf[2]);
secs = le32_to_cpu(buf[3]);
timestamp = ktime_set(secs, nsecs);
/* Adjust timestamp for the RX latency based on link speed */
switch (adapter->link_speed) {
@@ -201,8 +202,8 @@ void igc_ptp_rx_pktstamp(struct igc_q_vector *q_vector, __le32 *va,
netdev_warn_once(adapter->netdev, "Imprecise timestamp\n");
break;
}
skb_hwtstamps(skb)->hwtstamp =
ktime_sub_ns(skb_hwtstamps(skb)->hwtstamp, adjust);
return ktime_sub_ns(timestamp, adjust);
}
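
For clarity, this is the Rx packet-buffer layout the function parses; the
timestamp header is IGC_TS_HDR_LEN (16) bytes, stored as little-endian
dwords ahead of the frame:

/* buf[0], buf[1]: timestamp in timer1 reference (ignored here)
 * buf[2]        : SYSTIML, nanoseconds part (timer0)
 * buf[3]        : SYSTIMH, seconds part (timer0)
 * buf[4]...     : start of the received Ethernet frame
 */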
static void igc_ptp_disable_rx_timestamp(struct igc_adapter *adapter)
--- /dev/null
+++ b/drivers/net/ethernet/intel/igc/igc_xdp.c
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020, Intel Corporation. */
#include "igc.h"
#include "igc_xdp.h"
int igc_xdp_set_prog(struct igc_adapter *adapter, struct bpf_prog *prog,
struct netlink_ext_ack *extack)
{
struct net_device *dev = adapter->netdev;
bool if_running = netif_running(dev);
struct bpf_prog *old_prog;
if (dev->mtu > ETH_DATA_LEN) {
/* For now, the driver doesn't support XDP functionality with
 * jumbo frames, so we return an error.
 */
NL_SET_ERR_MSG_MOD(extack, "Jumbo frames not supported");
return -EOPNOTSUPP;
}
if (if_running)
igc_close(dev);
old_prog = xchg(&adapter->xdp_prog, prog);
if (old_prog)
bpf_prog_put(old_prog);
if (if_running)
igc_open(dev);
return 0;
}
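
Two details of igc_xdp_set_prog() are worth noting: the program pointer is
swapped with xchg() so that igc_xdp_run_prog(), which reads it under RCU,
always sees a consistent pointer; and the close/open cycle forces the rings
to be reconfigured, so igc_configure_rx_ring() picks up the XDP buffer
layout (headroom and large buffers). Detaching, for illustration, goes
through the same path with a NULL program:

	igc_xdp_set_prog(adapter, NULL, NULL);	/* detach, restore rings */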
int igc_xdp_register_rxq_info(struct igc_ring *ring)
{
struct net_device *dev = ring->netdev;
int err;
err = xdp_rxq_info_reg(&ring->xdp_rxq, dev, ring->queue_index, 0);
if (err) {
netdev_err(dev, "Failed to register xdp rxq info\n");
return err;
}
err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq, MEM_TYPE_PAGE_SHARED,
NULL);
if (err) {
netdev_err(dev, "Failed to register xdp rxq mem model\n");
xdp_rxq_info_unreg(&ring->xdp_rxq);
return err;
}
return 0;
}
void igc_xdp_unregister_rxq_info(struct igc_ring *ring)
{
xdp_rxq_info_unreg(&ring->xdp_rxq);
}
--- /dev/null
+++ b/drivers/net/ethernet/intel/igc/igc_xdp.h
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2020, Intel Corporation. */
#ifndef _IGC_XDP_H_
#define _IGC_XDP_H_
int igc_xdp_set_prog(struct igc_adapter *adapter, struct bpf_prog *prog,
struct netlink_ext_ack *extack);
int igc_xdp_register_rxq_info(struct igc_ring *ring);
void igc_xdp_unregister_rxq_info(struct igc_ring *ring);
#endif /* _IGC_XDP_H_ */