Commit 88287773 authored by David S. Miller

Merge branch '1GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/tnguy/net-queue

Tony Nguyen says:

====================
Intel Wired LAN Driver Updates 2020-12-09

This series contains updates to igb, ixgbe, i40e, and ice drivers.

Sven Auhagen fixes several issues with igb XDP: return the correct value
from the XDP xmit-back path, increase header padding to include space for
a double VLAN tag, add an extack error when the Rx buffer is too small for
the frame size, set the metadata size when it is set in the xdp buffer,
change xdp_do_flush_map to xdp_do_flush, and update trans_start to avoid a
possible Tx timeout.

Björn Töpel fixes an issue where an Rx buffer can be reused prematurely
with XDP redirect for the ixgbe, i40e, and ice drivers.

The following are changes since commit 323a391a:
  can: isotp: isotp_setsockopt(): block setsockopt on bound sockets
and are available in the git repository at:
  git://git.kernel.org/pub/scm/linux/kernel/git/tnguy/net-queue 1GbE
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 9a25a30e 1beb7830
......@@ -1850,6 +1850,7 @@ static inline bool i40e_page_is_reusable(struct page *page)
* the adapter for another receive
*
* @rx_buffer: buffer containing the page
* @rx_buffer_pgcnt: buffer page refcount pre xdp_do_redirect() call
*
* If page is reusable, rx_buffer->page_offset is adjusted to point to
* an unused region in the page.
......@@ -1872,7 +1873,8 @@ static inline bool i40e_page_is_reusable(struct page *page)
*
* In either case, if the page is reusable its refcount is increased.
**/
static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer)
static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer,
int rx_buffer_pgcnt)
{
unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
struct page *page = rx_buffer->page;
......@@ -1883,7 +1885,7 @@ static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer)
#if (PAGE_SIZE < 8192)
/* if we are only owner of page we can reuse it */
if (unlikely((page_count(page) - pagecnt_bias) > 1))
if (unlikely((rx_buffer_pgcnt - pagecnt_bias) > 1))
return false;
#else
#define I40E_LAST_OFFSET \
......@@ -1942,16 +1944,24 @@ static void i40e_add_rx_frag(struct i40e_ring *rx_ring,
* i40e_get_rx_buffer - Fetch Rx buffer and synchronize data for use
* @rx_ring: rx descriptor ring to transact packets on
* @size: size of buffer to add to skb
* @rx_buffer_pgcnt: buffer page refcount
*
* This function will pull an Rx buffer from the ring and synchronize it
* for use by the CPU.
*/
static struct i40e_rx_buffer *i40e_get_rx_buffer(struct i40e_ring *rx_ring,
const unsigned int size)
const unsigned int size,
int *rx_buffer_pgcnt)
{
struct i40e_rx_buffer *rx_buffer;
rx_buffer = i40e_rx_bi(rx_ring, rx_ring->next_to_clean);
*rx_buffer_pgcnt =
#if (PAGE_SIZE < 8192)
page_count(rx_buffer->page);
#else
0;
#endif
prefetch_page_address(rx_buffer->page);
/* we are reusing so sync this buffer for CPU use */
......@@ -2102,14 +2112,16 @@ static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring,
* i40e_put_rx_buffer - Clean up used buffer and either recycle or free
* @rx_ring: rx descriptor ring to transact packets on
* @rx_buffer: rx buffer to pull data from
* @rx_buffer_pgcnt: rx buffer page refcount pre xdp_do_redirect() call
*
* This function will clean up the contents of the rx_buffer. It will
* either recycle the buffer or unmap it and free the associated resources.
*/
static void i40e_put_rx_buffer(struct i40e_ring *rx_ring,
struct i40e_rx_buffer *rx_buffer)
struct i40e_rx_buffer *rx_buffer,
int rx_buffer_pgcnt)
{
if (i40e_can_reuse_rx_page(rx_buffer)) {
if (i40e_can_reuse_rx_page(rx_buffer, rx_buffer_pgcnt)) {
/* hand second half of page back to the ring */
i40e_reuse_rx_page(rx_ring, rx_buffer);
} else {
......@@ -2336,6 +2348,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
while (likely(total_rx_packets < (unsigned int)budget)) {
struct i40e_rx_buffer *rx_buffer;
union i40e_rx_desc *rx_desc;
int rx_buffer_pgcnt;
unsigned int size;
u64 qword;
......@@ -2378,7 +2391,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
break;
i40e_trace(clean_rx_irq, rx_ring, rx_desc, skb);
rx_buffer = i40e_get_rx_buffer(rx_ring, size);
rx_buffer = i40e_get_rx_buffer(rx_ring, size, &rx_buffer_pgcnt);
/* retrieve a buffer from the ring */
if (!skb) {
......@@ -2421,7 +2434,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
break;
}
i40e_put_rx_buffer(rx_ring, rx_buffer);
i40e_put_rx_buffer(rx_ring, rx_buffer, rx_buffer_pgcnt);
cleaned_count++;
if (i40e_is_non_eop(rx_ring, rx_desc, skb))
......
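The i40e hunks above (the ice and ixgbe changes below apply the same pattern) all hinge on when page_count() is read. A condensed sketch of the resulting per-descriptor flow, with DMA sync, XDP setup and error handling elided and a hypothetical wrapper name, might look like this:

/* Hypothetical condensed flow for one Rx descriptor; only the ordering of
 * the page-count snapshot relative to the XDP program matters here.
 */
static void i40e_rx_desc_flow_sketch(struct i40e_ring *rx_ring,
                                     unsigned int size)
{
        struct i40e_rx_buffer *rx_buffer;
        int rx_buffer_pgcnt;

        /* 1. Snapshot page_count() before any XDP program can run. */
        rx_buffer = i40e_get_rx_buffer(rx_ring, size, &rx_buffer_pgcnt);

        /* 2. Run XDP here; on XDP_REDIRECT the fragment may be handed to
         *    another CPU, which can drop its page reference at any point
         *    from now on.
         */

        /* 3. The reuse decision in i40e_can_reuse_rx_page() compares the
         *    pre-redirect snapshot against pagecnt_bias, so a reference
         *    released concurrently during step 2 cannot make a page that
         *    is still in flight look exclusively owned and reusable.
         */
        i40e_put_rx_buffer(rx_ring, rx_buffer, rx_buffer_pgcnt);
}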
......@@ -762,13 +762,15 @@ ice_rx_buf_adjust_pg_offset(struct ice_rx_buf *rx_buf, unsigned int size)
/**
* ice_can_reuse_rx_page - Determine if page can be reused for another Rx
* @rx_buf: buffer containing the page
* @rx_buf_pgcnt: rx_buf page refcount pre xdp_do_redirect() call
*
* If page is reusable, we have a green light for calling ice_reuse_rx_page,
* which will assign the current buffer to the buffer that next_to_alloc is
* pointing to; otherwise, the DMA mapping needs to be destroyed and
* page freed
*/
static bool ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf)
static bool
ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf, int rx_buf_pgcnt)
{
unsigned int pagecnt_bias = rx_buf->pagecnt_bias;
struct page *page = rx_buf->page;
......@@ -779,7 +781,7 @@ static bool ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf)
#if (PAGE_SIZE < 8192)
/* if we are only owner of page we can reuse it */
if (unlikely((page_count(page) - pagecnt_bias) > 1))
if (unlikely((rx_buf_pgcnt - pagecnt_bias) > 1))
return false;
#else
#define ICE_LAST_OFFSET \
......@@ -864,17 +866,24 @@ ice_reuse_rx_page(struct ice_ring *rx_ring, struct ice_rx_buf *old_buf)
* @rx_ring: Rx descriptor ring to transact packets on
* @skb: skb to be used
* @size: size of buffer to add to skb
* @rx_buf_pgcnt: rx_buf page refcount
*
* This function will pull an Rx buffer from the ring and synchronize it
* for use by the CPU.
*/
static struct ice_rx_buf *
ice_get_rx_buf(struct ice_ring *rx_ring, struct sk_buff **skb,
const unsigned int size)
const unsigned int size, int *rx_buf_pgcnt)
{
struct ice_rx_buf *rx_buf;
rx_buf = &rx_ring->rx_buf[rx_ring->next_to_clean];
*rx_buf_pgcnt =
#if (PAGE_SIZE < 8192)
page_count(rx_buf->page);
#else
0;
#endif
prefetchw(rx_buf->page);
*skb = rx_buf->skb;
......@@ -1006,12 +1015,15 @@ ice_construct_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
* ice_put_rx_buf - Clean up used buffer and either recycle or free
* @rx_ring: Rx descriptor ring to transact packets on
* @rx_buf: Rx buffer to pull data from
* @rx_buf_pgcnt: Rx buffer page count pre xdp_do_redirect()
*
* This function will update next_to_clean and then clean up the contents
* of the rx_buf. It will either recycle the buffer or unmap it and free
* the associated resources.
*/
static void ice_put_rx_buf(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf)
static void
ice_put_rx_buf(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
int rx_buf_pgcnt)
{
u16 ntc = rx_ring->next_to_clean + 1;
......@@ -1022,7 +1034,7 @@ static void ice_put_rx_buf(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf)
if (!rx_buf)
return;
if (ice_can_reuse_rx_page(rx_buf)) {
if (ice_can_reuse_rx_page(rx_buf, rx_buf_pgcnt)) {
/* hand second half of page back to the ring */
ice_reuse_rx_page(rx_ring, rx_buf);
} else {
......@@ -1097,6 +1109,7 @@ int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
struct sk_buff *skb;
unsigned int size;
u16 stat_err_bits;
int rx_buf_pgcnt;
u16 vlan_tag = 0;
u8 rx_ptype;
......@@ -1119,7 +1132,7 @@ int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
dma_rmb();
if (rx_desc->wb.rxdid == FDIR_DESC_RXDID || !rx_ring->netdev) {
ice_put_rx_buf(rx_ring, NULL);
ice_put_rx_buf(rx_ring, NULL, 0);
cleaned_count++;
continue;
}
......@@ -1128,7 +1141,7 @@ int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
ICE_RX_FLX_DESC_PKT_LEN_M;
/* retrieve a buffer from the ring */
rx_buf = ice_get_rx_buf(rx_ring, &skb, size);
rx_buf = ice_get_rx_buf(rx_ring, &skb, size, &rx_buf_pgcnt);
if (!size) {
xdp.data = NULL;
......@@ -1168,7 +1181,7 @@ int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
total_rx_pkts++;
cleaned_count++;
ice_put_rx_buf(rx_ring, rx_buf);
ice_put_rx_buf(rx_ring, rx_buf, rx_buf_pgcnt);
continue;
construct_skb:
if (skb) {
......@@ -1187,7 +1200,7 @@ int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
break;
}
ice_put_rx_buf(rx_ring, rx_buf);
ice_put_rx_buf(rx_ring, rx_buf, rx_buf_pgcnt);
cleaned_count++;
/* skip if it is NOP desc */
......
......@@ -138,6 +138,8 @@ struct vf_mac_filter {
/* this is the size past which hardware will drop packets when setting LPE=0 */
#define MAXIMUM_ETHERNET_VLAN_SIZE 1522
#define IGB_ETH_PKT_HDR_PAD (ETH_HLEN + ETH_FCS_LEN + (VLAN_HLEN * 2))
/* Supported Rx Buffer Sizes */
#define IGB_RXBUFFER_256 256
#define IGB_RXBUFFER_1536 1536
......@@ -247,6 +249,9 @@ enum igb_tx_flags {
#define IGB_SFF_ADDRESSING_MODE 0x4
#define IGB_SFF_8472_UNSUP 0x00
/* TX resources are shared between XDP and netstack
* and we need to tag the buffer type to distinguish them
*/
enum igb_tx_buf_type {
IGB_TYPE_SKB = 0,
IGB_TYPE_XDP,
......
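The doubled VLAN_HLEN in IGB_ETH_PKT_HDR_PAD above reserves headroom for a QinQ frame (an outer plus an inner 802.1Q tag). A small, self-contained worked example of the arithmetic (constant values mirror the kernel's if_ether.h and if_vlan.h; this is illustration, not driver code):

#include <stdio.h>

#define ETH_HLEN        14      /* destination MAC + source MAC + EtherType */
#define ETH_FCS_LEN     4       /* frame check sequence (CRC) */
#define VLAN_HLEN       4       /* one 802.1Q tag */

/* doubled VLAN_HLEN leaves room for an outer and an inner VLAN tag */
#define IGB_ETH_PKT_HDR_PAD     (ETH_HLEN + ETH_FCS_LEN + (VLAN_HLEN * 2))

int main(void)
{
        /* 14 + 4 + 2 * 4 = 26 bytes of per-frame overhead beyond the MTU */
        printf("IGB_ETH_PKT_HDR_PAD = %d\n", IGB_ETH_PKT_HDR_PAD);
        /* e.g. an MTU of 1500 gives a maximum frame size of 1526 */
        printf("max frame for MTU 1500 = %d\n", 1500 + IGB_ETH_PKT_HDR_PAD);
        return 0;
}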
......@@ -2824,20 +2824,25 @@ static int igb_setup_tc(struct net_device *dev, enum tc_setup_type type,
}
}
static int igb_xdp_setup(struct net_device *dev, struct bpf_prog *prog)
static int igb_xdp_setup(struct net_device *dev, struct netdev_bpf *bpf)
{
int i, frame_size = dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
int i, frame_size = dev->mtu + IGB_ETH_PKT_HDR_PAD;
struct igb_adapter *adapter = netdev_priv(dev);
struct bpf_prog *prog = bpf->prog, *old_prog;
bool running = netif_running(dev);
struct bpf_prog *old_prog;
bool need_reset;
/* verify igb ring attributes are sufficient for XDP */
for (i = 0; i < adapter->num_rx_queues; i++) {
struct igb_ring *ring = adapter->rx_ring[i];
if (frame_size > igb_rx_bufsz(ring))
if (frame_size > igb_rx_bufsz(ring)) {
NL_SET_ERR_MSG_MOD(bpf->extack,
"The RX buffer size is too small for the frame size");
netdev_warn(dev, "XDP RX buffer size %d is too small for the frame size %d\n",
igb_rx_bufsz(ring), frame_size);
return -EINVAL;
}
}
old_prog = xchg(&adapter->xdp_prog, prog);
......@@ -2869,7 +2874,7 @@ static int igb_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
switch (xdp->command) {
case XDP_SETUP_PROG:
return igb_xdp_setup(dev, xdp->prog);
return igb_xdp_setup(dev, xdp);
default:
return -EINVAL;
}
......@@ -2910,10 +2915,12 @@ static int igb_xdp_xmit_back(struct igb_adapter *adapter, struct xdp_buff *xdp)
*/
tx_ring = adapter->xdp_prog ? igb_xdp_tx_queue_mapping(adapter) : NULL;
if (unlikely(!tx_ring))
return -ENXIO;
return IGB_XDP_CONSUMED;
nq = txring_txq(tx_ring);
__netif_tx_lock(nq, cpu);
/* Avoid transmit queue timeout since we share it with the slow path */
nq->trans_start = jiffies;
ret = igb_xmit_xdp_ring(adapter, tx_ring, xdpf);
__netif_tx_unlock(nq);
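Taken together, the two changes in the hunk above — returning IGB_XDP_CONSUMED rather than an errno, and refreshing trans_start under the queue lock — reduce the xmit-back path to roughly the sketch below (helper names follow the igb driver, the wrapper name is hypothetical, and the xdp_buff-to-frame conversion and error handling of the real function are trimmed):

static int igb_xdp_xmit_back_sketch(struct igb_adapter *adapter,
                                    struct xdp_frame *xdpf)
{
        struct igb_ring *tx_ring;
        struct netdev_queue *nq;
        int ret;

        tx_ring = adapter->xdp_prog ? igb_xdp_tx_queue_mapping(adapter) : NULL;
        if (unlikely(!tx_ring))
                /* Not -ENXIO: the callers treat the return value as one of
                 * the IGB_XDP_* codes, so an errno would be misread.
                 */
                return IGB_XDP_CONSUMED;

        nq = txring_txq(tx_ring);
        __netif_tx_lock(nq, smp_processor_id());

        /* The ring is shared with the regular stack path; refreshing
         * trans_start keeps the Tx watchdog from flagging a bogus
         * timeout while XDP traffic holds the queue.
         */
        nq->trans_start = jiffies;

        ret = igb_xmit_xdp_ring(adapter, tx_ring, xdpf);

        __netif_tx_unlock(nq);
        return ret;
}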
......@@ -2946,6 +2953,9 @@ static int igb_xdp_xmit(struct net_device *dev, int n,
nq = txring_txq(tx_ring);
__netif_tx_lock(nq, cpu);
/* Avoid transmit queue timeout since we share it with the slow path */
nq->trans_start = jiffies;
for (i = 0; i < n; i++) {
struct xdp_frame *xdpf = frames[i];
int err;
......@@ -3950,8 +3960,7 @@ static int igb_sw_init(struct igb_adapter *adapter)
/* set default work limits */
adapter->tx_work_limit = IGB_DEFAULT_TX_WORK;
adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN +
VLAN_HLEN;
adapter->max_frame_size = netdev->mtu + IGB_ETH_PKT_HDR_PAD;
adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
spin_lock_init(&adapter->nfc_lock);
......@@ -6491,7 +6500,7 @@ static void igb_get_stats64(struct net_device *netdev,
static int igb_change_mtu(struct net_device *netdev, int new_mtu)
{
struct igb_adapter *adapter = netdev_priv(netdev);
int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
int max_frame = new_mtu + IGB_ETH_PKT_HDR_PAD;
if (adapter->xdp_prog) {
int i;
......@@ -6500,7 +6509,9 @@ static int igb_change_mtu(struct net_device *netdev, int new_mtu)
struct igb_ring *ring = adapter->rx_ring[i];
if (max_frame > igb_rx_bufsz(ring)) {
netdev_warn(adapter->netdev, "Requested MTU size is not supported with XDP\n");
netdev_warn(adapter->netdev,
"Requested MTU size is not supported with XDP. Max frame size is %d\n",
max_frame);
return -EINVAL;
}
}
......@@ -8351,6 +8362,7 @@ static struct sk_buff *igb_build_skb(struct igb_ring *rx_ring,
SKB_DATA_ALIGN(xdp->data_end -
xdp->data_hard_start);
#endif
unsigned int metasize = xdp->data - xdp->data_meta;
struct sk_buff *skb;
/* prefetch first cache line of first page */
......@@ -8365,6 +8377,9 @@ static struct sk_buff *igb_build_skb(struct igb_ring *rx_ring,
skb_reserve(skb, xdp->data - xdp->data_hard_start);
__skb_put(skb, xdp->data_end - xdp->data);
if (metasize)
skb_metadata_set(skb, metasize);
/* pull timestamp out of packet data */
if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
igb_ptp_rx_pktstamp(rx_ring->q_vector, skb->data, skb);
......@@ -8771,7 +8786,7 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
rx_ring->skb = skb;
if (xdp_xmit & IGB_XDP_REDIR)
xdp_do_flush_map();
xdp_do_flush();
if (xdp_xmit & IGB_XDP_TX) {
struct igb_ring *tx_ring = igb_xdp_tx_queue_mapping(adapter);
......
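The igb_build_skb() hunks above preserve any metadata an XDP program placed in front of the packet (between xdp->data_meta and xdp->data) when the buffer is turned into an skb. Reduced to its core, the pattern looks roughly like this (hypothetical helper name; the real function's truesize accounting and PTP timestamp handling are elided):

static struct sk_buff *igb_build_skb_meta_sketch(struct xdp_buff *xdp,
                                                 unsigned int truesize)
{
        unsigned int metasize = xdp->data - xdp->data_meta;
        struct sk_buff *skb;

        /* wrap the existing buffer instead of copying it */
        skb = build_skb(xdp->data_hard_start, truesize);
        if (unlikely(!skb))
                return NULL;

        /* headroom covers both the hard-start gap and the metadata bytes,
         * so skb->data ends up pointing at the packet itself
         */
        skb_reserve(skb, xdp->data - xdp->data_hard_start);
        __skb_put(skb, xdp->data_end - xdp->data);

        /* record how many metadata bytes precede skb->data so a later
         * BPF program (e.g. on a TC hook) can still reach them
         */
        if (metasize)
                skb_metadata_set(skb, metasize);

        return skb;
}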
......@@ -1945,7 +1945,8 @@ static inline bool ixgbe_page_is_reserved(struct page *page)
return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
}
static bool ixgbe_can_reuse_rx_page(struct ixgbe_rx_buffer *rx_buffer)
static bool ixgbe_can_reuse_rx_page(struct ixgbe_rx_buffer *rx_buffer,
int rx_buffer_pgcnt)
{
unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
struct page *page = rx_buffer->page;
......@@ -1956,7 +1957,7 @@ static bool ixgbe_can_reuse_rx_page(struct ixgbe_rx_buffer *rx_buffer)
#if (PAGE_SIZE < 8192)
/* if we are only owner of page we can reuse it */
if (unlikely((page_ref_count(page) - pagecnt_bias) > 1))
if (unlikely((rx_buffer_pgcnt - pagecnt_bias) > 1))
return false;
#else
/* The last offset is a bit aggressive in that we assume the
......@@ -2021,11 +2022,18 @@ static void ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring,
static struct ixgbe_rx_buffer *ixgbe_get_rx_buffer(struct ixgbe_ring *rx_ring,
union ixgbe_adv_rx_desc *rx_desc,
struct sk_buff **skb,
const unsigned int size)
const unsigned int size,
int *rx_buffer_pgcnt)
{
struct ixgbe_rx_buffer *rx_buffer;
rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
*rx_buffer_pgcnt =
#if (PAGE_SIZE < 8192)
page_count(rx_buffer->page);
#else
0;
#endif
prefetchw(rx_buffer->page);
*skb = rx_buffer->skb;
......@@ -2055,9 +2063,10 @@ static struct ixgbe_rx_buffer *ixgbe_get_rx_buffer(struct ixgbe_ring *rx_ring,
static void ixgbe_put_rx_buffer(struct ixgbe_ring *rx_ring,
struct ixgbe_rx_buffer *rx_buffer,
struct sk_buff *skb)
struct sk_buff *skb,
int rx_buffer_pgcnt)
{
if (ixgbe_can_reuse_rx_page(rx_buffer)) {
if (ixgbe_can_reuse_rx_page(rx_buffer, rx_buffer_pgcnt)) {
/* hand second half of page back to the ring */
ixgbe_reuse_rx_page(rx_ring, rx_buffer);
} else {
......@@ -2303,6 +2312,7 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
union ixgbe_adv_rx_desc *rx_desc;
struct ixgbe_rx_buffer *rx_buffer;
struct sk_buff *skb;
int rx_buffer_pgcnt;
unsigned int size;
/* return some buffers to hardware, one at a time is too slow */
......@@ -2322,7 +2332,7 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
*/
dma_rmb();
rx_buffer = ixgbe_get_rx_buffer(rx_ring, rx_desc, &skb, size);
rx_buffer = ixgbe_get_rx_buffer(rx_ring, rx_desc, &skb, size, &rx_buffer_pgcnt);
/* retrieve a buffer from the ring */
if (!skb) {
......@@ -2367,7 +2377,7 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
break;
}
ixgbe_put_rx_buffer(rx_ring, rx_buffer, skb);
ixgbe_put_rx_buffer(rx_ring, rx_buffer, skb, rx_buffer_pgcnt);
cleaned_count++;
/* place incomplete frames back on ring for completion */
......