Commit 21092e9c authored by Tony Nguyen, committed by Jeff Kirsher

ixgbevf: Add support for XDP_TX action

This implements the XDP_TX action, modeled on the ixgbe
implementation. However, instead of using the CPU id to determine
which XDP queue to use, this uses the received RX queue index, as
i40e does. Doing so eliminates ixgbe's restriction that the number of
CPUs must not exceed the number of XDP queues.
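
For reference, the ring-selection difference looks like this (a
sketch only; the ixgbe line is paraphrased from that driver and is
not part of this patch):

	/* ixgbe: the XDP TX ring is keyed off the running CPU, which
	 * is why the CPU count must not exceed the XDP queue count:
	 */
	ring = adapter->xdp_ring[smp_processor_id()];

	/* ixgbevf (this patch): the XDP TX ring is paired with the
	 * RX queue the frame arrived on, as in i40e:
	 */
	xdp_ring = adapter->xdp_ring[rx_ring->queue_index];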

Also, based on the number of queues available, the number of TX queues
may be reduced when an XDP program is loaded in order to accommodate the
XDP queues.
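
Concretely (condensed from the ixgbevf_set_num_queues() hunk below):
when the VF's maximum number of TX queues is already in use for RSS,
loading an XDP program cuts the RSS queue count so that the TX and
XDP queues together still fit within what the PF has assigned:

	if (adapter->xdp_prog && hw->mac.max_tx_queues == rss)
		rss = rss > 3 ? 2 : 1;	/* e.g. 4 RSS queues become 2 */
	adapter->num_rx_queues = rss;
	adapter->num_tx_queues = rss;
	adapter->num_xdp_queues = adapter->xdp_prog ? rss : 0;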

Based largely on
commit 33fdc82f ("ixgbe: add support for XDP_TX action")
Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
Acked-by: John Fastabend <john.fastabend@gmail.com>
Tested-by: Andrew Bowers <andrewx.bowers@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
parent c7aec596
--- a/drivers/net/ethernet/intel/ixgbevf/ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
@@ -269,7 +269,7 @@ static int ixgbevf_set_ringparam(struct net_device *netdev,
 	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
 	struct ixgbevf_ring *tx_ring = NULL, *rx_ring = NULL;
 	u32 new_rx_count, new_tx_count;
-	int i, err = 0;
+	int i, j, err = 0;
 
 	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
 		return -EINVAL;
@@ -293,15 +293,19 @@ static int ixgbevf_set_ringparam(struct net_device *netdev,
 	if (!netif_running(adapter->netdev)) {
 		for (i = 0; i < adapter->num_tx_queues; i++)
 			adapter->tx_ring[i]->count = new_tx_count;
+		for (i = 0; i < adapter->num_xdp_queues; i++)
+			adapter->xdp_ring[i]->count = new_tx_count;
 		for (i = 0; i < adapter->num_rx_queues; i++)
 			adapter->rx_ring[i]->count = new_rx_count;
 		adapter->tx_ring_count = new_tx_count;
+		adapter->xdp_ring_count = new_tx_count;
 		adapter->rx_ring_count = new_rx_count;
 		goto clear_reset;
 	}
 
 	if (new_tx_count != adapter->tx_ring_count) {
-		tx_ring = vmalloc(adapter->num_tx_queues * sizeof(*tx_ring));
+		tx_ring = vmalloc((adapter->num_tx_queues +
+				   adapter->num_xdp_queues) * sizeof(*tx_ring));
 		if (!tx_ring) {
 			err = -ENOMEM;
 			goto clear_reset;
@@ -324,6 +328,24 @@ static int ixgbevf_set_ringparam(struct net_device *netdev,
 				goto clear_reset;
 			}
 		}
+
+		for (j = 0; j < adapter->num_xdp_queues; i++, j++) {
+			/* clone ring and setup updated count */
+			tx_ring[i] = *adapter->xdp_ring[j];
+			tx_ring[i].count = new_tx_count;
+			err = ixgbevf_setup_tx_resources(&tx_ring[i]);
+			if (err) {
+				while (i) {
+					i--;
+					ixgbevf_free_tx_resources(&tx_ring[i]);
+				}
+
+				vfree(tx_ring);
+				tx_ring = NULL;
+
+				goto clear_reset;
+			}
+		}
 	}
 
 	if (new_rx_count != adapter->rx_ring_count) {
@@ -368,6 +390,12 @@ static int ixgbevf_set_ringparam(struct net_device *netdev,
 		}
 		adapter->tx_ring_count = new_tx_count;
 
+		for (j = 0; j < adapter->num_xdp_queues; i++, j++) {
+			ixgbevf_free_tx_resources(adapter->xdp_ring[j]);
+			*adapter->xdp_ring[j] = tx_ring[i];
+		}
+		adapter->xdp_ring_count = new_tx_count;
+
 		vfree(tx_ring);
 		tx_ring = NULL;
 	}
@@ -390,7 +418,8 @@ static int ixgbevf_set_ringparam(struct net_device *netdev,
 clear_reset:
 	/* free Tx resources if Rx error is encountered */
 	if (tx_ring) {
-		for (i = 0; i < adapter->num_tx_queues; i++)
+		for (i = 0;
+		     i < adapter->num_tx_queues + adapter->num_xdp_queues; i++)
 			ixgbevf_free_tx_resources(&tx_ring[i]);
 		vfree(tx_ring);
 	}
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
@@ -52,7 +52,11 @@
 struct ixgbevf_tx_buffer {
 	union ixgbe_adv_tx_desc *next_to_watch;
 	unsigned long time_stamp;
-	struct sk_buff *skb;
+	union {
+		struct sk_buff *skb;
+		/* XDP uses address ptr on irq_clean */
+		void *data;
+	};
 	unsigned int bytecount;
 	unsigned short gso_segs;
 	__be16 protocol;
@@ -95,8 +99,16 @@ enum ixgbevf_ring_state_t {
 	__IXGBEVF_RX_BUILD_SKB_ENABLED,
 	__IXGBEVF_TX_DETECT_HANG,
 	__IXGBEVF_HANG_CHECK_ARMED,
+	__IXGBEVF_TX_XDP_RING,
 };
 
+#define ring_is_xdp(ring) \
+		test_bit(__IXGBEVF_TX_XDP_RING, &(ring)->state)
+#define set_ring_xdp(ring) \
+		set_bit(__IXGBEVF_TX_XDP_RING, &(ring)->state)
+#define clear_ring_xdp(ring) \
+		clear_bit(__IXGBEVF_TX_XDP_RING, &(ring)->state)
+
 struct ixgbevf_ring {
 	struct ixgbevf_ring *next;
 	struct ixgbevf_q_vector *q_vector;	/* backpointer to q_vector */
@@ -139,6 +151,7 @@ struct ixgbevf_ring {
 #define MAX_RX_QUEUES IXGBE_VF_MAX_RX_QUEUES
 #define MAX_TX_QUEUES IXGBE_VF_MAX_TX_QUEUES
+#define MAX_XDP_QUEUES IXGBE_VF_MAX_TX_QUEUES
 #define IXGBEVF_MAX_RSS_QUEUES	2
 #define IXGBEVF_82599_RETA_SIZE	128	/* 128 entries */
 #define IXGBEVF_X550_VFRETA_SIZE 64	/* 64 entries */
@@ -339,6 +352,10 @@ struct ixgbevf_adapter {
 	u32 eims_enable_mask;
 	u32 eims_other;
 
+	/* XDP */
+	int num_xdp_queues;
+	struct ixgbevf_ring *xdp_ring[MAX_XDP_QUEUES];
+
 	/* TX */
 	int num_tx_queues;
 	struct ixgbevf_ring *tx_ring[MAX_TX_QUEUES]; /* One per active queue */
@@ -373,6 +390,7 @@ struct ixgbevf_adapter {
 	unsigned long state;
 	u64 tx_busy;
 	unsigned int tx_ring_count;
+	unsigned int xdp_ring_count;
 	unsigned int rx_ring_count;
 
 	u8 __iomem *io_addr; /* Mainly for iounmap use */
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -324,7 +324,10 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
 		total_packets += tx_buffer->gso_segs;
 
 		/* free the skb */
-		napi_consume_skb(tx_buffer->skb, napi_budget);
+		if (ring_is_xdp(tx_ring))
+			page_frag_free(tx_buffer->data);
+		else
+			napi_consume_skb(tx_buffer->skb, napi_budget);
 
 		/* unmap skb header data */
 		dma_unmap_single(tx_ring->dev,
@@ -388,7 +391,7 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
 		eop_desc = tx_ring->tx_buffer_info[i].next_to_watch;
 
-		pr_err("Detected Tx Unit Hang\n"
+		pr_err("Detected Tx Unit Hang%s\n"
 		       "  Tx Queue             <%d>\n"
 		       "  TDH, TDT             <%x>, <%x>\n"
 		       "  next_to_use          <%x>\n"
@@ -398,6 +401,7 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
 		       "  eop_desc->wb.status  <%x>\n"
 		       "  time_stamp           <%lx>\n"
 		       "  jiffies              <%lx>\n",
+		       ring_is_xdp(tx_ring) ? " XDP" : "",
 		       tx_ring->queue_index,
 		       IXGBE_READ_REG(hw, IXGBE_VFTDH(tx_ring->reg_idx)),
 		       IXGBE_READ_REG(hw, IXGBE_VFTDT(tx_ring->reg_idx)),
@@ -405,7 +409,9 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
 		       eop_desc, (eop_desc ? eop_desc->wb.status : 0),
 		       tx_ring->tx_buffer_info[i].time_stamp, jiffies);
 
-		netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
+		if (!ring_is_xdp(tx_ring))
+			netif_stop_subqueue(tx_ring->netdev,
+					    tx_ring->queue_index);
 
 		/* schedule immediate reset if we believe we hung */
 		ixgbevf_tx_timeout_reset(adapter);
@@ -413,6 +419,9 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
 		return true;
 	}
 
+	if (ring_is_xdp(tx_ring))
+		return !!budget;
+
 #define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
 	if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
 		     (ixgbevf_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) {
@@ -963,11 +972,78 @@ static struct sk_buff *ixgbevf_build_skb(struct ixgbevf_ring *rx_ring,
 #define IXGBEVF_XDP_PASS 0
 #define IXGBEVF_XDP_CONSUMED 1
+#define IXGBEVF_XDP_TX 2
+
+static int ixgbevf_xmit_xdp_ring(struct ixgbevf_ring *ring,
+				 struct xdp_buff *xdp)
+{
+	struct ixgbevf_tx_buffer *tx_buffer;
+	union ixgbe_adv_tx_desc *tx_desc;
+	u32 len, cmd_type;
+	dma_addr_t dma;
+	u16 i;
+
+	len = xdp->data_end - xdp->data;
+
+	if (unlikely(!ixgbevf_desc_unused(ring)))
+		return IXGBEVF_XDP_CONSUMED;
+
+	dma = dma_map_single(ring->dev, xdp->data, len, DMA_TO_DEVICE);
+	if (dma_mapping_error(ring->dev, dma))
+		return IXGBEVF_XDP_CONSUMED;
+
+	/* record the location of the first descriptor for this packet */
+	tx_buffer = &ring->tx_buffer_info[ring->next_to_use];
+	tx_buffer->bytecount = len;
+	tx_buffer->gso_segs = 1;
+	tx_buffer->protocol = 0;
+
+	i = ring->next_to_use;
+	tx_desc = IXGBEVF_TX_DESC(ring, i);
+
+	dma_unmap_len_set(tx_buffer, len, len);
+	dma_unmap_addr_set(tx_buffer, dma, dma);
+	tx_buffer->data = xdp->data;
+	tx_desc->read.buffer_addr = cpu_to_le64(dma);
+
+	/* put descriptor type bits */
+	cmd_type = IXGBE_ADVTXD_DTYP_DATA |
+		   IXGBE_ADVTXD_DCMD_DEXT |
+		   IXGBE_ADVTXD_DCMD_IFCS;
+	cmd_type |= len | IXGBE_TXD_CMD;
+	tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
+	tx_desc->read.olinfo_status =
+			cpu_to_le32((len << IXGBE_ADVTXD_PAYLEN_SHIFT) |
+				    IXGBE_ADVTXD_CC);
+
+	/* Force memory writes to complete before letting h/w know there
+	 * are new descriptors to fetch.  (Only applicable for weak-ordered
+	 * memory model archs, such as IA-64).
+	 *
+	 * We also need this memory barrier to make certain all of the
+	 * status bits have been updated before next_to_watch is written.
+	 */
+	wmb();
+
+	/* set next_to_watch value indicating a packet is present */
+	i++;
+	if (i == ring->count)
+		i = 0;
+
+	tx_buffer->next_to_watch = tx_desc;
+	ring->next_to_use = i;
+
+	/* notify HW of packet */
+	ixgbevf_write_tail(ring, i);
+
+	return IXGBEVF_XDP_TX;
+}
 
-static struct sk_buff *ixgbevf_run_xdp(struct ixgbevf_ring *rx_ring,
+static struct sk_buff *ixgbevf_run_xdp(struct ixgbevf_adapter *adapter,
+				       struct ixgbevf_ring *rx_ring,
 				       struct xdp_buff *xdp)
 {
 	int result = IXGBEVF_XDP_PASS;
+	struct ixgbevf_ring *xdp_ring;
 	struct bpf_prog *xdp_prog;
 	u32 act;
@@ -981,10 +1057,13 @@ static struct sk_buff *ixgbevf_run_xdp(struct ixgbevf_ring *rx_ring,
 	switch (act) {
 	case XDP_PASS:
 		break;
+	case XDP_TX:
+		xdp_ring = adapter->xdp_ring[rx_ring->queue_index];
+		result = ixgbevf_xmit_xdp_ring(xdp_ring, xdp);
+		break;
 	default:
 		bpf_warn_invalid_xdp_action(act);
 		/* fallthrough */
-	case XDP_TX:
 	case XDP_ABORTED:
 		trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
 		/* fallthrough -- handle aborts by dropping packet */
@@ -997,11 +1076,29 @@ static struct sk_buff *ixgbevf_run_xdp(struct ixgbevf_ring *rx_ring,
 	return ERR_PTR(-result);
 }
 
+static void ixgbevf_rx_buffer_flip(struct ixgbevf_ring *rx_ring,
+				   struct ixgbevf_rx_buffer *rx_buffer,
+				   unsigned int size)
+{
+#if (PAGE_SIZE < 8192)
+	unsigned int truesize = ixgbevf_rx_pg_size(rx_ring) / 2;
+
+	rx_buffer->page_offset ^= truesize;
+#else
+	unsigned int truesize = ring_uses_build_skb(rx_ring) ?
+				SKB_DATA_ALIGN(IXGBEVF_SKB_PAD + size) :
+				SKB_DATA_ALIGN(size);
+
+	rx_buffer->page_offset += truesize;
+#endif
+}
+
 static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
 				struct ixgbevf_ring *rx_ring,
 				int budget)
 {
 	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
+	struct ixgbevf_adapter *adapter = q_vector->adapter;
 	u16 cleaned_count = ixgbevf_desc_unused(rx_ring);
 	struct sk_buff *skb = rx_ring->skb;
 	struct xdp_buff xdp;
@@ -1041,13 +1138,17 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
 					      ixgbevf_rx_offset(rx_ring);
 			xdp.data_end = xdp.data + size;
 
-			skb = ixgbevf_run_xdp(rx_ring, &xdp);
+			skb = ixgbevf_run_xdp(adapter, rx_ring, &xdp);
 		}
 
 		if (IS_ERR(skb)) {
+			if (PTR_ERR(skb) == -IXGBEVF_XDP_TX)
+				ixgbevf_rx_buffer_flip(rx_ring, rx_buffer,
+						       size);
+			else
+				rx_buffer->pagecnt_bias++;
 			total_rx_packets++;
 			total_rx_bytes += size;
-			rx_buffer->pagecnt_bias++;
 		} else if (skb) {
 			ixgbevf_add_rx_frag(rx_ring, rx_buffer, skb, size);
 		} else if (ring_uses_build_skb(rx_ring)) {
@@ -1608,6 +1709,8 @@ static void ixgbevf_configure_tx(struct ixgbevf_adapter *adapter)
 	/* Setup the HW Tx Head and Tail descriptor pointers */
 	for (i = 0; i < adapter->num_tx_queues; i++)
 		ixgbevf_configure_tx_ring(adapter, adapter->tx_ring[i]);
+	for (i = 0; i < adapter->num_xdp_queues; i++)
+		ixgbevf_configure_tx_ring(adapter, adapter->xdp_ring[i]);
 }
 
 #define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT	2
@@ -2239,7 +2342,10 @@ static void ixgbevf_clean_tx_ring(struct ixgbevf_ring *tx_ring)
 		union ixgbe_adv_tx_desc *eop_desc, *tx_desc;
 
 		/* Free all the Tx ring sk_buffs */
-		dev_kfree_skb_any(tx_buffer->skb);
+		if (ring_is_xdp(tx_ring))
+			page_frag_free(tx_buffer->data);
+		else
+			dev_kfree_skb_any(tx_buffer->skb);
 
 		/* unmap skb header data */
 		dma_unmap_single(tx_ring->dev,
@@ -2307,6 +2413,8 @@ static void ixgbevf_clean_all_tx_rings(struct ixgbevf_adapter *adapter)
 	for (i = 0; i < adapter->num_tx_queues; i++)
 		ixgbevf_clean_tx_ring(adapter->tx_ring[i]);
+	for (i = 0; i < adapter->num_xdp_queues; i++)
+		ixgbevf_clean_tx_ring(adapter->xdp_ring[i]);
 }
 
 void ixgbevf_down(struct ixgbevf_adapter *adapter)
@@ -2345,6 +2453,13 @@ void ixgbevf_down(struct ixgbevf_adapter *adapter)
 				IXGBE_TXDCTL_SWFLSH);
 	}
 
+	for (i = 0; i < adapter->num_xdp_queues; i++) {
+		u8 reg_idx = adapter->xdp_ring[i]->reg_idx;
+
+		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx),
+				IXGBE_TXDCTL_SWFLSH);
+	}
+
 	if (!pci_channel_offline(adapter->pdev))
 		ixgbevf_reset(adapter);
@@ -2442,6 +2557,7 @@ static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter)
 	/* Start with base case */
 	adapter->num_rx_queues = 1;
 	adapter->num_tx_queues = 1;
+	adapter->num_xdp_queues = 0;
 
 	spin_lock_bh(&adapter->mbx_lock);
@@ -2463,8 +2579,13 @@ static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter)
 	case ixgbe_mbox_api_11:
 	case ixgbe_mbox_api_12:
 	case ixgbe_mbox_api_13:
+		if (adapter->xdp_prog &&
+		    hw->mac.max_tx_queues == rss)
+			rss = rss > 3 ? 2 : 1;
+
 		adapter->num_rx_queues = rss;
 		adapter->num_tx_queues = rss;
+		adapter->num_xdp_queues = adapter->xdp_prog ? rss : 0;
 	default:
 		break;
 	}
@@ -2521,6 +2642,8 @@ static void ixgbevf_add_ring(struct ixgbevf_ring *ring,
  * @v_idx: index of vector in adapter struct
  * @txr_count: number of Tx rings for q vector
  * @txr_idx: index of first Tx ring to assign
+ * @xdp_count: total number of XDP rings to allocate
+ * @xdp_idx: index of first XDP ring to allocate
  * @rxr_count: number of Rx rings for q vector
  * @rxr_idx: index of first Rx ring to assign
  *
@@ -2528,13 +2651,15 @@ static void ixgbevf_add_ring(struct ixgbevf_ring *ring,
  **/
 static int ixgbevf_alloc_q_vector(struct ixgbevf_adapter *adapter, int v_idx,
 				  int txr_count, int txr_idx,
+				  int xdp_count, int xdp_idx,
 				  int rxr_count, int rxr_idx)
 {
 	struct ixgbevf_q_vector *q_vector;
+	int reg_idx = txr_idx + xdp_idx;
 	struct ixgbevf_ring *ring;
 	int ring_count, size;
 
-	ring_count = txr_count + rxr_count;
+	ring_count = txr_count + xdp_count + rxr_count;
 	size = sizeof(*q_vector) + (sizeof(*ring) * ring_count);
 
 	/* allocate q_vector and rings */
@@ -2567,7 +2692,7 @@ static int ixgbevf_alloc_q_vector(struct ixgbevf_adapter *adapter, int v_idx,
 		/* apply Tx specific ring traits */
 		ring->count = adapter->tx_ring_count;
 		ring->queue_index = txr_idx;
-		ring->reg_idx = txr_idx;
+		ring->reg_idx = reg_idx;
 
 		/* assign ring to adapter */
 		adapter->tx_ring[txr_idx] = ring;
@@ -2575,6 +2700,36 @@ static int ixgbevf_alloc_q_vector(struct ixgbevf_adapter *adapter, int v_idx,
 		/* update count and index */
 		txr_count--;
 		txr_idx++;
+		reg_idx++;
+
+		/* push pointer to next ring */
+		ring++;
+	}
+
+	while (xdp_count) {
+		/* assign generic ring traits */
+		ring->dev = &adapter->pdev->dev;
+		ring->netdev = adapter->netdev;
+
+		/* configure backlink on ring */
+		ring->q_vector = q_vector;
+
+		/* update q_vector Tx values */
+		ixgbevf_add_ring(ring, &q_vector->tx);
+
+		/* apply Tx specific ring traits */
+		ring->count = adapter->tx_ring_count;
+		ring->queue_index = xdp_idx;
+		ring->reg_idx = reg_idx;
+		set_ring_xdp(ring);
+
+		/* assign ring to adapter */
+		adapter->xdp_ring[xdp_idx] = ring;
+
+		/* update count and index */
+		xdp_count--;
+		xdp_idx++;
+		reg_idx++;
 
 		/* push pointer to next ring */
 		ring++;
@@ -2624,8 +2779,12 @@ static void ixgbevf_free_q_vector(struct ixgbevf_adapter *adapter, int v_idx)
 	struct ixgbevf_q_vector *q_vector = adapter->q_vector[v_idx];
 	struct ixgbevf_ring *ring;
 
-	ixgbevf_for_each_ring(ring, q_vector->tx)
-		adapter->tx_ring[ring->queue_index] = NULL;
+	ixgbevf_for_each_ring(ring, q_vector->tx) {
+		if (ring_is_xdp(ring))
+			adapter->xdp_ring[ring->queue_index] = NULL;
+		else
+			adapter->tx_ring[ring->queue_index] = NULL;
+	}
 
 	ixgbevf_for_each_ring(ring, q_vector->rx)
 		adapter->rx_ring[ring->queue_index] = NULL;
@@ -2651,15 +2810,16 @@ static int ixgbevf_alloc_q_vectors(struct ixgbevf_adapter *adapter)
 	int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
 	int rxr_remaining = adapter->num_rx_queues;
 	int txr_remaining = adapter->num_tx_queues;
-	int rxr_idx = 0, txr_idx = 0, v_idx = 0;
+	int xdp_remaining = adapter->num_xdp_queues;
+	int rxr_idx = 0, txr_idx = 0, xdp_idx = 0, v_idx = 0;
 	int err;
 
-	if (q_vectors >= (rxr_remaining + txr_remaining)) {
+	if (q_vectors >= (rxr_remaining + txr_remaining + xdp_remaining)) {
 		for (; rxr_remaining; v_idx++, q_vectors--) {
 			int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors);
 
 			err = ixgbevf_alloc_q_vector(adapter, v_idx,
-						     0, 0, rqpv, rxr_idx);
+						     0, 0, 0, 0, rqpv, rxr_idx);
 			if (err)
 				goto err_out;
@@ -2672,9 +2832,11 @@ static int ixgbevf_alloc_q_vectors(struct ixgbevf_adapter *adapter)
 	for (; q_vectors; v_idx++, q_vectors--) {
 		int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors);
 		int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors);
+		int xqpv = DIV_ROUND_UP(xdp_remaining, q_vectors);
 
 		err = ixgbevf_alloc_q_vector(adapter, v_idx,
 					     tqpv, txr_idx,
+					     xqpv, xdp_idx,
 					     rqpv, rxr_idx);
 
 		if (err)
@@ -2685,6 +2847,8 @@ static int ixgbevf_alloc_q_vectors(struct ixgbevf_adapter *adapter)
 		rxr_idx += rqpv;
 		txr_remaining -= tqpv;
 		txr_idx += tqpv;
+		xdp_remaining -= xqpv;
+		xdp_idx += xqpv;
 	}
 
 	return 0;
@@ -2756,9 +2920,10 @@ static int ixgbevf_init_interrupt_scheme(struct ixgbevf_adapter *adapter)
 		goto err_alloc_q_vectors;
 	}
 
-	hw_dbg(&adapter->hw, "Multiqueue %s: Rx Queue count = %u, Tx Queue count = %u\n",
-	       (adapter->num_rx_queues > 1) ? "Enabled" :
-	       "Disabled", adapter->num_rx_queues, adapter->num_tx_queues);
+	hw_dbg(&adapter->hw, "Multiqueue %s: Rx Queue count = %u, Tx Queue count = %u XDP Queue count %u\n",
+	       (adapter->num_rx_queues > 1) ? "Enabled" : "Disabled",
+	       adapter->num_rx_queues, adapter->num_tx_queues,
+	       adapter->num_xdp_queues);
 
 	set_bit(__IXGBEVF_DOWN, &adapter->state);
@@ -2779,6 +2944,7 @@ static int ixgbevf_init_interrupt_scheme(struct ixgbevf_adapter *adapter)
 static void ixgbevf_clear_interrupt_scheme(struct ixgbevf_adapter *adapter)
 {
 	adapter->num_tx_queues = 0;
+	adapter->num_xdp_queues = 0;
 	adapter->num_rx_queues = 0;
 
 	ixgbevf_free_q_vectors(adapter);
@@ -2986,6 +3152,8 @@ static void ixgbevf_check_hang_subtask(struct ixgbevf_adapter *adapter)
 	if (netif_carrier_ok(adapter->netdev)) {
 		for (i = 0; i < adapter->num_tx_queues; i++)
 			set_check_for_tx_hang(adapter->tx_ring[i]);
+		for (i = 0; i < adapter->num_xdp_queues; i++)
+			set_check_for_tx_hang(adapter->xdp_ring[i]);
 	}
 
 	/* get one bit for every active Tx/Rx interrupt vector */
@@ -3157,6 +3325,9 @@ static void ixgbevf_free_all_tx_resources(struct ixgbevf_adapter *adapter)
 	for (i = 0; i < adapter->num_tx_queues; i++)
 		if (adapter->tx_ring[i]->desc)
 			ixgbevf_free_tx_resources(adapter->tx_ring[i]);
+	for (i = 0; i < adapter->num_xdp_queues; i++)
+		if (adapter->xdp_ring[i]->desc)
+			ixgbevf_free_tx_resources(adapter->xdp_ring[i]);
 }
 
 /**
@@ -3207,7 +3378,7 @@ int ixgbevf_setup_tx_resources(struct ixgbevf_ring *tx_ring)
  **/
 static int ixgbevf_setup_all_tx_resources(struct ixgbevf_adapter *adapter)
 {
-	int i, err = 0;
+	int i, j = 0, err = 0;
 
 	for (i = 0; i < adapter->num_tx_queues; i++) {
 		err = ixgbevf_setup_tx_resources(adapter->tx_ring[i]);
@@ -3217,11 +3388,22 @@ static int ixgbevf_setup_all_tx_resources(struct ixgbevf_adapter *adapter)
 		goto err_setup_tx;
 	}
 
+	for (j = 0; j < adapter->num_xdp_queues; j++) {
+		err = ixgbevf_setup_tx_resources(adapter->xdp_ring[j]);
+		if (!err)
+			continue;
+
+		hw_dbg(&adapter->hw, "Allocation for XDP Queue %u failed\n", j);
+		break;
+	}
+
 	return 0;
 err_setup_tx:
 	/* rewind the index freeing the rings as we go */
+	while (j--)
+		ixgbevf_free_tx_resources(adapter->xdp_ring[j]);
 	while (i--)
 		ixgbevf_free_tx_resources(adapter->tx_ring[i]);
 
 	return err;
 }
@@ -4114,6 +4296,23 @@ static void ixgbevf_shutdown(struct pci_dev *pdev)
 	ixgbevf_suspend(pdev, PMSG_SUSPEND);
 }
 
+static void ixgbevf_get_tx_ring_stats(struct rtnl_link_stats64 *stats,
+				      const struct ixgbevf_ring *ring)
+{
+	u64 bytes, packets;
+	unsigned int start;
+
+	if (ring) {
+		do {
+			start = u64_stats_fetch_begin_irq(&ring->syncp);
+			bytes = ring->stats.bytes;
+			packets = ring->stats.packets;
+		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
+		stats->tx_bytes += bytes;
+		stats->tx_packets += packets;
+	}
+}
+
 static void ixgbevf_get_stats(struct net_device *netdev,
 			      struct rtnl_link_stats64 *stats)
 {
@@ -4141,13 +4340,12 @@ static void ixgbevf_get_stats(struct net_device *netdev,
 	for (i = 0; i < adapter->num_tx_queues; i++) {
 		ring = adapter->tx_ring[i];
-		do {
-			start = u64_stats_fetch_begin_irq(&ring->syncp);
-			bytes = ring->stats.bytes;
-			packets = ring->stats.packets;
-		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
-		stats->tx_bytes += bytes;
-		stats->tx_packets += packets;
+		ixgbevf_get_tx_ring_stats(stats, ring);
+	}
+
+	for (i = 0; i < adapter->num_xdp_queues; i++) {
+		ring = adapter->xdp_ring[i];
+		ixgbevf_get_tx_ring_stats(stats, ring);
 	}
 
 	rcu_read_unlock();
 }
@@ -4201,8 +4399,25 @@ static int ixgbevf_xdp_setup(struct net_device *dev, struct bpf_prog *prog)
 	}
 
 	old_prog = xchg(&adapter->xdp_prog, prog);
-	for (i = 0; i < adapter->num_rx_queues; i++)
-		xchg(&adapter->rx_ring[i]->xdp_prog, adapter->xdp_prog);
+
+	/* If transitioning XDP modes reconfigure rings */
+	if (!!prog != !!old_prog) {
+		/* Hardware has to reinitialize queues and interrupts to
+		 * match packet buffer alignment. Unfortunately, the
+		 * hardware is not flexible enough to do this dynamically.
+		 */
+		if (netif_running(dev))
+			ixgbevf_close(dev);
+
+		ixgbevf_clear_interrupt_scheme(adapter);
+		ixgbevf_init_interrupt_scheme(adapter);
+
+		if (netif_running(dev))
+			ixgbevf_open(dev);
+	} else {
+		for (i = 0; i < adapter->num_rx_queues; i++)
+			xchg(&adapter->rx_ring[i]->xdp_prog, adapter->xdp_prog);
+	}
 
 	if (old_prog)
 		bpf_prog_put(old_prog);