Commit 1dc8b538 authored by Alexander Duyck, committed by Jeff Kirsher

i40e: Reorder logic for coalescing RS bits

This patch reorders the logic at the end of i40e_tx_map, which had become
rather convoluted and much larger than it needed to be.

To coalesce the code paths, I have updated some of the comments and
repurposed some of the variables to reduce unnecessary overhead.

This patch does the following:
1.  Quit tracking skb->xmit_more with a flag, just max out packet_stride
2.  Drop tail_bump and do_rs and instead just use desc_count and td_cmd
3.  Pull comments from ixgbe that make the need for wmb() more explicit.
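
For reference, items 1 and 2 boil down to the condensed sketch below of the new
tail/RS handling. It is paraphrased from the i40e_tx_map hunk in the diff that
follows (mapping loop and error handling omitted), not the driver code verbatim:
desc_count, the number of descriptors used by this packet, now doubles as the
"bump the tail" flag, packet_stride counts packets since the last RS bit, and
WB_STRIDE changes from the mask 0x3 to a plain count of 4:

    /* every last descriptor carries EOP; folding the incremented
     * packet_stride into desc_count lets a single >= WB_STRIDE test
     * fire when either value has reached 4
     */
    td_cmd |= I40E_TX_DESC_CMD_EOP;
    desc_count |= ++tx_ring->packet_stride;

    if (netif_xmit_stopped(txring_txq(tx_ring))) {
        goto do_rs;                          /* stopped queue: force RS */
    } else if (skb->xmit_more) {
        tx_ring->packet_stride = WB_STRIDE;  /* arm RS for the next packet */
        desc_count = 0;                      /* and skip the tail bump */
    } else if (desc_count >= WB_STRIDE) {
do_rs:
        td_cmd |= I40E_TX_DESC_CMD_RS;       /* request descriptor write-back */
        tx_ring->packet_stride = 0;
    }

    tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset, size, td_tag);

    wmb();                       /* flush descriptor writes before next_to_watch
                                  * and the tail bump
                                  */
    first->next_to_watch = tx_desc;

    if (desc_count)
        writel(i, tx_ring->tail);            /* notify hardware */

The WB_STRIDE change also explains the cleanup-path tweak: for unsigned j, the
old ((j / (WB_STRIDE + 1)) == 0) with WB_STRIDE 0x3 and the new
((j / WB_STRIDE) == 0) with WB_STRIDE 4 both mean "fewer than four descriptors
still pending".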

Change-ID: Ic7da85ec75043c634e87fef958109789bcc6317c
Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Tested-by: Andrew Bowers <andrewx.bowers@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
parent 4b816446
@@ -616,7 +616,7 @@ u32 i40e_get_tx_pending(struct i40e_ring *ring, bool in_sw)
     return 0;
 }
 
-#define WB_STRIDE 0x3
+#define WB_STRIDE 4
 
 /**
  * i40e_clean_tx_irq - Reclaim resources after transmit completes
@@ -732,7 +732,7 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
         unsigned int j = i40e_get_tx_pending(tx_ring, false);
 
         if (budget &&
-            ((j / (WB_STRIDE + 1)) == 0) && (j != 0) &&
+            ((j / WB_STRIDE) == 0) && (j > 0) &&
             !test_bit(__I40E_DOWN, &vsi->state) &&
             (I40E_DESC_UNUSED(tx_ring) != tx_ring->count))
             tx_ring->arm_wb = true;
@@ -2700,9 +2700,7 @@ static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
     u32 td_tag = 0;
     dma_addr_t dma;
     u16 gso_segs;
-    u16 desc_count = 0;
-    bool tail_bump = true;
-    bool do_rs = false;
+    u16 desc_count = 1;
 
     if (tx_flags & I40E_TX_FLAGS_HW_VLAN) {
         td_cmd |= I40E_TX_DESC_CMD_IL2TAG1;
@@ -2785,8 +2783,7 @@ static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
         tx_bi = &tx_ring->tx_bi[i];
     }
 
-    /* set next_to_watch value indicating a packet is present */
-    first->next_to_watch = tx_desc;
+    netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
 
     i++;
     if (i == tx_ring->count)
@@ -2794,66 +2791,72 @@ static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
     tx_ring->next_to_use = i;
 
-    netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
     i40e_maybe_stop_tx(tx_ring, DESC_NEEDED);
 
+    /* write last descriptor with EOP bit */
+    td_cmd |= I40E_TX_DESC_CMD_EOP;
+
+    /* We can OR these values together as they both are checked against
+     * 4 below and at this point desc_count will be used as a boolean value
+     * after this if/else block.
+     */
+    desc_count |= ++tx_ring->packet_stride;
+
     /* Algorithm to optimize tail and RS bit setting:
-     * if xmit_more is supported
-     *    if xmit_more is true
-     *        do not update tail and do not mark RS bit.
-     *    if xmit_more is false and last xmit_more was false
-     *        if every packet spanned less than 4 desc
-     *            then set RS bit on 4th packet and update tail
-     *            on every packet
-     *        else
-     *            update tail and set RS bit on every packet.
-     *    if xmit_more is false and last_xmit_more was true
-     *        update tail and set RS bit.
+     * if queue is stopped
+     *    mark RS bit
+     *    reset packet counter
+     * else if xmit_more is supported and is true
+     *    advance packet counter to 4
+     *    reset desc_count to 0
      *
-     * Optimization: wmb to be issued only in case of tail update.
-     * Also optimize the Descriptor WB path for RS bit with the same
-     * algorithm.
+     * if desc_count >= 4
+     *    mark RS bit
+     *    reset packet counter
+     * if desc_count > 0
+     *    update tail
      *
-     * Note: If there are less than 4 packets
+     * Note: If there are less than 4 descriptors
      * pending and interrupts were disabled the service task will
      * trigger a force WB.
     */
-    if (skb->xmit_more &&
-        !netif_xmit_stopped(txring_txq(tx_ring))) {
-        tx_ring->flags |= I40E_TXR_FLAGS_LAST_XMIT_MORE_SET;
-        tail_bump = false;
-    } else if (!skb->xmit_more &&
-           !netif_xmit_stopped(txring_txq(tx_ring)) &&
-           (!(tx_ring->flags & I40E_TXR_FLAGS_LAST_XMIT_MORE_SET)) &&
-           (tx_ring->packet_stride < WB_STRIDE) &&
-           (desc_count < WB_STRIDE)) {
-        tx_ring->packet_stride++;
-    } else {
+    if (netif_xmit_stopped(txring_txq(tx_ring))) {
+        goto do_rs;
+    } else if (skb->xmit_more) {
+        /* set stride to arm on next packet and reset desc_count */
+        tx_ring->packet_stride = WB_STRIDE;
+        desc_count = 0;
+    } else if (desc_count >= WB_STRIDE) {
+do_rs:
+        /* write last descriptor with RS bit set */
+        td_cmd |= I40E_TX_DESC_CMD_RS;
         tx_ring->packet_stride = 0;
-        tx_ring->flags &= ~I40E_TXR_FLAGS_LAST_XMIT_MORE_SET;
-        do_rs = true;
     }
-    if (do_rs)
-        tx_ring->packet_stride = 0;
 
     tx_desc->cmd_type_offset_bsz =
-            build_ctob(td_cmd, td_offset, size, td_tag) |
-            cpu_to_le64((u64)(do_rs ? I40E_TXD_CMD :
-                          I40E_TX_DESC_CMD_EOP) <<
-                          I40E_TXD_QW1_CMD_SHIFT);
+            build_ctob(td_cmd, td_offset, size, td_tag);
+
+    /* Force memory writes to complete before letting h/w know there
+     * are new descriptors to fetch.
+     *
+     * We also use this memory barrier to make certain all of the
+     * status bits have been updated before next_to_watch is written.
+     */
+    wmb();
+
+    /* set next_to_watch value indicating a packet is present */
+    first->next_to_watch = tx_desc;
 
     /* notify HW of packet */
-    if (!tail_bump) {
-        prefetchw(tx_desc + 1);
-    } else {
-        /* Force memory writes to complete before letting h/w
-         * know there are new descriptors to fetch.  (Only
-         * applicable for weak-ordered memory model archs,
-         * such as IA-64).
-         */
-        wmb();
+    if (desc_count) {
         writel(i, tx_ring->tail);
+
+        /* we need this if more than one processor can write to our tail
+         * at a time, it synchronizes IO on IA64/Altix systems
+         */
+        mmiowb();
     }
 
     return;
 
 dma_error:
@@ -313,7 +313,6 @@ struct i40e_ring {
     u16 flags;
 #define I40E_TXR_FLAGS_WB_ON_ITR          BIT(0)
-#define I40E_TXR_FLAGS_LAST_XMIT_MORE_SET BIT(2)
 
     /* stats structs */
     struct i40e_queue_stats stats;
@@ -150,7 +150,7 @@ u32 i40evf_get_tx_pending(struct i40e_ring *ring, bool in_sw)
     return 0;
 }
 
-#define WB_STRIDE 0x3
+#define WB_STRIDE 4
 
 /**
  * i40e_clean_tx_irq - Reclaim resources after transmit completes
@@ -266,7 +266,7 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
         unsigned int j = i40evf_get_tx_pending(tx_ring, false);
 
         if (budget &&
-            ((j / (WB_STRIDE + 1)) == 0) && (j > 0) &&
+            ((j / WB_STRIDE) == 0) && (j > 0) &&
             !test_bit(__I40E_DOWN, &vsi->state) &&
             (I40E_DESC_UNUSED(tx_ring) != tx_ring->count))
             tx_ring->arm_wb = true;
@@ -1950,9 +1950,7 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
     u32 td_tag = 0;
     dma_addr_t dma;
     u16 gso_segs;
-    u16 desc_count = 0;
-    bool tail_bump = true;
-    bool do_rs = false;
+    u16 desc_count = 1;
 
     if (tx_flags & I40E_TX_FLAGS_HW_VLAN) {
         td_cmd |= I40E_TX_DESC_CMD_IL2TAG1;
@@ -2035,8 +2033,7 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
         tx_bi = &tx_ring->tx_bi[i];
     }
 
-    /* set next_to_watch value indicating a packet is present */
-    first->next_to_watch = tx_desc;
+    netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
 
     i++;
     if (i == tx_ring->count)
@@ -2044,66 +2041,72 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
     tx_ring->next_to_use = i;
 
-    netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
     i40e_maybe_stop_tx(tx_ring, DESC_NEEDED);
 
+    /* write last descriptor with EOP bit */
+    td_cmd |= I40E_TX_DESC_CMD_EOP;
+
+    /* We can OR these values together as they both are checked against
+     * 4 below and at this point desc_count will be used as a boolean value
+     * after this if/else block.
+     */
+    desc_count |= ++tx_ring->packet_stride;
+
     /* Algorithm to optimize tail and RS bit setting:
-     * if xmit_more is supported
-     *    if xmit_more is true
-     *        do not update tail and do not mark RS bit.
-     *    if xmit_more is false and last xmit_more was false
-     *        if every packet spanned less than 4 desc
-     *            then set RS bit on 4th packet and update tail
-     *            on every packet
-     *        else
-     *            update tail and set RS bit on every packet.
-     *    if xmit_more is false and last_xmit_more was true
-     *        update tail and set RS bit.
+     * if queue is stopped
+     *    mark RS bit
+     *    reset packet counter
+     * else if xmit_more is supported and is true
+     *    advance packet counter to 4
+     *    reset desc_count to 0
      *
-     * Optimization: wmb to be issued only in case of tail update.
-     * Also optimize the Descriptor WB path for RS bit with the same
-     * algorithm.
+     * if desc_count >= 4
+     *    mark RS bit
+     *    reset packet counter
+     * if desc_count > 0
+     *    update tail
     *
-     * Note: If there are less than 4 packets
+     * Note: If there are less than 4 descriptors
     * pending and interrupts were disabled the service task will
     * trigger a force WB.
     */
-    if (skb->xmit_more &&
-        !netif_xmit_stopped(txring_txq(tx_ring))) {
-        tx_ring->flags |= I40E_TXR_FLAGS_LAST_XMIT_MORE_SET;
-        tail_bump = false;
-    } else if (!skb->xmit_more &&
-           !netif_xmit_stopped(txring_txq(tx_ring)) &&
-           (!(tx_ring->flags & I40E_TXR_FLAGS_LAST_XMIT_MORE_SET)) &&
-           (tx_ring->packet_stride < WB_STRIDE) &&
-           (desc_count < WB_STRIDE)) {
-        tx_ring->packet_stride++;
-    } else {
+    if (netif_xmit_stopped(txring_txq(tx_ring))) {
+        goto do_rs;
+    } else if (skb->xmit_more) {
+        /* set stride to arm on next packet and reset desc_count */
+        tx_ring->packet_stride = WB_STRIDE;
+        desc_count = 0;
+    } else if (desc_count >= WB_STRIDE) {
+do_rs:
+        /* write last descriptor with RS bit set */
+        td_cmd |= I40E_TX_DESC_CMD_RS;
         tx_ring->packet_stride = 0;
-        tx_ring->flags &= ~I40E_TXR_FLAGS_LAST_XMIT_MORE_SET;
-        do_rs = true;
     }
-    if (do_rs)
-        tx_ring->packet_stride = 0;
 
     tx_desc->cmd_type_offset_bsz =
-            build_ctob(td_cmd, td_offset, size, td_tag) |
-            cpu_to_le64((u64)(do_rs ? I40E_TXD_CMD :
-                          I40E_TX_DESC_CMD_EOP) <<
-                          I40E_TXD_QW1_CMD_SHIFT);
+            build_ctob(td_cmd, td_offset, size, td_tag);
+
+    /* Force memory writes to complete before letting h/w know there
+     * are new descriptors to fetch.
+     *
+     * We also use this memory barrier to make certain all of the
+     * status bits have been updated before next_to_watch is written.
+     */
+    wmb();
+
+    /* set next_to_watch value indicating a packet is present */
+    first->next_to_watch = tx_desc;
 
     /* notify HW of packet */
-    if (!tail_bump) {
-        prefetchw(tx_desc + 1);
-    } else {
-        /* Force memory writes to complete before letting h/w
-         * know there are new descriptors to fetch.  (Only
-         * applicable for weak-ordered memory model archs,
-         * such as IA-64).
-         */
-        wmb();
+    if (desc_count) {
         writel(i, tx_ring->tail);
+
+        /* we need this if more than one processor can write to our tail
+         * at a time, it synchronizes IO on IA64/Altix systems
+         */
+        mmiowb();
     }
 
     return;
 
 dma_error:
@@ -309,7 +309,6 @@ struct i40e_ring {
     bool ring_active;        /* is ring online or not */
     bool arm_wb;             /* do something to arm write back */
     u8 packet_stride;
-#define I40E_TXR_FLAGS_LAST_XMIT_MORE_SET BIT(2)
 
     u16 flags;
 #define I40E_TXR_FLAGS_WB_ON_ITR          BIT(0)