Commit d9028db6 authored by Alexander Lobakin, committed by Tony Nguyen

idpf: convert to libeth Tx buffer completion

&idpf_tx_buf is almost identical to the previous generations, as is
the way it's handled. Moreover, relying on dma_unmap_addr() and
!!buf->skb instead of explicitly defining the buffer's type was never
a good approach. Use the newly added libeth helpers to do it properly
and reduce the copy-paste around the Tx code.
Reviewed-by: Przemek Kitszel <przemyslaw.kitszel@intel.com>
Signed-off-by: Alexander Lobakin <aleksander.lobakin@intel.com>
Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
parent 080d72f4
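
For orientation, below is a minimal sketch of the completion pattern the driver is converted to. Only the libeth names visible in the diff (struct libeth_sqe, struct libeth_cq_pp, struct libeth_sq_napi_stats, libeth_tx_complete(), the LIBETH_SQE_* types) are taken from the patch; the ring layout (my_tx_ring) and the my_tx_desc_done() helper are hypothetical placeholders, not idpf code.

    #include <linux/device.h>
    #include <linux/types.h>
    #include <net/libeth/tx.h>

    /* Hypothetical minimal Tx ring; only the libeth_sqe array and the
     * mapping device mirror what the idpf queues in this patch carry.
     */
    struct my_tx_ring {
            struct device           *dev;        /* device the buffers were DMA-mapped with */
            struct libeth_sqe       *tx_buf;     /* one entry per descriptor, as in idpf */
            u32                     desc_count;
            u32                     next_to_clean;
    };

    /* Hypothetical "descriptor done" check; a real driver tests the DD bit. */
    static bool my_tx_desc_done(const struct my_tx_ring *ring, u32 idx)
    {
            return false;
    }

    static void my_tx_ring_clean(struct my_tx_ring *ring, int napi_budget)
    {
            struct libeth_sq_napi_stats ss = { };
            struct libeth_cq_pp cp = {
                    .dev    = ring->dev,     /* used for the dma_unmap_*() calls */
                    .ss     = &ss,           /* accumulates completed packets/bytes */
                    .napi   = napi_budget,   /* non-zero: skbs freed in NAPI context */
            };
            u32 ntc = ring->next_to_clean;

            while (my_tx_desc_done(ring, ntc)) {
                    /* One call replaces the open-coded dma_unmap_single()/
                     * dma_unmap_page() + napi_consume_skb() blocks: it acts
                     * according to tx_buf->type (LIBETH_SQE_EMPTY/CTX/FRAG/SKB)
                     * and folds ->packets/->bytes into ss for SKB entries.
                     */
                    libeth_tx_complete(&ring->tx_buf[ntc], &cp);

                    if (++ntc == ring->desc_count)
                            ntc = 0;
            }

            ring->next_to_clean = ntc;
            /* ss.packets / ss.bytes then feed the queue stats and BQL,
             * as in idpf_tx_singleq_clean() below.
             */
    }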
@@ -2,6 +2,7 @@
/* Copyright (C) 2023 Intel Corporation */
#include <net/libeth/rx.h>
#include <net/libeth/tx.h>
#include "idpf.h"
@@ -224,6 +225,7 @@ static void idpf_tx_singleq_map(struct idpf_tx_queue *tx_q,
/* record length, and DMA address */
dma_unmap_len_set(tx_buf, len, size);
dma_unmap_addr_set(tx_buf, dma, dma);
tx_buf->type = LIBETH_SQE_FRAG;
/* align size to end of page */
max_data += -dma & (IDPF_TX_MAX_READ_REQ_SIZE - 1);
@@ -245,6 +247,8 @@ static void idpf_tx_singleq_map(struct idpf_tx_queue *tx_q,
i = 0;
}
tx_q->tx_buf[i].type = LIBETH_SQE_EMPTY;
dma += max_data;
size -= max_data;
@@ -282,13 +286,13 @@ static void idpf_tx_singleq_map(struct idpf_tx_queue *tx_q,
tx_desc->qw1 = idpf_tx_singleq_build_ctob(td_cmd, offsets,
size, td_tag);
IDPF_SINGLEQ_BUMP_RING_IDX(tx_q, i);
first->type = LIBETH_SQE_SKB;
first->rs_idx = i;
/* set next_to_watch value indicating a packet is present */
first->next_to_watch = tx_desc;
IDPF_SINGLEQ_BUMP_RING_IDX(tx_q, i);
nq = netdev_get_tx_queue(tx_q->netdev, tx_q->idx);
netdev_tx_sent_queue(nq, first->bytecount);
netdev_tx_sent_queue(nq, first->bytes);
idpf_tx_buf_hw_update(tx_q, i, netdev_xmit_more());
}
@@ -306,8 +310,7 @@ idpf_tx_singleq_get_ctx_desc(struct idpf_tx_queue *txq)
struct idpf_base_tx_ctx_desc *ctx_desc;
int ntu = txq->next_to_use;
memset(&txq->tx_buf[ntu], 0, sizeof(struct idpf_tx_buf));
txq->tx_buf[ntu].ctx_entry = true;
txq->tx_buf[ntu].type = LIBETH_SQE_CTX;
ctx_desc = &txq->base_ctx[ntu];
@@ -396,11 +399,11 @@ netdev_tx_t idpf_tx_singleq_frame(struct sk_buff *skb,
first->skb = skb;
if (tso) {
first->gso_segs = offload.tso_segs;
first->bytecount = skb->len + ((first->gso_segs - 1) * offload.tso_hdr_len);
first->packets = offload.tso_segs;
first->bytes = skb->len + ((first->packets - 1) * offload.tso_hdr_len);
} else {
first->bytecount = max_t(unsigned int, skb->len, ETH_ZLEN);
first->gso_segs = 1;
first->bytes = max_t(unsigned int, skb->len, ETH_ZLEN);
first->packets = 1;
}
idpf_tx_singleq_map(tx_q, first, &offload);
@@ -420,10 +423,15 @@ netdev_tx_t idpf_tx_singleq_frame(struct sk_buff *skb,
static bool idpf_tx_singleq_clean(struct idpf_tx_queue *tx_q, int napi_budget,
int *cleaned)
{
unsigned int total_bytes = 0, total_pkts = 0;
struct libeth_sq_napi_stats ss = { };
struct idpf_base_tx_desc *tx_desc;
u32 budget = tx_q->clean_budget;
s16 ntc = tx_q->next_to_clean;
struct libeth_cq_pp cp = {
.dev = tx_q->dev,
.ss = &ss,
.napi = napi_budget,
};
struct idpf_netdev_priv *np;
struct idpf_tx_buf *tx_buf;
struct netdev_queue *nq;
@@ -441,47 +449,23 @@ static bool idpf_tx_singleq_clean(struct idpf_tx_queue *tx_q, int napi_budget,
* such. We can skip this descriptor since there is no buffer
* to clean.
*/
if (tx_buf->ctx_entry) {
/* Clear this flag here to avoid stale flag values when
* this buffer is used for actual data in the future.
* There are cases where the tx_buf struct / the flags
* field will not be cleared before being reused.
*/
tx_buf->ctx_entry = false;
if (unlikely(tx_buf->type <= LIBETH_SQE_CTX)) {
tx_buf->type = LIBETH_SQE_EMPTY;
goto fetch_next_txq_desc;
}
/* if next_to_watch is not set then no work pending */
eop_desc = (struct idpf_base_tx_desc *)tx_buf->next_to_watch;
if (!eop_desc)
break;
/* prevent any other reads prior to eop_desc */
/* prevent any other reads prior to type */
smp_rmb();
eop_desc = &tx_q->base_tx[tx_buf->rs_idx];
/* if the descriptor isn't done, no work yet to do */
if (!(eop_desc->qw1 &
cpu_to_le64(IDPF_TX_DESC_DTYPE_DESC_DONE)))
break;
/* clear next_to_watch to prevent false hangs */
tx_buf->next_to_watch = NULL;
/* update the statistics for this packet */
total_bytes += tx_buf->bytecount;
total_pkts += tx_buf->gso_segs;
napi_consume_skb(tx_buf->skb, napi_budget);
/* unmap skb header data */
dma_unmap_single(tx_q->dev,
dma_unmap_addr(tx_buf, dma),
dma_unmap_len(tx_buf, len),
DMA_TO_DEVICE);
/* clear tx_buf data */
tx_buf->skb = NULL;
dma_unmap_len_set(tx_buf, len, 0);
libeth_tx_complete(tx_buf, &cp);
/* unmap remaining buffers */
while (tx_desc != eop_desc) {
@@ -495,13 +479,7 @@ static bool idpf_tx_singleq_clean(struct idpf_tx_queue *tx_q, int napi_budget,
}
/* unmap any remaining paged data */
if (dma_unmap_len(tx_buf, len)) {
dma_unmap_page(tx_q->dev,
dma_unmap_addr(tx_buf, dma),
dma_unmap_len(tx_buf, len),
DMA_TO_DEVICE);
dma_unmap_len_set(tx_buf, len, 0);
}
libeth_tx_complete(tx_buf, &cp);
}
/* update budget only if we did something */
@@ -521,11 +499,11 @@ static bool idpf_tx_singleq_clean(struct idpf_tx_queue *tx_q, int napi_budget,
ntc += tx_q->desc_count;
tx_q->next_to_clean = ntc;
*cleaned += total_pkts;
*cleaned += ss.packets;
u64_stats_update_begin(&tx_q->stats_sync);
u64_stats_add(&tx_q->q_stats.packets, total_pkts);
u64_stats_add(&tx_q->q_stats.bytes, total_bytes);
u64_stats_add(&tx_q->q_stats.packets, ss.packets);
u64_stats_add(&tx_q->q_stats.bytes, ss.bytes);
u64_stats_update_end(&tx_q->stats_sync);
np = netdev_priv(tx_q->netdev);
@@ -533,7 +511,7 @@ static bool idpf_tx_singleq_clean(struct idpf_tx_queue *tx_q, int napi_budget,
dont_wake = np->state != __IDPF_VPORT_UP ||
!netif_carrier_ok(tx_q->netdev);
__netif_txq_completed_wake(nq, total_pkts, total_bytes,
__netif_txq_completed_wake(nq, ss.packets, ss.bytes,
IDPF_DESC_UNUSED(tx_q), IDPF_TX_WAKE_THRESH,
dont_wake);
This diff is collapsed.
@@ -131,7 +131,6 @@ do { \
(txq)->num_completions_pending - (txq)->complq->num_completions)
#define IDPF_TX_SPLITQ_COMPL_TAG_WIDTH 16
#define IDPF_SPLITQ_TX_INVAL_COMPL_TAG -1
/* Adjust the generation for the completion tag and wrap if necessary */
#define IDPF_TX_ADJ_COMPL_TAG_GEN(txq) \
((++(txq)->compl_tag_cur_gen) >= (txq)->compl_tag_gen_max ? \
@@ -149,47 +148,7 @@ union idpf_tx_flex_desc {
struct idpf_flex_tx_sched_desc flow; /* flow based scheduling */
};
/**
* struct idpf_tx_buf
* @next_to_watch: Next descriptor to clean
* @skb: Pointer to the skb
* @dma: DMA address
* @len: DMA length
* @bytecount: Number of bytes
* @gso_segs: Number of GSO segments
* @compl_tag: Splitq only, unique identifier for a buffer. Used to compare
* with completion tag returned in buffer completion event.
* Because the completion tag is expected to be the same in all
* data descriptors for a given packet, and a single packet can
* span multiple buffers, we need this field to track all
* buffers associated with this completion tag independently of
* the buf_id. The tag consists of a N bit buf_id and M upper
* order "generation bits". See compl_tag_bufid_m and
* compl_tag_gen_s in struct idpf_queue. We'll use a value of -1
* to indicate the tag is not valid.
* @ctx_entry: Singleq only. Used to indicate the corresponding entry
* in the descriptor ring was used for a context descriptor and
* this buffer entry should be skipped.
*/
struct idpf_tx_buf {
void *next_to_watch;
struct sk_buff *skb;
DEFINE_DMA_UNMAP_ADDR(dma);
DEFINE_DMA_UNMAP_LEN(len);
unsigned int bytecount;
unsigned short gso_segs;
union {
int compl_tag;
bool ctx_entry;
};
};
struct idpf_tx_stash {
struct hlist_node hlist;
struct idpf_tx_buf buf;
};
#define idpf_tx_buf libeth_sqe
/**
* struct idpf_buf_lifo - LIFO for managing OOO completions
@@ -496,11 +455,6 @@ struct idpf_tx_queue_stats {
u64_stats_t dma_map_errs;
};
struct idpf_cleaned_stats {
u32 packets;
u32 bytes;
};
#define IDPF_ITR_DYNAMIC 1
#define IDPF_ITR_MAX 0x1FE0
#define IDPF_ITR_20K 0x0032
@@ -688,7 +642,7 @@ struct idpf_tx_queue {
void *desc_ring;
};
struct idpf_tx_buf *tx_buf;
struct libeth_sqe *tx_buf;
struct idpf_txq_group *txq_grp;
struct device *dev;
void __iomem *tail;