Commit b1bc8457 authored by Geetha sowjanya's avatar Geetha sowjanya Committed by David S. Miller

octeontx2-pf: Cleanup all receive buffers in SG descriptor

With MTU sized receive buffers it is not expected to have CQE_RX
with multiple receive buffer pointers. But since the same physical link
is shared by a PF and its VFs, the max receive packet size configured
at the link could be more than the MTU. Hence there is a chance of receiving
pkts larger than the MTU, which then get DMA'ed into multiple buffers
and notified in a single CQE_RX. This patch treats such pkts as errors
and frees up the receive buffer pointers back to hardware.

Also on the transmit side this patch sets SMQ MAXLEN to max value to avoid
HW length errors for the packets whose size > MTU, eg due to path MTU.
Signed-off-by: default avatarGeetha sowjanya <gakula@marvell.com>
Signed-off-by: default avatarSunil Goutham <sgoutham@marvell.com>
Signed-off-by: default avatarDavid S. Miller <davem@davemloft.net>
parent ad513ed9
...@@ -212,8 +212,6 @@ int otx2_hw_set_mtu(struct otx2_nic *pfvf, int mtu) ...@@ -212,8 +212,6 @@ int otx2_hw_set_mtu(struct otx2_nic *pfvf, int mtu)
return -ENOMEM; return -ENOMEM;
} }
/* SMQ config limits maximum pkt size that can be transmitted */
req->update_smq = true;
pfvf->max_frs = mtu + OTX2_ETH_HLEN; pfvf->max_frs = mtu + OTX2_ETH_HLEN;
req->maxlen = pfvf->max_frs; req->maxlen = pfvf->max_frs;
...@@ -472,7 +470,7 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl) ...@@ -472,7 +470,7 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl)
/* Set topology e.t.c configuration */ /* Set topology e.t.c configuration */
if (lvl == NIX_TXSCH_LVL_SMQ) { if (lvl == NIX_TXSCH_LVL_SMQ) {
req->reg[0] = NIX_AF_SMQX_CFG(schq); req->reg[0] = NIX_AF_SMQX_CFG(schq);
req->regval[0] = ((pfvf->netdev->mtu + OTX2_ETH_HLEN) << 8) | req->regval[0] = ((OTX2_MAX_MTU + OTX2_ETH_HLEN) << 8) |
OTX2_MIN_MTU; OTX2_MIN_MTU;
req->regval[0] |= (0x20ULL << 51) | (0x80ULL << 39) | req->regval[0] |= (0x20ULL << 51) | (0x80ULL << 39) |
...@@ -582,17 +580,19 @@ void otx2_sqb_flush(struct otx2_nic *pfvf) ...@@ -582,17 +580,19 @@ void otx2_sqb_flush(struct otx2_nic *pfvf)
{ {
int qidx, sqe_tail, sqe_head; int qidx, sqe_tail, sqe_head;
u64 incr, *ptr, val; u64 incr, *ptr, val;
int timeout = 1000;
ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_SQ_OP_STATUS); ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_SQ_OP_STATUS);
for (qidx = 0; qidx < pfvf->hw.tx_queues; qidx++) { for (qidx = 0; qidx < pfvf->hw.tx_queues; qidx++) {
incr = (u64)qidx << 32; incr = (u64)qidx << 32;
while (1) { while (timeout) {
val = otx2_atomic64_add(incr, ptr); val = otx2_atomic64_add(incr, ptr);
sqe_head = (val >> 20) & 0x3F; sqe_head = (val >> 20) & 0x3F;
sqe_tail = (val >> 28) & 0x3F; sqe_tail = (val >> 28) & 0x3F;
if (sqe_head == sqe_tail) if (sqe_head == sqe_tail)
break; break;
usleep_range(1, 3); usleep_range(1, 3);
timeout--;
} }
} }
} }
...@@ -988,6 +988,7 @@ void otx2_aura_pool_free(struct otx2_nic *pfvf) ...@@ -988,6 +988,7 @@ void otx2_aura_pool_free(struct otx2_nic *pfvf)
qmem_free(pfvf->dev, pool->fc_addr); qmem_free(pfvf->dev, pool->fc_addr);
} }
devm_kfree(pfvf->dev, pfvf->qset.pool); devm_kfree(pfvf->dev, pfvf->qset.pool);
pfvf->qset.pool = NULL;
} }
static int otx2_aura_init(struct otx2_nic *pfvf, int aura_id, static int otx2_aura_init(struct otx2_nic *pfvf, int aura_id,
......
...@@ -138,6 +138,25 @@ static void otx2_set_rxhash(struct otx2_nic *pfvf, ...@@ -138,6 +138,25 @@ static void otx2_set_rxhash(struct otx2_nic *pfvf,
skb_set_hash(skb, hash, hash_type); skb_set_hash(skb, hash, hash_type);
} }
/* Walk every scatter-gather (SG) sub-descriptor of a CQE_RX and return
 * each receive buffer pointer it references back to the HW aura/pool,
 * so the buffers can be reused.  Used for multi-segment pkts that are
 * treated as errors and for RX queue cleanup.
 *
 * NOTE(review): layout assumptions below come from the NIX CQE format —
 * desc_sizem1 encodes (number of 16-byte units - 1) of descriptor data,
 * and each nix_rx_sg_s holds a seg count followed by seg_addr words;
 * confirm against the OcteonTX2 NIX hardware spec.
 */
static void otx2_free_rcv_seg(struct otx2_nic *pfvf, struct nix_cqe_rx_s *cqe,
			      int qidx)
{
	struct nix_rx_sg_s *sg = &cqe->sg;
	void *end, *start;
	u64 *seg_addr;
	int seg;

	/* Descriptor area spans (desc_sizem1 + 1) 16-byte units from the
	 * first SG sub-descriptor.
	 */
	start = (void *)sg;
	end = start + ((cqe->parse.desc_sizem1 + 1) * 16);
	while (start < end) {
		sg = (struct nix_rx_sg_s *)start;
		seg_addr = &sg->seg_addr;
		/* Free each buffer pointer in this SG sub-descriptor.
		 * Low 3 address bits are masked off — presumably used by
		 * HW as flag/offset bits, not part of the buffer address.
		 */
		for (seg = 0; seg < sg->segs; seg++, seg_addr++)
			otx2_aura_freeptr(pfvf, qidx, *seg_addr & ~0x07ULL);
		/* Advance to the next SG sub-descriptor. */
		start += sizeof(*sg);
	}
}
static bool otx2_check_rcv_errors(struct otx2_nic *pfvf, static bool otx2_check_rcv_errors(struct otx2_nic *pfvf,
struct nix_cqe_rx_s *cqe, int qidx) struct nix_cqe_rx_s *cqe, int qidx)
{ {
...@@ -189,16 +208,17 @@ static bool otx2_check_rcv_errors(struct otx2_nic *pfvf, ...@@ -189,16 +208,17 @@ static bool otx2_check_rcv_errors(struct otx2_nic *pfvf,
/* For now ignore all the NPC parser errors and /* For now ignore all the NPC parser errors and
* pass the packets to stack. * pass the packets to stack.
*/ */
return false; if (cqe->sg.segs == 1)
return false;
} }
/* If RXALL is enabled pass on packets to stack. */ /* If RXALL is enabled pass on packets to stack. */
if (cqe->sg.segs && (pfvf->netdev->features & NETIF_F_RXALL)) if (cqe->sg.segs == 1 && (pfvf->netdev->features & NETIF_F_RXALL))
return false; return false;
/* Free buffer back to pool */ /* Free buffer back to pool */
if (cqe->sg.segs) if (cqe->sg.segs)
otx2_aura_freeptr(pfvf, qidx, cqe->sg.seg_addr & ~0x07ULL); otx2_free_rcv_seg(pfvf, cqe, qidx);
return true; return true;
} }
...@@ -210,7 +230,7 @@ static void otx2_rcv_pkt_handler(struct otx2_nic *pfvf, ...@@ -210,7 +230,7 @@ static void otx2_rcv_pkt_handler(struct otx2_nic *pfvf,
struct nix_rx_parse_s *parse = &cqe->parse; struct nix_rx_parse_s *parse = &cqe->parse;
struct sk_buff *skb = NULL; struct sk_buff *skb = NULL;
if (unlikely(parse->errlev || parse->errcode)) { if (unlikely(parse->errlev || parse->errcode || cqe->sg.segs > 1)) {
if (otx2_check_rcv_errors(pfvf, cqe, cq->cq_idx)) if (otx2_check_rcv_errors(pfvf, cqe, cq->cq_idx))
return; return;
} }
...@@ -789,11 +809,15 @@ void otx2_cleanup_rx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq) ...@@ -789,11 +809,15 @@ void otx2_cleanup_rx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq)
while ((cqe = (struct nix_cqe_rx_s *)otx2_get_next_cqe(cq))) { while ((cqe = (struct nix_cqe_rx_s *)otx2_get_next_cqe(cq))) {
if (!cqe->sg.subdc) if (!cqe->sg.subdc)
continue; continue;
processed_cqe++;
if (cqe->sg.segs > 1) {
otx2_free_rcv_seg(pfvf, cqe, cq->cq_idx);
continue;
}
iova = cqe->sg.seg_addr - OTX2_HEAD_ROOM; iova = cqe->sg.seg_addr - OTX2_HEAD_ROOM;
pa = otx2_iova_to_phys(pfvf->iommu_domain, iova); pa = otx2_iova_to_phys(pfvf->iommu_domain, iova);
otx2_dma_unmap_page(pfvf, iova, pfvf->rbsize, DMA_FROM_DEVICE); otx2_dma_unmap_page(pfvf, iova, pfvf->rbsize, DMA_FROM_DEVICE);
put_page(virt_to_page(phys_to_virt(pa))); put_page(virt_to_page(phys_to_virt(pa)));
processed_cqe++;
} }
/* Free CQEs to HW */ /* Free CQEs to HW */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment