Commit d10a7f55 authored by Paolo Abeni

Merge branch 'ravb-ethernet-driver-bugfixes'

Paul Barker says:

====================
ravb Ethernet driver bugfixes

These patches fix bugs found during recent work on the ravb driver.

Patches 1 & 2 affect the R-Car code paths so have been tested on an
R-Car M3N Salvator-XS board - this is the only R-Car board I currently
have access to.

Patches 2, 3 & 4 affect the GbEth code paths so have been tested on
RZ/G2L and RZ/G2UL SMARC EVK boards.

Changes v2->v3:
  * Incorporate feedback from Niklas and add Reviewed-by tag to patch
    "net: ravb: Count packets instead of descriptors in R-Car RX path".
Changes v1->v2:
  * Fixed typos in commit message of patch
    "net: ravb: Allow RX loop to move past DMA mapping errors".
  * Added Sergey's Reviewed-by tags.
  * Expanded Cc list as Patchwork complained that I had missed people.
  * Trimmed the call trace in accordance with the docs [1] in patch
    "net: ravb: Fix GbEth jumbo packet RX checksum handling".

[1]: https://docs.kernel.org/process/submitting-patches.html#backtraces-in-commit-messages
====================

Link: https://lore.kernel.org/r/20240416120254.2620-1-paul.barker.ct@bp.renesas.com
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
parents 94667949 2e36c9fb
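
For context, both RX handlers in this series are reworked from the same template: the old open-coded loops are replaced by a bounded for-loop that advances the ring index in the loop header and counts delivered packets, not descriptors, against the NAPI quota. Below is a minimal standalone sketch of that pattern (stub types and illustrative names, not the driver code):

```c
#include <stdio.h>

#define RING_SIZE  8
#define DT_FEMPTY  0	/* descriptor still owned by hardware */
#define DT_FSINGLE 1	/* complete frame in one descriptor */

struct rx_desc {
	int die_dt;	/* descriptor type */
	int len;	/* 0 marks a DMA mapping error */
};

static struct rx_desc ring[RING_SIZE];
static unsigned int cur_rx, dirty_rx;

/* Deliver up to *quota packets; returns the number delivered. */
static int rx_poll(int *quota)
{
	unsigned int limit = dirty_rx + RING_SIZE - cur_rx;
	int rx_packets = 0;
	unsigned int i;

	for (i = 0; i < limit; i++, cur_rx++) {
		struct rx_desc *desc = &ring[cur_rx % RING_SIZE];

		if (rx_packets == *quota || desc->die_dt == DT_FEMPTY)
			break;

		/* Zero-length descriptor: skip it. Because cur_rx advances
		 * in the loop header, `continue` moves to the next slot. */
		if (!desc->len)
			continue;

		rx_packets++;	/* one completed packet delivered */
	}

	*quota -= rx_packets;
	return rx_packets;
}

int main(void)
{
	int quota = 4;
	unsigned int i;

	for (i = 0; i < RING_SIZE; i++) {
		ring[i].die_dt = DT_FSINGLE;
		ring[i].len = (i == 2) ? 0 : 64;	/* slot 2: mapping error */
	}

	printf("delivered %d, quota left %d\n", rx_poll(&quota), quota);
	return 0;
}
```

Because the ring index advances in the loop header, the `continue` taken for a zero-length descriptor (the driver's marker for a DMA mapping error) moves on to the next slot instead of re-reading the same descriptor.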
@@ -769,25 +769,28 @@ static bool ravb_rx_gbeth(struct net_device *ndev, int *quota, int q)
 	dma_addr_t dma_addr;
 	int rx_packets = 0;
 	u8 desc_status;
-	u16 pkt_len;
+	u16 desc_len;
 	u8 die_dt;
 	int entry;
 	int limit;
 	int i;
 
-	entry = priv->cur_rx[q] % priv->num_rx_ring[q];
 	limit = priv->dirty_rx[q] + priv->num_rx_ring[q] - priv->cur_rx[q];
 	stats = &priv->stats[q];
 
-	desc = &priv->rx_ring[q].desc[entry];
-	for (i = 0; i < limit && rx_packets < *quota && desc->die_dt != DT_FEMPTY; i++) {
+	for (i = 0; i < limit; i++, priv->cur_rx[q]++) {
+		entry = priv->cur_rx[q] % priv->num_rx_ring[q];
+		desc = &priv->rx_ring[q].desc[entry];
+		if (rx_packets == *quota || desc->die_dt == DT_FEMPTY)
+			break;
+
 		/* Descriptor type must be checked before all other reads */
 		dma_rmb();
 		desc_status = desc->msc;
-		pkt_len = le16_to_cpu(desc->ds_cc) & RX_DS;
+		desc_len = le16_to_cpu(desc->ds_cc) & RX_DS;
 
 		/* We use 0-byte descriptors to mark the DMA mapping errors */
-		if (!pkt_len)
+		if (!desc_len)
 			continue;
 
 		if (desc_status & MSC_MC)
@@ -808,25 +811,25 @@ static bool ravb_rx_gbeth(struct net_device *ndev, int *quota, int q)
 		switch (die_dt) {
 		case DT_FSINGLE:
 			skb = ravb_get_skb_gbeth(ndev, entry, desc);
-			skb_put(skb, pkt_len);
+			skb_put(skb, desc_len);
 			skb->protocol = eth_type_trans(skb, ndev);
 			if (ndev->features & NETIF_F_RXCSUM)
 				ravb_rx_csum_gbeth(skb);
 			napi_gro_receive(&priv->napi[q], skb);
 			rx_packets++;
-			stats->rx_bytes += pkt_len;
+			stats->rx_bytes += desc_len;
 			break;
 		case DT_FSTART:
 			priv->rx_1st_skb = ravb_get_skb_gbeth(ndev, entry, desc);
-			skb_put(priv->rx_1st_skb, pkt_len);
+			skb_put(priv->rx_1st_skb, desc_len);
 			break;
 		case DT_FMID:
 			skb = ravb_get_skb_gbeth(ndev, entry, desc);
 			skb_copy_to_linear_data_offset(priv->rx_1st_skb,
 						       priv->rx_1st_skb->len,
 						       skb->data,
-						       pkt_len);
-			skb_put(priv->rx_1st_skb, pkt_len);
+						       desc_len);
+			skb_put(priv->rx_1st_skb, desc_len);
 			dev_kfree_skb(skb);
 			break;
 		case DT_FEND:
@@ -834,23 +837,20 @@ static bool ravb_rx_gbeth(struct net_device *ndev, int *quota, int q)
 			skb_copy_to_linear_data_offset(priv->rx_1st_skb,
 						       priv->rx_1st_skb->len,
 						       skb->data,
-						       pkt_len);
-			skb_put(priv->rx_1st_skb, pkt_len);
+						       desc_len);
+			skb_put(priv->rx_1st_skb, desc_len);
 			dev_kfree_skb(skb);
 			priv->rx_1st_skb->protocol =
 				eth_type_trans(priv->rx_1st_skb, ndev);
 			if (ndev->features & NETIF_F_RXCSUM)
-				ravb_rx_csum_gbeth(skb);
+				ravb_rx_csum_gbeth(priv->rx_1st_skb);
+			stats->rx_bytes += priv->rx_1st_skb->len;
 			napi_gro_receive(&priv->napi[q],
 					 priv->rx_1st_skb);
 			rx_packets++;
-			stats->rx_bytes += pkt_len;
 			break;
 		}
-
-		entry = (++priv->cur_rx[q]) % priv->num_rx_ring[q];
-		desc = &priv->rx_ring[q].desc[entry];
 	}
 
 	/* Refill the RX ring buffers. */
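
A note on the DT_FEND hunk above: for jumbo frames spanning several descriptors, the old code called ravb_rx_csum_gbeth() on the last fragment's skb, which had just been handed to dev_kfree_skb(), and credited rx_bytes with only that final fragment's length. The fix runs the checksum over the assembled priv->rx_1st_skb and counts its full length. A self-contained sketch of the corrected accounting (fake_skb and csum_verify are stand-ins, not the kernel skb API):

```c
#include <stdio.h>
#include <string.h>

/* Stand-in for struct sk_buff: a linear buffer plus a length. */
struct fake_skb {
	unsigned char data[8192];
	size_t len;
};

/* Stand-in for ravb_rx_csum_gbeth(): must see the whole frame. */
static void csum_verify(const struct fake_skb *skb)
{
	printf("checksumming %zu bytes\n", skb->len);
}

int main(void)
{
	struct fake_skb first = { .len = 0 };	/* plays priv->rx_1st_skb */
	const size_t frag_len[] = { 1500, 1500, 1000 };	/* one jumbo frame */
	size_t rx_bytes = 0;
	size_t i;

	/* DT_FSTART / DT_FMID / DT_FEND: append each fragment in turn. */
	for (i = 0; i < 3; i++) {
		memset(first.data + first.len, 0xab, frag_len[i]);
		first.len += frag_len[i];
	}

	/* Fixed behaviour: checksum the assembled frame and credit its
	 * full length (4000 bytes), not the final fragment's 1000. */
	csum_verify(&first);
	rx_bytes += first.len;

	printf("rx_bytes += %zu\n", rx_bytes);
	return 0;
}
```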
@@ -891,30 +891,29 @@ static bool ravb_rx_rcar(struct net_device *ndev, int *quota, int q)
 {
 	struct ravb_private *priv = netdev_priv(ndev);
 	const struct ravb_hw_info *info = priv->info;
-	int entry = priv->cur_rx[q] % priv->num_rx_ring[q];
-	int boguscnt = (priv->dirty_rx[q] + priv->num_rx_ring[q]) -
-		priv->cur_rx[q];
 	struct net_device_stats *stats = &priv->stats[q];
 	struct ravb_ex_rx_desc *desc;
+	unsigned int limit, i;
 	struct sk_buff *skb;
 	dma_addr_t dma_addr;
 	struct timespec64 ts;
+	int rx_packets = 0;
 	u8 desc_status;
 	u16 pkt_len;
-	int limit;
+	int entry;
 
-	boguscnt = min(boguscnt, *quota);
-	limit = boguscnt;
-	desc = &priv->rx_ring[q].ex_desc[entry];
-	while (desc->die_dt != DT_FEMPTY) {
+	limit = priv->dirty_rx[q] + priv->num_rx_ring[q] - priv->cur_rx[q];
+	for (i = 0; i < limit; i++, priv->cur_rx[q]++) {
+		entry = priv->cur_rx[q] % priv->num_rx_ring[q];
+		desc = &priv->rx_ring[q].ex_desc[entry];
+		if (rx_packets == *quota || desc->die_dt == DT_FEMPTY)
+			break;
+
 		/* Descriptor type must be checked before all other reads */
 		dma_rmb();
 		desc_status = desc->msc;
 		pkt_len = le16_to_cpu(desc->ds_cc) & RX_DS;
 
-		if (--boguscnt < 0)
-			break;
-
 		/* We use 0-byte descriptors to mark the DMA mapping errors */
 		if (!pkt_len)
 			continue;
@@ -960,12 +959,9 @@ static bool ravb_rx_rcar(struct net_device *ndev, int *quota, int q)
 			if (ndev->features & NETIF_F_RXCSUM)
 				ravb_rx_csum(skb);
 			napi_gro_receive(&priv->napi[q], skb);
-			stats->rx_packets++;
+			rx_packets++;
 			stats->rx_bytes += pkt_len;
 		}
-
-		entry = (++priv->cur_rx[q]) % priv->num_rx_ring[q];
-		desc = &priv->rx_ring[q].ex_desc[entry];
 	}
 
 	/* Refill the RX ring buffers. */
@@ -995,9 +991,9 @@ static bool ravb_rx_rcar(struct net_device *ndev, int *quota, int q)
 		desc->die_dt = DT_FEMPTY;
 	}
 
-	*quota -= limit - (++boguscnt);
+	stats->rx_packets += rx_packets;
+	*quota -= rx_packets;
 
-	return boguscnt <= 0;
+	return *quota == 0;
 }
 
 /* Packet receive function for Ethernet AVB */
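
The new return convention in the last hunk deserves a note: ravb_rx_rcar() now reports whether the whole NAPI budget was consumed, which is the signal the poll loop uses to choose between polling again and re-enabling interrupts. A simplified standalone model of that contract (all names here are illustrative, not the kernel NAPI API):

```c
#include <stdbool.h>
#include <stdio.h>

static int pending = 10;	/* simulated packets waiting in the ring */

/* Mirrors the fixed contract: consume up to *quota packets and report
 * whether the budget was exhausted. */
static bool rx(int *quota)
{
	int n = pending < *quota ? pending : *quota;

	pending -= n;
	*quota -= n;
	return *quota == 0;	/* true: budget spent, more work likely */
}

int main(void)
{
	const int budget = 4;

	for (;;) {
		int quota = budget;

		if (!rx(&quota)) {
			/* Budget not exhausted: ring drained. The real
			 * driver would re-enable RX interrupts here. */
			printf("drained, %d left\n", pending);
			break;
		}
		printf("budget exhausted, poll again\n");
	}
	return 0;
}
```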