Commit f543305d authored by Kazuya Mizuguchi, committed by David S. Miller

ravb: remove tx buffer addr 4-byte alignment restriction for R-Car Gen3

This patch switches the driver from two TX descriptors per packet to one on
R-Car Gen3, because Gen3 does not have the 4-byte alignment restriction on the
transmission buffer.
Signed-off-by: Kazuya Mizuguchi <kazuya.mizuguchi.ks@renesas.com>
Signed-off-by: Simon Horman <horms+renesas@verge.net.au>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent f46f33a6
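Background for the change (not part of the patch itself): on R-Car Gen2 the DMAC
requires each TX buffer address to be 4-byte aligned, so the driver uses two
descriptors per frame: the first (DT_FSTART) points at a small aligned bounce
buffer holding a copied head of the frame, and the second (DT_FEND) maps the
remainder in place. Gen3 has no such restriction, so a single DT_FSINGLE
descriptor can map the whole skb. The standalone user-space sketch below only
illustrates the head/remainder arithmetic; the frame size and misalignment are
hypothetical values, not driver code.

	#include <stdio.h>
	#include <stdint.h>

	#define DPTR_ALIGN 4	/* TX buffer address alignment required on Gen2 */

	int main(void)
	{
		/* Hypothetical 60-byte (ETH_ZLEN) frame starting one byte past an
		 * aligned address, so its buffer address is not 4-byte aligned. */
		_Alignas(DPTR_ALIGN) unsigned char frame[61];
		unsigned char *data = frame + 1;
		size_t total = 60;

		/* Bytes the Gen2 path copies into the aligned bounce buffer for the
		 * first descriptor; when the data is already aligned the driver still
		 * uses DPTR_ALIGN bytes to avoid a zero-length descriptor. */
		size_t head = (uintptr_t)data % DPTR_ALIGN ?
			      DPTR_ALIGN - (uintptr_t)data % DPTR_ALIGN : DPTR_ALIGN;

		printf("Gen2: descriptor 1 (DT_FSTART) carries %zu copied byte(s),\n"
		       "      descriptor 2 (DT_FEND) maps the remaining %zu bytes in place.\n",
		       head, total - head);
		printf("Gen3: one descriptor (DT_FSINGLE) maps all %zu bytes in place.\n",
		       total);
		return 0;
	}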
@@ -954,7 +954,10 @@ enum RAVB_QUEUE {
 #define RX_QUEUE_OFFSET	4
 #define NUM_RX_QUEUE	2
 #define NUM_TX_QUEUE	2
-#define NUM_TX_DESC	2	/* TX descriptors per packet */
+
+/* TX descriptors per packet */
+#define NUM_TX_DESC_GEN2	2
+#define NUM_TX_DESC_GEN3	1
 
 struct ravb_tstamp_skb {
 	struct list_head list;
@@ -1033,6 +1036,7 @@ struct ravb_private {
 	unsigned no_avb_link:1;
 	unsigned avb_link_active_low:1;
 	unsigned wol_enabled:1;
+	int num_tx_desc;		/* TX descriptors per packet */
 };
 
 static inline u32 ravb_read(struct net_device *ndev, enum ravb_reg reg)
...
@@ -182,6 +182,7 @@ static int ravb_tx_free(struct net_device *ndev, int q, bool free_txed_only)
 {
 	struct ravb_private *priv = netdev_priv(ndev);
 	struct net_device_stats *stats = &priv->stats[q];
+	int num_tx_desc = priv->num_tx_desc;
 	struct ravb_tx_desc *desc;
 	int free_num = 0;
 	int entry;
@@ -191,7 +192,7 @@ static int ravb_tx_free(struct net_device *ndev, int q, bool free_txed_only)
 		bool txed;
 
 		entry = priv->dirty_tx[q] % (priv->num_tx_ring[q] *
-					     NUM_TX_DESC);
+					     num_tx_desc);
 		desc = &priv->tx_ring[q][entry];
 		txed = desc->die_dt == DT_FEMPTY;
 		if (free_txed_only && !txed)
@@ -200,12 +201,12 @@ static int ravb_tx_free(struct net_device *ndev, int q, bool free_txed_only)
 		dma_rmb();
 		size = le16_to_cpu(desc->ds_tagl) & TX_DS;
 		/* Free the original skb. */
-		if (priv->tx_skb[q][entry / NUM_TX_DESC]) {
+		if (priv->tx_skb[q][entry / num_tx_desc]) {
 			dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
 					 size, DMA_TO_DEVICE);
 			/* Last packet descriptor? */
-			if (entry % NUM_TX_DESC == NUM_TX_DESC - 1) {
-				entry /= NUM_TX_DESC;
+			if (entry % num_tx_desc == num_tx_desc - 1) {
+				entry /= num_tx_desc;
 				dev_kfree_skb_any(priv->tx_skb[q][entry]);
 				priv->tx_skb[q][entry] = NULL;
 				if (txed)
@@ -224,6 +225,7 @@ static int ravb_tx_free(struct net_device *ndev, int q, bool free_txed_only)
 static void ravb_ring_free(struct net_device *ndev, int q)
 {
 	struct ravb_private *priv = netdev_priv(ndev);
+	int num_tx_desc = priv->num_tx_desc;
 	int ring_size;
 	int i;
@@ -249,7 +251,7 @@ static void ravb_ring_free(struct net_device *ndev, int q)
 		ravb_tx_free(ndev, q, false);
 
 		ring_size = sizeof(struct ravb_tx_desc) *
-			    (priv->num_tx_ring[q] * NUM_TX_DESC + 1);
+			    (priv->num_tx_ring[q] * num_tx_desc + 1);
 		dma_free_coherent(ndev->dev.parent, ring_size, priv->tx_ring[q],
 				  priv->tx_desc_dma[q]);
 		priv->tx_ring[q] = NULL;
@@ -278,12 +280,13 @@ static void ravb_ring_free(struct net_device *ndev, int q)
 static void ravb_ring_format(struct net_device *ndev, int q)
 {
 	struct ravb_private *priv = netdev_priv(ndev);
+	int num_tx_desc = priv->num_tx_desc;
 	struct ravb_ex_rx_desc *rx_desc;
 	struct ravb_tx_desc *tx_desc;
 	struct ravb_desc *desc;
 	int rx_ring_size = sizeof(*rx_desc) * priv->num_rx_ring[q];
 	int tx_ring_size = sizeof(*tx_desc) * priv->num_tx_ring[q] *
-			   NUM_TX_DESC;
+			   num_tx_desc;
 	dma_addr_t dma_addr;
 	int i;
@@ -318,9 +321,11 @@ static void ravb_ring_format(struct net_device *ndev, int q)
 	for (i = 0, tx_desc = priv->tx_ring[q]; i < priv->num_tx_ring[q];
 	     i++, tx_desc++) {
 		tx_desc->die_dt = DT_EEMPTY;
-		tx_desc++;
-		tx_desc->die_dt = DT_EEMPTY;
+		if (num_tx_desc > 1) {
+			tx_desc++;
+			tx_desc->die_dt = DT_EEMPTY;
+		}
 	}
 	tx_desc->dptr = cpu_to_le32((u32)priv->tx_desc_dma[q]);
 	tx_desc->die_dt = DT_LINKFIX; /* type */
@@ -339,6 +344,7 @@ static void ravb_ring_format(struct net_device *ndev, int q)
 static int ravb_ring_init(struct net_device *ndev, int q)
 {
 	struct ravb_private *priv = netdev_priv(ndev);
+	int num_tx_desc = priv->num_tx_desc;
 	struct sk_buff *skb;
 	int ring_size;
 	int i;
@@ -362,11 +368,13 @@ static int ravb_ring_init(struct net_device *ndev, int q)
 		priv->rx_skb[q][i] = skb;
 	}
 
-	/* Allocate rings for the aligned buffers */
-	priv->tx_align[q] = kmalloc(DPTR_ALIGN * priv->num_tx_ring[q] +
-				    DPTR_ALIGN - 1, GFP_KERNEL);
-	if (!priv->tx_align[q])
-		goto error;
+	if (num_tx_desc > 1) {
+		/* Allocate rings for the aligned buffers */
+		priv->tx_align[q] = kmalloc(DPTR_ALIGN * priv->num_tx_ring[q] +
+					    DPTR_ALIGN - 1, GFP_KERNEL);
+		if (!priv->tx_align[q])
+			goto error;
+	}
 
 	/* Allocate all RX descriptors. */
 	ring_size = sizeof(struct ravb_ex_rx_desc) * (priv->num_rx_ring[q] + 1);
@@ -380,7 +388,7 @@ static int ravb_ring_init(struct net_device *ndev, int q)
 
 	/* Allocate all TX descriptors. */
 	ring_size = sizeof(struct ravb_tx_desc) *
-		    (priv->num_tx_ring[q] * NUM_TX_DESC + 1);
+		    (priv->num_tx_ring[q] * num_tx_desc + 1);
 	priv->tx_ring[q] = dma_alloc_coherent(ndev->dev.parent, ring_size,
 					      &priv->tx_desc_dma[q],
 					      GFP_KERNEL);
@@ -1485,6 +1493,7 @@ static void ravb_tx_timeout_work(struct work_struct *work)
 static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 {
 	struct ravb_private *priv = netdev_priv(ndev);
+	int num_tx_desc = priv->num_tx_desc;
 	u16 q = skb_get_queue_mapping(skb);
 	struct ravb_tstamp_skb *ts_skb;
 	struct ravb_tx_desc *desc;
@@ -1496,7 +1505,7 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 
 	spin_lock_irqsave(&priv->lock, flags);
 	if (priv->cur_tx[q] - priv->dirty_tx[q] > (priv->num_tx_ring[q] - 1) *
-	    NUM_TX_DESC) {
+	    num_tx_desc) {
 		netif_err(priv, tx_queued, ndev,
 			  "still transmitting with the full ring!\n");
 		netif_stop_subqueue(ndev, q);
@@ -1507,27 +1516,32 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 	if (skb_put_padto(skb, ETH_ZLEN))
 		goto exit;
 
-	entry = priv->cur_tx[q] % (priv->num_tx_ring[q] * NUM_TX_DESC);
-	priv->tx_skb[q][entry / NUM_TX_DESC] = skb;
-
-	buffer = PTR_ALIGN(priv->tx_align[q], DPTR_ALIGN) +
-		 entry / NUM_TX_DESC * DPTR_ALIGN;
-	len = PTR_ALIGN(skb->data, DPTR_ALIGN) - skb->data;
-	/* Zero length DMA descriptors are problematic as they seem to
-	 * terminate DMA transfers. Avoid them by simply using a length of
-	 * DPTR_ALIGN (4) when skb data is aligned to DPTR_ALIGN.
-	 *
-	 * As skb is guaranteed to have at least ETH_ZLEN (60) bytes of
-	 * data by the call to skb_put_padto() above this is safe with
-	 * respect to both the length of the first DMA descriptor (len)
-	 * overflowing the available data and the length of the second DMA
-	 * descriptor (skb->len - len) being negative.
-	 */
-	if (len == 0)
-		len = DPTR_ALIGN;
+	entry = priv->cur_tx[q] % (priv->num_tx_ring[q] * num_tx_desc);
+	priv->tx_skb[q][entry / num_tx_desc] = skb;
+
+	if (num_tx_desc > 1) {
+		buffer = PTR_ALIGN(priv->tx_align[q], DPTR_ALIGN) +
+			 entry / num_tx_desc * DPTR_ALIGN;
+		len = PTR_ALIGN(skb->data, DPTR_ALIGN) - skb->data;
+
+		/* Zero length DMA descriptors are problematic as they seem
+		 * to terminate DMA transfers. Avoid them by simply using a
+		 * length of DPTR_ALIGN (4) when skb data is aligned to
+		 * DPTR_ALIGN.
+		 *
+		 * As skb is guaranteed to have at least ETH_ZLEN (60)
+		 * bytes of data by the call to skb_put_padto() above this
+		 * is safe with respect to both the length of the first DMA
+		 * descriptor (len) overflowing the available data and the
+		 * length of the second DMA descriptor (skb->len - len)
+		 * being negative.
+		 */
+		if (len == 0)
+			len = DPTR_ALIGN;
 
-	memcpy(buffer, skb->data, len);
-	dma_addr = dma_map_single(ndev->dev.parent, buffer, len, DMA_TO_DEVICE);
-	if (dma_mapping_error(ndev->dev.parent, dma_addr))
-		goto drop;
+		memcpy(buffer, skb->data, len);
+		dma_addr = dma_map_single(ndev->dev.parent, buffer, len,
+					  DMA_TO_DEVICE);
+		if (dma_mapping_error(ndev->dev.parent, dma_addr))
+			goto drop;
@@ -1537,11 +1551,20 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 
-	buffer = skb->data + len;
-	len = skb->len - len;
-	dma_addr = dma_map_single(ndev->dev.parent, buffer, len, DMA_TO_DEVICE);
-	if (dma_mapping_error(ndev->dev.parent, dma_addr))
-		goto unmap;
+		buffer = skb->data + len;
+		len = skb->len - len;
+		dma_addr = dma_map_single(ndev->dev.parent, buffer, len,
+					  DMA_TO_DEVICE);
+		if (dma_mapping_error(ndev->dev.parent, dma_addr))
+			goto unmap;
 
-	desc++;
+		desc++;
+	} else {
+		desc = &priv->tx_ring[q][entry];
+		len = skb->len;
+		dma_addr = dma_map_single(ndev->dev.parent, skb->data, skb->len,
+					  DMA_TO_DEVICE);
+		if (dma_mapping_error(ndev->dev.parent, dma_addr))
+			goto drop;
+	}
 	desc->ds_tagl = cpu_to_le16(len);
 	desc->dptr = cpu_to_le32(dma_addr);
@@ -1549,9 +1572,11 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 	if (q == RAVB_NC) {
 		ts_skb = kmalloc(sizeof(*ts_skb), GFP_ATOMIC);
 		if (!ts_skb) {
-			desc--;
-			dma_unmap_single(ndev->dev.parent, dma_addr, len,
-					 DMA_TO_DEVICE);
+			if (num_tx_desc > 1) {
+				desc--;
+				dma_unmap_single(ndev->dev.parent, dma_addr,
+						 len, DMA_TO_DEVICE);
+			}
 			goto unmap;
 		}
 		ts_skb->skb = skb;
@@ -1568,15 +1593,18 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 	skb_tx_timestamp(skb);
 
 	/* Descriptor type must be set after all the above writes */
 	dma_wmb();
-	desc->die_dt = DT_FEND;
-	desc--;
-	desc->die_dt = DT_FSTART;
-
+	if (num_tx_desc > 1) {
+		desc->die_dt = DT_FEND;
+		desc--;
+		desc->die_dt = DT_FSTART;
+	} else {
+		desc->die_dt = DT_FSINGLE;
+	}
 	ravb_modify(ndev, TCCR, TCCR_TSRQ0 << q, TCCR_TSRQ0 << q);
 
-	priv->cur_tx[q] += NUM_TX_DESC;
+	priv->cur_tx[q] += num_tx_desc;
 	if (priv->cur_tx[q] - priv->dirty_tx[q] >
-	    (priv->num_tx_ring[q] - 1) * NUM_TX_DESC &&
+	    (priv->num_tx_ring[q] - 1) * num_tx_desc &&
 	    !ravb_tx_free(ndev, q, true))
 		netif_stop_subqueue(ndev, q);
@@ -1590,7 +1618,7 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 			 le16_to_cpu(desc->ds_tagl), DMA_TO_DEVICE);
 drop:
 	dev_kfree_skb_any(skb);
-	priv->tx_skb[q][entry / NUM_TX_DESC] = NULL;
+	priv->tx_skb[q][entry / num_tx_desc] = NULL;
 	goto exit;
 }
@@ -2076,6 +2104,9 @@ static int ravb_probe(struct platform_device *pdev)
 	ndev->max_mtu = 2048 - (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN);
 	ndev->min_mtu = ETH_MIN_MTU;
 
+	priv->num_tx_desc = chip_id == RCAR_GEN2 ?
+		NUM_TX_DESC_GEN2 : NUM_TX_DESC_GEN3;
+
 	/* Set function */
 	ndev->netdev_ops = &ravb_netdev_ops;
 	ndev->ethtool_ops = &ravb_ethtool_ops;
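One consequence visible throughout the patch is that the ring bookkeeping now
scales with the per-packet descriptor count: cur_tx and dirty_tx advance by
num_tx_desc, the descriptor slot is the counter modulo (ring length *
num_tx_desc), and the owning tx_skb slot is the descriptor index divided by
num_tx_desc. A standalone sketch of that arithmetic with hypothetical values,
not driver code:

	#include <stdio.h>

	int main(void)
	{
		int num_tx_ring = 64;		/* hypothetical ring size, in packets */
		int num_tx_desc = 2;		/* 2 on R-Car Gen2, 1 on Gen3 */
		unsigned int cur_tx = 130;	/* hypothetical running TX counter */

		/* First descriptor slot used by this packet ... */
		int entry = cur_tx % (num_tx_ring * num_tx_desc);
		/* ... and the tx_skb[] slot that owns it. */
		int skb_slot = entry / num_tx_desc;

		printf("descriptor entry %d, skb slot %d\n", entry, skb_slot);

		/* One transmitted packet consumes num_tx_desc ring entries. */
		cur_tx += num_tx_desc;
		printf("next packet starts at entry %u\n",
		       cur_tx % (num_tx_ring * num_tx_desc));
		return 0;
	}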