Commit a1560dd7 authored by David S. Miller

Merge branch 'mediatek-fixes'

Sean Wang says:

====================
mediatek: Fix warning and issue

This patch set fixes the following warning and issues

v1 -> v2: Fix message typos and add cover letter

v2 -> v3: Split from the previous series for submitting bug fixes
as a series targeting 'net'
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents d2fbdf76 55a4e778
...@@ -245,12 +245,16 @@ static int mtk_phy_connect(struct mtk_mac *mac) ...@@ -245,12 +245,16 @@ static int mtk_phy_connect(struct mtk_mac *mac)
case PHY_INTERFACE_MODE_MII: case PHY_INTERFACE_MODE_MII:
ge_mode = 1; ge_mode = 1;
break; break;
case PHY_INTERFACE_MODE_RMII: case PHY_INTERFACE_MODE_REVMII:
ge_mode = 2; ge_mode = 2;
break; break;
case PHY_INTERFACE_MODE_RMII:
if (!mac->id)
goto err_phy;
ge_mode = 3;
break;
default: default:
dev_err(eth->dev, "invalid phy_mode\n"); goto err_phy;
return -1;
} }
/* put the gmac into the right mode */ /* put the gmac into the right mode */
...@@ -263,6 +267,11 @@ static int mtk_phy_connect(struct mtk_mac *mac) ...@@ -263,6 +267,11 @@ static int mtk_phy_connect(struct mtk_mac *mac)
mac->phy_dev->autoneg = AUTONEG_ENABLE; mac->phy_dev->autoneg = AUTONEG_ENABLE;
mac->phy_dev->speed = 0; mac->phy_dev->speed = 0;
mac->phy_dev->duplex = 0; mac->phy_dev->duplex = 0;
if (of_phy_is_fixed_link(mac->of_node))
mac->phy_dev->supported |=
SUPPORTED_Pause | SUPPORTED_Asym_Pause;
mac->phy_dev->supported &= PHY_GBIT_FEATURES | SUPPORTED_Pause | mac->phy_dev->supported &= PHY_GBIT_FEATURES | SUPPORTED_Pause |
SUPPORTED_Asym_Pause; SUPPORTED_Asym_Pause;
mac->phy_dev->advertising = mac->phy_dev->supported | mac->phy_dev->advertising = mac->phy_dev->supported |
...@@ -272,6 +281,11 @@ static int mtk_phy_connect(struct mtk_mac *mac) ...@@ -272,6 +281,11 @@ static int mtk_phy_connect(struct mtk_mac *mac)
of_node_put(np); of_node_put(np);
return 0; return 0;
err_phy:
of_node_put(np);
dev_err(eth->dev, "invalid phy_mode\n");
return -EINVAL;
} }
static int mtk_mdio_init(struct mtk_eth *eth) static int mtk_mdio_init(struct mtk_eth *eth)
...@@ -544,15 +558,15 @@ static inline struct mtk_tx_buf *mtk_desc_to_tx_buf(struct mtk_tx_ring *ring, ...@@ -544,15 +558,15 @@ static inline struct mtk_tx_buf *mtk_desc_to_tx_buf(struct mtk_tx_ring *ring,
return &ring->buf[idx]; return &ring->buf[idx];
} }
static void mtk_tx_unmap(struct device *dev, struct mtk_tx_buf *tx_buf) static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf)
{ {
if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) { if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) {
dma_unmap_single(dev, dma_unmap_single(eth->dev,
dma_unmap_addr(tx_buf, dma_addr0), dma_unmap_addr(tx_buf, dma_addr0),
dma_unmap_len(tx_buf, dma_len0), dma_unmap_len(tx_buf, dma_len0),
DMA_TO_DEVICE); DMA_TO_DEVICE);
} else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0) { } else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0) {
dma_unmap_page(dev, dma_unmap_page(eth->dev,
dma_unmap_addr(tx_buf, dma_addr0), dma_unmap_addr(tx_buf, dma_addr0),
dma_unmap_len(tx_buf, dma_len0), dma_unmap_len(tx_buf, dma_len0),
DMA_TO_DEVICE); DMA_TO_DEVICE);
...@@ -597,9 +611,9 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev, ...@@ -597,9 +611,9 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
if (skb_vlan_tag_present(skb)) if (skb_vlan_tag_present(skb))
txd4 |= TX_DMA_INS_VLAN | skb_vlan_tag_get(skb); txd4 |= TX_DMA_INS_VLAN | skb_vlan_tag_get(skb);
mapped_addr = dma_map_single(&dev->dev, skb->data, mapped_addr = dma_map_single(eth->dev, skb->data,
skb_headlen(skb), DMA_TO_DEVICE); skb_headlen(skb), DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(&dev->dev, mapped_addr))) if (unlikely(dma_mapping_error(eth->dev, mapped_addr)))
return -ENOMEM; return -ENOMEM;
WRITE_ONCE(itxd->txd1, mapped_addr); WRITE_ONCE(itxd->txd1, mapped_addr);
...@@ -625,10 +639,10 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev, ...@@ -625,10 +639,10 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
n_desc++; n_desc++;
frag_map_size = min(frag_size, MTK_TX_DMA_BUF_LEN); frag_map_size = min(frag_size, MTK_TX_DMA_BUF_LEN);
mapped_addr = skb_frag_dma_map(&dev->dev, frag, offset, mapped_addr = skb_frag_dma_map(eth->dev, frag, offset,
frag_map_size, frag_map_size,
DMA_TO_DEVICE); DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(&dev->dev, mapped_addr))) if (unlikely(dma_mapping_error(eth->dev, mapped_addr)))
goto err_dma; goto err_dma;
if (i == nr_frags - 1 && if (i == nr_frags - 1 &&
...@@ -681,7 +695,7 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev, ...@@ -681,7 +695,7 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
tx_buf = mtk_desc_to_tx_buf(ring, itxd); tx_buf = mtk_desc_to_tx_buf(ring, itxd);
/* unmap dma */ /* unmap dma */
mtk_tx_unmap(&dev->dev, tx_buf); mtk_tx_unmap(eth, tx_buf);
itxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU; itxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
itxd = mtk_qdma_phys_to_virt(ring, itxd->txd2); itxd = mtk_qdma_phys_to_virt(ring, itxd->txd2);
...@@ -838,11 +852,11 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget, ...@@ -838,11 +852,11 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
netdev->stats.rx_dropped++; netdev->stats.rx_dropped++;
goto release_desc; goto release_desc;
} }
dma_addr = dma_map_single(&eth->netdev[mac]->dev, dma_addr = dma_map_single(eth->dev,
new_data + NET_SKB_PAD, new_data + NET_SKB_PAD,
ring->buf_size, ring->buf_size,
DMA_FROM_DEVICE); DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(&netdev->dev, dma_addr))) { if (unlikely(dma_mapping_error(eth->dev, dma_addr))) {
skb_free_frag(new_data); skb_free_frag(new_data);
netdev->stats.rx_dropped++; netdev->stats.rx_dropped++;
goto release_desc; goto release_desc;
...@@ -857,7 +871,7 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget, ...@@ -857,7 +871,7 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
} }
skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN); skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
dma_unmap_single(&netdev->dev, trxd.rxd1, dma_unmap_single(eth->dev, trxd.rxd1,
ring->buf_size, DMA_FROM_DEVICE); ring->buf_size, DMA_FROM_DEVICE);
pktlen = RX_DMA_GET_PLEN0(trxd.rxd2); pktlen = RX_DMA_GET_PLEN0(trxd.rxd2);
skb->dev = netdev; skb->dev = netdev;
...@@ -939,7 +953,7 @@ static int mtk_poll_tx(struct mtk_eth *eth, int budget) ...@@ -939,7 +953,7 @@ static int mtk_poll_tx(struct mtk_eth *eth, int budget)
done[mac]++; done[mac]++;
budget--; budget--;
} }
mtk_tx_unmap(eth->dev, tx_buf); mtk_tx_unmap(eth, tx_buf);
ring->last_free = desc; ring->last_free = desc;
atomic_inc(&ring->free_count); atomic_inc(&ring->free_count);
...@@ -1094,7 +1108,7 @@ static void mtk_tx_clean(struct mtk_eth *eth) ...@@ -1094,7 +1108,7 @@ static void mtk_tx_clean(struct mtk_eth *eth)
if (ring->buf) { if (ring->buf) {
for (i = 0; i < MTK_DMA_SIZE; i++) for (i = 0; i < MTK_DMA_SIZE; i++)
mtk_tx_unmap(eth->dev, &ring->buf[i]); mtk_tx_unmap(eth, &ring->buf[i]);
kfree(ring->buf); kfree(ring->buf);
ring->buf = NULL; ring->buf = NULL;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment