Commit c3e6b2c3 authored by Aleksander Jan Bajkowski, committed by David S. Miller

net: lantiq_xrx200: add ingress SG DMA support

This patch adds support for scatter-gather DMA. The DMA engine in the
PMAC splits a packet into several buffers when the MTU on the CPU port
is smaller than the MTU of the switch. The first buffer starts at an
offset of NET_IP_ALIGN; in subsequent buffers, the DMA engine ignores
this offset. With this patch, the user can still connect to the device
in such a situation. For normal configurations, the patch has no effect
on performance.
Signed-off-by: Aleksander Jan Bajkowski <olek2@wp.pl>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 7a71c8aa
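
For readers unfamiliar with frag_list chaining, here is a userspace-only sketch of the head/tail bookkeeping that the patch adds to xrx200_hw_receive(). The struct seg type and the SOP/EOP flags are simplified stand-ins for the kernel's struct sk_buff, LTQ_DMA_SOP and LTQ_DMA_EOP; none of this is driver code, and only the chaining logic mirrors the patch.

#include <stdio.h>
#include <stddef.h>

/* Simplified stand-in for struct sk_buff: the buffer that carries the
 * start of a packet becomes the head; the first extra buffer hangs off
 * frag_list, and later ones are chained through next. */
struct seg {
        int len;                /* total bytes (head + fragments) */
        int data_len;           /* bytes held in fragments only */
        struct seg *frag_list;  /* first fragment */
        struct seg *next;       /* subsequent fragments */
};

#define SOP 0x1                 /* start of packet, like LTQ_DMA_SOP */
#define EOP 0x2                 /* end of packet, like LTQ_DMA_EOP */

static struct seg *head, *tail;

/* Collect buffers until EOP, then "deliver" the assembled packet. */
static void receive(struct seg *s, int flags)
{
        if (flags & SOP) {
                head = tail = s;
        } else if (head) {
                if (head == tail)
                        head->frag_list = s;    /* first fragment */
                else
                        tail->next = s;         /* later fragments */
                tail = s;
                head->len += s->len;
                head->data_len += s->len;
        }

        if (flags & EOP) {
                printf("packet complete: %d bytes\n", head->len);
                head = tail = NULL;
        }
}

int main(void)
{
        /* A 3000-byte frame split by the PMAC into three DMA buffers. */
        struct seg a = { .len = 1200 }, b = { .len = 1200 }, c = { .len = 600 };

        receive(&a, SOP);
        receive(&b, 0);
        receive(&c, EOP);       /* prints: packet complete: 3000 bytes */
        return 0;
}

The real patch additionally calls skb_reserve(ch->skb_tail, -NET_IP_ALIGN) on each fragment: as the commit message notes, DMA ignores the NET_IP_ALIGN offset in subsequent buffers, so skb->data is pulled back to the start of the buffer where the hardware actually wrote. It also accumulates truesize so socket memory accounting stays correct.
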
@@ -27,6 +27,9 @@
 #define XRX200_DMA_TX			1
 #define XRX200_DMA_BURST_LEN		8
 
+#define XRX200_DMA_PACKET_COMPLETE	0
+#define XRX200_DMA_PACKET_IN_PROGRESS	1
+
 /* cpu port mac */
 #define PMAC_RX_IPG			0x0024
 #define PMAC_RX_IPG_MASK		0xf
@@ -62,6 +65,9 @@ struct xrx200_chan {
 	struct ltq_dma_channel dma;
 	struct sk_buff *skb[LTQ_DESC_NUM];
 
+	struct sk_buff *skb_head;
+	struct sk_buff *skb_tail;
+
 	struct xrx200_priv *priv;
 };
 
@@ -205,7 +211,8 @@ static int xrx200_hw_receive(struct xrx200_chan *ch)
 	struct xrx200_priv *priv = ch->priv;
 	struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];
 	struct sk_buff *skb = ch->skb[ch->dma.desc];
-	int len = (desc->ctl & LTQ_DMA_SIZE_MASK);
+	u32 ctl = desc->ctl;
+	int len = (ctl & LTQ_DMA_SIZE_MASK);
 	struct net_device *net_dev = priv->net_dev;
 	int ret;
 
@@ -221,12 +228,36 @@ static int xrx200_hw_receive(struct xrx200_chan *ch)
 	}
 
 	skb_put(skb, len);
-	skb->protocol = eth_type_trans(skb, net_dev);
-	netif_receive_skb(skb);
-	net_dev->stats.rx_packets++;
-	net_dev->stats.rx_bytes += len;
 
-	return 0;
+	/* add buffers to skb via skb->frag_list */
+	if (ctl & LTQ_DMA_SOP) {
+		ch->skb_head = skb;
+		ch->skb_tail = skb;
+	} else if (ch->skb_head) {
+		if (ch->skb_head == ch->skb_tail)
+			skb_shinfo(ch->skb_tail)->frag_list = skb;
+		else
+			ch->skb_tail->next = skb;
+		ch->skb_tail = skb;
+		skb_reserve(ch->skb_tail, -NET_IP_ALIGN);
+		ch->skb_head->len += skb->len;
+		ch->skb_head->data_len += skb->len;
+		ch->skb_head->truesize += skb->truesize;
+	}
+
+	if (ctl & LTQ_DMA_EOP) {
+		ch->skb_head->protocol = eth_type_trans(ch->skb_head, net_dev);
+		netif_receive_skb(ch->skb_head);
+		net_dev->stats.rx_packets++;
+		net_dev->stats.rx_bytes += ch->skb_head->len;
+		ch->skb_head = NULL;
+		ch->skb_tail = NULL;
+		ret = XRX200_DMA_PACKET_COMPLETE;
+	} else {
+		ret = XRX200_DMA_PACKET_IN_PROGRESS;
+	}
+
+	return ret;
 }
 
 static int xrx200_poll_rx(struct napi_struct *napi, int budget)
@@ -241,7 +272,9 @@ static int xrx200_poll_rx(struct napi_struct *napi, int budget)
 
 		if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) == LTQ_DMA_C) {
 			ret = xrx200_hw_receive(ch);
-			if (ret)
+			if (ret == XRX200_DMA_PACKET_IN_PROGRESS)
+				continue;
+			if (ret != XRX200_DMA_PACKET_COMPLETE)
 				return ret;
 			rx++;
 		} else {
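
The matching change in xrx200_poll_rx() means that a buffer which merely continues a packet no longer ends the poll loop: the loop skips ahead to the next descriptor, and only completed packets count toward the NAPI budget. Below is a minimal, self-contained model of that accounting; the ring[] contents and names are hypothetical stand-ins for the new status defines.

#include <stdio.h>

#define PACKET_COMPLETE		0	/* like XRX200_DMA_PACKET_COMPLETE */
#define PACKET_IN_PROGRESS	1	/* like XRX200_DMA_PACKET_IN_PROGRESS */

/* Hypothetical per-descriptor results: one single-buffer frame,
 * then a frame split across three buffers. */
static const int ring[] = {
        PACKET_COMPLETE,
        PACKET_IN_PROGRESS, PACKET_IN_PROGRESS, PACKET_COMPLETE,
};

int main(void)
{
        int budget = 4, rx = 0;
        size_t i;

        for (i = 0; i < sizeof(ring) / sizeof(ring[0]) && rx < budget; i++) {
                int ret = ring[i];

                if (ret == PACKET_IN_PROGRESS)
                        continue;       /* buffer consumed, packet not finished */
                if (ret != PACKET_COMPLETE)
                        break;          /* stands in for an error return */
                rx++;                   /* only full packets count */
        }

        printf("packets delivered: %d\n", rx);  /* prints 2 */
        return 0;
}

For ordinary single-buffer frames every descriptor carries both SOP and EOP, so each call completes a packet and the loop behaves exactly as before, matching the commit message's note that normal configurations see no performance change.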