Commit c1fcda2b authored by Loic Poulain, committed by Jakub Kicinski

net: mhi-net: Add re-aggregation of fragmented packets

When the device-side MTU is larger than the host-side MTU, packets
(typically rmnet packets) are split over multiple MHI transfers. In
that case, the fragments must be re-aggregated to recover the packet
before it is forwarded to the upper layer.

A fragmented packet results in an -EOVERFLOW MHI transaction status
for each of its fragments except the final one. Such transfers were
previously treated as errors, and the fragments were simply dropped.

This change adds a re-aggregation mechanism using skb chaining via
the skb frag_list.
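
As a rough sketch of what the rest of the stack then sees (hypothetical
debug helper, assuming <linux/skbuff.h>; not part of the patch):
skb->len covers the whole re-aggregated packet, skb_headlen() only the
head fragment's linear data, and the chained fragments can be walked
with skb_walk_frags():

	/* Hypothetical helper for an aggregated skb as built here:
	 * skb->len is the total packet length, skb_headlen() the head
	 * fragment's linear data, the rest hangs off frag_list.
	 */
	static void mhi_net_dump_agg(struct sk_buff *head)
	{
		struct sk_buff *iter;

		pr_debug("aggregated: %u bytes total, %u linear\n",
			 head->len, skb_headlen(head));

		skb_walk_frags(head, iter)
			pr_debug("  chained fragment: %u bytes\n", iter->len);
	}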

A warning is printed once, since this behavior usually comes from a
misconfiguration of the device (e.g. the modem MTU).
Signed-off-by: Loic Poulain <loic.poulain@linaro.org>
Acked-by: Jesse Brandeburg <jesse.brandeburg@intel.com>
Link: https://lore.kernel.org/r/1612428002-12333-1-git-send-email-loic.poulain@linaro.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parent d698e6a0
@@ -32,6 +32,8 @@ struct mhi_net_stats {
 struct mhi_net_dev {
 	struct mhi_device *mdev;
 	struct net_device *ndev;
+	struct sk_buff *skbagg_head;
+	struct sk_buff *skbagg_tail;
 	struct delayed_work rx_refill;
 	struct mhi_net_stats stats;
 	u32 rx_queue_sz;
@@ -132,6 +134,32 @@ static void mhi_net_setup(struct net_device *ndev)
 	ndev->tx_queue_len = 1000;
 }
 
+static struct sk_buff *mhi_net_skb_agg(struct mhi_net_dev *mhi_netdev,
+				       struct sk_buff *skb)
+{
+	struct sk_buff *head = mhi_netdev->skbagg_head;
+	struct sk_buff *tail = mhi_netdev->skbagg_tail;
+
+	/* This is non-paged skb chaining using frag_list */
+	if (!head) {
+		mhi_netdev->skbagg_head = skb;
+		return skb;
+	}
+
+	if (!skb_shinfo(head)->frag_list)
+		skb_shinfo(head)->frag_list = skb;
+	else
+		tail->next = skb;
+
+	head->len += skb->len;
+	head->data_len += skb->len;
+	head->truesize += skb->truesize;
+
+	mhi_netdev->skbagg_tail = skb;
+
+	return mhi_netdev->skbagg_head;
+}
+
 static void mhi_net_dl_callback(struct mhi_device *mhi_dev,
 				struct mhi_result *mhi_res)
 {
@@ -142,19 +170,42 @@ static void mhi_net_dl_callback(struct mhi_device *mhi_dev,
 	free_desc_count = mhi_get_free_desc_count(mhi_dev, DMA_FROM_DEVICE);
 
 	if (unlikely(mhi_res->transaction_status)) {
-		dev_kfree_skb_any(skb);
-
-		/* MHI layer stopping/resetting the DL channel */
-		if (mhi_res->transaction_status == -ENOTCONN)
+		switch (mhi_res->transaction_status) {
+		case -EOVERFLOW:
+			/* Packet can not fit in one MHI buffer and has been
+			 * split over multiple MHI transfers, do re-aggregation.
+			 * That usually means the device side MTU is larger than
+			 * the host side MTU/MRU. Since this is not optimal,
+			 * print a warning (once).
+			 */
+			netdev_warn_once(mhi_netdev->ndev,
+					 "Fragmented packets received, fix MTU?\n");
+			skb_put(skb, mhi_res->bytes_xferd);
+			mhi_net_skb_agg(mhi_netdev, skb);
+			break;
+		case -ENOTCONN:
+			/* MHI layer stopping/resetting the DL channel */
+			dev_kfree_skb_any(skb);
 			return;
-
-		u64_stats_update_begin(&mhi_netdev->stats.rx_syncp);
-		u64_stats_inc(&mhi_netdev->stats.rx_errors);
-		u64_stats_update_end(&mhi_netdev->stats.rx_syncp);
+		default:
+			/* Unknown error, simply drop */
+			dev_kfree_skb_any(skb);
+			u64_stats_update_begin(&mhi_netdev->stats.rx_syncp);
+			u64_stats_inc(&mhi_netdev->stats.rx_errors);
+			u64_stats_update_end(&mhi_netdev->stats.rx_syncp);
+		}
 	} else {
+		skb_put(skb, mhi_res->bytes_xferd);
+
+		if (mhi_netdev->skbagg_head) {
+			/* Aggregate the final fragment */
+			skb = mhi_net_skb_agg(mhi_netdev, skb);
+			mhi_netdev->skbagg_head = NULL;
+		}
+
 		u64_stats_update_begin(&mhi_netdev->stats.rx_syncp);
 		u64_stats_inc(&mhi_netdev->stats.rx_packets);
-		u64_stats_add(&mhi_netdev->stats.rx_bytes, mhi_res->bytes_xferd);
+		u64_stats_add(&mhi_netdev->stats.rx_bytes, skb->len);
 		u64_stats_update_end(&mhi_netdev->stats.rx_syncp);
 
 		switch (skb->data[0] & 0xf0) {
@@ -169,7 +220,6 @@ static void mhi_net_dl_callback(struct mhi_device *mhi_dev,
 			break;
 		}
 
-		skb_put(skb, mhi_res->bytes_xferd);
 		netif_rx(skb);
 	}
@@ -267,6 +317,7 @@ static int mhi_net_probe(struct mhi_device *mhi_dev,
 	dev_set_drvdata(dev, mhi_netdev);
 	mhi_netdev->ndev = ndev;
 	mhi_netdev->mdev = mhi_dev;
+	mhi_netdev->skbagg_head = NULL;
 	SET_NETDEV_DEV(ndev, &mhi_dev->dev);
 	SET_NETDEV_DEVTYPE(ndev, &wwan_type);
@@ -301,6 +352,9 @@ static void mhi_net_remove(struct mhi_device *mhi_dev)
 	mhi_unprepare_from_transfer(mhi_netdev->mdev);
 
+	if (mhi_netdev->skbagg_head)
+		kfree_skb(mhi_netdev->skbagg_head);
+
 	free_netdev(mhi_netdev->ndev);
 }
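
A note on the cleanup path above: kfree_skb() on skbagg_head also
releases any fragments already chained to its frag_list, so
mhi_net_remove() does not need to walk and free the chain fragment by
fragment.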