Commit 3af562a3 authored by Loic Poulain's avatar Loic Poulain Committed by David S. Miller

net: mhi: Allow decoupled MTU/MRU

MBIM protocol makes the mhi network interface asymmetric, ingress data
received from MHI is MBIM protocol, possibly containing multiple
aggregated IP packets, while egress data received from network stack is
IP protocol.

This change allows a 'protocol' to specify its own MRU, which, when
specified, is used to allocate MHI RX buffers (skb).

For MBIM, set the default MTU to 1500, which is the usual network MTU
for WWAN IP packets, and MRU to 3.5K (for allocation efficiency),
allowing skb to fit in a usual 4K page (including padding,
skb_shared_info, ...).
Signed-off-by: Loic Poulain <loic.poulain@linaro.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent d9f0713c
...@@ -29,6 +29,7 @@ struct mhi_net_dev { ...@@ -29,6 +29,7 @@ struct mhi_net_dev {
struct mhi_net_stats stats; struct mhi_net_stats stats;
u32 rx_queue_sz; u32 rx_queue_sz;
int msg_enable; int msg_enable;
unsigned int mru;
}; };
struct mhi_net_proto { struct mhi_net_proto {
......
...@@ -265,10 +265,12 @@ static void mhi_net_rx_refill_work(struct work_struct *work) ...@@ -265,10 +265,12 @@ static void mhi_net_rx_refill_work(struct work_struct *work)
rx_refill.work); rx_refill.work);
struct net_device *ndev = mhi_netdev->ndev; struct net_device *ndev = mhi_netdev->ndev;
struct mhi_device *mdev = mhi_netdev->mdev; struct mhi_device *mdev = mhi_netdev->mdev;
int size = READ_ONCE(ndev->mtu);
struct sk_buff *skb; struct sk_buff *skb;
unsigned int size;
int err; int err;
size = mhi_netdev->mru ? mhi_netdev->mru : READ_ONCE(ndev->mtu);
while (!mhi_queue_is_full(mdev, DMA_FROM_DEVICE)) { while (!mhi_queue_is_full(mdev, DMA_FROM_DEVICE)) {
skb = netdev_alloc_skb(ndev, size); skb = netdev_alloc_skb(ndev, size);
if (unlikely(!skb)) if (unlikely(!skb))
......
...@@ -26,6 +26,15 @@ ...@@ -26,6 +26,15 @@
#define MBIM_NDP16_SIGN_MASK 0x00ffffff #define MBIM_NDP16_SIGN_MASK 0x00ffffff
/* Usual WWAN MTU */
#define MHI_MBIM_DEFAULT_MTU 1500
/* 3500 allows to optimize skb allocation, the skbs will basically fit in
* one 4K page. Large MBIM packets will simply be split over several MHI
* transfers and chained by the MHI net layer (zerocopy).
*/
#define MHI_MBIM_DEFAULT_MRU 3500
struct mbim_context { struct mbim_context {
u16 rx_seq; u16 rx_seq;
u16 tx_seq; u16 tx_seq;
...@@ -281,6 +290,8 @@ static int mbim_init(struct mhi_net_dev *mhi_netdev) ...@@ -281,6 +290,8 @@ static int mbim_init(struct mhi_net_dev *mhi_netdev)
return -ENOMEM; return -ENOMEM;
ndev->needed_headroom = sizeof(struct mbim_tx_hdr); ndev->needed_headroom = sizeof(struct mbim_tx_hdr);
ndev->mtu = MHI_MBIM_DEFAULT_MTU;
mhi_netdev->mru = MHI_MBIM_DEFAULT_MRU;
return 0; return 0;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment