Commit cd607f2c authored by Felix Fietkau, committed by Kalle Valo

wifi: mt76: fix crash with WED rx support enabled

If WED rx is enabled, rx buffers are added to a buffer pool that can be
filled from multiple page pools. Because buffers freed from rx poll are
not guaranteed to belong to the processed queue's page pool, lockless
caching must not be used in this case.

Cc: stable@vger.kernel.org
Fixes: 2f5c3c77 ("wifi: mt76: switch to page_pool allocator")
Signed-off-by: Felix Fietkau <nbd@nbd.name>
Acked-by: Lorenzo Bianconi <lorenzo@kernel.org>
Signed-off-by: Kalle Valo <kvalo@kernel.org>
Link: https://lore.kernel.org/r/20231208075004.69843-1-nbd@nbd.name
parent ce038edf
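To make the constraint from the commit message concrete, here is a minimal illustrative sketch, not part of the patch: mt76_rx_recycle() is a hypothetical wrapper, while mt76_queue_is_wed_rx() and mt76_put_page_pool_buf() are the real driver helpers that appear in the diff below. Direct recycling places a page straight into its page pool's lockless cache, which is only safe from the NAPI context that owns that pool, so it must be avoided whenever the buffer may have come from another queue's pool.

/*
 * Illustrative sketch only: mt76_rx_recycle() is a hypothetical
 * helper; mt76_queue_is_wed_rx() and mt76_put_page_pool_buf() are
 * the mt76 helpers used in the patch below.
 */
static void mt76_rx_recycle(struct mt76_queue *q, void *buf)
{
	/*
	 * With WED rx, q's buffers are drawn from a shared buffer pool
	 * that mixes pages from several page pools, so a buffer freed
	 * here may belong to another queue's pool.  Passing
	 * allow_direct == true would push the page into that pool's
	 * lockless cache from outside its owning NAPI context and
	 * corrupt the cache, hence the gating below.
	 */
	bool allow_direct = !mt76_queue_is_wed_rx(q);

	mt76_put_page_pool_buf(buf, allow_direct);
}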
@@ -783,7 +783,7 @@ mt76_dma_rx_reset(struct mt76_dev *dev, enum mt76_rxq_id qid)
 
 static void
 mt76_add_fragment(struct mt76_dev *dev, struct mt76_queue *q, void *data,
-		  int len, bool more, u32 info)
+		  int len, bool more, u32 info, bool allow_direct)
 {
 	struct sk_buff *skb = q->rx_head;
 	struct skb_shared_info *shinfo = skb_shinfo(skb);
@@ -795,7 +795,7 @@ mt76_add_fragment(struct mt76_dev *dev, struct mt76_queue *q, void *data,
 
 		skb_add_rx_frag(skb, nr_frags, page, offset, len, q->buf_size);
 	} else {
-		mt76_put_page_pool_buf(data, true);
+		mt76_put_page_pool_buf(data, allow_direct);
 	}
 
 	if (more)
@@ -815,6 +815,7 @@ mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
 	struct sk_buff *skb;
 	unsigned char *data;
 	bool check_ddone = false;
+	bool allow_direct = !mt76_queue_is_wed_rx(q);
 	bool more;
 
 	if (IS_ENABLED(CONFIG_NET_MEDIATEK_SOC_WED) &&
@@ -855,7 +856,8 @@ mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
 		}
 
 		if (q->rx_head) {
-			mt76_add_fragment(dev, q, data, len, more, info);
+			mt76_add_fragment(dev, q, data, len, more, info,
+					  allow_direct);
 			continue;
 		}
 
@@ -884,7 +886,7 @@ mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
 			continue;
 
 free_frag:
-		mt76_put_page_pool_buf(data, true);
+		mt76_put_page_pool_buf(data, allow_direct);
 	}
 
 	mt76_dma_rx_fill(dev, q, true);