Commit 086c1616 authored by Jakub Kicinski

Merge branch 'update-coding-style-and-check-alloc_frag'

Haiyang Zhang says:

====================
Update coding style and check alloc_frag

Follow-up patches for the jumbo frame support.

As suggested by Jakub Kicinski, update the coding style, and check the
napi_alloc_frag return for a possible fallback to a single page.
====================

Link: https://lore.kernel.org/r/1682096818-30056-1-git-send-email-haiyangz@microsoft.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents 8e8e47d9 df18f2da
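
Before the hunks, a minimal sketch of the pattern both patches revolve around. This is illustrative only, not the driver code: alloc_rx_frag_checked is a hypothetical helper, and alloc_size stands in for the driver's mpc->rxbpre_alloc_size / rxq->alloc_size.

#include <linux/mm.h>
#include <linux/skbuff.h>

/* netdev_alloc_frag()/napi_alloc_frag() normally carve the frag out of a
 * high-order compound page, but under memory pressure the page-frag
 * allocator can fall back to a single order-0 page. For a request larger
 * than PAGE_SIZE the returned buffer is then too small, so the caller has
 * to detect the fallback and release the page instead of using it.
 */
static void *alloc_rx_frag_checked(unsigned int alloc_size)
{
	struct page *page;
	void *va;

	va = napi_alloc_frag(alloc_size);
	if (!va)
		return NULL;

	page = virt_to_head_page(va);
	/* Check if the frag falls back to single page */
	if (compound_order(page) < get_order(alloc_size)) {
		put_page(page);
		return NULL;
	}

	return va;
}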
@@ -553,6 +553,14 @@ static int mana_pre_alloc_rxbufs(struct mana_port_context *mpc, int new_mtu)
 			va = netdev_alloc_frag(mpc->rxbpre_alloc_size);
 			if (!va)
 				goto error;
+
+			page = virt_to_head_page(va);
+			/* Check if the frag falls back to single page */
+			if (compound_order(page) <
+			    get_order(mpc->rxbpre_alloc_size)) {
+				put_page(page);
+				goto error;
+			}
 		} else {
 			page = dev_alloc_page();
 			if (!page)
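
To make the order comparison above concrete: with 4 KiB pages, a pre-allocation size of 8 KiB gives get_order(8192) == 1, so the frag must come from at least an order-1 (two-page) compound page. If the page-frag allocator fell back, virt_to_head_page(va) is an order-0 page, compound_order() returns 0, the check 0 < 1 fires, and the undersized buffer is dropped via put_page() rather than being posted to hardware.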
@@ -563,7 +571,6 @@ static int mana_pre_alloc_rxbufs(struct mana_port_context *mpc, int new_mtu)
 
 		da = dma_map_single(dev, va + mpc->rxbpre_headroom,
 				    mpc->rxbpre_datasize, DMA_FROM_DEVICE);
-
 		if (dma_mapping_error(dev, da)) {
 			put_page(virt_to_head_page(va));
 			goto error;
@@ -1505,6 +1512,13 @@ static void *mana_get_rxfrag(struct mana_rxq *rxq, struct device *dev,
 
 		if (!va)
 			return NULL;
+
+		page = virt_to_head_page(va);
+		/* Check if the frag falls back to single page */
+		if (compound_order(page) < get_order(rxq->alloc_size)) {
+			put_page(page);
+			return NULL;
+		}
 	} else {
 		page = dev_alloc_page();
 		if (!page)
@@ -1515,7 +1529,6 @@ static void *mana_get_rxfrag(struct mana_rxq *rxq, struct device *dev,
 
 	*da = dma_map_single(dev, va + rxq->headroom, rxq->datasize,
 			     DMA_FROM_DEVICE);
-
 	if (dma_mapping_error(dev, *da)) {
 		put_page(virt_to_head_page(va));
 		return NULL;
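
The unchanged error path above shows the standard streaming-DMA pattern: the result of dma_map_single() must be tested with dma_mapping_error() before the address is handed to hardware, and on failure the frag is released through its compound head page. A minimal sketch, with map_rx_frag as a hypothetical helper and headroom/datasize as placeholder parameters:

#include <linux/dma-mapping.h>
#include <linux/mm.h>

/* Map an RX frag for device writes; on mapping failure, free the frag by
 * dropping a reference on its compound head page.
 */
static int map_rx_frag(struct device *dev, void *va, unsigned int headroom,
		       unsigned int datasize, dma_addr_t *da)
{
	*da = dma_map_single(dev, va + headroom, datasize, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, *da)) {
		put_page(virt_to_head_page(va));
		return -ENOMEM;
	}

	return 0;
}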
@@ -1525,14 +1538,13 @@ static void *mana_get_rxfrag(struct mana_rxq *rxq, struct device *dev,
 }
 
 /* Allocate frag for rx buffer, and save the old buf */
-static void mana_refill_rxoob(struct device *dev, struct mana_rxq *rxq,
-			      struct mana_recv_buf_oob *rxoob, void **old_buf)
+static void mana_refill_rx_oob(struct device *dev, struct mana_rxq *rxq,
+			       struct mana_recv_buf_oob *rxoob, void **old_buf)
 {
 	dma_addr_t da;
 	void *va;
 
 	va = mana_get_rxfrag(rxq, dev, &da, true);
-
 	if (!va)
 		return;
 
@@ -1597,7 +1609,7 @@ static void mana_process_rx_cqe(struct mana_rxq *rxq, struct mana_cq *cq,
 	rxbuf_oob = &rxq->rx_oobs[curr];
 	WARN_ON_ONCE(rxbuf_oob->wqe_inf.wqe_size_in_bu != 1);
 
-	mana_refill_rxoob(dev, rxq, rxbuf_oob, &old_buf);
+	mana_refill_rx_oob(dev, rxq, rxbuf_oob, &old_buf);
 
 	/* Unsuccessful refill will have old_buf == NULL.
 	 * In this case, mana_rx_skb() will drop the packet.