Commit 1f2149c1 authored by Eric Dumazet, committed by David S. Miller

net: remove netdev_alloc_page and use __GFP_COLD

Given we no longer use the struct net_device *dev argument, and this
interface brings little benefit, remove netdev_{alloc|free}_page() to
debloat include/linux/skbuff.h a bit.

(Some drivers used a mix of these interfaces and alloc_pages())

When allocating a page that is given to a device for a DMA transfer
(device to memory), it makes sense to use a cache-cold one (__GFP_COLD).
Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
CC: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
CC: Dimitris Michailidis <dm@chelsio.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 84b40501
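
For context, a minimal sketch of the conversion pattern this patch applies in each
driver's RX refill path. The helper name my_rx_alloc_page and the exact flag
combination are illustrative, not taken from the patch; the point is that plain
alloc_page()/put_page() replace the netdev_{alloc|free}_page() wrappers (whose dev
argument was already unused), with __GFP_COLD added for pages the device fills by DMA:

	#include <linux/gfp.h>
	#include <linux/mm.h>
	#include <linux/dma-mapping.h>

	/* Hypothetical RX refill helper; not part of the patch itself. */
	static struct page *my_rx_alloc_page(struct device *dev, dma_addr_t *dma)
	{
		/* was: page = netdev_alloc_page(netdev);  (netdev never used) */
		struct page *page = alloc_page(GFP_ATOMIC | __GFP_COLD | __GFP_NOWARN);

		if (unlikely(!page))
			return NULL;

		*dma = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(dev, *dma))) {
			/* was: netdev_free_page(netdev, page); */
			put_page(page);
			return NULL;
		}
		return page;
	}

DMA_FROM_DEVICE here is equivalent to the PCI_DMA_FROMDEVICE constant the Chelsio
hunks below pass to dma_map_page().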
@@ -491,7 +491,7 @@ static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n,
 	__be64 *d = &q->desc[q->pidx];
 	struct rx_sw_desc *sd = &q->sdesc[q->pidx];
 
-	gfp |= __GFP_NOWARN;	/* failures are expected */
+	gfp |= __GFP_NOWARN | __GFP_COLD;
 
 #if FL_PG_ORDER > 0
 	/*
@@ -528,7 +528,7 @@ static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n,
 #endif
 
 	while (n--) {
-		pg = __netdev_alloc_page(adap->port[0], gfp);
+		pg = alloc_page(gfp);
 		if (unlikely(!pg)) {
 			q->alloc_failed++;
 			break;
@@ -537,7 +537,7 @@ static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n,
 		mapping = dma_map_page(adap->pdev_dev, pg, 0, PAGE_SIZE,
 				       PCI_DMA_FROMDEVICE);
 		if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) {
-			netdev_free_page(adap->port[0], pg);
+			put_page(pg);
 			goto out;
 		}
 		*d++ = cpu_to_be64(mapping);
...
@@ -653,8 +653,7 @@ static unsigned int refill_fl(struct adapter *adapter, struct sge_fl *fl,
 alloc_small_pages:
 	while (n--) {
-		page = __netdev_alloc_page(adapter->port[0],
-					   gfp | __GFP_NOWARN);
+		page = alloc_page(gfp | __GFP_NOWARN | __GFP_COLD);
 		if (unlikely(!page)) {
 			fl->alloc_failed++;
 			break;
@@ -664,7 +663,7 @@ static unsigned int refill_fl(struct adapter *adapter, struct sge_fl *fl,
 		dma_addr = dma_map_page(adapter->pdev_dev, page, 0, PAGE_SIZE,
 					PCI_DMA_FROMDEVICE);
 		if (unlikely(dma_mapping_error(adapter->pdev_dev, dma_addr))) {
-			netdev_free_page(adapter->port[0], page);
+			put_page(page);
 			break;
 		}
 		*d++ = cpu_to_be64(dma_addr);
...
@@ -6135,7 +6135,7 @@ static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
 		return true;
 
 	if (!page) {
-		page = netdev_alloc_page(rx_ring->netdev);
+		page = alloc_page(GFP_ATOMIC | __GFP_COLD);
 		bi->page = page;
 		if (unlikely(!page)) {
 			rx_ring->rx_stats.alloc_failed++;
...
@@ -1140,7 +1140,7 @@ void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count)
 		if (ring_is_ps_enabled(rx_ring)) {
 			if (!bi->page) {
-				bi->page = netdev_alloc_page(rx_ring->netdev);
+				bi->page = alloc_page(GFP_ATOMIC | __GFP_COLD);
 				if (!bi->page) {
 					rx_ring->rx_stats.alloc_rx_page_failed++;
 					goto no_buffers;
...
@@ -366,7 +366,7 @@ static void ixgbevf_alloc_rx_buffers(struct ixgbevf_adapter *adapter,
 		if (!bi->page_dma &&
 		    (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED)) {
 			if (!bi->page) {
-				bi->page = netdev_alloc_page(adapter->netdev);
+				bi->page = alloc_page(GFP_ATOMIC | __GFP_COLD);
 				if (!bi->page) {
 					adapter->alloc_rx_page_failed++;
 					goto no_buffers;
...
@@ -130,7 +130,7 @@ static int rx_submit(struct usbpn_dev *pnd, struct urb *req, gfp_t gfp_flags)
 	struct page *page;
 	int err;
 
-	page = __netdev_alloc_page(dev, gfp_flags);
+	page = alloc_page(gfp_flags);
 	if (!page)
 		return -ENOMEM;
@@ -140,7 +140,7 @@ static int rx_submit(struct usbpn_dev *pnd, struct urb *req, gfp_t gfp_flags)
 	err = usb_submit_urb(req, gfp_flags);
 	if (unlikely(err)) {
 		dev_dbg(&dev->dev, "RX submit error (%d)\n", err);
-		netdev_free_page(dev, page);
+		put_page(page);
 	}
 	return err;
 }
@@ -208,9 +208,9 @@ static void rx_complete(struct urb *req)
 		dev->stats.rx_errors++;
 resubmit:
 	if (page)
-		netdev_free_page(dev, page);
+		put_page(page);
 	if (req)
-		rx_submit(pnd, req, GFP_ATOMIC);
+		rx_submit(pnd, req, GFP_ATOMIC | __GFP_COLD);
 }
 
 static int usbpn_close(struct net_device *dev);
@@ -229,7 +229,7 @@ static int usbpn_open(struct net_device *dev)
 	for (i = 0; i < rxq_size; i++) {
 		struct urb *req = usb_alloc_urb(0, GFP_KERNEL);
 
-		if (!req || rx_submit(pnd, req, GFP_KERNEL)) {
+		if (!req || rx_submit(pnd, req, GFP_KERNEL | __GFP_COLD)) {
 			usbpn_close(dev);
 			return -ENOMEM;
 		}
...
@@ -298,11 +298,10 @@ static void pn_net_setup(struct net_device *dev)
 static int
 pn_rx_submit(struct f_phonet *fp, struct usb_request *req, gfp_t gfp_flags)
 {
-	struct net_device *dev = fp->dev;
 	struct page *page;
 	int err;
 
-	page = __netdev_alloc_page(dev, gfp_flags);
+	page = alloc_page(gfp_flags);
 	if (!page)
 		return -ENOMEM;
@@ -312,7 +311,7 @@ pn_rx_submit(struct f_phonet *fp, struct usb_request *req, gfp_t gfp_flags)
 	err = usb_ep_queue(fp->out_ep, req, gfp_flags);
 	if (unlikely(err))
-		netdev_free_page(dev, page);
+		put_page(page);
 	return err;
 }
@@ -374,9 +373,9 @@ static void pn_rx_complete(struct usb_ep *ep, struct usb_request *req)
 	}
 
 	if (page)
-		netdev_free_page(dev, page);
+		put_page(page);
 	if (req)
-		pn_rx_submit(fp, req, GFP_ATOMIC);
+		pn_rx_submit(fp, req, GFP_ATOMIC | __GFP_COLD);
 }
 
 /*-------------------------------------------------------------------------*/
@@ -436,7 +435,7 @@ static int pn_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
 			netif_carrier_on(dev);
 			for (i = 0; i < phonet_rxq_size; i++)
-				pn_rx_submit(fp, fp->out_reqv[i], GFP_ATOMIC);
+				pn_rx_submit(fp, fp->out_reqv[i], GFP_ATOMIC | __GFP_COLD);
 		}
 	spin_unlock(&port->lock);
 	return 0;
...
@@ -1668,38 +1668,6 @@ static inline struct sk_buff *netdev_alloc_skb_ip_align(struct net_device *dev,
 	return __netdev_alloc_skb_ip_align(dev, length, GFP_ATOMIC);
 }
 
-/**
- *	__netdev_alloc_page - allocate a page for ps-rx on a specific device
- *	@dev: network device to receive on
- *	@gfp_mask: alloc_pages_node mask
- *
- *	Allocate a new page. dev currently unused.
- *
- *	%NULL is returned if there is no free memory.
- */
-static inline struct page *__netdev_alloc_page(struct net_device *dev, gfp_t gfp_mask)
-{
-	return alloc_pages_node(NUMA_NO_NODE, gfp_mask, 0);
-}
-
-/**
- *	netdev_alloc_page - allocate a page for ps-rx on a specific device
- *	@dev: network device to receive on
- *
- *	Allocate a new page. dev currently unused.
- *
- *	%NULL is returned if there is no free memory.
- */
-static inline struct page *netdev_alloc_page(struct net_device *dev)
-{
-	return __netdev_alloc_page(dev, GFP_ATOMIC);
-}
-
-static inline void netdev_free_page(struct net_device *dev, struct page *page)
-{
-	__free_page(page);
-}
-
 /**
  * skb_frag_page - retrieve the page refered to by a paged fragment
  * @frag: the paged fragment
...