Commit 30580b33 authored by David S. Miller

Merge branch 'pagepool-api-and-dma-address-storage'

Jesper Dangaard Brouer says:

====================
Fix page_pool API and dma address storage

As pointed out by David Miller in [1], the current page_pool implementation
stores a dma_addr_t in page->private. This won't work on 32-bit platforms with
64-bit DMA addresses, since page->private is an unsigned long while
dma_addr_t is a u64.
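
To make the size mismatch concrete, here is a minimal userspace sketch
(not from the patch set; dma_addr_t is mocked as a u64, as on a 32-bit
kernel built with 64-bit DMA addressing):

	#include <stdint.h>
	#include <stdio.h>

	typedef uint64_t dma_addr_t;	/* mocked: 64-bit DMA addresses */

	int main(void)
	{
		dma_addr_t dma = 0x100000000ULL;	 /* valid address above 4GB */
		unsigned long priv = (unsigned long)dma; /* what page->private held */

		/* On a 32-bit build, unsigned long is 32 bits and the upper
		 * half of the DMA address is silently truncated away.
		 */
		printf("dma_addr=0x%llx  page->private=0x%lx\n",
		       (unsigned long long)dma, priv);
		return 0;
	}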

Since no driver is using the DMA-mapping capabilities of the API yet, let's
fix this by storing the information in 'struct page' itself, and using that
to store and retrieve DMA addresses from network drivers.

As long as the addresses returned from dma_map_page() are aligned, the first
bit, which is used by the compound-pages code, should not be set.
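
The reason bit 0 matters is that dma_addr shares the 'struct page' union
with compound_head, where a set bit 0 marks a compound tail page. A small
standalone sketch of that aliasing (mock_page is a hypothetical cut-down
layout, not the real struct page):

	#include <assert.h>
	#include <stddef.h>
	#include <stdint.h>

	/* Hypothetical mock: dma_addr shares union storage with
	 * compound_head, whose bit 0 flags a compound tail page.
	 */
	struct mock_page {
		unsigned long flags;
		union {
			unsigned long compound_head;	/* bit 0 set => tail page */
			uint64_t dma_addr;		/* the new page_pool field */
		};
	};

	int main(void)
	{
		struct mock_page page = { .dma_addr = 0x200001000ULL };

		/* Both names alias the same storage... */
		assert(offsetof(struct mock_page, dma_addr) ==
		       offsetof(struct mock_page, compound_head));

		/* ...so an aligned DMA address keeps bit 0 clear and the
		 * page cannot be mistaken for a compound tail page.
		 */
		assert((page.compound_head & 1UL) == 0);
		return 0;
	}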

Ilias tested the first two patches on the Espressobin's mvneta driver, for
which we have patches that use the DMA API of page_pool.

[1]: https://lore.kernel.org/netdev/20181207.230655.1261252486319967024.davem@davemloft.net/
====================
Signed-off-by: Jesper Dangaard Brouer <brouer@redhat.com>
Signed-off-by: Ilias Apalodimas <ilias.apalodimas@linaro.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
parents fb14b096 13f16d9d
include/linux/mm_types.h
@@ -95,6 +95,13 @@ struct page {
 			 */
 			unsigned long private;
 		};
+		struct {	/* page_pool used by netstack */
+			/**
+			 * @dma_addr: might require a 64-bit value even on
+			 * 32-bit architectures.
+			 */
+			dma_addr_t dma_addr;
+		};
 		struct {	/* slab, slob and slub */
 			union {
 				struct list_head slab_list;	/* uses lru */
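For a driver, the new field means the mapping established by the pool can be
read straight out of the page when refilling an RX ring. A hedged sketch of
such a refill path (the mydrv_* names and descriptor layout are hypothetical;
only page_pool_dev_alloc_pages() and page->dma_addr come from the actual API):

	#include <net/page_pool.h>

	struct mydrv_rx_desc {		/* hypothetical HW descriptor */
		u64 addr;
	};

	static int mydrv_rx_refill_one(struct mydrv_rx_desc *desc,
				       struct page_pool *pool)
	{
		struct page *page = page_pool_dev_alloc_pages(pool);

		if (!page)
			return -ENOMEM;

		/* With PP_FLAG_DMA_MAP the pool mapped the page at
		 * allocation time and stashed the address in
		 * page->dma_addr, so no per-buffer dma_map_page()
		 * call is needed here.
		 */
		desc->addr = page->dma_addr;
		return 0;
	}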
net/core/page_pool.c
@@ -136,17 +136,19 @@ static struct page *__page_pool_alloc_pages_slow(struct page_pool *pool,
 	if (!(pool->p.flags & PP_FLAG_DMA_MAP))
 		goto skip_dma_map;
 
-	/* Setup DMA mapping: use page->private for DMA-addr
+	/* Setup DMA mapping: use 'struct page' area for storing DMA-addr
 	 * since dma_addr_t can be either 32 or 64 bits and does not always fit
 	 * into page private data (i.e 32bit cpu with 64bit DMA caps)
 	 * This mapping is kept for lifetime of page, until leaving pool.
 	 */
-	dma = dma_map_page(pool->p.dev, page, 0,
-			   (PAGE_SIZE << pool->p.order),
-			   pool->p.dma_dir);
+	dma = dma_map_page_attrs(pool->p.dev, page, 0,
+				 (PAGE_SIZE << pool->p.order),
+				 pool->p.dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
 	if (dma_mapping_error(pool->p.dev, dma)) {
 		put_page(page);
 		return NULL;
 	}
-	set_page_private(page, dma); /* page->private = dma; */
+	page->dma_addr = dma;
 
 skip_dma_map:
 	/* When page just alloc'ed is should/must have refcnt 1. */
@@ -175,13 +177,17 @@ EXPORT_SYMBOL(page_pool_alloc_pages);
 static void __page_pool_clean_page(struct page_pool *pool,
 				   struct page *page)
 {
+	dma_addr_t dma;
+
 	if (!(pool->p.flags & PP_FLAG_DMA_MAP))
 		return;
 
+	dma = page->dma_addr;
+
 	/* DMA unmap */
-	dma_unmap_page(pool->p.dev, page_private(page),
-		       PAGE_SIZE << pool->p.order, pool->p.dma_dir);
-	set_page_private(page, 0);
+	dma_unmap_page_attrs(pool->p.dev, dma,
+			     PAGE_SIZE << pool->p.order, pool->p.dma_dir,
+			     DMA_ATTR_SKIP_CPU_SYNC);
+	page->dma_addr = 0;
 }
 
 /* Return a page to the page allocator, cleaning up our state */
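Note that both mapping calls above pass DMA_ATTR_SKIP_CPU_SYNC, so cache
maintenance becomes the caller's responsibility. A sketch of what that
implies on the driver side (mydrv_rx_buffer_sync, dev, rx_buf_size,
pkt_offset and pkt_len are placeholder names, not part of the page_pool API):

	#include <linux/dma-mapping.h>
	#include <linux/mm_types.h>

	static void mydrv_rx_buffer_sync(struct device *dev, struct page *page,
					 size_t rx_buf_size,
					 unsigned long pkt_offset, size_t pkt_len)
	{
		/* Give ownership to the device before posting to HW. */
		dma_sync_single_for_device(dev, page->dma_addr, rx_buf_size,
					   DMA_FROM_DEVICE);

		/* ... hardware DMAs a received frame into the buffer ... */

		/* Sync only the bytes HW wrote before the CPU reads them. */
		dma_sync_single_range_for_cpu(dev, page->dma_addr, pkt_offset,
					      pkt_len, DMA_FROM_DEVICE);
	}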