Commit d00e60ee authored by Yunsheng Lin, committed by David S. Miller

page_pool: disable dma mapping support for 32-bit arch with 64-bit DMA

32-bit architectures with 64-bit DMA addresses seem to be rare these days,
yet supporting them forces page pool to carry a fair amount of extra code
and complexity.

So disable DMA mapping support for such systems. If drivers really want
to work on them, they have to implement their own DMA-mapping fallback
tracking outside page_pool.
Reviewed-by: Ilias Apalodimas <ilias.apalodimas@linaro.org>
Signed-off-by: Yunsheng Lin <linyunsheng@huawei.com>
Acked-by: Jesper Dangaard Brouer <brouer@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 40088915
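
The whole change hinges on a single compile-time property: dma_addr_t is
64 bits wide when the architecture selects CONFIG_ARCH_DMA_ADDR_T_64BIT,
while page->dma_addr is an unsigned long, which is only 32 bits wide on
32-bit architectures. A minimal userspace sketch of that width check (the
typedef stands in for the kernel type and is an assumption for
illustration only):

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in for the kernel's dma_addr_t on a 32-bit arch built with
     * CONFIG_ARCH_DMA_ADDR_T_64BIT=y (illustrative assumption).
     */
    typedef uint64_t dma_addr_t;

    int main(void)
    {
        /* The same width comparison the patch moves into page_pool_init();
         * on a 64-bit host this build prints the "fits" branch.
         */
        if (sizeof(dma_addr_t) > sizeof(unsigned long))
            printf("DMA address cannot fit in page->dma_addr; "
                   "page pool DMA mapping is unsupported\n");
        else
            printf("DMA address fits in page->dma_addr\n");
        return 0;
    }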
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -104,19 +104,8 @@ struct page {
 			struct page_pool *pp;
 			unsigned long _pp_mapping_pad;
 			unsigned long dma_addr;
-			union {
-				/**
-				 * dma_addr_upper: might require a 64-bit
-				 * value on 32-bit architectures.
-				 */
-				unsigned long dma_addr_upper;
-				/**
-				 * For frag page support, not supported in
-				 * 32-bit architectures with 64-bit DMA.
-				 */
-				atomic_long_t pp_frag_count;
-			};
+			atomic_long_t pp_frag_count;
 		};
 		struct {	/* slab, slob and slub */
 			union {
 				struct list_head slab_list;
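
For context, pp_frag_count, which now occupies this slot unconditionally,
is a plain atomic reference count of outstanding fragment users of a pool
page. A simplified sketch of the helpers that drive it (the set helper
matches the one in include/net/page_pool.h; the sub-and-return variant is
a condensed stand-in for the real page_pool_atomic_sub_frag_count_return()):

    /* Simplified model of the frag-count helpers (kernel context) */
    static inline void page_pool_set_frag_count(struct page *page, long nr)
    {
        atomic_long_set(&page->pp_frag_count, nr);
    }

    static inline long page_pool_sub_frag_count_return(struct page *page,
                                                       long nr)
    {
        /* When this hits zero, the page has no fragment users left
         * and can be recycled or returned by the pool.
         */
        return atomic_long_sub_return(nr, &page->pp_frag_count);
    }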
--- a/include/net/page_pool.h
+++ b/include/net/page_pool.h
@@ -216,24 +216,14 @@ static inline void page_pool_recycle_direct(struct page_pool *pool,
 	page_pool_put_full_page(pool, page, true);
 }
 
-#define PAGE_POOL_DMA_USE_PP_FRAG_COUNT	\
-		(sizeof(dma_addr_t) > sizeof(unsigned long))
-
 static inline dma_addr_t page_pool_get_dma_addr(struct page *page)
 {
-	dma_addr_t ret = page->dma_addr;
-
-	if (PAGE_POOL_DMA_USE_PP_FRAG_COUNT)
-		ret |= (dma_addr_t)page->dma_addr_upper << 16 << 16;
-
-	return ret;
+	return page->dma_addr;
 }
 
 static inline void page_pool_set_dma_addr(struct page *page, dma_addr_t addr)
 {
 	page->dma_addr = addr;
-	if (PAGE_POOL_DMA_USE_PP_FRAG_COUNT)
-		page->dma_addr_upper = upper_32_bits(addr);
 }
 
 static inline void page_pool_set_frag_count(struct page *page, long nr)
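
One removed detail worth noting: the old reassembly shifted by 16 twice
rather than by 32 once. When dma_addr_t is only 32 bits wide, a single
"<< 32" shifts by the full width of the type, which is undefined behaviour
(and a compiler warning) even in a branch that is never taken at runtime;
two 16-bit shifts stay well-defined either way. A small standalone demo of
the reassembly (values and the typedef are illustrative):

    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t dma_addr_t;  /* stand-in; may be only 32-bit in
                                   * kernels without 64-bit DMA */

    int main(void)
    {
        unsigned long dma_addr = 0x89abcdefUL;       /* lower 32 bits */
        unsigned long dma_addr_upper = 0x01234567UL; /* upper 32 bits */

        /* The removed reassembly from page_pool_get_dma_addr():
         * "<< 16 << 16" instead of "<< 32" keeps the expression
         * well-defined even when dma_addr_t is only 32 bits wide.
         */
        dma_addr_t addr = dma_addr;
        addr |= (dma_addr_t)dma_addr_upper << 16 << 16;

        printf("reassembled: 0x%llx\n", (unsigned long long)addr);
        return 0;
    }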
--- a/net/core/page_pool.c
+++ b/net/core/page_pool.c
@@ -49,6 +49,12 @@ static int page_pool_init(struct page_pool *pool,
 	 * which is the XDP_TX use-case.
 	 */
 	if (pool->p.flags & PP_FLAG_DMA_MAP) {
+		/* DMA-mapping is not supported on 32-bit systems with
+		 * 64-bit DMA mapping.
+		 */
+		if (sizeof(dma_addr_t) > sizeof(unsigned long))
+			return -EOPNOTSUPP;
+
 		if ((pool->p.dma_dir != DMA_FROM_DEVICE) &&
 		    (pool->p.dma_dir != DMA_BIDIRECTIONAL))
 			return -EINVAL;
@@ -69,10 +75,6 @@ static int page_pool_init(struct page_pool *pool,
 		 */
 	}
 
-	if (PAGE_POOL_DMA_USE_PP_FRAG_COUNT &&
-	    pool->p.flags & PP_FLAG_PAGE_FRAG)
-		return -EINVAL;
-
 	if (ptr_ring_init(&pool->ring, ring_qsize, GFP_KERNEL) < 0)
 		return -ENOMEM;
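
From a driver's perspective the failure now surfaces cleanly at
pool-creation time rather than as silent address mis-tracking. A hedged
usage sketch (the function name, parameter values, and fallback comment
are illustrative, not from the patch):

    #include <linux/dma-mapping.h>
    #include <net/page_pool.h>

    /* Hypothetical driver setup: on a 32-bit kernel with 64-bit DMA,
     * page_pool_create() now fails with -EOPNOTSUPP when PP_FLAG_DMA_MAP
     * is requested.
     */
    static struct page_pool *rxq_create_pool(struct device *dev)
    {
        struct page_pool_params pp_params = {
            .flags     = PP_FLAG_DMA_MAP,
            .order     = 0,
            .pool_size = 256,            /* illustrative */
            .nid       = NUMA_NO_NODE,
            .dev       = dev,
            .dma_dir   = DMA_FROM_DEVICE,
        };
        struct page_pool *pool = page_pool_create(&pp_params);

        if (IS_ERR(pool))
            /* -EOPNOTSUPP here on a 32-bit arch with 64-bit DMA; the
             * driver would have to do its own dma_map_page()
             * bookkeeping outside page_pool instead.
             */
            return NULL;

        return pool;
    }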