Commit 19dde589 authored by Christian König, committed by Alex Deucher

drm/ttm: cleanup ttm_page_alloc_dma.c

Remove unused defines and variables. Also stop computing the
gfp_flags when they aren't used.

No intended functional change.
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent a2f14820
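
The interesting part of the cleanup is the last hunk: ttm_dma_populate() used to build gfp_flags up front on every call, even though the value is only consumed when ttm_dma_find_pool() misses and a new pool has to be created. The sketch below restates that slow-path logic as a standalone helper. It is an illustration only: the helper name ttm_dma_pool_gfp_flags is made up, while the TTM_PAGE_FLAG_* and GFP_* flags are the kernel's real ones, taken straight from the hunk.

/* Hypothetical helper, for illustration: the gfp mask the populate
 * path builds once it knows a new pool actually has to be created. */
static gfp_t ttm_dma_pool_gfp_flags(unsigned page_flags)
{
	gfp_t gfp_flags;

	if (page_flags & TTM_PAGE_FLAG_DMA32)
		gfp_flags = GFP_USER | GFP_DMA32;	/* keep pages below 4GB */
	else
		gfp_flags = GFP_HIGHUSER;		/* highmem pages are fine */

	if (page_flags & TTM_PAGE_FLAG_ZERO_ALLOC)
		gfp_flags |= __GFP_ZERO;		/* caller wants zeroed pages */

	return gfp_flags;
}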
--- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
@@ -60,22 +60,15 @@
 #define NUM_PAGES_TO_ALLOC	(PAGE_SIZE/sizeof(struct page *))
 #define SMALL_ALLOCATION	4
 #define FREE_ALL_PAGES		(~0U)
-/* times are in msecs */
-#define IS_UNDEFINED	(0)
-#define IS_WC		(1<<1)
-#define IS_UC		(1<<2)
-#define IS_CACHED	(1<<3)
-#define IS_DMA32	(1<<4)
 
 enum pool_type {
-	POOL_IS_UNDEFINED,
-	POOL_IS_WC = IS_WC,
-	POOL_IS_UC = IS_UC,
-	POOL_IS_CACHED = IS_CACHED,
-	POOL_IS_WC_DMA32 = IS_WC | IS_DMA32,
-	POOL_IS_UC_DMA32 = IS_UC | IS_DMA32,
-	POOL_IS_CACHED_DMA32 = IS_CACHED | IS_DMA32,
+	IS_UNDEFINED = 0,
+	IS_WC = 1 << 1,
+	IS_UC = 1 << 2,
+	IS_CACHED = 1 << 3,
+	IS_DMA32 = 1 << 4
 };
 
 /*
  * The pool structure. There are usually six pools:
  * - generic (not restricted to DMA32):
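With the defines folded into the enum above, each pool attribute keeps its own distinct bit, so combined pool types remain expressible as masks, e.g. the removed POOL_IS_WC_DMA32 was simply IS_WC | IS_DMA32. A simplified, hypothetical sketch of how such a type can be composed from page flags (the real ttm_to_type() in this file does something along these lines, but this is not its exact code):

/* Illustration only: compose a pool type bitmask from page flags. */
static enum pool_type example_pool_type(unsigned page_flags, bool cached)
{
	enum pool_type type = cached ? IS_CACHED : IS_WC;

	if (page_flags & TTM_PAGE_FLAG_DMA32)
		type |= IS_DMA32;	/* distinct bit, so OR just works */

	return type;
}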
@@ -86,11 +79,9 @@ enum pool_type {
  * The other ones can be shrunk by the shrinker API if neccessary.
  * @pools: The 'struct device->dma_pools' link.
  * @type: Type of the pool
- * @lock: Protects the inuse_list and free_list from concurrnet access. Must be
+ * @lock: Protects the free_list from concurrnet access. Must be
  * used with irqsave/irqrestore variants because pool allocator maybe called
  * from delayed work.
- * @inuse_list: Pool of pages that are in use. The order is very important and
- * it is in the order that the TTM pages that are put back are in.
  * @free_list: Pool of pages that are free to be used. No order requirements.
  * @dev: The device that is associated with these pools.
  * @size: Size used during DMA allocation.
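The @lock rule documented above is worth spelling out: the pool can be reached from delayed work, so free_list must only be touched under the spinlock taken with the irqsave/irqrestore variants. A minimal sketch of that pattern, assuming this file's struct dma_page with its page_list member; the helper name is hypothetical:

/* Sketch of the documented locking rule: free_list is only walked or
 * modified under pool->lock, with interrupts saved and restored. */
static struct dma_page *example_take_free_page(struct dma_pool *pool)
{
	struct dma_page *d_page = NULL;
	unsigned long irq_flags;

	spin_lock_irqsave(&pool->lock, irq_flags);
	if (!list_empty(&pool->free_list)) {
		d_page = list_first_entry(&pool->free_list,
					  struct dma_page, page_list);
		list_del(&d_page->page_list);
	}
	spin_unlock_irqrestore(&pool->lock, irq_flags);

	return d_page;
}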
@@ -107,7 +98,6 @@ struct dma_pool {
 	struct list_head pools; /* The 'struct device->dma_pools link */
 	enum pool_type type;
 	spinlock_t lock;
-	struct list_head inuse_list;
 	struct list_head free_list;
 	struct device *dev;
 	unsigned size;
@@ -609,7 +599,6 @@ static struct dma_pool *ttm_dma_pool_init(struct device *dev, gfp_t flags,
 	sec_pool->pool = pool;
 	INIT_LIST_HEAD(&pool->free_list);
-	INIT_LIST_HEAD(&pool->inuse_list);
 	INIT_LIST_HEAD(&pool->pools);
 	spin_lock_init(&pool->lock);
 	pool->dev = dev;
@@ -882,22 +871,23 @@ int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev)
 	struct dma_pool *pool;
 	enum pool_type type;
 	unsigned i;
-	gfp_t gfp_flags;
 	int ret;
 
 	if (ttm->state != tt_unpopulated)
 		return 0;
 
 	type = ttm_to_type(ttm->page_flags, ttm->caching_state);
-	if (ttm->page_flags & TTM_PAGE_FLAG_DMA32)
-		gfp_flags = GFP_USER | GFP_DMA32;
-	else
-		gfp_flags = GFP_HIGHUSER;
-	if (ttm->page_flags & TTM_PAGE_FLAG_ZERO_ALLOC)
-		gfp_flags |= __GFP_ZERO;
-
 	pool = ttm_dma_find_pool(dev, type);
 	if (!pool) {
+		gfp_t gfp_flags;
+
+		if (ttm->page_flags & TTM_PAGE_FLAG_DMA32)
+			gfp_flags = GFP_USER | GFP_DMA32;
+		else
+			gfp_flags = GFP_HIGHUSER;
+		if (ttm->page_flags & TTM_PAGE_FLAG_ZERO_ALLOC)
+			gfp_flags |= __GFP_ZERO;
+
 		pool = ttm_dma_pool_init(dev, gfp_flags, type);
 		if (IS_ERR_OR_NULL(pool)) {
 			return -ENOMEM;