Commit 4c5ac948 authored by Christian König, committed by Alex Deucher

drm/ttm: cleanup ttm_bo_mem_space

We tried this once before, but it turned out to be more
complicated than expected. With all the right prerequisites
in place it looks like we can do this now.
Signed-off-by: Christian König <christian.koenig@amd.com>
Acked-by: Chunming Zhou <david1.zhou@amd.com>
Tested-by: Pierre-Eric Pelloux-Prayer <pierre-eric.pelloux-prayer@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent af1dac01
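
For orientation, the sketch below paraphrases the control flow that results from this cleanup: the per-placement checks (memory-type lookup, manager availability, compatibility, caching flags) move into one helper that either fills in the memory descriptor or returns a "skip this placement" error, and the search loop in ttm_bo_mem_space() simply calls it. This is a standalone mock with invented stub types and helper names, not the kernel code; the real ttm_* structures and functions are in the diff below.

/*
 * Standalone mock of the post-cleanup search flow in ttm_bo_mem_space().
 * All types and helpers here (mock_place, mock_mem, check_placement, ...)
 * are invented for illustration; the real code uses struct ttm_place,
 * struct ttm_mem_reg, ttm_bo_mem_placement() etc. as shown in the diff.
 */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

#define MOCK_PL_SYSTEM 0
#define MOCK_PL_VRAM   2

struct mock_place { int mem_type; bool usable; };
struct mock_mem   { int mem_type; bool allocated; };

/* Counterpart of ttm_bo_mem_placement(): validate one placement and fill
 * in the descriptor, or return -EBUSY so the caller tries the next one. */
static int check_placement(const struct mock_place *place, struct mock_mem *mem)
{
	if (!place->usable)
		return -EBUSY;           /* skip this placement, not a hard failure */
	mem->mem_type = place->mem_type;
	return 0;
}

/* Pretend allocation from a memory manager; "fails" for VRAM to force the
 * fallback placement, loosely like (*man->func->get_node)() finding no space. */
static int get_node(struct mock_mem *mem)
{
	if (mem->mem_type == MOCK_PL_VRAM)
		return 0;                /* no space: mem->allocated stays false */
	mem->allocated = true;
	return 0;
}

/* Counterpart of the placement loop in ttm_bo_mem_space(). */
static int mem_space(const struct mock_place *places, int n, struct mock_mem *mem)
{
	for (int i = 0; i < n; i++) {
		int ret = check_placement(&places[i], mem);
		if (ret == -EBUSY)
			continue;        /* placement unusable, try the next one */
		if (ret)
			return ret;      /* hard error */
		mem->allocated = false;
		if (mem->mem_type == MOCK_PL_SYSTEM)
			return 0;        /* system memory needs no allocation */
		ret = get_node(mem);
		if (ret)
			return ret;
		if (mem->allocated)
			return 0;        /* got space in this domain */
	}
	return -ENOMEM;                  /* TTM would run a second, evicting pass here */
}

int main(void)
{
	const struct mock_place places[] = {
		{ MOCK_PL_VRAM, true },   /* preferred, but mock manager is full */
		{ MOCK_PL_SYSTEM, true }, /* fallback */
	};
	struct mock_mem mem = { 0 };
	int ret = mem_space(places, 2, &mem);

	printf("ret=%d mem_type=%d\n", ret, mem.mem_type);
	return 0;
}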
@@ -893,13 +893,12 @@ static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
  * space, or we've evicted everything and there isn't enough space.
  */
 static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
-				  uint32_t mem_type,
 				  const struct ttm_place *place,
 				  struct ttm_mem_reg *mem,
 				  struct ttm_operation_ctx *ctx)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
-	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
+	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
 	int ret;
 
 	do {
@@ -908,11 +907,11 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
 			return ret;
 		if (mem->mm_node)
 			break;
-		ret = ttm_mem_evict_first(bdev, mem_type, place, ctx);
+		ret = ttm_mem_evict_first(bdev, mem->mem_type, place, ctx);
 		if (unlikely(ret != 0))
 			return ret;
 	} while (1);
 
-	mem->mem_type = mem_type;
 	return ttm_bo_add_move_fence(bo, man, mem);
 }
@@ -960,6 +959,51 @@ static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
 	return true;
 }
 
+/**
+ * ttm_bo_mem_placement - check if placement is compatible
+ * @bo: BO to find memory for
+ * @place: where to search
+ * @mem: the memory object to fill in
+ * @ctx: operation context
+ *
+ * Check if placement is compatible and fill in mem structure.
+ * Returns -EBUSY if placement won't work or negative error code.
+ * 0 when placement can be used.
+ */
+static int ttm_bo_mem_placement(struct ttm_buffer_object *bo,
+				const struct ttm_place *place,
+				struct ttm_mem_reg *mem,
+				struct ttm_operation_ctx *ctx)
+{
+	struct ttm_bo_device *bdev = bo->bdev;
+	uint32_t mem_type = TTM_PL_SYSTEM;
+	struct ttm_mem_type_manager *man;
+	uint32_t cur_flags = 0;
+	int ret;
+
+	ret = ttm_mem_type_from_place(place, &mem_type);
+	if (ret)
+		return ret;
+
+	man = &bdev->man[mem_type];
+	if (!man->has_type || !man->use_type)
+		return -EBUSY;
+
+	if (!ttm_bo_mt_compatible(man, mem_type, place, &cur_flags))
+		return -EBUSY;
+
+	cur_flags = ttm_bo_select_caching(man, bo->mem.placement, cur_flags);
+	/*
+	 * Use the access and other non-mapping-related flag bits from
+	 * the memory placement flags to the current flags
+	 */
+	ttm_flag_masked(&cur_flags, place->flags, ~TTM_PL_MASK_MEMTYPE);
+
+	mem->mem_type = mem_type;
+	mem->placement = cur_flags;
+
+	return 0;
+}
+
 /**
  * Creates space for memory region @mem according to its type.
  *
@@ -974,11 +1018,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 			struct ttm_operation_ctx *ctx)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
-	struct ttm_mem_type_manager *man;
-	uint32_t mem_type = TTM_PL_SYSTEM;
-	uint32_t cur_flags = 0;
 	bool type_found = false;
-	bool type_ok = false;
 	int i, ret;
 
 	ret = reservation_object_reserve_shared(bo->resv, 1);
@@ -988,37 +1028,20 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 	mem->mm_node = NULL;
 	for (i = 0; i < placement->num_placement; ++i) {
 		const struct ttm_place *place = &placement->placement[i];
+		struct ttm_mem_type_manager *man;
 
-		ret = ttm_mem_type_from_place(place, &mem_type);
+		ret = ttm_bo_mem_placement(bo, place, mem, ctx);
+		if (ret == -EBUSY)
+			continue;
 		if (ret)
 			return ret;
 
-		man = &bdev->man[mem_type];
-		if (!man->has_type || !man->use_type)
-			continue;
-
-		type_ok = ttm_bo_mt_compatible(man, mem_type, place,
-					       &cur_flags);
-		if (!type_ok)
-			continue;
-
 		type_found = true;
-		cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
-						  cur_flags);
-		/*
-		 * Use the access and other non-mapping-related flag bits from
-		 * the memory placement flags to the current flags
-		 */
-		ttm_flag_masked(&cur_flags, place->flags,
-				~TTM_PL_MASK_MEMTYPE);
-
-		if (mem_type == TTM_PL_SYSTEM) {
-			mem->mem_type = mem_type;
-			mem->placement = cur_flags;
-			mem->mm_node = NULL;
+		mem->mm_node = NULL;
+		if (mem->mem_type == TTM_PL_SYSTEM)
 			return 0;
-		}
 
+		man = &bdev->man[mem->mem_type];
 		ret = (*man->func->get_node)(man, bo, place, mem);
 		if (unlikely(ret))
 			return ret;
@@ -1029,8 +1052,6 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 				(*man->func->put_node)(man, mem);
 				return ret;
 			}
-			mem->mem_type = mem_type;
-			mem->placement = cur_flags;
 			return 0;
 		}
 	}
@@ -1038,37 +1059,21 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 	for (i = 0; i < placement->num_busy_placement; ++i) {
 		const struct ttm_place *place = &placement->busy_placement[i];
 
-		ret = ttm_mem_type_from_place(place, &mem_type);
+		ret = ttm_bo_mem_placement(bo, place, mem, ctx);
+		if (ret == -EBUSY)
+			continue;
 		if (ret)
 			return ret;
 
-		man = &bdev->man[mem_type];
-		if (!man->has_type || !man->use_type)
-			continue;
-
-		if (!ttm_bo_mt_compatible(man, mem_type, place, &cur_flags))
-			continue;
-
 		type_found = true;
-		cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
-						  cur_flags);
-		/*
-		 * Use the access and other non-mapping-related flag bits from
-		 * the memory placement flags to the current flags
-		 */
-		ttm_flag_masked(&cur_flags, place->flags,
-				~TTM_PL_MASK_MEMTYPE);
-
-		if (mem_type == TTM_PL_SYSTEM) {
-			mem->mem_type = mem_type;
-			mem->placement = cur_flags;
-			mem->mm_node = NULL;
+		mem->mm_node = NULL;
+		if (mem->mem_type == TTM_PL_SYSTEM)
 			return 0;
-		}
 
-		ret = ttm_bo_mem_force_space(bo, mem_type, place, mem, ctx);
-		if (ret == 0 && mem->mm_node) {
-			mem->placement = cur_flags;
+		ret = ttm_bo_mem_force_space(bo, place, mem, ctx);
+		if (ret == 0 && mem->mm_node)
 			return 0;
-		}
 
 		if (ret && ret != -EBUSY)
 			return ret;
 	}
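
As context for the two loops above: a buffer's placement request is a pair of lists, a preferred list that is walked first without evicting anything, and a "busy" list walked second, where ttm_bo_mem_force_space() may evict other buffers to make room. The self-contained sketch below mirrors that structure with simplified stand-in types and placeholder flag values; the real definitions are struct ttm_place and struct ttm_placement in the TTM headers, and the names prefixed with PL_/place/placement here are illustrative stand-ins, not the kernel symbols.

/*
 * Sketch: what the placement lists walked by ttm_bo_mem_space() look like
 * from the driver side. The struct layouts and flag values below are
 * simplified stand-ins for the TTM placement definitions, reproduced here
 * only so the example is self-contained and compilable.
 */
#include <stdint.h>
#include <stdio.h>

#define PL_FLAG_SYSTEM (1u << 0)   /* stand-in for TTM_PL_FLAG_SYSTEM */
#define PL_FLAG_VRAM   (1u << 2)   /* stand-in for TTM_PL_FLAG_VRAM */
#define PL_FLAG_CACHED (1u << 16)  /* stand-in for TTM_PL_FLAG_CACHED */

struct place {                     /* cf. struct ttm_place */
	unsigned fpfn, lpfn;       /* page-frame range limits, 0 = no limit */
	uint32_t flags;            /* memory domain + caching flags */
};

struct placement {                 /* cf. struct ttm_placement */
	unsigned num_placement;
	const struct place *placement;       /* preferred domains, tried first */
	unsigned num_busy_placement;
	const struct place *busy_placement;  /* fallbacks, may trigger eviction */
};

int main(void)
{
	/* "Prefer VRAM, fall back to cached system memory if VRAM is full." */
	static const struct place preferred[] = {
		{ .fpfn = 0, .lpfn = 0, .flags = PL_FLAG_VRAM | PL_FLAG_CACHED },
	};
	static const struct place fallback[] = {
		{ .fpfn = 0, .lpfn = 0, .flags = PL_FLAG_SYSTEM | PL_FLAG_CACHED },
	};
	struct placement pl = {
		.num_placement = 1, .placement = preferred,
		.num_busy_placement = 1, .busy_placement = fallback,
	};

	/* ttm_bo_mem_space() first walks pl.placement[] (no eviction), then
	 * pl.busy_placement[] via ttm_bo_mem_force_space(), which may evict. */
	printf("%u preferred / %u busy placements\n",
	       pl.num_placement, pl.num_busy_placement);
	return 0;
}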