Commit c13c55d6 authored by Christian König, committed by Alex Deucher

drm/ttm: use an operation context for ttm_bo_mem_space v2

Pass a struct ttm_operation_ctx to ttm_bo_mem_space() instead of specifying interruptible and no_wait_gpu manually.

v2: rebase
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Michel Dänzer <michel.daenzer@amd.com>
Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
Tested-by: Dieter Nützel <Dieter@nuetzel-hh.de>
Tested-by: Michel Dänzer <michel.daenzer@amd.com>
Acked-by: Felix Kuehling <Felix.Kuehling@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 6fead44a
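The change is mechanical: every caller of ttm_bo_mem_space() bundles its interruptible and no_wait_gpu flags into a ttm_operation_ctx on the stack and passes a pointer to it. A minimal sketch of the before/after calling convention follows; it is not part of the patch, and the wrapper function and its name (example_find_space) are hypothetical, but the call pattern mirrors the hunks below.

/* Hypothetical helper, for illustration only. */
static int example_find_space(struct ttm_buffer_object *bo,
                              struct ttm_placement *placement,
                              struct ttm_mem_reg *mem,
                              bool interruptible, bool no_wait_gpu)
{
        /*
         * Before this patch the two flags were separate arguments:
         *
         *      return ttm_bo_mem_space(bo, placement, mem,
         *                              interruptible, no_wait_gpu);
         */

        /* After: the flags travel in an operation context. */
        struct ttm_operation_ctx ctx = { interruptible, no_wait_gpu };

        return ttm_bo_mem_space(bo, placement, mem, &ctx);
}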
@@ -657,6 +657,7 @@ void amdgpu_fw_reserve_vram_fini(struct amdgpu_device *adev)
  */
 int amdgpu_fw_reserve_vram_init(struct amdgpu_device *adev)
 {
+        struct ttm_operation_ctx ctx = { false, false };
         int r = 0;
         int i;
         u64 vram_size = adev->mc.visible_vram_size;
@@ -693,8 +694,8 @@ int amdgpu_fw_reserve_vram_init(struct amdgpu_device *adev)
         }
         ttm_bo_mem_put(&bo->tbo, &bo->tbo.mem);
-        r = ttm_bo_mem_space(&bo->tbo, &bo->placement, &bo->tbo.mem,
-                             false, false);
+        r = ttm_bo_mem_space(&bo->tbo, &bo->placement,
+                             &bo->tbo.mem, &ctx);
         if (r)
                 goto error_pin;
...
@@ -471,6 +471,7 @@ static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo,
                                 bool no_wait_gpu,
                                 struct ttm_mem_reg *new_mem)
 {
+        struct ttm_operation_ctx ctx = { interruptible, no_wait_gpu };
         struct amdgpu_device *adev;
         struct ttm_mem_reg *old_mem = &bo->mem;
         struct ttm_mem_reg tmp_mem;
@@ -488,8 +489,7 @@ static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo,
         placements.fpfn = 0;
         placements.lpfn = 0;
         placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
-        r = ttm_bo_mem_space(bo, &placement, &tmp_mem,
-                             interruptible, no_wait_gpu);
+        r = ttm_bo_mem_space(bo, &placement, &tmp_mem, &ctx);
         if (unlikely(r)) {
                 return r;
         }
@@ -518,6 +518,7 @@ static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo,
                                 bool no_wait_gpu,
                                 struct ttm_mem_reg *new_mem)
 {
+        struct ttm_operation_ctx ctx = { interruptible, no_wait_gpu };
         struct amdgpu_device *adev;
         struct ttm_mem_reg *old_mem = &bo->mem;
         struct ttm_mem_reg tmp_mem;
@@ -535,8 +536,7 @@ static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo,
         placements.fpfn = 0;
         placements.lpfn = 0;
         placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
-        r = ttm_bo_mem_space(bo, &placement, &tmp_mem,
-                             interruptible, no_wait_gpu);
+        r = ttm_bo_mem_space(bo, &placement, &tmp_mem, &ctx);
         if (unlikely(r)) {
                 return r;
         }
@@ -878,6 +878,7 @@ static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm,
 int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
 {
         struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
+        struct ttm_operation_ctx ctx = { false, false };
         struct amdgpu_ttm_tt *gtt = (void*)bo->ttm;
         struct ttm_mem_reg tmp;
         struct ttm_placement placement;
@@ -900,7 +901,7 @@ int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
         placements.flags = (bo->mem.placement & ~TTM_PL_MASK_MEM) |
                 TTM_PL_FLAG_TT;
-        r = ttm_bo_mem_space(bo, &placement, &tmp, false, false);
+        r = ttm_bo_mem_space(bo, &placement, &tmp, &ctx);
         if (unlikely(r))
                 return r;
...
@@ -1199,6 +1199,7 @@ static int
 nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
                       bool no_wait_gpu, struct ttm_mem_reg *new_reg)
 {
+        struct ttm_operation_ctx ctx = { intr, no_wait_gpu };
         struct ttm_place placement_memtype = {
                 .fpfn = 0,
                 .lpfn = 0,
@@ -1213,7 +1214,7 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
         tmp_reg = *new_reg;
         tmp_reg.mm_node = NULL;
-        ret = ttm_bo_mem_space(bo, &placement, &tmp_reg, intr, no_wait_gpu);
+        ret = ttm_bo_mem_space(bo, &placement, &tmp_reg, &ctx);
         if (ret)
                 return ret;
@@ -1235,6 +1236,7 @@ static int
 nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
                       bool no_wait_gpu, struct ttm_mem_reg *new_reg)
 {
+        struct ttm_operation_ctx ctx = { intr, no_wait_gpu };
         struct ttm_place placement_memtype = {
                 .fpfn = 0,
                 .lpfn = 0,
@@ -1249,7 +1251,7 @@ nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
         tmp_reg = *new_reg;
         tmp_reg.mm_node = NULL;
-        ret = ttm_bo_mem_space(bo, &placement, &tmp_reg, intr, no_wait_gpu);
+        ret = ttm_bo_mem_space(bo, &placement, &tmp_reg, &ctx);
         if (ret)
                 return ret;
...
@@ -311,6 +311,7 @@ static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
                                 bool no_wait_gpu,
                                 struct ttm_mem_reg *new_mem)
 {
+        struct ttm_operation_ctx ctx = { interruptible, no_wait_gpu };
         struct radeon_device *rdev;
         struct ttm_mem_reg *old_mem = &bo->mem;
         struct ttm_mem_reg tmp_mem;
@@ -328,8 +329,7 @@ static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
         placements.fpfn = 0;
         placements.lpfn = 0;
         placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
-        r = ttm_bo_mem_space(bo, &placement, &tmp_mem,
-                             interruptible, no_wait_gpu);
+        r = ttm_bo_mem_space(bo, &placement, &tmp_mem, &ctx);
         if (unlikely(r)) {
                 return r;
         }
@@ -358,6 +358,7 @@ static int radeon_move_ram_vram(struct ttm_buffer_object *bo,
                                 bool no_wait_gpu,
                                 struct ttm_mem_reg *new_mem)
 {
+        struct ttm_operation_ctx ctx = { interruptible, no_wait_gpu };
         struct radeon_device *rdev;
         struct ttm_mem_reg *old_mem = &bo->mem;
         struct ttm_mem_reg tmp_mem;
@@ -375,8 +376,7 @@ static int radeon_move_ram_vram(struct ttm_buffer_object *bo,
         placements.fpfn = 0;
         placements.lpfn = 0;
         placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
-        r = ttm_bo_mem_space(bo, &placement, &tmp_mem,
-                             interruptible, no_wait_gpu);
+        r = ttm_bo_mem_space(bo, &placement, &tmp_mem, &ctx);
         if (unlikely(r)) {
                 return r;
         }
...
@@ -656,6 +656,7 @@ EXPORT_SYMBOL(ttm_bo_unlock_delayed_workqueue);
 static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
                         bool no_wait_gpu)
 {
+        struct ttm_operation_ctx ctx = { interruptible, no_wait_gpu };
         struct ttm_bo_device *bdev = bo->bdev;
         struct ttm_mem_reg evict_mem;
         struct ttm_placement placement;
@@ -671,8 +672,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
         placement.num_placement = 0;
         placement.num_busy_placement = 0;
         bdev->driver->evict_flags(bo, &placement);
-        ret = ttm_bo_mem_space(bo, &placement, &evict_mem, interruptible,
-                               no_wait_gpu);
+        ret = ttm_bo_mem_space(bo, &placement, &evict_mem, &ctx);
         if (ret) {
                 if (ret != -ERESTARTSYS) {
                         pr_err("Failed to find memory space for buffer 0x%p eviction\n",
@@ -682,8 +682,8 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
                 goto out;
         }
-        ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible,
-                                     no_wait_gpu);
+        ret = ttm_bo_handle_move_mem(bo, &evict_mem, true,
+                                     interruptible, no_wait_gpu);
         if (unlikely(ret)) {
                 if (ret != -ERESTARTSYS)
                         pr_err("Buffer eviction failed\n");
@@ -903,8 +903,7 @@ static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
 int ttm_bo_mem_space(struct ttm_buffer_object *bo,
                      struct ttm_placement *placement,
                      struct ttm_mem_reg *mem,
-                     bool interruptible,
-                     bool no_wait_gpu)
+                     struct ttm_operation_ctx *ctx)
 {
         struct ttm_bo_device *bdev = bo->bdev;
         struct ttm_mem_type_manager *man;
@@ -999,7 +998,8 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
                 }
                 ret = ttm_bo_mem_force_space(bo, mem_type, place, mem,
-                                             interruptible, no_wait_gpu);
+                                             ctx->interruptible,
+                                             ctx->no_wait_gpu);
                 if (ret == 0 && mem->mm_node) {
                         mem->placement = cur_flags;
                         return 0;
@@ -1022,6 +1022,7 @@ static int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
                               bool interruptible,
                               bool no_wait_gpu)
 {
+        struct ttm_operation_ctx ctx = { interruptible, no_wait_gpu };
         int ret = 0;
         struct ttm_mem_reg mem;
@@ -1035,12 +1036,11 @@ static int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
         /*
          * Determine where to move the buffer.
          */
-        ret = ttm_bo_mem_space(bo, placement, &mem,
-                               interruptible, no_wait_gpu);
+        ret = ttm_bo_mem_space(bo, placement, &mem, &ctx);
         if (ret)
                 goto out_unlock;
-        ret = ttm_bo_handle_move_mem(bo, &mem, false,
-                                     interruptible, no_wait_gpu);
+        ret = ttm_bo_handle_move_mem(bo, &mem, false, interruptible,
+                                     no_wait_gpu);
 out_unlock:
         if (ret && mem.mm_node)
                 ttm_bo_mem_put(bo, &mem);
...
@@ -743,8 +743,7 @@ bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem);
 int ttm_bo_mem_space(struct ttm_buffer_object *bo,
                      struct ttm_placement *placement,
                      struct ttm_mem_reg *mem,
-                     bool interruptible,
-                     bool no_wait_gpu);
+                     struct ttm_operation_ctx *ctx);
 void ttm_bo_mem_put(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem);
 void ttm_bo_mem_put_locked(struct ttm_buffer_object *bo,
...
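For reference, the context members this patch relies on can be read off the hunks above: the positional initializers { interruptible, no_wait_gpu } and the ctx->interruptible / ctx->no_wait_gpu accesses in ttm_bo_mem_space() imply a layout along the lines of the sketch below. Any further members the in-tree struct ttm_operation_ctx may carry are omitted here.

/*
 * Sketch inferred from this diff alone; only the two members the patch
 * actually touches are shown.
 */
struct ttm_operation_ctx {
        bool interruptible;     /* waits may be interrupted by signals */
        bool no_wait_gpu;       /* do not block waiting for the GPU to free space */
};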