Commit 3084cf46 authored by Christian König, committed by Alex Deucher

drm/ttm: return -EBUSY on pipelining with no_gpu_wait (v2)

Setting the no_wait_gpu flag means that the allocated BO must be available
immediately and we can't wait for any GPU operation to finish.

v2: squash in mem leak fix, rebase
Signed-off-by: Christian König <christian.koenig@amd.com>
Acked-by: Felix Kuehling <Felix.Kuehling@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 6817bf28
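
For context, a minimal caller-side sketch of the behaviour this commit introduces: with no_wait_gpu set in the ttm_operation_ctx, a validation whose allocation would depend on a pipelined move now fails with -EBUSY instead of picking up a fence dependency. The helper name try_validate_no_wait and the retry policy are illustrative, not part of this commit; the BO and placement setup are assumed to exist elsewhere.

#include <drm/ttm/ttm_bo_api.h>

/*
 * Illustrative sketch (not from this commit): validate a BO with
 * no_wait_gpu set, so the allocation must succeed without waiting
 * for any GPU operation.
 */
static int try_validate_no_wait(struct ttm_buffer_object *bo,
				struct ttm_placement *placement)
{
	struct ttm_operation_ctx ctx = {
		.interruptible = false,
		.no_wait_gpu = true,	/* BO must be available immediately */
	};
	int ret;

	ret = ttm_bo_validate(bo, placement, &ctx);
	if (ret == -EBUSY) {
		/*
		 * After this patch, a pipelined move on the target memory
		 * type surfaces here instead of silently adding a fence
		 * dependency; the caller can retry later or take a
		 * blocking path.
		 */
	}
	return ret;
}
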
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -925,7 +925,8 @@ EXPORT_SYMBOL(ttm_bo_mem_put);
  */
 static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
 				 struct ttm_mem_type_manager *man,
-				 struct ttm_mem_reg *mem)
+				 struct ttm_mem_reg *mem,
+				 bool no_wait_gpu)
 {
 	struct dma_fence *fence;
 	int ret;
@@ -934,19 +935,22 @@ static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
 	fence = dma_fence_get(man->move);
 	spin_unlock(&man->move_lock);
 
-	if (fence) {
-		dma_resv_add_shared_fence(bo->base.resv, fence);
+	if (!fence)
+		return 0;
 
-		ret = dma_resv_reserve_shared(bo->base.resv, 1);
-		if (unlikely(ret)) {
-			dma_fence_put(fence);
-			return ret;
-		}
+	if (no_wait_gpu)
+		return -EBUSY;
+
+	dma_resv_add_shared_fence(bo->base.resv, fence);
 
-		dma_fence_put(bo->moving);
-		bo->moving = fence;
+	ret = dma_resv_reserve_shared(bo->base.resv, 1);
+	if (unlikely(ret)) {
+		dma_fence_put(fence);
+		return ret;
 	}
 
+	dma_fence_put(bo->moving);
+	bo->moving = fence;
 	return 0;
 }
 
@@ -977,7 +981,7 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
 			return ret;
 	} while (1);
 
-	return ttm_bo_add_move_fence(bo, man, mem);
+	return ttm_bo_add_move_fence(bo, man, mem, ctx->no_wait_gpu);
 }
 
 static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man,
@@ -1119,14 +1123,18 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 		if (unlikely(ret))
 			goto error;
 
-		if (mem->mm_node) {
-			ret = ttm_bo_add_move_fence(bo, man, mem);
-			if (unlikely(ret)) {
-				(*man->func->put_node)(man, mem);
-				goto error;
-			}
-			return 0;
+		if (!mem->mm_node)
+			continue;
+
+		ret = ttm_bo_add_move_fence(bo, man, mem, ctx->no_wait_gpu);
+		if (unlikely(ret)) {
+			(*man->func->put_node)(man, mem);
+			if (ret == -EBUSY)
+				continue;
+			goto error;
 		}
+		return 0;
 	}
 
 	for (i = 0; i < placement->num_busy_placement; ++i) {