Commit 58e4d686 authored by Christian König

drm/ttm: cleanup ttm_mem_type_manager_func.get_node interface v3

Instead of signaling failure by setting the node pointer to
NULL do so by returning -ENOSPC.

v2: add memset() to make sure that mem is always initialized.
v3: drop memset() only set mm_node = NULL, move mm_node init in amdgpu
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Michael J. Ruhl <michael.j.ruhl@intel.com>
Link: https://patchwork.freedesktop.org/patch/373181/
parent 60e9eabf
...@@ -229,7 +229,7 @@ static int amdgpu_gtt_mgr_new(struct ttm_mem_type_manager *man, ...@@ -229,7 +229,7 @@ static int amdgpu_gtt_mgr_new(struct ttm_mem_type_manager *man,
if ((&tbo->mem == mem || tbo->mem.mem_type != TTM_PL_TT) && if ((&tbo->mem == mem || tbo->mem.mem_type != TTM_PL_TT) &&
atomic64_read(&mgr->available) < mem->num_pages) { atomic64_read(&mgr->available) < mem->num_pages) {
spin_unlock(&mgr->lock); spin_unlock(&mgr->lock);
return 0; return -ENOSPC;
} }
atomic64_sub(mem->num_pages, &mgr->available); atomic64_sub(mem->num_pages, &mgr->available);
spin_unlock(&mgr->lock); spin_unlock(&mgr->lock);
...@@ -250,7 +250,6 @@ static int amdgpu_gtt_mgr_new(struct ttm_mem_type_manager *man, ...@@ -250,7 +250,6 @@ static int amdgpu_gtt_mgr_new(struct ttm_mem_type_manager *man,
if (unlikely(r)) { if (unlikely(r)) {
kfree(node); kfree(node);
mem->mm_node = NULL; mem->mm_node = NULL;
r = 0;
goto err_out; goto err_out;
} }
} else { } else {
......
...@@ -319,8 +319,7 @@ static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man, ...@@ -319,8 +319,7 @@ static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man,
mem_bytes = (u64)mem->num_pages << PAGE_SHIFT; mem_bytes = (u64)mem->num_pages << PAGE_SHIFT;
if (atomic64_add_return(mem_bytes, &mgr->usage) > max_bytes) { if (atomic64_add_return(mem_bytes, &mgr->usage) > max_bytes) {
atomic64_sub(mem_bytes, &mgr->usage); atomic64_sub(mem_bytes, &mgr->usage);
mem->mm_node = NULL; return -ENOSPC;
return 0;
} }
if (place->flags & TTM_PL_FLAG_CONTIGUOUS) { if (place->flags & TTM_PL_FLAG_CONTIGUOUS) {
...@@ -400,7 +399,7 @@ static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man, ...@@ -400,7 +399,7 @@ static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man,
atomic64_sub(mem->num_pages << PAGE_SHIFT, &mgr->usage); atomic64_sub(mem->num_pages << PAGE_SHIFT, &mgr->usage);
kvfree(nodes); kvfree(nodes);
return r == -ENOSPC ? 0 : r; return r;
} }
/** /**
......
...@@ -75,10 +75,6 @@ nouveau_vram_manager_new(struct ttm_mem_type_manager *man, ...@@ -75,10 +75,6 @@ nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
ret = nouveau_mem_vram(reg, nvbo->contig, nvbo->page); ret = nouveau_mem_vram(reg, nvbo->contig, nvbo->page);
if (ret) { if (ret) {
nouveau_mem_del(reg); nouveau_mem_del(reg);
if (ret == -ENOSPC) {
reg->mm_node = NULL;
return 0;
}
return ret; return ret;
} }
...@@ -139,10 +135,6 @@ nv04_gart_manager_new(struct ttm_mem_type_manager *man, ...@@ -139,10 +135,6 @@ nv04_gart_manager_new(struct ttm_mem_type_manager *man,
reg->num_pages << PAGE_SHIFT, &mem->vma[0]); reg->num_pages << PAGE_SHIFT, &mem->vma[0]);
if (ret) { if (ret) {
nouveau_mem_del(reg); nouveau_mem_del(reg);
if (ret == -ENOSPC) {
reg->mm_node = NULL;
return 0;
}
return ret; return ret;
} }
......
...@@ -909,10 +909,10 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo, ...@@ -909,10 +909,10 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
ticket = dma_resv_locking_ctx(bo->base.resv); ticket = dma_resv_locking_ctx(bo->base.resv);
do { do {
ret = (*man->func->get_node)(man, bo, place, mem); ret = (*man->func->get_node)(man, bo, place, mem);
if (unlikely(ret != 0)) if (likely(!ret))
return ret;
if (mem->mm_node)
break; break;
if (unlikely(ret != -ENOSPC))
return ret;
ret = ttm_mem_evict_first(bdev, mem->mem_type, place, ctx, ret = ttm_mem_evict_first(bdev, mem->mem_type, place, ctx,
ticket); ticket);
if (unlikely(ret != 0)) if (unlikely(ret != 0))
...@@ -1056,12 +1056,11 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo, ...@@ -1056,12 +1056,11 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
man = &bdev->man[mem->mem_type]; man = &bdev->man[mem->mem_type];
ret = (*man->func->get_node)(man, bo, place, mem); ret = (*man->func->get_node)(man, bo, place, mem);
if (ret == -ENOSPC)
continue;
if (unlikely(ret)) if (unlikely(ret))
goto error; goto error;
if (!mem->mm_node)
continue;
ret = ttm_bo_add_move_fence(bo, man, mem, ctx->no_wait_gpu); ret = ttm_bo_add_move_fence(bo, man, mem, ctx->no_wait_gpu);
if (unlikely(ret)) { if (unlikely(ret)) {
(*man->func->put_node)(man, mem); (*man->func->put_node)(man, mem);
...@@ -1126,6 +1125,8 @@ static int ttm_bo_move_buffer(struct ttm_buffer_object *bo, ...@@ -1126,6 +1125,8 @@ static int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
mem.page_alignment = bo->mem.page_alignment; mem.page_alignment = bo->mem.page_alignment;
mem.bus.io_reserved_vm = false; mem.bus.io_reserved_vm = false;
mem.bus.io_reserved_count = 0; mem.bus.io_reserved_count = 0;
mem.mm_node = NULL;
/* /*
* Determine where to move the buffer. * Determine where to move the buffer.
*/ */
......
...@@ -86,7 +86,7 @@ static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man, ...@@ -86,7 +86,7 @@ static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
mem->start = node->start; mem->start = node->start;
} }
return 0; return ret;
} }
static void ttm_bo_man_put_node(struct ttm_mem_type_manager *man, static void ttm_bo_man_put_node(struct ttm_mem_type_manager *man,
......
...@@ -53,8 +53,6 @@ static int vmw_gmrid_man_get_node(struct ttm_mem_type_manager *man, ...@@ -53,8 +53,6 @@ static int vmw_gmrid_man_get_node(struct ttm_mem_type_manager *man,
(struct vmwgfx_gmrid_man *)man->priv; (struct vmwgfx_gmrid_man *)man->priv;
int id; int id;
mem->mm_node = NULL;
id = ida_alloc_max(&gman->gmr_ida, gman->max_gmr_ids - 1, GFP_KERNEL); id = ida_alloc_max(&gman->gmr_ida, gman->max_gmr_ids - 1, GFP_KERNEL);
if (id < 0) if (id < 0)
return (id != -ENOMEM ? 0 : id); return (id != -ENOMEM ? 0 : id);
...@@ -78,7 +76,7 @@ static int vmw_gmrid_man_get_node(struct ttm_mem_type_manager *man, ...@@ -78,7 +76,7 @@ static int vmw_gmrid_man_get_node(struct ttm_mem_type_manager *man,
gman->used_gmr_pages -= bo->num_pages; gman->used_gmr_pages -= bo->num_pages;
spin_unlock(&gman->lock); spin_unlock(&gman->lock);
ida_free(&gman->gmr_ida, id); ida_free(&gman->gmr_ida, id);
return 0; return -ENOSPC;
} }
static void vmw_gmrid_man_put_node(struct ttm_mem_type_manager *man, static void vmw_gmrid_man_put_node(struct ttm_mem_type_manager *man,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment