Commit 3e640f1b authored by Lang Yu, committed by Andrey Grodzovsky

drm/amdgpu: use temporary GTT as bounce buffer

Currently, we have a limited GTT memory size and need a bounce buffer
when doing buffer migration between the VRAM and SYSTEM domains.

The problem is that under GTT memory pressure we can't do buffer
migration between the VRAM and SYSTEM domains, but in some cases we
really need that. This is especially true when validating a VRAM
backing store BO which resides in the SYSTEM domain.

v2: still account temporary GTT allocations
v3 (chk): revert to the simpler change for now
Signed-off-by: Lang Yu <Lang.Yu@amd.com>
Signed-off-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
Acked-by: Nirmoy Das <nirmoy.das@amd.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20210622162339.761651-2-andrey.grodzovsky@amd.com
parent ae1bef72
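For context before the diff: the rule this patch introduces is that GTT allocations flagged TTM_PL_FLAG_TEMPORARY bypass the manager's available-page budget, so a bounce buffer for a VRAM <-> SYSTEM move can always be placed even under GTT pressure. Below is a minimal standalone model of that rule; the gtt_model_* names and the tiny page budget are illustrative only, not kernel API.

/*
 * Model: regular GTT allocations are checked and charged against a
 * fixed page budget; TEMPORARY ones (bounce buffers) skip it entirely.
 */
#include <stdint.h>
#include <stdio.h>

#define MODEL_FLAG_TEMPORARY (1u << 0)

static int64_t gtt_available = 4;	/* tiny budget to force pressure */

static int gtt_model_alloc(uint32_t flags, int64_t num_pages)
{
	if (!(flags & MODEL_FLAG_TEMPORARY)) {
		if (gtt_available < num_pages)
			return -1;	/* -ENOSPC in the real manager */
		gtt_available -= num_pages;
	}
	return 0;
}

static void gtt_model_free(uint32_t flags, int64_t num_pages)
{
	/* temporary allocations were never charged, so never credit them */
	if (!(flags & MODEL_FLAG_TEMPORARY))
		gtt_available += num_pages;
}

int main(void)
{
	/* a regular allocation larger than the budget fails... */
	printf("regular alloc of 8 pages: %d\n", gtt_model_alloc(0, 8));
	/* ...but a temporary bounce buffer of the same size succeeds */
	printf("temporary alloc of 8 pages: %d\n",
	       gtt_model_alloc(MODEL_FLAG_TEMPORARY, 8));
	gtt_model_free(MODEL_FLAG_TEMPORARY, 8);
	printf("available after free: %lld pages\n", (long long)gtt_available);
	return 0;
}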
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
@@ -132,14 +132,15 @@ static int amdgpu_gtt_mgr_new(struct ttm_resource_manager *man,
 	struct amdgpu_gtt_node *node;
 	int r;
 
-	spin_lock(&mgr->lock);
-	if (tbo->resource && tbo->resource->mem_type != TTM_PL_TT &&
-	    atomic64_read(&mgr->available) < num_pages) {
+	if (!(place->flags & TTM_PL_FLAG_TEMPORARY)) {
+		spin_lock(&mgr->lock);
+		if (atomic64_read(&mgr->available) < num_pages) {
+			spin_unlock(&mgr->lock);
+			return -ENOSPC;
+		}
+		atomic64_sub(num_pages, &mgr->available);
 		spin_unlock(&mgr->lock);
-		return -ENOSPC;
 	}
-	atomic64_sub(num_pages, &mgr->available);
-	spin_unlock(&mgr->lock);
 
 	node = kzalloc(struct_size(node, base.mm_nodes, 1), GFP_KERNEL);
 	if (!node) {
@@ -175,7 +176,8 @@ static int amdgpu_gtt_mgr_new(struct ttm_resource_manager *man,
 	kfree(node);
 
 err_out:
-	atomic64_add(num_pages, &mgr->available);
+	if (!(place->flags & TTM_PL_FLAG_TEMPORARY))
+		atomic64_add(num_pages, &mgr->available);
 
 	return r;
 }
@@ -198,7 +200,9 @@ static void amdgpu_gtt_mgr_del(struct ttm_resource_manager *man,
 	if (drm_mm_node_allocated(&node->base.mm_nodes[0]))
 		drm_mm_remove_node(&node->base.mm_nodes[0]);
 	spin_unlock(&mgr->lock);
-	atomic64_add(res->num_pages, &mgr->available);
+
+	if (!(res->placement & TTM_PL_FLAG_TEMPORARY))
+		atomic64_add(res->num_pages, &mgr->available);
 
 	kfree(node);
 }
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -521,7 +521,7 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
 		hop->fpfn = 0;
 		hop->lpfn = 0;
 		hop->mem_type = TTM_PL_TT;
-		hop->flags = 0;
+		hop->flags = TTM_PL_FLAG_TEMPORARY;
 		return -EMULTIHOP;
 	}
 
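The amdgpu_ttm.c hunk above is where the bounce buffer becomes temporary: when amdgpu_bo_move cannot move a buffer between VRAM and SYSTEM directly, it returns -EMULTIHOP and fills *hop with an intermediate GTT placement, which TTM then uses to retry the move in two steps. A rough standalone model of that retry follows; the model_* names are illustrative, and the real handling lives in TTM core (ttm_bo.c), not in this sketch.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#ifndef EMULTIHOP
#define EMULTIHOP 72	/* Linux value, in case the libc lacks it */
#endif

#define MODEL_FLAG_TEMPORARY (1u << 0)

enum model_domain { MODEL_SYSTEM, MODEL_TT, MODEL_VRAM };

static const char * const model_dom_name[] = { "SYSTEM", "TT", "VRAM" };

struct model_hop {
	enum model_domain mem_type;
	uint32_t flags;
};

/* Driver move callback: VRAM <-> SYSTEM cannot be done in one step. */
static int model_driver_move(enum model_domain from, enum model_domain to,
			     struct model_hop *hop)
{
	if ((from == MODEL_VRAM && to == MODEL_SYSTEM) ||
	    (from == MODEL_SYSTEM && to == MODEL_VRAM)) {
		hop->mem_type = MODEL_TT;
		hop->flags = MODEL_FLAG_TEMPORARY;	/* the patched line */
		return -EMULTIHOP;
	}
	printf("  moved %s -> %s\n", model_dom_name[from], model_dom_name[to]);
	return 0;
}

/* Core loop: on -EMULTIHOP, bounce through the hop domain and retry. */
static int model_move(enum model_domain from, enum model_domain to)
{
	struct model_hop hop;
	int ret = model_driver_move(from, to, &hop);

	if (ret == -EMULTIHOP) {
		printf("  bouncing via %s (flags 0x%x)\n",
		       model_dom_name[hop.mem_type], hop.flags);
		ret = model_driver_move(from, hop.mem_type, &hop);
		if (!ret)
			ret = model_driver_move(hop.mem_type, to, &hop);
	}
	return ret;
}

int main(void)
{
	puts("VRAM -> SYSTEM:");
	return model_move(MODEL_VRAM, MODEL_SYSTEM);
}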