Commit 8f8cc3fb authored by Christian König, committed by Alex Deucher

drm/amdgpu: remove table_freed param from the VM code

Better to leave the decision of when to flush the VM changes in the TLB to
the VM code.
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Philip Yang <Philip.Yang@amd.com>
Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 4d30a83c
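
Illustrative sketch (not part of the commit): the diff below drops the table_freed out-parameter from amdgpu_vm_bo_update() and amdgpu_vm_bo_update_mapping(), so callers no longer receive a hint that page tables were freed; the hunks still reference amdgpu_vm_tlb_seq_cb and params.table_freed, which suggests the VM code now tracks the flush decision itself. The standalone C model below only mirrors that API shape change; all demo_* names are made up and none of this is driver code.

/* Standalone model of the API change: the caller no longer receives a
 * "table freed" hint through an out-parameter; the callee records the
 * need for a TLB flush itself (the real driver appears to do this via a
 * TLB sequence callback). All names here are illustrative stubs. */
#include <stdbool.h>
#include <stdio.h>

struct demo_vm {
	unsigned long tlb_seq;	/* bumped whenever mappings change */
};

/* Old shape: the callee reported "table freed" through an out-parameter
 * and every caller had to decide about flushing on its own. */
static int demo_update_old(struct demo_vm *vm, bool clear, bool *table_freed)
{
	(void)vm;
	(void)clear;
	if (table_freed)
		*table_freed = true;	/* caller must act on this */
	return 0;
}

/* New shape: no out-parameter; the callee bumps its own sequence counter
 * and the flush decision stays inside the VM code. */
static int demo_update_new(struct demo_vm *vm, bool clear)
{
	(void)clear;
	vm->tlb_seq++;
	return 0;
}

int main(void)
{
	struct demo_vm vm = { .tlb_seq = 0 };
	bool table_freed = false;

	demo_update_old(&vm, false, &table_freed);	/* old caller pattern */
	demo_update_new(&vm, false);			/* new caller pattern */

	printf("tlb_seq=%lu table_freed=%d\n", vm.tlb_seq, table_freed);
	return 0;
}
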
@@ -1104,7 +1104,7 @@ static int update_gpuvm_pte(struct kgd_mem *mem,
 		return ret;
 
 	/* Update the page tables */
-	ret = amdgpu_vm_bo_update(adev, bo_va, false, NULL);
+	ret = amdgpu_vm_bo_update(adev, bo_va, false);
 	if (ret) {
 		pr_err("amdgpu_vm_bo_update failed\n");
 		return ret;
...
@@ -806,7 +806,7 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
 	if (r)
 		return r;
 
-	r = amdgpu_vm_bo_update(adev, fpriv->prt_va, false, NULL);
+	r = amdgpu_vm_bo_update(adev, fpriv->prt_va, false);
 	if (r)
 		return r;
 
@@ -817,7 +817,7 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
 	if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) {
 		bo_va = fpriv->csa_va;
 		BUG_ON(!bo_va);
-		r = amdgpu_vm_bo_update(adev, bo_va, false, NULL);
+		r = amdgpu_vm_bo_update(adev, bo_va, false);
 		if (r)
 			return r;
 
@@ -836,7 +836,7 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
 		if (bo_va == NULL)
 			continue;
 
-		r = amdgpu_vm_bo_update(adev, bo_va, false, NULL);
+		r = amdgpu_vm_bo_update(adev, bo_va, false);
 		if (r)
 			return r;
 
...
@@ -612,7 +612,7 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
 
 	if (operation == AMDGPU_VA_OP_MAP ||
 	    operation == AMDGPU_VA_OP_REPLACE) {
-		r = amdgpu_vm_bo_update(adev, bo_va, false, NULL);
+		r = amdgpu_vm_bo_update(adev, bo_va, false);
 		if (r)
 			goto error;
 	}
...
@@ -808,7 +808,6 @@ static void amdgpu_vm_tlb_seq_cb(struct dma_fence *fence,
  * @res: ttm_resource to map
  * @pages_addr: DMA addresses to use for mapping
  * @fence: optional resulting fence
- * @table_freed: return true if page table is freed
  *
  * Fill in the page table entries between @start and @last.
  *
@@ -823,8 +822,7 @@ int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
 				uint64_t flags, uint64_t offset,
 				struct ttm_resource *res,
 				dma_addr_t *pages_addr,
-				struct dma_fence **fence,
-				bool *table_freed)
+				struct dma_fence **fence)
 {
 	struct amdgpu_vm_update_params params;
 	struct amdgpu_vm_tlb_seq_cb *tlb_cb;
@@ -938,9 +936,6 @@ int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
 		tlb_cb = NULL;
 	}
 
-	if (table_freed)
-		*table_freed = *table_freed || params.table_freed;
-
 error_free:
 	kfree(tlb_cb);
 
@@ -1000,7 +995,6 @@ void amdgpu_vm_get_memory(struct amdgpu_vm *vm, uint64_t *vram_mem,
  * @adev: amdgpu_device pointer
  * @bo_va: requested BO and VM object
 * @clear: if true clear the entries
- * @table_freed: return true if page table is freed
  *
  * Fill in the page table entries for @bo_va.
  *
@@ -1008,7 +1002,7 @@ void amdgpu_vm_get_memory(struct amdgpu_vm *vm, uint64_t *vram_mem,
  * 0 for success, -EINVAL for failure.
  */
 int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
-			bool clear, bool *table_freed)
+			bool clear)
 {
 	struct amdgpu_bo *bo = bo_va->base.bo;
 	struct amdgpu_vm *vm = bo_va->base.vm;
@@ -1087,7 +1081,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
 						resv, mapping->start,
 						mapping->last, update_flags,
 						mapping->offset, mem,
-						pages_addr, last_update, table_freed);
+						pages_addr, last_update);
 		if (r)
 			return r;
 	}
@@ -1281,7 +1275,7 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
 		r = amdgpu_vm_bo_update_mapping(adev, adev, vm, false, false,
 						resv, mapping->start,
 						mapping->last, init_pte_value,
-						0, NULL, NULL, &f, NULL);
+						0, NULL, NULL, &f);
 		amdgpu_vm_free_mapping(adev, vm, mapping, f);
 		if (r) {
 			dma_fence_put(f);
@@ -1323,7 +1317,7 @@ int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
 
 	list_for_each_entry_safe(bo_va, tmp, &vm->moved, base.vm_status) {
 		/* Per VM BOs never need to bo cleared in the page tables */
-		r = amdgpu_vm_bo_update(adev, bo_va, false, NULL);
+		r = amdgpu_vm_bo_update(adev, bo_va, false);
 		if (r)
 			return r;
 	}
@@ -1342,7 +1336,7 @@ int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
 		else
 			clear = true;
 
-		r = amdgpu_vm_bo_update(adev, bo_va, clear, NULL);
+		r = amdgpu_vm_bo_update(adev, bo_va, clear);
 		if (r)
 			return r;
 
@@ -2526,8 +2520,7 @@ bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid,
 	}
 
 	r = amdgpu_vm_bo_update_mapping(adev, adev, vm, true, false, NULL, addr,
-					addr, flags, value, NULL, NULL, NULL,
-					NULL);
+					addr, flags, value, NULL, NULL, NULL);
 	if (r)
 		goto error_unlock;
 
...
@@ -410,10 +410,10 @@ int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
 				uint64_t flags, uint64_t offset,
 				struct ttm_resource *res,
 				dma_addr_t *pages_addr,
-				struct dma_fence **fence, bool *free_table);
+				struct dma_fence **fence);
 int amdgpu_vm_bo_update(struct amdgpu_device *adev,
 			struct amdgpu_bo_va *bo_va,
-			bool clear, bool *table_freed);
+			bool clear);
 bool amdgpu_vm_evictable(struct amdgpu_bo *bo);
 void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
 			     struct amdgpu_bo *bo, bool evicted);
...
@@ -1190,7 +1190,7 @@ svm_range_unmap_from_gpu(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 
 	return amdgpu_vm_bo_update_mapping(adev, adev, vm, false, true, NULL,
 					   start, last, init_pte_value, 0,
-					   NULL, NULL, fence, NULL);
+					   NULL, NULL, fence);
 }
 
 static int
@@ -1282,8 +1282,7 @@ svm_range_map_to_gpu(struct kfd_process_device *pdd, struct svm_range *prange,
 						prange->start + i, pte_flags,
 						last_start - prange->start,
 						NULL, dma_addr,
-						&vm->last_update,
-						NULL);
+						&vm->last_update);
 
 		for (j = last_start - prange->start; j <= i; j++)
 			dma_addr[j] |= last_domain;
...