Commit 4d30a83c authored by Christian König, committed by Alex Deucher

drm/amdkfd: use tlb_seq from the VM subsystem for SVM as well v2

Instead of hand-rolling the table_freed parameter.

v2: add some changes suggested by Philip
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Philip Yang <Philip.Yang@amd.com>
Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent bffa91da
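For context, the tlb_seq mechanism named in the subject (introduced on the KFD side by the parent commit) works roughly as sketched below: the VM subsystem bumps a per-VM sequence counter whenever a page table update may require a TLB flush, and kfd_flush_tlb() compares that counter against the last value it flushed for, so callers no longer need to thread a table_freed flag through the call chain. This is a simplified sketch, not the verbatim upstream code; the pdd->tlb_seq field and the flush-path details are assumptions based on that parent change.

void kfd_flush_tlb(struct kfd_process_device *pdd, enum TLB_FLUSH_TYPE type)
{
	struct amdgpu_vm *vm = drm_priv_to_vm(pdd->drm_priv);
	uint64_t tlb_seq = amdgpu_vm_tlb_seq(vm);

	/*
	 * Skip the flush when no page table update has bumped the sequence
	 * since the last flush; losing a race here only costs one extra,
	 * harmless flush.
	 */
	if (atomic64_xchg(&pdd->tlb_seq, tlb_seq) == tlb_seq)
		return;

	/* ... issue the actual VMID/PASID TLB invalidation here ... */
}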
@@ -273,9 +273,8 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
 int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
 		struct amdgpu_device *adev, struct kgd_mem *mem, void *drm_priv,
 		uint64_t *size);
-int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
-		struct amdgpu_device *adev, struct kgd_mem *mem, void *drm_priv,
-		bool *table_freed);
+int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(struct amdgpu_device *adev,
+					  struct kgd_mem *mem, void *drm_priv);
 int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
 		struct amdgpu_device *adev, struct kgd_mem *mem, void *drm_priv);
 int amdgpu_amdkfd_gpuvm_sync_memory(
...
@@ -1093,8 +1093,7 @@ static void unmap_bo_from_gpuvm(struct kgd_mem *mem,
 
 static int update_gpuvm_pte(struct kgd_mem *mem,
 			    struct kfd_mem_attachment *entry,
-			    struct amdgpu_sync *sync,
-			    bool *table_freed)
+			    struct amdgpu_sync *sync)
 {
 	struct amdgpu_bo_va *bo_va = entry->bo_va;
 	struct amdgpu_device *adev = entry->adev;
@@ -1105,7 +1104,7 @@ static int update_gpuvm_pte(struct kgd_mem *mem,
 		return ret;
 
 	/* Update the page tables  */
-	ret = amdgpu_vm_bo_update(adev, bo_va, false, table_freed);
+	ret = amdgpu_vm_bo_update(adev, bo_va, false, NULL);
 	if (ret) {
 		pr_err("amdgpu_vm_bo_update failed\n");
 		return ret;
@@ -1117,8 +1116,7 @@ static int update_gpuvm_pte(struct kgd_mem *mem,
 static int map_bo_to_gpuvm(struct kgd_mem *mem,
 			   struct kfd_mem_attachment *entry,
 			   struct amdgpu_sync *sync,
-			   bool no_update_pte,
-			   bool *table_freed)
+			   bool no_update_pte)
 {
 	int ret;
 
@@ -1135,7 +1133,7 @@ static int map_bo_to_gpuvm(struct kgd_mem *mem,
 	if (no_update_pte)
 		return 0;
 
-	ret = update_gpuvm_pte(mem, entry, sync, table_freed);
+	ret = update_gpuvm_pte(mem, entry, sync);
 	if (ret) {
 		pr_err("update_gpuvm_pte() failed\n");
 		goto update_gpuvm_pte_failed;
@@ -1745,7 +1743,7 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
 
 int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
 		struct amdgpu_device *adev, struct kgd_mem *mem,
-		void *drm_priv, bool *table_freed)
+		void *drm_priv)
 {
 	struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
 	int ret;
@@ -1832,7 +1830,7 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
 			 entry->va, entry->va + bo_size, entry);
 
 		ret = map_bo_to_gpuvm(mem, entry, ctx.sync,
-				      is_invalid_userptr, table_freed);
+				      is_invalid_userptr);
 		if (ret) {
 			pr_err("Failed to map bo to gpuvm\n");
 			goto out_unreserve;
@@ -2300,7 +2298,7 @@ static int validate_invalid_user_pages(struct amdkfd_process_info *process_info)
 			continue;
 
 		kfd_mem_dmaunmap_attachment(mem, attachment);
-		ret = update_gpuvm_pte(mem, attachment, &sync, NULL);
+		ret = update_gpuvm_pte(mem, attachment, &sync);
 		if (ret) {
 			pr_err("%s: update PTE failed\n", __func__);
 			/* make sure this gets validated again */
@@ -2506,7 +2504,7 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
 			continue;
 
 		kfd_mem_dmaunmap_attachment(mem, attachment);
-		ret = update_gpuvm_pte(mem, attachment, &sync_obj, NULL);
+		ret = update_gpuvm_pte(mem, attachment, &sync_obj);
 		if (ret) {
 			pr_debug("Memory eviction: update PTE failed. Try again\n");
 			goto validate_map_fail;
...
@@ -1146,7 +1146,6 @@ static int kfd_ioctl_map_memory_to_gpu(struct file *filep,
 	long err = 0;
 	int i;
 	uint32_t *devices_arr = NULL;
-	bool table_freed = false;
 
 	if (!args->n_devices) {
 		pr_debug("Device IDs array empty\n");
@@ -1208,7 +1207,7 @@ static int kfd_ioctl_map_memory_to_gpu(struct file *filep,
 
 		err = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
 			peer_pdd->dev->adev, (struct kgd_mem *)mem,
-			peer_pdd->drm_priv, &table_freed);
+			peer_pdd->drm_priv);
 		if (err) {
 			struct pci_dev *pdev = peer_pdd->dev->adev->pdev;
 
@@ -1233,14 +1232,12 @@ static int kfd_ioctl_map_memory_to_gpu(struct file *filep,
 	}
 
 	/* Flush TLBs after waiting for the page table updates to complete */
-	if (table_freed || !kfd_flush_tlb_after_unmap(dev)) {
-		for (i = 0; i < args->n_devices; i++) {
-			peer_pdd = kfd_process_device_data_by_id(p, devices_arr[i]);
-			if (WARN_ON_ONCE(!peer_pdd))
-				continue;
-			kfd_flush_tlb(peer_pdd, TLB_FLUSH_LEGACY);
-		}
+	for (i = 0; i < args->n_devices; i++) {
+		peer_pdd = kfd_process_device_data_by_id(p, devices_arr[i]);
+		if (WARN_ON_ONCE(!peer_pdd))
+			continue;
+		kfd_flush_tlb(peer_pdd, TLB_FLUSH_LEGACY);
 	}
 
 	kfree(devices_arr);
 	return err;
@@ -2206,8 +2203,8 @@ static int criu_restore_bo(struct kfd_process *p,
 		if (IS_ERR(peer_pdd))
 			return PTR_ERR(peer_pdd);
 
-		ret = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(peer->adev, kgd_mem, peer_pdd->drm_priv,
-							    NULL);
+		ret = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(peer->adev, kgd_mem,
+							    peer_pdd->drm_priv);
 		if (ret) {
 			pr_err("Failed to map to gpu %d/%d\n", j, p->n_pdds);
 			return ret;
...
@@ -722,7 +722,7 @@ static int kfd_process_alloc_gpuvm(struct kfd_process_device *pdd,
 		goto err_alloc_mem;
 
 	err = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(kdev->adev, *mem,
-			pdd->drm_priv, NULL);
+			pdd->drm_priv);
 	if (err)
 		goto err_map_mem;
 
...
@@ -1243,7 +1243,6 @@ svm_range_map_to_gpu(struct kfd_process_device *pdd, struct svm_range *prange,
 {
 	struct amdgpu_device *adev = pdd->dev->adev;
 	struct amdgpu_vm *vm = drm_priv_to_vm(pdd->drm_priv);
-	bool table_freed = false;
 	uint64_t pte_flags;
 	unsigned long last_start;
 	int last_domain;
@@ -1284,7 +1283,7 @@ svm_range_map_to_gpu(struct kfd_process_device *pdd, struct svm_range *prange,
 						last_start - prange->start,
 						NULL, dma_addr,
 						&vm->last_update,
-						&table_freed);
+						NULL);
 
 		for (j = last_start - prange->start; j <= i; j++)
 			dma_addr[j] |= last_domain;
@@ -1306,8 +1305,6 @@ svm_range_map_to_gpu(struct kfd_process_device *pdd, struct svm_range *prange,
 	if (fence)
 		*fence = dma_fence_get(vm->last_update);
 
-	if (table_freed)
-		kfd_flush_tlb(pdd, TLB_FLUSH_LEGACY);
 out:
 	return r;
 }
@@ -1363,6 +1360,8 @@ svm_range_map_to_gpus(struct svm_range *prange, unsigned long offset,
 				break;
 			}
 		}
+
+		kfd_flush_tlb(pdd, TLB_FLUSH_LEGACY);
 	}
 
 	return r;
...