Commit 101b8104 authored by Philip Yang, committed by Alex Deucher

drm/amdkfd: Move dma unmapping after TLB flush

Otherwise the GPU may access the stale mapping and generate an IOMMU
IO_PAGE_FAULT.

Move this inside p->mutex to prevent a race condition when multiple
threads map and unmap concurrently.

With kfd_mem_dmaunmap_attachment removed from unmap_bo_from_gpuvm,
kfd_mem_dmaunmap_attachment is now called explicitly if mapping to GPUs
fails, and before freeing the mem attachment in case unmapping from GPUs
fails.
Signed-off-by: Philip Yang <Philip.Yang@amd.com>
Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 08abccc9
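
The ordering the patch establishes is: clear the GPU page-table mappings, flush the GPU TLBs, and only then tear down the DMA (IOMMU) mapping, all while holding the process mutex. Below is a minimal user-space C sketch of that flow; the helper names (unmap_from_gpu, flush_tlb, dmaunmap_attachment) and the pthread mutex are illustrative stand-ins for the amdgpu/KFD code shown in the diff, not the real API.

/*
 * Illustration only: the ordering enforced by this patch in
 * kfd_ioctl_unmap_memory_from_gpu():
 *   1. unmap from the GPU page tables,
 *   2. flush the GPU TLBs,
 *   3. only then drop the DMA mapping,
 * all under one lock so concurrent map/unmap calls cannot interleave.
 * The helpers below are hypothetical stand-ins, not the amdgpu/KFD API.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t process_mutex = PTHREAD_MUTEX_INITIALIZER; /* models p->mutex */

static void unmap_from_gpu(int dev)
{
	printf("dev %d: clear GPU page-table entries\n", dev);
}

static void flush_tlb(int dev)
{
	printf("dev %d: heavyweight TLB flush\n", dev);
}

static void dmaunmap_attachment(int dev)
{
	printf("dev %d: dma unmap (safe now, no stale translations)\n", dev);
}

static void unmap_memory_from_gpus(int n_devices)
{
	int i;

	pthread_mutex_lock(&process_mutex);

	/* Step 1: remove the GPUVM mappings on every device. */
	for (i = 0; i < n_devices; i++)
		unmap_from_gpu(i);

	/*
	 * Steps 2 and 3: flush TLBs, then drop the DMA mapping.  Unmapping
	 * the DMA address before the flush would let in-flight translations
	 * hit a stale IOMMU mapping and raise IO_PAGE_FAULT.
	 */
	for (i = 0; i < n_devices; i++) {
		flush_tlb(i);
		dmaunmap_attachment(i);
	}

	pthread_mutex_unlock(&process_mutex);
}

int main(void)
{
	unmap_memory_from_gpus(2);
	return 0;
}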
@@ -303,6 +303,7 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(struct amdgpu_device *adev,
 		struct kgd_mem *mem, void *drm_priv);
 int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
 		struct amdgpu_device *adev, struct kgd_mem *mem, void *drm_priv);
+void amdgpu_amdkfd_gpuvm_dmaunmap_mem(struct kgd_mem *mem, void *drm_priv);
 int amdgpu_amdkfd_gpuvm_sync_memory(
 		struct amdgpu_device *adev, struct kgd_mem *mem, bool intr);
 int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct kgd_mem *mem,
......
@@ -733,7 +733,7 @@ kfd_mem_dmaunmap_sg_bo(struct kgd_mem *mem,
 	enum dma_data_direction dir;
 
 	if (unlikely(!ttm->sg)) {
-		pr_err("SG Table of BO is UNEXPECTEDLY NULL");
+		pr_debug("SG Table of BO is NULL");
 		return;
 	}
@@ -1202,8 +1202,6 @@ static void unmap_bo_from_gpuvm(struct kgd_mem *mem,
 	amdgpu_vm_clear_freed(adev, vm, &bo_va->last_pt_update);
 
 	amdgpu_sync_fence(sync, bo_va->last_pt_update);
-
-	kfd_mem_dmaunmap_attachment(mem, entry);
 }
 
 static int update_gpuvm_pte(struct kgd_mem *mem,
@@ -1258,6 +1256,7 @@ static int map_bo_to_gpuvm(struct kgd_mem *mem,
 
 update_gpuvm_pte_failed:
 	unmap_bo_from_gpuvm(mem, entry, sync);
+	kfd_mem_dmaunmap_attachment(mem, entry);
 	return ret;
 }
@@ -1863,8 +1862,10 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
 		mem->va + bo_size * (1 + mem->aql_queue));
 
 	/* Remove from VM internal data structures */
-	list_for_each_entry_safe(entry, tmp, &mem->attachments, list)
+	list_for_each_entry_safe(entry, tmp, &mem->attachments, list) {
+		kfd_mem_dmaunmap_attachment(mem, entry);
 		kfd_mem_detach(entry);
+	}
 
 	ret = unreserve_bo_and_vms(&ctx, false, false);
@@ -2038,6 +2039,23 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
 	return ret;
 }
 
+void amdgpu_amdkfd_gpuvm_dmaunmap_mem(struct kgd_mem *mem, void *drm_priv)
+{
+	struct kfd_mem_attachment *entry;
+	struct amdgpu_vm *vm;
+
+	vm = drm_priv_to_vm(drm_priv);
+
+	mutex_lock(&mem->lock);
+
+	list_for_each_entry(entry, &mem->attachments, list) {
+		if (entry->bo_va->base.vm == vm)
+			kfd_mem_dmaunmap_attachment(mem, entry);
+	}
+
+	mutex_unlock(&mem->lock);
+}
+
 int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
 		struct amdgpu_device *adev, struct kgd_mem *mem, void *drm_priv)
 {
......
@@ -1432,17 +1432,21 @@ static int kfd_ioctl_unmap_memory_from_gpu(struct file *filep,
 			goto sync_memory_failed;
 		}
 	}
-	mutex_unlock(&p->mutex);
 
-	if (flush_tlb) {
-		/* Flush TLBs after waiting for the page table updates to complete */
-		for (i = 0; i < args->n_devices; i++) {
-			peer_pdd = kfd_process_device_data_by_id(p, devices_arr[i]);
-			if (WARN_ON_ONCE(!peer_pdd))
-				continue;
+	/* Flush TLBs after waiting for the page table updates to complete */
+	for (i = 0; i < args->n_devices; i++) {
+		peer_pdd = kfd_process_device_data_by_id(p, devices_arr[i]);
+		if (WARN_ON_ONCE(!peer_pdd))
+			continue;
+		if (flush_tlb)
 			kfd_flush_tlb(peer_pdd, TLB_FLUSH_HEAVYWEIGHT);
-		}
+
+		/* Remove dma mapping after tlb flush to avoid IO_PAGE_FAULT */
+		amdgpu_amdkfd_gpuvm_dmaunmap_mem(mem, peer_pdd->drm_priv);
 	}
 
+	mutex_unlock(&p->mutex);
+
 	kfree(devices_arr);
 
 	return 0;
......