Commit d4ec4bdc authored by Felix Kuehling's avatar Felix Kuehling Committed by Alex Deucher

drm/amdkfd: Allow access for mmapping KFD BOs

DRM render node file handles are used for CPU mapping of BOs using mmap
by the Thunk. It uses the DRM render node of the GPU where the BO was
allocated.

DRM allows mmap access automatically when it creates a GEM handle for a
BO. KFD BOs don't have GEM handles, so KFD needs to manage access
manually. Use drm_vma_node_allow to allow user mode to mmap BOs allocated
with kfd_ioctl_alloc_memory_of_gpu through the DRM render node that was
used in the kfd_ioctl_acquire_vm call for the same GPU.
Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
Acked-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Philip Yang <philip.yang@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent b40a6ab2
...@@ -245,7 +245,8 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu( ...@@ -245,7 +245,8 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
void *drm_priv, struct kgd_mem **mem, void *drm_priv, struct kgd_mem **mem,
uint64_t *offset, uint32_t flags); uint64_t *offset, uint32_t flags);
int amdgpu_amdkfd_gpuvm_free_memory_of_gpu( int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
struct kgd_dev *kgd, struct kgd_mem *mem, uint64_t *size); struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv,
uint64_t *size);
int amdgpu_amdkfd_gpuvm_map_memory_to_gpu( int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv); struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv);
int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu( int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
......
...@@ -1229,6 +1229,11 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu( ...@@ -1229,6 +1229,11 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
domain_string(alloc_domain), ret); domain_string(alloc_domain), ret);
goto err_bo_create; goto err_bo_create;
} }
ret = drm_vma_node_allow(&gobj->vma_node, drm_priv);
if (ret) {
pr_debug("Failed to allow vma node access. ret %d\n", ret);
goto err_node_allow;
}
bo = gem_to_amdgpu_bo(gobj); bo = gem_to_amdgpu_bo(gobj);
if (bo_type == ttm_bo_type_sg) { if (bo_type == ttm_bo_type_sg) {
bo->tbo.sg = sg; bo->tbo.sg = sg;
...@@ -1258,6 +1263,8 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu( ...@@ -1258,6 +1263,8 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
allocate_init_user_pages_failed: allocate_init_user_pages_failed:
remove_kgd_mem_from_kfd_bo_list(*mem, avm->process_info); remove_kgd_mem_from_kfd_bo_list(*mem, avm->process_info);
drm_vma_node_revoke(&gobj->vma_node, drm_priv);
err_node_allow:
amdgpu_bo_unref(&bo); amdgpu_bo_unref(&bo);
/* Don't unreserve system mem limit twice */ /* Don't unreserve system mem limit twice */
goto err_reserve_limit; goto err_reserve_limit;
...@@ -1275,7 +1282,8 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu( ...@@ -1275,7 +1282,8 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
} }
int amdgpu_amdkfd_gpuvm_free_memory_of_gpu( int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
struct kgd_dev *kgd, struct kgd_mem *mem, uint64_t *size) struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv,
uint64_t *size)
{ {
struct amdkfd_process_info *process_info = mem->process_info; struct amdkfd_process_info *process_info = mem->process_info;
unsigned long bo_size = mem->bo->tbo.base.size; unsigned long bo_size = mem->bo->tbo.base.size;
...@@ -1352,6 +1360,7 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu( ...@@ -1352,6 +1360,7 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
} }
/* Free the BO*/ /* Free the BO*/
drm_vma_node_revoke(&mem->bo->tbo.base.vma_node, drm_priv);
drm_gem_object_put(&mem->bo->tbo.base); drm_gem_object_put(&mem->bo->tbo.base);
mutex_destroy(&mem->lock); mutex_destroy(&mem->lock);
kfree(mem); kfree(mem);
...@@ -1663,6 +1672,7 @@ int amdgpu_amdkfd_gpuvm_import_dmabuf(struct kgd_dev *kgd, ...@@ -1663,6 +1672,7 @@ int amdgpu_amdkfd_gpuvm_import_dmabuf(struct kgd_dev *kgd,
struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv); struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
struct drm_gem_object *obj; struct drm_gem_object *obj;
struct amdgpu_bo *bo; struct amdgpu_bo *bo;
int ret;
if (dma_buf->ops != &amdgpu_dmabuf_ops) if (dma_buf->ops != &amdgpu_dmabuf_ops)
/* Can't handle non-graphics buffers */ /* Can't handle non-graphics buffers */
...@@ -1683,6 +1693,12 @@ int amdgpu_amdkfd_gpuvm_import_dmabuf(struct kgd_dev *kgd, ...@@ -1683,6 +1693,12 @@ int amdgpu_amdkfd_gpuvm_import_dmabuf(struct kgd_dev *kgd,
if (!*mem) if (!*mem)
return -ENOMEM; return -ENOMEM;
ret = drm_vma_node_allow(&obj->vma_node, drm_priv);
if (ret) {
kfree(mem);
return ret;
}
if (size) if (size)
*size = amdgpu_bo_size(bo); *size = amdgpu_bo_size(bo);
......
...@@ -1328,7 +1328,8 @@ static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep, ...@@ -1328,7 +1328,8 @@ static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep,
return 0; return 0;
err_free: err_free:
amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->kgd, (struct kgd_mem *)mem, NULL); amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->kgd, (struct kgd_mem *)mem,
pdd->drm_priv, NULL);
err_unlock: err_unlock:
mutex_unlock(&p->mutex); mutex_unlock(&p->mutex);
return err; return err;
...@@ -1365,7 +1366,7 @@ static int kfd_ioctl_free_memory_of_gpu(struct file *filep, ...@@ -1365,7 +1366,7 @@ static int kfd_ioctl_free_memory_of_gpu(struct file *filep,
} }
ret = amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->kgd, ret = amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->kgd,
(struct kgd_mem *)mem, &size); (struct kgd_mem *)mem, pdd->drm_priv, &size);
/* If freeing the buffer failed, leave the handle in place for /* If freeing the buffer failed, leave the handle in place for
* clean-up during process tear-down. * clean-up during process tear-down.
...@@ -1721,7 +1722,8 @@ static int kfd_ioctl_import_dmabuf(struct file *filep, ...@@ -1721,7 +1722,8 @@ static int kfd_ioctl_import_dmabuf(struct file *filep,
return 0; return 0;
err_free: err_free:
amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->kgd, (struct kgd_mem *)mem, NULL); amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->kgd, (struct kgd_mem *)mem,
pdd->drm_priv, NULL);
err_unlock: err_unlock:
mutex_unlock(&p->mutex); mutex_unlock(&p->mutex);
dma_buf_put(dmabuf); dma_buf_put(dmabuf);
......
...@@ -648,7 +648,8 @@ static void kfd_process_free_gpuvm(struct kgd_mem *mem, ...@@ -648,7 +648,8 @@ static void kfd_process_free_gpuvm(struct kgd_mem *mem,
struct kfd_dev *dev = pdd->dev; struct kfd_dev *dev = pdd->dev;
amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(dev->kgd, mem, pdd->drm_priv); amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(dev->kgd, mem, pdd->drm_priv);
amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->kgd, mem, NULL); amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->kgd, mem, pdd->drm_priv,
NULL);
} }
/* kfd_process_alloc_gpuvm - Allocate GPU VM for the KFD process /* kfd_process_alloc_gpuvm - Allocate GPU VM for the KFD process
...@@ -712,7 +713,8 @@ static int kfd_process_alloc_gpuvm(struct kfd_process_device *pdd, ...@@ -712,7 +713,8 @@ static int kfd_process_alloc_gpuvm(struct kfd_process_device *pdd,
return err; return err;
err_map_mem: err_map_mem:
amdgpu_amdkfd_gpuvm_free_memory_of_gpu(kdev->kgd, mem, NULL); amdgpu_amdkfd_gpuvm_free_memory_of_gpu(kdev->kgd, mem, pdd->drm_priv,
NULL);
err_alloc_mem: err_alloc_mem:
*kptr = NULL; *kptr = NULL;
return err; return err;
...@@ -907,7 +909,8 @@ static void kfd_process_device_free_bos(struct kfd_process_device *pdd) ...@@ -907,7 +909,8 @@ static void kfd_process_device_free_bos(struct kfd_process_device *pdd)
peer_pdd->dev->kgd, mem, peer_pdd->drm_priv); peer_pdd->dev->kgd, mem, peer_pdd->drm_priv);
} }
amdgpu_amdkfd_gpuvm_free_memory_of_gpu(pdd->dev->kgd, mem, NULL); amdgpu_amdkfd_gpuvm_free_memory_of_gpu(pdd->dev->kgd, mem,
pdd->drm_priv, NULL);
kfd_process_device_remove_obj_handle(pdd, id); kfd_process_device_remove_obj_handle(pdd, id);
} }
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment