Commit 9da050b0 authored by Chia-I Wu, committed by Alex Deucher

drm/amdkfd: fix potential kgd_mem UAFs

kgd_mem pointers returned by kfd_process_device_translate_handle are
only guaranteed to be valid while p->mutex is held. As soon as the mutex
is unlocked, another thread can free the BO.
Signed-off-by: Chia-I Wu <olvaffe@gmail.com>
Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent d71e38df
@@ -1312,14 +1312,14 @@ static int kfd_ioctl_map_memory_to_gpu(struct file *filep,
 		args->n_success = i+1;
 	}
 
-	mutex_unlock(&p->mutex);
-
 	err = amdgpu_amdkfd_gpuvm_sync_memory(dev->adev, (struct kgd_mem *) mem, true);
 	if (err) {
 		pr_debug("Sync memory failed, wait interrupted by user signal\n");
 		goto sync_memory_failed;
 	}
 
+	mutex_unlock(&p->mutex);
+
 	/* Flush TLBs after waiting for the page table updates to complete */
 	for (i = 0; i < args->n_devices; i++) {
 		peer_pdd = kfd_process_device_data_by_id(p, devices_arr[i]);
@@ -1335,9 +1335,9 @@ static int kfd_ioctl_map_memory_to_gpu(struct file *filep,
 bind_process_to_device_failed:
 get_mem_obj_from_handle_failed:
 map_memory_to_gpu_failed:
+sync_memory_failed:
 	mutex_unlock(&p->mutex);
 copy_from_user_failed:
-sync_memory_failed:
 	kfree(devices_arr);
 	return err;
@@ -1351,6 +1351,7 @@ static int kfd_ioctl_unmap_memory_from_gpu(struct file *filep,
 	void *mem;
 	long err = 0;
 	uint32_t *devices_arr = NULL, i;
+	bool flush_tlb;
 
 	if (!args->n_devices) {
 		pr_debug("Device IDs array empty\n");
@@ -1403,16 +1404,19 @@ static int kfd_ioctl_unmap_memory_from_gpu(struct file *filep,
 		}
 		args->n_success = i+1;
 	}
-	mutex_unlock(&p->mutex);
 
-	if (kfd_flush_tlb_after_unmap(pdd->dev)) {
+	flush_tlb = kfd_flush_tlb_after_unmap(pdd->dev);
+	if (flush_tlb) {
 		err = amdgpu_amdkfd_gpuvm_sync_memory(pdd->dev->adev,
 				(struct kgd_mem *) mem, true);
 		if (err) {
 			pr_debug("Sync memory failed, wait interrupted by user signal\n");
 			goto sync_memory_failed;
 		}
+	}
 
+	mutex_unlock(&p->mutex);
+
+	if (flush_tlb) {
 		/* Flush TLBs after waiting for the page table updates to complete */
 		for (i = 0; i < args->n_devices; i++) {
 			peer_pdd = kfd_process_device_data_by_id(p, devices_arr[i]);
@@ -1428,9 +1432,9 @@ static int kfd_ioctl_unmap_memory_from_gpu(struct file *filep,
 bind_process_to_device_failed:
 get_mem_obj_from_handle_failed:
 unmap_memory_from_gpu_failed:
+sync_memory_failed:
 	mutex_unlock(&p->mutex);
 copy_from_user_failed:
-sync_memory_failed:
 	kfree(devices_arr);
 	return err;
 }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment