Commit f95f51a4 authored by Felix Kuehling, committed by Alex Deucher

drm/amdgpu: Add notifier lock for KFD userptrs

Add a per-process MMU notifier lock for processing notifiers from
userptrs. Use that lock to properly synchronize page table updates with
MMU notifiers.
Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
Reviewed-by: Xiaogang Chen <Xiaogang.Chen@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent fe6872ad
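
The locking scheme adopted below is the generic mmu_interval_notifier pattern documented in mm/mmu_notifier.c: the invalidate callback advances the interval sequence and marks the buffer invalid under a driver mutex, while the restore side faults the pages with no locks held and only commits the new addresses after re-checking the sequence under the same mutex. A minimal sketch of that pattern, with illustrative names (my_buf, my_invalidate() and my_restore() are not amdgpu code):

#include <linux/mm.h>
#include <linux/hmm.h>
#include <linux/mmu_notifier.h>
#include <linux/mutex.h>

struct my_buf {
	struct mmu_interval_notifier notifier;
	struct mutex *notifier_lock;	/* per-process lock, like notifier_lock in this patch */
	unsigned int invalid;		/* eviction count, protected by notifier_lock */
	struct hmm_range range;		/* notifier/start/end/hmm_pfns assumed set up elsewhere */
};

/* Runs in MMU notifier context: only mark the buffer invalid, no real work */
static bool my_invalidate(struct mmu_interval_notifier *mni,
			  const struct mmu_notifier_range *range,
			  unsigned long cur_seq)
{
	struct my_buf *buf = container_of(mni, struct my_buf, notifier);

	if (!mmu_notifier_range_blockable(range))
		return false;

	mutex_lock(buf->notifier_lock);
	mmu_interval_set_seq(mni, cur_seq);
	buf->invalid++;
	mutex_unlock(buf->notifier_lock);
	return true;
}

/* Passed to mmu_interval_notifier_insert() when the buffer is created */
static const struct mmu_interval_notifier_ops my_notifier_ops = {
	.invalidate = my_invalidate,
};

/* Restore path: fault pages outside the lock, commit under the lock */
static int my_restore(struct my_buf *buf, struct mm_struct *mm)
{
	int ret;

again:
	buf->range.notifier_seq = mmu_interval_read_begin(&buf->notifier);
	mmap_read_lock(mm);
	ret = hmm_range_fault(&buf->range);
	mmap_read_unlock(mm);
	if (ret) {
		if (ret == -EBUSY)
			goto again;
		return ret;
	}

	mutex_lock(buf->notifier_lock);
	if (mmu_interval_read_retry(&buf->notifier, buf->range.notifier_seq)) {
		/* Invalidated again while faulting: fault once more */
		mutex_unlock(buf->notifier_lock);
		goto again;
	}
	/* Safe to update GPU page tables from buf->range.hmm_pfns here */
	buf->invalid = 0;
	mutex_unlock(buf->notifier_lock);
	return 0;
}

In this patch, amdgpu_amdkfd_evict_userptr() plays the invalidate role under the new per-process notifier_lock, while update_invalid_user_pages() and confirm_valid_user_pages_locked() split the restore role between the unlocked fault step and the locked commit step.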
@@ -29,6 +29,7 @@
 #include <linux/mm.h>
 #include <linux/kthread.h>
 #include <linux/workqueue.h>
+#include <linux/mmu_notifier.h>
 #include <kgd_kfd_interface.h>
 #include <drm/ttm/ttm_execbuf_util.h>
 #include "amdgpu_sync.h"
@@ -65,6 +66,7 @@ struct kgd_mem {
 	struct mutex lock;
 	struct amdgpu_bo *bo;
 	struct dma_buf *dmabuf;
+	struct hmm_range *range;
 	struct list_head attachments;
 	/* protected by amdkfd_process_info.lock */
 	struct ttm_validate_buffer validate_list;
@@ -75,7 +77,7 @@ struct kgd_mem {
 	uint32_t alloc_flags;
-	atomic_t invalid;
+	uint32_t invalid;
 	struct amdkfd_process_info *process_info;
 	struct amdgpu_sync sync;
@@ -131,7 +133,8 @@ struct amdkfd_process_info {
 	struct amdgpu_amdkfd_fence *eviction_fence;
 	/* MMU-notifier related fields */
-	atomic_t evicted_bos;
+	struct mutex notifier_lock;
+	uint32_t evicted_bos;
 	struct delayed_work restore_userptr_work;
 	struct pid *pid;
 	bool block_mmu_notifications;
@@ -180,7 +183,8 @@ int kfd_debugfs_kfd_mem_limits(struct seq_file *m, void *data);
 bool amdkfd_fence_check_mm(struct dma_fence *f, struct mm_struct *mm);
 struct amdgpu_amdkfd_fence *to_amdgpu_amdkfd_fence(struct dma_fence *f);
 int amdgpu_amdkfd_remove_fence_on_pt_pd_bos(struct amdgpu_bo *bo);
-int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem, struct mm_struct *mm);
+int amdgpu_amdkfd_evict_userptr(struct mmu_interval_notifier *mni,
+				unsigned long cur_seq, struct kgd_mem *mem);
 #else
 static inline
 bool amdkfd_fence_check_mm(struct dma_fence *f, struct mm_struct *mm)
@@ -201,7 +205,8 @@ int amdgpu_amdkfd_remove_fence_on_pt_pd_bos(struct amdgpu_bo *bo)
 }
 static inline
-int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem, struct mm_struct *mm)
+int amdgpu_amdkfd_evict_userptr(struct mmu_interval_notifier *mni,
+				unsigned long cur_seq, struct kgd_mem *mem)
 {
 	return 0;
 }
...
@@ -964,7 +964,9 @@ static int init_user_pages(struct kgd_mem *mem, uint64_t user_addr,
 	 * later stage when it is scheduled by another ioctl called by
 	 * CRIU master process for the target pid for restore.
 	 */
-	atomic_inc(&mem->invalid);
+	mutex_lock(&process_info->notifier_lock);
+	mem->invalid++;
+	mutex_unlock(&process_info->notifier_lock);
 	mutex_unlock(&process_info->lock);
 	return 0;
 }
@@ -1301,6 +1303,7 @@ static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info,
 		return -ENOMEM;
 	mutex_init(&info->lock);
+	mutex_init(&info->notifier_lock);
 	INIT_LIST_HEAD(&info->vm_list_head);
 	INIT_LIST_HEAD(&info->kfd_bo_list);
 	INIT_LIST_HEAD(&info->userptr_valid_list);
@@ -1317,7 +1320,6 @@ static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info,
 	}
 	info->pid = get_task_pid(current->group_leader, PIDTYPE_PID);
-	atomic_set(&info->evicted_bos, 0);
 	INIT_DELAYED_WORK(&info->restore_userptr_work,
 			  amdgpu_amdkfd_restore_userptr_worker);
@@ -1372,6 +1374,7 @@ static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info,
 	put_pid(info->pid);
 create_evict_fence_fail:
 	mutex_destroy(&info->lock);
+	mutex_destroy(&info->notifier_lock);
 	kfree(info);
 	}
 	return ret;
@@ -1496,6 +1499,7 @@ void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
 		cancel_delayed_work_sync(&process_info->restore_userptr_work);
 		put_pid(process_info->pid);
 		mutex_destroy(&process_info->lock);
+		mutex_destroy(&process_info->notifier_lock);
 		kfree(process_info);
 	}
 }
@@ -1548,7 +1552,9 @@ int amdgpu_amdkfd_criu_resume(void *p)
 	mutex_lock(&pinfo->lock);
 	pr_debug("scheduling work\n");
-	atomic_inc(&pinfo->evicted_bos);
+	mutex_lock(&pinfo->notifier_lock);
+	pinfo->evicted_bos++;
+	mutex_unlock(&pinfo->notifier_lock);
 	if (!READ_ONCE(pinfo->block_mmu_notifications)) {
 		ret = -EINVAL;
 		goto out_unlock;
@@ -1773,8 +1779,13 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
 	list_del(&bo_list_entry->head);
 	mutex_unlock(&process_info->lock);
-	/* No more MMU notifiers */
-	amdgpu_hmm_unregister(mem->bo);
+	/* Cleanup user pages and MMU notifiers */
+	if (amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm)) {
+		amdgpu_hmm_unregister(mem->bo);
+		mutex_lock(&process_info->notifier_lock);
+		amdgpu_ttm_tt_discard_user_pages(mem->bo->tbo.ttm, mem->range);
+		mutex_unlock(&process_info->notifier_lock);
+	}
 	ret = reserve_bo_and_cond_vms(mem, NULL, BO_VM_ALL, &ctx);
 	if (unlikely(ret))
@@ -1864,6 +1875,16 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
 	 */
 	mutex_lock(&mem->process_info->lock);
+	/* Lock notifier lock. If we find an invalid userptr BO, we can be
+	 * sure that the MMU notifier is no longer running
+	 * concurrently and the queues are actually stopped
+	 */
+	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
+		mutex_lock(&mem->process_info->notifier_lock);
+		is_invalid_userptr = !!mem->invalid;
+		mutex_unlock(&mem->process_info->notifier_lock);
+	}
 	mutex_lock(&mem->lock);
 	domain = mem->domain;
@@ -2241,34 +2262,38 @@ int amdgpu_amdkfd_gpuvm_import_dmabuf(struct amdgpu_device *adev,
  *
  * Runs in MMU notifier, may be in RECLAIM_FS context. This means it
  * cannot do any memory allocations, and cannot take any locks that
- * are held elsewhere while allocating memory. Therefore this is as
- * simple as possible, using atomic counters.
+ * are held elsewhere while allocating memory.
  *
  * It doesn't do anything to the BO itself. The real work happens in
  * restore, where we get updated page addresses. This function only
  * ensures that GPU access to the BO is stopped.
  */
-int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem,
-				struct mm_struct *mm)
+int amdgpu_amdkfd_evict_userptr(struct mmu_interval_notifier *mni,
+				unsigned long cur_seq, struct kgd_mem *mem)
 {
 	struct amdkfd_process_info *process_info = mem->process_info;
-	int evicted_bos;
 	int r = 0;
-	/* Do not process MMU notifications until stage-4 IOCTL is received */
+	/* Do not process MMU notifications during CRIU restore until
+	 * KFD_CRIU_OP_RESUME IOCTL is received
+	 */
 	if (READ_ONCE(process_info->block_mmu_notifications))
 		return 0;
-	atomic_inc(&mem->invalid);
-	evicted_bos = atomic_inc_return(&process_info->evicted_bos);
-	if (evicted_bos == 1) {
+	mutex_lock(&process_info->notifier_lock);
+	mmu_interval_set_seq(mni, cur_seq);
+	mem->invalid++;
+	if (++process_info->evicted_bos == 1) {
 		/* First eviction, stop the queues */
-		r = kgd2kfd_quiesce_mm(mm, KFD_QUEUE_EVICTION_TRIGGER_USERPTR);
+		r = kgd2kfd_quiesce_mm(mni->mm,
+				       KFD_QUEUE_EVICTION_TRIGGER_USERPTR);
 		if (r)
 			pr_err("Failed to quiesce KFD\n");
 		schedule_delayed_work(&process_info->restore_userptr_work,
			msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS));
 	}
+	mutex_unlock(&process_info->notifier_lock);
 	return r;
 }
@@ -2285,54 +2310,58 @@ static int update_invalid_user_pages(struct amdkfd_process_info *process_info,
 	struct kgd_mem *mem, *tmp_mem;
 	struct amdgpu_bo *bo;
 	struct ttm_operation_ctx ctx = { false, false };
-	int invalid, ret;
+	uint32_t invalid;
+	int ret = 0;
 
-	/* Move all invalidated BOs to the userptr_inval_list and
-	 * release their user pages by migration to the CPU domain
-	 */
+	mutex_lock(&process_info->notifier_lock);
+
+	/* Move all invalidated BOs to the userptr_inval_list */
 	list_for_each_entry_safe(mem, tmp_mem,
 				 &process_info->userptr_valid_list,
-				 validate_list.head) {
-		if (!atomic_read(&mem->invalid))
-			continue; /* BO is still valid */
-
-		bo = mem->bo;
-
-		if (amdgpu_bo_reserve(bo, true))
-			return -EAGAIN;
-		amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
-		ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
-		amdgpu_bo_unreserve(bo);
-		if (ret) {
-			pr_err("%s: Failed to invalidate userptr BO\n",
-			       __func__);
-			return -EAGAIN;
-		}
-
-		list_move_tail(&mem->validate_list.head,
-			       &process_info->userptr_inval_list);
-	}
-
-	if (list_empty(&process_info->userptr_inval_list))
-		return 0; /* All evicted userptr BOs were freed */
+				 validate_list.head)
+		if (mem->invalid)
+			list_move_tail(&mem->validate_list.head,
+				       &process_info->userptr_inval_list);
 
 	/* Go through userptr_inval_list and update any invalid user_pages */
 	list_for_each_entry(mem, &process_info->userptr_inval_list,
 			    validate_list.head) {
-		struct hmm_range *range;
-
-		invalid = atomic_read(&mem->invalid);
+		invalid = mem->invalid;
 		if (!invalid)
 			/* BO hasn't been invalidated since the last
-			 * revalidation attempt. Keep its BO list.
+			 * revalidation attempt. Keep its page list.
 			 */
 			continue;
 
 		bo = mem->bo;
 
+		amdgpu_ttm_tt_discard_user_pages(bo->tbo.ttm, mem->range);
+		mem->range = NULL;
+
+		/* BO reservations and getting user pages (hmm_range_fault)
+		 * must happen outside the notifier lock
+		 */
+		mutex_unlock(&process_info->notifier_lock);
+
+		/* Move the BO to system (CPU) domain if necessary to unmap
+		 * and free the SG table
+		 */
+		if (bo->tbo.resource->mem_type != TTM_PL_SYSTEM) {
+			if (amdgpu_bo_reserve(bo, true))
+				return -EAGAIN;
+			amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
+			ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+			amdgpu_bo_unreserve(bo);
+			if (ret) {
+				pr_err("%s: Failed to invalidate userptr BO\n",
+				       __func__);
+				return -EAGAIN;
+			}
+		}
+
 		/* Get updated user pages */
 		ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages,
-						   &range);
+						   &mem->range);
 		if (ret) {
 			pr_debug("Failed %d to get user pages\n", ret);
@@ -2345,30 +2374,32 @@ static int update_invalid_user_pages(struct amdkfd_process_info *process_info,
 		 */
 		if (ret != -EFAULT)
 			return ret;
-		} else {
-			/*
-			 * FIXME: Cannot ignore the return code, must hold
-			 * notifier_lock
-			 */
-			amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm, range);
+
+			ret = 0;
 		}
+
+		mutex_lock(&process_info->notifier_lock);
+
 		/* Mark the BO as valid unless it was invalidated
 		 * again concurrently.
 		 */
-		if (atomic_cmpxchg(&mem->invalid, invalid, 0) != invalid)
-			return -EAGAIN;
+		if (mem->invalid != invalid) {
+			ret = -EAGAIN;
+			goto unlock_out;
+		}
+		mem->invalid = 0;
 	}
 
-	return 0;
+unlock_out:
+	mutex_unlock(&process_info->notifier_lock);
+
+	return ret;
 }
 
 /* Validate invalid userptr BOs
  *
- * Validates BOs on the userptr_inval_list, and moves them back to the
- * userptr_valid_list. Also updates GPUVM page tables with new page
- * addresses and waits for the page table updates to complete.
+ * Validates BOs on the userptr_inval_list. Also updates GPUVM page tables
+ * with new page addresses and waits for the page table updates to complete.
  */
 static int validate_invalid_user_pages(struct amdkfd_process_info *process_info)
 {
@@ -2439,9 +2470,6 @@ static int validate_invalid_user_pages(struct amdkfd_process_info *process_info)
 			}
 		}
 
-		list_move_tail(&mem->validate_list.head,
-			       &process_info->userptr_valid_list);
-
 		/* Update mapping. If the BO was not validated
 		 * (because we couldn't get user pages), this will
 		 * clear the page table entries, which will result in
@@ -2457,7 +2485,9 @@ static int validate_invalid_user_pages(struct amdkfd_process_info *process_info)
 		if (ret) {
 			pr_err("%s: update PTE failed\n", __func__);
 			/* make sure this gets validated again */
-			atomic_inc(&mem->invalid);
+			mutex_lock(&process_info->notifier_lock);
+			mem->invalid++;
+			mutex_unlock(&process_info->notifier_lock);
 			goto unreserve_out;
 		}
 	}
@@ -2477,6 +2507,36 @@ static int validate_invalid_user_pages(struct amdkfd_process_info *process_info)
 	return ret;
 }
 
+/* Confirm that all user pages are valid while holding the notifier lock
+ *
+ * Moves valid BOs from the userptr_inval_list back to userptr_val_list.
+ */
+static int confirm_valid_user_pages_locked(struct amdkfd_process_info *process_info)
+{
+	struct kgd_mem *mem, *tmp_mem;
+	int ret = 0;
+
+	list_for_each_entry_safe(mem, tmp_mem,
+				 &process_info->userptr_inval_list,
+				 validate_list.head) {
+		bool valid = amdgpu_ttm_tt_get_user_pages_done(
+				mem->bo->tbo.ttm, mem->range);
+
+		mem->range = NULL;
+		if (!valid) {
+			WARN(!mem->invalid, "Invalid BO not marked invalid");
+			ret = -EAGAIN;
+			continue;
+		}
+		WARN(mem->invalid, "Valid BO is marked invalid");
+
+		list_move_tail(&mem->validate_list.head,
+			       &process_info->userptr_valid_list);
+	}
+
+	return ret;
+}
+
 /* Worker callback to restore evicted userptr BOs
  *
  * Tries to update and validate all userptr BOs. If successful and no
@@ -2491,9 +2551,11 @@ static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work)
 					     restore_userptr_work);
 	struct task_struct *usertask;
 	struct mm_struct *mm;
-	int evicted_bos;
+	uint32_t evicted_bos;
 
-	evicted_bos = atomic_read(&process_info->evicted_bos);
+	mutex_lock(&process_info->notifier_lock);
+	evicted_bos = process_info->evicted_bos;
+	mutex_unlock(&process_info->notifier_lock);
 	if (!evicted_bos)
 		return;
@@ -2516,9 +2578,6 @@ static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work)
 	 * and we can just restart the queues.
 	 */
 	if (!list_empty(&process_info->userptr_inval_list)) {
-		if (atomic_read(&process_info->evicted_bos) != evicted_bos)
-			goto unlock_out; /* Concurrent eviction, try again */
-
 		if (validate_invalid_user_pages(process_info))
 			goto unlock_out;
 	}
@@ -2527,10 +2586,17 @@ static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work)
 	 * be a first eviction that calls quiesce_mm. The eviction
 	 * reference counting inside KFD will handle this case.
 	 */
-	if (atomic_cmpxchg(&process_info->evicted_bos, evicted_bos, 0) !=
-	    evicted_bos)
-		goto unlock_out;
-	evicted_bos = 0;
+	mutex_lock(&process_info->notifier_lock);
+	if (process_info->evicted_bos != evicted_bos)
+		goto unlock_notifier_out;
+
+	if (confirm_valid_user_pages_locked(process_info)) {
+		WARN(1, "User pages unexpectedly invalid");
+		goto unlock_notifier_out;
+	}
+
+	process_info->evicted_bos = evicted_bos = 0;
+
 	if (kgd2kfd_resume_mm(mm)) {
 		pr_err("%s: Failed to resume KFD\n", __func__);
 		/* No recovery from this failure. Probably the CP is
@@ -2538,6 +2604,8 @@ static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work)
 	 */
 	}
 
+unlock_notifier_out:
+	mutex_unlock(&process_info->notifier_lock);
 unlock_out:
 	mutex_unlock(&process_info->lock);
...
@@ -105,17 +105,11 @@ static bool amdgpu_hmm_invalidate_hsa(struct mmu_interval_notifier *mni,
 				      unsigned long cur_seq)
 {
 	struct amdgpu_bo *bo = container_of(mni, struct amdgpu_bo, notifier);
-	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
 
 	if (!mmu_notifier_range_blockable(range))
 		return false;
 
-	mutex_lock(&adev->notifier_lock);
-
-	mmu_interval_set_seq(mni, cur_seq);
-
-	amdgpu_amdkfd_evict_userptr(bo->kfd_bo, bo->notifier.mm);
-
-	mutex_unlock(&adev->notifier_lock);
+	amdgpu_amdkfd_evict_userptr(mni, cur_seq, bo->kfd_bo);
 
 	return true;
 }
@@ -244,9 +238,9 @@ int amdgpu_hmm_range_get_pages(struct mmu_interval_notifier *notifier,
 		return r;
 }
 
-int amdgpu_hmm_range_get_pages_done(struct hmm_range *hmm_range)
+bool amdgpu_hmm_range_get_pages_done(struct hmm_range *hmm_range)
 {
-	int r;
+	bool r;
 
 	r = mmu_interval_read_retry(hmm_range->notifier,
 				    hmm_range->notifier_seq);
...
@@ -29,12 +29,13 @@
 #include <linux/rwsem.h>
 #include <linux/workqueue.h>
 #include <linux/interval_tree.h>
+#include <linux/mmu_notifier.h>
 
 int amdgpu_hmm_range_get_pages(struct mmu_interval_notifier *notifier,
 			       uint64_t start, uint64_t npages, bool readonly,
 			       void *owner, struct page **pages,
 			       struct hmm_range **phmm_range);
-int amdgpu_hmm_range_get_pages_done(struct hmm_range *hmm_range);
+bool amdgpu_hmm_range_get_pages_done(struct hmm_range *hmm_range);
 
 #if defined(CONFIG_HMM_MIRROR)
 int amdgpu_hmm_register(struct amdgpu_bo *bo, unsigned long addr);
...
@@ -695,8 +695,19 @@ int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages,
 	return r;
 }
 
+/* amdgpu_ttm_tt_discard_user_pages - Discard range and pfn array allocations
+ */
+void amdgpu_ttm_tt_discard_user_pages(struct ttm_tt *ttm,
+				      struct hmm_range *range)
+{
+	struct amdgpu_ttm_tt *gtt = (void *)ttm;
+
+	if (gtt && gtt->userptr && range)
+		amdgpu_hmm_range_get_pages_done(range);
+}
+
 /*
- * amdgpu_ttm_tt_userptr_range_done - stop HMM track the CPU page table change
+ * amdgpu_ttm_tt_get_user_pages_done - stop HMM track the CPU page table change
  * Check if the pages backing this ttm range have been invalidated
  *
  * Returns: true if pages are still valid
@@ -714,10 +725,6 @@ bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm,
 	WARN_ONCE(!range->hmm_pfns, "No user pages to check\n");
 
-	/*
-	 * FIXME: Must always hold notifier_lock for this, and must
-	 * not ignore the return code.
-	 */
 	return !amdgpu_hmm_range_get_pages_done(range);
 }
 #endif
...
@@ -159,6 +159,8 @@ uint64_t amdgpu_ttm_domain_start(struct amdgpu_device *adev, uint32_t type);
 #if IS_ENABLED(CONFIG_DRM_AMDGPU_USERPTR)
 int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages,
 				 struct hmm_range **range);
+void amdgpu_ttm_tt_discard_user_pages(struct ttm_tt *ttm,
+				      struct hmm_range *range);
 bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm,
 				       struct hmm_range *range);
 #else
@@ -168,6 +170,10 @@ static inline int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo,
 {
 	return -EPERM;
 }
+static inline void amdgpu_ttm_tt_discard_user_pages(struct ttm_tt *ttm,
+						    struct hmm_range *range)
+{
+}
 static inline bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm,
 						     struct hmm_range *range)
 {
...