Commit d4cbff46 authored by Christian König, committed by Alex Deucher

drm/amdgpu: cleanup amdgpu_hmm_range_get_pages

Remove unused parameters and cleanup dead code.
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent d9483ecd
@@ -158,10 +158,9 @@ void amdgpu_hmm_unregister(struct amdgpu_bo *bo)
 }
 
 int amdgpu_hmm_range_get_pages(struct mmu_interval_notifier *notifier,
-			       struct mm_struct *mm, struct page **pages,
-			       uint64_t start, uint64_t npages,
-			       struct hmm_range **phmm_range, bool readonly,
-			       bool mmap_locked, void *owner)
+			       uint64_t start, uint64_t npages, bool readonly,
+			       void *owner, struct page **pages,
+			       struct hmm_range **phmm_range)
 {
 	struct hmm_range *hmm_range;
 	unsigned long timeout;
@@ -194,14 +193,7 @@ int amdgpu_hmm_range_get_pages(struct mmu_interval_notifier *notifier,
 retry:
 	hmm_range->notifier_seq = mmu_interval_read_begin(notifier);
-
-	if (likely(!mmap_locked))
-		mmap_read_lock(mm);
-
 	r = hmm_range_fault(hmm_range);
-
-	if (likely(!mmap_locked))
-		mmap_read_unlock(mm);
-
 	if (unlikely(r)) {
 		/*
 		 * FIXME: This timeout should encompass the retry from
......
@@ -31,10 +31,9 @@
 #include <linux/interval_tree.h>
 
 int amdgpu_hmm_range_get_pages(struct mmu_interval_notifier *notifier,
-			       struct mm_struct *mm, struct page **pages,
-			       uint64_t start, uint64_t npages,
-			       struct hmm_range **phmm_range, bool readonly,
-			       bool mmap_locked, void *owner);
+			       uint64_t start, uint64_t npages, bool readonly,
+			       void *owner, struct page **pages,
+			       struct hmm_range **phmm_range);
 int amdgpu_hmm_range_get_pages_done(struct hmm_range *hmm_range);
 
 #if defined(CONFIG_HMM_MIRROR)
......
@@ -692,9 +692,8 @@ int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages,
 	}
 
 	readonly = amdgpu_ttm_tt_is_readonly(ttm);
-	r = amdgpu_hmm_range_get_pages(&bo->notifier, mm, pages, start,
-				       ttm->num_pages, range, readonly,
-				       true, NULL);
+	r = amdgpu_hmm_range_get_pages(&bo->notifier, start, ttm->num_pages,
+				       readonly, NULL, pages, range);
 out_unlock:
 	mmap_read_unlock(mm);
 	if (r)
......
@@ -1596,9 +1596,9 @@ static int svm_range_validate_and_map(struct mm_struct *mm,
 		next = min(vma->vm_end, end);
 		npages = (next - addr) >> PAGE_SHIFT;
 		WRITE_ONCE(p->svms.faulting_task, current);
-		r = amdgpu_hmm_range_get_pages(&prange->notifier, mm, NULL,
-					       addr, npages, &hmm_range,
-					       readonly, true, owner);
+		r = amdgpu_hmm_range_get_pages(&prange->notifier, addr, npages,
+					       readonly, owner, NULL,
+					       &hmm_range);
 		WRITE_ONCE(p->svms.faulting_task, NULL);
 		if (r) {
 			pr_debug("failed %d to get svm range pages\n", r);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment