Commit 9d0a1665 authored by Christoph Hellwig, committed by Jason Gunthorpe

amdgpu: remove -EAGAIN handling for hmm_range_fault

hmm_range_fault can only return -EAGAIN if called with the
HMM_FAULT_ALLOW_RETRY flag, which amdgpu never does.  Remove the handling
for the -EAGAIN case with its non-standard locking scheme.

Link: https://lore.kernel.org/r/20190806160554.14046-2-hch@lst.de
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Jason Gunthorpe <jgg@mellanox.com>
Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
parent cc374377
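
For context, a minimal sketch of why the removed code was dead: in the HMM API of this series, -EAGAIN can only surface when the caller passes HMM_FAULT_ALLOW_RETRY, which opts the fault into FAULT_FLAG_ALLOW_RETRY / VM_FAULT_RETRY semantics, where handle_mm_fault() drops mmap_sem itself. The sketch below is simplified from the mm/hmm.c per-address fault helper of that era, with the remaining fault outcomes abbreviated; it is an illustration, not the verbatim kernel source. Because amdgpu calls hmm_range_fault(range, 0), the -EAGAIN branch and the dropped-lock bookkeeping it required could never trigger.

/*
 * Simplified sketch of the HMM fault helper (mm/hmm.c, 5.3-era);
 * abbreviated for illustration, not verbatim kernel code.
 */
static int hmm_vma_do_fault(struct mm_walk *walk, unsigned long addr,
			    bool write_fault, uint64_t *pfn)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	unsigned int flags = FAULT_FLAG_REMOTE;
	vm_fault_t ret;

	/* Retry semantics are strictly opt-in via HMM_FAULT_ALLOW_RETRY. */
	if (hmm_vma_walk->flags & HMM_FAULT_ALLOW_RETRY)
		flags |= FAULT_FLAG_ALLOW_RETRY;
	if (write_fault)
		flags |= FAULT_FLAG_WRITE;

	ret = handle_mm_fault(walk->vma, addr, flags);
	if (ret & VM_FAULT_RETRY) {
		/*
		 * Only reachable with FAULT_FLAG_ALLOW_RETRY set; at this
		 * point handle_mm_fault() has already dropped mmap_sem.
		 */
		return -EAGAIN;
	}

	/* (handling of the remaining fault outcomes omitted) */
	return 0;
}
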
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -778,7 +778,6 @@ int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages)
 	struct hmm_range *range;
 	unsigned long i;
 	uint64_t *pfns;
-	int retry = 0;
 	int r = 0;
 
 	if (!mm) /* Happens during process shutdown */
@@ -822,7 +821,6 @@ int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages)
 	hmm_range_register(range, mirror, start,
 			   start + ttm->num_pages * PAGE_SIZE, PAGE_SHIFT);
 
-retry:
 	/*
 	 * Just wait for range to be valid, safe to ignore return value as we
 	 * will use the return value of hmm_range_fault() below under the
@@ -831,24 +829,12 @@ int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages)
 	hmm_range_wait_until_valid(range, HMM_RANGE_DEFAULT_TIMEOUT);
 
 	down_read(&mm->mmap_sem);
 	r = hmm_range_fault(range, 0);
-	if (unlikely(r < 0)) {
-		if (likely(r == -EAGAIN)) {
-			/*
-			 * return -EAGAIN, mmap_sem is dropped
-			 */
-			if (retry++ < MAX_RETRY_HMM_RANGE_FAULT)
-				goto retry;
-			else
-				pr_err("Retry hmm fault too many times\n");
-		}
-		goto out_up_read;
-	}
-
 	up_read(&mm->mmap_sem);
+	if (unlikely(r < 0))
+		goto out_free_pfns;
 
 	for (i = 0; i < ttm->num_pages; i++) {
 		pages[i] = hmm_device_entry_to_page(range, pfns[i]);
 		if (unlikely(!pages[i])) {
@@ -864,9 +850,6 @@ int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages)
 
 	return 0;
 
-out_up_read:
-	if (likely(r != -EAGAIN))
-		up_read(&mm->mmap_sem);
 out_free_pfns:
 	hmm_range_unregister(range);
 	kvfree(pfns);