Commit de8dfb8e authored by Tom St Denis, committed by Alex Deucher

drm/ttm: Remove unnecessary retval from ttm_bo_vm_fault()

The dual ret/retval bookkeeping was more complex than it needed to be.
Drop the retval variable and assign the appropriate VM_FAULT_* codes
to ret instead.
Signed-off-by: Tom St Denis <tom.stdenis@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 449f797a
@@ -118,7 +118,6 @@ static int ttm_bo_vm_fault(struct vm_fault *vmf)
 	int ret;
 	int i;
 	unsigned long address = vmf->address;
-	int retval = VM_FAULT_NOPAGE;
 	struct ttm_mem_type_manager *man =
 		&bdev->man[bo->mem.mem_type];
 	struct vm_area_struct cvma;
@@ -158,7 +157,7 @@ static int ttm_bo_vm_fault(struct vm_fault *vmf)
 	 * (if at all) by redirecting mmap to the exporter.
 	 */
 	if (bo->ttm && (bo->ttm->page_flags & TTM_PAGE_FLAG_SG)) {
-		retval = VM_FAULT_SIGBUS;
+		ret = VM_FAULT_SIGBUS;
 		goto out_unlock;
 	}
 
@@ -169,10 +168,10 @@ static int ttm_bo_vm_fault(struct vm_fault *vmf)
 			break;
 		case -EBUSY:
 		case -ERESTARTSYS:
-			retval = VM_FAULT_NOPAGE;
+			ret = VM_FAULT_NOPAGE;
 			goto out_unlock;
 		default:
-			retval = VM_FAULT_SIGBUS;
+			ret = VM_FAULT_SIGBUS;
 			goto out_unlock;
 		}
 	}
@@ -183,12 +182,10 @@ static int ttm_bo_vm_fault(struct vm_fault *vmf)
 	 */
 	ret = ttm_bo_vm_fault_idle(bo, vmf);
 	if (unlikely(ret != 0)) {
-		retval = ret;
-
-		if (retval == VM_FAULT_RETRY &&
+		if (ret == VM_FAULT_RETRY &&
 		    !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
 			/* The BO has already been unreserved. */
-			return retval;
+			return ret;
 		}
 
 		goto out_unlock;
@@ -196,12 +193,12 @@ static int ttm_bo_vm_fault(struct vm_fault *vmf)
 
 	ret = ttm_mem_io_lock(man, true);
 	if (unlikely(ret != 0)) {
-		retval = VM_FAULT_NOPAGE;
+		ret = VM_FAULT_NOPAGE;
 		goto out_unlock;
 	}
 	ret = ttm_mem_io_reserve_vm(bo);
 	if (unlikely(ret != 0)) {
-		retval = VM_FAULT_SIGBUS;
+		ret = VM_FAULT_SIGBUS;
 		goto out_io_unlock;
 	}
 
@@ -211,7 +208,7 @@ static int ttm_bo_vm_fault(struct vm_fault *vmf)
 		drm_vma_node_start(&bo->vma_node);
 
 	if (unlikely(page_offset >= bo->num_pages)) {
-		retval = VM_FAULT_SIGBUS;
+		ret = VM_FAULT_SIGBUS;
 		goto out_io_unlock;
 	}
 
@@ -238,7 +235,7 @@ static int ttm_bo_vm_fault(struct vm_fault *vmf)
 
 		/* Allocate all page at once, most common usage */
 		if (ttm->bdev->driver->ttm_tt_populate(ttm, &ctx)) {
-			retval = VM_FAULT_OOM;
+			ret = VM_FAULT_OOM;
 			goto out_io_unlock;
 		}
 	}
@@ -255,7 +252,7 @@ static int ttm_bo_vm_fault(struct vm_fault *vmf)
 		} else {
 			page = ttm->pages[page_offset];
 			if (unlikely(!page && i == 0)) {
-				retval = VM_FAULT_OOM;
+				ret = VM_FAULT_OOM;
 				goto out_io_unlock;
 			} else if (unlikely(!page)) {
 				break;
@@ -280,7 +277,7 @@ static int ttm_bo_vm_fault(struct vm_fault *vmf)
 		if (unlikely((ret == -EBUSY) || (ret != 0 && i > 0)))
 			break;
 		else if (unlikely(ret != 0)) {
-			retval =
+			ret =
 			    (ret == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;
 			goto out_io_unlock;
 		}
@@ -289,11 +286,12 @@ static int ttm_bo_vm_fault(struct vm_fault *vmf)
 		if (unlikely(++page_offset >= page_last))
 			break;
 	}
+	ret = VM_FAULT_NOPAGE;
 out_io_unlock:
 	ttm_mem_io_unlock(man);
 out_unlock:
 	ttm_bo_unreserve(bo);
-	return retval;
+	return ret;
 }
 
 static void ttm_bo_vm_open(struct vm_area_struct *vma)
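For reference, the shape of the change can be summarized with the simplified sketch below. This is not the actual TTM source: do_work(), fault_before() and fault_after() are hypothetical stand-ins for the real helpers, and the VM_FAULT_* values are placeholders only so the snippet is self-contained.

/* Illustrative sketch only -- not the actual ttm_bo_vm_fault() code. */
#define VM_FAULT_SIGBUS  0x0002   /* placeholder values for the sketch */
#define VM_FAULT_NOPAGE  0x0100

static int do_work(void)          /* hypothetical stand-in for the TTM helpers */
{
	return -1;                /* pretend the helper failed */
}

/* Before: errno-style ret and the VM_FAULT_* result tracked separately. */
static int fault_before(void)
{
	int ret;
	int retval = VM_FAULT_NOPAGE;

	ret = do_work();
	if (ret) {
		retval = VM_FAULT_SIGBUS;
		goto out;
	}
out:
	return retval;
}

/* After: the VM_FAULT_* code is assigned to ret and returned directly. */
static int fault_after(void)
{
	int ret;

	ret = do_work();
	if (ret) {
		ret = VM_FAULT_SIGBUS;
		goto out;
	}
	ret = VM_FAULT_NOPAGE;
out:
	return ret;
}

Because ret no longer starts out holding the VM_FAULT_NOPAGE default, the patch assigns ret = VM_FAULT_NOPAGE explicitly just before the out_io_unlock label, so the successful path still returns the expected code.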