Commit 4daa4fba authored by Souptick Joarder's avatar Souptick Joarder Committed by Alex Deucher

gpu: drm: ttm: Adding new return type vm_fault_t

Use new return type vm_fault_t for fault handler. For
now, this is just documenting that the function returns
a VM_FAULT value rather than an errno. Once all instances
are converted, vm_fault_t will become a distinct type.

Reference: commit 1c8f4220 ("mm: change return type to vm_fault_t")

Previously, vm_insert_{mixed,pfn} returned an errno value which
the driver then mapped onto a VM_FAULT_* type. The new functions
vmf_insert_{mixed,pfn} remove this inefficiency by returning a
VM_FAULT_* type directly.
Signed-off-by: Souptick Joarder <jrdr.linux@gmail.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 2bfb0b67
...@@ -44,10 +44,11 @@ ...@@ -44,10 +44,11 @@
#define TTM_BO_VM_NUM_PREFAULT 16 #define TTM_BO_VM_NUM_PREFAULT 16
static int ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo, static vm_fault_t ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
struct vm_fault *vmf) struct vm_fault *vmf)
{ {
int ret = 0; vm_fault_t ret = 0;
int err = 0;
if (likely(!bo->moving)) if (likely(!bo->moving))
goto out_unlock; goto out_unlock;
...@@ -78,9 +79,9 @@ static int ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo, ...@@ -78,9 +79,9 @@ static int ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
/* /*
* Ordinary wait. * Ordinary wait.
*/ */
ret = dma_fence_wait(bo->moving, true); err = dma_fence_wait(bo->moving, true);
if (unlikely(ret != 0)) { if (unlikely(err != 0)) {
ret = (ret != -ERESTARTSYS) ? VM_FAULT_SIGBUS : ret = (err != -ERESTARTSYS) ? VM_FAULT_SIGBUS :
VM_FAULT_NOPAGE; VM_FAULT_NOPAGE;
goto out_unlock; goto out_unlock;
} }
...@@ -105,7 +106,7 @@ static unsigned long ttm_bo_io_mem_pfn(struct ttm_buffer_object *bo, ...@@ -105,7 +106,7 @@ static unsigned long ttm_bo_io_mem_pfn(struct ttm_buffer_object *bo,
+ page_offset; + page_offset;
} }
static int ttm_bo_vm_fault(struct vm_fault *vmf) static vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
{ {
struct vm_area_struct *vma = vmf->vma; struct vm_area_struct *vma = vmf->vma;
struct ttm_buffer_object *bo = (struct ttm_buffer_object *) struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
...@@ -116,8 +117,9 @@ static int ttm_bo_vm_fault(struct vm_fault *vmf) ...@@ -116,8 +117,9 @@ static int ttm_bo_vm_fault(struct vm_fault *vmf)
unsigned long pfn; unsigned long pfn;
struct ttm_tt *ttm = NULL; struct ttm_tt *ttm = NULL;
struct page *page; struct page *page;
int ret; int err;
int i; int i;
vm_fault_t ret = VM_FAULT_NOPAGE;
unsigned long address = vmf->address; unsigned long address = vmf->address;
struct ttm_mem_type_manager *man = struct ttm_mem_type_manager *man =
&bdev->man[bo->mem.mem_type]; &bdev->man[bo->mem.mem_type];
...@@ -129,9 +131,9 @@ static int ttm_bo_vm_fault(struct vm_fault *vmf) ...@@ -129,9 +131,9 @@ static int ttm_bo_vm_fault(struct vm_fault *vmf)
* for reserve, and if it fails, retry the fault after waiting * for reserve, and if it fails, retry the fault after waiting
* for the buffer to become unreserved. * for the buffer to become unreserved.
*/ */
ret = ttm_bo_reserve(bo, true, true, NULL); err = ttm_bo_reserve(bo, true, true, NULL);
if (unlikely(ret != 0)) { if (unlikely(err != 0)) {
if (ret != -EBUSY) if (err != -EBUSY)
return VM_FAULT_NOPAGE; return VM_FAULT_NOPAGE;
if (vmf->flags & FAULT_FLAG_ALLOW_RETRY) { if (vmf->flags & FAULT_FLAG_ALLOW_RETRY) {
...@@ -163,8 +165,8 @@ static int ttm_bo_vm_fault(struct vm_fault *vmf) ...@@ -163,8 +165,8 @@ static int ttm_bo_vm_fault(struct vm_fault *vmf)
} }
if (bdev->driver->fault_reserve_notify) { if (bdev->driver->fault_reserve_notify) {
ret = bdev->driver->fault_reserve_notify(bo); err = bdev->driver->fault_reserve_notify(bo);
switch (ret) { switch (err) {
case 0: case 0:
break; break;
case -EBUSY: case -EBUSY:
...@@ -192,13 +194,13 @@ static int ttm_bo_vm_fault(struct vm_fault *vmf) ...@@ -192,13 +194,13 @@ static int ttm_bo_vm_fault(struct vm_fault *vmf)
goto out_unlock; goto out_unlock;
} }
ret = ttm_mem_io_lock(man, true); err = ttm_mem_io_lock(man, true);
if (unlikely(ret != 0)) { if (unlikely(err != 0)) {
ret = VM_FAULT_NOPAGE; ret = VM_FAULT_NOPAGE;
goto out_unlock; goto out_unlock;
} }
ret = ttm_mem_io_reserve_vm(bo); err = ttm_mem_io_reserve_vm(bo);
if (unlikely(ret != 0)) { if (unlikely(err != 0)) {
ret = VM_FAULT_SIGBUS; ret = VM_FAULT_SIGBUS;
goto out_io_unlock; goto out_io_unlock;
} }
...@@ -266,23 +268,20 @@ static int ttm_bo_vm_fault(struct vm_fault *vmf) ...@@ -266,23 +268,20 @@ static int ttm_bo_vm_fault(struct vm_fault *vmf)
} }
if (vma->vm_flags & VM_MIXEDMAP) if (vma->vm_flags & VM_MIXEDMAP)
ret = vm_insert_mixed(&cvma, address, ret = vmf_insert_mixed(&cvma, address,
__pfn_to_pfn_t(pfn, PFN_DEV)); __pfn_to_pfn_t(pfn, PFN_DEV));
else else
ret = vm_insert_pfn(&cvma, address, pfn); ret = vmf_insert_pfn(&cvma, address, pfn);
/* /*
* Somebody beat us to this PTE or prefaulting to * Somebody beat us to this PTE or prefaulting to
* an already populated PTE, or prefaulting error. * an already populated PTE, or prefaulting error.
*/ */
if (unlikely((ret == -EBUSY) || (ret != 0 && i > 0))) if (unlikely((ret == VM_FAULT_NOPAGE && i > 0)))
break; break;
else if (unlikely(ret != 0)) { else if (unlikely(ret & VM_FAULT_ERROR))
ret =
(ret == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;
goto out_io_unlock; goto out_io_unlock;
}
address += PAGE_SIZE; address += PAGE_SIZE;
if (unlikely(++page_offset >= page_last)) if (unlikely(++page_offset >= page_last))
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment