Commit b57e622e authored by Souptick Joarder's avatar Souptick Joarder Committed by Linus Torvalds

mm/hmm: convert to use vm_fault_t

Convert to use vm_fault_t type as return type for fault handler.

kbuild reported warning during testing of
*mm-create-the-new-vm_fault_t-type.patch* available in below link -
https://patchwork.kernel.org/patch/10752741/

  kernel/memremap.c:46:34: warning: incorrect type in return expression
                           (different base types)
  kernel/memremap.c:46:34: expected restricted vm_fault_t
  kernel/memremap.c:46:34: got int

This patch has fixed the warnings and also hmm_devmem_fault() is
converted to return vm_fault_t to avoid further warnings.

[sfr@canb.auug.org.au: drm/nouveau/dmem: update for struct hmm_devmem_ops member change]
  Link: http://lkml.kernel.org/r/20190220174407.753d94e5@canb.auug.org.au
Link: http://lkml.kernel.org/r/20190110145900.GA1317@jordon-HP-15-Notebook-PC
Signed-off-by: default avatarSouptick Joarder <jrdr.linux@gmail.com>
Signed-off-by: default avatarStephen Rothwell <sfr@canb.auug.org.au>
Reviewed-by: default avatarJérôme Glisse <jglisse@redhat.com>
Signed-off-by: default avatarAndrew Morton <akpm@linux-foundation.org>
Signed-off-by: default avatarLinus Torvalds <torvalds@linux-foundation.org>
parent 2bc4fc60
...@@ -261,7 +261,7 @@ static const struct migrate_vma_ops nouveau_dmem_fault_migrate_ops = { ...@@ -261,7 +261,7 @@ static const struct migrate_vma_ops nouveau_dmem_fault_migrate_ops = {
.finalize_and_map = nouveau_dmem_fault_finalize_and_map, .finalize_and_map = nouveau_dmem_fault_finalize_and_map,
}; };
static int static vm_fault_t
nouveau_dmem_fault(struct hmm_devmem *devmem, nouveau_dmem_fault(struct hmm_devmem *devmem,
struct vm_area_struct *vma, struct vm_area_struct *vma,
unsigned long addr, unsigned long addr,
......
...@@ -468,7 +468,7 @@ struct hmm_devmem_ops { ...@@ -468,7 +468,7 @@ struct hmm_devmem_ops {
* Note that mmap semaphore is held in read mode at least when this * Note that mmap semaphore is held in read mode at least when this
* callback occurs, hence the vma is valid upon callback entry. * callback occurs, hence the vma is valid upon callback entry.
*/ */
int (*fault)(struct hmm_devmem *devmem, vm_fault_t (*fault)(struct hmm_devmem *devmem,
struct vm_area_struct *vma, struct vm_area_struct *vma,
unsigned long addr, unsigned long addr,
const struct page *page, const struct page *page,
...@@ -511,7 +511,7 @@ struct hmm_devmem_ops { ...@@ -511,7 +511,7 @@ struct hmm_devmem_ops {
* chunk, as an optimization. It must, however, prioritize the faulting address * chunk, as an optimization. It must, however, prioritize the faulting address
* over all the others. * over all the others.
*/ */
typedef int (*dev_page_fault_t)(struct vm_area_struct *vma, typedef vm_fault_t (*dev_page_fault_t)(struct vm_area_struct *vma,
unsigned long addr, unsigned long addr,
const struct page *page, const struct page *page,
unsigned int flags, unsigned int flags,
......
...@@ -990,7 +990,7 @@ static void hmm_devmem_ref_kill(struct percpu_ref *ref) ...@@ -990,7 +990,7 @@ static void hmm_devmem_ref_kill(struct percpu_ref *ref)
percpu_ref_kill(ref); percpu_ref_kill(ref);
} }
static int hmm_devmem_fault(struct vm_area_struct *vma, static vm_fault_t hmm_devmem_fault(struct vm_area_struct *vma,
unsigned long addr, unsigned long addr,
const struct page *page, const struct page *page,
unsigned int flags, unsigned int flags,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment