Commit 08232a45 authored by Jérôme Glisse, committed by Linus Torvalds

mm/hmm: use struct for hmm_vma_fault(), hmm_vma_get_pfns() parameters

Both hmm_vma_fault() and hmm_vma_get_pfns() took a struct hmm_range as a
parameter and initialized that struct from several of their other
parameters.  Have the callers of those functions do the initialization
instead, as they likely already have the values at hand, and pass only the
struct to both functions.  This shortens the function signatures and makes
it easier to add new parameters in the future: they can simply be added to
the structure.

Link: http://lkml.kernel.org/r/20180323005527.758-7-jglisse@redhat.com
Signed-off-by: Jérôme Glisse <jglisse@redhat.com>
Reviewed-by: John Hubbard <jhubbard@nvidia.com>
Cc: Evgeny Baskakov <ebaskakov@nvidia.com>
Cc: Ralph Campbell <rcampbell@nvidia.com>
Cc: Mark Hairgrove <mhairgrove@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent c719547f
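For illustration, a minimal sketch of the new calling convention from a
caller's point of view; example_snapshot() is a hypothetical driver helper,
and its error handling is an assumption rather than part of this patch:

static int example_snapshot(struct vm_area_struct *vma,
			    unsigned long start, unsigned long end,
			    hmm_pfn_t *pfns)
{
	struct hmm_range range;

	/*
	 * Before this patch every value below was a separate argument:
	 *	hmm_vma_get_pfns(vma, &range, start, end, pfns);
	 * Now the caller fills struct hmm_range up front ...
	 */
	range.vma = vma;
	range.start = start;
	range.end = end;
	range.pfns = pfns;

	/* ... and passes only the struct. */
	if (hmm_vma_get_pfns(&range))
		return -EFAULT;	/* assumed error mapping, illustration only */

	/* Use range.pfns, then always end the snapshot (see warnings below). */
	if (!hmm_vma_range_done(&range))
		return -EAGAIN;	/* snapshot was invalidated; caller retries */

	return 0;
}

With this shape, a future field only needs to be set by the caller and
consumed inside mm/hmm.c; the exported signatures never change again.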
--- a/include/linux/hmm.h
+++ b/include/linux/hmm.h
@@ -274,6 +274,7 @@ void hmm_mirror_unregister(struct hmm_mirror *mirror);
 /*
  * struct hmm_range - track invalidation lock on virtual address range
  *
+ * @vma: the vm area struct for the range
  * @list: all range lock are on a list
  * @start: range virtual start address (inclusive)
  * @end: range virtual end address (exclusive)
@@ -281,6 +282,7 @@ void hmm_mirror_unregister(struct hmm_mirror *mirror);
  * @valid: pfns array did not change since it has been fill by an HMM function
  */
 struct hmm_range {
+	struct vm_area_struct *vma;
 	struct list_head list;
 	unsigned long start;
 	unsigned long end;
@@ -301,12 +303,8 @@ struct hmm_range {
  *
  * IF YOU DO NOT FOLLOW THE ABOVE RULE THE SNAPSHOT CONTENT MIGHT BE INVALID !
  */
-int hmm_vma_get_pfns(struct vm_area_struct *vma,
-		     struct hmm_range *range,
-		     unsigned long start,
-		     unsigned long end,
-		     hmm_pfn_t *pfns);
-bool hmm_vma_range_done(struct vm_area_struct *vma, struct hmm_range *range);
+int hmm_vma_get_pfns(struct hmm_range *range);
+bool hmm_vma_range_done(struct hmm_range *range);
 
 /*
@@ -327,13 +325,7 @@ bool hmm_vma_range_done(struct vm_area_struct *vma, struct hmm_range *range);
  *
  * See the function description in mm/hmm.c for further documentation.
  */
-int hmm_vma_fault(struct vm_area_struct *vma,
-		  struct hmm_range *range,
-		  unsigned long start,
-		  unsigned long end,
-		  hmm_pfn_t *pfns,
-		  bool write,
-		  bool block);
+int hmm_vma_fault(struct hmm_range *range, bool write, bool block);
 #endif /* IS_ENABLED(CONFIG_HMM_MIRROR) */
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -533,11 +533,7 @@ static int hmm_vma_walk_pmd(pmd_t *pmdp,
 
 /*
  * hmm_vma_get_pfns() - snapshot CPU page table for a range of virtual addresses
- * @vma: virtual memory area containing the virtual address range
- * @range: used to track snapshot validity
- * @start: range virtual start address (inclusive)
- * @end: range virtual end address (exclusive)
- * @entries: array of hmm_pfn_t: provided by the caller, filled in by function
+ * @range: range being snapshotted
  * Returns: -EINVAL if invalid argument, -ENOMEM out of memory, 0 success
  *
  * This snapshots the CPU page table for a range of virtual addresses. Snapshot
@@ -551,26 +547,23 @@ static int hmm_vma_walk_pmd(pmd_t *pmdp,
  * NOT CALLING hmm_vma_range_done() IF FUNCTION RETURNS 0 WILL LEAD TO SERIOUS
  * MEMORY CORRUPTION ! YOU HAVE BEEN WARNED !
  */
-int hmm_vma_get_pfns(struct vm_area_struct *vma,
-		     struct hmm_range *range,
-		     unsigned long start,
-		     unsigned long end,
-		     hmm_pfn_t *pfns)
+int hmm_vma_get_pfns(struct hmm_range *range)
 {
+	struct vm_area_struct *vma = range->vma;
 	struct hmm_vma_walk hmm_vma_walk;
 	struct mm_walk mm_walk;
 	struct hmm *hmm;
 
 	/* FIXME support hugetlb fs */
 	if (is_vm_hugetlb_page(vma) || (vma->vm_flags & VM_SPECIAL)) {
-		hmm_pfns_special(pfns, start, end);
+		hmm_pfns_special(range->pfns, range->start, range->end);
 		return -EINVAL;
 	}
 
 	/* Sanity check, this really should not happen ! */
-	if (start < vma->vm_start || start >= vma->vm_end)
+	if (range->start < vma->vm_start || range->start >= vma->vm_end)
 		return -EINVAL;
-	if (end < vma->vm_start || end > vma->vm_end)
+	if (range->end < vma->vm_start || range->end > vma->vm_end)
 		return -EINVAL;
 
 	hmm = hmm_register(vma->vm_mm);
@@ -581,9 +574,6 @@ int hmm_vma_get_pfns(struct vm_area_struct *vma,
 		return -EINVAL;
 
 	/* Initialize range to track CPU page table update */
-	range->start = start;
-	range->pfns = pfns;
-	range->end = end;
 	spin_lock(&hmm->lock);
 	range->valid = true;
 	list_add_rcu(&range->list, &hmm->ranges);
@@ -601,14 +591,13 @@ int hmm_vma_get_pfns(struct vm_area_struct *vma,
 	mm_walk.pmd_entry = hmm_vma_walk_pmd;
 	mm_walk.pte_hole = hmm_vma_walk_hole;
 
-	walk_page_range(start, end, &mm_walk);
+	walk_page_range(range->start, range->end, &mm_walk);
 	return 0;
 }
 EXPORT_SYMBOL(hmm_vma_get_pfns);
 
 /*
  * hmm_vma_range_done() - stop tracking change to CPU page table over a range
- * @vma: virtual memory area containing the virtual address range
  * @range: range being tracked
  * Returns: false if range data has been invalidated, true otherwise
  *
@@ -628,10 +617,10 @@ EXPORT_SYMBOL(hmm_vma_get_pfns);
  *
  * There are two ways to use this :
  * again:
- *   hmm_vma_get_pfns(vma, range, start, end, pfns); or hmm_vma_fault(...);
+ *   hmm_vma_get_pfns(range); or hmm_vma_fault(...);
  *   trans = device_build_page_table_update_transaction(pfns);
  *   device_page_table_lock();
- *   if (!hmm_vma_range_done(vma, range)) {
+ *   if (!hmm_vma_range_done(range)) {
  *     device_page_table_unlock();
  *     goto again;
  *   }
@@ -639,13 +628,13 @@ EXPORT_SYMBOL(hmm_vma_get_pfns);
  *   device_page_table_unlock();
  *
  * Or:
- *   hmm_vma_get_pfns(vma, range, start, end, pfns); or hmm_vma_fault(...);
+ *   hmm_vma_get_pfns(range); or hmm_vma_fault(...);
  *   device_page_table_lock();
- *   hmm_vma_range_done(vma, range);
- *   device_update_page_table(pfns);
+ *   hmm_vma_range_done(range);
+ *   device_update_page_table(range->pfns);
  *   device_page_table_unlock();
  */
-bool hmm_vma_range_done(struct vm_area_struct *vma, struct hmm_range *range)
+bool hmm_vma_range_done(struct hmm_range *range)
 {
 	unsigned long npages = (range->end - range->start) >> PAGE_SHIFT;
 	struct hmm *hmm;
@@ -655,7 +644,7 @@ bool hmm_vma_range_done(struct vm_area_struct *vma, struct hmm_range *range)
 		return false;
 	}
 
-	hmm = hmm_register(vma->vm_mm);
+	hmm = hmm_register(range->vma->vm_mm);
 	if (!hmm) {
 		memset(range->pfns, 0, sizeof(*range->pfns) * npages);
 		return false;
@@ -671,11 +660,7 @@ EXPORT_SYMBOL(hmm_vma_range_done);
 
 /*
  * hmm_vma_fault() - try to fault some address in a virtual address range
- * @vma: virtual memory area containing the virtual address range
- * @range: use to track pfns array content validity
- * @start: fault range virtual start address (inclusive)
- * @end: fault range virtual end address (exclusive)
- * @pfns: array of hmm_pfn_t, only entry with fault flag set will be faulted
+ * @range: range being faulted
  * @write: is it a write fault
  * @block: allow blocking on fault (if true it sleeps and do not drop mmap_sem)
  * Returns: 0 success, error otherwise (-EAGAIN means mmap_sem have been drop)
@@ -691,10 +676,10 @@ EXPORT_SYMBOL(hmm_vma_range_done);
  *   down_read(&mm->mmap_sem);
  *   // Find vma and address device wants to fault, initialize hmm_pfn_t
  *   // array accordingly
- *   ret = hmm_vma_fault(vma, start, end, pfns, allow_retry);
+ *   ret = hmm_vma_fault(range, write, block);
  *   switch (ret) {
  *   case -EAGAIN:
- *     hmm_vma_range_done(vma, range);
+ *     hmm_vma_range_done(range);
  *     // You might want to rate limit or yield to play nicely, you may
  *     // also commit any valid pfn in the array assuming that you are
  *     // getting true from hmm_vma_range_monitor_end()
@@ -708,7 +693,7 @@ EXPORT_SYMBOL(hmm_vma_range_done);
  *   }
  *   // Take device driver lock that serialize device page table update
  *   driver_lock_device_page_table_update();
- *   hmm_vma_range_done(vma, range);
+ *   hmm_vma_range_done(range);
  *   // Commit pfns we got from hmm_vma_fault()
  *   driver_unlock_device_page_table_update();
  *   up_read(&mm->mmap_sem)
@@ -718,28 +703,24 @@ EXPORT_SYMBOL(hmm_vma_range_done);
  *
  * YOU HAVE BEEN WARNED !
  */
-int hmm_vma_fault(struct vm_area_struct *vma,
-		  struct hmm_range *range,
-		  unsigned long start,
-		  unsigned long end,
-		  hmm_pfn_t *pfns,
-		  bool write,
-		  bool block)
+int hmm_vma_fault(struct hmm_range *range, bool write, bool block)
 {
+	struct vm_area_struct *vma = range->vma;
+	unsigned long start = range->start;
 	struct hmm_vma_walk hmm_vma_walk;
 	struct mm_walk mm_walk;
 	struct hmm *hmm;
 	int ret;
 
 	/* Sanity check, this really should not happen ! */
-	if (start < vma->vm_start || start >= vma->vm_end)
+	if (range->start < vma->vm_start || range->start >= vma->vm_end)
 		return -EINVAL;
-	if (end < vma->vm_start || end > vma->vm_end)
+	if (range->end < vma->vm_start || range->end > vma->vm_end)
 		return -EINVAL;
 
 	hmm = hmm_register(vma->vm_mm);
 	if (!hmm) {
-		hmm_pfns_clear(pfns, start, end);
+		hmm_pfns_clear(range->pfns, range->start, range->end);
 		return -ENOMEM;
 	}
 
 	/* Caller must have registered a mirror using hmm_mirror_register() */
@@ -747,9 +728,6 @@ int hmm_vma_fault(struct vm_area_struct *vma,
 		return -EINVAL;
 
 	/* Initialize range to track CPU page table update */
-	range->start = start;
-	range->pfns = pfns;
-	range->end = end;
 	spin_lock(&hmm->lock);
 	range->valid = true;
 	list_add_rcu(&range->list, &hmm->ranges);
@@ -757,7 +735,7 @@ int hmm_vma_fault(struct vm_area_struct *vma,
 
 	/* FIXME support hugetlb fs */
 	if (is_vm_hugetlb_page(vma) || (vma->vm_flags & VM_SPECIAL)) {
-		hmm_pfns_special(pfns, start, end);
+		hmm_pfns_special(range->pfns, range->start, range->end);
 		return 0;
 	}
@@ -777,7 +755,7 @@ int hmm_vma_fault(struct vm_area_struct *vma,
 	mm_walk.pte_hole = hmm_vma_walk_hole;
 
 	do {
-		ret = walk_page_range(start, end, &mm_walk);
+		ret = walk_page_range(start, range->end, &mm_walk);
 		start = hmm_vma_walk.last;
 	} while (ret == -EAGAIN);
 
@@ -785,8 +763,8 @@ int hmm_vma_fault(struct vm_area_struct *vma,
 		unsigned long i;
 
 		i = (hmm_vma_walk.last - range->start) >> PAGE_SHIFT;
-		hmm_pfns_clear(&pfns[i], hmm_vma_walk.last, end);
-		hmm_vma_range_done(vma, range);
+		hmm_pfns_clear(&range->pfns[i], hmm_vma_walk.last, range->end);
+		hmm_vma_range_done(range);
 	}
 	return ret;
 }