Commit 085ea250 authored by Ralph Campbell, committed by Jason Gunthorpe

mm/hmm: clean up some coding style and comments

There are no functional changes, just some coding style clean ups and
minor comment changes.

Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Ira Weiny <ira.weiny@intel.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Balbir Singh <bsingharora@gmail.com>
Cc: Dan Carpenter <dan.carpenter@oracle.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Souptick Joarder <jrdr.linux@gmail.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Ralph Campbell <rcampbell@nvidia.com>
Reviewed-by: Jérôme Glisse <jglisse@redhat.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
parent 2076e5c0
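
Most of the one-word comment edits in the diff below change "Returns:" to "Return:". The kernel-doc tooling documents return values in a section literally named "Return:", so the spelling matters for the generated documentation. A minimal sketch of that convention, using a hypothetical helper rather than anything from the HMM code:

/**
 * sample_lookup() - look up a cached value for a key (hypothetical example)
 * @key: key to search for
 *
 * Return: the cached value on success, -ENOENT if @key is not present.
 */
static int sample_lookup(int key)
{
	return key > 0 ? key : -ENOENT;	/* -ENOENT comes from <linux/errno.h> */
}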
@@ -153,9 +153,8 @@ static void hmm_release(struct mmu_notifier *mn, struct mm_struct *mm)
 	/* Wake-up everyone waiting on any range. */
 	mutex_lock(&hmm->lock);
-	list_for_each_entry(range, &hmm->ranges, list) {
+	list_for_each_entry(range, &hmm->ranges, list)
 		range->valid = false;
-	}
 	wake_up_all(&hmm->wq);
 	mutex_unlock(&hmm->lock);
@@ -166,9 +165,10 @@ static void hmm_release(struct mmu_notifier *mn, struct mm_struct *mm)
 		list_del_init(&mirror->list);
 		if (mirror->ops->release) {
 			/*
-			 * Drop mirrors_sem so callback can wait on any pending
-			 * work that might itself trigger mmu_notifier callback
-			 * and thus would deadlock with us.
+			 * Drop mirrors_sem so the release callback can wait
+			 * on any pending work that might itself trigger a
+			 * mmu_notifier callback and thus would deadlock with
+			 * us.
 			 */
 			up_write(&hmm->mirrors_sem);
 			mirror->ops->release(mirror);
@@ -223,11 +223,8 @@ static int hmm_invalidate_range_start(struct mmu_notifier *mn,
 		int ret;

 		ret = mirror->ops->sync_cpu_device_pagetables(mirror, &update);
-		if (!update.blockable && ret == -EAGAIN) {
-			up_read(&hmm->mirrors_sem);
-			ret = -EAGAIN;
-			goto out;
-		}
+		if (!update.blockable && ret == -EAGAIN)
+			break;
 	}
 	up_read(&hmm->mirrors_sem);
@@ -271,6 +268,7 @@ static const struct mmu_notifier_ops hmm_mmu_notifier_ops = {
  *
  * @mirror: new mirror struct to register
  * @mm: mm to register against
+ * Return: 0 on success, -ENOMEM if no memory, -EINVAL if invalid arguments
  *
  * To start mirroring a process address space, the device driver must register
  * an HMM mirror struct.
@@ -298,7 +296,7 @@ EXPORT_SYMBOL(hmm_mirror_register);
 /*
  * hmm_mirror_unregister() - unregister a mirror
  *
- * @mirror: new mirror struct to register
+ * @mirror: mirror struct to unregister
  *
  * Stop mirroring a process address space, and cleanup.
  */
@@ -372,7 +370,7 @@ static int hmm_pfns_bad(unsigned long addr,
  * @fault: should we fault or not ?
  * @write_fault: write fault ?
  * @walk: mm_walk structure
- * Returns: 0 on success, -EBUSY after page fault, or page fault error
+ * Return: 0 on success, -EBUSY after page fault, or page fault error
  *
  * This function will be called whenever pmd_none() or pte_none() returns true,
  * or whenever there is no page directory covering the virtual address range.
@@ -911,6 +909,7 @@ int hmm_range_register(struct hmm_range *range,
 		       unsigned page_shift)
 {
 	unsigned long mask = ((1UL << page_shift) - 1UL);
+	struct hmm *hmm;

 	range->valid = false;
 	range->hmm = NULL;
@@ -924,28 +923,29 @@ int hmm_range_register(struct hmm_range *range,
 	range->start = start;
 	range->end = end;

-	range->hmm = hmm_get_or_create(mm);
-	if (!range->hmm)
+	hmm = hmm_get_or_create(mm);
+	if (!hmm)
 		return -EFAULT;

 	/* Check if hmm_mm_destroy() was call. */
-	if (range->hmm->mm == NULL || range->hmm->dead) {
-		hmm_put(range->hmm);
+	if (hmm->mm == NULL || hmm->dead) {
+		hmm_put(hmm);
 		return -EFAULT;
 	}

-	/* Initialize range to track CPU page table update */
-	mutex_lock(&range->hmm->lock);
+	/* Initialize range to track CPU page table updates. */
+	mutex_lock(&hmm->lock);

-	list_add_rcu(&range->list, &range->hmm->ranges);
+	range->hmm = hmm;
+	list_add_rcu(&range->list, &hmm->ranges);

 	/*
 	 * If there are any concurrent notifiers we have to wait for them for
 	 * the range to be valid (see hmm_range_wait_until_valid()).
 	 */
-	if (!range->hmm->notifiers)
+	if (!hmm->notifiers)
 		range->valid = true;
-	mutex_unlock(&range->hmm->lock);
+	mutex_unlock(&hmm->lock);

 	return 0;
 }
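
The hmm_range_register() hunk above mostly replaces repeated range->hmm-> dereferences with a local struct hmm pointer, and only stores that pointer into range->hmm while hmm->lock is held, right before the range is added to the list. Below is a minimal userspace sketch of that shape, with hypothetical names (struct ctx, handle_register) that are not part of the HMM API, assuming pthreads:

#include <errno.h>
#include <pthread.h>
#include <stddef.h>

struct ctx {
	pthread_mutex_t lock;
	int users;
};

struct handle {
	struct ctx *ctx;	/* set only once registration succeeds */
};

/* Hypothetical example: keep the looked-up context in a local variable
 * and publish it into the handle only while holding the context lock. */
static int handle_register(struct handle *h, struct ctx *ctx)
{
	if (!ctx)
		return -EFAULT;

	pthread_mutex_lock(&ctx->lock);
	h->ctx = ctx;		/* publish under the lock */
	ctx->users++;
	pthread_mutex_unlock(&ctx->lock);
	return 0;
}

int main(void)
{
	struct ctx c = { .lock = PTHREAD_MUTEX_INITIALIZER, .users = 0 };
	struct handle h = { .ctx = NULL };

	return handle_register(&h, &c) ? 1 : 0;
}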
@@ -960,17 +960,19 @@ EXPORT_SYMBOL(hmm_range_register);
  */
 void hmm_range_unregister(struct hmm_range *range)
 {
+	struct hmm *hmm = range->hmm;
+
 	/* Sanity check this really should not happen. */
-	if (range->hmm == NULL || range->end <= range->start)
+	if (hmm == NULL || range->end <= range->start)
 		return;

-	mutex_lock(&range->hmm->lock);
+	mutex_lock(&hmm->lock);
 	list_del_rcu(&range->list);
-	mutex_unlock(&range->hmm->lock);
+	mutex_unlock(&hmm->lock);

 	/* Drop reference taken by hmm_range_register() */
 	range->valid = false;
-	hmm_put(range->hmm);
+	hmm_put(hmm);
 	range->hmm = NULL;
 }
 EXPORT_SYMBOL(hmm_range_unregister);
@@ -978,7 +980,7 @@ EXPORT_SYMBOL(hmm_range_unregister);
 /*
  * hmm_range_snapshot() - snapshot CPU page table for a range
  * @range: range
- * Returns: -EINVAL if invalid argument, -ENOMEM out of memory, -EPERM invalid
+ * Return: -EINVAL if invalid argument, -ENOMEM out of memory, -EPERM invalid
  *          permission (for instance asking for write and range is read only),
  *          -EAGAIN if you need to retry, -EFAULT invalid (ie either no valid
  *          vma or it is illegal to access that range), number of valid pages
@@ -1061,7 +1063,7 @@ EXPORT_SYMBOL(hmm_range_snapshot);
 * hmm_range_fault() - try to fault some address in a virtual address range
 * @range: range being faulted
 * @block: allow blocking on fault (if true it sleeps and do not drop mmap_sem)
- * Returns: number of valid pages in range->pfns[] (from range start
+ * Return: number of valid pages in range->pfns[] (from range start
 *          address). This may be zero. If the return value is negative,
 *          then one of the following values may be returned:
 *
@@ -1179,7 +1181,7 @@ EXPORT_SYMBOL(hmm_range_fault);
 * @device: device against to dma map page to
 * @daddrs: dma address of mapped pages
 * @block: allow blocking on fault (if true it sleeps and do not drop mmap_sem)
- * Returns: number of pages mapped on success, -EAGAIN if mmap_sem have been
+ * Return: number of pages mapped on success, -EAGAIN if mmap_sem have been
 *          drop and you need to try again, some other error value otherwise
 *
 * Note same usage pattern as hmm_range_fault().
@@ -1267,7 +1269,7 @@ EXPORT_SYMBOL(hmm_range_dma_map);
 * @device: device against which dma map was done
 * @daddrs: dma address of mapped pages
 * @dirty: dirty page if it had the write flag set
- * Returns: number of page unmapped on success, -EINVAL otherwise
+ * Return: number of page unmapped on success, -EINVAL otherwise
 *
 * Note that caller MUST abide by mmu notifier or use HMM mirror and abide
 * to the sync_cpu_device_pagetables() callback so that it is safe here to
@@ -1390,7 +1392,7 @@ static void hmm_devmem_free(struct page *page, void *data)
 * @ops: memory event device driver callback (see struct hmm_devmem_ops)
 * @device: device struct to bind the resource too
 * @size: size in bytes of the device memory to add
- * Returns: pointer to new hmm_devmem struct ERR_PTR otherwise
+ * Return: pointer to new hmm_devmem struct ERR_PTR otherwise
 *
 * This function first finds an empty range of physical address big enough to
 * contain the new resource, and then hotplugs it as ZONE_DEVICE memory, which