Commit d2e8d551 authored by Ralph Campbell, committed by Jason Gunthorpe

mm/hmm: a few more C style and comment clean ups

A few more comments and minor programming style clean ups.  There should
be no functional changes.

Link: https://lore.kernel.org/r/20190726005650.2566-3-rcampbell@nvidia.com
Signed-off-by: Ralph Campbell <rcampbell@nvidia.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Jason Gunthorpe <jgg@mellanox.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
parent 1f961807
mm/hmm.c
@@ -32,7 +32,7 @@ static const struct mmu_notifier_ops hmm_mmu_notifier_ops;
  * hmm_get_or_create - register HMM against an mm (HMM internal)
  *
  * @mm: mm struct to attach to
- * Returns: returns an HMM object, either by referencing the existing
+ * Return: an HMM object, either by referencing the existing
  * (per-process) object, or by creating a new one.
  *
  * This is not intended to be used directly by device drivers. If mm already
@@ -325,8 +325,8 @@ static int hmm_pfns_bad(unsigned long addr,
 }
 
 /*
- * hmm_vma_walk_hole() - handle a range lacking valid pmd or pte(s)
- * @start: range virtual start address (inclusive)
+ * hmm_vma_walk_hole_() - handle a range lacking valid pmd or pte(s)
+ * @addr: range virtual start address (inclusive)
  * @end: range virtual end address (exclusive)
  * @fault: should we fault or not ?
  * @write_fault: write fault ?
@@ -376,9 +376,9 @@ static inline void hmm_pte_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
 	/*
 	 * So we not only consider the individual per page request we also
 	 * consider the default flags requested for the range. The API can
-	 * be use in 2 fashions. The first one where the HMM user coalesce
-	 * multiple page fault into one request and set flags per pfns for
-	 * of those faults. The second one where the HMM user want to pre-
+	 * be used 2 ways. The first one where the HMM user coalesces
+	 * multiple page faults into one request and sets flags per pfn for
+	 * those faults. The second one where the HMM user wants to pre-
 	 * fault a range with specific flags. For the latter one it is a
 	 * waste to have the user pre-fill the pfn arrays with a default
 	 * flags value.
@@ -388,7 +388,7 @@ static inline void hmm_pte_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
 	/* We aren't ask to do anything ... */
 	if (!(pfns & range->flags[HMM_PFN_VALID]))
 		return;
-	/* If this is device memory than only fault if explicitly requested */
+	/* If this is device memory then only fault if explicitly requested */
 	if ((cpu_flags & range->flags[HMM_PFN_DEVICE_PRIVATE])) {
 		/* Do we fault on device memory ? */
 		if (pfns & range->flags[HMM_PFN_DEVICE_PRIVATE]) {
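For illustration, the two usage modes this comment describes are selected by how the HMM user fills struct hmm_range before walking a range. The following is only a sketch against the hmm_range fields of this kernel era (default_flags, pfn_flags_mask, pfns[], flags[]); the surrounding driver setup is assumed and is not part of this commit:

	/* Mode 1: coalesce several faults, one request flag set per pfn. */
	range->default_flags = 0;
	range->pfn_flags_mask = -1ULL;		/* keep the per-pfn bits below */
	range->pfns[0] = range->flags[HMM_PFN_VALID];
	range->pfns[1] = range->flags[HMM_PFN_VALID] | range->flags[HMM_PFN_WRITE];

	/* Mode 2: pre-fault the whole range for write; pfns[] can stay zeroed. */
	range->default_flags = range->flags[HMM_PFN_VALID] | range->flags[HMM_PFN_WRITE];
	range->pfn_flags_mask = 0;

The statement between these two hunks (not shown in the diff) combines both sources roughly as pfns = (pfns & range->pfn_flags_mask) | range->default_flags, which is why pre-fault users need not pre-fill the pfn array.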
@@ -502,7 +502,7 @@ static int hmm_vma_handle_pmd(struct mm_walk *walk,
 	hmm_vma_walk->last = end;
 	return 0;
 #else
-	/* If THP is not enabled then we should never reach that code ! */
+	/* If THP is not enabled then we should never reach this code ! */
 	return -EINVAL;
 #endif
 }
@@ -522,7 +522,6 @@ static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr,
 {
 	struct hmm_vma_walk *hmm_vma_walk = walk->private;
 	struct hmm_range *range = hmm_vma_walk->range;
-	struct vm_area_struct *vma = walk->vma;
 	bool fault, write_fault;
 	uint64_t cpu_flags;
 	pte_t pte = *ptep;
@@ -571,8 +570,7 @@ static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr,
 			if (fault || write_fault) {
 				pte_unmap(ptep);
 				hmm_vma_walk->last = addr;
-				migration_entry_wait(vma->vm_mm,
-						     pmdp, addr);
+				migration_entry_wait(walk->mm, pmdp, addr);
 				return -EBUSY;
 			}
 			return 0;
@@ -620,13 +618,11 @@ static int hmm_vma_walk_pmd(pmd_t *pmdp,
 {
 	struct hmm_vma_walk *hmm_vma_walk = walk->private;
 	struct hmm_range *range = hmm_vma_walk->range;
-	struct vm_area_struct *vma = walk->vma;
 	uint64_t *pfns = range->pfns;
 	unsigned long addr = start, i;
 	pte_t *ptep;
 	pmd_t pmd;
 
-
 again:
 	pmd = READ_ONCE(*pmdp);
 	if (pmd_none(pmd))
@@ -648,7 +644,7 @@ static int hmm_vma_walk_pmd(pmd_t *pmdp,
 				     0, &fault, &write_fault);
 		if (fault || write_fault) {
 			hmm_vma_walk->last = addr;
-			pmd_migration_entry_wait(vma->vm_mm, pmdp);
+			pmd_migration_entry_wait(walk->mm, pmdp);
 			return -EBUSY;
 		}
 		return 0;
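Both migration-wait call sites can drop the local vma because the page-table walker already carries the mm being walked. Roughly, struct mm_walk in this kernel era looks like the following (paraphrased, per-level callback members trimmed), so walk->mm and vma->vm_mm name the same mm_struct during the walk:

struct mm_walk {
	/* ... per-level callback pointers elided ... */
	struct mm_struct	*mm;		/* mm being walked */
	struct vm_area_struct	*vma;		/* current vma; vma->vm_mm == mm */
	void			*private;	/* caller data, here struct hmm_vma_walk */
};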
@@ -657,11 +653,11 @@ static int hmm_vma_walk_pmd(pmd_t *pmdp,
 
 	if (pmd_devmap(pmd) || pmd_trans_huge(pmd)) {
 		/*
-		 * No need to take pmd_lock here, even if some other threads
+		 * No need to take pmd_lock here, even if some other thread
 		 * is splitting the huge pmd we will get that event through
 		 * mmu_notifier callback.
 		 *
-		 * So just read pmd value and check again its a transparent
+		 * So just read pmd value and check again it's a transparent
 		 * huge or device mapping one and compute corresponding pfn
 		 * values.
 		 */
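For context, the code that follows this comment (outside the hunk) performs the lockless recheck the comment describes, roughly along these lines (a paraphrase of the surrounding function, not part of this diff):

		pmd = pmd_read_atomic(pmdp);
		barrier();
		if (!pmd_devmap(pmd) && !pmd_trans_huge(pmd))
			goto again;	/* the pmd changed under us, start over */

		return hmm_vma_handle_pmd(walk, addr, end, &pfns[i], pmd);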
@@ -675,7 +671,7 @@ static int hmm_vma_walk_pmd(pmd_t *pmdp,
 	}
 
 	/*
-	 * We have handled all the valid case above ie either none, migration,
+	 * We have handled all the valid cases above ie either none, migration,
 	 * huge or transparent huge. At this point either it is a valid pmd
 	 * entry pointing to pte directory or it is a bad pmd that will not
 	 * recover.
@@ -795,10 +791,10 @@ static int hmm_vma_walk_hugetlb_entry(pte_t *pte, unsigned long hmask,
 	pte_t entry;
 	int ret = 0;
 
-	size = 1UL << huge_page_shift(h);
+	size = huge_page_size(h);
 	mask = size - 1;
 	if (range->page_shift != PAGE_SHIFT) {
-		/* Make sure we are looking at full page. */
+		/* Make sure we are looking at a full page. */
 		if (start & mask)
 			return -EINVAL;
 		if (end < (start + size))
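The size computation change is purely cosmetic: for an hstate h, the two expressions are equal by definition. Approximately, per include/linux/hugetlb.h of this era (paraphrased, not part of this diff):

static inline unsigned long huge_page_size(struct hstate *h)
{
	return (unsigned long)PAGE_SIZE << h->order;	/* == 1UL << huge_page_shift(h) */
}

static inline unsigned int huge_page_shift(struct hstate *h)
{
	return h->order + PAGE_SHIFT;
}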
@@ -809,8 +805,7 @@ static int hmm_vma_walk_hugetlb_entry(pte_t *pte, unsigned long hmask,
 		size = PAGE_SIZE;
 	}
 
-
-	ptl = huge_pte_lock(hstate_vma(vma), walk->mm, pte);
+	ptl = huge_pte_lock(hstate_vma(walk->vma), walk->mm, pte);
 	entry = huge_ptep_get(pte);
 
 	i = (start - range->start) >> range->page_shift;
@@ -859,7 +854,7 @@ static void hmm_pfns_clear(struct hmm_range *range,
  * @start: start virtual address (inclusive)
  * @end: end virtual address (exclusive)
  * @page_shift: expect page shift for the range
- * Returns 0 on success, -EFAULT if the address space is no longer valid
+ * Return: 0 on success, -EFAULT if the address space is no longer valid
  *
  * Track updates to the CPU page table see include/linux/hmm.h
  */