Commit 515f12b9 authored by Linus Torvalds

Merge tag 'for-linus-hmm' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma

Pull HMM fixes from Jason Gunthorpe:
 "Fix the locking around nouveau's use of the hmm_range_* APIs. It works
  correctly in the success case, but many of the the edge cases have
  missing unlocks or double unlocks.

  The diffstat is a bit big as Christoph did a comprehensive job of
  moving the obsolete API out of the core header and into the driver
  before fixing its flow, but the risk of regression from this code
  motion is low"

* tag 'for-linus-hmm' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma:
  nouveau: unlock mmap_sem on all errors from nouveau_range_fault
  nouveau: remove the block parameter to nouveau_range_fault
  mm/hmm: move hmm_vma_range_done and hmm_vma_fault to nouveau
  mm/hmm: always return EBUSY for invalid ranges in hmm_range_{fault,snapshot}
parents 2a11c76e de4ee728
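
For readers skimming the diff below, the locking contract the series settles on is: nouveau_range_fault() releases mmap_sem itself on every failure return, so its caller unlocks only on the success path. A minimal illustrative sketch of that convention (hypothetical caller and variable names, not lines from the patch):

	down_read(&mm->mmap_sem);
	ret = nouveau_range_fault(mirror, &range);
	if (ret == 0) {
		/* Success: mmap_sem is still held; consume the range. */
		up_read(&mm->mmap_sem);
	}
	/*
	 * On error, nouveau_range_fault() has already dropped mmap_sem,
	 * so no unlock may happen here; an unconditional up_read() at
	 * this point is exactly the old double-unlock bug.
	 */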
--- a/Documentation/vm/hmm.rst
+++ b/Documentation/vm/hmm.rst
@@ -237,7 +237,7 @@ The usage pattern is::
       ret = hmm_range_snapshot(&range);
       if (ret) {
           up_read(&mm->mmap_sem);
-          if (ret == -EAGAIN) {
+          if (ret == -EBUSY) {
             /*
              * No need to check hmm_range_wait_until_valid() return value
              * on retry we will get proper error with hmm_range_snapshot()
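
This hunk is part of the documented hmm_range_snapshot() retry loop in hmm.rst; under the new -EBUSY convention the whole loop works roughly like the following (condensed and paraphrased, not a verbatim quote of the document):

	again:
		down_read(&mm->mmap_sem);
		ret = hmm_range_snapshot(&range);
		if (ret) {
			up_read(&mm->mmap_sem);
			if (ret == -EBUSY) {
				/* Range was invalidated; wait for it to
				 * become valid again, then retry. */
				hmm_range_wait_until_valid(&range,
						HMM_RANGE_DEFAULT_TIMEOUT);
				goto again;
			}
			hmm_range_unregister(&range);
			return ret;
		}
		/* Snapshot in range.pfns[] is usable while the range stays valid. */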
--- a/drivers/gpu/drm/nouveau/nouveau_svm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_svm.c
@@ -475,6 +475,47 @@ nouveau_svm_fault_cache(struct nouveau_svm *svm,
 		fault->inst, fault->addr, fault->access);
 }
 
+static inline bool
+nouveau_range_done(struct hmm_range *range)
+{
+	bool ret = hmm_range_valid(range);
+
+	hmm_range_unregister(range);
+	return ret;
+}
+
+static int
+nouveau_range_fault(struct hmm_mirror *mirror, struct hmm_range *range)
+{
+	long ret;
+
+	range->default_flags = 0;
+	range->pfn_flags_mask = -1UL;
+
+	ret = hmm_range_register(range, mirror,
+				 range->start, range->end,
+				 PAGE_SHIFT);
+	if (ret) {
+		up_read(&range->vma->vm_mm->mmap_sem);
+		return (int)ret;
+	}
+
+	if (!hmm_range_wait_until_valid(range, HMM_RANGE_DEFAULT_TIMEOUT)) {
+		up_read(&range->vma->vm_mm->mmap_sem);
+		return -EAGAIN;
+	}
+
+	ret = hmm_range_fault(range, true);
+	if (ret <= 0) {
+		if (ret == 0)
+			ret = -EBUSY;
+		up_read(&range->vma->vm_mm->mmap_sem);
+		hmm_range_unregister(range);
+		return ret;
+	}
+	return 0;
+}
+
 static int
 nouveau_svm_fault(struct nvif_notify *notify)
 {
@@ -649,10 +690,10 @@ nouveau_svm_fault(struct nvif_notify *notify)
 		range.values = nouveau_svm_pfn_values;
 		range.pfn_shift = NVIF_VMM_PFNMAP_V0_ADDR_SHIFT;
 again:
-		ret = hmm_vma_fault(&svmm->mirror, &range, true);
+		ret = nouveau_range_fault(&svmm->mirror, &range);
 		if (ret == 0) {
 			mutex_lock(&svmm->mutex);
-			if (!hmm_vma_range_done(&range)) {
+			if (!nouveau_range_done(&range)) {
 				mutex_unlock(&svmm->mutex);
 				goto again;
 			}
@@ -666,8 +707,8 @@ nouveau_svm_fault(struct nvif_notify *notify)
 					       NULL);
 			svmm->vmm->vmm.object.client->super = false;
 			mutex_unlock(&svmm->mutex);
+			up_read(&svmm->mm->mmap_sem);
 		}
-		up_read(&svmm->mm->mmap_sem);
 
 		/* Cancel any faults in the window whose pages didn't manage
 		 * to keep their valid bit, or stay writeable when required.
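
The mutex_lock()/nouveau_range_done() pairing above is nouveau's instance of the driver-lock step in the HMM usage pattern: the faulted range may only be consumed if it is still valid once the driver's update lock is held. A condensed sketch of that step (svmm->mutex is nouveau's update lock; comments are editorial):

	mutex_lock(&svmm->mutex);		/* serializes against invalidation */
	if (!nouveau_range_done(&range)) {	/* range went invalid: it is now
						 * unregistered, so just refault */
		mutex_unlock(&svmm->mutex);
		goto again;
	}
	/* ... program the GPU page tables under svmm->mutex ... */
	mutex_unlock(&svmm->mutex);
	up_read(&svmm->mm->mmap_sem);		/* the success path's single unlock */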
--- a/include/linux/hmm.h
+++ b/include/linux/hmm.h
@@ -484,60 +484,6 @@ long hmm_range_dma_unmap(struct hmm_range *range,
  */
 #define HMM_RANGE_DEFAULT_TIMEOUT 1000
 
-/* This is a temporary helper to avoid merge conflict between trees. */
-static inline bool hmm_vma_range_done(struct hmm_range *range)
-{
-	bool ret = hmm_range_valid(range);
-
-	hmm_range_unregister(range);
-	return ret;
-}
-
-/* This is a temporary helper to avoid merge conflict between trees. */
-static inline int hmm_vma_fault(struct hmm_mirror *mirror,
-				struct hmm_range *range, bool block)
-{
-	long ret;
-
-	/*
-	 * With the old API the driver must set each individual entries with
-	 * the requested flags (valid, write, ...). So here we set the mask to
-	 * keep intact the entries provided by the driver and zero out the
-	 * default_flags.
-	 */
-	range->default_flags = 0;
-	range->pfn_flags_mask = -1UL;
-
-	ret = hmm_range_register(range, mirror,
-				 range->start, range->end,
-				 PAGE_SHIFT);
-	if (ret)
-		return (int)ret;
-
-	if (!hmm_range_wait_until_valid(range, HMM_RANGE_DEFAULT_TIMEOUT)) {
-		/*
-		 * The mmap_sem was taken by driver we release it here and
-		 * returns -EAGAIN which correspond to mmap_sem have been
-		 * drop in the old API.
-		 */
-		up_read(&range->vma->vm_mm->mmap_sem);
-		return -EAGAIN;
-	}
-
-	ret = hmm_range_fault(range, block);
-	if (ret <= 0) {
-		if (ret == -EBUSY || !ret) {
-			/* Same as above, drop mmap_sem to match old API. */
-			up_read(&range->vma->vm_mm->mmap_sem);
-			ret = -EBUSY;
-		} else if (ret == -EAGAIN)
-			ret = -EBUSY;
-		hmm_range_unregister(range);
-		return ret;
-	}
-	return 0;
-}
-
 /* Below are for HMM internal use only! Not to be used by device driver! */
 static inline void hmm_mm_init(struct mm_struct *mm)
 {
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -946,7 +946,7 @@ EXPORT_SYMBOL(hmm_range_unregister);
  * @range: range
  * Return: -EINVAL if invalid argument, -ENOMEM out of memory, -EPERM invalid
  *          permission (for instance asking for write and range is read only),
- *          -EAGAIN if you need to retry, -EFAULT invalid (ie either no valid
+ *          -EBUSY if you need to retry, -EFAULT invalid (ie either no valid
  *          vma or it is illegal to access that range), number of valid pages
  *          in range->pfns[] (from range start address).
  *
@@ -967,7 +967,7 @@ long hmm_range_snapshot(struct hmm_range *range)
 	do {
 		/* If range is no longer valid force retry. */
 		if (!range->valid)
-			return -EAGAIN;
+			return -EBUSY;
 
 		vma = find_vma(hmm->mm, start);
 		if (vma == NULL || (vma->vm_flags & device_vma))
@@ -1062,10 +1062,8 @@ long hmm_range_fault(struct hmm_range *range, bool block)
 
 	do {
 		/* If range is no longer valid force retry. */
-		if (!range->valid) {
-			up_read(&hmm->mm->mmap_sem);
-			return -EAGAIN;
-		}
+		if (!range->valid)
+			return -EBUSY;
 
 		vma = find_vma(hmm->mm, start);
 		if (vma == NULL || (vma->vm_flags & device_vma))
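
After this last hunk, hmm_range_fault() no longer drops mmap_sem on its own: an invalidated range now yields -EBUSY with the lock still held, matching hmm_range_snapshot(), and the caller alone decides how to recover. A minimal sketch of the resulting caller-side handling (hypothetical caller):

	down_read(&mm->mmap_sem);
	ret = hmm_range_fault(&range, true);
	if (ret == -EBUSY) {
		/* Unlike the old -EAGAIN path, mmap_sem is still held here. */
		up_read(&mm->mmap_sem);
		hmm_range_wait_until_valid(&range, HMM_RANGE_DEFAULT_TIMEOUT);
		/* re-take mmap_sem and retry, or give up */
	}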