Commit 47f24598 authored by Jason Gunthorpe

mm/hmm: Hold on to the mmget for the lifetime of the range

Range functions like hmm_range_snapshot() and hmm_range_fault() call
find_vma(), which requires holding the mmget() and the mmap_sem for the mm.

Make this simpler for the callers by holding the mmget() inside the range
for the lifetime of the range. Other functions that accept a range should
only be called if the range is registered.
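
For callers this results in a pattern roughly like the sketch below. This
is an illustration only: it assumes the v5.3-era prototypes (where
hmm_range_register() takes a mirror and a page shift; older trees passed
the mm instead), and the wrapper function driver_snapshot() and its error
handling are hypothetical, not part of this patch:

        /*
         * Hypothetical driver-side sketch of the calling convention this
         * patch establishes. Registration pins the mm; mmap_sem is still
         * required around the actual walk.
         */
        static long driver_snapshot(struct hmm_mirror *mirror,
                                    struct hmm_range *range,
                                    unsigned long start, unsigned long end)
        {
                struct mm_struct *mm = mirror->hmm->mm;
                long ret;

                /* Registration now takes the mmget() and holds it until
                 * hmm_range_unregister(); no separate liveness check. */
                ret = hmm_range_register(range, mirror, start, end, PAGE_SHIFT);
                if (ret)
                        return ret;

                /* find_vma() inside the page walk needs mmap_sem. */
                down_read(&mm->mmap_sem);
                ret = hmm_range_fault(range, true);
                up_read(&mm->mmap_sem);

                /* ... on success, consume range->pfns under the driver's
                 * lock, retrying if range->valid was cleared ... */

                hmm_range_unregister(range);    /* drops the mmget() */
                return ret;
        }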

This has the side effect of directly preventing hmm_release() from
happening while a range is registered. That means hmm->dead can never
become true during the lifetime of the range, so remove dead and
hmm_mirror_mm_is_alive() entirely.
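
The exclusion falls out of the mm reference counting: hmm_release() is
HMM's mmu_notifier ->release() callback, and that callback is reached
only through the teardown chain sketched below (an illustration of
core-mm behavior, not part of this patch):

        /*
         * mmput()                    drops mm_users
         *   -> __mmput()             runs only once mm_users hits zero
         *        -> exit_mmap()
         *             -> mmu_notifier_release()
         *                  -> hmm_release()
         *
         * hmm_range_register() succeeds only after mmget_not_zero(), so
         * mm_users stays elevated until hmm_range_unregister() calls
         * mmput(); the chain above cannot start while a range exists.
         */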
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
Reviewed-by: John Hubbard <jhubbard@nvidia.com>
Reviewed-by: Ralph Campbell <rcampbell@nvidia.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Tested-by: Philip Yang <Philip.Yang@amd.com>
parent 157816f3
@@ -82,7 +82,6 @@
  * @mirrors_sem: read/write semaphore protecting the mirrors list
  * @wq: wait queue for user waiting on a range invalidation
  * @notifiers: count of active mmu notifiers
- * @dead: is the mm dead ?
  */
 struct hmm {
         struct mm_struct *mm;
@@ -95,7 +94,6 @@ struct hmm {
         wait_queue_head_t wq;
         struct rcu_head rcu;
         long notifiers;
-        bool dead;
 };
 
 /*
@@ -459,30 +457,6 @@ struct hmm_mirror {
 int hmm_mirror_register(struct hmm_mirror *mirror, struct mm_struct *mm);
 void hmm_mirror_unregister(struct hmm_mirror *mirror);
 
-/*
- * hmm_mirror_mm_is_alive() - test if mm is still alive
- * @mirror: the HMM mm mirror for which we want to lock the mmap_sem
- * Return: false if the mm is dead, true otherwise
- *
- * This is an optimization, it will not always accurately return false if the
- * mm is dead; i.e., there can be false negatives (process is being killed but
- * HMM is not yet informed of that). It is only intended to be used to optimize
- * out cases where the driver is about to do something time consuming and it
- * would be better to skip it if the mm is dead.
- */
-static inline bool hmm_mirror_mm_is_alive(struct hmm_mirror *mirror)
-{
-        struct mm_struct *mm;
-
-        if (!mirror || !mirror->hmm)
-                return false;
-        mm = READ_ONCE(mirror->hmm->mm);
-        if (mirror->hmm->dead || !mm)
-                return false;
-
-        return true;
-}
-
 /*
  * Please see Documentation/vm/hmm.rst for how to use the range API.
  */
...
@@ -67,7 +67,6 @@ static struct hmm *hmm_get_or_create(struct mm_struct *mm)
         mutex_init(&hmm->lock);
         kref_init(&hmm->kref);
         hmm->notifiers = 0;
-        hmm->dead = false;
         hmm->mm = mm;
 
         hmm->mmu_notifier.ops = &hmm_mmu_notifier_ops;
@@ -120,21 +119,16 @@ static void hmm_release(struct mmu_notifier *mn, struct mm_struct *mm)
 {
         struct hmm *hmm = container_of(mn, struct hmm, mmu_notifier);
         struct hmm_mirror *mirror;
-        struct hmm_range *range;
 
         /* Bail out if hmm is in the process of being freed */
         if (!kref_get_unless_zero(&hmm->kref))
                 return;
 
-        /* Report this HMM as dying. */
-        hmm->dead = true;
-
-        /* Wake-up everyone waiting on any range. */
-        mutex_lock(&hmm->lock);
-        list_for_each_entry(range, &hmm->ranges, list)
-                range->valid = false;
-        wake_up_all(&hmm->wq);
-        mutex_unlock(&hmm->lock);
+        /*
+         * Since hmm_range_register() holds the mmget() lock hmm_release() is
+         * prevented as long as a range exists.
+         */
+        WARN_ON(!list_empty_careful(&hmm->ranges));
 
         down_write(&hmm->mirrors_sem);
         mirror = list_first_entry_or_null(&hmm->mirrors, struct hmm_mirror,
@@ -903,8 +897,8 @@ int hmm_range_register(struct hmm_range *range,
         range->start = start;
         range->end = end;
 
-        /* Check if hmm_mm_destroy() was call. */
-        if (hmm->mm == NULL || hmm->dead)
+        /* Prevent hmm_release() from running while the range is valid */
+        if (!mmget_not_zero(hmm->mm))
                 return -EFAULT;
 
         /* Initialize range to track CPU page table updates. */
@@ -942,11 +936,12 @@ void hmm_range_unregister(struct hmm_range *range)
                 return;
 
         mutex_lock(&hmm->lock);
-        list_del(&range->list);
+        list_del_init(&range->list);
         mutex_unlock(&hmm->lock);
 
         /* Drop reference taken by hmm_range_register() */
         range->valid = false;
+        mmput(hmm->mm);
         hmm_put(hmm);
         range->hmm = NULL;
 }
@@ -974,10 +969,7 @@ long hmm_range_snapshot(struct hmm_range *range)
         struct vm_area_struct *vma;
         struct mm_walk mm_walk;
 
-        /* Check if hmm_mm_destroy() was call. */
-        if (hmm->mm == NULL || hmm->dead)
-                return -EFAULT;
-
+        lockdep_assert_held(&hmm->mm->mmap_sem);
         do {
                 /* If range is no longer valid force retry. */
                 if (!range->valid)
@@ -1072,9 +1064,7 @@ long hmm_range_fault(struct hmm_range *range, bool block)
         struct mm_walk mm_walk;
         int ret;
 
-        /* Check if hmm_mm_destroy() was call. */
-        if (hmm->mm == NULL || hmm->dead)
-                return -EFAULT;
-
+        lockdep_assert_held(&hmm->mm->mmap_sem);
         do {
                 /* If range is no longer valid force retry. */
...