Commit ba85c702 authored by Andrea Arcangeli's avatar Andrea Arcangeli Committed by Linus Torvalds

userfaultfd: wake pending userfaults

This is an optimization but it's a userland visible one and it affects
the API.

The downside of this optimization is that if you call poll() and you
get POLLIN, read(ufd) may still return -EAGAIN. The blocked userfault
may be woken by a different thread, before read(ufd) comes
around. This in short means that poll() isn't really usable if the
userfaultfd is opened in blocking mode.

userfaults won't wait in "pending" state to be read anymore and any
UFFDIO_WAKE or similar operation that has the objective of waking
userfaults after their resolution will wake all blocked userfaults
for the resolved range, including those that haven't been read() by
userland yet.

The behavior of poll() becomes not standard, but this obviates the
need of "spurious" UFFDIO_WAKE and it lets the userland threads to
restart immediately without requiring an UFFDIO_WAKE. This is even
more significant in case of repeated faults on the same address from
multiple threads.

This optimization is justified by the measurement that the number of
spurious UFFDIO_WAKE calls accounts for between 5% and 10% of the total
userfaults for heavy workloads, so it's worth optimizing those away.
Signed-off-by: default avatarAndrea Arcangeli <aarcange@redhat.com>
Acked-by: default avatarPavel Emelyanov <xemul@parallels.com>
Cc: Sanidhya Kashyap <sanidhya.gatech@gmail.com>
Cc: zhang.zhanghailiang@huawei.com
Cc: "Kirill A. Shutemov" <kirill@shutemov.name>
Cc: Andres Lagar-Cavilla <andreslc@google.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Hugh Dickins <hughd@google.com>
Cc: Peter Feiner <pfeiner@google.com>
Cc: "Dr. David Alan Gilbert" <dgilbert@redhat.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: "Huangpeng (Peter)" <peter.huangpeng@huawei.com>
Signed-off-by: default avatarAndrew Morton <akpm@linux-foundation.org>
Signed-off-by: default avatarLinus Torvalds <torvalds@linux-foundation.org>
parent a9b85f94
...@@ -52,6 +52,10 @@ struct userfaultfd_ctx { ...@@ -52,6 +52,10 @@ struct userfaultfd_ctx {
struct userfaultfd_wait_queue { struct userfaultfd_wait_queue {
struct uffd_msg msg; struct uffd_msg msg;
wait_queue_t wq; wait_queue_t wq;
/*
* Only relevant when queued in fault_wqh and only used by the
* read operation to avoid reading the same userfault twice.
*/
bool pending; bool pending;
struct userfaultfd_ctx *ctx; struct userfaultfd_ctx *ctx;
}; };
...@@ -71,9 +75,6 @@ static int userfaultfd_wake_function(wait_queue_t *wq, unsigned mode, ...@@ -71,9 +75,6 @@ static int userfaultfd_wake_function(wait_queue_t *wq, unsigned mode,
uwq = container_of(wq, struct userfaultfd_wait_queue, wq); uwq = container_of(wq, struct userfaultfd_wait_queue, wq);
ret = 0; ret = 0;
/* don't wake the pending ones to avoid reads to block */
if (uwq->pending && !ACCESS_ONCE(uwq->ctx->released))
goto out;
/* len == 0 means wake all */ /* len == 0 means wake all */
start = range->start; start = range->start;
len = range->len; len = range->len;
...@@ -196,12 +197,14 @@ int handle_userfault(struct vm_area_struct *vma, unsigned long address, ...@@ -196,12 +197,14 @@ int handle_userfault(struct vm_area_struct *vma, unsigned long address,
struct mm_struct *mm = vma->vm_mm; struct mm_struct *mm = vma->vm_mm;
struct userfaultfd_ctx *ctx; struct userfaultfd_ctx *ctx;
struct userfaultfd_wait_queue uwq; struct userfaultfd_wait_queue uwq;
int ret;
BUG_ON(!rwsem_is_locked(&mm->mmap_sem)); BUG_ON(!rwsem_is_locked(&mm->mmap_sem));
ret = VM_FAULT_SIGBUS;
ctx = vma->vm_userfaultfd_ctx.ctx; ctx = vma->vm_userfaultfd_ctx.ctx;
if (!ctx) if (!ctx)
return VM_FAULT_SIGBUS; goto out;
BUG_ON(ctx->mm != mm); BUG_ON(ctx->mm != mm);
...@@ -214,7 +217,7 @@ int handle_userfault(struct vm_area_struct *vma, unsigned long address, ...@@ -214,7 +217,7 @@ int handle_userfault(struct vm_area_struct *vma, unsigned long address,
* caller of handle_userfault to release the mmap_sem. * caller of handle_userfault to release the mmap_sem.
*/ */
if (unlikely(ACCESS_ONCE(ctx->released))) if (unlikely(ACCESS_ONCE(ctx->released)))
return VM_FAULT_SIGBUS; goto out;
/* /*
* Check that we can return VM_FAULT_RETRY. * Check that we can return VM_FAULT_RETRY.
...@@ -240,15 +243,16 @@ int handle_userfault(struct vm_area_struct *vma, unsigned long address, ...@@ -240,15 +243,16 @@ int handle_userfault(struct vm_area_struct *vma, unsigned long address,
dump_stack(); dump_stack();
} }
#endif #endif
return VM_FAULT_SIGBUS; goto out;
} }
/* /*
* Handle nowait, not much to do other than tell it to retry * Handle nowait, not much to do other than tell it to retry
* and wait. * and wait.
*/ */
ret = VM_FAULT_RETRY;
if (flags & FAULT_FLAG_RETRY_NOWAIT) if (flags & FAULT_FLAG_RETRY_NOWAIT)
return VM_FAULT_RETRY; goto out;
/* take the reference before dropping the mmap_sem */ /* take the reference before dropping the mmap_sem */
userfaultfd_ctx_get(ctx); userfaultfd_ctx_get(ctx);
...@@ -268,21 +272,23 @@ int handle_userfault(struct vm_area_struct *vma, unsigned long address, ...@@ -268,21 +272,23 @@ int handle_userfault(struct vm_area_struct *vma, unsigned long address,
* through poll/read(). * through poll/read().
*/ */
__add_wait_queue(&ctx->fault_wqh, &uwq.wq); __add_wait_queue(&ctx->fault_wqh, &uwq.wq);
for (;;) {
set_current_state(TASK_KILLABLE); set_current_state(TASK_KILLABLE);
if (!uwq.pending || ACCESS_ONCE(ctx->released) ||
fatal_signal_pending(current))
break;
spin_unlock(&ctx->fault_wqh.lock); spin_unlock(&ctx->fault_wqh.lock);
if (likely(!ACCESS_ONCE(ctx->released) &&
!fatal_signal_pending(current))) {
wake_up_poll(&ctx->fd_wqh, POLLIN); wake_up_poll(&ctx->fd_wqh, POLLIN);
schedule(); schedule();
ret |= VM_FAULT_MAJOR;
spin_lock(&ctx->fault_wqh.lock);
} }
__remove_wait_queue(&ctx->fault_wqh, &uwq.wq);
__set_current_state(TASK_RUNNING); __set_current_state(TASK_RUNNING);
/* see finish_wait() comment for why list_empty_careful() */
if (!list_empty_careful(&uwq.wq.task_list)) {
spin_lock(&ctx->fault_wqh.lock);
list_del_init(&uwq.wq.task_list);
spin_unlock(&ctx->fault_wqh.lock); spin_unlock(&ctx->fault_wqh.lock);
}
/* /*
* ctx may go away after this if the userfault pseudo fd is * ctx may go away after this if the userfault pseudo fd is
...@@ -290,7 +296,8 @@ int handle_userfault(struct vm_area_struct *vma, unsigned long address, ...@@ -290,7 +296,8 @@ int handle_userfault(struct vm_area_struct *vma, unsigned long address,
*/ */
userfaultfd_ctx_put(ctx); userfaultfd_ctx_put(ctx);
return VM_FAULT_RETRY; out:
return ret;
} }
static int userfaultfd_release(struct inode *inode, struct file *file) static int userfaultfd_release(struct inode *inode, struct file *file)
...@@ -404,6 +411,12 @@ static unsigned int userfaultfd_poll(struct file *file, poll_table *wait) ...@@ -404,6 +411,12 @@ static unsigned int userfaultfd_poll(struct file *file, poll_table *wait)
case UFFD_STATE_WAIT_API: case UFFD_STATE_WAIT_API:
return POLLERR; return POLLERR;
case UFFD_STATE_RUNNING: case UFFD_STATE_RUNNING:
/*
* poll() never guarantees that read won't block.
* userfaults can be waken before they're read().
*/
if (unlikely(!(file->f_flags & O_NONBLOCK)))
return POLLERR;
spin_lock(&ctx->fault_wqh.lock); spin_lock(&ctx->fault_wqh.lock);
ret = find_userfault(ctx, NULL); ret = find_userfault(ctx, NULL);
spin_unlock(&ctx->fault_wqh.lock); spin_unlock(&ctx->fault_wqh.lock);
...@@ -834,11 +847,19 @@ static int userfaultfd_unregister(struct userfaultfd_ctx *ctx, ...@@ -834,11 +847,19 @@ static int userfaultfd_unregister(struct userfaultfd_ctx *ctx,
} }
/* /*
* This is mostly needed to re-wakeup those userfaults that were still * userfaultfd_wake is needed in case an userfault is in flight by the
* pending when userland wake them up the first time. We don't wake * time a UFFDIO_COPY (or other ioctl variants) completes. The page
* the pending one to avoid blocking reads to block, or non blocking * may be well get mapped and the page fault if repeated wouldn't lead
* read to return -EAGAIN, if used with POLLIN, to avoid userland * to a userfault anymore, but before scheduling in TASK_KILLABLE mode
* doubts on why POLLIN wasn't reliable. * handle_userfault() doesn't recheck the pagetables and it doesn't
* serialize against UFFDO_COPY (or other ioctl variants). Ultimately
* the knowledge of which pages are mapped is left to userland who is
* responsible for handling the race between read() userfaults and
* background UFFDIO_COPY (or other ioctl variants), if done by
* separate concurrent threads.
*
* userfaultfd_wake may be used in combination with the
* UFFDIO_*_MODE_DONTWAKE to wakeup userfaults in batches.
*/ */
static int userfaultfd_wake(struct userfaultfd_ctx *ctx, static int userfaultfd_wake(struct userfaultfd_ctx *ctx,
unsigned long arg) unsigned long arg)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment