Commit a5b729ea authored by Linus Torvalds

Merge branch 'work.aio' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs

Pull aio fixes from Al Viro:
 "Assorted AIO followups and fixes"

* 'work.aio' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs:
  eventpoll: switch to ->poll_mask
  aio: only return events requested in poll_mask() for IOCB_CMD_POLL
  eventfd: only return events requested in poll_mask()
  aio: mark __aio_sigset::sigmask const
parents 9215310c 11c5ad0e
...@@ -1661,7 +1661,7 @@ static int aio_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync, ...@@ -1661,7 +1661,7 @@ static int aio_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
if (mask && !(mask & req->events)) if (mask && !(mask & req->events))
return 0; return 0;
mask = file->f_op->poll_mask(file, req->events); mask = file->f_op->poll_mask(file, req->events) & req->events;
if (!mask) if (!mask)
return 0; return 0;
...@@ -1719,7 +1719,7 @@ static ssize_t aio_poll(struct aio_kiocb *aiocb, struct iocb *iocb) ...@@ -1719,7 +1719,7 @@ static ssize_t aio_poll(struct aio_kiocb *aiocb, struct iocb *iocb)
spin_lock_irq(&ctx->ctx_lock); spin_lock_irq(&ctx->ctx_lock);
spin_lock(&req->head->lock); spin_lock(&req->head->lock);
mask = req->file->f_op->poll_mask(req->file, req->events); mask = req->file->f_op->poll_mask(req->file, req->events) & req->events;
if (!mask) { if (!mask) {
__add_wait_queue(req->head, &req->wait); __add_wait_queue(req->head, &req->wait);
list_add_tail(&aiocb->ki_list, &ctx->active_reqs); list_add_tail(&aiocb->ki_list, &ctx->active_reqs);
......
...@@ -156,11 +156,11 @@ static __poll_t eventfd_poll_mask(struct file *file, __poll_t eventmask) ...@@ -156,11 +156,11 @@ static __poll_t eventfd_poll_mask(struct file *file, __poll_t eventmask)
count = READ_ONCE(ctx->count); count = READ_ONCE(ctx->count);
if (count > 0) if (count > 0)
events |= EPOLLIN; events |= (EPOLLIN & eventmask);
if (count == ULLONG_MAX) if (count == ULLONG_MAX)
events |= EPOLLERR; events |= EPOLLERR;
if (ULLONG_MAX - 1 > count) if (ULLONG_MAX - 1 > count)
events |= EPOLLOUT; events |= (EPOLLOUT & eventmask);
return events; return events;
} }
......
...@@ -922,13 +922,17 @@ static __poll_t ep_read_events_proc(struct eventpoll *ep, struct list_head *head ...@@ -922,13 +922,17 @@ static __poll_t ep_read_events_proc(struct eventpoll *ep, struct list_head *head
return 0; return 0;
} }
static __poll_t ep_eventpoll_poll(struct file *file, poll_table *wait) static struct wait_queue_head *ep_eventpoll_get_poll_head(struct file *file,
__poll_t eventmask)
{ {
struct eventpoll *ep = file->private_data; struct eventpoll *ep = file->private_data;
int depth = 0; return &ep->poll_wait;
}
/* Insert inside our poll wait queue */ static __poll_t ep_eventpoll_poll_mask(struct file *file, __poll_t eventmask)
poll_wait(file, &ep->poll_wait, wait); {
struct eventpoll *ep = file->private_data;
int depth = 0;
/* /*
* Proceed to find out if wanted events are really available inside * Proceed to find out if wanted events are really available inside
...@@ -968,7 +972,8 @@ static const struct file_operations eventpoll_fops = { ...@@ -968,7 +972,8 @@ static const struct file_operations eventpoll_fops = {
.show_fdinfo = ep_show_fdinfo, .show_fdinfo = ep_show_fdinfo,
#endif #endif
.release = ep_eventpoll_release, .release = ep_eventpoll_release,
.poll = ep_eventpoll_poll, .get_poll_head = ep_eventpoll_get_poll_head,
.poll_mask = ep_eventpoll_poll_mask,
.llseek = noop_llseek, .llseek = noop_llseek,
}; };
......
...@@ -109,7 +109,7 @@ struct iocb { ...@@ -109,7 +109,7 @@ struct iocb {
#undef IFLITTLE #undef IFLITTLE
struct __aio_sigset { struct __aio_sigset {
sigset_t __user *sigmask; const sigset_t __user *sigmask;
size_t sigsetsize; size_t sigsetsize;
}; };
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment