Commit 9965ed17 authored by Christoph Hellwig

fs: add new vfs_poll and file_can_poll helpers

These abstract out calls to the poll method in preparation for changes
in how we poll.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Reviewed-by: Darrick J. Wong <darrick.wong@oracle.com>
parent 6e8b704d
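
For context, a minimal sketch of how a caller is expected to use the two helpers introduced below instead of dereferencing file->f_op->poll directly. example_file_readable() is a hypothetical function written only for illustration; it is not part of this commit.

#include <linux/fs.h>
#include <linux/poll.h>

/* Hypothetical example: report whether @file is currently readable. */
static bool example_file_readable(struct file *file)
{
	struct poll_wqueues table;
	__poll_t mask;

	/* Replaces the old open-coded "if (!file->f_op->poll)" check. */
	if (!file_can_poll(file))
		return true;	/* no ->poll method: treated as always ready */

	poll_initwait(&table);
	/* Replaces the old direct call file->f_op->poll(file, &table.pt). */
	mask = vfs_poll(file, &table.pt);
	poll_freewait(&table);

	return mask & (EPOLLIN | EPOLLRDNORM);
}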
--- a/drivers/staging/comedi/drivers/serial2002.c
+++ b/drivers/staging/comedi/drivers/serial2002.c
@@ -113,7 +113,7 @@ static void serial2002_tty_read_poll_wait(struct file *f, int timeout)
 		long elapsed;
 		__poll_t mask;
 
-		mask = f->f_op->poll(f, &table.pt);
+		mask = vfs_poll(f, &table.pt);
 		if (mask & (EPOLLRDNORM | EPOLLRDBAND | EPOLLIN |
 			    EPOLLHUP | EPOLLERR)) {
 			break;
@@ -136,7 +136,7 @@ static int serial2002_tty_read(struct file *f, int timeout)
 	result = -1;
 	if (!IS_ERR(f)) {
-		if (f->f_op->poll) {
+		if (file_can_poll(f)) {
 			serial2002_tty_read_poll_wait(f, timeout);
 			if (kernel_read(f, &ch, 1, &pos) == 1)
--- a/drivers/vfio/virqfd.c
+++ b/drivers/vfio/virqfd.c
@@ -166,7 +166,7 @@ int vfio_virqfd_enable(void *opaque,
 	init_waitqueue_func_entry(&virqfd->wait, virqfd_wakeup);
 	init_poll_funcptr(&virqfd->pt, virqfd_ptable_queue_proc);
-	events = irqfd.file->f_op->poll(irqfd.file, &virqfd->pt);
+	events = vfs_poll(irqfd.file, &virqfd->pt);
 	/*
 	 * Check if there was an event already pending on the eventfd
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -208,7 +208,7 @@ int vhost_poll_start(struct vhost_poll *poll, struct file *file)
 	if (poll->wqh)
 		return 0;
-	mask = file->f_op->poll(file, &poll->table);
+	mask = vfs_poll(file, &poll->table);
 	if (mask)
 		vhost_poll_wakeup(&poll->wait, 0, 0, poll_to_key(mask));
 	if (mask & EPOLLERR) {
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -884,8 +884,7 @@ static __poll_t ep_item_poll(const struct epitem *epi, poll_table *pt,
 	pt->_key = epi->event.events;
 	if (!is_file_epoll(epi->ffd.file))
-		return epi->ffd.file->f_op->poll(epi->ffd.file, pt) &
-		       epi->event.events;
+		return vfs_poll(epi->ffd.file, pt) & epi->event.events;
 	ep = epi->ffd.file->private_data;
 	poll_wait(epi->ffd.file, &ep->poll_wait, pt);
@@ -2025,7 +2024,7 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
 	/* The target file descriptor must support poll */
 	error = -EPERM;
-	if (!tf.file->f_op->poll)
+	if (!file_can_poll(tf.file))
 		goto error_tgt_fput;
 	/* Check if EPOLLWAKEUP is allowed */
--- a/fs/select.c
+++ b/fs/select.c
@@ -502,14 +502,10 @@ static int do_select(int n, fd_set_bits *fds, struct timespec64 *end_time)
 					continue;
 				f = fdget(i);
 				if (f.file) {
-					const struct file_operations *f_op;
-					f_op = f.file->f_op;
-					mask = DEFAULT_POLLMASK;
-					if (f_op->poll) {
-						wait_key_set(wait, in, out,
-							     bit, busy_flag);
-						mask = (*f_op->poll)(f.file, wait);
-					}
+					wait_key_set(wait, in, out, bit,
+						     busy_flag);
+					mask = vfs_poll(f.file, wait);
 					fdput(f);
 					if ((mask & POLLIN_SET) && (in & bit)) {
 						res_in |= bit;
@@ -825,13 +821,10 @@ static inline __poll_t do_pollfd(struct pollfd *pollfd, poll_table *pwait,
 	/* userland u16 ->events contains POLL... bitmap */
 	filter = demangle_poll(pollfd->events) | EPOLLERR | EPOLLHUP;
-	mask = DEFAULT_POLLMASK;
-	if (f.file->f_op->poll) {
-		pwait->_key = filter | busy_flag;
-		mask = f.file->f_op->poll(f.file, pwait);
-		if (mask & busy_flag)
-			*can_busy_poll = true;
-	}
+	pwait->_key = filter | busy_flag;
+	mask = vfs_poll(f.file, pwait);
+	if (mask & busy_flag)
+		*can_busy_poll = true;
 	mask &= filter;	/* Mask out unneeded events. */
 	fdput(f);
--- a/include/linux/poll.h
+++ b/include/linux/poll.h
@@ -74,6 +74,18 @@ static inline void init_poll_funcptr(poll_table *pt, poll_queue_proc qproc)
 	pt->_key   = ~(__poll_t)0; /* all events enabled */
 }
 
+static inline bool file_can_poll(struct file *file)
+{
+	return file->f_op->poll;
+}
+
+static inline __poll_t vfs_poll(struct file *file, struct poll_table_struct *pt)
+{
+	if (unlikely(!file->f_op->poll))
+		return DEFAULT_POLLMASK;
+	return file->f_op->poll(file, pt);
+}
+
 struct poll_table_entry {
 	struct file *filp;
 	__poll_t key;
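
A note on the fallback behaviour: the open-coded DEFAULT_POLLMASK handling removed from fs/select.c above and net/9p/trans_fd.c below is preserved inside vfs_poll() itself, which reports a file without a ->poll method as always readable and writable. Around the time of this commit, linux/poll.h defines that mask roughly as follows (shown here for reference, unchanged by this patch):

/* Fallback mask reported by vfs_poll() when a file has no ->poll method. */
#define DEFAULT_POLLMASK (EPOLLIN | EPOLLOUT | EPOLLRDNORM | EPOLLWRNORM)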
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -3849,7 +3849,7 @@ static ssize_t memcg_write_event_control(struct kernfs_open_file *of,
 	if (ret)
 		goto out_put_css;
-	efile.file->f_op->poll(efile.file, &event->pt);
+	vfs_poll(efile.file, &event->pt);
 	spin_lock(&memcg->event_list_lock);
 	list_add(&event->list, &memcg->event_list);
--- a/net/9p/trans_fd.c
+++ b/net/9p/trans_fd.c
@@ -231,7 +231,7 @@ static void p9_conn_cancel(struct p9_conn *m, int err)
 static __poll_t
 p9_fd_poll(struct p9_client *client, struct poll_table_struct *pt, int *err)
 {
-	__poll_t ret, n;
+	__poll_t ret;
 	struct p9_trans_fd *ts = NULL;
 
 	if (client && client->status == Connected)
@@ -243,19 +243,9 @@ p9_fd_poll(struct p9_client *client, struct poll_table_struct *pt, int *err)
 		return EPOLLERR;
 	}
 
-	if (!ts->rd->f_op->poll)
-		ret = DEFAULT_POLLMASK;
-	else
-		ret = ts->rd->f_op->poll(ts->rd, pt);
-
-	if (ts->rd != ts->wr) {
-		if (!ts->wr->f_op->poll)
-			n = DEFAULT_POLLMASK;
-		else
-			n = ts->wr->f_op->poll(ts->wr, pt);
-		ret = (ret & ~EPOLLOUT) | (n & ~EPOLLIN);
-	}
-
+	ret = vfs_poll(ts->rd, pt);
+	if (ts->rd != ts->wr)
+		ret = (ret & ~EPOLLOUT) | (vfs_poll(ts->wr, pt) & ~EPOLLIN);
 	return ret;
 }
--- a/virt/kvm/eventfd.c
+++ b/virt/kvm/eventfd.c
@@ -397,7 +397,7 @@ kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args)
 	 * Check if there was an event already pending on the eventfd
 	 * before we registered, and trigger it as if we didn't miss it.
 	 */
-	events = f.file->f_op->poll(f.file, &irqfd->pt);
+	events = vfs_poll(f.file, &irqfd->pt);
 	if (events & EPOLLIN)
 		schedule_work(&irqfd->inject);