Commit 487b8372 authored by Chuck Lever, committed by Trond Myklebust

NFS: support EIOCBQUEUED return in direct read path

For async iocbs, the NFS direct read path should return EIOCBQUEUED and
call aio_complete when all the requested reads have finished.  The
synchronous part of the NFS direct read path behaves exactly as it did
before.
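
As a rough sketch of that contract (illustrative only; the my_* names below are made up, not the functions this patch touches), an aio-aware read path remembers the caller's iocb only for async requests, returns -EIOCBQUEUED right away, and reports the final byte count later through aio_complete():

static ssize_t my_direct_read(struct kiocb *iocb, char __user *buf,
			      size_t count, loff_t pos)
{
	struct my_dreq *dreq = my_dreq_alloc(count);

	if (!is_sync_kiocb(iocb))
		dreq->iocb = iocb;	/* completion callback will call aio_complete() */

	my_schedule_reads(dreq, buf, count, pos);	/* fire off the read RPCs */

	if (dreq->iocb)
		return -EIOCBQUEUED;	/* async: caller reaps the result via aio */

	return my_wait_for_reads(dreq);	/* sync: block, then return the byte count */
}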

Test plan:
aio-stress with "-O".  OraSim.
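
From userspace, the async path is exercised much like the aio-stress run above; a minimal libaio sketch (hypothetical file path and sizes, link with -laio) submits one O_DIRECT read and reaps its completion:

#define _GNU_SOURCE		/* for O_DIRECT */
#include <fcntl.h>
#include <libaio.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
	io_context_t ctx = 0;
	struct iocb cb, *cbs[1] = { &cb };
	struct io_event ev;
	void *buf;
	int fd;

	if (posix_memalign(&buf, 4096, 65536))
		return 1;
	fd = open("/mnt/nfs/testfile", O_RDONLY | O_DIRECT);	/* placeholder path */
	if (fd < 0 || io_setup(1, &ctx) < 0)
		return 1;

	io_prep_pread(&cb, fd, buf, 65536, 0);
	if (io_submit(ctx, 1, cbs) != 1)	/* kernel queues the read (EIOCBQUEUED internally) */
		return 1;
	if (io_getevents(ctx, 1, 1, &ev, NULL) != 1)	/* completion driven by aio_complete() */
		return 1;
	printf("read %ld bytes\n", (long) ev.res);

	io_destroy(ctx);
	close(fd);
	free(buf);
	return 0;
}
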
Signed-off-by: Chuck Lever <cel@netapp.com>
Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
parent 99514f8f
@@ -177,6 +177,7 @@ static struct nfs_direct_req *nfs_direct_read_alloc(size_t nbytes, size_t rsize)
 	kref_init(&dreq->kref);
 	init_waitqueue_head(&dreq->wait);
 	INIT_LIST_HEAD(&dreq->list);
+	dreq->iocb = NULL;
 	atomic_set(&dreq->count, 0);
 	atomic_set(&dreq->error, 0);
@@ -213,6 +214,10 @@ static struct nfs_direct_req *nfs_direct_read_alloc(size_t nbytes, size_t rsize)
  * We must hold a reference to all the pages in this direct read request
  * until the RPCs complete. This could be long *after* we are woken up in
  * nfs_direct_read_wait (for instance, if someone hits ^C on a slow server).
+ *
+ * In addition, synchronous I/O uses a stack-allocated iocb. Thus we
+ * can't trust the iocb is still valid here if this is a synchronous
+ * request. If the waiter is woken prematurely, the iocb is long gone.
  */
 static void nfs_direct_read_result(struct rpc_task *task, void *calldata)
 {
@@ -228,7 +233,13 @@ static void nfs_direct_read_result(struct rpc_task *task, void *calldata)
 	if (unlikely(atomic_dec_and_test(&dreq->complete))) {
 		nfs_free_user_pages(dreq->pages, dreq->npages, 1);
-		wake_up(&dreq->wait);
+		if (dreq->iocb) {
+			long res = atomic_read(&dreq->error);
+			if (!res)
+				res = atomic_read(&dreq->count);
+			aio_complete(dreq->iocb, res, 0);
+		} else
+			wake_up(&dreq->wait);
 		kref_put(&dreq->kref, nfs_direct_req_release);
 	}
 }
@@ -309,8 +320,13 @@ static void nfs_direct_read_schedule(struct nfs_direct_req *dreq, unsigned long
  */
 static ssize_t nfs_direct_read_wait(struct nfs_direct_req *dreq, int intr)
 {
-	int result = 0;
+	int result = -EIOCBQUEUED;
+
+	/* Async requests don't wait here */
+	if (dreq->iocb)
+		goto out;
 
+	result = 0;
 	if (intr) {
 		result = wait_event_interruptible(dreq->wait,
 					(atomic_read(&dreq->complete) == 0));
@@ -323,6 +339,7 @@ static ssize_t nfs_direct_read_wait(struct nfs_direct_req *dreq, int intr)
 	if (!result)
 		result = atomic_read(&dreq->count);
+out:
 	kref_put(&dreq->kref, nfs_direct_req_release);
 	return (ssize_t) result;
 }
@@ -343,6 +360,8 @@ static ssize_t nfs_direct_read(struct kiocb *iocb, unsigned long user_addr, size
 	dreq->npages = nr_pages;
 	dreq->inode = inode;
 	dreq->filp = iocb->ki_filp;
+	if (!is_sync_kiocb(iocb))
+		dreq->iocb = iocb;
 	nfs_add_stats(inode, NFSIOS_DIRECTREADBYTES, count);
 	rpc_clnt_sigmask(clnt, &oldset);
@@ -534,8 +553,6 @@ ssize_t nfs_file_direct_read(struct kiocb *iocb, char __user *buf, size_t count,
 		file->f_dentry->d_name.name,
 		(unsigned long) count, (long long) pos);
-	if (!is_sync_kiocb(iocb))
-		goto out;
 	if (count < 0)
 		goto out;
 	retval = -EFAULT;