Commit 82cbdcd3 authored by Miklos Szeredi

fuse: cleanup fuse_dev_do_read()

 - locked list_add() + list_del_init() cancel out

 - common handling of the case when the request is ended here, in the read phase (see the simplified sketch after the diff)
Signed-off-by: Miklos Szeredi <mszeredi@suse.cz>
Reviewed-by: Ashish Samant <ashish.samant@oracle.com>
parent f377cb79
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -1237,7 +1237,7 @@ __releases(fiq->waitq.lock)
 static ssize_t fuse_dev_do_read(struct fuse_conn *fc, struct file *file,
 				struct fuse_copy_state *cs, size_t nbytes)
 {
-	int err;
+	ssize_t err;
 	struct fuse_iqueue *fiq = &fc->iq;
 	struct fuse_pqueue *fpq = &fc->pq;
 	struct fuse_req *req;
@@ -1280,8 +1280,6 @@ static ssize_t fuse_dev_do_read(struct fuse_conn *fc, struct file *file,
 	spin_unlock(&fiq->waitq.lock);
 
 	spin_lock(&fc->lock);
-	list_add(&req->list, &fpq->io);
-
 	in = &req->in;
 	reqsize = in->h.len;
 	/* If request is too large, reply with an error and restart the read */
@@ -1290,10 +1288,10 @@ static ssize_t fuse_dev_do_read(struct fuse_conn *fc, struct file *file,
 		/* SETXATTR is special, since it may contain too large data */
 		if (in->h.opcode == FUSE_SETXATTR)
 			req->out.h.error = -E2BIG;
-		list_del_init(&req->list);
 		request_end(fc, req);
 		goto restart;
 	}
+	list_add(&req->list, &fpq->io);
 	spin_unlock(&fc->lock);
 	cs->req = req;
 	err = fuse_copy_one(cs, &in->h, sizeof(in->h));
@@ -1304,30 +1302,32 @@ static ssize_t fuse_dev_do_read(struct fuse_conn *fc, struct file *file,
 	spin_lock(&fc->lock);
 	clear_bit(FR_LOCKED, &req->flags);
 	if (!fpq->connected) {
-		list_del_init(&req->list);
-		request_end(fc, req);
-		return -ENODEV;
+		err = -ENODEV;
+		goto out_end;
 	}
 	if (err) {
 		req->out.h.error = -EIO;
-		list_del_init(&req->list);
-		request_end(fc, req);
-		return err;
+		goto out_end;
 	}
 	if (!test_bit(FR_ISREPLY, &req->flags)) {
-		list_del_init(&req->list);
-		request_end(fc, req);
-	} else {
-		list_move_tail(&req->list, &fpq->processing);
-		set_bit(FR_SENT, &req->flags);
-		/* matches barrier in request_wait_answer() */
-		smp_mb__after_atomic();
-		if (test_bit(FR_INTERRUPTED, &req->flags))
-			queue_interrupt(fiq, req);
-		spin_unlock(&fc->lock);
+		err = reqsize;
+		goto out_end;
 	}
+	list_move_tail(&req->list, &fpq->processing);
+	set_bit(FR_SENT, &req->flags);
+	/* matches barrier in request_wait_answer() */
+	smp_mb__after_atomic();
+	if (test_bit(FR_INTERRUPTED, &req->flags))
+		queue_interrupt(fiq, req);
+	spin_unlock(&fc->lock);
+
 	return reqsize;
 
+out_end:
+	list_del_init(&req->list);
+	request_end(fc, req);
+	return err;
+
  err_unlock:
 	spin_unlock(&fiq->waitq.lock);
 	return err;
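
The sketch below is a simplified, user-space illustration of the control flow this patch introduces; it is not the kernel code, and the names demo_req, end_request() and demo_do_read() are invented for the example. It shows the two points from the commit message: the request is only added to the I/O list once the size check has passed (so the add/delete pair in the restart path disappears), and every path that ends the request during the read phase reaches the shared cleanup through a single out_end: label instead of repeating it inline.

/*
 * Hypothetical user-space sketch of the control flow introduced by the
 * patch above.  demo_req, end_request() and demo_do_read() are made-up
 * names for illustration; they are not the FUSE kernel API.
 */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct demo_req {
	bool on_list;		/* stands in for req->list being on fpq->io */
	int error;		/* stands in for req->out.h.error */
};

static void end_request(struct demo_req *req)
{
	/* shared cleanup, analogous to list_del_init() + request_end() */
	req->on_list = false;
	printf("request ended, error=%d\n", req->error);
}

static long demo_do_read(struct demo_req *req, bool connected, int copy_err,
			 bool wants_reply, long reqsize)
{
	long err;

	/* only queued once the "request too large" check has passed */
	req->on_list = true;

	if (!connected) {		/* device was released meanwhile */
		err = -ENODEV;
		goto out_end;
	}
	if (copy_err) {			/* copying the request failed */
		req->error = -EIO;
		err = copy_err;
		goto out_end;
	}
	if (!wants_reply) {		/* no reply expected: request ends now */
		err = reqsize;
		goto out_end;
	}

	/* normal case: the request stays queued and waits for the reply */
	return reqsize;

out_end:	/* single exit for every case where the request ends here */
	end_request(req);
	return err;
}

int main(void)
{
	struct demo_req req = { .on_list = false, .error = 0 };

	/* a request that expects no reply ends during the read phase */
	printf("returned %ld\n", demo_do_read(&req, true, 0, false, 128));
	return 0;
}

main() exercises the no-reply case, which in the real function previously duplicated the cleanup inline and now returns reqsize through the same common exit as the error paths.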