Commit 41f98274 authored by Miklos Szeredi

fuse: rework abort

Splice fc->pending and fc->processing lists into a common kill list while
holding fc->lock.

By the time we release fc->lock, pending and processing lists are empty and
the io list contains only locked requests.
Signed-off-by: Miklos Szeredi <mszeredi@suse.cz>
Reviewed-by: Ashish Samant <ashish.samant@oracle.com>
parent b716d425
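
To make the splice-under-lock idea in the message above concrete, here is a minimal userspace sketch (not the kernel code): both queues are moved onto one local kill list while the lock is held, and the spliced requests are finished off afterwards. The demo_* names, the stripped-down list helpers and the pthread mutex are invented stand-ins for <linux/list.h> and fc->lock; the real fuse_abort_conn(), shown in the diff below, additionally handles the locked/unlocked requests on the io list and ends requests via request_end().

/*
 * Minimal userspace sketch of the "splice both queues into one kill list
 * while holding the lock" pattern from the commit message.  The demo_*
 * names and the stripped-down list helpers are invented for illustration
 * and stand in for the kernel's <linux/list.h> and for fc->lock; the real
 * fuse code differs in detail (e.g. it ends requests via request_end()).
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct list_head { struct list_head *prev, *next; };

#define LIST_HEAD_INIT(name) { &(name), &(name) }

static void list_init(struct list_head *h) { h->prev = h->next = h; }
static int list_empty(const struct list_head *h) { return h->next == h; }

static void list_add_tail(struct list_head *entry, struct list_head *head)
{
        entry->prev = head->prev; entry->next = head;
        head->prev->next = entry; head->prev = entry;
}

/* Move every entry of @list onto @head and leave @list empty. */
static void list_splice_init(struct list_head *list, struct list_head *head)
{
        if (!list_empty(list)) {
                struct list_head *first = list->next, *last = list->prev;

                first->prev = head; last->next = head->next;
                head->next->prev = last; head->next = first;
                list_init(list);
        }
}

struct demo_req { struct list_head list; int id; }; /* list is the first member */

struct demo_conn {
        pthread_mutex_t lock;
        int connected;
        struct list_head pending, processing;
};

/* Complete everything on the private kill list; no lock is needed here
 * because nothing else can reach these requests any more. */
static void demo_end_requests(struct list_head *to_end)
{
        while (!list_empty(to_end)) {
                struct demo_req *req = (struct demo_req *)to_end->next;

                req->list.prev->next = req->list.next;
                req->list.next->prev = req->list.prev;
                printf("aborted request %d\n", req->id);
                free(req);
        }
}

static void demo_abort_conn(struct demo_conn *dc)
{
        struct list_head to_end = LIST_HEAD_INIT(to_end);

        pthread_mutex_lock(&dc->lock);
        if (dc->connected) {
                dc->connected = 0;
                /* Both queues are emptied while the lock is held ... */
                list_splice_init(&dc->pending, &to_end);
                list_splice_init(&dc->processing, &to_end);
        }
        pthread_mutex_unlock(&dc->lock);

        /* ... so any later lock holder finds them empty; the spliced
         * requests are finished off from the local list. */
        demo_end_requests(&to_end);
}

int main(void)
{
        struct demo_conn dc = {
                .connected = 1,
                .pending = LIST_HEAD_INIT(dc.pending),
                .processing = LIST_HEAD_INIT(dc.processing),
        };

        pthread_mutex_init(&dc.lock, NULL);
        for (int i = 1; i <= 4; i++) {
                struct demo_req *req = calloc(1, sizeof(*req));

                req->id = i;
                list_add_tail(&req->list, i & 1 ? &dc.pending : &dc.processing);
        }
        demo_abort_conn(&dc);
        return 0;
}

As the commit message notes, once the splice has happened under the lock, anyone taking the lock afterwards finds the pending and processing lists already empty.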
@@ -2101,9 +2101,6 @@ static void end_polls(struct fuse_conn *fc)
  * asynchronous request and the tricky deadlock (see
  * Documentation/filesystems/fuse.txt).
  *
- * Request progression from one list to the next is prevented by fc->connected
- * being false.
- *
  * Aborting requests under I/O goes as follows: 1: Separate out unlocked
  * requests, they should be finished off immediately. Locked requests will be
  * finished after unlock; see unlock_request(). 2: Finish off the unlocked
@@ -2116,7 +2113,8 @@ void fuse_abort_conn(struct fuse_conn *fc)
         spin_lock(&fc->lock);
         if (fc->connected) {
                 struct fuse_req *req, *next;
-                LIST_HEAD(to_end);
+                LIST_HEAD(to_end1);
+                LIST_HEAD(to_end2);
 
                 fc->connected = 0;
                 fc->blocked = 0;
@@ -2126,19 +2124,20 @@ void fuse_abort_conn(struct fuse_conn *fc)
                         spin_lock(&req->waitq.lock);
                         set_bit(FR_ABORTED, &req->flags);
                         if (!test_bit(FR_LOCKED, &req->flags))
-                                list_move(&req->list, &to_end);
+                                list_move(&req->list, &to_end1);
                         spin_unlock(&req->waitq.lock);
                 }
-                while (!list_empty(&to_end)) {
-                        req = list_first_entry(&to_end, struct fuse_req, list);
+                fc->max_background = UINT_MAX;
+                flush_bg_queue(fc);
+                list_splice_init(&fc->pending, &to_end2);
+                list_splice_init(&fc->processing, &to_end2);
+                while (!list_empty(&to_end1)) {
+                        req = list_first_entry(&to_end1, struct fuse_req, list);
                         __fuse_get_request(req);
                         request_end(fc, req);
                         spin_lock(&fc->lock);
                 }
-                fc->max_background = UINT_MAX;
-                flush_bg_queue(fc);
-                end_requests(fc, &fc->pending);
-                end_requests(fc, &fc->processing);
+                end_requests(fc, &to_end2);
                 while (forget_pending(fc))
                         kfree(dequeue_forget(fc, 1, NULL));
                 end_polls(fc);