Commit 83c4388a authored by Paolo 'Blaisorblade' Giarrusso, committed by Linus Torvalds

[PATCH] uml: fix ubd deadlock on SMP

From: BlaisorBlade <blaisorblade_spam@yahoo.it>, Chris Wright <chrisw@osdl.org>

Avoid deadlocking onto the request lock in the UBD driver, i.e.  don't lock
the queue spinlock when called from the request function.

In detail:

Rename ubd_finish() to __ubd_finish() and remove ubd_io_lock from it.  Add
wrapper, ubd_finish(), which grabs lock before calling __ubd_finish().  Update
do_ubd_request to use the lock free __ubd_finish() to avoid deadlock.  Also,
apparently prepare_request is called with ubd_io_lock held, so remove locks
there.
Signed-off-by: Chris Wright <chrisw@osdl.org>
Signed-off-by: Paolo 'Blaisorblade' Giarrusso <blaisorblade_spam@yahoo.it>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 5317e9c0
...@@ -396,14 +396,13 @@ int thread_fd = -1; ...@@ -396,14 +396,13 @@ int thread_fd = -1;
*/ */
int intr_count = 0; int intr_count = 0;
static void ubd_finish(struct request *req, int error) /* call ubd_finish if you need to serialize */
static void __ubd_finish(struct request *req, int error)
{ {
int nsect; int nsect;
if(error){ if(error){
spin_lock(&ubd_io_lock);
end_request(req, 0); end_request(req, 0);
spin_unlock(&ubd_io_lock);
return; return;
} }
nsect = req->current_nr_sectors; nsect = req->current_nr_sectors;
...@@ -412,11 +411,17 @@ static void ubd_finish(struct request *req, int error) ...@@ -412,11 +411,17 @@ static void ubd_finish(struct request *req, int error)
req->errors = 0; req->errors = 0;
req->nr_sectors -= nsect; req->nr_sectors -= nsect;
req->current_nr_sectors = 0; req->current_nr_sectors = 0;
spin_lock(&ubd_io_lock);
end_request(req, 1); end_request(req, 1);
}
static inline void ubd_finish(struct request *req, int error)
{
spin_lock(&ubd_io_lock);
__ubd_finish(req, error);
spin_unlock(&ubd_io_lock); spin_unlock(&ubd_io_lock);
} }
/* Called without ubd_io_lock held */
static void ubd_handler(void) static void ubd_handler(void)
{ {
struct io_thread_req req; struct io_thread_req req;
...@@ -965,6 +970,7 @@ static int prepare_mmap_request(struct ubd *dev, int fd, __u64 offset, ...@@ -965,6 +970,7 @@ static int prepare_mmap_request(struct ubd *dev, int fd, __u64 offset,
return(0); return(0);
} }
/* Called with ubd_io_lock held */
static int prepare_request(struct request *req, struct io_thread_req *io_req) static int prepare_request(struct request *req, struct io_thread_req *io_req)
{ {
struct gendisk *disk = req->rq_disk; struct gendisk *disk = req->rq_disk;
...@@ -977,9 +983,7 @@ static int prepare_request(struct request *req, struct io_thread_req *io_req) ...@@ -977,9 +983,7 @@ static int prepare_request(struct request *req, struct io_thread_req *io_req)
if((rq_data_dir(req) == WRITE) && !dev->openflags.w){ if((rq_data_dir(req) == WRITE) && !dev->openflags.w){
printk("Write attempted on readonly ubd device %s\n", printk("Write attempted on readonly ubd device %s\n",
disk->disk_name); disk->disk_name);
spin_lock(&ubd_io_lock);
end_request(req, 0); end_request(req, 0);
spin_unlock(&ubd_io_lock);
return(1); return(1);
} }
...@@ -1029,6 +1033,7 @@ static int prepare_request(struct request *req, struct io_thread_req *io_req) ...@@ -1029,6 +1033,7 @@ static int prepare_request(struct request *req, struct io_thread_req *io_req)
return(0); return(0);
} }
/* Called with ubd_io_lock held */
static void do_ubd_request(request_queue_t *q) static void do_ubd_request(request_queue_t *q)
{ {
struct io_thread_req io_req; struct io_thread_req io_req;
...@@ -1040,7 +1045,7 @@ static void do_ubd_request(request_queue_t *q) ...@@ -1040,7 +1045,7 @@ static void do_ubd_request(request_queue_t *q)
err = prepare_request(req, &io_req); err = prepare_request(req, &io_req);
if(!err){ if(!err){
do_io(&io_req); do_io(&io_req);
ubd_finish(req, io_req.error); __ubd_finish(req, io_req.error);
} }
} }
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment