Commit 8672d571 authored by Jens Axboe

[IDE] Use the block layer deferred softirq request completion

This patch makes IDE use the new blk_complete_request() interface.
There's still room for improvement, as __ide_end_request() really
could drop the lock after getting HWGROUP->rq (why does it need to
hold it in the first place? If ->rq access isn't serialized, we are
screwed anyway).
Signed-off-by: Jens Axboe <axboe@suse.de>
parent 1aea6434
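To make the new flow easier to follow, here is a minimal sketch of the deferred-completion pattern the patch adopts, assuming the 2.6-era block layer API used in the diff below (blk_queue_softirq_done(), blk_complete_request(), end_that_request_chunk(), and the two-argument end_that_request_last()). The my_* names are hypothetical illustrations, not part of the patch:

    #include <linux/blkdev.h>

    /* Deferred half: runs later in BLOCK_SOFTIRQ context, off the hard-IRQ path. */
    static void my_softirq_done(struct request *rq)
    {
    	request_queue_t *q = rq->q;

    	/* Completing the transferred data needs no queue lock... */
    	end_that_request_chunk(rq, rq->errors, rq->data_len);

    	/* ...but the final bookkeeping still does. */
    	spin_lock_irq(q->queue_lock);
    	end_that_request_last(rq, rq->errors);
    	spin_unlock_irq(q->queue_lock);
    }

    /* One-time registration at queue-init time. */
    static void my_init_queue(request_queue_t *q)
    {
    	blk_queue_softirq_done(q, my_softirq_done);
    }

    /* IRQ-side half: stash the outcome in the request and defer the rest. */
    static void my_end_request(struct request *rq, int uptodate, unsigned int nbytes)
    {
    	rq->errors = uptodate;    /* field overloaded to carry the status */
    	rq->data_len = nbytes;    /* byte count for the softirq side to complete */
    	blk_complete_request(rq); /* queue rq for BLOCK_SOFTIRQ completion */
    }

This is exactly the split the diff makes: __ide_end_request() becomes the IRQ-side half for fully-completed requests, ide_softirq_done() the deferred half, and partial completions keep the old direct path.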
drivers/ide/ide-io.c
@@ -55,9 +55,22 @@
 #include <asm/io.h>
 #include <asm/bitops.h>
 
+void ide_softirq_done(struct request *rq)
+{
+	request_queue_t *q = rq->q;
+
+	add_disk_randomness(rq->rq_disk);
+	end_that_request_chunk(rq, rq->errors, rq->data_len);
+
+	spin_lock_irq(q->queue_lock);
+	end_that_request_last(rq, rq->errors);
+	spin_unlock_irq(q->queue_lock);
+}
+
 int __ide_end_request(ide_drive_t *drive, struct request *rq, int uptodate,
 		      int nr_sectors)
 {
+	unsigned int nbytes;
 	int ret = 1;
 
 	BUG_ON(!(rq->flags & REQ_STARTED));
@@ -81,17 +94,28 @@ int __ide_end_request(ide_drive_t *drive, struct request *rq, int uptodate,
 		HWGROUP(drive)->hwif->ide_dma_on(drive);
 	}
 
-	if (!end_that_request_first(rq, uptodate, nr_sectors)) {
-		add_disk_randomness(rq->rq_disk);
-		blkdev_dequeue_request(rq);
-		HWGROUP(drive)->rq = NULL;
-		end_that_request_last(rq, uptodate);
-		ret = 0;
+	/*
+	 * For partial completions (or non fs/pc requests), use the regular
+	 * direct completion path.
+	 */
+	nbytes = nr_sectors << 9;
+	if (rq_all_done(rq, nbytes)) {
+		rq->errors = uptodate;
+		rq->data_len = nbytes;
+		blkdev_dequeue_request(rq);
+		HWGROUP(drive)->rq = NULL;
+		blk_complete_request(rq);
+		ret = 0;
+	} else {
+		if (!end_that_request_first(rq, uptodate, nr_sectors)) {
+			add_disk_randomness(rq->rq_disk);
+			if (blk_rq_tagged(rq))
+				blk_queue_end_tag(drive->queue, rq);
+			blkdev_dequeue_request(rq);
+			HWGROUP(drive)->rq = NULL;
+			end_that_request_last(rq, uptodate);
+			ret = 0;
+		}
 	}
 
 	return ret;
 }
 EXPORT_SYMBOL(__ide_end_request);
@@ -113,6 +137,10 @@ int ide_end_request (ide_drive_t *drive, int uptodate, int nr_sectors)
 	unsigned long flags;
 	int ret = 1;
 
+	/*
+	 * room for locking improvements here, the calls below don't
+	 * need the queue lock held at all
+	 */
 	spin_lock_irqsave(&ide_lock, flags);
 	rq = HWGROUP(drive)->rq;
drivers/ide/ide-probe.c
@@ -1011,6 +1011,8 @@ static int ide_init_queue(ide_drive_t *drive)
 	blk_queue_max_hw_segments(q, max_sg_entries);
 	blk_queue_max_phys_segments(q, max_sg_entries);
 
+	blk_queue_softirq_done(q, ide_softirq_done);
+
 	/* assign drive queue */
 	drive->queue = q;
include/linux/ide.h
@@ -1001,6 +1001,7 @@ extern int noautodma;
 
 extern int ide_end_request (ide_drive_t *drive, int uptodate, int nrsecs);
 extern int __ide_end_request (ide_drive_t *drive, struct request *rq, int uptodate, int nrsecs);
+extern void ide_softirq_done(struct request *rq);
 
 /*
  * This is used on exit from the driver to designate the next irq handler
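For reference, the block layer's half of this handshake (introduced by the companion patch that added blk_complete_request()) conceptually amounts to the following. This is a simplified sketch of what the BLOCK_SOFTIRQ handler does per request, not the actual block-core code, and it assumes the callback registered via blk_queue_softirq_done() is stored on the queue as softirq_done_fn:

    /*
     * Conceptual core of the BLOCK_SOFTIRQ handler: for each request that
     * blk_complete_request() queued on this CPU, invoke the completion
     * callback the driver registered with blk_queue_softirq_done().
     */
    static void sketch_blk_done_softirq(struct request *rq)
    {
    	rq->q->softirq_done_fn(rq);	/* for IDE: ide_softirq_done(rq) */
    }

The payoff is that the expensive parts of completion run in softirq context with interrupts enabled, shrinking the time spent in the hard-IRQ handler and under ide_lock.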