Commit 51da90fc authored by Jens Axboe, committed by Jens Axboe

[PATCH] ll_rw_blk: cleanup __make_request()

- Don't assign variables that are only used once.

- Kill spin_lock() prefetching, it's opportunistic at best.
Signed-off-by: Jens Axboe <axboe@suse.de>
parent cb78b285
...@@ -2885,17 +2885,11 @@ static void init_request_from_bio(struct request *req, struct bio *bio) ...@@ -2885,17 +2885,11 @@ static void init_request_from_bio(struct request *req, struct bio *bio)
static int __make_request(request_queue_t *q, struct bio *bio) static int __make_request(request_queue_t *q, struct bio *bio)
{ {
struct request *req; struct request *req;
int el_ret, rw, nr_sectors, cur_nr_sectors, barrier, err, sync; int el_ret, nr_sectors, barrier, err;
unsigned short prio; const unsigned short prio = bio_prio(bio);
sector_t sector; const int sync = bio_sync(bio);
sector = bio->bi_sector;
nr_sectors = bio_sectors(bio); nr_sectors = bio_sectors(bio);
cur_nr_sectors = bio_cur_sectors(bio);
prio = bio_prio(bio);
rw = bio_data_dir(bio);
sync = bio_sync(bio);
/* /*
* low level driver can indicate that it wants pages above a * low level driver can indicate that it wants pages above a
...@@ -2904,8 +2898,6 @@ static int __make_request(request_queue_t *q, struct bio *bio) ...@@ -2904,8 +2898,6 @@ static int __make_request(request_queue_t *q, struct bio *bio)
*/ */
blk_queue_bounce(q, &bio); blk_queue_bounce(q, &bio);
spin_lock_prefetch(q->queue_lock);
barrier = bio_barrier(bio); barrier = bio_barrier(bio);
if (unlikely(barrier) && (q->next_ordered == QUEUE_ORDERED_NONE)) { if (unlikely(barrier) && (q->next_ordered == QUEUE_ORDERED_NONE)) {
err = -EOPNOTSUPP; err = -EOPNOTSUPP;
...@@ -2953,9 +2945,9 @@ static int __make_request(request_queue_t *q, struct bio *bio) ...@@ -2953,9 +2945,9 @@ static int __make_request(request_queue_t *q, struct bio *bio)
* not touch req->buffer either... * not touch req->buffer either...
*/ */
req->buffer = bio_data(bio); req->buffer = bio_data(bio);
req->current_nr_sectors = cur_nr_sectors; req->current_nr_sectors = bio_cur_sectors(bio);
req->hard_cur_sectors = cur_nr_sectors; req->hard_cur_sectors = req->current_nr_sectors;
req->sector = req->hard_sector = sector; req->sector = req->hard_sector = bio->bi_sector;
req->nr_sectors = req->hard_nr_sectors += nr_sectors; req->nr_sectors = req->hard_nr_sectors += nr_sectors;
req->ioprio = ioprio_best(req->ioprio, prio); req->ioprio = ioprio_best(req->ioprio, prio);
drive_stat_acct(req, nr_sectors, 0); drive_stat_acct(req, nr_sectors, 0);
...@@ -2973,7 +2965,7 @@ static int __make_request(request_queue_t *q, struct bio *bio) ...@@ -2973,7 +2965,7 @@ static int __make_request(request_queue_t *q, struct bio *bio)
* Grab a free request. This is might sleep but can not fail. * Grab a free request. This is might sleep but can not fail.
* Returns with the queue unlocked. * Returns with the queue unlocked.
*/ */
req = get_request_wait(q, rw, bio); req = get_request_wait(q, bio_data_dir(bio), bio);
/* /*
* After dropping the lock and possibly sleeping here, our request * After dropping the lock and possibly sleeping here, our request
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment