Commit 3bec8a5a authored by Jens Axboe, committed by Linus Torvalds

[PATCH] cleanup bio_map_user and helper

Bart did this patch, I changed it a bit. Basically it cleans up the mapping
interface and adds a little helper to set up the request from the bio.

In addition, it fixes a long-standing bug where bio_map_user() would
call blk_queue_bounce() without the direction bit being set. Ouch.

 - Abstract out bio request preparation
 - Have bio_map_user() set data direction (fixes bug where blk_queue_bounce()
   is called without it set)
 - Split bio_map_user() in two
parent 2f2ba9da
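
To see what the cleanup buys callers, here is a minimal sketch of the usage
pattern after this patch. This is not code from the patch itself: the
enclosing function, error handling, and variables such as q, rq, bdev,
uaddr, len and reading are assumed, mirroring the sg_io() path in the diff
below.

        struct bio *bio;

        /* map the user buffer; bio_map_user() now sets the data direction itself */
        bio = bio_map_user(bdev, uaddr, len, reading);
        if (bio)
                /* copy segment counts, sector counts and buffer pointer into the request */
                blk_rq_bio_prep(q, rq, bio);

        /* ... issue the request, e.g. via blk_do_rq(q, bdev, rq) ... */

        if (bio)
                bio_unmap_user(bio, reading);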
drivers/block/ll_rw_blk.c
@@ -2196,6 +2196,21 @@ void end_request(struct request *req, int uptodate)
 	}
 }
 
+void blk_rq_bio_prep(request_queue_t *q, struct request *rq, struct bio *bio)
+{
+	/* first three bits are identical in rq->flags and bio->bi_rw */
+	rq->flags |= (bio->bi_rw & 7);
+
+	rq->nr_phys_segments = bio_phys_segments(q, bio);
+	rq->nr_hw_segments = bio_hw_segments(q, bio);
+	rq->current_nr_sectors = bio_cur_sectors(bio);
+	rq->hard_cur_sectors = rq->current_nr_sectors;
+	rq->hard_nr_sectors = rq->nr_sectors = bio_sectors(bio);
+	rq->buffer = bio_data(bio);
+
+	rq->bio = rq->biotail = bio;
+}
+
 int __init blk_dev_init(void)
 {
 	int total_ram = nr_free_pages() << (PAGE_SHIFT - 10);
@@ -2285,3 +2300,5 @@ EXPORT_SYMBOL(blk_stop_queue);
 EXPORT_SYMBOL(__blk_stop_queue);
 EXPORT_SYMBOL(blk_run_queue);
 EXPORT_SYMBOL(blk_run_queues);
+
+EXPORT_SYMBOL(blk_rq_bio_prep);
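
The "& 7" in blk_rq_bio_prep() leans on the bit layout noted in the in-code
comment. As a hedged illustration (the flag names here are the 2.5-era
definitions as best I recall, not quoted from this patch):

        /*
         * bio->bi_rw bits 0..2:  BIO_RW, BIO_RW_AHEAD, BIO_RW_BARRIER
         * rq->flags bits 0..2:   __REQ_RW, __REQ_RW_AHEAD, __REQ_BARRIER
         *
         * Since the three low bits line up, one masked OR copies the data
         * direction, read-ahead hint and barrier state in a single step:
         */
        rq->flags |= (bio->bi_rw & 7);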
......@@ -193,18 +193,6 @@ static int sg_io(request_queue_t *q, struct block_device *bdev,
* be a write to vm.
*/
bio = bio_map_user(bdev, uaddr, hdr.dxfer_len, reading);
if (bio) {
if (writing)
bio->bi_rw |= (1 << BIO_RW);
nr_sectors = (bio->bi_size + 511) >> 9;
if (bio->bi_size < hdr.dxfer_len) {
bio_endio(bio, bio->bi_size, 0);
bio_unmap_user(bio, 0);
bio = NULL;
}
}
/*
* if bio setup failed, fall back to slow approach
......@@ -243,21 +231,10 @@ static int sg_io(request_queue_t *q, struct block_device *bdev,
rq->hard_nr_sectors = rq->nr_sectors = nr_sectors;
rq->hard_cur_sectors = rq->current_nr_sectors = nr_sectors;
if (bio) {
/*
* subtle -- if bio_map_user() ended up bouncing a bio, it
* would normally disappear when its bi_end_io is run.
* however, we need it for the unmap, so grab an extra
* reference to it
*/
bio_get(bio);
rq->bio = rq->biotail = bio;
rq->nr_phys_segments = bio_phys_segments(q, bio);
rq->nr_hw_segments = bio_hw_segments(q, bio);
rq->current_nr_sectors = bio_cur_sectors(bio);
rq->hard_cur_sectors = rq->current_nr_sectors;
rq->buffer = bio_data(bio);
}
if (bio)
blk_rq_bio_prep(q, rq, bio);
rq->data_len = hdr.dxfer_len;
rq->data = buffer;
......@@ -268,8 +245,6 @@ static int sg_io(request_queue_t *q, struct block_device *bdev,
if (!rq->timeout)
rq->timeout = BLK_DEFAULT_TIMEOUT;
rq->bio = rq->biotail = bio;
start_time = jiffies;
/* ignore return value. All information is passed back to caller
......@@ -278,10 +253,8 @@ static int sg_io(request_queue_t *q, struct block_device *bdev,
*/
blk_do_rq(q, bdev, rq);
if (bio) {
if (bio)
bio_unmap_user(bio, reading);
bio_put(bio);
}
/* write to all output members */
hdr.status = rq->errors;
......
fs/bio.c
@@ -434,19 +434,9 @@ int bio_add_page(struct bio *bio, struct page *page, unsigned int len,
 	return len;
 }
 
-/**
- * bio_map_user - map user address into bio
- * @bdev: destination block device
- * @uaddr: start of user address
- * @len: length in bytes
- * @write_to_vm: bool indicating writing to pages or not
- *
- * Map the user space address into a bio suitable for io to a block
- * device. Caller should check the size of the returned bio, we might
- * not have mapped the entire range specified.
- */
-struct bio *bio_map_user(struct block_device *bdev, unsigned long uaddr,
-			 unsigned int len, int write_to_vm)
+static struct bio *__bio_map_user(struct block_device *bdev,
+				  unsigned long uaddr, unsigned int len,
+				  int write_to_vm)
 {
 	unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
 	unsigned long start = uaddr >> PAGE_SHIFT;
@@ -510,8 +500,11 @@ struct bio *bio_map_user(struct block_device *bdev, unsigned long uaddr,
 	kfree(pages);
 
 	/*
-	 * check if the mapped pages need bouncing for an isa host.
+	 * set data direction, and check if mapped pages need bouncing
 	 */
+	if (!write_to_vm)
+		bio->bi_rw |= (1 << BIO_RW);
+
 	blk_queue_bounce(q, &bio);
 	return bio;
 out:
@@ -521,17 +514,42 @@ struct bio *bio_map_user(struct block_device *bdev, unsigned long uaddr,
 }
 
 /**
- * bio_unmap_user - unmap a bio
- * @bio: the bio being unmapped
- * @write_to_vm: bool indicating whether pages were written to
- *
- * Unmap a bio previously mapped by bio_map_user(). The @write_to_vm
- * must be the same as passed into bio_map_user(). Must be called with
- * a process context.
+ * bio_map_user - map user address into bio
+ * @bdev: destination block device
+ * @uaddr: start of user address
+ * @len: length in bytes
+ * @write_to_vm: bool indicating writing to pages or not
  *
- * bio_unmap_user() may sleep.
+ * Map the user space address into a bio suitable for io to a block
+ * device.
  */
-void bio_unmap_user(struct bio *bio, int write_to_vm)
+struct bio *bio_map_user(struct block_device *bdev, unsigned long uaddr,
+			 unsigned int len, int write_to_vm)
 {
+	struct bio *bio;
+
+	bio = __bio_map_user(bdev, uaddr, len, write_to_vm);
+	if (bio) {
+		if (bio->bi_size < len) {
+			bio_endio(bio, bio->bi_size, 0);
+			bio_unmap_user(bio, 0);
+			return NULL;
+		}
+
+		/*
+		 * subtle -- if __bio_map_user() ended up bouncing a bio,
+		 * it would normally disappear when its bi_end_io is run.
+		 * however, we need it for the unmap, so grab an extra
+		 * reference to it
+		 */
+		bio_get(bio);
+	}
+
+	return bio;
+}
+
+static void __bio_unmap_user(struct bio *bio, int write_to_vm)
+{
 	struct bio_vec *bvec;
 	int i;
@@ -561,6 +579,23 @@ void bio_unmap_user(struct bio *bio, int write_to_vm)
 	bio_put(bio);
 }
 
+/**
+ * bio_unmap_user - unmap a bio
+ * @bio: the bio being unmapped
+ * @write_to_vm: bool indicating whether pages were written to
+ *
+ * Unmap a bio previously mapped by bio_map_user(). The @write_to_vm
+ * must be the same as passed into bio_map_user(). Must be called with
+ * a process context.
+ *
+ * bio_unmap_user() may sleep.
+ */
+void bio_unmap_user(struct bio *bio, int write_to_vm)
+{
+	__bio_unmap_user(bio, write_to_vm);
+	bio_put(bio);
+}
+
 /*
  * bio_set_pages_dirty() and bio_check_pages_dirty() are support functions
  * for performing direct-IO in BIOs.
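
One way to read the fs/bio.c split is as a reference-counting contract. The
sketch below is derived from the hunks above, not from separate
documentation:

        /*
         * bio_map_user():
         *   __bio_map_user()    allocates the bio (one reference; a bounced
         *                       bio would normally vanish when its bi_end_io
         *                       runs)
         *   bio_get()           takes an extra reference so the bio survives
         *                       completion and can still be unmapped
         *
         * bio_unmap_user():
         *   __bio_unmap_user()  releases the mapped pages, then bio_put()
         *                       drops one reference
         *   bio_put()           drops the extra reference taken in
         *                       bio_map_user(), freeing the bio
         */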
include/linux/blkdev.h
@@ -391,6 +391,8 @@ extern void blk_queue_free_tags(request_queue_t *);
 extern void blk_queue_invalidate_tags(request_queue_t *);
 extern void blk_congestion_wait(int rw, long timeout);
 
+extern void blk_rq_bio_prep(request_queue_t *, struct request *, struct bio *);
+
 #define MAX_PHYS_SEGMENTS	128
 #define MAX_HW_SEGMENTS		128
 #define MAX_SECTORS		255