Commit f695ca38 authored by Christoph Hellwig, committed by Jens Axboe

block: remove the request_queue argument from blk_queue_split

The queue can be trivially derived from the bio, so pass one less
argument.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent ed9b3196
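
What the change looks like at a call site: every caller already reaches the queue through the bio, so the helper can derive it internally. A minimal sketch of a driver's make_request function before and after (the function name is hypothetical, not from this commit):

static blk_qc_t example_make_request(struct request_queue *q, struct bio *bio)
{
        /* Old API: the queue had to be threaded through explicitly. */
        /* blk_queue_split(q, &bio); */

        /* New API: the helper recovers the queue itself via
         * (*bio)->bi_disk->queue, so callers pass one less argument.
         */
        blk_queue_split(&bio);

        /* ... driver-specific submission logic ... */
        return BLK_QC_T_NONE;
}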
@@ -283,20 +283,20 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,
 
 /**
  * __blk_queue_split - split a bio and submit the second half
- * @q:       [in] request queue pointer
  * @bio:     [in, out] bio to be split
  * @nr_segs: [out] number of segments in the first bio
  *
  * Split a bio into two bios, chain the two bios, submit the second half and
  * store a pointer to the first half in *@bio. If the second bio is still too
  * big it will be split by a recursive call to this function. Since this
- * function may allocate a new bio from @q->bio_split, it is the responsibility
- * of the caller to ensure that @q is only released after processing of the
+ * function may allocate a new bio from @bio->bi_disk->queue->bio_split, it is
+ * the responsibility of the caller to ensure that
+ * @bio->bi_disk->queue->bio_split is only released after processing of the
  * split bio has finished.
  */
-void __blk_queue_split(struct request_queue *q, struct bio **bio,
-		unsigned int *nr_segs)
+void __blk_queue_split(struct bio **bio, unsigned int *nr_segs)
 {
+	struct request_queue *q = (*bio)->bi_disk->queue;
 	struct bio *split = NULL;
 
 	switch (bio_op(*bio)) {
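
Per the kerneldoc above, the caller only ever sees the first fragment; a hedged sketch of the contract at a call site (mirroring the blk_mq_make_request() hunk further down):

        unsigned int nr_segs;

        /* On return, bio points at the first, limit-sized fragment; the
         * chained remainder has already been resubmitted by the helper,
         * and is re-split recursively if it is still too big.
         */
        __blk_queue_split(&bio, &nr_segs);

        /* nr_segs now holds the segment count of the first fragment. */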
@@ -345,20 +345,19 @@ void __blk_queue_split(struct request_queue *q, struct bio **bio,
 
 /**
  * blk_queue_split - split a bio and submit the second half
- * @q:   [in] request queue pointer
  * @bio: [in, out] bio to be split
  *
  * Split a bio into two bios, chains the two bios, submit the second half and
  * store a pointer to the first half in *@bio. Since this function may allocate
- * a new bio from @q->bio_split, it is the responsibility of the caller to
- * ensure that @q is only released after processing of the split bio has
- * finished.
+ * a new bio from @bio->bi_disk->queue->bio_split, it is the responsibility of
+ * the caller to ensure that @bio->bi_disk->queue->bio_split is only released
+ * after processing of the split bio has finished.
  */
-void blk_queue_split(struct request_queue *q, struct bio **bio)
+void blk_queue_split(struct bio **bio)
 {
 	unsigned int nr_segs;
 
-	__blk_queue_split(q, bio, &nr_segs);
+	__blk_queue_split(bio, &nr_segs);
 }
 EXPORT_SYMBOL(blk_queue_split);
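
The lifetime rule spelled out in both kerneldocs is the real subtlety: split bios draw their bvecs from the queue's bio_split mempool, so the queue must outlive every bio produced by the split. A hypothetical teardown ordering that respects this (the device type and field names are illustrative only):

static void example_driver_remove(struct example_dev *dev)
{
        /* Drain in-flight bios, including halves created by
         * blk_queue_split(), before the queue can go away ...
         */
        blk_mq_freeze_queue(dev->queue);

        /* ... and only then release the queue, and with it the
         * bio_split mempool the fragments were allocated from.
         */
        blk_cleanup_queue(dev->queue);
}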
......
@@ -2166,7 +2166,7 @@ blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 	blk_status_t ret;
 
 	blk_queue_bounce(q, &bio);
-	__blk_queue_split(q, &bio, &nr_segs);
+	__blk_queue_split(&bio, &nr_segs);
 
 	if (!bio_integrity_prep(bio))
 		goto queue_exit;
......
@@ -220,8 +220,7 @@ ssize_t part_timeout_show(struct device *, struct device_attribute *, char *);
 ssize_t part_timeout_store(struct device *, struct device_attribute *,
 				const char *, size_t);
 
-void __blk_queue_split(struct request_queue *q, struct bio **bio,
-		unsigned int *nr_segs);
+void __blk_queue_split(struct bio **bio, unsigned int *nr_segs);
 int ll_back_merge_fn(struct request *req, struct bio *bio,
 		unsigned int nr_segs);
 int ll_front_merge_fn(struct request *req, struct bio *bio,
......
@@ -1598,7 +1598,7 @@ blk_qc_t drbd_make_request(struct request_queue *q, struct bio *bio)
 	struct drbd_device *device = bio->bi_disk->private_data;
 	unsigned long start_jif;
 
-	blk_queue_split(q, &bio);
+	blk_queue_split(&bio);
 
 	start_jif = jiffies;
......
@@ -2434,7 +2434,7 @@ static blk_qc_t pkt_make_request(struct request_queue *q, struct bio *bio)
 	char b[BDEVNAME_SIZE];
 	struct bio *split;
 
-	blk_queue_split(q, &bio);
+	blk_queue_split(&bio);
 
 	pd = q->queuedata;
 	if (!pd) {
......
@@ -593,7 +593,7 @@ static blk_qc_t ps3vram_make_request(struct request_queue *q, struct bio *bio)
 
 	dev_dbg(&dev->core, "%s\n", __func__);
 
-	blk_queue_split(q, &bio);
+	blk_queue_split(&bio);
 
 	spin_lock_irq(&priv->lock);
 	busy = !bio_list_empty(&priv->list);
......
@@ -123,7 +123,7 @@ static blk_qc_t rsxx_make_request(struct request_queue *q, struct bio *bio)
 	struct rsxx_bio_meta *bio_meta;
 	blk_status_t st = BLK_STS_IOERR;
 
-	blk_queue_split(q, &bio);
+	blk_queue_split(&bio);
 
 	might_sleep();
......
@@ -527,7 +527,7 @@ static blk_qc_t mm_make_request(struct request_queue *q, struct bio *bio)
 		 (unsigned long long)bio->bi_iter.bi_sector,
 		 bio->bi_iter.bi_size);
 
-	blk_queue_split(q, &bio);
+	blk_queue_split(&bio);
 
 	spin_lock_irq(&card->lock);
 	*card->biotail = bio;
......
@@ -63,7 +63,7 @@ static blk_qc_t pblk_make_rq(struct request_queue *q, struct bio *bio)
 	 * constraint. Writes can be of arbitrary size.
 	 */
 	if (bio_data_dir(bio) == READ) {
-		blk_queue_split(q, &bio);
+		blk_queue_split(&bio);
 		pblk_submit_read(pblk, bio);
 	} else {
 		/* Prevent deadlock in the case of a modest LUN configuration
@@ -71,7 +71,7 @@ static blk_qc_t pblk_make_rq(struct request_queue *q, struct bio *bio)
 		 * leaves at least 256KB available for user I/O.
 		 */
 		if (pblk_get_secs(bio) > pblk_rl_max_io(&pblk->rl))
-			blk_queue_split(q, &bio);
+			blk_queue_split(&bio);
 
 		pblk_write_to_cache(pblk, bio, PBLK_IOTYPE_USER);
 	}
......
@@ -1776,7 +1776,7 @@ static blk_qc_t dm_process_bio(struct mapped_device *md,
 	 */
 	if (current->bio_list) {
 		if (is_abnormal_io(bio))
-			blk_queue_split(md->queue, &bio);
+			blk_queue_split(&bio);
 		else
 			dm_queue_split(md, ti, &bio);
 	}
......
@@ -475,7 +475,7 @@ static blk_qc_t md_make_request(struct request_queue *q, struct bio *bio)
 		return BLK_QC_T_NONE;
 	}
 
-	blk_queue_split(q, &bio);
+	blk_queue_split(&bio);
 
 	if (mddev == NULL || mddev->pers == NULL) {
 		bio_io_error(bio);
......
@@ -301,12 +301,11 @@ static blk_qc_t nvme_ns_head_make_request(struct request_queue *q,
 	int srcu_idx;
 
 	/*
-	 * The namespace might be going away and the bio might
-	 * be moved to a different queue via blk_steal_bios(),
-	 * so we need to use the bio_split pool from the original
-	 * queue to allocate the bvecs from.
+	 * The namespace might be going away and the bio might be moved to a
+	 * different queue via blk_steal_bios(), so we need to use the bio_split
+	 * pool from the original queue to allocate the bvecs from.
 	 */
-	blk_queue_split(q, &bio);
+	blk_queue_split(&bio);
 
 	srcu_idx = srcu_read_lock(&head->srcu);
 	ns = nvme_find_path(head);
......
@@ -878,7 +878,7 @@ dcssblk_make_request(struct request_queue *q, struct bio *bio)
 	unsigned long source_addr;
 	unsigned long bytes_done;
 
-	blk_queue_split(q, &bio);
+	blk_queue_split(&bio);
 
 	bytes_done = 0;
 	dev_info = bio->bi_disk->private_data;
......
@@ -191,7 +191,7 @@ static blk_qc_t xpram_make_request(struct request_queue *q, struct bio *bio)
 	unsigned long page_addr;
 	unsigned long bytes;
 
-	blk_queue_split(q, &bio);
+	blk_queue_split(&bio);
 
 	if ((bio->bi_iter.bi_sector & 7) != 0 ||
 	    (bio->bi_iter.bi_size & 4095) != 0)
......
@@ -871,7 +871,7 @@ extern void blk_rq_unprep_clone(struct request *rq);
 extern blk_status_t blk_insert_cloned_request(struct request_queue *q,
 				     struct request *rq);
 extern int blk_rq_append_bio(struct request *rq, struct bio **bio);
-extern void blk_queue_split(struct request_queue *, struct bio **);
+extern void blk_queue_split(struct bio **);
 extern int scsi_verify_blk_ioctl(struct block_device *, unsigned int);
 extern int scsi_cmd_blk_ioctl(struct block_device *, fmode_t,
 			      unsigned int, void __user *);
......