Commit 133bb595 authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.dk/linux-block

Pull block updates from Jens Axboe:
 "This is a bit bigger than it should be, but I could (did) not want to
  send it off last week due to both wanting extra testing, and expecting
  a fix for the bounce regression as well.  In any case, this contains:

   - Fix for the blk-merge.c compilation warning on gcc 5.x from me.

   - A set of back/front SG gap merge fixes, from me and from Sagi.
     This ensures that we honor SG gapping for integrity payloads as
     well.

   - Two small fixes for null_blk from Matias, fixing a leak and a
     capacity propagation issue.

   - A blkcg fix from Tejun, fixing a NULL dereference.

   - A fast clone optimization from Ming, fixing a performance
     regression since the arbitrarily sized bios were introduced.

   - Also from Ming, a regression fix for bouncing IOs"
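
For context on the SG gap fixes below: a queue can advertise a virt boundary via queue_virt_boundary(), and two adjacent segments may only share a scatter-gather entry if the first ends on that boundary and the second starts on it. A minimal sketch of the test, with a made-up 4K mask standing in for the queue's mask (this is just the arithmetic, not kernel API):

/*
 * Minimal sketch of the SG gap rule, assuming an illustrative 4K
 * boundary in place of queue_virt_boundary(q).
 */
#include <stdbool.h>
#include <stdio.h>

#define VIRT_BOUNDARY_MASK 0xfffUL	/* e.g. an NVMe-style 4K boundary */

/* adjacent segments "gap" if the first does not end on the boundary
 * or the second does not start on it */
static bool seg_gap_to_prev(unsigned long prv_off, unsigned long prv_len,
			    unsigned long nxt_off)
{
	return ((prv_off + prv_len) & VIRT_BOUNDARY_MASK) ||
	       (nxt_off & VIRT_BOUNDARY_MASK);
}

int main(void)
{
	printf("%d\n", (int)seg_gap_to_prev(0, 512, 0));	/* 1: ends mid-boundary */
	printf("%d\n", (int)seg_gap_to_prev(3584, 512, 0));	/* 0: ends exactly on it */
	return 0;
}

The diffs below apply this same rule to integrity payloads, user iovecs, and front/back merges.
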

* 'for-linus' of git://git.kernel.dk/linux-block:
  block: fix bounce_end_io
  block: blk-merge: fast-clone bio when splitting rw bios
  block: blkg_destroy_all() should clear q->root_blkg and ->root_rl.blkg
  block: Copy a user iovec if it includes gaps
  block: Refuse adding appending a gapped integrity page to a bio
  block: Refuse request/bio merges with gaps in the integrity payload
  block: Check for gaps on front and back merges
  null_blk: fix wrong capacity when bs is not 512 bytes
  null_blk: fix memory leak on cleanup
  block: fix bogus compiler warnings in blk-merge.c
parents 590dca3a 99451879
diff --git a/block/bio-integrity.c b/block/bio-integrity.c
@@ -140,6 +140,11 @@ int bio_integrity_add_page(struct bio *bio, struct page *page,
 
 	iv = bip->bip_vec + bip->bip_vcnt;
 
+	if (bip->bip_vcnt &&
+	    bvec_gap_to_prev(bdev_get_queue(bio->bi_bdev),
+			     &bip->bip_vec[bip->bip_vcnt - 1], offset))
+		return 0;
+
 	iv->bv_page = page;
 	iv->bv_len = len;
 	iv->bv_offset = offset;
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
@@ -370,6 +370,9 @@ static void blkg_destroy_all(struct request_queue *q)
 		blkg_destroy(blkg);
 		spin_unlock(&blkcg->lock);
 	}
+
+	q->root_blkg = NULL;
+	q->root_rl.blkg = NULL;
 }
 
 /*
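
The blkcg fix is the classic stale-pointer pattern: teardown destroyed the root blkg but left q->root_blkg and q->root_rl.blkg pointing at it, and a later lookup through the stale pointer led to the NULL dereference mentioned in the pull message. The shape of the fix, reduced to a toy in plain C (every name here is illustrative, not the kernel's):

/* a teardown path must clear cached pointers to what it freed */
#include <stdlib.h>

struct group { int weight; };
struct queue { struct group *root_group; };

static void destroy_all_groups(struct queue *q)
{
	free(q->root_group);
	q->root_group = NULL;	/* the fix: don't leave a dangling pointer
				 * for later lookups to trip over */
}
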
diff --git a/block/blk-integrity.c b/block/blk-integrity.c
@@ -204,6 +204,9 @@ bool blk_integrity_merge_rq(struct request_queue *q, struct request *req,
 	    q->limits.max_integrity_segments)
 		return false;
 
+	if (integrity_req_gap_back_merge(req, next->bio))
+		return false;
+
 	return true;
 }
 EXPORT_SYMBOL(blk_integrity_merge_rq);
diff --git a/block/blk-map.c b/block/blk-map.c
@@ -9,6 +9,24 @@
 
 #include "blk.h"
 
+static bool iovec_gap_to_prv(struct request_queue *q,
+			     struct iovec *prv, struct iovec *cur)
+{
+	unsigned long prev_end;
+
+	if (!queue_virt_boundary(q))
+		return false;
+
+	if (prv->iov_base == NULL && prv->iov_len == 0)
+		/* prv is not set - don't check */
+		return false;
+
+	prev_end = (unsigned long)(prv->iov_base + prv->iov_len);
+
+	return (((unsigned long)cur->iov_base & queue_virt_boundary(q)) ||
+		prev_end & queue_virt_boundary(q));
+}
+
 int blk_rq_append_bio(struct request_queue *q, struct request *rq,
 		      struct bio *bio)
 {
@@ -67,7 +85,7 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
 	struct bio *bio;
 	int unaligned = 0;
 	struct iov_iter i;
-	struct iovec iov;
+	struct iovec iov, prv = {.iov_base = NULL, .iov_len = 0};
 
 	if (!iter || !iter->count)
 		return -EINVAL;
@@ -81,8 +99,12 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
 		/*
 		 * Keep going so we check length of all segments
 		 */
-		if (uaddr & queue_dma_alignment(q))
+		if ((uaddr & queue_dma_alignment(q)) ||
+		    iovec_gap_to_prv(q, &prv, &iov))
 			unaligned = 1;
+
+		prv.iov_base = iov.iov_base;
+		prv.iov_len = iov.iov_len;
 	}
 
 	if (unaligned || (q->dma_pad_mask & iter->count) || map_data)
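
The effect of the new check: when consecutive user iovecs straddle the queue's virt boundary, blk_rq_map_user_iov() now marks the request unaligned, which steers it to the copying path (bio_copy_user_iov) instead of a direct mapping the hardware could not describe. A standalone version of the same test, assuming a hard-coded 4K mask (iovec_gap_to_prv itself is kernel-internal):

#include <stdbool.h>
#include <stdio.h>
#include <sys/uio.h>

#define BOUNDARY_MASK 0xfffUL	/* illustrative 4K virt boundary */

static bool iovecs_gap(const struct iovec *prv, const struct iovec *cur)
{
	unsigned long prv_end = (unsigned long)prv->iov_base + prv->iov_len;

	return ((unsigned long)cur->iov_base & BOUNDARY_MASK) ||
	       (prv_end & BOUNDARY_MASK);
}

int main(void)
{
	struct iovec a = { .iov_base = (void *)0x10000, .iov_len = 512 };
	struct iovec b = { .iov_base = (void *)0x20000, .iov_len = 4096 };

	/* 1: a ends at 0x10200, mid-boundary, so b cannot follow directly */
	printf("%d\n", (int)iovecs_gap(&a, &b));
	return 0;
}
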
diff --git a/block/blk-merge.c b/block/blk-merge.c
@@ -66,36 +66,33 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,
 					 struct bio *bio,
 					 struct bio_set *bs)
 {
-	struct bio *split;
-	struct bio_vec bv, bvprv;
+	struct bio_vec bv, bvprv, *bvprvp = NULL;
 	struct bvec_iter iter;
 	unsigned seg_size = 0, nsegs = 0, sectors = 0;
-	int prev = 0;
 
 	bio_for_each_segment(bv, bio, iter) {
-		sectors += bv.bv_len >> 9;
-
-		if (sectors > queue_max_sectors(q))
+		if (sectors + (bv.bv_len >> 9) > queue_max_sectors(q))
 			goto split;
 
 		/*
 		 * If the queue doesn't support SG gaps and adding this
 		 * offset would create a gap, disallow it.
 		 */
-		if (prev && bvec_gap_to_prev(q, &bvprv, bv.bv_offset))
+		if (bvprvp && bvec_gap_to_prev(q, bvprvp, bv.bv_offset))
 			goto split;
 
-		if (prev && blk_queue_cluster(q)) {
+		if (bvprvp && blk_queue_cluster(q)) {
 			if (seg_size + bv.bv_len > queue_max_segment_size(q))
 				goto new_segment;
-			if (!BIOVEC_PHYS_MERGEABLE(&bvprv, &bv))
+			if (!BIOVEC_PHYS_MERGEABLE(bvprvp, &bv))
 				goto new_segment;
-			if (!BIOVEC_SEG_BOUNDARY(q, &bvprv, &bv))
+			if (!BIOVEC_SEG_BOUNDARY(q, bvprvp, &bv))
 				goto new_segment;
 
 			seg_size += bv.bv_len;
 			bvprv = bv;
-			prev = 1;
+			bvprvp = &bv;
+			sectors += bv.bv_len >> 9;
 			continue;
 		}
 new_segment:
@@ -104,23 +101,14 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,
 
 		nsegs++;
 		bvprv = bv;
-		prev = 1;
+		bvprvp = &bv;
 		seg_size = bv.bv_len;
+		sectors += bv.bv_len >> 9;
 	}
 
 	return NULL;
 split:
-	split = bio_clone_bioset(bio, GFP_NOIO, bs);
-
-	split->bi_iter.bi_size -= iter.bi_size;
-	bio->bi_iter = iter;
-
-	if (bio_integrity(bio)) {
-		bio_integrity_advance(bio, split->bi_iter.bi_size);
-		bio_integrity_trim(split, 0, bio_sectors(split));
-	}
-
-	return split;
+	return bio_split(bio, sectors, GFP_NOIO, bs);
 }
 
 void blk_queue_split(struct request_queue *q, struct bio **bio,
@@ -439,6 +427,11 @@ static inline int ll_new_hw_segment(struct request_queue *q,
 int ll_back_merge_fn(struct request_queue *q, struct request *req,
 		     struct bio *bio)
 {
+	if (req_gap_back_merge(req, bio))
+		return 0;
+	if (blk_integrity_rq(req) &&
+	    integrity_req_gap_back_merge(req, bio))
+		return 0;
 	if (blk_rq_sectors(req) + bio_sectors(bio) >
 	    blk_rq_get_max_sectors(req)) {
 		req->cmd_flags |= REQ_NOMERGE;
@@ -457,6 +450,12 @@ int ll_back_merge_fn(struct request_queue *q, struct request *req,
 int ll_front_merge_fn(struct request_queue *q, struct request *req,
 		      struct bio *bio)
 {
+
+	if (req_gap_front_merge(req, bio))
+		return 0;
+	if (blk_integrity_rq(req) &&
+	    integrity_req_gap_front_merge(req, bio))
+		return 0;
 	if (blk_rq_sectors(req) + bio_sectors(bio) >
 	    blk_rq_get_max_sectors(req)) {
 		req->cmd_flags |= REQ_NOMERGE;
@@ -483,14 +482,6 @@ static bool req_no_special_merge(struct request *req)
 	return !q->mq_ops && req->special;
 }
 
-static int req_gap_to_prev(struct request *req, struct bio *next)
-{
-	struct bio *prev = req->biotail;
-
-	return bvec_gap_to_prev(req->q, &prev->bi_io_vec[prev->bi_vcnt - 1],
-				next->bi_io_vec[0].bv_offset);
-}
-
 static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
 				struct request *next)
 {
@@ -505,7 +496,7 @@ static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
 	if (req_no_special_merge(req) || req_no_special_merge(next))
 		return 0;
 
-	if (req_gap_to_prev(req, next->bio))
+	if (req_gap_back_merge(req, next->bio))
 		return 0;
 
 	/*
@@ -713,10 +704,6 @@ bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
 	    !blk_write_same_mergeable(rq->bio, bio))
 		return false;
 
-	/* Only check gaps if the bio carries data */
-	if (bio_has_data(bio) && req_gap_to_prev(rq, bio))
-		return false;
-
 	return true;
 }
 
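
Two things to note in the blk_bio_segment_split() rewrite above: the split path now goes through bio_split(), whose fast clone shares the parent's bvec table instead of duplicating it (the performance fix), and 'sectors' is only advanced once a bvec has been accepted, so it always names a legal split point. The accounting in miniature, with made-up lengths and limit:

/* toy version of the new split accounting */
#include <stdio.h>

int main(void)
{
	unsigned int bvec_len[3] = { 4096, 4096, 4096 };	/* bytes per segment */
	unsigned int max_sectors = 17;	/* queue limit in 512-byte sectors */
	unsigned int sectors = 0;
	int i;

	for (i = 0; i < 3; i++) {
		unsigned int s = bvec_len[i] >> 9;	/* 8 sectors each */

		if (sectors + s > max_sectors)
			break;	/* split before the bvec that would overflow */
		sectors += s;
	}
	printf("split at %u sectors\n", sectors);	/* 16, not 17 or 24 */
	return 0;
}
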
diff --git a/block/bounce.c b/block/bounce.c
@@ -128,12 +128,14 @@ static void bounce_end_io(struct bio *bio, mempool_t *pool)
 	struct bio *bio_orig = bio->bi_private;
 	struct bio_vec *bvec, *org_vec;
 	int i;
+	int start = bio_orig->bi_iter.bi_idx;
 
 	/*
 	 * free up bounce indirect pages used
 	 */
 	bio_for_each_segment_all(bvec, bio, i) {
-		org_vec = bio_orig->bi_io_vec + i;
+		org_vec = bio_orig->bi_io_vec + i + start;
+
 		if (bvec->bv_page == org_vec->bv_page)
 			continue;
 
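
The bounce fix is pure indexing: bio_for_each_segment_all() walks the bounce clone from segment 0, but once the original bio has been split or advanced, its first live segment is bi_iter.bi_idx, so clone segment i corresponds to original segment i + start. In miniature, with a made-up index:

#include <stdio.h>

int main(void)
{
	int start = 2;	/* stands in for bio_orig->bi_iter.bi_idx */
	int i;

	for (i = 0; i < 3; i++)	/* i: segment index in the bounce clone */
		printf("clone seg %d <-> orig seg %d\n", i, i + start);
	return 0;
}
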
diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c
@@ -406,6 +406,22 @@ static struct blk_mq_ops null_mq_ops = {
 	.complete	= null_softirq_done_fn,
 };
 
+static void cleanup_queue(struct nullb_queue *nq)
+{
+	kfree(nq->tag_map);
+	kfree(nq->cmds);
+}
+
+static void cleanup_queues(struct nullb *nullb)
+{
+	int i;
+
+	for (i = 0; i < nullb->nr_queues; i++)
+		cleanup_queue(&nullb->queues[i]);
+
+	kfree(nullb->queues);
+}
+
 static void null_del_dev(struct nullb *nullb)
 {
 	list_del_init(&nullb->list);
@@ -415,6 +431,7 @@ static void null_del_dev(struct nullb *nullb)
 	if (queue_mode == NULL_Q_MQ)
 		blk_mq_free_tag_set(&nullb->tag_set);
 	put_disk(nullb->disk);
+	cleanup_queues(nullb);
 	kfree(nullb);
 }
 
@@ -459,22 +476,6 @@ static int setup_commands(struct nullb_queue *nq)
 	return 0;
 }
 
-static void cleanup_queue(struct nullb_queue *nq)
-{
-	kfree(nq->tag_map);
-	kfree(nq->cmds);
-}
-
-static void cleanup_queues(struct nullb *nullb)
-{
-	int i;
-
-	for (i = 0; i < nullb->nr_queues; i++)
-		cleanup_queue(&nullb->queues[i]);
-
-	kfree(nullb->queues);
-}
-
 static int setup_queues(struct nullb *nullb)
 {
 	nullb->queues = kzalloc(submit_queues * sizeof(struct nullb_queue),
@@ -588,8 +589,7 @@ static int null_add_dev(void)
 	blk_queue_physical_block_size(nullb->q, bs);
 
 	size = gb * 1024 * 1024 * 1024ULL;
-	sector_div(size, bs);
-	set_capacity(disk, size);
+	set_capacity(disk, size >> 9);
 
 	disk->flags |= GENHD_FL_EXT_DEVT | GENHD_FL_SUPPRESS_PARTITION_INFO;
 	disk->major = null_major;
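
On the capacity fix: set_capacity() takes 512-byte sectors, but the old code handed it a count of bs-sized blocks, so any block size other than 512 skewed the reported size. Worked through with made-up module parameters (gb=250, bs=4096):

#include <stdio.h>

int main(void)
{
	unsigned long long gb = 250, bs = 4096;
	unsigned long long size = gb * 1024 * 1024 * 1024ULL;

	/* old: a count of 4K blocks passed where sectors were expected,
	 * so the disk showed up 8x too small */
	printf("old: %llu 'sectors' = %llu GiB\n",
	       size / bs, (size / bs * 512) >> 30);	/* ~31 GiB */
	/* new: bytes converted to 512-byte sectors directly */
	printf("new: %llu sectors = %llu GiB\n",
	       size >> 9, ((size >> 9) * 512) >> 30);	/* 250 GiB */
	return 0;
}
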
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
@@ -1368,6 +1368,26 @@ static inline bool bvec_gap_to_prev(struct request_queue *q,
 		((bprv->bv_offset + bprv->bv_len) & queue_virt_boundary(q));
 }
 
+static inline bool bio_will_gap(struct request_queue *q, struct bio *prev,
+				struct bio *next)
+{
+	if (!bio_has_data(prev))
+		return false;
+
+	return bvec_gap_to_prev(q, &prev->bi_io_vec[prev->bi_vcnt - 1],
+				next->bi_io_vec[0].bv_offset);
+}
+
+static inline bool req_gap_back_merge(struct request *req, struct bio *bio)
+{
+	return bio_will_gap(req->q, req->biotail, bio);
+}
+
+static inline bool req_gap_front_merge(struct request *req, struct bio *bio)
+{
+	return bio_will_gap(req->q, bio, req->bio);
+}
+
 struct work_struct;
 int kblockd_schedule_work(struct work_struct *work);
 int kblockd_schedule_delayed_work(struct delayed_work *dwork, unsigned long delay);
@@ -1494,6 +1514,26 @@ queue_max_integrity_segments(struct request_queue *q)
 	return q->limits.max_integrity_segments;
 }
 
+static inline bool integrity_req_gap_back_merge(struct request *req,
+						struct bio *next)
+{
+	struct bio_integrity_payload *bip = bio_integrity(req->bio);
+	struct bio_integrity_payload *bip_next = bio_integrity(next);
+
+	return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1],
+				bip_next->bip_vec[0].bv_offset);
+}
+
+static inline bool integrity_req_gap_front_merge(struct request *req,
+						 struct bio *bio)
+{
+	struct bio_integrity_payload *bip = bio_integrity(bio);
+	struct bio_integrity_payload *bip_next = bio_integrity(req->bio);
+
+	return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1],
+				bip_next->bip_vec[0].bv_offset);
+}
+
 #else /* CONFIG_BLK_DEV_INTEGRITY */
 
 struct bio;
@@ -1560,6 +1600,16 @@ static inline bool blk_integrity_is_initialized(struct gendisk *g)
 {
 	return 0;
 }
+static inline bool integrity_req_gap_back_merge(struct request *req,
+						struct bio *next)
+{
+	return false;
+}
+static inline bool integrity_req_gap_front_merge(struct request *req,
+						 struct bio *bio)
+{
+	return false;
+}
 
 #endif /* CONFIG_BLK_DEV_INTEGRITY */
 
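
The blkdev.h helpers encode the direction of each merge: a back merge appends the bio, so the gap test runs between the request's last segment (biotail) and the bio's first; a front merge prepends it, so the roles swap. The integrity variants apply the identical rule to the bip_vec table. A compact demonstration with a stand-in mask and segment type (not kernel API):

#include <stdbool.h>
#include <stdio.h>

#define MASK 0xfffUL	/* illustrative virt boundary */

struct seg { unsigned long off, len; };

static bool gap(struct seg prev, struct seg next)
{
	return ((prev.off + prev.len) & MASK) || (next.off & MASK);
}

int main(void)
{
	struct seg req_first = { 0, 4096 }, req_last = { 0, 4096 };
	struct seg bio_first = { 0, 4096 }, bio_last = { 256, 512 };

	/* back merge appends the bio: request tail vs bio head */
	printf("back:  %d\n", (int)gap(req_last, bio_first));	/* 0: mergeable */
	/* front merge prepends the bio: bio tail vs request head */
	printf("front: %d\n", (int)gap(bio_last, req_first));	/* 1: gap, refuse */
	return 0;
}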