Commit b6b4aafc authored by Linus Torvalds

Merge tag 'block-5.5-20200103' of git://git.kernel.dk/linux-block

Pull block fixes from Jens Axboe:
 "Three fixes in here:

   - Fix for a missing split on default memory boundary mask (4G) (Ming)

   - Fix for multi-page read bio truncate (Ming)

   - Fix for null_blk zone close request handling (Damien)"

* tag 'block-5.5-20200103' of git://git.kernel.dk/linux-block:
  null_blk: Fix REQ_OP_ZONE_CLOSE handling
  block: fix splitting segments on boundary masks
  block: add bio_truncate to fix guard_bio_eod
parents bed72351 c7d776f8
--- a/block/bio.c
+++ b/block/bio.c
@@ -538,6 +538,45 @@ void zero_fill_bio_iter(struct bio *bio, struct bvec_iter start)
 }
 EXPORT_SYMBOL(zero_fill_bio_iter);
 
+void bio_truncate(struct bio *bio, unsigned new_size)
+{
+	struct bio_vec bv;
+	struct bvec_iter iter;
+	unsigned int done = 0;
+	bool truncated = false;
+
+	if (new_size >= bio->bi_iter.bi_size)
+		return;
+
+	if (bio_data_dir(bio) != READ)
+		goto exit;
+
+	bio_for_each_segment(bv, bio, iter) {
+		if (done + bv.bv_len > new_size) {
+			unsigned offset;
+
+			if (!truncated)
+				offset = new_size - done;
+			else
+				offset = 0;
+			zero_user(bv.bv_page, offset, bv.bv_len - offset);
+			truncated = true;
+		}
+		done += bv.bv_len;
+	}
+
+ exit:
+	/*
+	 * Don't touch bvec table here and make it really immutable, since
+	 * fs bio user has to retrieve all pages via bio_for_each_segment_all
+	 * in its .end_bio() callback.
+	 *
+	 * It is enough to truncate bio by updating .bi_size since we can make
+	 * correct bvec with the updated .bi_size for drivers.
+	 */
+	bio->bi_iter.bi_size = new_size;
+}
+
 /**
  * bio_put - release a reference to a bio
  * @bio: bio to release reference to
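For reference, the zeroing walk in bio_truncate() above can be modeled in plain userspace C. The sketch below is illustrative only: plain buffers stand in for the bvec pages, and truncate_read_segments() is a made-up name, not a kernel API. The segment straddling new_size is cleared from the cut point onward, every later segment is cleared in full, and only the size bookkeeping shrinks.

/*
 * Userspace model of bio_truncate()'s zeroing pass
 * (illustrative only, not kernel code).
 */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static void truncate_read_segments(unsigned char *segs[],
				   const unsigned lens[], unsigned nsegs,
				   unsigned new_size)
{
	unsigned done = 0;
	bool truncated = false;

	for (unsigned i = 0; i < nsegs; i++) {
		if (done + lens[i] > new_size) {
			/* first truncated segment: keep bytes before the
			 * cut; every later segment: zero entirely */
			unsigned offset = truncated ? 0 : new_size - done;

			memset(segs[i] + offset, 0, lens[i] - offset);
			truncated = true;
		}
		done += lens[i];
	}
}

int main(void)
{
	unsigned char a[] = "aaaa", b[] = "bbbb", c[] = "cccc";
	unsigned char *segs[] = { a, b, c };
	const unsigned lens[] = { 4, 4, 4 };

	truncate_read_segments(segs, lens, 3, 6);	/* keep 6 bytes */
	printf("%s|%s|%s\n", a, b, c);			/* prints aaaa|bb| */
	return 0;
}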
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -157,16 +157,14 @@ static inline unsigned get_max_io_size(struct request_queue *q,
 	return sectors & (lbs - 1);
 }
 
-static unsigned get_max_segment_size(const struct request_queue *q,
-				     unsigned offset)
+static inline unsigned get_max_segment_size(const struct request_queue *q,
+					    struct page *start_page,
+					    unsigned long offset)
 {
 	unsigned long mask = queue_segment_boundary(q);
 
-	/* default segment boundary mask means no boundary limit */
-	if (mask == BLK_SEG_BOUNDARY_MASK)
-		return queue_max_segment_size(q);
-
-	return min_t(unsigned long, mask - (mask & offset) + 1,
+	offset = mask & (page_to_phys(start_page) + offset);
+	return min_t(unsigned long, mask - offset + 1,
 		     queue_max_segment_size(q));
 }
 
@@ -201,7 +199,8 @@ static bool bvec_split_segs(const struct request_queue *q,
 	unsigned seg_size = 0;
 
 	while (len && *nsegs < max_segs) {
-		seg_size = get_max_segment_size(q, bv->bv_offset + total_len);
+		seg_size = get_max_segment_size(q, bv->bv_page,
+						bv->bv_offset + total_len);
 		seg_size = min(seg_size, len);
 
 		(*nsegs)++;
@@ -419,7 +418,8 @@ static unsigned blk_bvec_map_sg(struct request_queue *q,
 
 	while (nbytes > 0) {
 		unsigned offset = bvec->bv_offset + total;
-		unsigned len = min(get_max_segment_size(q, offset), nbytes);
+		unsigned len = min(get_max_segment_size(q, bvec->bv_page,
+					offset), nbytes);
 		struct page *page = bvec->bv_page;
 
 		/*
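The blk-merge change is easiest to see with concrete numbers. Below is a minimal userspace sketch of the fixed arithmetic (max_seg() is a stand-in, not the kernel function), assuming the default 4G boundary mask. The old code returned queue_max_segment_size() outright for the default mask, and otherwise measured only the in-page offset, so a multi-page segment whose pages straddled a 4G physical boundary was never split.

/*
 * Userspace model of the fixed get_max_segment_size() arithmetic
 * (illustrative only). Folding the page's physical address into
 * offset measures the room left before the boundary from the real
 * physical address, not from the in-page offset alone.
 */
#include <stdint.h>
#include <stdio.h>

static unsigned long max_seg(uint64_t phys, unsigned long offset,
			     unsigned long mask, unsigned long queue_max)
{
	unsigned long boundary_off = (unsigned long)(mask & (phys + offset));
	unsigned long room = mask - boundary_off + 1;	/* bytes to boundary */

	return room < queue_max ? room : queue_max;
}

int main(void)
{
	unsigned long mask = 0xffffffffUL;	/* default: 4G boundary */

	/*
	 * A page sitting 4K below the 4G line: only 4096 bytes fit
	 * before the boundary, even if the queue allows 64K segments.
	 * The pre-fix code returned 65536 here and missed the split.
	 */
	printf("%lu\n", max_seg(0xfffff000ULL, 0, mask, 65536));
	return 0;
}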
--- a/drivers/block/null_blk_zoned.c
+++ b/drivers/block/null_blk_zoned.c
@@ -186,7 +186,10 @@ static blk_status_t null_zone_mgmt(struct nullb_cmd *cmd, enum req_opf op,
 		if (zone->cond == BLK_ZONE_COND_FULL)
 			return BLK_STS_IOERR;
 
-		zone->cond = BLK_ZONE_COND_CLOSED;
+		if (zone->wp == zone->start)
+			zone->cond = BLK_ZONE_COND_EMPTY;
+		else
+			zone->cond = BLK_ZONE_COND_CLOSED;
 		break;
 	case REQ_OP_ZONE_FINISH:
 		if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
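The null_blk fix mirrors the ZBC/ZAC zone state machine: closing a zone with nothing written (write pointer still at the zone start) must leave it EMPTY rather than CLOSED. A small userspace model of just that transition follows; the types are illustrative, not the null_blk structures.

/* Illustrative model of the fixed REQ_OP_ZONE_CLOSE transition. */
#include <assert.h>

enum zone_cond { ZONE_EMPTY, ZONE_OPEN, ZONE_CLOSED, ZONE_FULL };

struct zone {
	unsigned long long start;	/* first sector of the zone */
	unsigned long long wp;		/* write pointer */
	enum zone_cond cond;
};

static void zone_close(struct zone *z)
{
	if (z->wp == z->start)
		z->cond = ZONE_EMPTY;	/* nothing written: back to empty */
	else
		z->cond = ZONE_CLOSED;	/* partially written: closed */
}

int main(void)
{
	struct zone untouched = { .start = 0, .wp = 0, .cond = ZONE_OPEN };
	struct zone partial   = { .start = 0, .wp = 8, .cond = ZONE_OPEN };

	zone_close(&untouched);
	zone_close(&partial);
	assert(untouched.cond == ZONE_EMPTY);
	assert(partial.cond == ZONE_CLOSED);
	return 0;
}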
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -3034,8 +3034,6 @@ static void end_bio_bh_io_sync(struct bio *bio)
 void guard_bio_eod(int op, struct bio *bio)
 {
 	sector_t maxsector;
-	struct bio_vec *bvec = bio_last_bvec_all(bio);
-	unsigned truncated_bytes;
 	struct hd_struct *part;
 
 	rcu_read_lock();
@@ -3061,28 +3059,7 @@ void guard_bio_eod(int op, struct bio *bio)
 	if (likely((bio->bi_iter.bi_size >> 9) <= maxsector))
 		return;
 
-	/* Uhhuh. We've got a bio that straddles the device size! */
-	truncated_bytes = bio->bi_iter.bi_size - (maxsector << 9);
-
-	/*
-	 * The bio contains more than one segment which spans EOD, just return
-	 * and let IO layer turn it into an EIO
-	 */
-	if (truncated_bytes > bvec->bv_len)
-		return;
-
-	/* Truncate the bio.. */
-	bio->bi_iter.bi_size -= truncated_bytes;
-	bvec->bv_len -= truncated_bytes;
-
-	/* ..and clear the end of the buffer for reads */
-	if (op == REQ_OP_READ) {
-		struct bio_vec bv;
-
-		mp_bvec_last_segment(bvec, &bv);
-		zero_user(bv.bv_page, bv.bv_offset + bv.bv_len,
-				truncated_bytes);
-	}
+	bio_truncate(bio, maxsector << 9);
 }
 
 static int submit_bh_wbc(int op, int op_flags, struct buffer_head *bh,
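With bio_truncate() doing the heavy lifting, guard_bio_eod() reduces to a bounds check plus one call. A toy model of the remaining control flow (plain integers instead of the kernel types; guard_eod() is a hypothetical name):

/* Illustrative model of the simplified guard_bio_eod() flow. */
#include <stdio.h>

static unsigned guard_eod(unsigned bio_size_bytes,
			  unsigned long long maxsector)
{
	if ((bio_size_bytes >> 9) <= maxsector)
		return bio_size_bytes;		/* fits the device */
	return (unsigned)(maxsector << 9);	/* clamp to device end */
}

int main(void)
{
	/* 8 sectors requested against a 6-sector partition: clamp to 3072. */
	printf("%u\n", guard_eod(8 << 9, 6));
	return 0;
}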
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -470,6 +470,7 @@ extern struct bio *bio_copy_user_iov(struct request_queue *,
 					       gfp_t);
 extern int bio_uncopy_user(struct bio *);
 void zero_fill_bio_iter(struct bio *bio, struct bvec_iter iter);
+void bio_truncate(struct bio *bio, unsigned new_size);
 
 static inline void zero_fill_bio(struct bio *bio)
 {