Commit cd2c7545 authored by Changheun Lee, committed by Jens Axboe

bio: limit bio max size

bio size can grow up to 4GB when multi-page bvec is enabled,
but sometimes this leads to inefficient behavior.
In the case of a large chunk of direct I/O - a 32MB chunk read in user space -
all pages for the 32MB are merged into a single bio structure if the pages'
physical addresses are contiguous. This delays the submit until the merge
is complete. The bio max size should be limited to a proper size.

When a 32MB chunk read with the direct I/O option comes from userspace,
the current kernel behavior in the do_direct_IO() loop is as shown in the
timeline below.

 | bio merge for 32MB: a total of 8,192 pages are merged.
 | total elapsed time is over 2ms.
 |------------------ ... ----------------------->|
                                                 | 8,192 pages merged a bio.
                                                 | at this time, first bio submit is done.
                                                 | 1 bio is split to 32 read request and issue.
                                                 |--------------->
                                                  |--------------->
                                                   |--------------->
                                                              ......
                                                                   |--------------->
                                                                    |--------------->|
                          total 19ms elapsed to complete 32MB read done from device. |

If the bio max size is limited to 1MB, the behavior changes as shown below
(the arithmetic behind these numbers is spelled out after the timeline).

 | bio merge for 1MB: 256 pages are merged into each bio.
 | a total of 32 bios will be made.
 | total elapsed time is still over 2ms, the same as before,
 | but the first bio submit happens much earlier, at about 100us.
 |--->|--->|--->|---> ... -->|--->|--->|--->|--->|
      | 256 pages merged a bio.
      | at this time, first bio submit is done.
      | and 1 read request is issued for 1 bio.
      |--------------->
           |--------------->
                |--------------->
                                      ......
                                                 |--------------->
                                                  |--------------->|
        total 17ms elapsed to complete 32MB read done from device. |
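
Spelling out the numbers used in the two timelines (using the 4KB page size
implied by the 8,192-page figure):

  32MB / 4KB per page  = 8,192 pages in a single uncapped bio
  1MB / 4KB per page   =   256 pages per bio
  32MB / 1MB per bio   =    32 bios for the whole read

So the first bio can be submitted after roughly 1/32 of the merging work,
which is consistent with the ~100us first-submit time shown above.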

As a result, the read request is issued sooner when the bio max size is
limited. With the current multi-page bvec behavior a very large bio can be
created, which delays the issue of the first I/O request.
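
For reference, the 32MB direct I/O read described above can be driven from
userspace with a plain O_DIRECT read. This is only a minimal sketch: the
device path, the 4KB buffer alignment and the lack of error reporting are
placeholders/assumptions, and whether the read goes through do_direct_IO()
or the block device's own direct I/O path depends on the target.

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
        const size_t chunk = 32UL << 20;        /* one 32MB chunk read */
        void *buf;
        int fd;

        /* O_DIRECT buffers must be aligned to the device's logical block
         * size; 4KB is assumed here. */
        if (posix_memalign(&buf, 4096, chunk))
                return 1;

        fd = open("/dev/example", O_RDONLY | O_DIRECT); /* placeholder path */
        if (fd < 0)
                return 1;

        /* A single large read() drives the kernel's direct I/O code, where
         * the user pages are merged into bios as shown in the timelines. */
        if (read(fd, buf, chunk) < 0)
                return 1;

        close(fd);
        free(buf);
        return 0;
}
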
Signed-off-by: Changheun Lee <nanich.lee@samsung.com>
Reviewed-by: Bart Van Assche <bvanassche@acm.org>
Link: https://lore.kernel.org/r/20210503095203.29076-1-nanich.lee@samsung.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent c646790a
@@ -255,6 +255,13 @@ void bio_init(struct bio *bio, struct bio_vec *table,
 }
 EXPORT_SYMBOL(bio_init);
 
+unsigned int bio_max_size(struct bio *bio)
+{
+        struct block_device *bdev = bio->bi_bdev;
+
+        return bdev ? bdev->bd_disk->queue->limits.bio_max_bytes : UINT_MAX;
+}
+
 /**
  * bio_reset - reinitialize a bio
  * @bio: bio to reset
@@ -866,7 +873,7 @@ bool __bio_try_merge_page(struct bio *bio, struct page *page,
                 struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt - 1];
 
                 if (page_is_mergeable(bv, page, len, off, same_page)) {
-                        if (bio->bi_iter.bi_size > UINT_MAX - len) {
+                        if (bio->bi_iter.bi_size > bio_max_size(bio) - len) {
                                 *same_page = false;
                                 return false;
                         }
@@ -995,6 +1002,7 @@ static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
 {
         unsigned short nr_pages = bio->bi_max_vecs - bio->bi_vcnt;
         unsigned short entries_left = bio->bi_max_vecs - bio->bi_vcnt;
+        unsigned int bytes_left = bio_max_size(bio) - bio->bi_iter.bi_size;
         struct bio_vec *bv = bio->bi_io_vec + bio->bi_vcnt;
         struct page **pages = (struct page **)bv;
         bool same_page = false;
@@ -1010,7 +1018,8 @@ static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
         BUILD_BUG_ON(PAGE_PTRS_PER_BVEC < 2);
         pages += entries_left * (PAGE_PTRS_PER_BVEC - 1);
 
-        size = iov_iter_get_pages(iter, pages, LONG_MAX, nr_pages, &offset);
+        size = iov_iter_get_pages(iter, pages, bytes_left, nr_pages,
+                                  &offset);
         if (unlikely(size <= 0))
                 return size ? size : -EFAULT;
 
...
@@ -31,6 +31,7 @@ EXPORT_SYMBOL_GPL(blk_queue_rq_timeout);
  */
 void blk_set_default_limits(struct queue_limits *lim)
 {
+        lim->bio_max_bytes = UINT_MAX;
         lim->max_segments = BLK_MAX_SEGMENTS;
         lim->max_discard_segments = 1;
         lim->max_integrity_segments = 0;
@@ -139,6 +140,10 @@ void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_sectors)
                                  limits->logical_block_size >> SECTOR_SHIFT);
         limits->max_sectors = max_sectors;
 
+        if (check_shl_overflow(max_sectors, SECTOR_SHIFT,
+                               &limits->bio_max_bytes))
+                limits->bio_max_bytes = UINT_MAX;
+
         q->backing_dev_info->io_pages = max_sectors >> (PAGE_SHIFT - 9);
 }
 EXPORT_SYMBOL(blk_queue_max_hw_sectors);
...
@@ -106,6 +106,8 @@ static inline void *bio_data(struct bio *bio)
         return NULL;
 }
 
+extern unsigned int bio_max_size(struct bio *bio);
+
 /**
  * bio_full - check if the bio is full
  * @bio: bio to check
@@ -119,7 +121,7 @@ static inline bool bio_full(struct bio *bio, unsigned len)
         if (bio->bi_vcnt >= bio->bi_max_vecs)
                 return true;
 
-        if (bio->bi_iter.bi_size > UINT_MAX - len)
+        if (bio->bi_iter.bi_size > bio_max_size(bio) - len)
                 return true;
 
         return false;
...
@@ -327,6 +327,8 @@ enum blk_bounce {
 };
 
 struct queue_limits {
+        unsigned int            bio_max_bytes;
+
         enum blk_bounce         bounce;
         unsigned long           seg_boundary_mask;
         unsigned long           virt_boundary_mask;
...
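
To illustrate where the new limit comes from (the 2048-sector value below is
an assumed example, not taken from this patch): a queue whose max_sectors
ends up at 2048 gets bio_max_bytes = 2048 << SECTOR_SHIFT = 1MB from the
blk_queue_max_hw_sectors() hunk above, i.e. the 1MB cap used in the
timelines, while a shift that would overflow an unsigned int is caught by
check_shl_overflow() and leaves the limit at UINT_MAX. A hypothetical driver
fragment:

#include <linux/blkdev.h>

/* Hypothetical driver fragment, illustrative values only. */
static void example_set_queue_limits(struct request_queue *q)
{
        /* 2048 sectors * 512 bytes = 1MB per hardware request (assumed). */
        blk_queue_max_hw_sectors(q, 2048);

        /*
         * With this patch, the call above also sets
         * q->limits.bio_max_bytes = 2048 << SECTOR_SHIFT = 1MB (assuming no
         * smaller device or default cap applies), so bio_max_size() now
         * stops bio merging at 1MB instead of UINT_MAX.
         */
}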