Commit d1916c86 authored by Christoph Hellwig, committed by Jens Axboe

block: move same page handling from __bio_add_pc_page to the callers

Hiding page refcount manipulation inside a low-level bio helper is
somewhat awkward.  Instead return the same page information to the
callers, where it fits in much better.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 384209cd
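
As a reading aid (not part of the commit), a minimal sketch of the caller-side pattern the new interface implies: __bio_add_pc_page() now reports through *same_page whether the data was merged into the page already covered by the last bio_vec, and a caller that holds its own page reference (as bio_map_user_iov() does after get_user_pages()) drops the redundant reference itself. The helper name add_one_pinned_page() is hypothetical.

/* Hypothetical caller sketch, not from this commit. */
static int add_one_pinned_page(struct request_queue *q, struct bio *bio,
		struct page *page, unsigned int len, unsigned int offset)
{
	bool same_page = false;

	/* __bio_add_pc_page() returns the bytes added, or 0 on failure. */
	if (!__bio_add_pc_page(q, bio, page, len, offset, &same_page))
		return -EINVAL;

	/*
	 * The range was merged into the page already referenced by the last
	 * bio_vec, so the extra reference taken by the caller is redundant.
	 */
	if (same_page)
		put_page(page);
	return 0;
}
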
@@ -669,7 +669,7 @@ static bool bio_try_merge_pc_page(struct request_queue *q, struct bio *bio,
  *	@page: page to add
  *	@len: vec entry length
  *	@offset: vec entry offset
- *	@put_same_page: put the page if it is same with last added page
+ *	@same_page: return if the merge happen inside the same page
  *
  *	Attempt to add a page to the bio_vec maplist. This can fail for a
  *	number of reasons, such as the bio being full or target block device
@@ -680,10 +680,9 @@ static bool bio_try_merge_pc_page(struct request_queue *q, struct bio *bio,
  */
 static int __bio_add_pc_page(struct request_queue *q, struct bio *bio,
 		struct page *page, unsigned int len, unsigned int offset,
-		bool put_same_page)
+		bool *same_page)
 {
 	struct bio_vec *bvec;
-	bool same_page = false;
 
 	/*
 	 * cloned bio must not modify vec list
@@ -695,12 +694,8 @@ static int __bio_add_pc_page(struct request_queue *q, struct bio *bio,
 		return 0;
 
 	if (bio->bi_vcnt > 0) {
-		if (bio_try_merge_pc_page(q, bio, page, len, offset,
-				&same_page)) {
-			if (put_same_page && same_page)
-				put_page(page);
+		if (bio_try_merge_pc_page(q, bio, page, len, offset, same_page))
 			return len;
-		}
 
 		/*
 		 * If the queue doesn't support SG gaps and adding this segment
@@ -729,7 +724,8 @@ static int __bio_add_pc_page(struct request_queue *q, struct bio *bio,
 int bio_add_pc_page(struct request_queue *q, struct bio *bio,
 		struct page *page, unsigned int len, unsigned int offset)
 {
-	return __bio_add_pc_page(q, bio, page, len, offset, false);
+	bool same_page = false;
+	return __bio_add_pc_page(q, bio, page, len, offset, &same_page);
 }
 EXPORT_SYMBOL(bio_add_pc_page);
@@ -1373,13 +1369,17 @@ struct bio *bio_map_user_iov(struct request_queue *q,
 		for (j = 0; j < npages; j++) {
 			struct page *page = pages[j];
 			unsigned int n = PAGE_SIZE - offs;
+			bool same_page = false;
 
 			if (n > bytes)
 				n = bytes;
 
 			if (!__bio_add_pc_page(q, bio, page, n, offs,
-					true))
+					&same_page)) {
+				if (same_page)
+					put_page(page);
 				break;
+			}
 
 			added += n;
 			bytes -= n;