Commit 2b04e8f6 authored by Al Viro

more bio_map_user_iov() leak fixes

We need to take care of the failure exit as well: pages already
in the bio should be dropped by an analogue of bio_unmap_pages(),
since their refcounts had been bumped only once per reference
in the bio.

Cc: stable@vger.kernel.org
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
parent 95d78c28
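
The accounting rule behind both hunks below can be modelled in a self-contained
userspace sketch. The struct page, struct bio and helper definitions here are
illustrative stand-ins, not kernel code: after a partial get_user_pages_fast(),
the pages just pinned but never added to the bio must be put directly, while
every page the bio does reference is put exactly once per segment.

/* cc -Wall -o bio_model bio_model.c && ./bio_model
 * Hypothetical userspace model of the refcount accounting; all types
 * and helpers are stand-ins for illustration, not the kernel's.
 */
#include <assert.h>
#include <stdio.h>

#define NR_PAGES 4

struct page { int refcount; };
struct bio_seg { struct page *bv_page; };
struct bio { int nr_segs; struct bio_seg segs[NR_PAGES]; };

static void get_page(struct page *p) { p->refcount++; }
static void put_page(struct page *p) { p->refcount--; }

int main(void)
{
	struct page pages_mem[NR_PAGES] = { {1}, {1}, {1}, {1} };
	struct page *pages[NR_PAGES] = { 0 };	/* the GUP output array */
	struct bio bio = { 0 };
	int j;

	/* First batch: two pages pinned and both added to the bio,
	 * so each is referenced exactly once by a bio segment. */
	for (j = 0; j < 2; j++) {
		get_page(&pages_mem[j]);
		pages[j] = &pages_mem[j];
		bio.segs[bio.nr_segs++].bv_page = &pages_mem[j];
	}

	/* Second batch: two pages requested, only one pinned --
	 * the partial failure the commit handles. */
	int cur_page = 2, page_limit = 4, ret = 1;
	for (j = cur_page; j < cur_page + ret; j++) {
		get_page(&pages_mem[j]);
		pages[j] = &pages_mem[j];
	}

	/* Failure path, first half of the fix: drop the just-pinned
	 * pages that never made it into the bio. */
	for (j = cur_page; j < page_limit; j++) {
		if (!pages[j])
			break;
		put_page(pages[j]);
	}

	/* Second half: drop one reference per page the bio holds
	 * (the analogue of bio_for_each_segment_all + put_page). */
	for (j = 0; j < bio.nr_segs; j++)
		put_page(bio.segs[j].bv_page);

	for (j = 0; j < NR_PAGES; j++)
		assert(pages_mem[j].refcount == 1);
	printf("all refcounts balanced\n");
	return 0;
}

The final asserts confirm each page is back at its baseline reference count
after the two-stage cleanup.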
block/bio.c
@@ -1331,6 +1331,7 @@ struct bio *bio_map_user_iov(struct request_queue *q,
 	int ret, offset;
 	struct iov_iter i;
 	struct iovec iov;
+	struct bio_vec *bvec;
 
 	iov_for_each(iov, i, *iter) {
 		unsigned long uaddr = (unsigned long) iov.iov_base;
@@ -1375,7 +1376,12 @@ struct bio *bio_map_user_iov(struct request_queue *q,
 		ret = get_user_pages_fast(uaddr, local_nr_pages,
 				(iter->type & WRITE) != WRITE,
 				&pages[cur_page]);
-		if (ret < local_nr_pages) {
+		if (unlikely(ret < local_nr_pages)) {
+			for (j = cur_page; j < page_limit; j++) {
+				if (!pages[j])
+					break;
+				put_page(pages[j]);
+			}
 			ret = -EFAULT;
 			goto out_unmap;
 		}
@@ -1431,10 +1437,8 @@ struct bio *bio_map_user_iov(struct request_queue *q,
 	return bio;
 
  out_unmap:
-	for (j = 0; j < nr_pages; j++) {
-		if (!pages[j])
-			break;
-		put_page(pages[j]);
+	bio_for_each_segment_all(bvec, bio, j) {
+		put_page(bvec->bv_page);
 	}
  out:
 	kfree(pages);
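
The reason out_unmap now walks the bio rather than the pages[] array follows
from the parent commit (95d78c28): when bio_add_pc_page() merges a page into
the previous bvec, the extra reference taken by get_user_pages_fast() is
dropped at merge time, so pages[] can name pages whose reference the bio no
longer holds. A minimal sketch of the over-put the old loop would cause, again
with hypothetical stand-ins rather than kernel code:

/* cc -Wall -o merge_model merge_model.c && ./merge_model */
#include <assert.h>
#include <stdio.h>

struct page { int refcount; };

static void get_page(struct page *p) { p->refcount++; }
static void put_page(struct page *p) { p->refcount--; }

int main(void)
{
	struct page pg = { 1 };		/* one baseline reference */
	struct page *pages[2];
	struct page *bio_segs[2];
	int nr_segs = 0, j;

	/* Two small iovecs land in the same page, so it is pinned
	 * twice: two pages[] entries, two extra references. */
	for (j = 0; j < 2; j++) {
		get_page(&pg);
		pages[j] = &pg;
	}

	/* bio_add_pc_page() merges the second entry into the existing
	 * bvec; since the parent commit, its reference is dropped
	 * immediately, leaving one reference per bio segment. */
	bio_segs[nr_segs++] = pages[0];
	put_page(pages[1]);		/* merged: ref dropped here */

	/* The old cleanup walked pages[] and would put twice, driving
	 * the refcount to 0.  The new cleanup walks the bio segments
	 * and puts exactly what the bio holds: */
	for (j = 0; j < nr_segs; j++)
		put_page(bio_segs[j]);

	assert(pg.refcount == 1);	/* balanced */
	printf("refcount = %d\n", pg.refcount);
	return 0;
}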