Commit 981f95a5 authored by Ming Lei, committed by Jens Axboe

ublk: cleanup ublk_copy_user_pages

Clean up ublk_copy_user_pages() by using iov_iter_get_pages2(); the code is
simplified a lot and becomes much more readable.
Signed-off-by: Ming Lei <ming.lei@redhat.com>
Link: https://lore.kernel.org/r/20230519065030.351216-4-ming.lei@redhat.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent f236a214
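
For readers less familiar with the iov_iter API, the pattern this patch converges on can be shown in isolation. The following is a minimal sketch, not part of the patch: the helper name pin_and_consume() and the 32-entry page array are illustrative assumptions, and the actual copy step is elided.

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/uio.h>

/* Sketch: wrap a user buffer in an iov_iter, then pin and release its
 * pages batch by batch. Mirrors the shape of the new
 * ublk_copy_user_pages() without the bio/bvec copy machinery.
 */
static size_t pin_and_consume(void __user *ubuf, size_t len, int dir)
{
        struct page *pages[32];         /* batch size is arbitrary here */
        struct iov_iter iter;
        struct iovec iov;
        size_t done = 0;

        /* Builds a single-segment iterator; only checks access_ok(). */
        if (import_single_range(dir, ubuf, len, &iov, &iter))
                return 0;

        while (iov_iter_count(&iter)) {
                unsigned int i, nr_pages;
                size_t off;     /* data starts at pages[0] + off */
                ssize_t n;

                /* Pins up to 32 pages, advances 'iter' by 'n' bytes. */
                n = iov_iter_get_pages2(&iter, pages, iov_iter_count(&iter),
                                ARRAY_SIZE(pages), &off);
                if (n <= 0)
                        break;

                /* ... copy 'n' bytes to/from pages[0] + off here; for
                 * dir == ITER_DEST the pages are written, so they would
                 * also need set_page_dirty() ...
                 */

                nr_pages = DIV_ROUND_UP(n + off, PAGE_SIZE);
                for (i = 0; i < nr_pages; i++)
                        put_page(pages[i]);     /* drop the pin */
                done += n;
        }
        return done;
}

Because iov_iter_get_pages2() both reports the starting offset and advances the iterator, the explicit pg_off/nr_pages bookkeeping that struct ublk_io_iter used to carry, and the manual address arithmetic around get_user_pages_fast(), all fall away; that is the whole simplification.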
@@ -412,49 +412,39 @@ static const struct block_device_operations ub_fops = {
 
 #define UBLK_MAX_PIN_PAGES      32
 
-struct ublk_map_data {
-        const struct request *rq;
-        unsigned long   ubuf;
-        unsigned int    len;
-};
-
 struct ublk_io_iter {
         struct page *pages[UBLK_MAX_PIN_PAGES];
-        unsigned pg_off;        /* offset in the 1st page in pages */
-        int nr_pages;           /* how many page pointers in pages */
         struct bio *bio;
         struct bvec_iter iter;
 };
 
-static inline unsigned ublk_copy_io_pages(struct ublk_io_iter *data,
-                unsigned max_bytes, bool to_vm)
+/* return how many pages are copied */
+static void ublk_copy_io_pages(struct ublk_io_iter *data,
+                size_t total, size_t pg_off, int dir)
 {
-        const unsigned total = min_t(unsigned, max_bytes,
-                        PAGE_SIZE - data->pg_off +
-                        ((data->nr_pages - 1) << PAGE_SHIFT));
         unsigned done = 0;
         unsigned pg_idx = 0;
 
         while (done < total) {
                 struct bio_vec bv = bio_iter_iovec(data->bio, data->iter);
-                const unsigned int bytes = min3(bv.bv_len, total - done,
-                                (unsigned)(PAGE_SIZE - data->pg_off));
+                unsigned int bytes = min3(bv.bv_len, (unsigned)total - done,
+                                (unsigned)(PAGE_SIZE - pg_off));
                 void *bv_buf = bvec_kmap_local(&bv);
                 void *pg_buf = kmap_local_page(data->pages[pg_idx]);
 
-                if (to_vm)
-                        memcpy(pg_buf + data->pg_off, bv_buf, bytes);
+                if (dir == ITER_DEST)
+                        memcpy(pg_buf + pg_off, bv_buf, bytes);
                 else
-                        memcpy(bv_buf, pg_buf + data->pg_off, bytes);
+                        memcpy(bv_buf, pg_buf + pg_off, bytes);
 
                 kunmap_local(pg_buf);
                 kunmap_local(bv_buf);
 
                 /* advance page array */
-                data->pg_off += bytes;
-                if (data->pg_off == PAGE_SIZE) {
+                pg_off += bytes;
+                if (pg_off == PAGE_SIZE) {
                         pg_idx += 1;
-                        data->pg_off = 0;
+                        pg_off = 0;
                 }
 
                 done += bytes;
@@ -468,41 +458,40 @@ static inline unsigned ublk_copy_io_pages(struct ublk_io_iter *data,
                         data->iter = data->bio->bi_iter;
                 }
         }
-
-        return done;
 }
 
-static int ublk_copy_user_pages(struct ublk_map_data *data, bool to_vm)
+/*
+ * Copy data between request pages and io_iter, and 'offset'
+ * is the start point of linear offset of request.
+ */
+static size_t ublk_copy_user_pages(const struct request *req,
+                struct iov_iter *uiter, int dir)
 {
-        const unsigned int gup_flags = to_vm ? FOLL_WRITE : 0;
-        const unsigned long start_vm = data->ubuf;
-        unsigned int done = 0;
         struct ublk_io_iter iter = {
-                .pg_off = start_vm & (PAGE_SIZE - 1),
-                .bio    = data->rq->bio,
-                .iter   = data->rq->bio->bi_iter,
+                .bio    = req->bio,
+                .iter   = req->bio->bi_iter,
         };
-        const unsigned int nr_pages = round_up(data->len +
-                        (start_vm & (PAGE_SIZE - 1)), PAGE_SIZE) >> PAGE_SHIFT;
-
-        while (done < nr_pages) {
-                const unsigned to_pin = min_t(unsigned, UBLK_MAX_PIN_PAGES,
-                                nr_pages - done);
-                unsigned i, len;
-
-                iter.nr_pages = get_user_pages_fast(start_vm +
-                                (done << PAGE_SHIFT), to_pin, gup_flags,
-                                iter.pages);
-                if (iter.nr_pages <= 0)
-                        return done == 0 ? iter.nr_pages : done;
-
-                len = ublk_copy_io_pages(&iter, data->len, to_vm);
-                for (i = 0; i < iter.nr_pages; i++) {
-                        if (to_vm)
+        size_t done = 0;
+
+        while (iov_iter_count(uiter) && iter.bio) {
+                unsigned nr_pages;
+                size_t len, off;
+                int i;
+
+                len = iov_iter_get_pages2(uiter, iter.pages,
+                                iov_iter_count(uiter),
+                                UBLK_MAX_PIN_PAGES, &off);
+                if (len <= 0)
+                        return done;
+
+                ublk_copy_io_pages(&iter, len, off, dir);
+                nr_pages = DIV_ROUND_UP(len + off, PAGE_SIZE);
+                for (i = 0; i < nr_pages; i++) {
+                        if (dir == ITER_DEST)
                                 set_page_dirty(iter.pages[i]);
                         put_page(iter.pages[i]);
                 }
-
-                data->len -= len;
-                done += iter.nr_pages;
+                done += len;
         }
 
         return done;
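
One detail of the new loop deserves a note: iov_iter_get_pages2() returns a byte count while the release loop needs a page count, and the first pinned page may be used from a non-zero offset. Hence nr_pages = DIV_ROUND_UP(len + off, PAGE_SIZE). A worked case (numbers are illustrative, assuming 4 KiB pages):

/* Pinning len = 4096 bytes that begin at off = 1 inside pages[0]
 * touches bytes [1, 4097) of the pinned span, i.e. two pages:
 *
 *      DIV_ROUND_UP(4096 + 1, 4096) == 2
 *
 * Counting from len alone would give 1, under-release by one page,
 * and leak a pin.
 */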
@@ -529,15 +518,14 @@ static int ublk_map_io(const struct ublk_queue *ubq, const struct request *req,
          * context is pretty fast, see ublk_pin_user_pages
          */
         if (ublk_need_map_req(req)) {
-                struct ublk_map_data data = {
-                        .rq     =       req,
-                        .ubuf   =       io->addr,
-                        .len    =       rq_bytes,
-                };
+                struct iov_iter iter;
+                struct iovec iov;
+                const int dir = ITER_DEST;
 
-                ublk_copy_user_pages(&data, true);
+                import_single_range(dir, u64_to_user_ptr(io->addr), rq_bytes,
+                                &iov, &iter);
 
-                return rq_bytes - data.len;
+                return ublk_copy_user_pages(req, &iter, dir);
         }
         return rq_bytes;
 }
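
For orientation, the direction constants encode who is source and who is destination from the user iterator's point of view. A summary of the two call sites (an illustrative comment, not patch content):

/*
 * Direction conventions at the two call sites:
 *
 *   ublk_map_io()    dir == ITER_DEST    request bvec pages --> pinned
 *                                        user pages (the ublk server
 *                                        reads the WRITE payload); the
 *                                        user pages are written, hence
 *                                        set_page_dirty().
 *
 *   ublk_unmap_io()  dir == ITER_SOURCE  pinned user pages --> request
 *                                        bvec pages (the server's READ
 *                                        result is copied back on
 *                                        completion).
 */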
@@ -549,17 +537,15 @@ static int ublk_unmap_io(const struct ublk_queue *ubq,
         const unsigned int rq_bytes = blk_rq_bytes(req);
 
         if (ublk_need_unmap_req(req)) {
-                struct ublk_map_data data = {
-                        .rq     =       req,
-                        .ubuf   =       io->addr,
-                        .len    =       io->res,
-                };
+                struct iov_iter iter;
+                struct iovec iov;
+                const int dir = ITER_SOURCE;
 
                 WARN_ON_ONCE(io->res > rq_bytes);
 
-                ublk_copy_user_pages(&data, false);
-                return io->res - data.len;
+                import_single_range(dir, u64_to_user_ptr(io->addr), io->res,
+                                &iov, &iter);
+                return ublk_copy_user_pages(req, &iter, dir);
         }
         return rq_bytes;
 }
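
As a side note, both call sites ignore import_single_range()'s return value; it fails with -EFAULT when the user range does not pass access_ok(). A variant that propagates the failure would look like this sketch (not part of the patch; the name ublk_map_io_checked() and the error policy are assumptions):

/* Hypothetical checked variant of the map-path call site; 'io',
 * 'req' and 'rq_bytes' have the same meaning as in ublk_map_io().
 */
static int ublk_map_io_checked(const struct request *req,
                const struct ublk_io *io, unsigned int rq_bytes)
{
        struct iov_iter iter;
        struct iovec iov;
        int ret;

        ret = import_single_range(ITER_DEST, u64_to_user_ptr(io->addr),
                        rq_bytes, &iov, &iter);
        if (unlikely(ret))
                return ret;     /* -EFAULT: range failed access_ok() */

        return ublk_copy_user_pages(req, &iter, ITER_DEST);
}

Whether the extra check matters depends on how io->addr was validated earlier in the driver/daemon handshake; the sketch only makes the failure mode explicit.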