Commit 3e609135 authored by Xiubo Li, committed by Nicholas Bellinger

tcmu: clean up the scatter helper

Add some comments to make the scatter code more readable, and drop
the unused arg to new_iov().
Signed-off-by: Xiubo Li <lixiubo@cmss.chinamobile.com>
Signed-off-by: Mike Christie <mchristi@redhat.com>
Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
parent 3c0f26ff
@@ -492,8 +492,7 @@ static inline size_t head_to_end(size_t head, size_t size)
     return size - head;
 }
 
-static inline void new_iov(struct iovec **iov, int *iov_cnt,
-                           struct tcmu_dev *udev)
+static inline void new_iov(struct iovec **iov, int *iov_cnt)
 {
     struct iovec *iovec;
 
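The hunk above only shows the declaration of new_iov(), but the commit message explains why the struct tcmu_dev argument can go: it was unused. The callers (see the next hunk) fill in iov_base and iov_len themselves right after the call, so the helper only needs to advance the iovec cursor and bump the count. Purely as an illustration of that shape, and not the driver's actual body, a device-independent stand-in could look like this:

    #include <string.h>
    #include <sys/uio.h>    /* struct iovec */

    static inline void new_iov(struct iovec **iov, int *iov_cnt)
    {
        /* The first call stays on slot 0; later calls advance to the next slot. */
        if (*iov_cnt != 0)
            (*iov)++;
        (*iov_cnt)++;

        /* Hand back a zeroed entry; the caller fills in iov_base and iov_len. */
        memset(*iov, 0, sizeof(**iov));
    }

Nothing in a helper of this shape needs device state, which is why the udev parameter can be dropped without changing behaviour.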
@@ -546,19 +545,38 @@ static void scatter_data_area(struct tcmu_dev *udev,
                 to = kmap_atomic(page);
             }
 
-            copy_bytes = min_t(size_t, sg_remaining,
-                               block_remaining);
+            /*
+             * Covert to virtual offset of the ring data area.
+             */
             to_offset = get_block_offset_user(udev, dbi,
                                               block_remaining);
 
+            /*
+             * The following code will gather and map the blocks
+             * to the same iovec when the blocks are all next to
+             * each other.
+             */
+            copy_bytes = min_t(size_t, sg_remaining,
+                               block_remaining);
             if (*iov_cnt != 0 &&
                 to_offset == iov_tail(*iov)) {
+                /*
+                 * Will append to the current iovec, because
+                 * the current block page is next to the
+                 * previous one.
+                 */
                 (*iov)->iov_len += copy_bytes;
             } else {
-                new_iov(iov, iov_cnt, udev);
+                /*
+                 * Will allocate a new iovec because we are
+                 * first time here or the current block page
+                 * is not next to the previous one.
+                 */
+                new_iov(iov, iov_cnt);
                 (*iov)->iov_base = (void __user *)to_offset;
                 (*iov)->iov_len = copy_bytes;
             }
+
             if (copy_data) {
                 offset = DATA_BLOCK_SIZE - block_remaining;
                 memcpy(to + offset,
@@ -566,11 +584,13 @@ static void scatter_data_area(struct tcmu_dev *udev,
                        copy_bytes);
                 tcmu_flush_dcache_range(to, copy_bytes);
             }
+
             sg_remaining -= copy_bytes;
             block_remaining -= copy_bytes;
         }
         kunmap_atomic(from - sg->offset);
     }
+
     if (to)
         kunmap_atomic(to);
 }
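The comments added to scatter_data_area() describe a simple coalescing rule: each data block contributes copy_bytes at ring offset to_offset, and a block that starts exactly at the tail of the current iovec is merged into it instead of opening a new entry. A minimal user-space sketch of that append-or-allocate decision (with local stand-ins for DATA_BLOCK_SIZE and iov_tail(), and a plain array instead of the driver's iovec cursor) shows the effect on iov_cnt:

    #include <stdint.h>
    #include <stdio.h>
    #include <sys/uio.h>

    #define DATA_BLOCK_SIZE 4096    /* stand-in for the driver's block size */

    /* Offset just past the end of an iovec. */
    static size_t iov_tail(const struct iovec *iov)
    {
        return (size_t)(uintptr_t)iov->iov_base + iov->iov_len;
    }

    /* Append-or-allocate, mirroring the check in scatter_data_area(). */
    static void add_block(struct iovec *iovs, int *iov_cnt,
                          size_t to_offset, size_t copy_bytes)
    {
        struct iovec *cur = &iovs[*iov_cnt ? *iov_cnt - 1 : 0];

        if (*iov_cnt != 0 && to_offset == iov_tail(cur)) {
            /* Contiguous with the previous block: extend the iovec. */
            cur->iov_len += copy_bytes;
        } else {
            /* First block, or a gap in the data area: open a new iovec. */
            cur = &iovs[(*iov_cnt)++];
            cur->iov_base = (void *)(uintptr_t)to_offset;
            cur->iov_len = copy_bytes;
        }
    }

    int main(void)
    {
        struct iovec iovs[8];
        int iov_cnt = 0;

        /* Three adjacent blocks collapse into a single iovec ... */
        add_block(iovs, &iov_cnt, 0 * DATA_BLOCK_SIZE, DATA_BLOCK_SIZE);
        add_block(iovs, &iov_cnt, 1 * DATA_BLOCK_SIZE, DATA_BLOCK_SIZE);
        add_block(iovs, &iov_cnt, 2 * DATA_BLOCK_SIZE, DATA_BLOCK_SIZE);
        /* ... while a block elsewhere in the data area opens a second one. */
        add_block(iovs, &iov_cnt, 7 * DATA_BLOCK_SIZE, DATA_BLOCK_SIZE);

        printf("iov_cnt = %d\n", iov_cnt);    /* prints: iov_cnt = 2 */
        return 0;
    }

Fewer iovec entries is the payoff when the data area hands out consecutive blocks, which is what the new comments spell out: extend the current iovec when the blocks are adjacent, allocate a new one otherwise.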