Commit 21b56c84 authored by Al Viro

iov_iter: get rid of separate bvec and xarray callbacks

After the previous commit we have
	* xarray and bvec callbacks identical in all cases
	* both equivalent to the kvec callback wrapped into a
kmap_local_page()/kunmap_local() pair.

So we can pass only two (iovec and kvec) callbacks to
iterate_and_advance() and let iterate_{bvec,xarray} wrap
the kvec callback into kmap_local_page()/kunmap_local().
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
parent 1b4fb5ff
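
As an illustration of the pattern the commit relies on: a bvec segment
(page + offset + length) can be handed to the kvec step (pointer +
length) once the page is temporarily mapped. Below is a minimal,
self-contained userspace sketch of that wrapping; struct page,
kmap_local_page() and kunmap_local() here are simplified stand-ins,
not the kernel implementations.

/*
 * Sketch only, NOT the kernel macros: shows how a bvec/xarray step
 * reduces to the kvec step run on a temporarily mapped page.
 */
#include <stddef.h>
#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4096

struct page { char data[PAGE_SIZE]; };		/* stand-in for struct page */
struct kvec { void *iov_base; size_t iov_len; };

/* stand-ins for the kernel's kmap_local_page()/kunmap_local() */
static void *kmap_local_page(struct page *p) { return p->data; }
static void kunmap_local(void *kaddr) { (void)kaddr; }

/* the single surviving kernel-memory callback: it only sees a kvec */
static size_t kvec_step(struct kvec *v, const void *from)
{
	memcpy(v->iov_base, from, v->iov_len);
	return 0;				/* bytes left uncopied */
}

/* what iterate_bvec/iterate_xarray now do per segment */
static size_t bvec_step(struct page *pg, unsigned int offset, size_t len,
			const void *from)
{
	void *kaddr = kmap_local_page(pg);
	struct kvec v = {
		.iov_base = (char *)kaddr + offset,
		.iov_len = len,
	};
	size_t left = kvec_step(&v, from);	/* STEP runs on the mapping */

	kunmap_local(kaddr);			/* unmap before advancing */
	return left;
}

int main(void)
{
	struct page pg = { { 0 } };
	size_t left = bvec_step(&pg, 16, 5, "hello");

	printf("left=%zu copied=%.5s\n", left, pg.data + 16);
	return 0;
}

With the bvec and xarray steps reduced to this shape, the B and X
callback slots carry no information the K slot doesn't, which is what
lets the diff below drop them.
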
lib/iov_iter.c
@@ -42,18 +42,20 @@
 	while (n) {						\
 		unsigned offset = p->bv_offset + skip;		\
 		unsigned left;					\
-		__v.bv_offset = offset % PAGE_SIZE;		\
-		__v.bv_page = p->bv_page + offset / PAGE_SIZE;	\
-		__v.bv_len = min(min(n, p->bv_len - skip),	\
+		void *kaddr = kmap_local_page(p->bv_page +	\
+					offset / PAGE_SIZE);	\
+		__v.iov_base = kaddr + offset % PAGE_SIZE;	\
+		__v.iov_len = min(min(n, p->bv_len - skip),	\
 		     (size_t)(PAGE_SIZE - offset % PAGE_SIZE));	\
 		left = (STEP);					\
-		__v.bv_len -= left;				\
-		skip += __v.bv_len;				\
+		kunmap_local(kaddr);				\
+		__v.iov_len -= left;				\
+		skip += __v.iov_len;				\
 		if (skip == p->bv_len) {			\
 			skip = 0;				\
 			p++;					\
 		}						\
-		n -= __v.bv_len;				\
+		n -= __v.iov_len;				\
 		if (left)					\
 			break;					\
 	}							\
@@ -81,15 +83,16 @@
 			break;					\
 		for (j = (head->index < index) ? index - head->index : 0; \
 		     j < thp_nr_pages(head); j++) {		\
-			__v.bv_page = head + j;			\
-			offset = (i->xarray_start + skip) & ~PAGE_MASK;	\
+			void *kaddr = kmap_local_page(head + j); \
+			offset = (i->xarray_start + skip) % PAGE_SIZE; \
+			__v.iov_base = kaddr + offset;		\
 			seg = PAGE_SIZE - offset;		\
-			__v.bv_offset = offset;			\
-			__v.bv_len = min(n, seg);		\
+			__v.iov_len = min(n, seg);		\
 			left = (STEP);				\
-			__v.bv_len -= left;			\
-			n -= __v.bv_len;			\
-			skip += __v.bv_len;			\
+			kunmap_local(kaddr);			\
+			__v.iov_len -= left;			\
+			n -= __v.iov_len;			\
+			skip += __v.iov_len;			\
 			if (left || n == 0)			\
 				goto __out;			\
 		}						\
@@ -99,7 +102,7 @@ __out: \
 	n = wanted - n;						\
 }

-#define __iterate_and_advance(i, n, v, I, B, K, X) {	\
+#define __iterate_and_advance(i, n, v, I, K) {		\
 	if (unlikely(i->count < n))				\
 		n = i->count;					\
 	if (likely(n)) {					\
@@ -112,8 +115,8 @@ __out: \
 			i->iov = iov;				\
 		} else if (iov_iter_is_bvec(i)) {		\
 			const struct bio_vec *bvec = i->bvec;	\
-			struct bio_vec v;			\
-			iterate_bvec(i, n, v, bvec, skip, (B))	\
+			struct kvec v;				\
+			iterate_bvec(i, n, v, bvec, skip, (K))	\
 			i->nr_segs -= bvec - i->bvec;		\
 			i->bvec = bvec;				\
 		} else if (iov_iter_is_kvec(i)) {		\
@@ -123,16 +126,15 @@ __out: \
 			i->nr_segs -= kvec - i->kvec;		\
 			i->kvec = kvec;				\
 		} else if (iov_iter_is_xarray(i)) {		\
-			struct bio_vec v;			\
-			iterate_xarray(i, n, v, skip, (X))	\
+			struct kvec v;				\
+			iterate_xarray(i, n, v, skip, (K))	\
 		}						\
 		i->count -= n;					\
 		i->iov_offset = skip;				\
 	}							\
 }
-#define iterate_and_advance(i, n, v, I, B, K, X) \
-	__iterate_and_advance(i, n, v, I, ((void)(B),0),	\
-			      ((void)(K),0), ((void)(X),0))
+#define iterate_and_advance(i, n, v, I, K) \
+	__iterate_and_advance(i, n, v, I, ((void)(K),0))

 static int copyout(void __user *to, const void *from, size_t n)
 {
@@ -612,11 +614,7 @@ size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
 	might_fault();
 	iterate_and_advance(i, bytes, v,
 		copyout(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len),
-		memcpy_to_page(v.bv_page, v.bv_offset,
-			       (from += v.bv_len) - v.bv_len, v.bv_len),
-		memcpy(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len),
-		memcpy_to_page(v.bv_page, v.bv_offset,
-			       (from += v.bv_len) - v.bv_len, v.bv_len)
+		memcpy(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len)
 	)

 	return bytes;
@@ -714,12 +712,8 @@ size_t _copy_mc_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
 	__iterate_and_advance(i, bytes, v,
 		copyout_mc(v.iov_base, (from += v.iov_len) - v.iov_len,
 			   v.iov_len),
-		copy_mc_to_page(v.bv_page, v.bv_offset,
-			      (from += v.bv_len) - v.bv_len, v.bv_len),
 		copy_mc_to_kernel(v.iov_base, (from += v.iov_len)
-				  - v.iov_len, v.iov_len),
-		copy_mc_to_page(v.bv_page, v.bv_offset,
-			      (from += v.bv_len) - v.bv_len, v.bv_len)
+				  - v.iov_len, v.iov_len)
 	)

 	return bytes;
@@ -738,11 +732,7 @@ size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
 	might_fault();
 	iterate_and_advance(i, bytes, v,
 		copyin((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
-		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
-				 v.bv_offset, v.bv_len),
-		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
-		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
-				 v.bv_offset, v.bv_len)
+		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
 	)

 	return bytes;
@@ -759,11 +749,7 @@ size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
 	iterate_and_advance(i, bytes, v,
 		__copy_from_user_inatomic_nocache((to += v.iov_len) - v.iov_len,
 					 v.iov_base, v.iov_len),
-		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
-				 v.bv_offset, v.bv_len),
-		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
-		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
-				 v.bv_offset, v.bv_len)
+		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
 	)

 	return bytes;
@@ -795,12 +781,8 @@ size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
 	iterate_and_advance(i, bytes, v,
 		__copy_from_user_flushcache((to += v.iov_len) - v.iov_len,
 					 v.iov_base, v.iov_len),
-		memcpy_page_flushcache((to += v.bv_len) - v.bv_len, v.bv_page,
-				 v.bv_offset, v.bv_len),
 		memcpy_flushcache((to += v.iov_len) - v.iov_len, v.iov_base,
-			v.iov_len),
-		memcpy_page_flushcache((to += v.bv_len) - v.bv_len, v.bv_page,
-				 v.bv_offset, v.bv_len)
+			v.iov_len)
 	)

 	return bytes;
@@ -931,9 +913,7 @@ size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
 		return pipe_zero(bytes, i);
 	iterate_and_advance(i, bytes, v,
 		clear_user(v.iov_base, v.iov_len),
-		memzero_page(v.bv_page, v.bv_offset, v.bv_len),
-		memset(v.iov_base, 0, v.iov_len),
-		memzero_page(v.bv_page, v.bv_offset, v.bv_len)
+		memset(v.iov_base, 0, v.iov_len)
 	)

 	return bytes;
@@ -955,11 +935,7 @@ size_t copy_page_from_iter_atomic(struct page *page, unsigned offset, size_t byt
 	}
 	iterate_and_advance(i, bytes, v,
 		copyin((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
-		memcpy_from_page((p += v.bv_len) - v.bv_len, v.bv_page,
-				 v.bv_offset, v.bv_len),
-		memcpy((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
-		memcpy_from_page((p += v.bv_len) - v.bv_len, v.bv_page,
-				 v.bv_offset, v.bv_len)
+		memcpy((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
 	)
 	kunmap_atomic(kaddr);
 	return bytes;
@@ -1698,24 +1674,10 @@ size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
 		}
 		next ? 0 : v.iov_len;
 	}), ({
-		char *p = kmap_atomic(v.bv_page);
-		sum = csum_and_memcpy((to += v.bv_len) - v.bv_len,
-				      p + v.bv_offset, v.bv_len,
-				      sum, off);
-		kunmap_atomic(p);
-		off += v.bv_len;
-	}),({
 		sum = csum_and_memcpy((to += v.iov_len) - v.iov_len,
 				      v.iov_base, v.iov_len,
 				      sum, off);
 		off += v.iov_len;
-	}), ({
-		char *p = kmap_atomic(v.bv_page);
-		sum = csum_and_memcpy((to += v.bv_len) - v.bv_len,
-				      p + v.bv_offset, v.bv_len,
-				      sum, off);
-		kunmap_atomic(p);
-		off += v.bv_len;
 	})
 	)
 	*csum = sum;
@@ -1750,24 +1712,10 @@ size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *_csstate,
 		}
 		next ? 0 : v.iov_len;
 	}), ({
-		char *p = kmap_atomic(v.bv_page);
-		sum = csum_and_memcpy(p + v.bv_offset,
-				      (from += v.bv_len) - v.bv_len,
-				      v.bv_len, sum, off);
-		kunmap_atomic(p);
-		off += v.bv_len;
-	}),({
 		sum = csum_and_memcpy(v.iov_base,
 				      (from += v.iov_len) - v.iov_len,
 				      v.iov_len, sum, off);
 		off += v.iov_len;
-	}), ({
-		char *p = kmap_atomic(v.bv_page);
-		sum = csum_and_memcpy(p + v.bv_offset,
-				      (from += v.bv_len) - v.bv_len,
-				      v.bv_len, sum, off);
-		kunmap_atomic(p);
-		off += v.bv_len;
 	})
 	)
 	csstate->csum = csum_shift(sum, csstate->off);
...