Commit 3255aa2e authored by Ingo Molnar

x86, mm: pass in 'total' to __copy_from_user_*nocache()

Impact: cleanup, enable future change

Add a 'total bytes copied' parameter to __copy_from_user_*nocache(),
and update all the callsites.

The parameter is not used yet - architecture code can use it to
more intelligently decide whether the copy should be cached or
non-temporal.
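
For illustration only (a hypothetical sketch, not part of this commit), an
architecture could later key that decision off the whole transfer rather
than the current chunk, e.g. on x86-64:

	static inline int
	__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
					  unsigned size, unsigned total)
	{
		/*
		 * Possible heuristic: use non-temporal stores only when the
		 * overall copy is large enough that the destination is
		 * unlikely to be read back out of the cache soon.
		 */
		if (likely(total >= PAGE_SIZE))
			return __copy_user_nocache(dst, src, size, 0);

		return __copy_from_user_inatomic(dst, src, size);
	}

The existing x86-64 code makes the same check against 'size'; passing 'total'
would let such a heuristic recognize a large write even when it arrives one
page at a time.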

Cc: Salman Qazi <sqazi@google.com>
Cc: Nick Piggin <npiggin@suse.de>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 95f66b37
@@ -157,7 +157,7 @@ __copy_from_user(void *to, const void __user *from, unsigned long n)
 }
 
 static __always_inline unsigned long __copy_from_user_nocache(void *to,
-				const void __user *from, unsigned long n)
+				const void __user *from, unsigned long n, unsigned long total)
 {
 	might_fault();
 	if (__builtin_constant_p(n)) {
@@ -180,7 +180,7 @@ static __always_inline unsigned long __copy_from_user_nocache(void *to,
 
 static __always_inline unsigned long
 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
-				  unsigned long n)
+				  unsigned long n, unsigned long total)
 {
 	return __copy_from_user_ll_nocache_nozero(to, from, n);
 }
...
@@ -189,7 +189,7 @@ extern long __copy_user_nocache(void *dst, const void __user *src,
 				unsigned size, int zerorest);
 
 static inline int __copy_from_user_nocache(void *dst, const void __user *src,
-				unsigned size)
+				unsigned size, unsigned long total)
 {
 	might_sleep();
 	/*
@@ -205,8 +205,7 @@ static inline int __copy_from_user_nocache(void *dst, const void __user *src,
 }
 
 static inline int __copy_from_user_inatomic_nocache(void *dst,
-					const void __user *src,
-					unsigned size)
+					const void __user *src, unsigned size, unsigned total)
 {
 	if (likely(size >= PAGE_SIZE))
 		return __copy_user_nocache(dst, src, size, 0);
...
@@ -215,7 +215,7 @@ fast_user_write(struct io_mapping *mapping,
 
 	vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
 	unwritten = __copy_from_user_inatomic_nocache(vaddr_atomic + page_offset,
-						      user_data, length);
+						      user_data, length, length);
 	io_mapping_unmap_atomic(vaddr_atomic);
 	if (unwritten)
 		return -EFAULT;
...
@@ -41,13 +41,13 @@ static inline void pagefault_enable(void)
 #ifndef ARCH_HAS_NOCACHE_UACCESS
 
 static inline unsigned long __copy_from_user_inatomic_nocache(void *to,
-				const void __user *from, unsigned long n)
+				const void __user *from, unsigned long n, unsigned long total)
 {
 	return __copy_from_user_inatomic(to, from, n);
 }
 
 static inline unsigned long __copy_from_user_nocache(void *to,
-				const void __user *from, unsigned long n)
+				const void __user *from, unsigned long n, unsigned long total)
 {
 	return __copy_from_user(to, from, n);
 }
...
@@ -1816,14 +1816,14 @@ EXPORT_SYMBOL(file_remove_suid);
 static size_t __iovec_copy_from_user_inatomic(char *vaddr,
 			const struct iovec *iov, size_t base, size_t bytes)
 {
-	size_t copied = 0, left = 0;
+	size_t copied = 0, left = 0, total = bytes;
 
 	while (bytes) {
 		char __user *buf = iov->iov_base + base;
 		int copy = min(bytes, iov->iov_len - base);
 
 		base = 0;
-		left = __copy_from_user_inatomic_nocache(vaddr, buf, copy);
+		left = __copy_from_user_inatomic_nocache(vaddr, buf, copy, total);
 		copied += copy;
 		bytes -= copy;
 		vaddr += copy;
@@ -1851,8 +1851,9 @@ size_t iov_iter_copy_from_user_atomic(struct page *page,
 	if (likely(i->nr_segs == 1)) {
 		int left;
 		char __user *buf = i->iov->iov_base + i->iov_offset;
+
 		left = __copy_from_user_inatomic_nocache(kaddr + offset,
-							buf, bytes);
+							buf, bytes, bytes);
 		copied = bytes - left;
 	} else {
 		copied = __iovec_copy_from_user_inatomic(kaddr + offset,
@@ -1880,7 +1881,8 @@ size_t iov_iter_copy_from_user(struct page *page,
 	if (likely(i->nr_segs == 1)) {
 		int left;
 		char __user *buf = i->iov->iov_base + i->iov_offset;
-		left = __copy_from_user_nocache(kaddr + offset, buf, bytes);
+
+		left = __copy_from_user_nocache(kaddr + offset, buf, bytes, bytes);
 		copied = bytes - left;
 	} else {
 		copied = __iovec_copy_from_user_inatomic(kaddr + offset,
...
@@ -354,7 +354,7 @@ __xip_file_write(struct file *filp, const char __user *buf,
 			break;
 
 		copied = bytes -
-			__copy_from_user_nocache(xip_mem + offset, buf, bytes);
+			__copy_from_user_nocache(xip_mem + offset, buf, bytes, bytes);
 
 		if (likely(copied > 0)) {
 			status = copied;
...