Commit d6a428fb authored by James Hogan, committed by Ralf Baechle

MIPS: uaccess: Take EVA into account in [__]clear_user

__clear_user() (and clear_user() which uses it), always access the user
mode address space, which results in EVA store instructions when EVA is
enabled even if the current user address limit is KERNEL_DS.

Fix this by adding a new symbol __bzero_kernel for the normal kernel
address space bzero in EVA mode, and call that from __clear_user() if
eva_kernel_access().
Signed-off-by: James Hogan <james.hogan@imgtec.com>
Cc: Markos Chandras <markos.chandras@imgtec.com>
Cc: Paul Burton <paul.burton@imgtec.com>
Cc: Leonid Yegoshin <leonid.yegoshin@imgtec.com>
Cc: linux-mips@linux-mips.org
Patchwork: https://patchwork.linux-mips.org/patch/10844/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
parent 6f06a2c4
...@@ -1235,6 +1235,17 @@ __clear_user(void __user *addr, __kernel_size_t size) ...@@ -1235,6 +1235,17 @@ __clear_user(void __user *addr, __kernel_size_t size)
{ {
__kernel_size_t res; __kernel_size_t res;
if (eva_kernel_access()) {
__asm__ __volatile__(
"move\t$4, %1\n\t"
"move\t$5, $0\n\t"
"move\t$6, %2\n\t"
__MODULE_JAL(__bzero_kernel)
"move\t%0, $6"
: "=r" (res)
: "r" (addr), "r" (size)
: "$4", "$5", "$6", __UA_t0, __UA_t1, "$31");
} else {
might_fault(); might_fault();
__asm__ __volatile__( __asm__ __volatile__(
"move\t$4, %1\n\t" "move\t$4, %1\n\t"
...@@ -1245,6 +1256,7 @@ __clear_user(void __user *addr, __kernel_size_t size) ...@@ -1245,6 +1256,7 @@ __clear_user(void __user *addr, __kernel_size_t size)
: "=r" (res) : "=r" (res)
: "r" (addr), "r" (size) : "r" (addr), "r" (size)
: "$4", "$5", "$6", __UA_t0, __UA_t1, "$31"); : "$4", "$5", "$6", __UA_t0, __UA_t1, "$31");
}
return res; return res;
} }
......
...@@ -17,6 +17,7 @@ ...@@ -17,6 +17,7 @@
#include <asm/fpu.h> #include <asm/fpu.h>
#include <asm/msa.h> #include <asm/msa.h>
extern void *__bzero_kernel(void *__s, size_t __count);
extern void *__bzero(void *__s, size_t __count); extern void *__bzero(void *__s, size_t __count);
extern long __strncpy_from_kernel_nocheck_asm(char *__to, extern long __strncpy_from_kernel_nocheck_asm(char *__to,
const char *__from, long __len); const char *__from, long __len);
...@@ -64,6 +65,7 @@ EXPORT_SYMBOL(__copy_from_user_eva); ...@@ -64,6 +65,7 @@ EXPORT_SYMBOL(__copy_from_user_eva);
EXPORT_SYMBOL(__copy_in_user_eva); EXPORT_SYMBOL(__copy_in_user_eva);
EXPORT_SYMBOL(__copy_to_user_eva); EXPORT_SYMBOL(__copy_to_user_eva);
EXPORT_SYMBOL(__copy_user_inatomic_eva); EXPORT_SYMBOL(__copy_user_inatomic_eva);
EXPORT_SYMBOL(__bzero_kernel);
#endif #endif
EXPORT_SYMBOL(__bzero); EXPORT_SYMBOL(__bzero);
EXPORT_SYMBOL(__strncpy_from_kernel_nocheck_asm); EXPORT_SYMBOL(__strncpy_from_kernel_nocheck_asm);
......
...@@ -283,6 +283,8 @@ LEAF(memset) ...@@ -283,6 +283,8 @@ LEAF(memset)
1: 1:
#ifndef CONFIG_EVA #ifndef CONFIG_EVA
FEXPORT(__bzero) FEXPORT(__bzero)
#else
FEXPORT(__bzero_kernel)
#endif #endif
__BUILD_BZERO LEGACY_MODE __BUILD_BZERO LEGACY_MODE
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment