Commit e50be648 authored by Pavel Tatashin, committed by Will Deacon

arm64: uaccess: Remove uaccess_*_not_uao asm macros

It is safer and simpler to drop the uaccess assembly macros in favour of
inline C functions. Although this bloats the Image size slightly, it
aligns our user copy routines with '{get,put}_user()' and generally
makes the code a lot easier to reason about.

Cc: Catalin Marinas <catalin.marinas@arm.com>
Reviewed-by: Mark Rutland <mark.rutland@arm.com>
Tested-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Pavel Tatashin <pasha.tatashin@soleen.com>
[will: tweaked commit message and changed temporary variable names]
Signed-off-by: Will Deacon <will@kernel.org>
parent 94bb804e
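
For context, the uaccess_enable_not_uao() and uaccess_disable_not_uao() calls added on the C side below are the existing inline helpers from arch/arm64/include/asm/uaccess.h, not new functions. The sketch that follows is only a rough illustration of their shape, assuming the C-side uaccess_ttbr0_enable()/uaccess_ttbr0_disable() helpers and the ALTERNATIVE()/SET_PSTATE_PAN() machinery provided by that header; the in-tree definitions may differ in detail.

/*
 * Rough sketch, not verbatim kernel code: the C helpers mirror the removed
 * asm macros.  On CPUs with UAO the PAN toggle is patched to a NOP via the
 * ARM64_ALT_PAN_NOT_UAO alternative; otherwise PSTATE.PAN is cleared on
 * enable and set again on disable, after the software-PAN (TTBR0) switch.
 */
static inline void uaccess_enable_not_uao(void)
{
        if (!uaccess_ttbr0_enable())    /* sw PAN: map user TTBR0 back in */
                asm(ALTERNATIVE("nop", SET_PSTATE_PAN(0),
                                ARM64_ALT_PAN_NOT_UAO, CONFIG_ARM64_PAN));
}

static inline void uaccess_disable_not_uao(void)
{
        if (!uaccess_ttbr0_disable())   /* sw PAN: point TTBR0 at the zero page */
                asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1),
                                ARM64_ALT_PAN_NOT_UAO, CONFIG_ARM64_PAN));
}

Because the enable/disable pair now runs in C around each out-of-line copy routine, the assembly routines no longer need scratch registers for the uaccess state, which is what the removals in the .S files below reflect.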
@@ -58,23 +58,6 @@ alternative_else_nop_endif
         .endm
 #endif

-/*
- * These macros are no-ops when UAO is present.
- */
-        .macro  uaccess_disable_not_uao, tmp1, tmp2
-        uaccess_ttbr0_disable \tmp1, \tmp2
-alternative_if ARM64_ALT_PAN_NOT_UAO
-        SET_PSTATE_PAN(1)
-alternative_else_nop_endif
-        .endm
-
-        .macro  uaccess_enable_not_uao, tmp1, tmp2, tmp3
-        uaccess_ttbr0_enable \tmp1, \tmp2, \tmp3
-alternative_if ARM64_ALT_PAN_NOT_UAO
-        SET_PSTATE_PAN(0)
-alternative_else_nop_endif
-        .endm
-
 /*
  * Remove the address tag from a virtual address, if present.
  */
......
@@ -378,20 +378,34 @@ do { \
 extern unsigned long __must_check __arch_copy_from_user(void *to, const void __user *from, unsigned long n);
 #define raw_copy_from_user(to, from, n) \
 ({ \
-        __arch_copy_from_user((to), __uaccess_mask_ptr(from), (n)); \
+        unsigned long __acfu_ret; \
+        uaccess_enable_not_uao(); \
+        __acfu_ret = __arch_copy_from_user((to), \
+                                __uaccess_mask_ptr(from), (n)); \
+        uaccess_disable_not_uao(); \
+        __acfu_ret; \
 })

 extern unsigned long __must_check __arch_copy_to_user(void __user *to, const void *from, unsigned long n);
 #define raw_copy_to_user(to, from, n) \
 ({ \
-        __arch_copy_to_user(__uaccess_mask_ptr(to), (from), (n)); \
+        unsigned long __actu_ret; \
+        uaccess_enable_not_uao(); \
+        __actu_ret = __arch_copy_to_user(__uaccess_mask_ptr(to), \
+                                         (from), (n)); \
+        uaccess_disable_not_uao(); \
+        __actu_ret; \
 })

 extern unsigned long __must_check __arch_copy_in_user(void __user *to, const void __user *from, unsigned long n);
 #define raw_copy_in_user(to, from, n) \
 ({ \
-        __arch_copy_in_user(__uaccess_mask_ptr(to), \
-                            __uaccess_mask_ptr(from), (n)); \
+        unsigned long __aciu_ret; \
+        uaccess_enable_not_uao(); \
+        __aciu_ret = __arch_copy_in_user(__uaccess_mask_ptr(to), \
+                                         __uaccess_mask_ptr(from), (n)); \
+        uaccess_disable_not_uao(); \
+        __aciu_ret; \
 })

 #define INLINE_COPY_TO_USER
@@ -400,8 +414,11 @@ extern unsigned long __must_check __arch_copy_in_user(void __user *to, const voi
 extern unsigned long __must_check __arch_clear_user(void __user *to, unsigned long n);
 static inline unsigned long __must_check __clear_user(void __user *to, unsigned long n)
 {
-        if (access_ok(to, n))
+        if (access_ok(to, n)) {
+                uaccess_enable_not_uao();
                 n = __arch_clear_user(__uaccess_mask_ptr(to), n);
+                uaccess_disable_not_uao();
+        }
         return n;
 }
 #define clear_user __clear_user
......
@@ -20,7 +20,6 @@
  * Alignment fixed up by hardware.
  */
 ENTRY(__arch_clear_user)
-        uaccess_enable_not_uao x2, x3, x4
         mov     x2, x1                  // save the size for fixup return
         subs    x1, x1, #8
         b.mi    2f
@@ -40,7 +39,6 @@ uao_user_alternative 9f, strh, sttrh, wzr, x0, 2
         b.mi    5f
 uao_user_alternative 9f, strb, sttrb, wzr, x0, 0
 5:      mov     x0, #0
-        uaccess_disable_not_uao x2, x3
         ret
 ENDPROC(__arch_clear_user)
 EXPORT_SYMBOL(__arch_clear_user)
@@ -48,6 +46,5 @@ EXPORT_SYMBOL(__arch_clear_user)
         .section .fixup,"ax"
         .align  2
 9:      mov     x0, x2                  // return the original size
-        uaccess_disable_not_uao x2, x3
         ret
         .previous
@@ -54,10 +54,8 @@
         end     .req    x5

 ENTRY(__arch_copy_from_user)
-        uaccess_enable_not_uao x3, x4, x5
         add     end, x0, x2
 #include "copy_template.S"
-        uaccess_disable_not_uao x3, x4
         mov     x0, #0                  // Nothing to copy
         ret
 ENDPROC(__arch_copy_from_user)
@@ -66,6 +64,5 @@ EXPORT_SYMBOL(__arch_copy_from_user)
         .section .fixup,"ax"
         .align  2
 9998:   sub     x0, end, dst            // bytes not copied
-        uaccess_disable_not_uao x3, x4
         ret
         .previous
@@ -56,10 +56,8 @@
         end     .req    x5

 ENTRY(__arch_copy_in_user)
-        uaccess_enable_not_uao x3, x4, x5
         add     end, x0, x2
 #include "copy_template.S"
-        uaccess_disable_not_uao x3, x4
         mov     x0, #0
         ret
 ENDPROC(__arch_copy_in_user)
@@ -68,6 +66,5 @@ EXPORT_SYMBOL(__arch_copy_in_user)
         .section .fixup,"ax"
         .align  2
 9998:   sub     x0, end, dst            // bytes not copied
-        uaccess_disable_not_uao x3, x4
         ret
         .previous
@@ -53,10 +53,8 @@
         end     .req    x5

 ENTRY(__arch_copy_to_user)
-        uaccess_enable_not_uao x3, x4, x5
         add     end, x0, x2
 #include "copy_template.S"
-        uaccess_disable_not_uao x3, x4
         mov     x0, #0
         ret
 ENDPROC(__arch_copy_to_user)
@@ -65,6 +63,5 @@ EXPORT_SYMBOL(__arch_copy_to_user)
         .section .fixup,"ax"
         .align  2
 9998:   sub     x0, end, dst            // bytes not copied
-        uaccess_disable_not_uao x3, x4
         ret
         .previous
@@ -28,7 +28,11 @@ void memcpy_page_flushcache(char *to, struct page *page, size_t offset,
 unsigned long __copy_user_flushcache(void *to, const void __user *from,
                                      unsigned long n)
 {
-        unsigned long rc = __arch_copy_from_user(to, from, n);
+        unsigned long rc;
+
+        uaccess_enable_not_uao();
+        rc = __arch_copy_from_user(to, from, n);
+        uaccess_disable_not_uao();

         /* See above */
         __clean_dcache_area_pop(to, n - rc);
......