Commit a1d5ebaf authored by Matthew Leach, committed by Catalin Marinas

arm64: big-endian: don't treat code as data when copying sigret code

Currently the sigreturn compat code is copied to an offset in the
vectors table. When using a BE kernel this data will be stored in the
wrong endianness, so when returning from a signal on a 32-bit BE system,
arbitrary code will be executed.

Instead of declaring the code inside a struct and copying that, use
the assembler's .byte directives to store the code in the correct
endianness regardless of platform endianness.
Acked-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Matthew Leach <matthew.leach@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
parent 55b89540
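
To make the failure mode described in the message above concrete, here is a small host-side sketch (an illustration only, not part of the patch; NR_SIGRETURN is a hypothetical stand-in for __NR_compat_sigreturn). It contrasts an opcode stored through a 32-bit integer, whose in-memory layout follows kernel endianness, with emitting the instruction bytes explicitly, which is what the new .byte directives in the diff below do.

/* Host-side illustration of the BE sigreturn trampoline bug (not kernel code). */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define NR_SIGRETURN 119	/* illustrative stand-in for __NR_compat_sigreturn */

int main(void)
{
	/* Old scheme: build the opcode as a 32-bit integer and copy it as data. */
	uint32_t mov_r7 = 0xe3a07000 | NR_SIGRETURN;	/* mov r7, #NR_SIGRETURN */
	uint8_t as_data[4];
	memcpy(as_data, &mov_r7, sizeof(as_data));	/* laid out in host/kernel byte order */

	/* New scheme: emit the bytes explicitly, as the .byte directives do. */
	uint8_t as_code[4] = { NR_SIGRETURN, 0x70, 0xa0, 0xe3 };

	/*
	 * AArch32 instruction fetch expects little-endian words in memory, so
	 * the two buffers match on a little-endian kernel but as_data comes
	 * out byte-reversed on a big-endian one.
	 */
	printf("as_data: %02x %02x %02x %02x\n",
	       as_data[0], as_data[1], as_data[2], as_data[3]);
	printf("as_code: %02x %02x %02x %02x\n",
	       as_code[0], as_code[1], as_code[2], as_code[3]);
	return 0;
}

On a little-endian build the two buffers print identically; on a big-endian build the copied-as-data version is byte-reversed, which is why signal return on a 32-bit BE system ran arbitrary code.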
arch/arm64/kernel/kuser32.S

@@ -27,6 +27,9 @@
  *
  * See Documentation/arm/kernel_user_helpers.txt for formal definitions.
  */
+
+#include <asm/unistd32.h>
+
 	.align	5
 	.globl	__kuser_helper_start
 __kuser_helper_start:

@@ -75,3 +78,42 @@ __kuser_helper_version:			// 0xffff0ffc
 	.word	((__kuser_helper_end - __kuser_helper_start) >> 5)
 	.globl	__kuser_helper_end
 __kuser_helper_end:
+
+/*
+ * AArch32 sigreturn code
+ *
+ * For ARM syscalls, the syscall number has to be loaded into r7.
+ * We do not support an OABI userspace.
+ *
+ * For Thumb syscalls, we also pass the syscall number via r7. We therefore
+ * need two 16-bit instructions.
+ */
+	.globl __aarch32_sigret_code_start
+__aarch32_sigret_code_start:
+
+	/*
+	 * ARM Code
+	 */
+	.byte __NR_compat_sigreturn, 0x70, 0xa0, 0xe3		// mov	r7, #__NR_compat_sigreturn
+	.byte __NR_compat_sigreturn, 0x00, 0x00, 0xef		// svc	#__NR_compat_sigreturn
+
+	/*
+	 * Thumb code
+	 */
+	.byte __NR_compat_sigreturn, 0x27			// svc	#__NR_compat_sigreturn
+	.byte __NR_compat_sigreturn, 0xdf			// mov	r7, #__NR_compat_sigreturn
+
+	/*
+	 * ARM code
+	 */
+	.byte __NR_compat_rt_sigreturn, 0x70, 0xa0, 0xe3	// mov	r7, #__NR_compat_rt_sigreturn
+	.byte __NR_compat_rt_sigreturn, 0x00, 0x00, 0xef	// svc	#__NR_compat_rt_sigreturn
+
+	/*
+	 * Thumb code
+	 */
+	.byte __NR_compat_rt_sigreturn, 0x27			// svc	#__NR_compat_rt_sigreturn
+	.byte __NR_compat_rt_sigreturn, 0xdf			// mov	r7, #__NR_compat_rt_sigreturn
+
+	.globl __aarch32_sigret_code_end
+__aarch32_sigret_code_end:
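
For readers checking the operands, this small sketch (again host-side and illustrative, not part of the patch; the instruction encodings are the same ARM/Thumb constants used by the removed signal32.c macros below, and 119 is only an example syscall number) regenerates the .byte sequences above by splitting each encoding into little-endian byte order.

/* Host-side check of the .byte operands used in the trampoline (illustrative only). */
#include <stdint.h>
#include <stdio.h>

static void emit_le32(uint32_t insn)	/* A32 instruction: 4 bytes, LSB first */
{
	printf(".byte 0x%02x, 0x%02x, 0x%02x, 0x%02x\n",
	       insn & 0xff, (insn >> 8) & 0xff,
	       (insn >> 16) & 0xff, (insn >> 24) & 0xff);
}

static void emit_le16(uint16_t insn)	/* T16 instruction: 2 bytes, LSB first */
{
	printf(".byte 0x%02x, 0x%02x\n", insn & 0xff, (insn >> 8) & 0xff);
}

int main(void)
{
	uint32_t nr = 119;		/* illustrative syscall number */

	emit_le32(0xe3a07000 | nr);	/* A32 mov r7, #nr  -> nr, 0x70, 0xa0, 0xe3 */
	emit_le32(0xef000000 | nr);	/* A32 svc #nr      -> nr, 0x00, 0x00, 0xef */
	emit_le16(0x2700 | nr);		/* T16 mov r7, #nr  -> nr, 0x27 */
	emit_le16(0xdf00 | nr);		/* T16 svc #nr      -> nr, 0xdf */
	return 0;
}

Splitting the 16-bit Thumb encodings this way also shows why the trampoline needs two separate .byte lines per Thumb instruction rather than one packed 32-bit word.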
arch/arm64/kernel/signal32.c

@@ -100,34 +100,6 @@ struct compat_rt_sigframe {
 #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
 
-/*
- * For ARM syscalls, the syscall number has to be loaded into r7.
- * We do not support an OABI userspace.
- */
-#define MOV_R7_NR_SIGRETURN	(0xe3a07000 | __NR_compat_sigreturn)
-#define SVC_SYS_SIGRETURN	(0xef000000 | __NR_compat_sigreturn)
-#define MOV_R7_NR_RT_SIGRETURN	(0xe3a07000 | __NR_compat_rt_sigreturn)
-#define SVC_SYS_RT_SIGRETURN	(0xef000000 | __NR_compat_rt_sigreturn)
-
-/*
- * For Thumb syscalls, we also pass the syscall number via r7. We therefore
- * need two 16-bit instructions.
- */
-#define SVC_THUMB_SIGRETURN	(((0xdf00 | __NR_compat_sigreturn) << 16) | \
-				  0x2700 | __NR_compat_sigreturn)
-#define SVC_THUMB_RT_SIGRETURN	(((0xdf00 | __NR_compat_rt_sigreturn) << 16) | \
-				  0x2700 | __NR_compat_rt_sigreturn)
-
-const compat_ulong_t aarch32_sigret_code[6] = {
-	/*
-	 * AArch32 sigreturn code.
-	 * We don't construct an OABI SWI - instead we just set the imm24 field
-	 * to the EABI syscall number so that we create a sane disassembly.
-	 */
-	MOV_R7_NR_SIGRETURN, SVC_SYS_SIGRETURN, SVC_THUMB_SIGRETURN,
-	MOV_R7_NR_RT_SIGRETURN, SVC_SYS_RT_SIGRETURN, SVC_THUMB_RT_SIGRETURN,
-};
-
 static inline int put_sigset_t(compat_sigset_t __user *uset, sigset_t *set)
 {
 	compat_sigset_t cset;
...
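
The removed SVC_THUMB_* macros above are where the Thumb half of the bug lived: two 16-bit instructions were packed into a single 32-bit word and copied as data, so their in-memory order depended on kernel endianness. An illustrative host-side sketch of that effect (again not kernel code; 119 is just an example syscall number):

/* Host-side illustration of the old Thumb packing and its endianness dependence. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	uint32_t nr = 119;				/* illustrative __NR_compat_sigreturn */
	uint32_t packed = ((0xdf00 | nr) << 16) |	/* svc #nr      (intended to run 2nd) */
			   (0x2700 | nr);		/* mov r7, #nr  (intended to run 1st) */
	uint8_t mem[4];

	memcpy(mem, &packed, sizeof(mem));		/* laid out in kernel byte order */

	/*
	 * Little-endian kernel: mem = { nr, 0x27, nr, 0xdf } -> mov r7 ; svc (correct).
	 * Big-endian kernel:    mem = { 0xdf, nr, 0x27, nr } -> neither intended
	 * instruction survives, which is why the patch emits the bytes explicitly.
	 */
	printf("%02x %02x %02x %02x\n", mem[0], mem[1], mem[2], mem[3]);
	return 0;
}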
arch/arm64/kernel/vdso.c

@@ -58,7 +58,10 @@ static struct page *vectors_page[1];
 static int alloc_vectors_page(void)
 {
 	extern char __kuser_helper_start[], __kuser_helper_end[];
+	extern char __aarch32_sigret_code_start[], __aarch32_sigret_code_end[];
+
 	int kuser_sz = __kuser_helper_end - __kuser_helper_start;
+	int sigret_sz = __aarch32_sigret_code_end - __aarch32_sigret_code_start;
 	unsigned long vpage;
 
 	vpage = get_zeroed_page(GFP_ATOMIC);

@@ -72,7 +75,7 @@ static int alloc_vectors_page(void)
 	/* sigreturn code */
 	memcpy((void *)vpage + AARCH32_KERN_SIGRET_CODE_OFFSET,
-	       aarch32_sigret_code, sizeof(aarch32_sigret_code));
+	       __aarch32_sigret_code_start, sigret_sz);
 
 	flush_icache_range(vpage, vpage + PAGE_SIZE);
 	vectors_page[0] = virt_to_page(vpage);
...