Commit 09f33601 authored by Huacai Chen's avatar Huacai Chen

LoongArch: Add basic STACKPROTECTOR support

Add basic stack protector support similar to other architectures. A
constant canary value is set at boot time, and with help of compiler's
-fstack-protector we can detect stack corruption.
Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
parent 7db54bfe
@@ -103,6 +103,7 @@ config LOONGARCH
	select HAVE_REGS_AND_STACK_ACCESS_API
	select HAVE_RSEQ
	select HAVE_SETUP_PER_CPU_AREA if NUMA
+	select HAVE_STACKPROTECTOR
	select HAVE_SYSCALL_TRACEPOINTS
	select HAVE_TIF_NOHZ
	select HAVE_VIRT_CPU_ACCOUNTING_GEN if !SMP
......
/* SPDX-License-Identifier: GPL-2.0 */
/*
* GCC stack protector support.
*
* Stack protector works by putting predefined pattern at the start of
* the stack frame and verifying that it hasn't been overwritten when
* returning from the function. The pattern is called stack canary and
* on LoongArch gcc expects it to be defined by a global variable called
* "__stack_chk_guard".
*/
#ifndef _ASM_STACKPROTECTOR_H
#define _ASM_STACKPROTECTOR_H
#include <linux/random.h>
#include <linux/version.h>
extern unsigned long __stack_chk_guard;
/*
* Initialize the stackprotector canary value.
*
* NOTE: this must only be called from functions that never return,
* and it must always be inlined.
*/
static __always_inline void boot_init_stack_canary(void)
{
	unsigned long guard_val;

	/* Seed the canary from the RNG, then fold in the kernel version. */
	get_random_bytes(&guard_val, sizeof(guard_val));
	guard_val ^= LINUX_VERSION_CODE;

	/*
	 * Publish the value both per-task and in the global guard that
	 * -fstack-protector-generated code compares against on return.
	 */
	current->stack_canary = guard_val;
	__stack_chk_guard = guard_val;
}
#endif /* _ASM_STACKPROTECTOR_H */
@@ -68,6 +68,9 @@ void output_task_defines(void)
	OFFSET(TASK_FLAGS, task_struct, flags);
	OFFSET(TASK_MM, task_struct, mm);
	OFFSET(TASK_PID, task_struct, pid);
+#if defined(CONFIG_STACKPROTECTOR)
+	OFFSET(TASK_STACK_CANARY, task_struct, stack_canary);
+#endif
	DEFINE(TASK_STRUCT_SIZE, sizeof(struct task_struct));
	BLANK();
}
......
@@ -47,6 +47,12 @@
 #include <asm/unwind.h>
 #include <asm/vdso.h>

+#ifdef CONFIG_STACKPROTECTOR
+#include <linux/stackprotector.h>
+unsigned long __stack_chk_guard __read_mostly;
+EXPORT_SYMBOL(__stack_chk_guard);
+#endif
+
 /*
  * Idle related variables and functions
  */
......
@@ -23,6 +23,11 @@ SYM_FUNC_START(__switch_to)
	stptr.d	ra, a0, THREAD_REG01
	stptr.d	a3, a0, THREAD_SCHED_RA
	stptr.d	a4, a0, THREAD_SCHED_CFA
+#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_SMP)
+	la	t7, __stack_chk_guard
+	LONG_L	t8, a1, TASK_STACK_CANARY
+	LONG_S	t8, t7, 0
+#endif
	move	tp, a2
	cpu_restore_nonscratch a1
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment