Commit 13799748 authored by Nicholas Piggin, committed by Michael Ellerman

powerpc/64: use interrupt restart table to speed up return from interrupt

Use the restart table facility to return from interrupt or system calls
without disabling MSR[EE] or MSR[RI].

The interrupt return asm is put into the low soft-masked region to prevent
interrupts from being processed there. They are still taken as masked
interrupts, however, which clobbers the SRRs and leaves a pending
soft-masked interrupt that requires replaying.

If such an interrupt happens, the return code uses restart table regions
to redirect to a fixup handler rather than continuing with the exit. The
fixup handler reloads r1 for the interrupt stack, reloads registers, and
sets up state to replay the soft-masked interrupt and try the exit again.
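
The restart table concept can be sketched in C as follows. This is a
minimal illustration only: the struct layout and helper are assumptions
for this sketch (the kernel's real lookup is search_kernel_restart_table(),
seen in the headers below), not the exact definitions. Each RESTART_TABLE()
use in the asm emits an entry mapping an exit-code range to a fixup
address, which the interrupt entry code consults for the interrupted NIP.

	struct restart_entry {
		unsigned long start;	/* first address of the covered exit code */
		unsigned long end;	/* first address past the covered range */
		unsigned long fixup;	/* where to redirect the interrupt return */
	};

	/* Hypothetical lookup in the style of search_kernel_restart_table():
	 * returns the fixup address if the interrupted NIP falls inside a
	 * covered range, or 0 to let the interrupted code continue as usual. */
	static unsigned long search_restart_table(const struct restart_entry *table,
						  unsigned long nr_entries,
						  unsigned long nip)
	{
		unsigned long i;

		for (i = 0; i < nr_entries; i++) {
			if (nip >= table[i].start && nip < table[i].end)
				return table[i].fixup;
		}
		return 0;
	}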

Some types of security exit fallback flushes and barriers are currently
unable to cope with reentrant interrupts, e.g., because they store state
in a scratch SPR that would be clobbered even by masked interrupts. For
now, the interrupts-enabled exits are disabled when these flushes are in
use.
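
In C, the gate might look like the sketch below. The helper name comes
from the [mpe: ...] note underneath; its body is an assumption based on
the interrupt_exit_not_reentrant static key added by this patch, not a
quote of the final code.

	/* Assumed shape of exit_must_hard_disable(): when a non-reentrant
	 * flush or barrier is active, the exit path must hard disable
	 * MSR[EE]/MSR[RI] as before rather than take the fast exit. */
	static __always_inline bool exit_must_hard_disable(void)
	{
		return static_branch_unlikely(&interrupt_exit_not_reentrant);
	}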
Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
[mpe: Guard unused exit_must_hard_disable() as reported by lkp]
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20210617155116.2167984-13-npiggin@gmail.com
parent 9d1988ca
......@@ -73,6 +73,11 @@ long system_call_exception(long r3, long r4, long r5, long r6, long r7, long r8,
notrace unsigned long syscall_exit_prepare(unsigned long r3, struct pt_regs *regs, long scv);
notrace unsigned long interrupt_exit_user_prepare(struct pt_regs *regs);
notrace unsigned long interrupt_exit_kernel_prepare(struct pt_regs *regs);
#ifdef CONFIG_PPC64
unsigned long syscall_exit_restart(unsigned long r3, struct pt_regs *regs);
unsigned long interrupt_exit_user_restart(struct pt_regs *regs);
unsigned long interrupt_exit_kernel_restart(struct pt_regs *regs);
#endif
long ppc_fadvise64_64(int fd, int advice, u32 offset_high, u32 offset_low,
u32 len_high, u32 len_low);
......
......@@ -18,8 +18,17 @@
* PACA flags in paca->irq_happened.
*
* These bits are set when interrupts occur while soft-disabled
* and allow a proper replay. Additionally, PACA_IRQ_HARD_DIS
* is set whenever we manually hard disable.
* and allow a proper replay.
*
* PACA_IRQ_HARD_DIS is set whenever we hard disable. It is almost
* always in synch with the MSR[EE] state, except:
* - A window in interrupt entry, where hardware disables MSR[EE] and that
* must be "reconciled" with the soft mask state.
* - NMI interrupts that hit in awkward places, until they fix the state.
* - When local irqs are being enabled and state is being fixed up.
* - When returning from an interrupt there are some windows where this
* can become out of synch, but gets fixed before the RFI or before
* executing the next user instruction (see arch/powerpc/kernel/interrupt.c).
*/
#define PACA_IRQ_HARD_DIS 0x01
#define PACA_IRQ_DBELL 0x02
......
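To make the flag semantics concrete, here is a hedged sketch of the usual
pattern; it is inferred from the comment above (function name invented for
illustration), not copied from the kernel's masked-interrupt handlers.

	static inline void record_masked_doorbell(void)
	{
		/* e.g. a doorbell taken while soft-masked: remember it in
		 * irq_happened for replay, and note that the hardware entry
		 * left MSR[EE] clear. */
		local_paca->irq_happened |= PACA_IRQ_DBELL | PACA_IRQ_HARD_DIS;
	}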
......@@ -79,6 +79,8 @@ unsigned long search_kernel_restart_table(unsigned long addr);
#endif
#ifdef CONFIG_PPC_BOOK3S_64
DECLARE_STATIC_KEY_FALSE(interrupt_exit_not_reentrant);
static inline void srr_regs_clobbered(void)
{
local_paca->srr_valid = 0;
......
......@@ -165,6 +165,9 @@ struct paca_struct {
u64 kstack; /* Saved Kernel stack addr */
u64 saved_r1; /* r1 save for RTAS calls or PM or EE=0 */
u64 saved_msr; /* MSR saved here by enter_rtas */
#ifdef CONFIG_PPC64
u64 exit_save_r1; /* Syscall/interrupt R1 save */
#endif
#ifdef CONFIG_PPC_BOOK3E
u16 trap_save; /* Used when bad stack is encountered */
#endif
......
......@@ -53,6 +53,7 @@ struct pt_regs
struct {
#ifdef CONFIG_PPC64
unsigned long ppr;
unsigned long exit_result;
#endif
union {
#ifdef CONFIG_PPC_KUAP
......
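Presumably exit_result caches the prepare function's return value across a
restart, so a replayed exit can resume with the same decision (e.g. whether
non-volatile GPRs need restoring). That reading is an assumption from the
restart paths below; roughly:

	static void save_exit_result(struct pt_regs *regs)
	{
		/* Hypothetical helper: stash the prepare result so the restart
		 * handler can retry the exit with the same decision. */
		regs->exit_result = interrupt_exit_user_prepare(regs);
	}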
......@@ -244,6 +244,9 @@ int main(void)
OFFSET(PACAHWCPUID, paca_struct, hw_cpu_id);
OFFSET(PACAKEXECSTATE, paca_struct, kexec_state);
OFFSET(PACA_DSCR_DEFAULT, paca_struct, dscr_default);
#ifdef CONFIG_PPC64
OFFSET(PACA_EXIT_SAVE_R1, paca_struct, exit_save_r1);
#endif
#ifdef CONFIG_PPC_BOOK3E
OFFSET(PACA_TRAP_SAVE, paca_struct, trap_save);
#endif
......
......@@ -58,7 +58,7 @@ _ASM_NOKPROBE_SYMBOL(system_call_vectored_\name)
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
BEGIN_FTR_SECTION
extrdi. r10, r12, 1, (63-MSR_TS_T_LG) /* transaction active? */
bne .Ltabort_syscall
bne tabort_syscall
END_FTR_SECTION_IFSET(CPU_FTR_TM)
#endif
SCV_INTERRUPT_TO_KERNEL
......@@ -116,9 +116,18 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
bl system_call_exception
.Lsyscall_vectored_\name\()_exit:
addi r4,r1,STACK_FRAME_OVERHEAD
li r5,1 /* scv */
bl syscall_exit_prepare
std r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
.Lsyscall_vectored_\name\()_rst_start:
lbz r11,PACAIRQHAPPENED(r13)
andi. r11,r11,(~PACA_IRQ_HARD_DIS)@l
bne- syscall_vectored_\name\()_restart
li r11,IRQS_ENABLED
stb r11,PACAIRQSOFTMASK(r13)
li r11,0
stb r11,PACAIRQHAPPENED(r13) # clear out possible HARD_DIS
ld r2,_CCR(r1)
ld r4,_NIP(r1)
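Read as C, the new check above does roughly the following; this is a
paraphrase of the asm using the PACA fields it touches (helper name
invented), not kernel source.

	static bool exit_window_clear(void)
	{
		/* Any irq_happened bit other than PACA_IRQ_HARD_DIS means a
		 * masked interrupt hit inside the exit window; the caller must
		 * branch to the restart handler to replay it. */
		if (local_paca->irq_happened & ~PACA_IRQ_HARD_DIS)
			return false;	/* -> syscall_vectored_\name\()_restart */
		local_paca->irq_soft_mask = IRQS_ENABLED;
		local_paca->irq_happened = 0;	/* clear out possible HARD_DIS */
		return true;
	}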
......@@ -168,8 +177,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
b . /* prevent speculative execution */
.Lsyscall_vectored_\name\()_restore_regs:
li r3,0
mtmsrd r3,1
mtspr SPRN_SRR0,r4
mtspr SPRN_SRR1,r5
......@@ -187,9 +194,26 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
REST_2GPRS(12, r1)
ld r1,GPR1(r1)
RFI_TO_USER
.Lsyscall_vectored_\name\()_rst_end:
syscall_vectored_\name\()_restart:
GET_PACA(r13)
ld r1,PACA_EXIT_SAVE_R1(r13)
ld r2,PACATOC(r13)
ld r3,RESULT(r1)
addi r4,r1,STACK_FRAME_OVERHEAD
li r11,IRQS_ALL_DISABLED
stb r11,PACAIRQSOFTMASK(r13)
bl syscall_exit_restart
std r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
b .Lsyscall_vectored_\name\()_rst_start
RESTART_TABLE(.Lsyscall_vectored_\name\()_rst_start, .Lsyscall_vectored_\name\()_rst_end, syscall_vectored_\name\()_restart)
.endm
system_call_vectored common 0x3000
/*
* We instantiate another entry copy for the SIGILL variant, with TRAP=0x7ff0
* which is tested by system_call_exception when r0 is -1 (as set by vector
......@@ -222,7 +246,7 @@ _ASM_NOKPROBE_SYMBOL(system_call_common)
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
BEGIN_FTR_SECTION
extrdi. r10, r12, 1, (63-MSR_TS_T_LG) /* transaction active? */
bne .Ltabort_syscall
bne tabort_syscall
END_FTR_SECTION_IFSET(CPU_FTR_TM)
#endif
mr r10,r1
......@@ -292,9 +316,18 @@ END_BTB_FLUSH_SECTION
bl system_call_exception
.Lsyscall_exit:
addi r4,r1,STACK_FRAME_OVERHEAD
li r5,0 /* !scv */
bl syscall_exit_prepare
std r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
.Lsyscall_rst_start:
lbz r11,PACAIRQHAPPENED(r13)
andi. r11,r11,(~PACA_IRQ_HARD_DIS)@l
bne- syscall_restart
li r11,IRQS_ENABLED
stb r11,PACAIRQSOFTMASK(r13)
li r11,0
stb r11,PACAIRQHAPPENED(r13) # clear out possible HARD_DIS
ld r2,_CCR(r1)
ld r6,_LINK(r1)
......@@ -361,9 +394,24 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
REST_8GPRS(4, r1)
ld r12,GPR12(r1)
b .Lsyscall_restore_regs_cont
.Lsyscall_rst_end:
syscall_restart:
GET_PACA(r13)
ld r1,PACA_EXIT_SAVE_R1(r13)
ld r2,PACATOC(r13)
ld r3,RESULT(r1)
addi r4,r1,STACK_FRAME_OVERHEAD
li r11,IRQS_ALL_DISABLED
stb r11,PACAIRQSOFTMASK(r13)
bl syscall_exit_restart
std r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
b .Lsyscall_rst_start
RESTART_TABLE(.Lsyscall_rst_start, .Lsyscall_rst_end, syscall_restart)
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
.Ltabort_syscall:
tabort_syscall:
/* Firstly we need to enable TM in the kernel */
mfmsr r10
li r9, 1
......@@ -427,8 +475,10 @@ _ASM_NOKPROBE_SYMBOL(fast_interrupt_return_srr)
ld r5,_MSR(r1)
andi. r0,r5,MSR_PR
#ifdef CONFIG_PPC_BOOK3S
bne .Lfast_user_interrupt_return_amr_srr
kuap_kernel_restore r3, r4
beq 1f
kuap_user_restore r3, r4
b .Lfast_user_interrupt_return_srr
1: kuap_kernel_restore r3, r4
andi. r0,r5,MSR_RI
li r3,0 /* 0 return value, no EMULATE_STACK_STORE */
bne+ .Lfast_kernel_interrupt_return_srr
......@@ -452,18 +502,18 @@ _ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\())
bl interrupt_exit_user_prepare
cmpdi r3,0
bne- .Lrestore_nvgprs_\srr
.Lrestore_nvgprs_\srr\()_cont:
std r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
.Linterrupt_return_\srr\()_user_rst_start:
lbz r11,PACAIRQHAPPENED(r13)
andi. r11,r11,(~PACA_IRQ_HARD_DIS)@l
bne- interrupt_return_\srr\()_user_restart
li r11,IRQS_ENABLED
stb r11,PACAIRQSOFTMASK(r13)
li r11,0
stb r11,PACAIRQHAPPENED(r13) # clear out possible HARD_DIS
#ifdef CONFIG_PPC_BOOK3S
.Lfast_user_interrupt_return_amr_\srr\():
kuap_user_restore r3, r4
#endif
.Lfast_user_interrupt_return_\srr\():
BEGIN_FTR_SECTION
ld r10,_PPR(r1)
mtspr SPRN_PPR,r10
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
#ifdef CONFIG_PPC_BOOK3S
.ifc \srr,srr
lbz r4,PACASRR_VALID(r13)
......@@ -493,6 +543,16 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
.endif
DEBUG_SRR_VALID \srr
#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
lbz r4,PACAIRQSOFTMASK(r13)
tdnei r4,IRQS_ENABLED
#endif
BEGIN_FTR_SECTION
ld r10,_PPR(r1)
mtspr SPRN_PPR,r10
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
BEGIN_FTR_SECTION
stdcx. r0,0,r1 /* to clear the reservation */
FTR_SECTION_ELSE
......@@ -524,16 +584,44 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
HRFI_TO_USER
.endif
b . /* prevent speculative execution */
.Linterrupt_return_\srr\()_user_rst_end:
.Lrestore_nvgprs_\srr\():
REST_NVGPRS(r1)
b .Lfast_user_interrupt_return_\srr
b .Lrestore_nvgprs_\srr\()_cont
interrupt_return_\srr\()_user_restart:
GET_PACA(r13)
ld r1,PACA_EXIT_SAVE_R1(r13)
ld r2,PACATOC(r13)
addi r3,r1,STACK_FRAME_OVERHEAD
li r11,IRQS_ALL_DISABLED
stb r11,PACAIRQSOFTMASK(r13)
bl interrupt_exit_user_restart
std r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
b .Linterrupt_return_\srr\()_user_rst_start
RESTART_TABLE(.Linterrupt_return_\srr\()_user_rst_start, .Linterrupt_return_\srr\()_user_rst_end, interrupt_return_\srr\()_user_restart)
.balign IFETCH_ALIGN_BYTES
.Lkernel_interrupt_return_\srr\():
.Linterrupt_return_\srr\()_kernel:
addi r3,r1,STACK_FRAME_OVERHEAD
bl interrupt_exit_kernel_prepare
std r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
.Linterrupt_return_\srr\()_kernel_rst_start:
ld r11,SOFTE(r1)
cmpwi r11,IRQS_ENABLED
stb r11,PACAIRQSOFTMASK(r13)
bne 1f
lbz r11,PACAIRQHAPPENED(r13)
andi. r11,r11,(~PACA_IRQ_HARD_DIS)@l
bne- interrupt_return_\srr\()_kernel_restart
li r11,0
stb r11,PACAIRQHAPPENED(r13) # clear out possible HARD_DIS
1:
.Lfast_kernel_interrupt_return_\srr\():
cmpdi cr1,r3,0
#ifdef CONFIG_PPC_BOOK3S
......@@ -627,6 +715,21 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
HRFI_TO_KERNEL
.endif
b . /* prevent speculative execution */
.Linterrupt_return_\srr\()_kernel_rst_end:
interrupt_return_\srr\()_kernel_restart:
GET_PACA(r13)
ld r1,PACA_EXIT_SAVE_R1(r13)
ld r2,PACATOC(r13)
addi r3,r1,STACK_FRAME_OVERHEAD
li r11,IRQS_ALL_DISABLED
stb r11,PACAIRQSOFTMASK(r13)
bl interrupt_exit_kernel_restart
std r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
b .Linterrupt_return_\srr\()_kernel_rst_start
RESTART_TABLE(.Linterrupt_return_\srr\()_kernel_rst_start, .Linterrupt_return_\srr\()_kernel_rst_end, interrupt_return_\srr\()_kernel_restart)
.endm
interrupt_return_macro srr
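The kernel-return variant above differs from the user paths: the soft mask
is restored from the SOFTE value saved in the regs, and only when it
restores to enabled can a pending masked interrupt force a restart. As C,
under the same caveats as the earlier paraphrase (invented helper name,
not kernel source):

	static bool kernel_exit_window_clear(struct pt_regs *regs)
	{
		local_paca->irq_soft_mask = regs->softe;
		if (regs->softe != IRQS_ENABLED)
			return true;	/* exiting with irqs soft-disabled: no replay */
		if (local_paca->irq_happened & ~PACA_IRQ_HARD_DIS)
			return false;	/* -> interrupt_return_\srr\()_kernel_restart */
		local_paca->irq_happened = 0;	/* clear out possible HARD_DIS */
		return true;
	}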
......
......@@ -17,6 +17,7 @@
#include <linux/stop_machine.h>
#include <asm/cputable.h>
#include <asm/code-patching.h>
#include <asm/interrupt.h>
#include <asm/page.h>
#include <asm/sections.h>
#include <asm/setup.h>
......@@ -225,6 +226,9 @@ static void do_stf_exit_barrier_fixups(enum stf_barrier_type types)
: "unknown");
}
static bool stf_exit_reentrant = false;
static bool rfi_exit_reentrant = false;
static int __do_stf_barrier_fixups(void *data)
{
enum stf_barrier_type *types = data;
......@@ -239,11 +243,27 @@ void do_stf_barrier_fixups(enum stf_barrier_type types)
{
/*
* The call to the fallback entry flush, and the fallback/sync-ori exit
* flush can not be safely patched in/out while other CPUs are
* executing them. So call __do_stf_barrier_fixups() on one CPU while
* all other CPUs spin in the stop machine core with interrupts hard
* disabled.
*
* The branch to mark interrupt exits non-reentrant is enabled first,
* then stop_machine runs which will ensure all CPUs are out of the
* low level interrupt exit code before patching. After the patching,
* if allowed, then flip the branch to allow fast exits.
*/
static_branch_enable(&interrupt_exit_not_reentrant);
stop_machine(__do_stf_barrier_fixups, &types, NULL);
if ((types & STF_BARRIER_FALLBACK) || (types & STF_BARRIER_SYNC_ORI))
stf_exit_reentrant = false;
else
stf_exit_reentrant = true;
if (stf_exit_reentrant && rfi_exit_reentrant)
static_branch_disable(&interrupt_exit_not_reentrant);
}
void do_uaccess_flush_fixups(enum l1d_flush_type types)
......@@ -409,8 +429,9 @@ void do_entry_flush_fixups(enum l1d_flush_type types)
stop_machine(__do_entry_flush_fixups, &types, NULL);
}
void do_rfi_flush_fixups(enum l1d_flush_type types)
static int __do_rfi_flush_fixups(void *data)
{
enum l1d_flush_type types = *(enum l1d_flush_type *)data;
unsigned int instrs[3], *dest;
long *start, *end;
int i;
......@@ -453,6 +474,29 @@ void do_rfi_flush_fixups(enum l1d_flush_type types)
: "ori type" :
(types & L1D_FLUSH_MTTRIG) ? "mttrig type"
: "unknown");
return 0;
}
void do_rfi_flush_fixups(enum l1d_flush_type types)
{
/*
* stop_machine gets all CPUs out of the interrupt exit handler same
* as do_stf_barrier_fixups. do_rfi_flush_fixups patching can run
* without stop_machine, so this could be achieved with a broadcast
* IPI instead, but this matches the stf sequence.
*/
static_branch_enable(&interrupt_exit_not_reentrant);
stop_machine(__do_rfi_flush_fixups, &types, NULL);
if (types & L1D_FLUSH_FALLBACK)
rfi_exit_reentrant = false;
else
rfi_exit_reentrant = true;
if (stf_exit_reentrant && rfi_exit_reentrant)
static_branch_disable(&interrupt_exit_not_reentrant);
}
void do_barrier_nospec_fixups_range(bool enable, void *fixup_start, void *fixup_end)
......