Commit b037b09b authored by Andy Lutomirski, committed by Thomas Gleixner

x86/entry: Rename idtentry_enter/exit_cond_rcu() to idtentry_enter/exit()

They were originally called _cond_rcu because they were special versions
with conditional RCU handling.  Now they're the standard entry and exit
path, so the _cond_rcu part is just confusing.  Drop it.

Also change the signature to make them more extensible and more foolproof.

No functional change -- it's pure refactoring.
Signed-off-by: Andy Lutomirski <luto@kernel.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/247fc67685263e0b673e1d7f808182d28ff80359.1593795633.git.luto@kernel.org
parent dcb7fd82
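For orientation, the calling convention the patch moves to looks as follows. This is a minimal sketch of what the DEFINE_IDTENTRY expansion in the diff below generates; exc_example and handle_example are hypothetical names, while idtentry_enter(), idtentry_exit() and idtentry_state_t are the interfaces introduced by this change.

/*
 * Sketch only, not part of the patch: a raw idtentry handler written
 * against the renamed API.
 */
__visible noinstr void exc_example(struct pt_regs *regs)
{
	/* Was: bool rcu_exit = idtentry_enter_cond_rcu(regs); */
	idtentry_state_t state = idtentry_enter(regs);

	instrumentation_begin();
	handle_example(regs);		/* hypothetical C-level handler */
	instrumentation_end();

	/* Was: idtentry_exit_cond_rcu(regs, rcu_exit); */
	idtentry_exit(regs, state);
}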
--- a/arch/x86/entry/common.c
+++ b/arch/x86/entry/common.c
@@ -559,8 +559,7 @@ SYSCALL_DEFINE0(ni_syscall)
 }
 
 /**
- * idtentry_enter_cond_rcu - Handle state tracking on idtentry with conditional
- *                           RCU handling
+ * idtentry_enter - Handle state tracking on ordinary idtentries
  * @regs:	Pointer to pt_regs of interrupted context
  *
  * Invokes:
@@ -572,6 +571,9 @@ SYSCALL_DEFINE0(ni_syscall)
  *  - The hardirq tracer to keep the state consistent as low level ASM
  *    entry disabled interrupts.
  *
+ * As a precondition, this requires that the entry came from user mode,
+ * idle, or a kernel context in which RCU is watching.
+ *
  * For kernel mode entries RCU handling is done conditional. If RCU is
  * watching then the only RCU requirement is to check whether the tick has
  * to be restarted. If RCU is not watching then rcu_irq_enter() has to be
@@ -585,18 +587,21 @@ SYSCALL_DEFINE0(ni_syscall)
  * establish the proper context for NOHZ_FULL. Otherwise scheduling on exit
  * would not be possible.
  *
- * Returns: True if RCU has been adjusted on a kernel entry
- *          False otherwise
+ * Returns: An opaque object that must be passed to idtentry_exit()
  *
- * The return value must be fed into the rcu_exit argument of
- * idtentry_exit_cond_rcu().
+ * The return value must be fed into the state argument of
+ * idtentry_exit().
  */
-bool noinstr idtentry_enter_cond_rcu(struct pt_regs *regs)
+idtentry_state_t noinstr idtentry_enter(struct pt_regs *regs)
 {
+	idtentry_state_t ret = {
+		.exit_rcu = false,
+	};
+
 	if (user_mode(regs)) {
 		check_user_regs(regs);
 		enter_from_user_mode();
-		return false;
+		return ret;
 	}
 
 	/*
@@ -634,7 +639,8 @@ bool noinstr idtentry_enter_cond_rcu(struct pt_regs *regs)
 		trace_hardirqs_off_finish();
 		instrumentation_end();
 
-		return true;
+		ret.exit_rcu = true;
+		return ret;
 	}
 
 	/*
@@ -649,7 +655,7 @@ bool noinstr idtentry_enter_cond_rcu(struct pt_regs *regs)
 	trace_hardirqs_off();
 	instrumentation_end();
 
-	return false;
+	return ret;
 }
 
 static void idtentry_exit_cond_resched(struct pt_regs *regs, bool may_sched)
@@ -667,10 +673,9 @@ static void idtentry_exit_cond_resched(struct pt_regs *regs, bool may_sched)
 }
 
 /**
- * idtentry_exit_cond_rcu - Handle return from exception with conditional RCU
- *                          handling
+ * idtentry_exit - Handle return from exception that used idtentry_enter()
  * @regs:	Pointer to pt_regs (exception entry regs)
- * @rcu_exit:	Invoke rcu_irq_exit() if true
+ * @state:	Return value from matching call to idtentry_enter()
  *
  * Depending on the return target (kernel/user) this runs the necessary
  * preemption and work checks if possible and reguired and returns to
@@ -679,10 +684,10 @@ static void idtentry_exit_cond_resched(struct pt_regs *regs, bool may_sched)
  * This is the last action before returning to the low level ASM code which
 * just needs to return to the appropriate context.
  *
- * Counterpart to idtentry_enter_cond_rcu(). The return value of the entry
- * function must be fed into the @rcu_exit argument.
+ * Counterpart to idtentry_enter(). The return value of the entry
+ * function must be fed into the @state argument.
  */
-void noinstr idtentry_exit_cond_rcu(struct pt_regs *regs, bool rcu_exit)
+void noinstr idtentry_exit(struct pt_regs *regs, idtentry_state_t state)
 {
 	lockdep_assert_irqs_disabled();
 
@@ -695,7 +700,7 @@ void noinstr idtentry_exit_cond_rcu(struct pt_regs *regs, bool rcu_exit)
 		 * carefully and needs the same ordering of lockdep/tracing
 		 * and RCU as the return to user mode path.
 		 */
-		if (rcu_exit) {
+		if (state.exit_rcu) {
 			instrumentation_begin();
 			/* Tell the tracer that IRET will enable interrupts */
 			trace_hardirqs_on_prepare();
@@ -714,7 +719,7 @@ void noinstr idtentry_exit_cond_rcu(struct pt_regs *regs, bool rcu_exit)
 		 * IRQ flags state is correct already. Just tell RCU if it
 		 * was not watching on entry.
 		 */
-		if (rcu_exit)
+		if (state.exit_rcu)
 			rcu_irq_exit();
 	}
 }
@@ -800,9 +805,10 @@ static void __xen_pv_evtchn_do_upcall(void)
 __visible noinstr void xen_pv_evtchn_do_upcall(struct pt_regs *regs)
 {
 	struct pt_regs *old_regs;
-	bool inhcall, rcu_exit;
+	bool inhcall;
+	idtentry_state_t state;
 
-	rcu_exit = idtentry_enter_cond_rcu(regs);
+	state = idtentry_enter(regs);
 	old_regs = set_irq_regs(regs);
 
 	instrumentation_begin();
@@ -812,13 +818,13 @@ __visible noinstr void xen_pv_evtchn_do_upcall(struct pt_regs *regs)
 	set_irq_regs(old_regs);
 
 	inhcall = get_and_clear_inhcall();
-	if (inhcall && !WARN_ON_ONCE(rcu_exit)) {
+	if (inhcall && !WARN_ON_ONCE(state.exit_rcu)) {
 		instrumentation_begin();
 		idtentry_exit_cond_resched(regs, true);
 		instrumentation_end();
 		restore_inhcall(inhcall);
 	} else {
-		idtentry_exit_cond_rcu(regs, rcu_exit);
+		idtentry_exit(regs, state);
 	}
 }
 #endif /* CONFIG_XEN_PV */
--- a/arch/x86/include/asm/idtentry.h
+++ b/arch/x86/include/asm/idtentry.h
@@ -13,8 +13,12 @@
 void idtentry_enter_user(struct pt_regs *regs);
 void idtentry_exit_user(struct pt_regs *regs);
 
-bool idtentry_enter_cond_rcu(struct pt_regs *regs);
-void idtentry_exit_cond_rcu(struct pt_regs *regs, bool rcu_exit);
+typedef struct idtentry_state {
+	bool exit_rcu;
+} idtentry_state_t;
+
+idtentry_state_t idtentry_enter(struct pt_regs *regs);
+void idtentry_exit(struct pt_regs *regs, idtentry_state_t state);
 
 /**
  * DECLARE_IDTENTRY - Declare functions for simple IDT entry points
@@ -54,12 +58,12 @@ static __always_inline void __##func(struct pt_regs *regs);	\
 								\
 __visible noinstr void func(struct pt_regs *regs)		\
 {								\
-	bool rcu_exit = idtentry_enter_cond_rcu(regs);		\
+	idtentry_state_t state = idtentry_enter(regs);		\
 								\
 	instrumentation_begin();				\
 	__##func (regs);					\
 	instrumentation_end();					\
-	idtentry_exit_cond_rcu(regs, rcu_exit);			\
+	idtentry_exit(regs, state);				\
 }								\
 								\
 static __always_inline void __##func(struct pt_regs *regs)
@@ -101,12 +105,12 @@ static __always_inline void __##func(struct pt_regs *regs,	\
 __visible noinstr void func(struct pt_regs *regs,		\
 			    unsigned long error_code)		\
 {								\
-	bool rcu_exit = idtentry_enter_cond_rcu(regs);		\
+	idtentry_state_t state = idtentry_enter(regs);		\
 								\
 	instrumentation_begin();				\
 	__##func (regs, error_code);				\
 	instrumentation_end();					\
-	idtentry_exit_cond_rcu(regs, rcu_exit);			\
+	idtentry_exit(regs, state);				\
 }								\
 								\
 static __always_inline void __##func(struct pt_regs *regs,	\
@@ -199,7 +203,7 @@ static __always_inline void __##func(struct pt_regs *regs, u8 vector);	\
 __visible noinstr void func(struct pt_regs *regs,		\
 			    unsigned long error_code)		\
 {								\
-	bool rcu_exit = idtentry_enter_cond_rcu(regs);		\
+	idtentry_state_t state = idtentry_enter(regs);		\
 								\
 	instrumentation_begin();				\
 	irq_enter_rcu();					\
@@ -207,7 +211,7 @@ __visible noinstr void func(struct pt_regs *regs,		\
 	__##func (regs, (u8)error_code);			\
 	irq_exit_rcu();						\
 	instrumentation_end();					\
-	idtentry_exit_cond_rcu(regs, rcu_exit);			\
+	idtentry_exit(regs, state);				\
 }								\
 								\
 static __always_inline void __##func(struct pt_regs *regs, u8 vector)
@@ -241,7 +245,7 @@ static void __##func(struct pt_regs *regs);			\
 								\
 __visible noinstr void func(struct pt_regs *regs)		\
 {								\
-	bool rcu_exit = idtentry_enter_cond_rcu(regs);		\
+	idtentry_state_t state = idtentry_enter(regs);		\
 								\
 	instrumentation_begin();				\
 	irq_enter_rcu();					\
@@ -249,7 +253,7 @@ __visible noinstr void func(struct pt_regs *regs)		\
 	run_on_irqstack_cond(__##func, regs, regs);		\
 	irq_exit_rcu();						\
 	instrumentation_end();					\
-	idtentry_exit_cond_rcu(regs, rcu_exit);			\
+	idtentry_exit(regs, state);				\
 }								\
 								\
 static noinline void __##func(struct pt_regs *regs)
@@ -270,7 +274,7 @@ static __always_inline void __##func(struct pt_regs *regs);	\
 								\
 __visible noinstr void func(struct pt_regs *regs)		\
 {								\
-	bool rcu_exit = idtentry_enter_cond_rcu(regs);		\
+	idtentry_state_t state = idtentry_enter(regs);		\
 								\
 	instrumentation_begin();				\
 	__irq_enter_raw();					\
@@ -278,7 +282,7 @@ __visible noinstr void func(struct pt_regs *regs)		\
 	__##func (regs);					\
 	__irq_exit_raw();					\
 	instrumentation_end();					\
-	idtentry_exit_cond_rcu(regs, rcu_exit);			\
+	idtentry_exit(regs, state);				\
 }								\
 								\
 static __always_inline void __##func(struct pt_regs *regs)
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -233,7 +233,7 @@ EXPORT_SYMBOL_GPL(kvm_read_and_reset_apf_flags);
 noinstr bool __kvm_handle_async_pf(struct pt_regs *regs, u32 token)
 {
 	u32 reason = kvm_read_and_reset_apf_flags();
-	bool rcu_exit;
+	idtentry_state_t state;
 
 	switch (reason) {
 	case KVM_PV_REASON_PAGE_NOT_PRESENT:
@@ -243,7 +243,7 @@ noinstr bool __kvm_handle_async_pf(struct pt_regs *regs, u32 token)
 		return false;
 	}
 
-	rcu_exit = idtentry_enter_cond_rcu(regs);
+	state = idtentry_enter(regs);
 	instrumentation_begin();
 
 	/*
@@ -264,7 +264,7 @@ noinstr bool __kvm_handle_async_pf(struct pt_regs *regs, u32 token)
 	}
 
 	instrumentation_end();
-	idtentry_exit_cond_rcu(regs, rcu_exit);
+	idtentry_exit(regs, state);
 	return true;
 }
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -245,7 +245,7 @@ static noinstr bool handle_bug(struct pt_regs *regs)
 
 DEFINE_IDTENTRY_RAW(exc_invalid_op)
 {
-	bool rcu_exit;
+	idtentry_state_t state;
 
 	/*
 	 * We use UD2 as a short encoding for 'CALL __WARN', as such
@@ -255,11 +255,11 @@ DEFINE_IDTENTRY_RAW(exc_invalid_op)
 	if (!user_mode(regs) && handle_bug(regs))
 		return;
 
-	rcu_exit = idtentry_enter_cond_rcu(regs);
+	state = idtentry_enter(regs);
 	instrumentation_begin();
 	handle_invalid_op(regs);
 	instrumentation_end();
-	idtentry_exit_cond_rcu(regs, rcu_exit);
+	idtentry_exit(regs, state);
 }
 
 DEFINE_IDTENTRY(exc_coproc_segment_overrun)
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -1377,7 +1377,7 @@ handle_page_fault(struct pt_regs *regs, unsigned long error_code,
 
 DEFINE_IDTENTRY_RAW_ERRORCODE(exc_page_fault)
 {
 	unsigned long address = read_cr2();
-	bool rcu_exit;
+	idtentry_state_t state;
 
 	prefetchw(&current->mm->mmap_lock);
@@ -1412,11 +1412,11 @@ DEFINE_IDTENTRY_RAW_ERRORCODE(exc_page_fault)
 	 * code reenabled RCU to avoid subsequent wreckage which helps
 	 * debugability.
 	 */
-	rcu_exit = idtentry_enter_cond_rcu(regs);
+	state = idtentry_enter(regs);
 	instrumentation_begin();
 	handle_page_fault(regs, error_code, address);
 	instrumentation_end();
 
-	idtentry_exit_cond_rcu(regs, rcu_exit);
+	idtentry_exit(regs, state);
 }
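To illustrate the "more extensible and more foolproof" point from the commit message: callers treat idtentry_state_t as opaque and simply hand it back to idtentry_exit(), so a later change could grow the struct without touching any of the call sites updated above, and the distinct type keeps the compiler from silently accepting an unrelated bool. The extra field below is purely hypothetical and not part of this patch.

/* Hypothetical future extension, for illustration only. */
typedef struct idtentry_state {
	bool exit_rcu;
	/* bool some_new_flag;  -- could be added without changing callers */
} idtentry_state_t;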