Commit d5c678ae authored by Andy Lutomirski, committed by Thomas Gleixner

x86/debug: Allow a single level of #DB recursion

Trying to clear DR7 around a #DB from usermode malfunctions if the task
schedules when delivering SIGTRAP.

Rather than trying to define a special no-recursion region, just allow a
single level of recursion.  The same mechanism is used for NMI, and it
hasn't caused any problems yet.

Fixes: 9f58fdde ("x86/db: Split out dr6/7 handling")
Reported-by: Kyle Huey <me@kylehuey.com>
Debugged-by: Josh Poimboeuf <jpoimboe@redhat.com>
Signed-off-by: Andy Lutomirski <luto@kernel.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Tested-by: Daniel Thompson <daniel.thompson@linaro.org>
Cc: stable@vger.kernel.org
Link: https://lkml.kernel.org/r/8b9bd05f187231df008d48cf818a6a311cbd5c98.1597882384.git.luto@kernel.org
Link: https://lore.kernel.org/r/20200902133200.726584153@infradead.org
parent 662a0221
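
For readers skimming past the diff below: a minimal, userspace-style sketch of the save/restore pattern the patch adopts. The helper names (local_db_save_stub(), exc_debug_kernel_sketch(), etc.) are hypothetical stand-ins for illustration, not the kernel's functions; the actual change is in the diff that follows. The idea is that the kernel-mode #DB path disables breakpoints for its own duration, while the user-mode path leaves DR7 alone, so a #DB raised while SIGTRAP delivery schedules lands in the kernel path exactly once.

```c
/*
 * Schematic sketch only -- userspace stubs, not the kernel implementation.
 * local_db_save_stub()/local_db_restore_stub() stand in for the kernel's
 * local_db_save()/local_db_restore() helpers.
 */
#include <stdio.h>

static unsigned long fake_dr7 = 0x401;          /* pretend a breakpoint is armed */

static unsigned long local_db_save_stub(void)
{
        unsigned long old = fake_dr7;
        fake_dr7 = 0;                           /* breakpoints disabled */
        return old;
}

static void local_db_restore_stub(unsigned long dr7)
{
        fake_dr7 = dr7;                         /* breakpoints re-enabled */
}

/* Kernel-mode #DB path: disable breakpoints for its own duration. */
static void exc_debug_kernel_sketch(void)
{
        unsigned long dr7 = local_db_save_stub();

        printf("kernel #DB: DR7 saved (%#lx), breakpoints off\n", dr7);
        /* ... handle the exception; a nested #DB cannot recurse further ... */

        local_db_restore_stub(dr7);
}

/*
 * User-mode #DB path: DR7 is left alone, so SIGTRAP delivery may schedule.
 * If that raises another #DB from kernel context, it is absorbed by the
 * kernel path above -- one level of recursion, then breakpoints are off.
 */
static void exc_debug_user_sketch(void)
{
        printf("user #DB: DR7 untouched, delivering SIGTRAP (may schedule)\n");
        exc_debug_kernel_sketch();              /* simulate the recursive #DB */
}

int main(void)
{
        exc_debug_user_sketch();
        return 0;
}
```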
@@ -729,20 +729,9 @@ static bool is_sysenter_singlestep(struct pt_regs *regs)
 #endif
 }
 
-static __always_inline void debug_enter(unsigned long *dr6, unsigned long *dr7)
+static __always_inline unsigned long debug_read_clear_dr6(void)
 {
-        /*
-         * Disable breakpoints during exception handling; recursive exceptions
-         * are exceedingly 'fun'.
-         *
-         * Since this function is NOKPROBE, and that also applies to
-         * HW_BREAKPOINT_X, we can't hit a breakpoint before this (XXX except a
-         * HW_BREAKPOINT_W on our stack)
-         *
-         * Entry text is excluded for HW_BP_X and cpu_entry_area, which
-         * includes the entry stack is excluded for everything.
-         */
-        *dr7 = local_db_save();
+        unsigned long dr6;
 
         /*
          * The Intel SDM says:
@@ -755,15 +744,12 @@ static __always_inline void debug_enter(unsigned long *dr6, unsigned long *dr7)
          *
          * Keep it simple: clear DR6 immediately.
          */
-        get_debugreg(*dr6, 6);
+        get_debugreg(dr6, 6);
         set_debugreg(0, 6);
         /* Filter out all the reserved bits which are preset to 1 */
-        *dr6 &= ~DR6_RESERVED;
-}
+        dr6 &= ~DR6_RESERVED;
 
-static __always_inline void debug_exit(unsigned long dr7)
-{
-        local_db_restore(dr7);
+        return dr6;
 }
 
 /*
@@ -863,6 +849,18 @@ static void handle_debug(struct pt_regs *regs, unsigned long dr6, bool user)
 static __always_inline void exc_debug_kernel(struct pt_regs *regs,
                                              unsigned long dr6)
 {
+        /*
+         * Disable breakpoints during exception handling; recursive exceptions
+         * are exceedingly 'fun'.
+         *
+         * Since this function is NOKPROBE, and that also applies to
+         * HW_BREAKPOINT_X, we can't hit a breakpoint before this (XXX except a
+         * HW_BREAKPOINT_W on our stack)
+         *
+         * Entry text is excluded for HW_BP_X and cpu_entry_area, which
+         * includes the entry stack is excluded for everything.
+         */
+        unsigned long dr7 = local_db_save();
         bool irq_state = idtentry_enter_nmi(regs);
         instrumentation_begin();
 
@@ -883,6 +881,8 @@ static __always_inline void exc_debug_kernel(struct pt_regs *regs,
 
         instrumentation_end();
         idtentry_exit_nmi(regs, irq_state);
+
+        local_db_restore(dr7);
 }
 
 static __always_inline void exc_debug_user(struct pt_regs *regs,
@@ -894,6 +894,15 @@ static __always_inline void exc_debug_user(struct pt_regs *regs,
          */
         WARN_ON_ONCE(!user_mode(regs));
 
+        /*
+         * NB: We can't easily clear DR7 here because
+         * idtentry_exit_to_usermode() can invoke ptrace, schedule, access
+         * user memory, etc.  This means that a recursive #DB is possible.  If
+         * this happens, that #DB will hit exc_debug_kernel() and clear DR7.
+         * Since we're not on the IST stack right now, everything will be
+         * fine.
+         */
+
         irqentry_enter_from_user_mode(regs);
         instrumentation_begin();
 
@@ -907,36 +916,24 @@ static __always_inline void exc_debug_user(struct pt_regs *regs,
 /* IST stack entry */
 DEFINE_IDTENTRY_DEBUG(exc_debug)
 {
-        unsigned long dr6, dr7;
-
-        debug_enter(&dr6, &dr7);
-        exc_debug_kernel(regs, dr6);
-        debug_exit(dr7);
+        exc_debug_kernel(regs, debug_read_clear_dr6());
 }
 
 /* User entry, runs on regular task stack */
 DEFINE_IDTENTRY_DEBUG_USER(exc_debug)
 {
-        unsigned long dr6, dr7;
-
-        debug_enter(&dr6, &dr7);
-        exc_debug_user(regs, dr6);
-        debug_exit(dr7);
+        exc_debug_user(regs, debug_read_clear_dr6());
 }
 
 #else
 /* 32 bit does not have separate entry points. */
 DEFINE_IDTENTRY_RAW(exc_debug)
 {
-        unsigned long dr6, dr7;
-
-        debug_enter(&dr6, &dr7);
+        unsigned long dr6 = debug_read_clear_dr6();
 
         if (user_mode(regs))
                 exc_debug_user(regs, dr6);
         else
                 exc_debug_kernel(regs, dr6);
-
-        debug_exit(dr7);
 }
 #endif