ftrace: Optimize testing what context current is in

The preempt_count() is not a simple location in memory; it may be part of
per_cpu code or otherwise non-trivial to reach. Each access to preempt_count(),
or to one of its accessor functions (like in_interrupt()), takes several
cycles. Reading preempt_count() once, and then testing the returned value to
determine the context, is slightly faster than using in_nmi() and
in_interrupt().

Link: https://lkml.kernel.org/r/20201028115612.780796355@goodmis.org
Link: https://lkml.kernel.org/r/20201106023546.558881845@goodmis.org
Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
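
To illustrate the idea outside the kernel, here is a minimal userspace C
sketch, not the kernel implementation: the mask values imitate the kernel's
preempt_count layout but are assumptions for this demo, and
fake_preempt_count() stands in for the real (possibly per-CPU) counter that
helpers like in_nmi() and in_irq() each re-read.

#include <stdio.h>

#define SOFTIRQ_OFFSET 0x00000100UL
#define HARDIRQ_MASK   0x000f0000UL
#define NMI_MASK       0x00f00000UL

static unsigned long fake_count; /* stands in for the per-CPU counter */

static unsigned long fake_preempt_count(void)
{
	return fake_count;
}

/* Old style: each helper performs its own read of the counter. */
static int context_old(void)
{
	if (fake_preempt_count() & NMI_MASK)       /* read #1, like in_nmi() */
		return 0;
	if (fake_preempt_count() & HARDIRQ_MASK)   /* read #2, like in_irq() */
		return 1;
	if (fake_preempt_count() & SOFTIRQ_OFFSET) /* read #3 */
		return 2;
	return 3;
}

/* New style: one read into a local, then cheap tests on that value. */
static int context_new(void)
{
	unsigned long pc = fake_preempt_count();   /* single read */

	if (!(pc & (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET)))
		return 3;                          /* common case: one test */
	return pc & NMI_MASK ? 0 : pc & HARDIRQ_MASK ? 1 : 2;
}

int main(void)
{
	fake_count = 0x00010000UL; /* pretend we are in a hard IRQ */
	printf("old=%d new=%d\n", context_old(), context_new()); /* old=1 new=1 */
	return 0;
}

Note how the common NORMAL case is decided with a single combined-mask test,
while the old path performs up to three separate counter reads.
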
@@ -117,22 +117,29 @@ enum {
 
 #define TRACE_CONTEXT_MASK	TRACE_LIST_MAX
 
+/*
+ * Used for setting context
+ *  NMI     = 0
+ *  IRQ     = 1
+ *  SOFTIRQ = 2
+ *  NORMAL  = 3
+ */
+enum {
+	TRACE_CTX_NMI,
+	TRACE_CTX_IRQ,
+	TRACE_CTX_SOFTIRQ,
+	TRACE_CTX_NORMAL,
+};
+
 static __always_inline int trace_get_context_bit(void)
 {
-	int bit;
-
-	if (in_interrupt()) {
-		if (in_nmi())
-			bit = 0;
-		else if (in_irq())
-			bit = 1;
-		else
-			bit = 2;
-	} else
-		bit = 3;
-
-	return bit;
+	unsigned long pc = preempt_count();
+
+	if (!(pc & (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET)))
+		return TRACE_CTX_NORMAL;
+	else
+		return pc & NMI_MASK ? TRACE_CTX_NMI :
+			pc & HARDIRQ_MASK ? TRACE_CTX_IRQ : TRACE_CTX_SOFTIRQ;
 }
 
 static __always_inline int trace_test_and_set_recursion(int start, int max)
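
The context value returned by trace_get_context_bit() feeds
trace_test_and_set_recursion(), which claims one recursion bit per context.
The following userspace sketch shows that scheme under stated assumptions:
the trace_recursion word here is a plain global (the kernel keeps it per
task), and the start offset and helper names are illustrative, not the
kernel API.

#include <stdio.h>

enum { CTX_NMI, CTX_IRQ, CTX_SOFTIRQ, CTX_NORMAL };

static unsigned long trace_recursion; /* kernel keeps this per task */

/* Claim the bit for the given context; return -1 if it is already set,
 * meaning the traced path re-entered itself in this context. */
static int test_and_set_recursion(int start, int ctx)
{
	int bit = start + ctx;

	if (trace_recursion & (1UL << bit))
		return -1;
	trace_recursion |= 1UL << bit;
	return bit;
}

static void clear_recursion(int bit)
{
	trace_recursion &= ~(1UL << bit);
}

int main(void)
{
	int bit = test_and_set_recursion(0, CTX_NORMAL);

	/* A nested call in the same context is refused... */
	printf("first=%d nested=%d\n", bit,
	       test_and_set_recursion(0, CTX_NORMAL)); /* first=3 nested=-1 */

	/* ...but an interrupting context gets its own bit. */
	printf("irq=%d\n", test_and_set_recursion(0, CTX_IRQ)); /* irq=1 */

	clear_recursion(bit);
	return 0;
}

Because each context owns a distinct bit, an NMI or IRQ that arrives while
tracing is in progress can still be traced, while true recursion within one
context is caught.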