tracing/perf: Add interrupt_context_level() helper

Now that there are three different instances of the addition trick applied to
preempt_count() with the NMI_MASK, HARDIRQ_MASK and SOFTIRQ_OFFSET macros,
it deserves a helper function defined in the preempt.h header.

Add the interrupt_context_level() helper and replace the three open-coded
instances with it.

Link: https://lore.kernel.org/all/20211015142541.4badd8a9@gandalf.local.home/
Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
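For readers outside the kernel tree, the trick is easiest to see in isolation.
Below is a minimal userspace sketch (not part of the patch); the mask values
are illustrative, following the usual preempt_count() layout of the softirq
count in bits 8-15, the hardirq count in bits 16-19 and the NMI count in bits
20-23. Each test fires for its own context and for every context that can
interrupt it (an NMI can arrive in a hardirq, which can arrive in a softirq),
so the three additions cascade into a value from 0 to 3. Note the softirq test
deliberately uses SOFTIRQ_OFFSET rather than the full softirq mask, so only
actual softirq execution counts, not sections that merely have softirqs
disabled.

#include <stdio.h>

/* Illustrative values matching the common preempt_count() layout. */
#define SOFTIRQ_OFFSET	(1UL << 8)
#define HARDIRQ_MASK	(0xfUL << 16)
#define NMI_MASK	(0xfUL << 20)

static unsigned char level_of(unsigned long pc)
{
	unsigned char level = 0;

	/* Each broader mask also covers the narrower contexts above it. */
	level += !!(pc & (NMI_MASK));
	level += !!(pc & (NMI_MASK | HARDIRQ_MASK));
	level += !!(pc & (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET));

	return level;
}

int main(void)
{
	printf("%d\n", level_of(0));			/* 0 - normal  */
	printf("%d\n", level_of(SOFTIRQ_OFFSET));	/* 1 - softirq */
	printf("%d\n", level_of(1UL << 16));		/* 2 - hardirq */
	printf("%d\n", level_of(1UL << 20));		/* 3 - NMI     */
	return 0;
}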
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -77,6 +77,27 @@
 /* preempt_count() and related functions, depends on PREEMPT_NEED_RESCHED */
 #include <asm/preempt.h>
 
+/**
+ * interrupt_context_level - return interrupt context level
+ *
+ * Returns the current interrupt context level.
+ *  0 - normal context
+ *  1 - softirq context
+ *  2 - hardirq context
+ *  3 - NMI context
+ */
+static __always_inline unsigned char interrupt_context_level(void)
+{
+	unsigned long pc = preempt_count();
+	unsigned char level = 0;
+
+	level += !!(pc & (NMI_MASK));
+	level += !!(pc & (NMI_MASK | HARDIRQ_MASK));
+	level += !!(pc & (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET));
+
+	return level;
+}
+
 #define nmi_count()		(preempt_count() & NMI_MASK)
 #define hardirq_count()	(preempt_count() & HARDIRQ_MASK)
 
 #ifdef CONFIG_PREEMPT_RT
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -136,12 +136,7 @@ enum {
 
 static __always_inline int trace_get_context_bit(void)
 {
-	unsigned long pc = preempt_count();
-	unsigned char bit = 0;
-
-	bit += !!(pc & (NMI_MASK));
-	bit += !!(pc & (NMI_MASK | HARDIRQ_MASK));
-	bit += !!(pc & (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET));
+	unsigned char bit = interrupt_context_level();
 
 	return TRACE_CTX_NORMAL - bit;
 }
--- a/kernel/events/internal.h
+++ b/kernel/events/internal.h
@@ -205,12 +205,7 @@ DEFINE_OUTPUT_COPY(__output_copy_user, arch_perf_out_copy_user)
 
 static inline int get_recursion_context(int *recursion)
 {
-	unsigned int pc = preempt_count();
-	unsigned char rctx = 0;
-
-	rctx += !!(pc & (NMI_MASK));
-	rctx += !!(pc & (NMI_MASK | HARDIRQ_MASK));
-	rctx += !!(pc & (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET));
+	unsigned char rctx = interrupt_context_level();
 
 	if (recursion[rctx])
 		return -1;
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -3167,12 +3167,7 @@ static __always_inline int
 trace_recursive_lock(struct ring_buffer_per_cpu *cpu_buffer)
 {
 	unsigned int val = cpu_buffer->current_context;
-	unsigned long pc = preempt_count();
-	int bit = 0;
-
-	bit += !!(pc & (NMI_MASK));
-	bit += !!(pc & (NMI_MASK | HARDIRQ_MASK));
-	bit += !!(pc & (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET));
+	int bit = interrupt_context_level();
 
 	bit = RB_CTX_NORMAL - bit;
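A new caller would use the helper the same way get_recursion_context() does
above. Here is a hypothetical sketch, not part of this patch: my_recursion,
my_enter and my_exit are made-up names, and a real implementation would keep
the counters per-CPU rather than in a single global array.

static int my_recursion[4];	/* one counter per context level */

static inline int my_enter(void)
{
	int rctx = interrupt_context_level();

	if (my_recursion[rctx])
		return -1;	/* already running at this context level */

	my_recursion[rctx]++;
	barrier();
	return rctx;
}

static inline void my_exit(int rctx)
{
	barrier();
	my_recursion[rctx]--;
}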