Commit 2e10e71c authored by Frederic Weisbecker, committed by Ingo Molnar

sched/preempt: Rearrange a few symbols after headers merge

Adjust a few comments, and further integrate a few definitions after
the dumb headers copy.
Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/1431441711-29753-3-git-send-email-fweisbec@gmail.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 92cf2118
@@ -9,14 +9,6 @@
 #include <linux/linkage.h>
 #include <linux/list.h>
 
-/*
- * We use the MSB mostly because its available; see <linux/preempt_mask.h> for
- * the other bits -- can't include that header due to inclusion hell.
- */
-#define PREEMPT_NEED_RESCHED	0x80000000
-
-#include <asm/preempt.h>
-
 /*
  * We put the hardirq and softirq counter into the preemption
  * counter. The bitmask has the following meaning:
@@ -30,11 +22,12 @@
  * there are a few palaeontologic drivers which reenable interrupts in
  * the handler, so we need more than one bit here.
  *
  * PREEMPT_MASK:	0x000000ff
  * SOFTIRQ_MASK:	0x0000ff00
  * HARDIRQ_MASK:	0x000f0000
  *     NMI_MASK:	0x00100000
  * PREEMPT_ACTIVE:	0x00200000
+ * PREEMPT_NEED_RESCHED:	0x80000000
  */
 #define PREEMPT_BITS	8
 #define SOFTIRQ_BITS	8
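
[Editor's note] The mask values in the comment above follow mechanically from the BITS/SHIFT definitions in this header, via __IRQ_MASK. As a sanity check, here is a minimal user-space C sketch (illustration only, not kernel code; the BITS values mirror what the header used at the time of this commit) that derives and prints the same constants:

#include <stdio.h>

#define PREEMPT_BITS		8
#define SOFTIRQ_BITS		8
#define HARDIRQ_BITS		4
#define NMI_BITS		1
#define PREEMPT_ACTIVE_BITS	1

#define PREEMPT_SHIFT		0
#define SOFTIRQ_SHIFT		(PREEMPT_SHIFT + PREEMPT_BITS)
#define HARDIRQ_SHIFT		(SOFTIRQ_SHIFT + SOFTIRQ_BITS)
#define NMI_SHIFT		(HARDIRQ_SHIFT + HARDIRQ_BITS)
#define PREEMPT_ACTIVE_SHIFT	(NMI_SHIFT + NMI_BITS)

/* x low bits set, same shape as the kernel's __IRQ_MASK. */
#define __IRQ_MASK(x)		((1U << (x)) - 1)

int main(void)
{
	/* Each field occupies its own contiguous bit range of the counter. */
	printf("   PREEMPT_MASK: 0x%08x\n", __IRQ_MASK(PREEMPT_BITS) << PREEMPT_SHIFT);
	printf("   SOFTIRQ_MASK: 0x%08x\n", __IRQ_MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT);
	printf("   HARDIRQ_MASK: 0x%08x\n", __IRQ_MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT);
	printf("       NMI_MASK: 0x%08x\n", __IRQ_MASK(NMI_BITS) << NMI_SHIFT);
	printf(" PREEMPT_ACTIVE: 0x%08x\n", __IRQ_MASK(PREEMPT_ACTIVE_BITS) << PREEMPT_ACTIVE_SHIFT);
	return 0;
}

PREEMPT_NEED_RESCHED is the one value in the comment not derived this way; it is pinned to the MSB by hand, which is why the next hunk defines it separately.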
@@ -64,6 +57,12 @@
 #define PREEMPT_ACTIVE_SHIFT	(NMI_SHIFT + NMI_BITS)
 #define PREEMPT_ACTIVE	(__IRQ_MASK(PREEMPT_ACTIVE_BITS) << PREEMPT_ACTIVE_SHIFT)
 
+/* We use the MSB mostly because its available */
+#define PREEMPT_NEED_RESCHED	0x80000000
+
+/* preempt_count() and related functions, depends on PREEMPT_NEED_RESCHED */
+#include <asm/preempt.h>
+
 #define hardirq_count()	(preempt_count() & HARDIRQ_MASK)
 #define softirq_count()	(preempt_count() & SOFTIRQ_MASK)
 #define irq_count()	(preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK \
@@ -122,12 +121,6 @@
 #define in_atomic_preempt_off() \
 		((preempt_count() & ~PREEMPT_ACTIVE) != PREEMPT_CHECK_OFFSET)
 
-#ifdef CONFIG_PREEMPT_COUNT
-# define preemptible()	(preempt_count() == 0 && !irqs_disabled())
-#else
-# define preemptible()	0
-#endif
-
 #if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_PREEMPT_TRACER)
 extern void preempt_count_add(int val);
 extern void preempt_count_sub(int val);
@@ -160,6 +153,8 @@ do { \
 #define preempt_enable_no_resched() sched_preempt_enable_no_resched()
 
+#define preemptible()	(preempt_count() == 0 && !irqs_disabled())
+
 #ifdef CONFIG_PREEMPT
 #define preempt_enable() \
 do { \
@@ -232,6 +227,7 @@ do { \
 #define preempt_disable_notrace()		barrier()
 #define preempt_enable_no_resched_notrace()	barrier()
 #define preempt_enable_notrace()		barrier()
+#define preemptible()	0
 
 #endif /* CONFIG_PREEMPT_COUNT */
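
[Editor's note] The last three hunks consolidate preemptible(): instead of one standalone #ifdef CONFIG_PREEMPT_COUNT block in the middle of the file, each configuration branch now carries its own definition, a real test when preempt counting exists and a constant 0 when it does not. A small user-space model of the two definitions (preempt_count() and irqs_disabled() are stubbed out here purely for illustration; they are not the kernel implementations):

#include <stdio.h>

#define CONFIG_PREEMPT_COUNT 1	/* flip to 0 to model !CONFIG_PREEMPT_COUNT */

static int stub_preempt_count;	/* stand-in for the percpu counter */
static int stub_irqs_disabled;	/* stand-in for the local IRQ state */

#define preempt_count()	(stub_preempt_count)
#define irqs_disabled()	(stub_irqs_disabled)

#if CONFIG_PREEMPT_COUNT
# define preemptible()	(preempt_count() == 0 && !irqs_disabled())
#else
# define preemptible()	0	/* no counting: conservatively "no" */
#endif

int main(void)
{
	printf("idle context:     %d\n", preemptible());	/* prints 1 */

	stub_preempt_count = 1;		/* inside preempt_disable() */
	printf("preempt disabled: %d\n", preemptible());	/* prints 0 */

	stub_preempt_count = 0;
	stub_irqs_disabled = 1;		/* inside local_irq_disable() */
	printf("irqs disabled:    %d\n", preemptible());	/* prints 0 */
	return 0;
}

With !CONFIG_PREEMPT_COUNT the macro is hard-wired to 0, so callers cannot use it to prove a context is non-preemptible; it only ever promises the safe direction.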