Commit 7ef858da authored by Thomas Gleixner, committed by Ingo Molnar

sched/rt, arm64: Use CONFIG_PREEMPTION

CONFIG_PREEMPTION is selected by CONFIG_PREEMPT and by CONFIG_PREEMPT_RT.
Both PREEMPT and PREEMPT_RT require the same functionality which today
depends on CONFIG_PREEMPT.

Switch the Kconfig dependency, entry code and preemption handling over
to use CONFIG_PREEMPTION. Add PREEMPT_RT output in show_stack().

[bigeasy: +traps.c, Kconfig]
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Will Deacon <will@kernel.org>
Cc: linux-arm-kernel@lists.infradead.org
Link: https://lore.kernel.org/r/20191015191821.11479-3-bigeasy@linutronix.de
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent e7289c6d
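
For reference, the first paragraph of the message is the key invariant: both preemption models select the same umbrella symbol. A rough sketch of the relationship (abbreviated from kernel/Kconfig.preempt; prompts shortened, not the full definitions):

	config PREEMPT
		bool "Preemptible Kernel (Low-Latency Desktop)"
		select PREEMPTION

	config PREEMPT_RT
		bool "Fully Preemptible Kernel (Real-Time)"
		select PREEMPTION

	config PREEMPTION
		bool
		select PREEMPT_COUNT

Code that must behave the same under either model therefore tests PREEMPTION, while PREEMPT alone now means "PREEMPT but not PREEMPT_RT".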
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -34,32 +34,32 @@ config ARM64
 	select ARCH_HAS_TEARDOWN_DMA_OPS if IOMMU_SUPPORT
 	select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
 	select ARCH_HAVE_NMI_SAFE_CMPXCHG
-	select ARCH_INLINE_READ_LOCK if !PREEMPT
-	select ARCH_INLINE_READ_LOCK_BH if !PREEMPT
-	select ARCH_INLINE_READ_LOCK_IRQ if !PREEMPT
-	select ARCH_INLINE_READ_LOCK_IRQSAVE if !PREEMPT
-	select ARCH_INLINE_READ_UNLOCK if !PREEMPT
-	select ARCH_INLINE_READ_UNLOCK_BH if !PREEMPT
-	select ARCH_INLINE_READ_UNLOCK_IRQ if !PREEMPT
-	select ARCH_INLINE_READ_UNLOCK_IRQRESTORE if !PREEMPT
-	select ARCH_INLINE_WRITE_LOCK if !PREEMPT
-	select ARCH_INLINE_WRITE_LOCK_BH if !PREEMPT
-	select ARCH_INLINE_WRITE_LOCK_IRQ if !PREEMPT
-	select ARCH_INLINE_WRITE_LOCK_IRQSAVE if !PREEMPT
-	select ARCH_INLINE_WRITE_UNLOCK if !PREEMPT
-	select ARCH_INLINE_WRITE_UNLOCK_BH if !PREEMPT
-	select ARCH_INLINE_WRITE_UNLOCK_IRQ if !PREEMPT
-	select ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE if !PREEMPT
-	select ARCH_INLINE_SPIN_TRYLOCK if !PREEMPT
-	select ARCH_INLINE_SPIN_TRYLOCK_BH if !PREEMPT
-	select ARCH_INLINE_SPIN_LOCK if !PREEMPT
-	select ARCH_INLINE_SPIN_LOCK_BH if !PREEMPT
-	select ARCH_INLINE_SPIN_LOCK_IRQ if !PREEMPT
-	select ARCH_INLINE_SPIN_LOCK_IRQSAVE if !PREEMPT
-	select ARCH_INLINE_SPIN_UNLOCK if !PREEMPT
-	select ARCH_INLINE_SPIN_UNLOCK_BH if !PREEMPT
-	select ARCH_INLINE_SPIN_UNLOCK_IRQ if !PREEMPT
-	select ARCH_INLINE_SPIN_UNLOCK_IRQRESTORE if !PREEMPT
+	select ARCH_INLINE_READ_LOCK if !PREEMPTION
+	select ARCH_INLINE_READ_LOCK_BH if !PREEMPTION
+	select ARCH_INLINE_READ_LOCK_IRQ if !PREEMPTION
+	select ARCH_INLINE_READ_LOCK_IRQSAVE if !PREEMPTION
+	select ARCH_INLINE_READ_UNLOCK if !PREEMPTION
+	select ARCH_INLINE_READ_UNLOCK_BH if !PREEMPTION
+	select ARCH_INLINE_READ_UNLOCK_IRQ if !PREEMPTION
+	select ARCH_INLINE_READ_UNLOCK_IRQRESTORE if !PREEMPTION
+	select ARCH_INLINE_WRITE_LOCK if !PREEMPTION
+	select ARCH_INLINE_WRITE_LOCK_BH if !PREEMPTION
+	select ARCH_INLINE_WRITE_LOCK_IRQ if !PREEMPTION
+	select ARCH_INLINE_WRITE_LOCK_IRQSAVE if !PREEMPTION
+	select ARCH_INLINE_WRITE_UNLOCK if !PREEMPTION
+	select ARCH_INLINE_WRITE_UNLOCK_BH if !PREEMPTION
+	select ARCH_INLINE_WRITE_UNLOCK_IRQ if !PREEMPTION
+	select ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE if !PREEMPTION
+	select ARCH_INLINE_SPIN_TRYLOCK if !PREEMPTION
+	select ARCH_INLINE_SPIN_TRYLOCK_BH if !PREEMPTION
+	select ARCH_INLINE_SPIN_LOCK if !PREEMPTION
+	select ARCH_INLINE_SPIN_LOCK_BH if !PREEMPTION
+	select ARCH_INLINE_SPIN_LOCK_IRQ if !PREEMPTION
+	select ARCH_INLINE_SPIN_LOCK_IRQSAVE if !PREEMPTION
+	select ARCH_INLINE_SPIN_UNLOCK if !PREEMPTION
+	select ARCH_INLINE_SPIN_UNLOCK_BH if !PREEMPTION
+	select ARCH_INLINE_SPIN_UNLOCK_IRQ if !PREEMPTION
+	select ARCH_INLINE_SPIN_UNLOCK_IRQRESTORE if !PREEMPTION
 	select ARCH_KEEP_MEMBLOCK
 	select ARCH_USE_CMPXCHG_LOCKREF
 	select ARCH_USE_QUEUED_RWLOCKS
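
Why these selects matter: the ARCH_INLINE_* symbols feed the generic lock-inlining machinery, which only inlines lock/unlock fastpaths when the kernel is not preemptible. A minimal sketch of that pattern (simplified from include/linux/spinlock_api_smp.h and kernel/locking/spinlock.c; only the spin_lock case shown):

	/* In the header: inline the op when the arch opted in ... */
	#ifdef CONFIG_INLINE_SPIN_LOCK	/* derived from ARCH_INLINE_SPIN_LOCK */
	#define _raw_spin_lock(lock) __raw_spin_lock(lock)
	#endif

	/* ... otherwise the out-of-line copy in kernel/locking/spinlock.c is used */
	#ifndef CONFIG_INLINE_SPIN_LOCK
	void __lockfunc _raw_spin_lock(raw_spinlock_t *lock)
	{
		__raw_spin_lock(lock);
	}
	EXPORT_SYMBOL(_raw_spin_lock);
	#endif

With the dependency switched to !PREEMPTION, PREEMPT_RT kernels also take the out-of-line path, as PREEMPT kernels already did.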
--- a/arch/arm64/crypto/sha256-glue.c
+++ b/arch/arm64/crypto/sha256-glue.c
@@ -97,7 +97,7 @@ static int sha256_update_neon(struct shash_desc *desc, const u8 *data,
 			 * input when running on a preemptible kernel, but process the
 			 * data block by block instead.
 			 */
-			if (IS_ENABLED(CONFIG_PREEMPT) &&
+			if (IS_ENABLED(CONFIG_PREEMPTION) &&
 			    chunk + sctx->count % SHA256_BLOCK_SIZE > SHA256_BLOCK_SIZE)
 				chunk = SHA256_BLOCK_SIZE -
 					sctx->count % SHA256_BLOCK_SIZE;
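
The check above exists because preemption is disabled while the NEON unit is in use; capping each chunk at a block boundary keeps those preempt-off sections short. A sketch of the surrounding loop (condensed from sha256_update_neon(); the exact argument list is abbreviated from memory):

	while (len > 0) {
		unsigned int chunk = len;

		/* on a preemptible kernel, don't cross a block boundary
		 * in one go, so the preempt-off NEON section stays short */
		if (IS_ENABLED(CONFIG_PREEMPTION) &&
		    chunk + sctx->count % SHA256_BLOCK_SIZE > SHA256_BLOCK_SIZE)
			chunk = SHA256_BLOCK_SIZE -
				sctx->count % SHA256_BLOCK_SIZE;

		kernel_neon_begin();		/* disables preemption */
		sha256_base_do_update(desc, data, chunk,
				      (sha256_block_fn *)sha256_block_neon);
		kernel_neon_end();		/* re-enables preemption */

		data += chunk;
		len -= chunk;
	}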
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -699,8 +699,8 @@ USER(\label, ic	ivau, \tmp2)			// invalidate I line PoU
  * where <label> is optional, and marks the point where execution will resume
  * after a yield has been performed. If omitted, execution resumes right after
  * the endif_yield_neon invocation. Note that the entire sequence, including
- * the provided patchup code, will be omitted from the image if CONFIG_PREEMPT
- * is not defined.
+ * the provided patchup code, will be omitted from the image if
+ * CONFIG_PREEMPTION is not defined.
  *
  * As a convenience, in the case where no patchup code is required, the above
  * sequence may be abbreviated to
@@ -728,7 +728,7 @@ USER(\label, ic	ivau, \tmp2)			// invalidate I line PoU
 	.endm

 	.macro		if_will_cond_yield_neon
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPTION
 	get_current_task	x0
 	ldr	x0, [x0, #TSK_TI_PREEMPT]
 	sub	x0, x0, #PREEMPT_DISABLE_OFFSET
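
For context, the documented sequence is used by NEON loops in the arm64 crypto code roughly like this (illustrative skeleton only; the loop body, block counter and register choice are hypothetical):

	0:	// ... process one block with NEON ...
		if_will_cond_yield_neon
		// pre-yield patchup: save partial state to memory
		do_cond_yield_neon
		// post-yield patchup: reload state after rescheduling
		endif_yield_neon
		subs	w21, w21, #1		// blocks remaining
		b.ne	0b

Since the whole sequence compiles away without CONFIG_PREEMPTION, non-preemptible kernels pay no cost for the yield points.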
--- a/arch/arm64/include/asm/preempt.h
+++ b/arch/arm64/include/asm/preempt.h
@@ -79,11 +79,11 @@ static inline bool should_resched(int preempt_offset)
 	return pc == preempt_offset;
 }

-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPTION
 void preempt_schedule(void);
 #define __preempt_schedule() preempt_schedule()
 void preempt_schedule_notrace(void);
 #define __preempt_schedule_notrace() preempt_schedule_notrace()
-#endif /* CONFIG_PREEMPT */
+#endif /* CONFIG_PREEMPTION */

 #endif /* __ASM_PREEMPT_H */
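
These declarations back the generic preempt_enable() fastpath, which invokes __preempt_schedule() when the preempt count drops to zero with a reschedule pending. Roughly (simplified from include/linux/preempt.h):

	#define preempt_enable() \
	do { \
		barrier(); \
		if (unlikely(preempt_count_dec_and_test())) \
			__preempt_schedule(); \
	} while (0)

On arm64 the need-resched flag is folded into the preempt count, so preempt_count_dec_and_test() covers both conditions in one check.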
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -604,7 +604,7 @@ el1_irq:

 	irq_handler

-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPTION
 	ldr	x24, [tsk, #TSK_TI_PREEMPT]	// get preempt count
 alternative_if ARM64_HAS_IRQ_PRIO_MASKING
 	/*
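
In pseudo-C, the tail of el1_irq above does roughly the following (a sketch, not the literal entry code; returning_from_nmi() is an invented name for the DAIF/priority-masking check done under ARM64_HAS_IRQ_PRIO_MASKING):

	if (IS_ENABLED(CONFIG_PREEMPTION) &&
	    READ_ONCE(current_thread_info()->preempt_count) == 0 &&
	    !returning_from_nmi())			/* hypothetical helper */
		arm64_preempt_schedule_irq();		/* may reschedule */

In other words, kernel preemption from the IRQ return path is now compiled in for both PREEMPT and PREEMPT_RT.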
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -144,9 +144,12 @@ void show_stack(struct task_struct *tsk, unsigned long *sp)

 #ifdef CONFIG_PREEMPT
 #define S_PREEMPT " PREEMPT"
+#elif defined(CONFIG_PREEMPT_RT)
+#define S_PREEMPT " PREEMPT_RT"
 #else
 #define S_PREEMPT ""
 #endif
+
 #define S_SMP " SMP"

 static int __die(const char *str, int err, struct pt_regs *regs)
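
The S_PREEMPT string ends up in the oops banner printed by __die(), so with this hunk a PREEMPT_RT kernel is identifiable from a crash dump. Sketch of the printk (simplified from __die() in traps.c):

	pr_emerg("Internal error: %s: %x [#%d]" S_PREEMPT S_SMP "\n",
		 str, err, ++die_counter);

An oops line on an RT kernel would then read, for example (illustrative output):

	Internal error: Oops: 96000004 [#1] PREEMPT_RT SMP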