Commit 354f4492 authored by Paul Mackerras's avatar Paul Mackerras

PPC32: update hard/soft IRQ stuff along the lines of the i386 code.

parent a832c38b
......@@ -507,9 +507,8 @@ void ppc_irq_dispatch_handler(struct pt_regs *regs, int irq)
}
#ifndef CONFIG_PPC_ISERIES /* iSeries version is in iSeries_pic.c */
int do_IRQ(struct pt_regs *regs)
void do_IRQ(struct pt_regs *regs)
{
int cpu = smp_processor_id();
int irq, first = 1;
irq_enter();
......@@ -529,10 +528,6 @@ int do_IRQ(struct pt_regs *regs)
/* That's not SMP safe ... but who cares ? */
ppc_spurious_interrupts++;
irq_exit();
if (softirq_pending(cpu))
do_softirq();
return 1; /* lets ret_from_int know we can do checks */
}
#endif /* CONFIG_PPC_ISERIES */
......@@ -566,6 +561,10 @@ void __init init_IRQ(void)
#ifdef CONFIG_SMP
void synchronize_irq(unsigned int irq)
{
/* is there anything to synchronize with? */
if (!irq_desc[irq].action)
return;
while (irq_desc[irq].status & IRQ_INPROGRESS)
barrier();
}
......
......@@ -202,8 +202,7 @@ void __switch_to(struct task_struct *prev, struct task_struct *new)
struct thread_struct *new_thread, *old_thread;
unsigned long s;
local_save_flags(s);
local_irq_disable();
local_irq_save(s);
#if CHECK_STACK
check_stack(prev);
check_stack(new);
......
......@@ -151,7 +151,7 @@ static inline void ppc_do_profile (unsigned long nip)
* with interrupts disabled.
* We set it up to overflow again in 1/HZ seconds.
*/
int timer_interrupt(struct pt_regs * regs)
void timer_interrupt(struct pt_regs * regs)
{
int next_dec;
unsigned long cpu = smp_processor_id();
......@@ -215,11 +215,6 @@ int timer_interrupt(struct pt_regs * regs)
ppc_md.heartbeat();
irq_exit();
if (softirq_pending(cpu))
do_softirq();
return 1; /* lets ret_from_int know we can do checks */
}
#endif /* CONFIG_PPC_ISERIES */
......
......@@ -42,7 +42,7 @@ extern void __no_lpq_restore_flags(unsigned long flags);
extern void iSeries_smp_message_recv( struct pt_regs * );
unsigned lpEvent_count = 0;
int do_IRQ(struct pt_regs *regs)
void do_IRQ(struct pt_regs *regs)
{
int cpu = smp_processor_id();
unsigned long flags;
......@@ -65,8 +65,7 @@ int do_IRQ(struct pt_regs *regs)
lpq = paca->lpQueuePtr;
if ( lpq ) {
local_save_flags( flags );
local_irq_disable();
local_irq_save(flags);
lpEvent_count += ItLpQueue_process( lpq, regs );
local_irq_restore( flags );
}
......@@ -78,8 +77,4 @@ int do_IRQ(struct pt_regs *regs)
/* Signal a fake decrementer interrupt */
timer_interrupt( regs );
}
if (softirq_pending(cpu))
do_softirq();
return 1; /* lets ret_from_int know we can do checks */
}
......@@ -102,7 +102,7 @@ int timerRetEnabled = 0;
int timerRetDisabled = 0;
extern unsigned long iSeries_dec_value;
int timer_interrupt(struct pt_regs * regs)
void timer_interrupt(struct pt_regs * regs)
{
long next_dec;
struct Paca * paca;
......@@ -150,8 +150,4 @@ int timer_interrupt(struct pt_regs * regs)
set_dec( (unsigned)next_dec );
irq_exit();
if (softirq_pending(cpu))
do_softirq();
return 1;
}
......@@ -24,25 +24,86 @@ typedef struct {
#define last_jiffy_stamp(cpu) __IRQ_STAT((cpu), __last_jiffy_stamp)
#define IRQ_OFFSET 64
/*
* We put the hardirq and softirq counter into the preemption
* counter. The bitmask has the following meaning:
*
* - bits 0-7 are the preemption count (max preemption depth: 256)
* - bits 8-15 are the softirq count (max # of softirqs: 256)
* - bits 16-23 are the hardirq count (max # of hardirqs: 256)
*
* - ( bit 26 is the PREEMPT_ACTIVE flag. )
*
* PREEMPT_MASK: 0x000000ff
* HARDIRQ_MASK: 0x0000ff00
* SOFTIRQ_MASK: 0x00ff0000
*/
#define PREEMPT_BITS 8
#define SOFTIRQ_BITS 8
#define HARDIRQ_BITS 8
#define PREEMPT_SHIFT 0
#define SOFTIRQ_SHIFT (PREEMPT_SHIFT + PREEMPT_BITS)
#define HARDIRQ_SHIFT (SOFTIRQ_SHIFT + SOFTIRQ_BITS)
#define __MASK(x) ((1UL << (x))-1)
#define PREEMPT_MASK (__MASK(PREEMPT_BITS) << PREEMPT_SHIFT)
#define HARDIRQ_MASK (__MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT)
#define SOFTIRQ_MASK (__MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT)
#define hardirq_count() (preempt_count() & HARDIRQ_MASK)
#define softirq_count() (preempt_count() & SOFTIRQ_MASK)
#define irq_count() (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK))
#define PREEMPT_OFFSET (1UL << PREEMPT_SHIFT)
#define SOFTIRQ_OFFSET (1UL << SOFTIRQ_SHIFT)
#define HARDIRQ_OFFSET (1UL << HARDIRQ_SHIFT)
/*
* Are we in an interrupt context? Either doing bottom half
* or hardware interrupt processing?
* The hardirq mask has to be large enough to have
* space for potentially all IRQ sources in the system
* nesting on a single CPU:
*/
#define in_interrupt() ((preempt_count() & ~PREEMPT_ACTIVE) >= IRQ_OFFSET)
#define in_irq in_interrupt
#if (1 << HARDIRQ_BITS) < NR_IRQS
# error HARDIRQ_BITS is too low!
#endif
#define irq_enter() (preempt_count() += IRQ_OFFSET)
#define irq_exit() (preempt_count() -= IRQ_OFFSET)
/*
* Are we doing bottom half or hardware interrupt processing?
* Are we in a softirq context? Interrupt context?
*/
#define in_irq() (hardirq_count())
#define in_softirq() (softirq_count())
#define in_interrupt() (irq_count())
#ifndef CONFIG_SMP
#define synchronize_irq(irq) barrier()
#else /* CONFIG_SMP */
extern void synchronize_irq(unsigned int irq);
#define hardirq_trylock() (!in_interrupt())
#define hardirq_endlock() do { } while (0)
#define irq_enter() (preempt_count() += HARDIRQ_OFFSET)
#if CONFIG_PREEMPT
# define IRQ_EXIT_OFFSET (HARDIRQ_OFFSET-1)
#else
# define IRQ_EXIT_OFFSET HARDIRQ_OFFSET
#endif
#define irq_exit() \
do { \
preempt_count() -= IRQ_EXIT_OFFSET; \
if (!in_interrupt() && softirq_pending(smp_processor_id())) \
do_softirq(); \
preempt_enable_no_resched(); \
} while (0)
#ifndef CONFIG_SMP
# define synchronize_irq(irq) barrier()
#else
extern void synchronize_irq(unsigned int irq);
#endif /* CONFIG_SMP */
extern void show_stack(unsigned long *sp);
#endif /* __ASM_HARDIRQ_H */
#endif /* __KERNEL__ */
......@@ -8,21 +8,18 @@
#ifndef _PPC_HW_IRQ_H
#define _PPC_HW_IRQ_H
extern unsigned long timer_interrupt_intercept;
extern unsigned long do_IRQ_intercept;
extern int timer_interrupt(struct pt_regs *);
extern void timer_interrupt(struct pt_regs *);
extern void ppc_irq_dispatch_handler(struct pt_regs *regs, int irq);
#define INLINE_IRQS
#ifdef INLINE_IRQS
#define mfmsr() ({unsigned int rval; \
asm volatile("mfmsr %0" : "=r" (rval)); rval;})
#define mtmsr(v) asm volatile("mtmsr %0" : : "r" (v))
#define local_save_flags(flags) ((flags) = mfmsr())
#define local_irq_restore(flags) mtmsr(flags)
#define irqs_disabled() ((mfmsr() & MSR_EE) == 0)
#ifdef INLINE_IRQS
static inline void local_irq_disable(void)
{
......@@ -40,7 +37,7 @@ static inline void local_irq_enable(void)
mtmsr(msr | MSR_EE);
}
static inline void __do_save_and_cli(unsigned long *flags)
static inline void local_irq_save_ptr(unsigned long *flags)
{
unsigned long msr;
msr = mfmsr();
......@@ -49,7 +46,9 @@ static inline void __do_save_and_cli(unsigned long *flags)
__asm__ __volatile__("": : :"memory");
}
#define local_irq_save(flags) __do_save_and_cli(&flags)
#define local_save_flags(flags) ((flags) = mfmsr())
#define local_irq_save(flags) local_irq_save_ptr(&flags)
#define local_irq_restore(flags) mtmsr(flags)
#else
......@@ -57,9 +56,8 @@ extern void local_irq_enable(void);
extern void local_irq_disable(void);
extern void local_irq_restore(unsigned long);
extern void local_save_flags_ptr(unsigned long *);
extern unsigned long local_irq_enable_end, local_irq_disable_end, local_irq_restore_end, local_save_flags_ptr_end;
#define local_save_flags(flags) local_save_flags_ptr((unsigned long *)&flags)
#define local_save_flags(flags) local_save_flags_ptr(&flags)
#define local_irq_save(flags) ({local_save_flags(flags);local_irq_disable();})
#endif
......
......@@ -5,32 +5,30 @@
#ifndef __ASM_SOFTIRQ_H
#define __ASM_SOFTIRQ_H
#include <asm/atomic.h>
#include <linux/preempt.h>
#include <asm/hardirq.h>
#define local_bh_disable() \
do { \
preempt_count() += IRQ_OFFSET; \
barrier(); \
#define local_bh_disable() \
do { \
preempt_count() += SOFTIRQ_OFFSET; \
barrier(); \
} while (0)
#define __local_bh_enable() \
do { \
barrier(); \
preempt_count() -= IRQ_OFFSET; \
#define __local_bh_enable() \
do { \
barrier(); \
preempt_count() -= SOFTIRQ_OFFSET; \
} while (0)
#define local_bh_enable() \
do { \
barrier(); \
if ((preempt_count() -= IRQ_OFFSET) < IRQ_OFFSET \
&& softirq_pending(smp_processor_id())) \
__local_bh_enable(); \
if (unlikely(!in_interrupt() \
&& softirq_pending(smp_processor_id()))) \
do_softirq(); \
if (preempt_count() == 0) \
preempt_check_resched(); \
} while (0)
#define in_softirq() in_interrupt()
#endif /* __ASM_SOFTIRQ_H */
#endif /* __KERNEL__ */
......@@ -96,19 +96,6 @@ extern unsigned int rtas_data;
struct pt_regs;
extern void dump_regs(struct pt_regs *);
#ifndef CONFIG_SMP
/*
* Compatibility macros, to be removed in future...
*/
#define cli() local_irq_disable()
#define sti() local_irq_enable()
#define save_flags(flags) local_save_flags(flags)
#define restore_flags(flags) local_irq_restore(flags)
#define save_and_cli(flags) local_irq_save(flags)
#endif /* !CONFIG_SMP */
static __inline__ unsigned long
xchg_u32(volatile void *p, unsigned long val)
{
......
......@@ -22,25 +22,25 @@ struct thread_info {
struct exec_domain *exec_domain; /* execution domain */
unsigned long flags; /* low level flags */
int cpu; /* cpu we're on */
int preempt_count; /* not used at present */
int softirq_count;
int hardirq_count;
int preempt_count;
};
/*
* macros/functions for gaining access to the thread information structure
*/
#define INIT_THREAD_INFO(tsk) \
{ \
task: &tsk, \
exec_domain: &default_exec_domain, \
flags: 0, \
cpu: 0, \
.task = &tsk, \
.exec_domain = &default_exec_domain, \
.flags = 0, \
.cpu = 0, \
.preempt_count = 1 \
}
#define init_thread_info (init_thread_union.thread_info)
#define init_stack (init_thread_union.stack)
/*
* macros/functions for gaining access to the thread information structure
*/
/* how to get the thread information struct from C */
static inline struct thread_info *current_thread_info(void)
{
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment