Commit 8e70b6f7 authored by Paul Mackerras

Merge samba.org:/home/paulus/kernel/linux-2.5

into samba.org:/home/paulus/kernel/for-linus-ppc
parents b0ab8396 889d64d5
@@ -178,15 +178,6 @@ setup_irq(unsigned int irq, struct irqaction * new)
 	return 0;
 }
 
-#if (defined(CONFIG_8xx) || defined(CONFIG_8260))
-/* Name change so we can catch standard drivers that potentially mess up
- * the internal interrupt controller on 8xx and 8260.  Just bear with me,
- * I don't like this either and I am searching a better solution.  For
- * now, this is what I need. -- Dan
- */
-#define request_irq request_8xxirq
-#endif
-
 void free_irq(unsigned int irq, void* dev_id)
 {
 	irq_desc_t *desc;
@@ -212,11 +203,7 @@ void free_irq(unsigned int irq, void* dev_id)
 	}
 	spin_unlock_irqrestore(&desc->lock,flags);
 
-#ifdef CONFIG_SMP
-	/* Wait to make sure it's not being used on another CPU */
-	while (desc->status & IRQ_INPROGRESS)
-		barrier();
-#endif
+	synchronize_irq(irq);
 	irq_kfree(action);
 	return;
 }
@@ -289,8 +276,8 @@ int request_irq(unsigned int irq, void (*handler)(int, void *, struct pt_regs *)
 
  *
  * This function may be called from IRQ context.
  */
 void disable_irq_nosync(unsigned int irq)
 {
 	irq_desc_t *desc = irq_desc + irq;
 	unsigned long flags;
@@ -320,12 +307,7 @@ int request_irq(unsigned int irq, void (*handler)(int, void *, struct pt_regs *)
 void disable_irq(unsigned int irq)
 {
 	disable_irq_nosync(irq);
-
-	if (!local_irq_count(smp_processor_id())) {
-		do {
-			barrier();
-		} while (irq_desc[irq].status & IRQ_INPROGRESS);
-	}
+	synchronize_irq(irq);
 }
 
 /**
@@ -529,7 +511,7 @@ int do_IRQ(struct pt_regs *regs)
 {
 	int cpu = smp_processor_id();
 	int irq, first = 1;
-	hardirq_enter( cpu );
+	irq_enter();
 
 	/*
 	 * Every platform is required to implement ppc_md.get_irq.
@@ -546,7 +528,7 @@ int do_IRQ(struct pt_regs *regs)
 	if (irq != -2 && first)
 		/* That's not SMP safe ... but who cares ? */
 		ppc_spurious_interrupts++;
-	hardirq_exit( cpu );
+	irq_exit();
 
 	if (softirq_pending(cpu))
 		do_softirq();
@@ -582,262 +564,10 @@ void __init init_IRQ(void)
 }
 
 #ifdef CONFIG_SMP
-unsigned char global_irq_holder = NO_PROC_ID;
-unsigned volatile long global_irq_lock; /* pendantic :long for set_bit--RR*/
-atomic_t global_bh_count;
-
-static void show(char * str)
-{
-	int cpu = smp_processor_id();
-
-	printk("\n%s, CPU %d:\n", str, cpu);
-	printk("irq: [%d %d]\n",
-		local_irq_count(0),
-		local_irq_count(1));
-	printk("bh: %d [%d %d]\n",
-		atomic_read(&global_bh_count),
-		local_bh_count(0),
-		local_bh_count(1));
-}
-
-static inline void wait_on_bh(void)
-{
-	int count = MAXCOUNT;
-	do {
-		if (!--count) {
-			show("wait_on_bh");
-			count = ~0;
-		}
-		/* nothing .. wait for the other bh's to go away */
-	} while (atomic_read(&global_bh_count) != 0);
-}
-
-static inline void wait_on_irq(int cpu)
-{
-	int count = MAXCOUNT;
-
-	for (;;) {
-		/*
-		 * Wait until all interrupts are gone. Wait
-		 * for bottom half handlers unless we're
-		 * already executing in one..
-		 */
-		if (!irqs_running())
-			if (local_bh_count(cpu) || !spin_is_locked(&global_bh_lock))
-				break;
-
-		/* Duh, we have to loop. Release the lock to avoid deadlocks */
-		clear_bit(0,&global_irq_lock);
-
-		for (;;) {
-			if (!--count) {
-				show("wait_on_irq");
-				count = ~0;
-			}
-			local_irq_enable();
-			/*
-			 * We have to allow irqs to arrive between local_irq_enable and local_irq_disable
-			 * Some cpus apparently won't cause the interrupt
-			 * for several instructions. We hope that isync will
-			 * catch this --Troy
-			 */
-			__asm__ __volatile__ ("isync");
-			local_irq_disable();
-			if (irqs_running())
-				continue;
-			if (global_irq_lock)
-				continue;
-			if (!local_bh_count(cpu) && spin_is_locked(&global_bh_lock))
-				continue;
-			if (!test_and_set_bit(0,&global_irq_lock))
-				break;
-		}
-	}
-}
-
-/*
- * This is called when we want to synchronize with
- * bottom half handlers. We need to wait until
- * no other CPU is executing any bottom half handler.
- *
- * Don't wait if we're already running in an interrupt
- * context or are inside a bh handler.
- */
-void synchronize_bh(void)
-{
-	if (atomic_read(&global_bh_count) && !in_interrupt())
-		wait_on_bh();
-}
-
-/*
- * This is called when we want to synchronize with
- * interrupts. We may for example tell a device to
- * stop sending interrupts: but to make sure there
- * are no interrupts that are executing on another
- * CPU we need to call this function.
- */
-void synchronize_irq(void)
-{
-	if (irqs_running()) {
-		/* Stupid approach */
-		cli();
-		sti();
-	}
-}
-
-static inline void get_irqlock(int cpu)
-{
-	unsigned int loops = MAXCOUNT;
-
-	if (test_and_set_bit(0,&global_irq_lock)) {
-		/* do we already hold the lock? */
-		if ((unsigned char) cpu == global_irq_holder)
-			return;
-		/* Uhhuh.. Somebody else got it. Wait.. */
-		do {
-			do {
-				if (loops-- == 0) {
-					printk("get_irqlock(%d) waiting, global_irq_holder=%d\n", cpu, global_irq_holder);
-#ifdef CONFIG_XMON
-					xmon(0);
-#endif
-				}
-			} while (test_bit(0,&global_irq_lock));
-		} while (test_and_set_bit(0,&global_irq_lock));
-	}
-	/*
-	 * We also need to make sure that nobody else is running
-	 * in an interrupt context.
-	 */
-	wait_on_irq(cpu);
-
-	/*
-	 * Ok, finally..
-	 */
-	global_irq_holder = cpu;
-}
-
-/*
- * A global "cli()" while in an interrupt context
- * turns into just a local cli(). Interrupts
- * should use spinlocks for the (very unlikely)
- * case that they ever want to protect against
- * each other.
- *
- * If we already have local interrupts disabled,
- * this will not turn a local disable into a
- * global one (problems with spinlocks: this makes
- * save_flags+cli+sti usable inside a spinlock).
- */
-void __global_cli(void)
-{
-	unsigned long flags;
-
-	local_save_flags(flags);
-	if (flags & (1 << 15)) {
-		int cpu = smp_processor_id();
-		local_irq_disable();
-		if (!local_irq_count(cpu))
-			get_irqlock(cpu);
-	}
-}
-
-void __global_sti(void)
-{
-	int cpu = smp_processor_id();
-
-	if (!local_irq_count(cpu))
-		release_irqlock(cpu);
-	local_irq_enable();
-}
-
-/*
- * SMP flags value to restore to:
- * 0 - global cli
- * 1 - global sti
- * 2 - local cli
- * 3 - local sti
- */
-unsigned long __global_save_flags(void)
-{
-	int retval;
-	int local_enabled;
-	unsigned long flags;
-
-	local_save_flags(flags);
-	local_enabled = (flags >> 15) & 1;
-	/* default to local */
-	retval = 2 + local_enabled;
-
-	/* check for global flags if we're not in an interrupt */
-	if (!local_irq_count(smp_processor_id())) {
-		if (local_enabled)
-			retval = 1;
-		if (global_irq_holder == (unsigned char) smp_processor_id())
-			retval = 0;
-	}
-	return retval;
-}
-
-int
-tb(long vals[],
-   int max_size)
-{
-	register unsigned long *orig_sp __asm__ ("r1");
-	register unsigned long lr __asm__ ("r3");
-	unsigned long *sp;
-	int i;
-
-	asm volatile ("mflr 3");
-	vals[0] = lr;
-	sp = (unsigned long *) *orig_sp;
-	sp = (unsigned long *) *sp;
-	for (i=1; i<max_size; i++) {
-		if (sp == 0) {
-			break;
-		}
-		vals[i] = *(sp+1);
-		sp = (unsigned long *) *sp;
-	}
-	return i;
-}
-
-void __global_restore_flags(unsigned long flags)
-{
-	switch (flags) {
-	case 0:
-		__global_cli();
-		break;
-	case 1:
-		__global_sti();
-		break;
-	case 2:
-		local_irq_disable();
-		break;
-	case 3:
-		local_irq_enable();
-		break;
-	default:
-	{
-		unsigned long trace[5];
-		int count;
-		int i;
-
-		printk("global_restore_flags: %08lx (%08lx)\n",
-			flags, (&flags)[-1]);
-		count = tb(trace, 5);
-		printk("tb:");
-		for(i=0; i<count; i++) {
-			printk(" %8.8lx", trace[i]);
-		}
-		printk("\n");
-	}
-	}
-}
+void synchronize_irq(unsigned int irq)
+{
+	while (irq_desc[irq].status & IRQ_INPROGRESS)
+		barrier();
+}
 
 #endif /* CONFIG_SMP */
...
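The hunk above drops the open-coded "wait for IRQ_INPROGRESS" loops from free_irq() and disable_irq() in favour of the new per-interrupt synchronize_irq(irq). The short program below is a userspace model of that hand-off, not kernel code: the atomic flag stands in for IRQ_INPROGRESS, and the helper names (handler, synchronize_irq_model) are invented for the illustration.

/* Userspace model of the per-IRQ synchronize_irq() hand-off; illustrative only,
 * not kernel code.  The atomic flag stands in for IRQ_INPROGRESS in
 * irq_desc[irq].status.  Build with: cc -pthread demo.c */
#include <pthread.h>
#include <stdatomic.h>
#include <stdlib.h>
#include <unistd.h>

static atomic_int in_progress = 1;   /* the "interrupt" is already being serviced */
static int *action_data;             /* models the irqaction that free_irq() releases */

static void *handler(void *unused)
{
	(void)unused;
	usleep(10000);                    /* pretend to service the device */
	(void)*action_data;               /* the handler still uses its data here */
	atomic_store(&in_progress, 0);    /* handler finished */
	return NULL;
}

static void synchronize_irq_model(void)
{
	/* Same busy-wait idea as the new synchronize_irq(irq): do not return
	 * while a handler for this line is still running on another CPU. */
	while (atomic_load(&in_progress))
		;
}

int main(void)
{
	pthread_t t;

	action_data = calloc(1, sizeof *action_data);
	pthread_create(&t, NULL, handler, NULL);

	synchronize_irq_model();    /* what free_irq()/disable_irq() now rely on */
	free(action_data);          /* safe: no handler can still be using it */

	pthread_join(t, NULL);
	return 0;
}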
@@ -207,12 +207,6 @@ EXPORT_SYMBOL(last_task_used_altivec);
 EXPORT_SYMBOL(giveup_altivec);
 #endif /* CONFIG_ALTIVEC */
 #ifdef CONFIG_SMP
-EXPORT_SYMBOL(global_irq_lock);
-EXPORT_SYMBOL(global_irq_holder);
-EXPORT_SYMBOL(__global_cli);
-EXPORT_SYMBOL(__global_sti);
-EXPORT_SYMBOL(__global_save_flags);
-EXPORT_SYMBOL(__global_restore_flags);
 #ifdef CONFIG_DEBUG_SPINLOCK
 EXPORT_SYMBOL(_raw_spin_lock);
 EXPORT_SYMBOL(_raw_spin_unlock);
...
@@ -124,27 +124,27 @@ void TAUupdate(int cpu)
 
 void TAUException(struct pt_regs * regs)
 {
-	unsigned long cpu = smp_processor_id();
+	int cpu = smp_processor_id();
 
-	hardirq_enter(cpu);
+	irq_enter();
 	tau[cpu].interrupts++;
 
 	TAUupdate(cpu);
 
-	hardirq_exit(cpu);
-	return;
+	irq_exit();
 }
 #endif /* CONFIG_TAU_INT */
 
 static void tau_timeout(void * info)
 {
-	unsigned long cpu = smp_processor_id();
+	int cpu;
 	unsigned long flags;
 	int size;
 	int shrink;
 
 	/* disabling interrupts *should* be okay */
-	save_flags(flags); cli();
+	local_irq_save(flags);
+	cpu = smp_processor_id();
 
 #ifndef CONFIG_TAU_INT
 	TAUupdate(cpu);
@@ -186,7 +186,7 @@ static void tau_timeout(void * info)
 	 */
 	mtspr(SPRN_THRM3, THRM3_SITV(500*60) | THRM3_E);
 
-	restore_flags(flags);
+	local_irq_restore(flags);
 }
 
 static void tau_timeout_smp(unsigned long unused)
...
@@ -75,7 +75,7 @@ u64 jiffies_64;
 
 unsigned long disarm_decr[NR_CPUS];
 
-extern int do_sys_settimeofday(struct timeval *tv, struct timezone *tz);
+extern struct timezone sys_tz;
 
 /* keep track of when we need to update the rtc */
 time_t last_rtc_update;
@@ -161,7 +161,7 @@ int timer_interrupt(struct pt_regs * regs)
 	if (atomic_read(&ppc_n_lost_interrupts) != 0)
 		do_IRQ(regs);
 
-	hardirq_enter(cpu);
+	irq_enter();
 
 	while ((next_dec = tb_ticks_per_jiffy - tb_delta(&jiffy_stamp)) < 0) {
 		jiffy_stamp += tb_ticks_per_jiffy;
@@ -214,7 +214,7 @@ int timer_interrupt(struct pt_regs * regs)
 	if (ppc_md.heartbeat && !ppc_md.heartbeat_count--)
 		ppc_md.heartbeat();
 
-	hardirq_exit(cpu);
+	irq_exit();
 
 	if (softirq_pending(cpu))
 		do_softirq();
@@ -358,14 +358,11 @@ void __init time_init(void)
 	/* Not exact, but the timer interrupt takes care of this */
 	set_dec(tb_ticks_per_jiffy);
 
-	/* If platform provided a timezone (pmac), we correct the time
-	 * using do_sys_settimeofday() which in turn calls warp_clock()
-	 */
+	/* If platform provided a timezone (pmac), we correct the time */
 	if (time_offset) {
-		struct timezone tz;
-		tz.tz_minuteswest = -time_offset / 60;
-		tz.tz_dsttime = 0;
-		do_sys_settimeofday(NULL, &tz);
+		sys_tz.tz_minuteswest = -time_offset / 60;
+		sys_tz.tz_dsttime = 0;
+		xtime.tv_sec -= time_offset;
 	}
 }
...
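In the time_init() hunk above, the platform-supplied timezone (time_offset, in seconds east of UTC) is no longer pushed through do_sys_settimeofday(); it is written straight into sys_tz and folded into xtime. The small standalone program below only checks the sign conventions of that arithmetic; the sample values are made up.

/* Standalone arithmetic check of the time_init() timezone fix-up; the values
 * are illustrative, only the sign conventions match the hunk above. */
#include <stdio.h>

int main(void)
{
	long time_offset = 2 * 60 * 60;            /* platform says UTC+2, in seconds */
	long tz_minuteswest, xtime_sec = 1000000;  /* pretend the RTC gave local time */

	tz_minuteswest = -time_offset / 60;        /* east of UTC => negative minutes west */
	xtime_sec -= time_offset;                  /* convert the local RTC reading back to UTC */

	printf("tz_minuteswest=%ld xtime.tv_sec=%ld\n", tz_minuteswest, xtime_sec);
	return 0;
}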
@@ -25,7 +25,6 @@
 #include <asm/tlbflush.h>
 
 extern void mapin_ram(void);
-extern void bat_mapin_ram(void);
 extern int map_page(unsigned long va, unsigned long pa, int flags);
 extern void setbat(int index, unsigned long virt, unsigned long phys,
 		   unsigned int size, int flags);
@@ -49,14 +48,17 @@ extern unsigned long Hash_size, Hash_mask;
 
 #if defined(CONFIG_8xx)
 #define flush_HPTE(X, va, pg)	_tlbie(va)
 #define MMU_init_hw()		do { } while(0)
+#define mmu_mapin_ram()		(0UL)
 
 #elif defined(CONFIG_4xx)
 #define flush_HPTE(X, va, pg)	_tlbie(va)
 extern void MMU_init_hw(void);
+#define mmu_mapin_ram()		(0UL)
 
 #else
 /* anything except 4xx or 8xx */
 extern void MMU_init_hw(void);
+extern unsigned long mmu_mapin_ram(void);
 
 /* Be careful....this needs to be updated if we ever encounter 603 SMPs,
  * which includes all new 82xx processors.  We need tlbie/tlbsync here
...
@@ -252,31 +252,14 @@ void __init mapin_ram(void)
 {
 	unsigned long v, p, s, f;
 
-#ifdef HAVE_BATS
-	if (!__map_without_bats)
-		bat_mapin_ram();
-#endif /* HAVE_BATS */
-
-	v = KERNELBASE;
-	p = PPC_MEMSTART;
-	for (s = 0; s < total_lowmem; s += PAGE_SIZE) {
-		/* On the MPC8xx, we want the page shared so we
-		 * don't get ASID compares on kernel space.
-		 */
-		f = _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_SHARED | _PAGE_HWEXEC;
-#if defined(CONFIG_KGDB) || defined(CONFIG_XMON)
-		/* Allows stub to set breakpoints everywhere */
-		f |= _PAGE_WRENABLE;
-#else	/* !CONFIG_KGDB && !CONFIG_XMON */
-		if ((char *) v < _stext || (char *) v >= etext)
-			f |= _PAGE_WRENABLE;
-#ifdef CONFIG_PPC_STD_MMU
-		else
-			/* On the powerpc (not all), no user access
-			   forces R/W kernel access */
-			f |= _PAGE_USER;
-#endif /* CONFIG_PPC_STD_MMU */
-#endif /* CONFIG_KGDB || CONFIG_XMON */
+	s = mmu_mapin_ram();
+	v = KERNELBASE + s;
+	p = PPC_MEMSTART + s;
+	for (; s < total_lowmem; s += PAGE_SIZE) {
+		if ((char *) v >= _stext && (char *) v < etext)
+			f = _PAGE_RAM_TEXT;
+		else
+			f = _PAGE_RAM;
 		map_page(v, p, f);
 		v += PAGE_SIZE;
 		p += PAGE_SIZE;
...
@@ -87,12 +87,15 @@ unsigned long p_mapped_by_bats(unsigned long pa)
 	return 0;
 }
 
-void __init bat_mapin_ram(void)
+unsigned long __init mmu_mapin_ram(void)
 {
 	unsigned long tot, bl, done;
 	unsigned long max_size = (256<<20);
 	unsigned long align;
 
+	if (__map_without_bats)
+		return 0;
+
 	/* Set up BAT2 and if necessary BAT3 to cover RAM. */
 
 	/* Make sure we don't map a block larger than the
@@ -119,7 +122,10 @@ void __init bat_mapin_ram(void)
 			break;
 		setbat(3, KERNELBASE+done, PPC_MEMSTART+done, bl,
 		       _PAGE_KERNEL);
+		done = (unsigned long)bat_addrs[3].limit - KERNELBASE + 1;
 	}
+
+	return done;
 }
...
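Taken together, the two memory-mapping hunks above make mmu_mapin_ram() report how many bytes it covered with BATs (zero on 4xx/8xx, or when booting with __map_without_bats), and mapin_ram() then page-maps only the remainder, choosing _PAGE_RAM_TEXT for the kernel text range and _PAGE_RAM elsewhere. The loop below is a minimal standalone model of that control flow; the constants and the stub map_page() are placeholders, not the kernel's values.

/* Standalone model of the new mapin_ram() flow; the constants and the stub
 * map_page() are placeholders, not the kernel's values. */
#include <stdio.h>

#define PAGE_SIZE      4096UL
#define KERNELBASE     0xC0000000UL
#define PPC_MEMSTART   0x00000000UL
#define TOTAL_LOWMEM   (32UL << 20)                 /* pretend 32MB of lowmem */
#define TEXT_END       (KERNELBASE + (4UL << 20))   /* pretend 4MB of kernel text */

enum { PAGE_RAM = 1, PAGE_RAM_TEXT = 2 };

static unsigned long mmu_mapin_ram(void)
{
	return 0;   /* as on 4xx/8xx, where mmu_mapin_ram() is (0UL); BAT-capable
	             * CPUs would instead return the bytes already covered by BATs */
}

static void map_page(unsigned long v, unsigned long p, int f)
{
	(void)v; (void)p; (void)f;   /* the real function installs a PTE */
}

int main(void)
{
	unsigned long v, p, s, text_pages = 0, data_pages = 0;
	int f;

	s = mmu_mapin_ram();          /* bytes already handled by BATs */
	v = KERNELBASE + s;
	p = PPC_MEMSTART + s;
	for (; s < TOTAL_LOWMEM; s += PAGE_SIZE) {
		if (v >= KERNELBASE && v < TEXT_END) {   /* mirrors the _stext/etext test */
			f = PAGE_RAM_TEXT;
			text_pages++;
		} else {
			f = PAGE_RAM;
			data_pages++;
		}
		map_page(v, p, f);
		v += PAGE_SIZE;
		p += PAGE_SIZE;
	}
	printf("page-mapped %lu text pages, %lu other pages\n", text_pages, data_pages);
	return 0;
}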
@@ -52,7 +52,7 @@ int do_IRQ(struct pt_regs *regs)
 	if ( is_soft_enabled() )
 		BUG();
 
-	hardirq_enter( cpu );
+	irq_enter();
 
 	paca = (struct Paca *)mfspr(SPRG1);
@@ -71,7 +71,7 @@ int do_IRQ(struct pt_regs *regs)
 		local_irq_restore( flags );
 	}
 
-	hardirq_exit( cpu );
+	irq_exit();
 
 	if ( paca->xLpPacaPtr->xDecrInt ) {
 		paca->xLpPacaPtr->xDecrInt = 0;
...
@@ -117,7 +117,7 @@ int timer_interrupt(struct pt_regs * regs)
 	else
 		timerRetDisabled++;
 
-	hardirq_enter(cpu);
+	irq_enter();
 
 	if (!user_mode(regs))
 		ppc_do_profile(instruction_pointer(regs));
@@ -149,7 +149,7 @@ int timer_interrupt(struct pt_regs * regs)
 		paca->xLpPacaPtr->xDecrInt = 0;
 	set_dec( (unsigned)next_dec );
 
-	hardirq_exit(cpu);
+	irq_exit();
 
 	if (softirq_pending(cpu))
 		do_softirq();
...
@@ -15,8 +15,6 @@
  */
 typedef struct {
 	unsigned long __softirq_pending;	/* set_bit is used on this */
-	unsigned int __local_irq_count;
-	unsigned int __local_bh_count;
 	unsigned int __syscall_count;
 	struct task_struct * __ksoftirqd_task;
 	unsigned int __last_jiffy_stamp;
@@ -25,89 +23,24 @@ typedef struct {
 #include <linux/irq_cpustat.h>	/* Standard mappings for irq_cpustat_t above */
 
 #define last_jiffy_stamp(cpu) __IRQ_STAT((cpu), __last_jiffy_stamp)
 
+#define IRQ_OFFSET	64
+
 /*
  * Are we in an interrupt context? Either doing bottom half
  * or hardware interrupt processing?
  */
-#define in_interrupt() ({ int __cpu = smp_processor_id(); \
-	(local_irq_count(__cpu) + local_bh_count(__cpu) != 0); })
-
-#define in_irq() (local_irq_count(smp_processor_id()) != 0)
+#define in_interrupt()	((preempt_count() & ~PREEMPT_ACTIVE) >= IRQ_OFFSET)
+
+#define in_irq		in_interrupt
+
+#define irq_enter()	(preempt_count() += IRQ_OFFSET)
+#define irq_exit()	(preempt_count() -= IRQ_OFFSET)
 
 #ifndef CONFIG_SMP
 
-#define hardirq_trylock(cpu)	(local_irq_count(cpu) == 0)
-#define hardirq_endlock(cpu)	do { } while (0)
-
-#define hardirq_enter(cpu)	do { preempt_disable(); local_irq_count(cpu)++; } while (0)
-#define hardirq_exit(cpu)	do { local_irq_count(cpu)--; preempt_enable(); } while (0)
-
-#define synchronize_irq()	do { } while (0)
-#define release_irqlock(cpu)	do { } while (0)
+#define synchronize_irq(irq)	barrier()
 
 #else /* CONFIG_SMP */
 
+extern void synchronize_irq(unsigned int irq);
-
-#include <asm/atomic.h>
-
-extern unsigned char global_irq_holder;
-extern unsigned volatile long global_irq_lock;
-
-static inline int irqs_running (void)
-{
-	int i;
-
-	for (i = 0; i < NR_CPUS; i++)
-		if (local_irq_count(i))
-			return 1;
-	return 0;
-}
-
-static inline void release_irqlock(int cpu)
-{
-	/* if we didn't own the irq lock, just ignore.. */
-	if (global_irq_holder == (unsigned char) cpu) {
-		global_irq_holder = NO_PROC_ID;
-		clear_bit(0,&global_irq_lock);
-	}
-}
-
-static inline void hardirq_enter(int cpu)
-{
-	unsigned int loops = 10000000;
-
-	preempt_disable();
-	++local_irq_count(cpu);
-	while (test_bit(0,&global_irq_lock)) {
-		if (cpu == global_irq_holder) {
-			printk("uh oh, interrupt while we hold global irq lock! (CPU %d)\n", cpu);
-#ifdef CONFIG_XMON
-			xmon(0);
-#endif
-			break;
-		}
-		if (loops-- == 0) {
-			printk("do_IRQ waiting for irq lock (holder=%d)\n", global_irq_holder);
-#ifdef CONFIG_XMON
-			xmon(0);
-#endif
-		}
-	}
-}
-
-static inline void hardirq_exit(int cpu)
-{
-	--local_irq_count(cpu);
-	preempt_enable();
-}
-
-static inline int hardirq_trylock(int cpu)
-{
-	return !test_bit(0,&global_irq_lock);
-}
-
-#define hardirq_endlock(cpu)	do { } while (0)
-
-extern void synchronize_irq(void);
 
 #endif /* CONFIG_SMP */
...
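The hardirq.h hunk above stops tracking hardware-interrupt nesting in a per-CPU __local_irq_count: irq_enter() and irq_exit() simply add and subtract IRQ_OFFSET in preempt_count(), and in_interrupt() tests whether the count has reached that offset. The toy program below models only that accounting; it ignores PREEMPT_ACTIVE and the low preemption bits, and the plain global counter is a stand-in for the real per-task field.

/* Standalone model of the preempt_count()-based interrupt accounting that
 * replaces __local_irq_count/__local_bh_count; names are illustrative. */
#include <assert.h>
#include <stdio.h>

#define IRQ_OFFSET 64

static int preempt_count;   /* in the kernel this lives with the task, not a global */

#define irq_enter()     (preempt_count += IRQ_OFFSET)
#define irq_exit()      (preempt_count -= IRQ_OFFSET)
#define in_interrupt()  (preempt_count >= IRQ_OFFSET)

int main(void)
{
	assert(!in_interrupt());

	irq_enter();                 /* do_IRQ() / timer_interrupt() entry */
	assert(in_interrupt());

	irq_enter();                 /* a nested interrupt just adds another IRQ_OFFSET */
	printf("nested count = %d\n", preempt_count);   /* prints 128 */
	irq_exit();

	assert(in_interrupt());      /* still inside the outer interrupt */
	irq_exit();
	assert(!in_interrupt());
	return 0;
}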
@@ -246,6 +246,23 @@ extern unsigned long ioremap_bot, ioremap_base;
 #define _PAGE_KERNEL	_PAGE_BASE | _PAGE_WRENABLE | _PAGE_SHARED | _PAGE_HWEXEC
 #define _PAGE_IO	_PAGE_KERNEL | _PAGE_NO_CACHE | _PAGE_GUARDED
 
+#define _PAGE_RAM	_PAGE_KERNEL
+
+#if defined(CONFIG_KGDB) || defined(CONFIG_XMON)
+/* We want the debuggers to be able to set breakpoints anywhere, so
+ * don't write protect the kernel text */
+#define _PAGE_RAM_TEXT	_PAGE_RAM
+#else
+#ifdef CONFIG_PPC_STD_MMU
+/* On standard PPC MMU, no user access implies kernel read/write
+ * access, so to write-protect the kernel text we must turn on user
+ * access */
+#define _PAGE_RAM_TEXT	(_PAGE_RAM & ~_PAGE_WRENABLE) | _PAGE_USER
+#else
+#define _PAGE_RAM_TEXT	(_PAGE_RAM & ~_PAGE_WRENABLE)
+#endif
+#endif
+
 #define PAGE_NONE	__pgprot(_PAGE_BASE)
 #define PAGE_READONLY	__pgprot(_PAGE_BASE | _PAGE_USER)
 #define PAGE_READONLY_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
...
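_PAGE_RAM_TEXT above is _PAGE_RAM with write permission stripped, except that debug kernels (KGDB/XMON) keep the text writable for breakpoints, and a standard-MMU kernel must also set _PAGE_USER because on that MMU the absence of user access forces kernel read/write. The snippet below expands the three variants with made-up bit values purely to show how they differ; the real masks live in this header and vary by CPU family.

/* Made-up flag bits, purely to show how the three _PAGE_RAM_TEXT variants
 * differ; these are not the kernel's real values. */
#include <stdio.h>

#define _PAGE_WRENABLE  0x100U   /* hypothetical */
#define _PAGE_USER      0x004U   /* hypothetical */
#define _PAGE_RAM       0x1C3U   /* hypothetical _PAGE_KERNEL-style mask */

int main(void)
{
	unsigned debug_text   = _PAGE_RAM;                                   /* KGDB/XMON: writable text */
	unsigned std_mmu_text = (_PAGE_RAM & ~_PAGE_WRENABLE) | _PAGE_USER;  /* classic hash MMU */
	unsigned other_text   = (_PAGE_RAM & ~_PAGE_WRENABLE);               /* 4xx/8xx style */

	printf("debug=%#x std_mmu=%#x other=%#x\n", debug_text, std_mmu_text, other_text);
	return 0;
}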
@@ -18,7 +18,7 @@ extern spinlock_t kernel_flag;
 #ifdef CONFIG_SMP
 #define kernel_locked()		spin_is_locked(&kernel_flag)
 #elif defined(CONFIG_PREEMPT)
-#define kernel_locked()		preempt_get_count()
+#define kernel_locked()		preempt_count()
 #endif
 
 /*
...
......
...@@ -8,31 +8,29 @@ ...@@ -8,31 +8,29 @@
#include <asm/atomic.h> #include <asm/atomic.h>
#include <asm/hardirq.h> #include <asm/hardirq.h>
#define local_bh_disable() \ #define local_bh_disable() \
do { \ do { \
preempt_disable(); \ preempt_count() += IRQ_OFFSET; \
local_bh_count(smp_processor_id())++; \ barrier(); \
barrier(); \
} while (0) } while (0)
#define __local_bh_enable() \ #define __local_bh_enable() \
do { \ do { \
barrier(); \ barrier(); \
local_bh_count(smp_processor_id())--; \ preempt_count() -= IRQ_OFFSET; \
preempt_enable(); \
} while (0) } while (0)
#define local_bh_enable() \ #define local_bh_enable() \
do { \ do { \
barrier(); \ barrier(); \
if (!--local_bh_count(smp_processor_id()) \ if ((preempt_count() -= IRQ_OFFSET) < IRQ_OFFSET \
&& softirq_pending(smp_processor_id())) { \ && softirq_pending(smp_processor_id())) \
do_softirq(); \ do_softirq(); \
} \ if (preempt_count() == 0) \
preempt_enable(); \ preempt_check_resched(); \
} while (0) } while (0)
#define in_softirq() (local_bh_count(smp_processor_id()) != 0) #define in_softirq() in_interrupt()
#endif /* __ASM_SOFTIRQ_H */ #endif /* __ASM_SOFTIRQ_H */
#endif /* __KERNEL__ */ #endif /* __KERNEL__ */
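With the softirq.h hunk above, bottom-half disabling shares the same preempt_count() field as irq_enter(): local_bh_enable() runs pending softirqs only when its subtraction brings the count back under IRQ_OFFSET, and re-checks preemption only when the count reaches zero. The standalone model below reduces those two actions to printouts; the counter and helper functions are invented for the demo.

/* Standalone model of the new local_bh_disable()/local_bh_enable() counting;
 * everything here is illustrative, not the kernel macros themselves. */
#include <stdio.h>

#define IRQ_OFFSET 64

static int preempt_count;
static int softirq_pending = 1;   /* pretend a softirq was raised meanwhile */

static void local_bh_disable(void) { preempt_count += IRQ_OFFSET; }

static void local_bh_enable(void)
{
	preempt_count -= IRQ_OFFSET;
	if (preempt_count < IRQ_OFFSET && softirq_pending) {
		printf("do_softirq()\n");      /* only once all BH sections are closed */
		softirq_pending = 0;
	}
	if (preempt_count == 0)
		printf("preempt_check_resched()\n");
}

int main(void)
{
	local_bh_disable();
	local_bh_disable();   /* nested section: the inner enable must not run softirqs */
	local_bh_enable();    /* count still >= IRQ_OFFSET, nothing happens */
	local_bh_enable();    /* now softirqs run and preemption is re-checked */
	return 0;
}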
@@ -98,23 +98,15 @@ extern void dump_regs(struct pt_regs *);
 
 #ifndef CONFIG_SMP
+/*
+ * Compatibility macros, to be removed in future...
+ */
 #define cli()	local_irq_disable()
 #define sti()	local_irq_enable()
 #define save_flags(flags)	local_save_flags(flags)
 #define restore_flags(flags)	local_irq_restore(flags)
 #define save_and_cli(flags)	local_irq_save(flags)
 
-#else /* CONFIG_SMP */
-
-extern void __global_cli(void);
-extern void __global_sti(void);
-extern unsigned long __global_save_flags(void);
-extern void __global_restore_flags(unsigned long);
-#define cli() __global_cli()
-#define sti() __global_sti()
-#define save_flags(x) ((x)=__global_save_flags())
-#define restore_flags(x) __global_restore_flags(x)
-
 #endif /* !CONFIG_SMP */
 
 static __inline__ unsigned long
...