Commit ae86a80a authored by Ingo Molnar, committed by Linus Torvalds

[PATCH] "big IRQ lock" removal, IRQ cleanups

This is a massive cleanup of the IRQ subsystem.  It's loosely based on
Linus's original idea and DaveM's original implementation: fold our
various irq, softirq and bh counters into the preemption counter.

With this approach it was possible:

 - to remove the 'big IRQ lock' on SMP - on which sti() and cli() relied.

 - to streamline/simplify arch/i386/kernel/irq.c significantly.

 - to simplify the softirq code.

 - to remove the preemption count increase/decrease code from the lowlevel
   IRQ assembly code.

 - to speed up schedule() a bit.

Global sti() and cli() are gone forever on SMP; there is no more globally
synchronizing irq-disabling capability.  All code that relied on sti(),
cli() and restore_flags() must use other locking mechanisms from now on
(spinlocks and __cli()/__sti()).
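
For driver code the conversion is mostly mechanical.  A minimal sketch of
the pattern (the lock and function names here are hypothetical, for
illustration only - this is not code from the patch):

	#include <linux/spinlock.h>

	static spinlock_t my_lock = SPIN_LOCK_UNLOCKED;	/* hypothetical driver lock */

	static void my_update_shared_state(void)
	{
		unsigned long flags;

		/*
		 * old:  save_flags(flags); cli(); ... restore_flags(flags);
		 *       relied on the global IRQ lock to exclude all CPUs.
		 * new:  a driver-private spinlock excludes other CPUs, and
		 *       _irqsave disables interrupts on the local CPU only.
		 */
		spin_lock_irqsave(&my_lock, flags);
		/* ... touch state shared with the driver's IRQ handler ... */
		spin_unlock_irqrestore(&my_lock, flags);
	}

This is the same pattern the apm, mca and vm86 conversions below follow.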

Obviously this patch breaks massive amounts of code, so only limited
.configs work at the moment (UP is expected to be unaffected, but SMP
will require various driver updates).

The patch was developed and tested on SMP systems, and while the code is
still a bit rough in places, the base IRQ code appears to be pretty
robust and clean.

While it boots already, so the worst is over, there is lots of work left,
e.g. fixing the serial layer to not use cli()/sti() and bhs ...
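
The heart of the new scheme shows up in the include/asm-i386/hardirq.h
hunk below: hardirq and softirq nesting is folded into the per-thread
preemption counter in units of IRQ_OFFSET (64), so all irq-context tests
become simple arithmetic on preempt_count().  Condensed from the patch:

	#define IRQ_OFFSET	64

	/* entering/leaving hard-IRQ context is a counter add/subtract */
	#define irq_enter()	(preempt_count() += IRQ_OFFSET)
	#define irq_exit()	(preempt_count() -= IRQ_OFFSET)

	/* any IRQ_OFFSET units present => hardirq or softirq context */
	#define in_interrupt() \
		((preempt_count() & ~PREEMPT_ACTIVE) >= IRQ_OFFSET)

	/* disabling bottom halves accounts into the same counter */
	#define local_bh_disable() \
		do { preempt_count() += IRQ_OFFSET; barrier(); } while (0)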
parent 3d37e1e6
@@ -1084,9 +1084,9 @@ void smp_apic_timer_interrupt(struct pt_regs regs)
 	 * Besides, if we don't timer interrupts ignore the global
 	 * interrupt lock, which is the WrongThing (tm) to do.
 	 */
-	irq_enter(cpu, 0);
+	irq_enter();
 	smp_local_timer_interrupt(&regs);
-	irq_exit(cpu, 0);
+	irq_exit();
 	if (softirq_pending(cpu))
 		do_softirq();
...
@@ -222,6 +222,8 @@
 #include <linux/sysrq.h>

+extern rwlock_t xtime_lock;
+extern spinlock_t i8253_lock;
 extern unsigned long get_cmos_time(void);
 extern void machine_real_restart(unsigned char *, int);
@@ -1141,40 +1143,25 @@ static void queue_event(apm_event_t event, struct apm_user *sender)
 static void set_time(void)
 {
-	unsigned long flags;
-
-	if (got_clock_diff) {	/* Must know time zone in order to set clock */
-		save_flags(flags);
-		cli();
+	if (got_clock_diff)	/* Must know time zone in order to set clock */
 		CURRENT_TIME = get_cmos_time() + clock_cmos_diff;
-		restore_flags(flags);
-	}
 }

 static void get_time_diff(void)
 {
 #ifndef CONFIG_APM_RTC_IS_GMT
-	unsigned long flags;
-
 	/*
 	 * Estimate time zone so that set_time can update the clock
 	 */
-	save_flags(flags);
 	clock_cmos_diff = -get_cmos_time();
-	cli();
 	clock_cmos_diff += CURRENT_TIME;
 	got_clock_diff = 1;
-	restore_flags(flags);
 #endif
 }

-static void reinit_timer(void)
+static inline void reinit_timer(void)
 {
 #ifdef INIT_TIMER_AFTER_SUSPEND
-	unsigned long flags;
-
-	save_flags(flags);
-	cli();
 	/* set the clock to 100 Hz */
 	outb_p(0x34,0x43);	/* binary, mode 2, LSB/MSB, ch 0 */
 	udelay(10);
@@ -1182,7 +1169,6 @@ static void reinit_timer(void)
 	udelay(10);
 	outb(LATCH >> 8 , 0x40);	/* MSB */
 	udelay(10);
-	restore_flags(flags);
 #endif
 }
@@ -1203,13 +1189,21 @@ static int suspend(int vetoable)
 		}
 		printk(KERN_CRIT "apm: suspend was vetoed, but suspending anyway.\n");
 	}
+	/* serialize with the timer interrupt */
+	write_lock_irq(&xtime_lock);
+
+	/* protect against access to timer chip registers */
+	spin_lock(&i8253_lock);
+
 	get_time_diff();
-	cli();
 	err = set_system_power_state(APM_STATE_SUSPEND);
 	reinit_timer();
 	set_time();
 	ignore_normal_resume = 1;
-	sti();
+
+	spin_unlock(&i8253_lock);
+	write_unlock_irq(&xtime_lock);
+
 	if (err == APM_NO_ERROR)
 		err = APM_SUCCESS;
 	if (err != APM_SUCCESS)
@@ -1232,8 +1226,12 @@ static void standby(void)
 {
 	int err;

+	/* serialize with the timer interrupt */
+	write_lock_irq(&xtime_lock);
+
 	/* If needed, notify drivers here */
 	get_time_diff();
+	write_unlock_irq(&xtime_lock);
+
 	err = set_system_power_state(APM_STATE_STANDBY);
 	if ((err != APM_SUCCESS) && (err != APM_NO_ERROR))
 		apm_error("standby", err);
@@ -1321,7 +1319,9 @@ static void check_events(void)
 			ignore_bounce = 1;
 			if ((event != APM_NORMAL_RESUME)
 			    || (ignore_normal_resume == 0)) {
+				write_lock_irq(&xtime_lock);
 				set_time();
+				write_unlock_irq(&xtime_lock);
 				pm_send_all(PM_RESUME, (void *)0);
 				queue_event(event, NULL);
 			}
@@ -1336,7 +1336,9 @@ static void check_events(void)
 			break;
 		case APM_UPDATE_TIME:
+			write_lock_irq(&xtime_lock);
 			set_time();
+			write_unlock_irq(&xtime_lock);
 			break;
 		case APM_CRITICAL_SUSPEND:
...
@@ -72,12 +72,8 @@ VM_MASK = 0x00020000
 #ifdef CONFIG_PREEMPT
 #define preempt_stop		cli
-#define INC_PRE_COUNT(reg)	incl TI_PRE_COUNT(reg);
-#define DEC_PRE_COUNT(reg)	decl TI_PRE_COUNT(reg);
 #else
 #define preempt_stop
-#define INC_PRE_COUNT(reg)
-#define DEC_PRE_COUNT(reg)
 #define resume_kernel		restore_all
 #endif
@@ -191,7 +187,6 @@ ENTRY(ret_from_fork)
 	ALIGN
 ret_from_intr:
 	preempt_stop
-	DEC_PRE_COUNT(%ebx)
ret_from_exception:
 	movl EFLAGS(%esp), %eax		# mix EFLAGS and CS
 	movb CS(%esp), %al
@@ -338,9 +333,8 @@ vector=vector+1
 	ALIGN
common_interrupt:
 	SAVE_ALL
-	GET_THREAD_INFO(%ebx)
-	INC_PRE_COUNT(%ebx)
 	call do_IRQ
+	GET_THREAD_INFO(%ebx)
 	jmp ret_from_intr

 #define BUILD_INTERRUPT(name, nr) \
@@ -348,7 +342,6 @@ ENTRY(name) \
 	pushl $nr-256; \
 	SAVE_ALL \
 	GET_THREAD_INFO(%ebx); \
-	INC_PRE_COUNT(%ebx) \
 	call smp_/**/name; \
 	jmp ret_from_intr;
...
@@ -132,13 +132,7 @@ EXPORT_SYMBOL(cpu_online_map);
 EXPORT_SYMBOL_NOVERS(__write_lock_failed);
 EXPORT_SYMBOL_NOVERS(__read_lock_failed);

-/* Global SMP irq stuff */
-EXPORT_SYMBOL(synchronize_irq);
-EXPORT_SYMBOL(global_irq_holder);
-EXPORT_SYMBOL(__global_cli);
-EXPORT_SYMBOL(__global_sti);
-EXPORT_SYMBOL(__global_save_flags);
-EXPORT_SYMBOL(__global_restore_flags);
+/* Global SMP stuff */
 EXPORT_SYMBOL(smp_call_function);

 /* TLB flushing */
...
@@ -1219,7 +1219,7 @@ static int __init timer_irq_works(void)
 {
 	unsigned int t1 = jiffies;

-	sti();
+	__sti();
 	/* Let ten ticks pass... */
 	mdelay((10 * 1000) / HZ);
...
@@ -184,250 +184,12 @@ int show_interrupts(struct seq_file *p, void *v)
 	return 0;
 }

-/*
- * Global interrupt locks for SMP. Allow interrupts to come in on any
- * CPU, yet make cli/sti act globally to protect critical regions..
- */
-
-#ifdef CONFIG_SMP
-unsigned char global_irq_holder = NO_PROC_ID;
-unsigned volatile long global_irq_lock; /* pendantic: long for set_bit --RR */
-
-extern void show_stack(unsigned long* esp);
-
-static void show(char * str)
-{
-	int i;
-	int cpu = smp_processor_id();
-
-	printk("\n%s, CPU %d:\n", str, cpu);
-	printk("irq:  %d [",irqs_running());
-	for(i=0;i < NR_CPUS;i++)
-		printk(" %d",local_irq_count(i));
-	printk(" ]\nbh:   %d [",spin_is_locked(&global_bh_lock) ? 1 : 0);
-	for(i=0;i < NR_CPUS;i++)
-		printk(" %d",local_bh_count(i));
-	printk(" ]\nStack dumps:");
-	for(i = 0; i < NR_CPUS; i++) {
-		unsigned long esp;
-		if (i == cpu)
-			continue;
-		printk("\nCPU %d:",i);
-		esp = init_tss[i].esp0;
-		if (!esp) {
-			/* tss->esp0 is set to NULL in cpu_init(),
-			 * it's initialized when the cpu returns to user
-			 * space. -- manfreds
-			 */
-			printk(" <unknown> ");
-			continue;
-		}
-		esp &= ~(THREAD_SIZE-1);
-		esp += sizeof(struct thread_info);
-		show_stack((void*)esp);
-	}
-	printk("\nCPU %d:",cpu);
-	show_stack(NULL);
-	printk("\n");
-}
-
-#define MAXCOUNT 100000000
-
-/*
- * I had a lockup scenario where a tight loop doing
- * spin_unlock()/spin_lock() on CPU#1 was racing with
- * spin_lock() on CPU#0. CPU#0 should have noticed spin_unlock(), but
- * apparently the spin_unlock() information did not make it
- * through to CPU#0 ... nasty, is this by design, do we have to limit
- * 'memory update oscillation frequency' artificially like here?
- *
- * Such 'high frequency update' races can be avoided by careful design, but
- * some of our major constructs like spinlocks use similar techniques,
- * it would be nice to clarify this issue. Set this define to 0 if you
- * want to check whether your system freezes. I suspect the delay done
- * by SYNC_OTHER_CORES() is in correlation with 'snooping latency', but
- * i thought that such things are guaranteed by design, since we use
- * the 'LOCK' prefix.
- */
-#define SUSPECTED_CPU_OR_CHIPSET_BUG_WORKAROUND 0
-
-#if SUSPECTED_CPU_OR_CHIPSET_BUG_WORKAROUND
-# define SYNC_OTHER_CORES(x) udelay(x+1)
-#else
-/*
- * We have to allow irqs to arrive between __sti and __cli
- */
-# define SYNC_OTHER_CORES(x) __asm__ __volatile__ ("nop")
-#endif
-
-static inline void wait_on_irq(int cpu)
-{
-	int count = MAXCOUNT;
-
-	for (;;) {
-		/*
-		 * Wait until all interrupts are gone. Wait
-		 * for bottom half handlers unless we're
-		 * already executing in one..
-		 */
-		if (!irqs_running())
-			if (local_bh_count(cpu) || !spin_is_locked(&global_bh_lock))
-				break;
-
-		/* Duh, we have to loop. Release the lock to avoid deadlocks */
-		clear_bit(0,&global_irq_lock);
-
-		for (;;) {
-			if (!--count) {
-				show("wait_on_irq");
-				count = ~0;
-			}
-			__sti();
-			SYNC_OTHER_CORES(cpu);
-			__cli();
-			if (irqs_running())
-				continue;
-			if (global_irq_lock)
-				continue;
-			if (!local_bh_count(cpu) && spin_is_locked(&global_bh_lock))
-				continue;
-			if (!test_and_set_bit(0,&global_irq_lock))
-				break;
-		}
-	}
-}
-
-/*
- * This is called when we want to synchronize with
- * interrupts. We may for example tell a device to
- * stop sending interrupts: but to make sure there
- * are no interrupts that are executing on another
- * CPU we need to call this function.
- */
-void synchronize_irq(void)
-{
-	if (irqs_running()) {
-		/* Stupid approach */
-		cli();
-		sti();
-	}
-}
-
-static inline void get_irqlock(int cpu)
-{
-	if (test_and_set_bit(0,&global_irq_lock)) {
-		/* do we already hold the lock? */
-		if ((unsigned char) cpu == global_irq_holder)
-			return;
-		/* Uhhuh.. Somebody else got it. Wait.. */
-		do {
-			do {
-				rep_nop();
-			} while (test_bit(0,&global_irq_lock));
-		} while (test_and_set_bit(0,&global_irq_lock));
-	}
-	/*
-	 * We also to make sure that nobody else is running
-	 * in an interrupt context.
-	 */
-	wait_on_irq(cpu);
-
-	/*
-	 * Ok, finally..
-	 */
-	global_irq_holder = cpu;
-}
-
-#define EFLAGS_IF_SHIFT 9
-
-/*
- * A global "cli()" while in an interrupt context
- * turns into just a local cli(). Interrupts
- * should use spinlocks for the (very unlikely)
- * case that they ever want to protect against
- * each other.
- *
- * If we already have local interrupts disabled,
- * this will not turn a local disable into a
- * global one (problems with spinlocks: this makes
- * save_flags+cli+sti usable inside a spinlock).
- */
-void __global_cli(void)
-{
-	unsigned int flags;
-
-	__save_flags(flags);
-	if (flags & (1 << EFLAGS_IF_SHIFT)) {
-		int cpu;
-		__cli();
-		cpu = smp_processor_id();
-		if (!local_irq_count(cpu))
-			get_irqlock(cpu);
-	}
-}
-
-void __global_sti(void)
-{
-	int cpu = get_cpu();
-
-	if (!local_irq_count(cpu))
-		release_irqlock(cpu);
-	__sti();
-	put_cpu();
-}
-
-/*
- * SMP flags value to restore to:
- * 0 - global cli
- * 1 - global sti
- * 2 - local cli
- * 3 - local sti
- */
-unsigned long __global_save_flags(void)
-{
-	int retval;
-	int local_enabled;
-	unsigned long flags;
-	int cpu = smp_processor_id();
-
-	__save_flags(flags);
-	local_enabled = (flags >> EFLAGS_IF_SHIFT) & 1;
-	/* default to local */
-	retval = 2 + local_enabled;
-
-	/* check for global flags if we're not in an interrupt */
-	if (!local_irq_count(cpu)) {
-		if (local_enabled)
-			retval = 1;
-		if (global_irq_holder == cpu)
-			retval = 0;
-	}
-	return retval;
-}
-
-void __global_restore_flags(unsigned long flags)
-{
-	switch (flags) {
-	case 0:
-		__global_cli();
-		break;
-	case 1:
-		__global_sti();
-		break;
-	case 2:
-		__cli();
-		break;
-	case 3:
-		__sti();
-		break;
-	default:
-		printk("global_restore_flags: %08lx (%08lx)\n",
-			flags, (&flags)[-1]);
-	}
-}
+#if CONFIG_SMP
+inline void synchronize_irq(unsigned int irq)
+{
+	while (irq_desc[irq].status & IRQ_INPROGRESS)
+		cpu_relax();
+}
 #endif

 /*
@@ -439,12 +201,7 @@ void __global_restore_flags(unsigned long flags)
  */
 int handle_IRQ_event(unsigned int irq, struct pt_regs * regs, struct irqaction * action)
 {
-	int status;
-	int cpu = smp_processor_id();
-
-	irq_enter(cpu, irq);
-	status = 1;	/* Force the "do bottom halves" bit */
+	int status = 1;	/* Force the "do bottom halves" bit */

 	if (!(action->flags & SA_INTERRUPT))
 		__sti();
@@ -458,8 +215,6 @@ int handle_IRQ_event(unsigned int irq, struct pt_regs * regs, struct irqaction *
 		add_interrupt_randomness(irq);
 	__cli();

-	irq_exit(cpu, irq);
-
 	return status;
 }
@@ -511,13 +266,7 @@ inline void disable_irq_nosync(unsigned int irq)
 void disable_irq(unsigned int irq)
 {
 	disable_irq_nosync(irq);
-
-	if (!local_irq_count(smp_processor_id())) {
-		do {
-			barrier();
-			cpu_relax();
-		} while (irq_desc[irq].status & IRQ_INPROGRESS);
-	}
+	synchronize_irq(irq);
 }

 /**
@@ -581,6 +330,7 @@ asmlinkage unsigned int do_IRQ(struct pt_regs regs)
 	struct irqaction * action;
 	unsigned int status;

+	irq_enter();
 	kstat.irqs[cpu][irq]++;
 	spin_lock(&desc->lock);
 	desc->handler->ack(irq);
@@ -640,6 +390,8 @@ asmlinkage unsigned int do_IRQ(struct pt_regs regs)
 	desc->handler->end(irq);
 	spin_unlock(&desc->lock);

+	irq_exit();
+
 	if (softirq_pending(cpu))
 		do_softirq();
 	return 1;
@@ -768,13 +520,8 @@ void free_irq(unsigned int irq, void *dev_id)
 			}
 			spin_unlock_irqrestore(&desc->lock,flags);

-#ifdef CONFIG_SMP
 			/* Wait to make sure it's not being used on another CPU */
-			while (desc->status & IRQ_INPROGRESS) {
-				barrier();
-				cpu_relax();
-			}
-#endif
+			synchronize_irq(irq);
 			kfree(action);
 			return;
 		}
@@ -826,7 +573,7 @@ unsigned long probe_irq_on(void)
 	/* Wait for longstanding interrupts to trigger. */
 	for (delay = jiffies + HZ/50; time_after(delay, jiffies); )
-		/* about 20ms delay */ synchronize_irq();
+		/* about 20ms delay */ barrier();

 	/*
 	 * enable any unassigned irqs
@@ -849,7 +596,7 @@ unsigned long probe_irq_on(void)
 	 * Wait for spurious interrupts to trigger
 	 */
 	for (delay = jiffies + HZ/10; time_after(delay, jiffies); )
-		/* about 100ms delay */ synchronize_irq();
+		/* about 100ms delay */ barrier();

 	/*
 	 * Now filter out any obviously spurious interrupts
...
@@ -102,6 +102,12 @@ struct MCA_info {
 static struct MCA_info* mca_info = NULL;

+/*
+ * Motherboard register spinlock. Untested on SMP at the moment, but
+ * are there any MCA SMP boxes?
+ */
+static spinlock_t mca_lock = SPIN_LOCK_UNLOCKED;
+
 /* MCA registers */
 #define MCA_MOTHERBOARD_SETUP_REG	0x94
@@ -213,8 +219,11 @@ void __init mca_init(void)
 	}
 	memset(mca_info, 0, sizeof(struct MCA_info));

-	save_flags(flags);
-	cli();
+	/*
+	 * We do not expect many MCA interrupts during initialization,
+	 * but let us be safe:
+	 */
+	spin_lock_irq(&mca_lock);

 	/* Make sure adapter setup is off */
@@ -300,8 +309,7 @@ void __init mca_init(void)
 	outb_p(0, MCA_ADAPTER_SETUP_REG);

 	/* Enable interrupts and return memory start */
-	restore_flags(flags);
+	spin_unlock_irq(&mca_lock);

 	for (i = 0; i < MCA_STANDARD_RESOURCES; i++)
 		request_resource(&ioport_resource, mca_standard_resources + i);
@@ -514,8 +522,7 @@ unsigned char mca_read_pos(int slot, int reg)
 	if(slot < 0 || slot >= MCA_NUMADAPTERS || mca_info == NULL) return 0;
 	if(reg < 0 || reg >= 8) return 0;

-	save_flags(flags);
-	cli();
+	spin_lock_irqsave(&mca_lock, flags);

 	/* Make sure motherboard setup is off */
@@ -566,7 +573,7 @@ unsigned char mca_read_pos(int slot, int reg)
 	mca_info->slot[slot].pos[reg] = byte;

-	restore_flags(flags);
+	spin_unlock_irqrestore(&mca_lock, flags);

 	return byte;
 } /* mca_read_pos() */
@@ -610,8 +617,7 @@ void mca_write_pos(int slot, int reg, unsigned char byte)
 	if(mca_info == NULL)
 		return;

-	save_flags(flags);
-	cli();
+	spin_lock_irqsave(&mca_lock, flags);

 	/* Make sure motherboard setup is off */
@@ -623,7 +629,7 @@ void mca_write_pos(int slot, int reg, unsigned char byte)
 	outb_p(byte, MCA_POS_REG(reg));
 	outb_p(0, MCA_ADAPTER_SETUP_REG);

-	restore_flags(flags);
+	spin_unlock_irqrestore(&mca_lock, flags);

 	/* Update the global register list, while we have the byte */
...
@@ -78,7 +78,7 @@ int __init check_nmi_watchdog (void)
 	printk(KERN_INFO "testing NMI watchdog ... ");

 	memcpy(tmp, irq_stat, sizeof(tmp));
-	sti();
+	__sti();
 	mdelay((10*1000)/nmi_hz); // wait 10 ticks

 	for (cpu = 0; cpu < NR_CPUS; cpu++) {
...
@@ -290,7 +290,7 @@ void machine_real_restart(unsigned char *code, int length)
 {
 	unsigned long flags;

-	cli();
+	__cli();

 	/* Write zero to CMOS register number 0x0f, which the BIOS POST
 	   routine will recognize as telling it to do a proper reboot. (Well
...
@@ -1060,7 +1060,6 @@ void __init smp_boot_cpus(void)
 	boot_cpu_logical_apicid = logical_smp_processor_id();
 	map_cpu_to_boot_apicid(0, boot_cpu_apicid);

-	global_irq_holder = NO_PROC_ID;
 	current_thread_info()->cpu = 0;
 	smp_tune_scheduling();
...
@@ -571,6 +571,8 @@ static struct vm86_irqs {
 	struct task_struct *tsk;
 	int sig;
 } vm86_irqs[16];
+
+static spinlock_t irqbits_lock = SPIN_LOCK_UNLOCKED;
 static int irqbits;

 #define ALLOWED_SIGS ( 1 /* 0 = don't send a signal */ \
@@ -580,9 +582,8 @@ static int irqbits;
 static void irq_handler(int intno, void *dev_id, struct pt_regs * regs) {
 	int irq_bit;
 	unsigned long flags;

-	save_flags(flags);
-	cli();
+	spin_lock_irqsave(&irqbits_lock, flags);
 	irq_bit = 1 << intno;
 	if ((irqbits & irq_bit) || ! vm86_irqs[intno].tsk)
 		goto out;
@@ -591,14 +592,19 @@ static void irq_handler(int intno, void *dev_id, struct pt_regs * regs) {
 		send_sig(vm86_irqs[intno].sig, vm86_irqs[intno].tsk, 1);
 	/* else user will poll for IRQs */
out:
-	restore_flags(flags);
+	spin_unlock_irqrestore(&irqbits_lock, flags);
 }

 static inline void free_vm86_irq(int irqnumber)
 {
+	unsigned long flags;
+
 	free_irq(irqnumber,0);
 	vm86_irqs[irqnumber].tsk = 0;
+	spin_lock_irqsave(&irqbits_lock, flags);
 	irqbits &= ~(1 << irqnumber);
+	spin_unlock_irqrestore(&irqbits_lock, flags);
 }

 static inline int task_valid(struct task_struct *tsk)
@@ -635,11 +641,10 @@ static inline int get_and_reset_irq(int irqnumber)
 	if ( (irqnumber<3) || (irqnumber>15) ) return 0;
 	if (vm86_irqs[irqnumber].tsk != current) return 0;
-	save_flags(flags);
-	cli();
+	spin_lock_irqsave(&irqbits_lock, flags);
 	bit = irqbits & (1 << irqnumber);
 	irqbits &= ~bit;
-	restore_flags(flags);
+	spin_unlock_irqrestore(&irqbits_lock, flags);
 	return bit;
 }
...
@@ -107,27 +107,25 @@ extern spinlock_t timerlist_lock;
  */
 void bust_spinlocks(int yes)
 {
+	int loglevel_save = console_loglevel;
+
 	spin_lock_init(&timerlist_lock);
 	if (yes) {
 		oops_in_progress = 1;
-#ifdef CONFIG_SMP
-		global_irq_lock = 0;	/* Many serial drivers do __global_cli() */
-#endif
-	} else {
-		int loglevel_save = console_loglevel;
+		return;
+	}
 #ifdef CONFIG_VT
 	unblank_screen();
 #endif
 	oops_in_progress = 0;
 	/*
 	 * OK, the message is on the console.  Now we call printk()
 	 * without oops_in_progress set so that printk will give klogd
 	 * a poke.  Hold onto your hats...
 	 */
 	console_loglevel = 15;	/* NMI oopser may have shut the console up */
 	printk(" ");
 	console_loglevel = loglevel_save;
-	}
 }

 asmlinkage void do_invalid_op(struct pt_regs *, unsigned long);
...
@@ -184,7 +184,6 @@ int __init device_init(void)
 {
 	rwlock_init(&gendisk_lock);
 	blk_dev_init();
-	sti();
 #ifdef CONFIG_I2O
 	i2o_init();
 #endif
...
@@ -2041,9 +2041,6 @@ int __init blk_dev_init(void)
 #if defined(CONFIG_IDE) && defined(CONFIG_BLK_DEV_HD)
 	hd_init();
 #endif
-#if defined(__i386__)	/* Do we even need this? */
-	outb_p(0xc, 0x3f2);
-#endif
 	return 0;
 };
...
@@ -807,7 +807,7 @@ static void n_tty_set_termios(struct tty_struct *tty, struct termios * old)
 	    I_ICRNL(tty) || I_INLCR(tty) || L_ICANON(tty) ||
 	    I_IXON(tty) || L_ISIG(tty) || L_ECHO(tty) ||
 	    I_PARMRK(tty)) {
-		cli();
+		__cli();	// FIXME: is this safe?
 		memset(tty->process_char_map, 0, 256/8);

 		if (I_IGNCR(tty) || I_ICRNL(tty))
@@ -843,7 +843,7 @@ static void n_tty_set_termios(struct tty_struct *tty, struct termios * old)
 			set_bit(SUSP_CHAR(tty), tty->process_char_map);
 		}
 		clear_bit(__DISABLED_CHAR, tty->process_char_map);
-		sti();
+		__sti();	// FIXME: is this safe?
 		tty->raw = 0;
 		tty->real_raw = 0;
 	} else {
...
@@ -456,11 +456,12 @@ void do_tty_hangup(void *data)
 	}
 	file_list_unlock();

-	/* FIXME! What are the locking issues here? This may me overdoing things.. */
+	/* FIXME! What are the locking issues here? This may me overdoing things..
+	 * this question is especially important now that we've removed the irqlock. */
 	{
 		unsigned long flags;

-		save_flags(flags); cli();
+		__save_flags(flags); __cli();	// FIXME: is this safe?
 		if (tty->ldisc.flush_buffer)
 			tty->ldisc.flush_buffer(tty);
 		if (tty->driver.flush_buffer)
@@ -468,7 +469,7 @@ void do_tty_hangup(void *data)
 		if ((test_bit(TTY_DO_WRITE_WAKEUP, &tty->flags)) &&
 		    tty->ldisc.write_wakeup)
 			(tty->ldisc.write_wakeup)(tty);
-		restore_flags(flags);
+		__restore_flags(flags);	// FIXME: is this safe?
 	}

 	wake_up_interruptible(&tty->write_wait);
@@ -1900,7 +1901,7 @@ static void flush_to_ldisc(void *private_)
 		fp = tty->flip.flag_buf + TTY_FLIPBUF_SIZE;
 		tty->flip.buf_num = 0;

-		save_flags(flags); cli();
+		__save_flags(flags); __cli();	// FIXME: is this safe?
 		tty->flip.char_buf_ptr = tty->flip.char_buf;
 		tty->flip.flag_buf_ptr = tty->flip.flag_buf;
 	} else {
@@ -1908,13 +1909,13 @@ static void flush_to_ldisc(void *private_)
 		fp = tty->flip.flag_buf;
 		tty->flip.buf_num = 1;

-		save_flags(flags); cli();
+		__save_flags(flags); __cli();	// FIXME: is this safe?
 		tty->flip.char_buf_ptr = tty->flip.char_buf + TTY_FLIPBUF_SIZE;
 		tty->flip.flag_buf_ptr = tty->flip.flag_buf + TTY_FLIPBUF_SIZE;
 	}
 	count = tty->flip.count;
 	tty->flip.count = 0;
-	restore_flags(flags);
+	__restore_flags(flags);	// FIXME: is this safe?

 	tty->ldisc.receive_buf(tty, cp, fp, count);
 }
...
@@ -97,7 +97,7 @@ static void change_termios(struct tty_struct * tty, struct termios * new_termios
 	int canon_change;
 	struct termios old_termios = *tty->termios;

-	cli();
+	__cli();	// FIXME: is this safe?
 	*tty->termios = *new_termios;
 	unset_locked_termios(tty->termios, &old_termios, tty->termios_locked);
 	canon_change = (old_termios.c_lflag ^ tty->termios->c_lflag) & ICANON;
@@ -107,7 +107,7 @@ static void change_termios(struct tty_struct * tty, struct termios * new_termios
 		tty->canon_data = 0;
 		tty->erasing = 0;
 	}
-	sti();
+	__sti();	// FIXME: is this safe?
 	if (canon_change && !L_ICANON(tty) && tty->read_cnt)
 		/* Get characters left over from canonical mode. */
 		wake_up_interruptible(&tty->read_wait);
...
@@ -113,8 +113,8 @@ _kd_mksound(unsigned int hz, unsigned int ticks)
 	if (hz > 20 && hz < 32767)
 		count = 1193180 / hz;

-	save_flags(flags);
-	cli();
+	__save_flags(flags);	// FIXME: is this safe?
+	__cli();
 	del_timer(&sound_timer);
 	if (count) {
 		/* enable counter 2 */
@@ -131,7 +131,7 @@ _kd_mksound(unsigned int hz, unsigned int ticks)
 		}
 	} else
 		kd_nosound(0);
-	restore_flags(flags);
+	__restore_flags(flags);
 	return;
 }
...
@@ -1082,18 +1082,18 @@ int ide_unregister_subdriver(struct ata_device *drive)
 {
 	unsigned long flags;

-	save_flags(flags);	/* all CPUs */
-	cli();			/* all CPUs */
+	__save_flags(flags);	// FIXME: is this safe?
+	__cli();

 #if 0
 	if (__MOD_IN_USE(ata_ops(drive)->owner)) {
-		restore_flags(flags);
+		__restore_flags(flags);	// FIXME: is this safe?
 		return 1;
 	}
 #endif

 	if (drive->usage || drive->busy || !ata_ops(drive)) {
-		restore_flags(flags);	/* all CPUs */
+		__restore_flags(flags);	// FIXME: is this safe?
 		return 1;
 	}
@@ -1102,7 +1102,7 @@ int ide_unregister_subdriver(struct ata_device *drive)
 #endif
 	drive->driver = NULL;
-	restore_flags(flags);	/* all CPUs */
+	__restore_flags(flags);	// FIXME: is this safe?
 	return 0;
 }
...
@@ -1354,7 +1354,7 @@ static void speedo_tx_timeout(struct net_device *dev)
 			udelay(10);
 	/* Disable interrupts. */
 	outw(SCBMaskAll, ioaddr + SCBCmd);
-	synchronize_irq();
+	synchronize_irq(dev->irq);
 	speedo_tx_buffer_gc(dev);
 	/* Free as much as possible.
 	   It helps to recover from a hang because of out-of-memory.
...
@@ -13,11 +13,10 @@ extern spinlock_t kernel_flag;
 /*
  * Release global kernel lock and global interrupt lock
  */
-#define release_kernel_lock(task, cpu) \
+#define release_kernel_lock(task) \
 do { \
 	if (task->lock_depth >= 0) \
 		spin_unlock(&kernel_flag); \
-	release_irqlock(cpu); \
 	__sti(); \
 } while (0)
...
@@ -8,8 +8,6 @@
 /* assembly code in softirq.h is sensitive to the offsets of these fields */
 typedef struct {
 	unsigned int __softirq_pending;
-	unsigned int __local_irq_count;
-	unsigned int __local_bh_count;
 	unsigned int __syscall_count;
 	struct task_struct * __ksoftirqd_task; /* waitqueue is too large */
 	unsigned long idle_timestamp;
@@ -18,77 +16,27 @@ typedef struct {
 #include <linux/irq_cpustat.h>	/* Standard mappings for irq_cpustat_t above */

+#define IRQ_OFFSET 64
+
 /*
  * Are we in an interrupt context? Either doing bottom half
  * or hardware interrupt processing?
  */
-#define in_interrupt() ({ int __cpu = smp_processor_id(); \
-	(local_irq_count(__cpu) + local_bh_count(__cpu) != 0); })
-
-#define in_irq() (local_irq_count(smp_processor_id()) != 0)
-
-#ifndef CONFIG_SMP
-
-#define hardirq_trylock(cpu)	(local_irq_count(cpu) == 0)
-#define hardirq_endlock(cpu)	do { } while (0)
-
-#define irq_enter(cpu, irq)	(local_irq_count(cpu)++)
-#define irq_exit(cpu, irq)	(local_irq_count(cpu)--)
-
-#define synchronize_irq()	barrier()
-
-#define release_irqlock(cpu)	do { } while (0)
+#define in_interrupt() \
+	((preempt_count() & ~PREEMPT_ACTIVE) >= IRQ_OFFSET)
+
+#define in_irq in_interrupt
+
+#define hardirq_trylock()	(!in_interrupt())
+#define hardirq_endlock()	do { } while (0)
+
+#define irq_enter()		(preempt_count() += IRQ_OFFSET)
+#define irq_exit()		(preempt_count() -= IRQ_OFFSET)
+
+#ifndef CONFIG_SMP
+# define synchronize_irq(irq)	barrier()
 #else
-
-#include <asm/atomic.h>
-#include <asm/smp.h>
-
-extern unsigned char global_irq_holder;
-extern unsigned volatile long global_irq_lock; /* long for set_bit -RR */
-
-static inline int irqs_running (void)
-{
-	int i;
-
-	for (i = 0; i < NR_CPUS; i++)
-		if (local_irq_count(i))
-			return 1;
-	return 0;
-}
-
-static inline void release_irqlock(int cpu)
-{
-	/* if we didn't own the irq lock, just ignore.. */
-	if (global_irq_holder == (unsigned char) cpu) {
-		global_irq_holder = NO_PROC_ID;
-		clear_bit(0,&global_irq_lock);
-	}
-}
-
-static inline void irq_enter(int cpu, int irq)
-{
-	++local_irq_count(cpu);
-
-	while (test_bit(0,&global_irq_lock)) {
-		cpu_relax();
-	}
-}
-
-static inline void irq_exit(int cpu, int irq)
-{
-	--local_irq_count(cpu);
-}
-
-static inline int hardirq_trylock(int cpu)
-{
-	return !local_irq_count(cpu) && !test_bit(0,&global_irq_lock);
-}
-
-#define hardirq_endlock(cpu)	do { } while (0)
-
-extern void synchronize_irq(void);
-
+extern void synchronize_irq(unsigned int irq);
 #endif /* CONFIG_SMP */

 #endif /* __ASM_HARDIRQ_H */
@@ -12,15 +12,9 @@ extern spinlock_t kernel_flag;
 #ifdef CONFIG_SMP
 #define kernel_locked()		spin_is_locked(&kernel_flag)
-#define check_irq_holder(cpu) \
-do { \
-	if (global_irq_holder == (cpu)) \
-		BUG(); \
-} while(0)
 #else
 #ifdef CONFIG_PREEMPT
-#define kernel_locked()		preempt_get_count()
-#define check_irq_holder(cpu)	do { } while(0)
+#define kernel_locked()		preempt_count()
 #else
 #define kernel_locked()		1
 #endif
@@ -29,12 +23,10 @@ do { \
 /*
  * Release global kernel lock and global interrupt lock
  */
-#define release_kernel_lock(task, cpu) \
+#define release_kernel_lock(task) \
 do { \
-	if (unlikely(task->lock_depth >= 0)) { \
+	if (unlikely(task->lock_depth >= 0)) \
 		spin_unlock(&kernel_flag); \
-		check_irq_holder(cpu); \
-	} \
 } while (0)

 /*
...
 #ifndef __ASM_SOFTIRQ_H
 #define __ASM_SOFTIRQ_H

-#include <asm/atomic.h>
+#include <linux/preempt.h>
 #include <asm/hardirq.h>

-#define __cpu_bh_enable(cpu) \
-		do { barrier(); local_bh_count(cpu)--; preempt_enable(); } while (0)
-#define cpu_bh_disable(cpu) \
-		do { preempt_disable(); local_bh_count(cpu)++; barrier(); } while (0)
-
-#define local_bh_disable()	cpu_bh_disable(smp_processor_id())
-#define __local_bh_enable()	__cpu_bh_enable(smp_processor_id())
-
-#define in_softirq() (local_bh_count(smp_processor_id()) != 0)
-
-/*
- * NOTE: this assembly code assumes:
- *
- *    (char *)&local_bh_count - 8 == (char *)&softirq_pending
- *
- * If you change the offsets in irq_stat then you have to
- * update this code as well.
- */
-#define _local_bh_enable() \
-do { \
-	unsigned int *ptr = &local_bh_count(smp_processor_id()); \
-	\
-	barrier(); \
-	if (!--*ptr) \
-		__asm__ __volatile__ ( \
-			"cmpl $0, -8(%0);" \
-			"jnz 2f;" \
-			"1:;" \
-			\
-			LOCK_SECTION_START("") \
-			"2: pushl %%eax; pushl %%ecx; pushl %%edx;" \
-			"call %c1;" \
-			"popl %%edx; popl %%ecx; popl %%eax;" \
-			"jmp 1b;" \
-			LOCK_SECTION_END \
-			\
-			: /* no output */ \
-			: "r" (ptr), "i" (do_softirq) \
-			/* no registers clobbered */ ); \
-} while (0)
-
-#define local_bh_enable() do { _local_bh_enable(); preempt_enable(); } while (0)
+#define local_bh_disable() \
+		do { preempt_count() += IRQ_OFFSET; barrier(); } while (0)
+#define __local_bh_enable() \
+		do { barrier(); preempt_count() -= IRQ_OFFSET; } while (0)
+
+#define local_bh_enable() \
+do { \
+	if (unlikely((preempt_count() == IRQ_OFFSET) && \
+			softirq_pending(smp_processor_id()))) { \
+		__local_bh_enable(); \
+		do_softirq(); \
+		preempt_check_resched(); \
+	} else { \
+		__local_bh_enable(); \
+		preempt_check_resched(); \
+	} \
+} while (0)
+
+#define in_softirq() in_interrupt()

 #endif	/* __ASM_SOFTIRQ_H */
@@ -324,24 +324,14 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
 #define local_irq_disable()	__cli()
 #define local_irq_enable()	__sti()

-#ifdef CONFIG_SMP
-
-extern void __global_cli(void);
-extern void __global_sti(void);
-extern unsigned long __global_save_flags(void);
-extern void __global_restore_flags(unsigned long);
-#define cli() __global_cli()
-#define sti() __global_sti()
-#define save_flags(x) ((x)=__global_save_flags())
-#define restore_flags(x) __global_restore_flags(x)
-
-#else
-
-#define cli() __cli()
-#define sti() __sti()
-#define save_flags(x) __save_flags(x)
-#define restore_flags(x) __restore_flags(x)
-
+/*
+ * Compatibility macros - they will be removed after some time.
+ */
+#if !CONFIG_SMP
+# define sti()			__sti()
+# define cli()			__cli()
+# define save_flags(flags)	__save_flags(flags)
+# define restore_flags(flags)	__restore_flags(flags)
 #endif

 /*
...
@@ -29,8 +29,6 @@ extern irq_cpustat_t irq_stat[];	/* defined in asm/hardirq.h */
 /* arch independent irq_stat fields */
 #define softirq_pending(cpu)	__IRQ_STAT((cpu), __softirq_pending)
-#define local_irq_count(cpu)	__IRQ_STAT((cpu), __local_irq_count)
-#define local_bh_count(cpu)	__IRQ_STAT((cpu), __local_bh_count)
 #define syscall_count(cpu)	__IRQ_STAT((cpu), __syscall_count)
 #define ksoftirqd_task(cpu)	__IRQ_STAT((cpu), __ksoftirqd_task)
 /* arch dependent irq_stat fields */
...
+#ifndef __LINUX_PREEMPT_H
+#define __LINUX_PREEMPT_H
+
+#include <linux/config.h>
+
+#define preempt_count()	(current_thread_info()->preempt_count)
+
+#ifdef CONFIG_PREEMPT
+
+extern void preempt_schedule(void);
+
+#define preempt_disable() \
+do { \
+	preempt_count()++; \
+	barrier(); \
+} while (0)
+
+#define preempt_enable_no_resched() \
+do { \
+	preempt_count()--; \
+	barrier(); \
+} while (0)
+
+#define preempt_enable() \
+do { \
+	preempt_enable_no_resched(); \
+	if (unlikely(test_thread_flag(TIF_NEED_RESCHED))) \
+		preempt_schedule(); \
+} while (0)
+
+#define preempt_check_resched() \
+do { \
+	if (unlikely(test_thread_flag(TIF_NEED_RESCHED))) \
+		preempt_schedule(); \
+} while (0)
+
+#else
+
+#define preempt_disable()		do { } while (0)
+#define preempt_enable_no_resched()	do {} while(0)
+#define preempt_enable()		do { } while (0)
+#define preempt_check_resched()		do { } while (0)
+
+#endif
+
+#endif /* __LINUX_PREEMPT_H */
@@ -7,7 +7,7 @@
 #define lock_kernel()				do { } while(0)
 #define unlock_kernel()				do { } while(0)
-#define release_kernel_lock(task, cpu)		do { } while(0)
+#define release_kernel_lock(task)		do { } while(0)
 #define reacquire_kernel_lock(task)		do { } while(0)
 #define kernel_locked()				1
...
@@ -2,6 +2,7 @@
 #define __LINUX_SPINLOCK_H

 #include <linux/config.h>
+#include <linux/preempt.h>
 #include <linux/linkage.h>
 #include <linux/compiler.h>
 #include <linux/thread_info.h>
@@ -120,36 +121,6 @@
 #ifdef CONFIG_PREEMPT

-asmlinkage void preempt_schedule(void);
-
-#define preempt_get_count() (current_thread_info()->preempt_count)
-
-#define preempt_disable() \
-do { \
-	++current_thread_info()->preempt_count; \
-	barrier(); \
-} while (0)
-
-#define preempt_enable_no_resched() \
-do { \
-	--current_thread_info()->preempt_count; \
-	barrier(); \
-} while (0)
-
-#define preempt_enable() \
-do { \
-	--current_thread_info()->preempt_count; \
-	barrier(); \
-	if (unlikely(test_thread_flag(TIF_NEED_RESCHED))) \
-		preempt_schedule(); \
-} while (0)
-
-#define preempt_check_resched() \
-do { \
-	if (unlikely(test_thread_flag(TIF_NEED_RESCHED))) \
-		preempt_schedule(); \
-} while (0)
-
 #define spin_lock(lock) \
 do { \
 	preempt_disable(); \
@@ -179,12 +150,6 @@ do { \
 #else

-#define preempt_get_count()		(0)
-#define preempt_disable()		do { } while (0)
-#define preempt_enable_no_resched()	do {} while(0)
-#define preempt_enable()		do { } while (0)
-#define preempt_check_resched()		do { } while (0)
-
 #define spin_lock(lock)		_raw_spin_lock(lock)
 #define spin_trylock(lock)	_raw_spin_trylock(lock)
 #define spin_unlock(lock)	_raw_spin_unlock(lock)
...
@@ -373,7 +373,7 @@ asmlinkage void __init start_kernel(void)
 	}
 	kmem_cache_init();
-	sti();
+	__sti();
 	calibrate_delay();
 #ifdef CONFIG_BLK_DEV_INITRD
 	if (initrd_start && !initrd_below_start_ok &&
...
@@ -530,10 +530,10 @@ NORET_TYPE void do_exit(long code)
 	tsk->flags |= PF_EXITING;
 	del_timer_sync(&tsk->real_timer);

-	if (unlikely(preempt_get_count()))
+	if (unlikely(preempt_count()))
 		printk(KERN_INFO "note: %s[%d] exited with preempt_count %d\n",
 				current->comm, current->pid,
-				preempt_get_count());
+				preempt_count());

fake_volatile:
 	acct_process(code);
...
@@ -94,7 +94,7 @@ NORET_TYPE void panic(const char * fmt, ...)
 #if defined(CONFIG_ARCH_S390)
 	disabled_wait(caller);
 #endif
-	sti();
+	__sti();
 	for(;;) {
 		CHECK_EMERGENCY_SYNC
 	}
...
@@ -727,7 +727,8 @@ void scheduler_tick(int user_tick, int system)
 	task_t *p = current;

 	if (p == rq->idle) {
-		if (local_bh_count(cpu) || local_irq_count(cpu) > 1)
+		/* note: this timer irq context must be accounted for as well */
+		if (preempt_count() >= 2*IRQ_OFFSET)
 			kstat.per_cpu_system[cpu] += system;
 #if CONFIG_SMP
 		idle_tick();
@@ -816,7 +817,7 @@ asmlinkage void schedule(void)
 	prev = current;
 	rq = this_rq();

-	release_kernel_lock(prev, smp_processor_id());
+	release_kernel_lock(prev);
 	prepare_arch_schedule(prev);
 	prev->sleep_timestamp = jiffies;
 	spin_lock_irq(&rq->lock);
@@ -825,7 +826,7 @@ asmlinkage void schedule(void)
 	 * if entering off of a kernel preemption go straight
 	 * to picking the next task.
 	 */
-	if (unlikely(preempt_get_count() & PREEMPT_ACTIVE))
+	if (unlikely(preempt_count() & PREEMPT_ACTIVE))
 		goto pick_next_task;

 	switch (prev->state) {
@@ -1694,7 +1695,9 @@ void __init init_idle(task_t *idle, int cpu)
 	__restore_flags(flags);

 	/* Set the preempt count _outside_ the spinlocks! */
+#if CONFIG_PREEMPT
 	idle->thread_info->preempt_count = (idle->lock_depth >= 0);
+#endif
 }

 extern void init_timervecs(void);
...
@@ -61,17 +61,17 @@ static inline void wakeup_softirqd(unsigned cpu)
 asmlinkage void do_softirq()
 {
-	int cpu;
 	__u32 pending;
 	long flags;
 	__u32 mask;
+	int cpu;

 	if (in_interrupt())
 		return;

 	local_irq_save(flags);

 	cpu = smp_processor_id();
 	pending = softirq_pending(cpu);

 	if (pending) {
@@ -111,7 +111,7 @@ asmlinkage void do_softirq()
 }

 /*
- * This function must run with irq disabled!
+ * This function must run with irqs disabled!
  */
 inline void cpu_raise_softirq(unsigned int cpu, unsigned int nr)
 {
@@ -126,7 +126,7 @@ inline void cpu_raise_softirq(unsigned int cpu, unsigned int nr)
 	 * Otherwise we wake up ksoftirqd to make sure we
 	 * schedule the softirq soon.
 	 */
-	if (!(local_irq_count(cpu) | local_bh_count(cpu)))
+	if (!in_interrupt())
 		wakeup_softirqd(cpu);
 }
@@ -290,22 +290,16 @@ spinlock_t global_bh_lock = SPIN_LOCK_UNLOCKED;
 static void bh_action(unsigned long nr)
 {
-	int cpu = smp_processor_id();
-
 	if (!spin_trylock(&global_bh_lock))
 		goto resched;

-	if (!hardirq_trylock(cpu))
-		goto resched_unlock;
-
 	if (bh_base[nr])
 		bh_base[nr]();

-	hardirq_endlock(cpu);
+	hardirq_endlock();
 	spin_unlock(&global_bh_lock);
 	return;

-resched_unlock:
-	spin_unlock(&global_bh_lock);
resched:
 	mark_bh(nr);
...
@@ -318,7 +318,7 @@ void __kfree_skb(struct sk_buff *skb)
 	dst_release(skb->dst);

 	if(skb->destructor) {
-		if (in_irq())
+		if (0 && in_irq())
 			printk(KERN_WARNING "Warning: kfree_skb on "
 					"hard IRQ %p\n", NET_CALLER(skb));
 		skb->destructor(skb);
...
@@ -1104,7 +1104,7 @@ static int snd_intel8x0_free(intel8x0_t *chip)
 	outb(ICH_RESETREGS, ICHREG(chip, PO_CR));
 	outb(ICH_RESETREGS, ICHREG(chip, MC_CR));
 	/* --- */
-	synchronize_irq();
+	synchronize_irq(chip->irq);
__hw_end:
 	if (chip->bdbars)
 		snd_free_pci_pages(chip->pci, 3 * sizeof(u32) * ICH_MAX_FRAGS * 2, chip->bdbars, chip->bdbars_addr);
@@ -1335,7 +1335,7 @@ static int __devinit snd_intel8x0_create(snd_card_t * card,
 	}
 	chip->irq = pci->irq;
 	pci_set_master(pci);
-	synchronize_irq();
+	synchronize_irq(chip->irq);

 	/* initialize offsets */
 	chip->reg_pi_sr = ICH_REG_PI_SR;
...