Commit 9609bfec authored by Christoph Hellwig, committed by Kyle McMartin

parisc: convert to asm-generic/hardirq.h

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Kyle McMartin <kyle@mcmartin.ca>
parent f32ed395
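
For reference, the generic header being adopted supplies the boilerplate the parisc copy duplicated: a generic irq_cpustat_t plus a default ack_bad_irq(), which is why both parisc-private definitions are deleted below. The following is an approximate reconstruction from memory of the 2009-era include/asm-generic/hardirq.h, not the verbatim file at this commit:

    /* Approximate reconstruction of include/asm-generic/hardirq.h
     * circa 2009; consult the tree at this commit for the exact text. */
    #ifndef __ASM_GENERIC_HARDIRQ_H
    #define __ASM_GENERIC_HARDIRQ_H

    #include <linux/cache.h>
    #include <linux/threads.h>

    typedef struct {
    	unsigned int __softirq_pending;
    } ____cacheline_aligned irq_cpustat_t;

    #include <linux/irq_cpustat.h>	/* Standard mappings for irq_cpustat_t above */
    #include <linux/irq.h>

    /* Default handler for spurious interrupts, overridable per-arch. */
    #ifndef ack_bad_irq
    static inline void ack_bad_irq(unsigned int irq)
    {
    	printk(KERN_CRIT "unexpected IRQ trap at vector %02x\n", irq);
    }
    #endif

    #endif /* __ASM_GENERIC_HARDIRQ_H */

One subtle change worth noting: the generic irq_cpustat_t uses an unsigned int for __softirq_pending, whereas the parisc version removed below used an unsigned long.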
arch/parisc/include/asm/hardirq.h
@@ -1,29 +1,11 @@
 /* hardirq.h: PA-RISC hard IRQ support.
  *
  * Copyright (C) 2001 Matthew Wilcox <matthew@wil.cx>
- *
- * The locking is really quite interesting. There's a cpu-local
- * count of how many interrupts are being handled, and a global
- * lock. An interrupt can only be serviced if the global lock
- * is free. You can't be sure no more interrupts are being
- * serviced until you've acquired the lock and then checked
- * all the per-cpu interrupt counts are all zero. It's a specialised
- * br_lock, and that's exactly how Sparc does it. We don't because
- * it's more locking for us. This way is lock-free in the interrupt path.
  */
 
 #ifndef _PARISC_HARDIRQ_H
 #define _PARISC_HARDIRQ_H
 
-#include <linux/threads.h>
-#include <linux/irq.h>
-
-typedef struct {
-	unsigned long __softirq_pending; /* set_bit is used on this */
-} ____cacheline_aligned irq_cpustat_t;
-
-#include <linux/irq_cpustat.h>	/* Standard mappings for irq_cpustat_t above */
-
-void ack_bad_irq(unsigned int irq);
+#include <asm-generic/hardirq.h>
 
 #endif /* _PARISC_HARDIRQ_H */
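
The locking comment deleted above describes a concrete scheme: a per-cpu count of in-service interrupts plus a global lock, where an interrupt may only be serviced while the lock is free, and a writer knows all interrupts have drained once it holds the lock and every per-cpu count reads zero. A minimal user-space sketch of that idea using C11 atomics; all names here (irq_enter, irq_exit, quiesce_irqs, the NR_CPUS value) are hypothetical illustrations, not kernel APIs:

    /* Illustrative sketch only, not kernel code: the br_lock-style
     * scheme the removed comment describes. */
    #include <stdatomic.h>
    #include <stdbool.h>

    #define NR_CPUS 4

    static atomic_int irq_count[NR_CPUS];	/* per-cpu: IRQs in flight */
    static atomic_bool global_lock;		/* writer-side lock */

    /* Interrupt entry: lock-free fast path, as the comment claims. */
    static bool irq_enter(int cpu)
    {
    	if (atomic_load(&global_lock))
    		return false;		/* lock held: refuse service */
    	atomic_fetch_add(&irq_count[cpu], 1);
    	/* re-check: a writer may have taken the lock meanwhile */
    	if (atomic_load(&global_lock)) {
    		atomic_fetch_sub(&irq_count[cpu], 1);
    		return false;
    	}
    	return true;
    }

    static void irq_exit(int cpu)
    {
    	atomic_fetch_sub(&irq_count[cpu], 1);
    }

    /* Writer: take the lock, then wait for every per-cpu count to
     * drain to zero; only then is no interrupt being serviced. */
    static void quiesce_irqs(void)
    {
    	bool expected = false;
    	while (!atomic_compare_exchange_weak(&global_lock, &expected, true))
    		expected = false;
    	for (int cpu = 0; cpu < NR_CPUS; cpu++)
    		while (atomic_load(&irq_count[cpu]))
    			;		/* spin until that CPU drains */
    }

As the comment notes, parisc chose the reader-cheap trade-off: the interrupt path stays lock-free, at the cost of a more expensive writer-side quiesce.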
arch/parisc/kernel/irq.c
@@ -423,8 +423,3 @@ void __init init_IRQ(void)
 
 	set_eiem(cpu_eiem);	/* EIEM : enable all external intr */
 }
-
-void ack_bad_irq(unsigned int irq)
-{
-	printk(KERN_WARNING "unexpected IRQ %d\n", irq);
-}