Commit f8438e65, authored Jul 26, 2002 by Anton Blanchard
ppc64: 2.5.28 update
Parent: 2f79e495

Showing 6 changed files with 97 additions and 48 deletions (+97, -48):
    arch/ppc64/kernel/irq.c            +5   -5
    arch/ppc64/kernel/time.c           +0   -3
    include/asm-ppc64/hardirq.h        +75  -10
    include/asm-ppc64/softirq.h        +8   -16
    include/asm-ppc64/system.h         +6   -14
    include/asm-ppc64/thread_info.h    +3   -0
arch/ppc64/kernel/irq.c

@@ -169,6 +169,10 @@ setup_irq(unsigned int irq, struct irqaction * new)
 
 inline void synchronize_irq(unsigned int irq)
 {
+	/* is there anything to synchronize with? */
+	if (!irq_desc[irq].action)
+		return;
+
 	while (irq_desc[irq].status & IRQ_INPROGRESS) {
 		barrier();
 		cpu_relax();

@@ -567,7 +571,6 @@ void ppc_irq_dispatch_handler(struct pt_regs *regs, int irq)
 
 int do_IRQ(struct pt_regs *regs)
 {
-	int cpu = smp_processor_id();
 	int irq, first = 1;
 #ifdef CONFIG_PPC_ISERIES
 	struct paca_struct *lpaca;

@@ -615,9 +618,6 @@ int do_IRQ(struct pt_regs *regs)
 	}
 #endif
 
-	if (softirq_pending(cpu))
-		do_softirq();
-
 	return 1;	/* lets ret_from_int know we can do checks */
 }
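Not part of the commit, but for context: with the added check, synchronize_irq() now returns immediately when no handler is registered for the line, instead of inspecting its status at all. A hedged sketch of the caller-side pattern this serves; struct mydev, dev->irq and mydev_stop_hw() are purely illustrative names, not from this commit:

/* Illustrative teardown path for a hypothetical driver. */
static void mydev_shutdown(struct mydev *dev)
{
	mydev_stop_hw(dev);		/* ask the hardware to stop raising IRQs */
	synchronize_irq(dev->irq);	/* wait for a handler that may still be
					 * running on another CPU; returns at once
					 * if no action is installed for dev->irq */
	free_irq(dev->irq, dev);	/* now safe to tear down handler state */
}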
arch/ppc64/kernel/time.c

@@ -293,9 +293,6 @@ int timer_interrupt(struct pt_regs * regs)
 	irq_exit();
 
-	if (softirq_pending(cpu))
-		do_softirq();
-
 	return 1;
 }
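Both this removal and the one in do_IRQ() above follow from the reworked irq_exit() in include/asm-ppc64/hardirq.h below: running pending softirqs is now part of leaving hardirq context, so individual entry points no longer call do_softirq() themselves. A sketch of the resulting shape of an interrupt entry path, for illustration only (the real code is the do_IRQ()/timer_interrupt() touched by this commit, and handle_one_interrupt() is a made-up name):

void example_interrupt_entry(struct pt_regs *regs)
{
	irq_enter();			/* bump the hardirq field of preempt_count */
	handle_one_interrupt(regs);	/* hypothetical device or decrementer work */
	irq_exit();			/* drop the hardirq field and, once we are no
					 * longer in_interrupt(), run do_softirq()
					 * for anything pending on this CPU */
}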
include/asm-ppc64/hardirq.h

@@ -13,30 +13,86 @@
 #include <linux/preempt.h>
 
 typedef struct {
-	unsigned long __softirq_pending;
-	unsigned long __syscall_count;
+	unsigned int __softirq_pending;
+	unsigned int __syscall_count;
 	struct task_struct * __ksoftirqd_task;
 	unsigned long idle_timestamp;
 } ____cacheline_aligned irq_cpustat_t;
 
 #include <linux/irq_cpustat.h>	/* Standard mappings for irq_cpustat_t above */
 
-#define IRQ_OFFSET	64
+/*
+ * We put the hardirq and softirq counter into the preemption
+ * counter. The bitmask has the following meaning:
+ *
+ * - bits 0-7 are the preemption count (max preemption depth: 256)
+ * - bits 8-15 are the softirq count (max # of softirqs: 256)
+ * - bits 16-24 are the hardirq count (max # of hardirqs: 512)
+ *
+ * - ( bit 26 is the PREEMPT_ACTIVE flag. )
+ *
+ * PREEMPT_MASK: 0x000000ff
+ * SOFTIRQ_MASK: 0x0000ff00
+ * HARDIRQ_MASK: 0x01ff0000
+ */
+
+#define PREEMPT_BITS	8
+#define SOFTIRQ_BITS	8
+#define HARDIRQ_BITS	9
+
+#define PREEMPT_SHIFT	0
+#define SOFTIRQ_SHIFT	(PREEMPT_SHIFT + PREEMPT_BITS)
+#define HARDIRQ_SHIFT	(SOFTIRQ_SHIFT + SOFTIRQ_BITS)
+
+#define __HARDIRQ_MASK(x)	((1UL << (x))-1)
+
+#define PREEMPT_MASK	(__HARDIRQ_MASK(PREEMPT_BITS) << PREEMPT_SHIFT)
+#define SOFTIRQ_MASK	(__HARDIRQ_MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT)
+#define HARDIRQ_MASK	(__HARDIRQ_MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT)
+
+#define hardirq_count()	(preempt_count() & HARDIRQ_MASK)
+#define softirq_count()	(preempt_count() & SOFTIRQ_MASK)
+#define irq_count()	(preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK))
+
+#define PREEMPT_OFFSET	(1UL << PREEMPT_SHIFT)
+#define SOFTIRQ_OFFSET	(1UL << SOFTIRQ_SHIFT)
+#define HARDIRQ_OFFSET	(1UL << HARDIRQ_SHIFT)
 
 /*
- * Are we in an interrupt context? Either doing bottom half
- * or hardware interrupt processing?
+ * The hardirq mask has to be large enough to have
+ * space for potentially all IRQ sources in the system
+ * nesting on a single CPU:
  */
-#define in_interrupt() \
-	((preempt_count() & ~PREEMPT_ACTIVE) >= IRQ_OFFSET)
+#if (1 << HARDIRQ_BITS) < NR_IRQS
+# error HARDIRQ_BITS is too low!
+#endif
 
-#define in_irq in_interrupt
+/*
+ * Are we doing bottom half or hardware interrupt processing?
+ * Are we in a softirq context? Interrupt context?
+ */
+#define in_irq()		(hardirq_count())
+#define in_softirq()		(softirq_count())
+#define in_interrupt()		(irq_count())
 
 #define hardirq_trylock()	(!in_interrupt())
 #define hardirq_endlock()	do { } while (0)
 
-#define irq_enter()		(preempt_count() += IRQ_OFFSET)
-#define irq_exit()		(preempt_count() -= IRQ_OFFSET)
+#define irq_enter()		(preempt_count() += HARDIRQ_OFFSET)
+
+#if CONFIG_PREEMPT
+# define IRQ_EXIT_OFFSET (HARDIRQ_OFFSET-1)
+#else
+# define IRQ_EXIT_OFFSET HARDIRQ_OFFSET
+#endif
+
+#define irq_exit()							\
+do {									\
+	preempt_count() -= IRQ_EXIT_OFFSET;				\
+	if (!in_interrupt() && softirq_pending(smp_processor_id()))	\
+		do_softirq();						\
+	preempt_enable_no_resched();					\
+} while (0)
 
 #ifndef CONFIG_SMP
 # define synchronize_irq(irq)	barrier()

@@ -45,4 +101,13 @@ typedef struct {
 #endif /* CONFIG_SMP */
 
 #endif /* __KERNEL__ */
+
+#define show_stack(SP) \
+	do { \
+		if (SP) \
+			print_backtrace(SP); \
+		else \
+			print_backtrace(_get_SP()); \
+	} while (0)
+
 #endif /* __ASM_HARDIRQ_H */
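To make the new bit layout concrete, here is a small standalone program (userspace C, not kernel code) that reuses the same arithmetic as the header above and decomposes a sample count; the sample value and the program itself are illustrative only:

#include <stdio.h>

/* Same arithmetic as the new include/asm-ppc64/hardirq.h above. */
#define PREEMPT_BITS	8
#define SOFTIRQ_BITS	8
#define HARDIRQ_BITS	9

#define PREEMPT_SHIFT	0
#define SOFTIRQ_SHIFT	(PREEMPT_SHIFT + PREEMPT_BITS)
#define HARDIRQ_SHIFT	(SOFTIRQ_SHIFT + SOFTIRQ_BITS)

#define __HARDIRQ_MASK(x)	((1UL << (x))-1)

#define PREEMPT_MASK	(__HARDIRQ_MASK(PREEMPT_BITS) << PREEMPT_SHIFT)
#define SOFTIRQ_MASK	(__HARDIRQ_MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT)
#define HARDIRQ_MASK	(__HARDIRQ_MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT)

#define SOFTIRQ_OFFSET	(1UL << SOFTIRQ_SHIFT)
#define HARDIRQ_OFFSET	(1UL << HARDIRQ_SHIFT)

int main(void)
{
	/* Arbitrary example: preempt depth 2, one softirq, one hardirq. */
	unsigned long count = 2 + SOFTIRQ_OFFSET + HARDIRQ_OFFSET;

	printf("PREEMPT_MASK = 0x%08lx\n", PREEMPT_MASK);	/* 0x000000ff */
	printf("SOFTIRQ_MASK = 0x%08lx\n", SOFTIRQ_MASK);	/* 0x0000ff00 */
	printf("HARDIRQ_MASK = 0x%08lx\n", HARDIRQ_MASK);	/* 0x01ff0000 */

	printf("preempt depth = %lu\n",  count & PREEMPT_MASK);
	printf("softirq count = %lu\n", (count & SOFTIRQ_MASK) >> SOFTIRQ_SHIFT);
	printf("hardirq count = %lu\n", (count & HARDIRQ_MASK) >> HARDIRQ_SHIFT);
	return 0;
}

With the fields packed this way, the new hardirq_count(), softirq_count() and irq_count() helpers reduce in_irq(), in_softirq() and in_interrupt() to simple mask tests on a single word.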
include/asm-ppc64/softirq.h

@@ -12,24 +12,16 @@
 #include <asm/hardirq.h>
 
 #define local_bh_disable() \
-		do { preempt_count() += IRQ_OFFSET; barrier(); } while (0)
+		do { preempt_count() += SOFTIRQ_OFFSET; barrier(); } while (0)
 #define __local_bh_enable() \
-		do { barrier(); preempt_count() -= IRQ_OFFSET; } while (0)
+		do { barrier(); preempt_count() -= SOFTIRQ_OFFSET; } while (0)
 
 #define local_bh_enable() \
 do { \
-	if (unlikely((preempt_count() == IRQ_OFFSET) && \
-		     softirq_pending(smp_processor_id()))) { \
-		__local_bh_enable(); \
-		do_softirq(); \
-		preempt_check_resched(); \
-	} else { \
-		__local_bh_enable(); \
-		preempt_check_resched(); \
-	} \
+	__local_bh_enable(); \
+	if (unlikely(!in_interrupt() && softirq_pending(smp_processor_id()))) \
+		do_softirq(); \
+	preempt_check_resched(); \
 } while (0)
 
-#define in_softirq() in_interrupt()
-
 #endif /* __ASM_SOFTIRQ_H */
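A standalone illustration (again not kernel code) of why switching the bottom-half macros from IRQ_OFFSET to SOFTIRQ_OFFSET lets them nest within the shared count: the per-thread preempt_count is simulated as a plain variable and the barriers are omitted.

#include <stdio.h>

#define SOFTIRQ_SHIFT	8
#define SOFTIRQ_MASK	(0xffUL << SOFTIRQ_SHIFT)
#define SOFTIRQ_OFFSET	(1UL << SOFTIRQ_SHIFT)

static unsigned long preempt_count;	/* stand-in for the real per-thread count */

#define local_bh_disable()	(preempt_count += SOFTIRQ_OFFSET)
#define __local_bh_enable()	(preempt_count -= SOFTIRQ_OFFSET)
#define in_softirq()		(preempt_count & SOFTIRQ_MASK)

int main(void)
{
	local_bh_disable();
	local_bh_disable();	/* nested disable: softirq field goes to 2 */
	printf("softirq count = %lu\n", (preempt_count & SOFTIRQ_MASK) >> SOFTIRQ_SHIFT);

	__local_bh_enable();
	printf("in_softirq after inner enable: %s\n", in_softirq() ? "yes" : "no");

	__local_bh_enable();
	printf("in_softirq after outer enable: %s\n", in_softirq() ? "yes" : "no");
	return 0;
}

In the real local_bh_enable() above, the pending-softirq check only fires once in_interrupt() is false, i.e. once both the softirq and hardirq fields of preempt_count have drained.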
include/asm-ppc64/system.h

@@ -88,26 +88,18 @@ struct task_struct;
 extern void __switch_to(struct task_struct *, struct task_struct *);
 #define switch_to(prev, next, last)	__switch_to((prev), (next))
 
-#define prepare_arch_schedule(prev)	do { } while(0)
-#define finish_arch_schedule(prev)	do { } while(0)
-#define prepare_arch_switch(rq)		do { } while(0)
-#define finish_arch_switch(rq)		spin_unlock_irq(&(rq)->lock)
-
 struct thread_struct;
 extern void _switch(struct thread_struct *prev, struct thread_struct *next);
 
 struct pt_regs;
 extern void dump_regs(struct pt_regs *);
 
-#ifndef CONFIG_SMP
-#define cli()			local_irq_disable()
-#define sti()			local_irq_enable()
-#define save_flags(flags)	local_save_flags(flags)
-#define restore_flags(flags)	local_irq_restore(flags)
-#define save_and_cli(flags)	local_irq_save(flags)
-#endif /* !CONFIG_SMP */
+#define irqs_disabled()			\
+({					\
+	unsigned long flags;		\
+	local_save_flags(flags);	\
+	!(flags & MSR_EE);		\
+})
 
 static __inline__ int __is_processor(unsigned long pv)
 {
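The new irqs_disabled() simply tests the MSR external-interrupt-enable bit (MSR_EE) in the locally saved flags. For illustration only, with a hypothetical helper name that is not part of this commit, it enables the usual kind of sanity check:

/* Hypothetical debugging helper, not from this commit. */
static void mydrv_assert_irqs_on(void)
{
	if (irqs_disabled())
		printk(KERN_WARNING "mydrv: called with interrupts disabled\n");
}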
include/asm-ppc64/thread_info.h

@@ -27,6 +27,8 @@ struct thread_info {
 
 /*
  * macros/functions for gaining access to the thread information structure
+ *
+ * preempt_count needs to be 1 initially, until the scheduler is functional.
  */
 #define INIT_THREAD_INFO(tsk)			\
 {						\

@@ -34,6 +36,7 @@ struct thread_info {
 	exec_domain:	&default_exec_domain,	\
 	flags:		0,			\
 	cpu:		0,			\
+	preempt_count:	1,			\
 }
 
 #define init_thread_info	(init_thread_union.thread_info)