Commit 7f268f43 authored Jan 15, 2009 by Ingo Molnar
Merge branches 'cpus4096', 'x86/cleanups' and 'x86/urgent' into x86/percpu
parents a6525042 54da5b3d b6659679 a08c4743
Showing 92 changed files with 890 additions and 593 deletions (+890 -593)
Documentation/cputopology.txt                 +3   -3
arch/alpha/kernel/irq.c                       +1   -1
arch/arm/kernel/irq.c                         +12  -6
arch/arm/oprofile/op_model_mpcore.c           +1   -1
arch/blackfin/kernel/irqchip.c                +5   -0
arch/ia64/kernel/iosapic.c                    +1   -1
arch/ia64/kernel/irq.c                        +2   -2
arch/ia64/kernel/irq_ia64.c                   +8   -4
arch/ia64/kernel/msi_ia64.c                   +2   -2
arch/ia64/sn/kernel/msi_sn.c                  +1   -1
arch/mips/include/asm/irq.h                   +1   -1
arch/mips/kernel/irq-gic.c                    +1   -1
arch/mips/kernel/smtc.c                       +4   -2
arch/mips/mti-malta/malta-smtc.c              +3   -2
arch/mips/sgi-ip22/ip22-int.c                 +1   -1
arch/mips/sgi-ip22/ip22-time.c                +1   -1
arch/mips/sibyte/bcm1480/smp.c                +2   -1
arch/mips/sibyte/sb1250/smp.c                 +2   -1
arch/mn10300/kernel/mn10300-watchdog.c        +2   -1
arch/parisc/kernel/irq.c                      +4   -4
arch/powerpc/kernel/irq.c                     +1   -1
arch/powerpc/platforms/pseries/xics.c         +3   -2
arch/powerpc/sysdev/mpic.c                    +2   -1
arch/sparc/kernel/irq_64.c                    +3   -2
arch/sparc/kernel/time_64.c                   +1   -1
arch/x86/include/asm/apicnum.h                +12  -0
arch/x86/include/asm/bitops.h                 +10  -4
arch/x86/include/asm/cpu.h                    +21  -0
arch/x86/include/asm/cpumask.h                +28  -0
arch/x86/include/asm/hardirq_32.h             +3   -0
arch/x86/include/asm/io_apic.h                +2   -24
arch/x86/include/asm/irq_vectors.h            +8   -5
arch/x86/include/asm/mpspec_def.h             +12  -11
arch/x86/include/asm/paravirt.h               +5   -3
arch/x86/include/asm/smp.h                    +1   -40
arch/x86/include/asm/tlbflush.h               +6   -4
arch/x86/include/asm/uv/uv_bau.h              +2   -1
arch/x86/kernel/acpi/boot.c                   +47  -49
arch/x86/kernel/apic.c                        +10  -1
arch/x86/kernel/cpu/common.c                  +2   -0
arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c    +0   -2
arch/x86/kernel/cpu/intel_cacheinfo.c         +44  -19
arch/x86/kernel/cpu/mcheck/mce_amd_64.c       +14  -7
arch/x86/kernel/crash.c                       +1   -1
arch/x86/kernel/io_apic.c                     +92  -71
arch/x86/kernel/irq_32.c                      +1   -1
arch/x86/kernel/irq_64.c                      +1   -1
arch/x86/kernel/microcode_intel.c             +5   -5
arch/x86/kernel/module_32.c                   +3   -3
arch/x86/kernel/module_64.c                   +16  -16
arch/x86/kernel/mpparse.c                     +71  -71
arch/x86/kernel/msr.c                         +1   -1
arch/x86/kernel/reboot.c                      +1   -0
arch/x86/kernel/setup.c                       +1   -1
arch/x86/kernel/setup_percpu.c                +1   -0
arch/x86/kernel/smpboot.c                     +1   -1
arch/x86/kernel/tlb_32.c                      +29  -38
arch/x86/kernel/tlb_64.c                      +35  -28
arch/x86/kernel/tlb_uv.c                      +8   -8
arch/x86/mach-voyager/setup.c                 +1   -0
arch/x86/mm/init_32.c                         +0   -1
arch/x86/mm/pat.c                             +27  -10
arch/x86/xen/enlighten.c                      +12  -19
drivers/base/cpu.c                            +1   -1
drivers/base/topology.c                       +16  -17
drivers/firmware/dcdbas.c                     +8   -4
drivers/misc/sgi-xp/xpc_main.c                +1   -1
drivers/net/sfc/efx.c                         +12  -5
drivers/oprofile/buffer_sync.c                +18  -4
drivers/oprofile/buffer_sync.h                +4   -0
drivers/oprofile/oprof.c                      +8   -1
drivers/pci/intr_remapping.c                  +1   -0
drivers/xen/events.c                          +19  -7
drivers/xen/manage.c                          +1   -1
include/asm-generic/bitops/__ffs.h            +1   -1
include/asm-generic/bitops/__fls.h            +1   -1
include/asm-generic/bitops/fls.h              +1   -1
include/asm-generic/bitops/fls64.h            +2   -2
include/linux/interrupt.h                     +1   -0
include/linux/irq.h                           +83  -3
include/linux/irqnr.h                         +1   -0
include/linux/topology.h                      +6   -0
kernel/irq/chip.c                             +4   -1
kernel/irq/handle.c                           +35  -22
kernel/irq/internals.h                        +7   -0
kernel/irq/manage.c                           +6   -6
kernel/irq/migration.c                        +6   -6
kernel/irq/numa_migrate.c                     +16  -3
kernel/irq/proc.c                             +2   -2
kernel/sched_rt.c                             +22  -14
kernel/softirq.c                              +5   -0
lib/smp_processor_id.c                        +1   -1
Documentation/cputopology.txt
@@ -18,11 +18,11 @@ For an architecture to support this feature, it must define some of
 these macros in include/asm-XXX/topology.h:
 #define topology_physical_package_id(cpu)
 #define topology_core_id(cpu)
-#define topology_thread_siblings(cpu)
-#define topology_core_siblings(cpu)
+#define topology_thread_cpumask(cpu)
+#define topology_core_cpumask(cpu)

 The type of **_id is int.
-The type of siblings is cpumask_t.
+The type of siblings is (const) struct cpumask *.

 To be consistent on all architectures, include/linux/topology.h
 provides default definitions for any of the above macros that are
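The documentation hunk above describes the new return type of the topology siblings macros. A minimal sketch (not part of the patch; count_core_siblings() is a hypothetical helper name) of consuming the pointer-returning macro:

	#include <linux/topology.h>
	#include <linux/cpumask.h>

	/* Count how many CPUs share a core with @cpu, using
	 * topology_core_cpumask(), which now returns a (const) struct cpumask *. */
	static unsigned int count_core_siblings(int cpu)
	{
		const struct cpumask *mask = topology_core_cpumask(cpu);

		return cpumask_weight(mask);	/* number of set bits in the mask */
	}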
arch/alpha/kernel/irq.c
@@ -55,7 +55,7 @@ int irq_select_affinity(unsigned int irq)
 	cpu = (cpu < (NR_CPUS - 1) ? cpu + 1 : 0);
 	last_cpu = cpu;

-	irq_desc[irq].affinity = cpumask_of_cpu(cpu);
+	cpumask_copy(irq_desc[irq].affinity, cpumask_of(cpu));
 	irq_desc[irq].chip->set_affinity(irq, cpumask_of(cpu));
 	return 0;
 }
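This is the conversion pattern repeated throughout the cpus4096 part of this merge: cpumask_t values are no longer assigned or passed by value, they are manipulated through the cpumask API. An illustrative sketch only (the function name is hypothetical, mirroring the hunk above):

	/* desc->affinity is now handled through the cpumask_* helpers, so a
	 * large NR_CPUS mask is never copied by value or placed on the stack. */
	static void example_route_irq_to_cpu(unsigned int irq, int cpu)
	{
		/* old style (removed): irq_desc[irq].affinity = cpumask_of_cpu(cpu); */
		cpumask_copy(irq_desc[irq].affinity, cpumask_of(cpu));
		irq_desc[irq].chip->set_affinity(irq, cpumask_of(cpu));
	}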
arch/arm/kernel/irq.c
@@ -104,6 +104,11 @@ static struct irq_desc bad_irq_desc = {
 	.lock = SPIN_LOCK_UNLOCKED
 };

+#ifdef CONFIG_CPUMASK_OFFSTACK
+/* We are not allocating bad_irq_desc.affinity or .pending_mask */
+#error "ARM architecture does not support CONFIG_CPUMASK_OFFSTACK."
+#endif
+
 /*
  * do_IRQ handles all hardware IRQ's.  Decoded IRQs should not
  * come via this function.  Instead, they should provide their
@@ -161,7 +166,7 @@ void __init init_IRQ(void)
 		irq_desc[irq].status |= IRQ_NOREQUEST | IRQ_NOPROBE;

 #ifdef CONFIG_SMP
-	bad_irq_desc.affinity = CPU_MASK_ALL;
+	cpumask_setall(bad_irq_desc.affinity);
 	bad_irq_desc.cpu = smp_processor_id();
 #endif
 	init_arch_irq();
@@ -191,15 +196,16 @@ void migrate_irqs(void)
 		struct irq_desc *desc = irq_desc + i;

 		if (desc->cpu == cpu) {
-			unsigned int newcpu = any_online_cpu(desc->affinity);
-
-			if (newcpu == NR_CPUS) {
+			unsigned int newcpu = cpumask_any_and(desc->affinity,
+							      cpu_online_mask);
+			if (newcpu >= nr_cpu_ids) {
 				if (printk_ratelimit())
 					printk(KERN_INFO "IRQ%u no longer affine to CPU%u\n",
 					       i, cpu);

-				cpus_setall(desc->affinity);
-				newcpu = any_online_cpu(desc->affinity);
+				cpumask_setall(desc->affinity);
+				newcpu = cpumask_any_and(desc->affinity,
+							 cpu_online_mask);
 			}

 			route_irq(desc, i, newcpu);
arch/arm/oprofile/op_model_mpcore.c
@@ -263,7 +263,7 @@ static void em_route_irq(int irq, unsigned int cpu)
 	const struct cpumask *mask = cpumask_of(cpu);

 	spin_lock_irq(&desc->lock);
-	desc->affinity = *mask;
+	cpumask_copy(desc->affinity, mask);
 	desc->chip->set_affinity(irq, mask);
 	spin_unlock_irq(&desc->lock);
 }
arch/blackfin/kernel/irqchip.c
@@ -69,6 +69,11 @@ static struct irq_desc bad_irq_desc = {
 #endif
 };

+#ifdef CONFIG_CPUMASK_OFFSTACK
+/* We are not allocating a variable-sized bad_irq_desc.affinity */
+#error "Blackfin architecture does not support CONFIG_CPUMASK_OFFSTACK."
+#endif
+
 int show_interrupts(struct seq_file *p, void *v)
 {
 	int i = *(loff_t *)v, j;
arch/ia64/kernel/iosapic.c
@@ -880,7 +880,7 @@ iosapic_unregister_intr (unsigned int gsi)
 	if (iosapic_intr_info[irq].count == 0) {
 #ifdef CONFIG_SMP
 		/* Clear affinity */
-		cpus_setall(idesc->affinity);
+		cpumask_setall(idesc->affinity);
 #endif
 		/* Clear the interrupt information */
 		iosapic_intr_info[irq].dest = 0;
arch/ia64/kernel/irq.c
@@ -103,7 +103,7 @@ static char irq_redir [NR_IRQS]; // = { [0 ... NR_IRQS-1] = 1 };
 void set_irq_affinity_info (unsigned int irq, int hwid, int redir)
 {
 	if (irq < NR_IRQS) {
-		cpumask_copy(&irq_desc[irq].affinity,
+		cpumask_copy(irq_desc[irq].affinity,
 			     cpumask_of(cpu_logical_id(hwid)));
 		irq_redir[irq] = (char) (redir & 0xff);
 	}
@@ -148,7 +148,7 @@ static void migrate_irqs(void)
 		if (desc->status == IRQ_PER_CPU)
 			continue;

-		if (cpumask_any_and(&irq_desc[irq].affinity, cpu_online_mask)
+		if (cpumask_any_and(irq_desc[irq].affinity, cpu_online_mask)
 		    >= nr_cpu_ids) {
 			/*
 			 * Save it for phase 2 processing
arch/ia64/kernel/irq_ia64.c
@@ -493,11 +493,13 @@ ia64_handle_irq (ia64_vector vector, struct pt_regs *regs)
 	saved_tpr = ia64_getreg(_IA64_REG_CR_TPR);
 	ia64_srlz_d();
 	while (vector != IA64_SPURIOUS_INT_VECTOR) {
+		struct irq_desc *desc = irq_to_desc(vector);
+
 		if (unlikely(IS_LOCAL_TLB_FLUSH(vector))) {
 			smp_local_flush_tlb();
-			kstat_this_cpu.irqs[vector]++;
+			kstat_incr_irqs_this_cpu(vector, desc);
 		} else if (unlikely(IS_RESCHEDULE(vector)))
-			kstat_this_cpu.irqs[vector]++;
+			kstat_incr_irqs_this_cpu(vector, desc);
 		else {
 			int irq = local_vector_to_irq(vector);
@@ -551,11 +553,13 @@ void ia64_process_pending_intr(void)
 	 * Perform normal interrupt style processing
 	 */
 	while (vector != IA64_SPURIOUS_INT_VECTOR) {
+		struct irq_desc *desc = irq_to_desc(vector);
+
 		if (unlikely(IS_LOCAL_TLB_FLUSH(vector))) {
 			smp_local_flush_tlb();
-			kstat_this_cpu.irqs[vector]++;
+			kstat_incr_irqs_this_cpu(vector, desc);
 		} else if (unlikely(IS_RESCHEDULE(vector)))
-			kstat_this_cpu.irqs[vector]++;
+			kstat_incr_irqs_this_cpu(vector, desc);
 		else {
 			struct pt_regs *old_regs = set_irq_regs(NULL);
 			int irq = local_vector_to_irq(vector);
arch/ia64/kernel/msi_ia64.c
@@ -75,7 +75,7 @@ static void ia64_set_msi_irq_affinity(unsigned int irq,
 	msg.data = data;

 	write_msi_msg(irq, &msg);
-	irq_desc[irq].affinity = cpumask_of_cpu(cpu);
+	cpumask_copy(irq_desc[irq].affinity, cpumask_of(cpu));
 }
 #endif /* CONFIG_SMP */
@@ -187,7 +187,7 @@ static void dmar_msi_set_affinity(unsigned int irq, const struct cpumask *mask)
 	msg.address_lo |= MSI_ADDR_DESTID_CPU(cpu_physical_id(cpu));

 	dmar_msi_write(irq, &msg);
-	irq_desc[irq].affinity = *mask;
+	cpumask_copy(irq_desc[irq].affinity, mask);
 }
 #endif /* CONFIG_SMP */
arch/ia64/sn/kernel/msi_sn.c
@@ -205,7 +205,7 @@ static void sn_set_msi_irq_affinity(unsigned int irq,
 	msg.address_lo = (u32)(bus_addr & 0x00000000ffffffff);

 	write_msi_msg(irq, &msg);
-	irq_desc[irq].affinity = *cpu_mask;
+	cpumask_copy(irq_desc[irq].affinity, cpu_mask);
 }
 #endif /* CONFIG_SMP */
arch/mips/include/asm/irq.h
@@ -66,7 +66,7 @@ extern void smtc_forward_irq(unsigned int irq);
  */
 #define IRQ_AFFINITY_HOOK(irq)						\
 do {									\
-    if (!cpu_isset(smp_processor_id(), irq_desc[irq].affinity)) {	\
+    if (!cpumask_test_cpu(smp_processor_id(), irq_desc[irq].affinity)) {\
 	smtc_forward_irq(irq);						\
 	irq_exit();							\
 	return;								\
arch/mips/kernel/irq-gic.c
@@ -187,7 +187,7 @@ static void gic_set_affinity(unsigned int irq, const struct cpumask *cpumask)
 		set_bit(irq, pcpu_masks[first_cpu(tmp)].pcpu_mask);

 	}
-	irq_desc[irq].affinity = *cpumask;
+	cpumask_copy(irq_desc[irq].affinity, cpumask);
 	spin_unlock_irqrestore(&gic_lock, flags);

 }
arch/mips/kernel/smtc.c
@@ -686,7 +686,7 @@ void smtc_forward_irq(unsigned int irq)
 	 * and efficiency, we just pick the easiest one to find.
 	 */

-	target = first_cpu(irq_desc[irq].affinity);
+	target = cpumask_first(irq_desc[irq].affinity);

 	/*
 	 * We depend on the platform code to have correctly processed
@@ -921,11 +921,13 @@ void ipi_decode(struct smtc_ipi *pipi)
 	struct clock_event_device *cd;
 	void *arg_copy = pipi->arg;
 	int type_copy = pipi->type;
+	int irq = MIPS_CPU_IRQ_BASE + 1;
+
 	smtc_ipi_nq(&freeIPIq, pipi);
 	switch (type_copy) {
 	case SMTC_CLOCK_TICK:
 		irq_enter();
-		kstat_this_cpu.irqs[MIPS_CPU_IRQ_BASE + 1]++;
+		kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq));
 		cd = &per_cpu(mips_clockevent_device, cpu);
 		cd->event_handler(cd);
 		irq_exit();
arch/mips/mti-malta/malta-smtc.c
@@ -116,7 +116,7 @@ struct plat_smp_ops msmtc_smp_ops = {

 void plat_set_irq_affinity(unsigned int irq, const struct cpumask *affinity)
 {
-	cpumask_t tmask = *affinity;
+	cpumask_t tmask;
 	int cpu = 0;
 	void smtc_set_irq_affinity(unsigned int irq, cpumask_t aff);
@@ -139,11 +139,12 @@ void plat_set_irq_affinity(unsigned int irq, const struct cpumask *affinity)
 	 * be made to forward to an offline "CPU".
 	 */

+	cpumask_copy(&tmask, affinity);
 	for_each_cpu(cpu, affinity) {
 		if ((cpu_data[cpu].vpe_id != 0) || !cpu_online(cpu))
 			cpu_clear(cpu, tmask);
 	}
-	irq_desc[irq].affinity = tmask;
+	cpumask_copy(irq_desc[irq].affinity, &tmask);

 	if (cpus_empty(tmask))
 		/*
arch/mips/sgi-ip22/ip22-int.c
@@ -155,7 +155,7 @@ static void indy_buserror_irq(void)
 	int irq = SGI_BUSERR_IRQ;

 	irq_enter();
-	kstat_this_cpu.irqs[irq]++;
+	kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq));
 	ip22_be_interrupt(irq);
 	irq_exit();
 }
arch/mips/sgi-ip22/ip22-time.c
@@ -122,7 +122,7 @@ void indy_8254timer_irq(void)
 	char c;

 	irq_enter();
-	kstat_this_cpu.irqs[irq]++;
+	kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq));
 	printk(KERN_ALERT "Oops, got 8254 interrupt.\n");
 	ArcRead(0, &c, 1, &cnt);
 	ArcEnterInteractiveMode();
arch/mips/sibyte/bcm1480/smp.c
@@ -178,9 +178,10 @@ struct plat_smp_ops bcm1480_smp_ops = {
 void bcm1480_mailbox_interrupt(void)
 {
 	int cpu = smp_processor_id();
+	int irq = K_BCM1480_INT_MBOX_0_0;
 	unsigned int action;

-	kstat_this_cpu.irqs[K_BCM1480_INT_MBOX_0_0]++;
+	kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq));
 	/* Load the mailbox register to figure out what we're supposed to do */
 	action = (__raw_readq(mailbox_0_regs[cpu]) >> 48) & 0xffff;
arch/mips/sibyte/sb1250/smp.c
@@ -166,9 +166,10 @@ struct plat_smp_ops sb_smp_ops = {
 void sb1250_mailbox_interrupt(void)
 {
 	int cpu = smp_processor_id();
+	int irq = K_INT_MBOX_0;
 	unsigned int action;

-	kstat_this_cpu.irqs[K_INT_MBOX_0]++;
+	kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq));
 	/* Load the mailbox register to figure out what we're supposed to do */
 	action = (____raw_readq(mailbox_regs[cpu]) >> 48) & 0xffff;
arch/mn10300/kernel/mn10300-watchdog.c
@@ -130,6 +130,7 @@ void watchdog_interrupt(struct pt_regs *regs, enum exception_code excep)
 	 * the stack NMI-atomically, it's safe to use smp_processor_id().
 	 */
 	int sum, cpu = smp_processor_id();
+	int irq = NMIIRQ;
 	u8 wdt, tmp;

 	wdt = WDCTR & ~WDCTR_WDCNE;
@@ -138,7 +139,7 @@ void watchdog_interrupt(struct pt_regs *regs, enum exception_code excep)
 	NMICR = NMICR_WDIF;

 	nmi_count(cpu)++;
-	kstat_this_cpu.irqs[NMIIRQ]++;
+	kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq));
 	sum = irq_stat[cpu].__irq_count;

 	if (last_irq_sums[cpu] == sum) {
arch/parisc/kernel/irq.c
@@ -120,7 +120,7 @@ int cpu_check_affinity(unsigned int irq, cpumask_t *dest)
 	if (CHECK_IRQ_PER_CPU(irq)) {
 		/* Bad linux design decision.  The mask has already
 		 * been set; we must reset it */
-		irq_desc[irq].affinity = CPU_MASK_ALL;
+		cpumask_setall(irq_desc[irq].affinity);
 		return -EINVAL;
 	}
@@ -136,7 +136,7 @@ static void cpu_set_affinity_irq(unsigned int irq, const struct cpumask *dest)
 	if (cpu_check_affinity(irq, dest))
 		return;

-	irq_desc[irq].affinity = *dest;
+	cpumask_copy(irq_desc[irq].affinity, dest);
 }
 #endif
@@ -295,7 +295,7 @@ int txn_alloc_irq(unsigned int bits_wide)
 unsigned long txn_affinity_addr(unsigned int irq, int cpu)
 {
 #ifdef CONFIG_SMP
-	irq_desc[irq].affinity = cpumask_of_cpu(cpu);
+	cpumask_copy(irq_desc[irq].affinity, cpumask_of(cpu));
 #endif

 	return per_cpu(cpu_data, cpu).txn_addr;
@@ -352,7 +352,7 @@ void do_cpu_irq_mask(struct pt_regs *regs)
 	irq = eirr_to_irq(eirr_val);

 #ifdef CONFIG_SMP
-	dest = irq_desc[irq].affinity;
+	cpumask_copy(&dest, irq_desc[irq].affinity);
 	if (CHECK_IRQ_PER_CPU(irq_desc[irq].status) &&
 	    !cpu_isset(smp_processor_id(), dest)) {
 		int cpu = first_cpu(dest);
arch/powerpc/kernel/irq.c
@@ -231,7 +231,7 @@ void fixup_irqs(cpumask_t map)
 		if (irq_desc[irq].status & IRQ_PER_CPU)
 			continue;

-		cpus_and(mask, irq_desc[irq].affinity, map);
+		cpumask_and(&mask, irq_desc[irq].affinity, &map);
 		if (any_online_cpu(mask) == NR_CPUS) {
 			printk("Breaking affinity for irq %i\n", irq);
 			mask = map;
arch/powerpc/platforms/pseries/xics.c
@@ -153,9 +153,10 @@ static int get_irq_server(unsigned int virq, unsigned int strict_check)
 {
 	int server;
 	/* For the moment only implement delivery to all cpus or one cpu */
-	cpumask_t cpumask = irq_desc[virq].affinity;
+	cpumask_t cpumask;
 	cpumask_t tmp = CPU_MASK_NONE;

+	cpumask_copy(&cpumask, irq_desc[virq].affinity);
 	if (!distribute_irqs)
 		return default_server;
@@ -869,7 +870,7 @@ void xics_migrate_irqs_away(void)
 		       virq, cpu);

 		/* Reset affinity to all cpus */
-		irq_desc[virq].affinity = CPU_MASK_ALL;
+		cpumask_setall(irq_desc[virq].affinity);
 		desc->chip->set_affinity(virq, cpu_all_mask);
 unlock:
 		spin_unlock_irqrestore(&desc->lock, flags);
arch/powerpc/sysdev/mpic.c
@@ -566,9 +566,10 @@ static void __init mpic_scan_ht_pics(struct mpic *mpic)
 #ifdef CONFIG_SMP
 static int irq_choose_cpu(unsigned int virt_irq)
 {
-	cpumask_t mask = irq_desc[virt_irq].affinity;
+	cpumask_t mask;
 	int cpuid;

+	cpumask_copy(&mask, irq_desc[virt_irq].affinity);
 	if (cpus_equal(mask, CPU_MASK_ALL)) {
 		static int irq_rover;
 		static DEFINE_SPINLOCK(irq_rover_lock);
arch/sparc/kernel/irq_64.c
@@ -247,9 +247,10 @@ struct irq_handler_data {
 #ifdef CONFIG_SMP
 static int irq_choose_cpu(unsigned int virt_irq)
 {
-	cpumask_t mask = irq_desc[virt_irq].affinity;
+	cpumask_t mask;
 	int cpuid;

+	cpumask_copy(&mask, irq_desc[virt_irq].affinity);
 	if (cpus_equal(mask, CPU_MASK_ALL)) {
 		static int irq_rover;
 		static DEFINE_SPINLOCK(irq_rover_lock);
@@ -854,7 +855,7 @@ void fixup_irqs(void)
 		    !(irq_desc[irq].status & IRQ_PER_CPU)) {
 			if (irq_desc[irq].chip->set_affinity)
 				irq_desc[irq].chip->set_affinity(irq,
-					&irq_desc[irq].affinity);
+					irq_desc[irq].affinity);
 		}
 		spin_unlock_irqrestore(&irq_desc[irq].lock, flags);
 	}
arch/sparc/kernel/time_64.c
@@ -729,7 +729,7 @@ void timer_interrupt(int irq, struct pt_regs *regs)

 	irq_enter();

-	kstat_this_cpu.irqs[0]++;
+	kstat_incr_irqs_this_cpu(0, irq_to_desc(0));

 	if (unlikely(!evt->event_handler)) {
 		printk(KERN_WARNING
arch/x86/include/asm/apicnum.h (new file, mode 100644)
#ifndef _ASM_X86_APICNUM_H
#define _ASM_X86_APICNUM_H

/* define MAX_IO_APICS */
#ifdef CONFIG_X86_32
# define MAX_IO_APICS 64
#else
# define MAX_IO_APICS 128
# define MAX_LOCAL_APIC 32768
#endif

#endif /* _ASM_X86_APICNUM_H */
arch/x86/include/asm/bitops.h
@@ -3,6 +3,9 @@
 /*
  * Copyright 1992, Linus Torvalds.
+ *
+ * Note: inlines with more than a single statement should be marked
+ * __always_inline to avoid problems with older gcc's inlining heuristics.
  */

 #ifndef _LINUX_BITOPS_H
@@ -53,7 +56,8 @@
  * Note that @nr may be almost arbitrarily large; this function is not
  * restricted to acting on a single-word quantity.
  */
-static inline void set_bit(unsigned int nr, volatile unsigned long *addr)
+static __always_inline void
+set_bit(unsigned int nr, volatile unsigned long *addr)
 {
 	if (IS_IMMEDIATE(nr)) {
 		asm volatile(LOCK_PREFIX "orb %1,%0"
@@ -90,7 +94,8 @@ static inline void __set_bit(int nr, volatile unsigned long *addr)
  * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
  * in order to ensure changes are visible on other processors.
  */
-static inline void clear_bit(int nr, volatile unsigned long *addr)
+static __always_inline void
+clear_bit(int nr, volatile unsigned long *addr)
 {
 	if (IS_IMMEDIATE(nr)) {
 		asm volatile(LOCK_PREFIX "andb %1,%0"
@@ -204,7 +209,8 @@ static inline int test_and_set_bit(int nr, volatile unsigned long *addr)
  *
  * This is the same as test_and_set_bit on x86.
  */
-static inline int test_and_set_bit_lock(int nr, volatile unsigned long *addr)
+static __always_inline int
+test_and_set_bit_lock(int nr, volatile unsigned long *addr)
 {
 	return test_and_set_bit(nr, addr);
 }
@@ -300,7 +306,7 @@ static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
 	return oldbit;
 }

-static inline int constant_test_bit(unsigned int nr, const volatile unsigned long *addr)
+static __always_inline int constant_test_bit(unsigned int nr, const volatile unsigned long *addr)
 {
 	return ((1UL << (nr % BITS_PER_LONG)) &
 		(((unsigned long *)addr)[nr / BITS_PER_LONG])) != 0;
arch/x86/include/asm/cpu.h
@@ -7,6 +7,20 @@
 #include <linux/nodemask.h>
 #include <linux/percpu.h>

+#ifdef CONFIG_SMP

+extern void prefill_possible_map(void);
+
+#else /* CONFIG_SMP */
+
+static inline void prefill_possible_map(void) {}
+
+#define cpu_physical_id(cpu)		boot_cpu_physical_apicid
+#define safe_smp_processor_id()		0
+#define stack_smp_processor_id()	0
+
+#endif /* CONFIG_SMP */
+
 struct x86_cpu {
 	struct cpu cpu;
 };
@@ -17,4 +31,11 @@ extern void arch_unregister_cpu(int);
 #endif

 DECLARE_PER_CPU(int, cpu_state);

+#ifdef CONFIG_X86_HAS_BOOT_CPU_ID
+extern unsigned char boot_cpu_id;
+#else
+#define boot_cpu_id	0
+#endif
+
 #endif /* _ASM_X86_CPU_H */
arch/x86/include/asm/cpumask.h (new file, mode 100644)
#ifndef _ASM_X86_CPUMASK_H
#define _ASM_X86_CPUMASK_H
#ifndef __ASSEMBLY__
#include <linux/cpumask.h>

#ifdef CONFIG_X86_64

extern cpumask_var_t cpu_callin_mask;
extern cpumask_var_t cpu_callout_mask;
extern cpumask_var_t cpu_initialized_mask;
extern cpumask_var_t cpu_sibling_setup_mask;

#else /* CONFIG_X86_32 */

extern cpumask_t cpu_callin_map;
extern cpumask_t cpu_callout_map;
extern cpumask_t cpu_initialized;
extern cpumask_t cpu_sibling_setup_map;

#define cpu_callin_mask		((struct cpumask *)&cpu_callin_map)
#define cpu_callout_mask	((struct cpumask *)&cpu_callout_map)
#define cpu_initialized_mask	((struct cpumask *)&cpu_initialized)
#define cpu_sibling_setup_mask	((struct cpumask *)&cpu_sibling_setup_map)

#endif /* CONFIG_X86_32 */
#endif /* __ASSEMBLY__ */
#endif /* _ASM_X86_CPUMASK_H */
arch/x86/include/asm/hardirq_32.h
@@ -19,6 +19,9 @@ typedef struct {

 DECLARE_PER_CPU(irq_cpustat_t, irq_stat);

+/* We can have at most NR_VECTORS irqs routed to a cpu at a time */
+#define MAX_HARDIRQS_PER_CPU NR_VECTORS
+
 #define __ARCH_IRQ_STAT
 #define __IRQ_STAT(cpu, member) (per_cpu(irq_stat, cpu).member)
arch/x86/include/asm/io_apic.h
@@ -114,38 +114,16 @@ struct IR_IO_APIC_route_entry {
 extern int nr_ioapics;
 extern int nr_ioapic_registers[MAX_IO_APICS];

 /*
  * MP-BIOS irq configuration table structures:
  */

 #define MP_MAX_IOAPIC_PIN 127

-struct mp_config_ioapic {
-	unsigned long mp_apicaddr;
-	unsigned int mp_apicid;
-	unsigned char mp_type;
-	unsigned char mp_apicver;
-	unsigned char mp_flags;
-};
-
-struct mp_config_intsrc {
-	unsigned int mp_dstapic;
-	unsigned char mp_type;
-	unsigned char mp_irqtype;
-	unsigned short mp_irqflag;
-	unsigned char mp_srcbus;
-	unsigned char mp_srcbusirq;
-	unsigned char mp_dstirq;
-};
-
 /* I/O APIC entries */
-extern struct mp_config_ioapic mp_ioapics[MAX_IO_APICS];
+extern struct mpc_ioapic mp_ioapics[MAX_IO_APICS];

 /* # of MP IRQ source entries */
 extern int mp_irq_entries;

 /* MP IRQ source entries */
-extern struct mp_config_intsrc mp_irqs[MAX_IRQ_SOURCES];
+extern struct mpc_intsrc mp_irqs[MAX_IRQ_SOURCES];

 /* non-0 if default (table-less) MP configuration */
 extern int mpc_default_type;
arch/x86/include/asm/irq_vectors.h
@@ -105,6 +105,8 @@

 #if defined(CONFIG_X86_IO_APIC) && !defined(CONFIG_X86_VOYAGER)

+#include <asm/apicnum.h>	/* need MAX_IO_APICS */
+
 #ifndef CONFIG_SPARSE_IRQ
 # if NR_CPUS < MAX_IO_APICS
 #  define NR_IRQS (NR_VECTORS + (32 * NR_CPUS))
@@ -112,11 +114,12 @@
 #  define NR_IRQS (NR_VECTORS + (32 * MAX_IO_APICS))
 # endif
 #else
-# if (8 * NR_CPUS) > (32 * MAX_IO_APICS)
-#  define NR_IRQS (NR_VECTORS + (8 * NR_CPUS))
-# else
-#  define NR_IRQS (NR_VECTORS + (32 * MAX_IO_APICS))
-# endif
+
+# define NR_IRQS					\
+	((8 * NR_CPUS) > (32 * MAX_IO_APICS) ?		\
+		(NR_VECTORS + (8 * NR_CPUS)) :		\
+		(NR_VECTORS + (32 * MAX_IO_APICS)))	\
+
 #endif

 #elif defined(CONFIG_X86_VOYAGER)
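For a sense of scale, here is a worked example of the sparse-IRQ branch of that macro; the concrete NR_VECTORS/NR_CPUS values below are assumed for illustration only, not taken from the patch:

	/* Assuming NR_VECTORS = 256, NR_CPUS = 4096 and MAX_IO_APICS = 128:
	 *   8  * NR_CPUS       = 32768
	 *   32 * MAX_IO_APICS  =  4096
	 * 32768 > 4096, so the ternary picks the CPU-scaled term and
	 *   NR_IRQS = NR_VECTORS + 8 * NR_CPUS = 256 + 32768 = 33024
	 */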
arch/x86/include/asm/mpspec_def.h
@@ -24,17 +24,18 @@
 # endif
 #endif

-struct intel_mp_floating {
-	char mpf_signature[4];		/* "_MP_"			*/
-	unsigned int mpf_physptr;	/* Configuration table address	*/
-	unsigned char mpf_length;	/* Our length (paragraphs)	*/
-	unsigned char mpf_specification;/* Specification version	*/
-	unsigned char mpf_checksum;	/* Checksum (makes sum 0)	*/
-	unsigned char mpf_feature1;	/* Standard or configuration ?	*/
-	unsigned char mpf_feature2;	/* Bit7 set for IMCR|PIC	*/
-	unsigned char mpf_feature3;	/* Unused (0)			*/
-	unsigned char mpf_feature4;	/* Unused (0)			*/
-	unsigned char mpf_feature5;	/* Unused (0)			*/
+/* Intel MP Floating Pointer Structure */
+struct mpf_intel {
+	char signature[4];		/* "_MP_"			*/
+	unsigned int physptr;		/* Configuration table address	*/
+	unsigned char length;		/* Our length (paragraphs)	*/
+	unsigned char specification;	/* Specification version	*/
+	unsigned char checksum;		/* Checksum (makes sum 0)	*/
+	unsigned char feature1;		/* Standard or configuration ?	*/
+	unsigned char feature2;		/* Bit7 set for IMCR|PIC	*/
+	unsigned char feature3;		/* Unused (0)			*/
+	unsigned char feature4;		/* Unused (0)			*/
+	unsigned char feature5;		/* Unused (0)			*/
 };

 #define MPC_SIGNATURE "PCMP"
arch/x86/include/asm/paravirt.h
@@ -244,7 +244,8 @@ struct pv_mmu_ops {
 	void (*flush_tlb_user)(void);
 	void (*flush_tlb_kernel)(void);
 	void (*flush_tlb_single)(unsigned long addr);
-	void (*flush_tlb_others)(const cpumask_t *cpus, struct mm_struct *mm,
+	void (*flush_tlb_others)(const struct cpumask *cpus,
+				 struct mm_struct *mm,
 				 unsigned long va);

 	/* Hooks for allocating and freeing a pagetable top-level */
@@ -984,10 +985,11 @@ static inline void __flush_tlb_single(unsigned long addr)
 	PVOP_VCALL1(pv_mmu_ops.flush_tlb_single, addr);
 }

-static inline void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
+static inline void flush_tlb_others(const struct cpumask *cpumask,
+				    struct mm_struct *mm,
 				    unsigned long va)
 {
-	PVOP_VCALL3(pv_mmu_ops.flush_tlb_others, &cpumask, mm, va);
+	PVOP_VCALL3(pv_mmu_ops.flush_tlb_others, cpumask, mm, va);
 }

 static inline int paravirt_pgd_alloc(struct mm_struct *mm)
arch/x86/include/asm/smp.h
@@ -17,30 +17,7 @@
 #endif
 #include <asm/pda.h>
 #include <asm/thread_info.h>
-
-#ifdef CONFIG_X86_64
-
-extern cpumask_var_t cpu_callin_mask;
-extern cpumask_var_t cpu_callout_mask;
-extern cpumask_var_t cpu_initialized_mask;
-extern cpumask_var_t cpu_sibling_setup_mask;
-
-#else /* CONFIG_X86_32 */
-
-extern cpumask_t cpu_callin_map;
-extern cpumask_t cpu_callout_map;
-extern cpumask_t cpu_initialized;
-extern cpumask_t cpu_sibling_setup_map;
-
-#define cpu_callin_mask		((struct cpumask *)&cpu_callin_map)
-#define cpu_callout_mask	((struct cpumask *)&cpu_callout_map)
-#define cpu_initialized_mask	((struct cpumask *)&cpu_initialized)
-#define cpu_sibling_setup_mask	((struct cpumask *)&cpu_sibling_setup_map)
-
-#endif /* CONFIG_X86_32 */
-
-extern void (*mtrr_hook)(void);
-extern void zap_low_mappings(void);
+#include <asm/cpumask.h>

 extern int __cpuinit get_local_pda(int cpu);
@@ -167,8 +144,6 @@ void play_dead_common(void);
 void native_send_call_func_ipi(const struct cpumask *mask);
 void native_send_call_func_single_ipi(int cpu);

-extern void prefill_possible_map(void);
-
 void smp_store_cpu_info(int id);
 #define cpu_physical_id(cpu)	per_cpu(x86_cpu_to_apicid, cpu)
@@ -177,10 +152,6 @@ static inline int num_booting_cpus(void)
 {
 	return cpumask_weight(cpu_callout_mask);
 }
-#else
-static inline void prefill_possible_map(void)
-{
-}
 #endif /* CONFIG_SMP */

 extern unsigned disabled_cpus __cpuinitdata;
@@ -205,10 +176,6 @@ extern int safe_smp_processor_id(void);
 })
 #define safe_smp_processor_id()	smp_processor_id()

-#else /* !CONFIG_X86_32_SMP && !CONFIG_X86_64_SMP */
-#define cpu_physical_id(cpu)		boot_cpu_physical_apicid
-#define safe_smp_processor_id()		0
-#define stack_smp_processor_id()	0
 #endif

 #ifdef CONFIG_X86_LOCAL_APIC
@@ -251,11 +218,5 @@ static inline int hard_smp_processor_id(void)

 #endif /* CONFIG_X86_LOCAL_APIC */

-#ifdef CONFIG_X86_HAS_BOOT_CPU_ID
-extern unsigned char boot_cpu_id;
-#else
-#define boot_cpu_id	0
-#endif
-
 #endif /* __ASSEMBLY__ */
 #endif /* _ASM_X86_SMP_H */
arch/x86/include/asm/tlbflush.h
@@ -113,7 +113,7 @@ static inline void flush_tlb_range(struct vm_area_struct *vma,
 	__flush_tlb();
 }

-static inline void native_flush_tlb_others(const cpumask_t *cpumask,
+static inline void native_flush_tlb_others(const struct cpumask *cpumask,
 					   struct mm_struct *mm,
 					   unsigned long va)
 {
@@ -142,8 +142,8 @@ static inline void flush_tlb_range(struct vm_area_struct *vma,
 	flush_tlb_mm(vma->vm_mm);
 }

-void native_flush_tlb_others(const cpumask_t *cpumask, struct mm_struct *mm,
-			     unsigned long va);
+void native_flush_tlb_others(const struct cpumask *cpumask,
+			     struct mm_struct *mm, unsigned long va);

 #define TLBSTATE_OK	1
 #define TLBSTATE_LAZY	2
@@ -166,7 +166,7 @@ static inline void reset_lazy_tlbstate(void)
 #endif	/* SMP */

 #ifndef CONFIG_PARAVIRT
-#define flush_tlb_others(mask, mm, va)	native_flush_tlb_others(&mask, mm, va)
+#define flush_tlb_others(mask, mm, va)	native_flush_tlb_others(mask, mm, va)
 #endif

 static inline void flush_tlb_kernel_range(unsigned long start,
@@ -175,4 +175,6 @@ static inline void flush_tlb_kernel_range(unsigned long start,
 	flush_tlb_all();
 }

+extern void zap_low_mappings(void);
+
 #endif /* _ASM_X86_TLBFLUSH_H */
arch/x86/include/asm/uv/uv_bau.h
@@ -325,7 +325,8 @@ static inline void bau_cpubits_clear(struct bau_local_cpumask *dstp, int nbits)
 #define cpubit_isset(cpu, bau_local_cpumask) \
 	test_bit((cpu), (bau_local_cpumask).bits)

-extern int uv_flush_tlb_others(cpumask_t *, struct mm_struct *, unsigned long);
+extern int uv_flush_tlb_others(struct cpumask *,
+			       struct mm_struct *, unsigned long);
 extern void uv_bau_message_intr1(void);
 extern void uv_bau_timeout_intr1(void);
arch/x86/kernel/acpi/boot.c
@@ -912,8 +912,8 @@ static u8 __init uniq_ioapic_id(u8 id)
 	DECLARE_BITMAP(used, 256);
 	bitmap_zero(used, 256);
 	for (i = 0; i < nr_ioapics; i++) {
-		struct mp_config_ioapic *ia = &mp_ioapics[i];
-		__set_bit(ia->mp_apicid, used);
+		struct mpc_ioapic *ia = &mp_ioapics[i];
+		__set_bit(ia->apicid, used);
 	}
 	if (!test_bit(id, used))
 		return id;
@@ -945,47 +945,47 @@ void __init mp_register_ioapic(int id, u32 address, u32 gsi_base)

 	idx = nr_ioapics;

-	mp_ioapics[idx].mp_type = MP_IOAPIC;
-	mp_ioapics[idx].mp_flags = MPC_APIC_USABLE;
-	mp_ioapics[idx].mp_apicaddr = address;
+	mp_ioapics[idx].type = MP_IOAPIC;
+	mp_ioapics[idx].flags = MPC_APIC_USABLE;
+	mp_ioapics[idx].apicaddr = address;

 	set_fixmap_nocache(FIX_IO_APIC_BASE_0 + idx, address);
-	mp_ioapics[idx].mp_apicid = uniq_ioapic_id(id);
+	mp_ioapics[idx].apicid = uniq_ioapic_id(id);
 #ifdef CONFIG_X86_32
-	mp_ioapics[idx].mp_apicver = io_apic_get_version(idx);
+	mp_ioapics[idx].apicver = io_apic_get_version(idx);
 #else
-	mp_ioapics[idx].mp_apicver = 0;
+	mp_ioapics[idx].apicver = 0;
 #endif
 	/*
 	 * Build basic GSI lookup table to facilitate gsi->io_apic lookups
 	 * and to prevent reprogramming of IOAPIC pins (PCI GSIs).
 	 */
-	mp_ioapic_routing[idx].apic_id = mp_ioapics[idx].mp_apicid;
+	mp_ioapic_routing[idx].apic_id = mp_ioapics[idx].apicid;
 	mp_ioapic_routing[idx].gsi_base = gsi_base;
 	mp_ioapic_routing[idx].gsi_end = gsi_base +
 	    io_apic_get_redir_entries(idx);

-	printk(KERN_INFO "IOAPIC[%d]: apic_id %d, version %d, address 0x%lx, "
-	       "GSI %d-%d\n", idx, mp_ioapics[idx].mp_apicid,
-	       mp_ioapics[idx].mp_apicver, mp_ioapics[idx].mp_apicaddr,
+	printk(KERN_INFO "IOAPIC[%d]: apic_id %d, version %d, address 0x%x, "
+	       "GSI %d-%d\n", idx, mp_ioapics[idx].apicid,
+	       mp_ioapics[idx].apicver, mp_ioapics[idx].apicaddr,
 	       mp_ioapic_routing[idx].gsi_base, mp_ioapic_routing[idx].gsi_end);

 	nr_ioapics++;
 }

-static void assign_to_mp_irq(struct mp_config_intsrc *m,
-				    struct mp_config_intsrc *mp_irq)
+static void assign_to_mp_irq(struct mpc_intsrc *m,
+				    struct mpc_intsrc *mp_irq)
 {
-	memcpy(mp_irq, m, sizeof(struct mp_config_intsrc));
+	memcpy(mp_irq, m, sizeof(struct mpc_intsrc));
 }

-static int mp_irq_cmp(struct mp_config_intsrc *mp_irq,
-				struct mp_config_intsrc *m)
+static int mp_irq_cmp(struct mpc_intsrc *mp_irq,
+				struct mpc_intsrc *m)
 {
-	return memcmp(mp_irq, m, sizeof(struct mp_config_intsrc));
+	return memcmp(mp_irq, m, sizeof(struct mpc_intsrc));
 }

-static void save_mp_irq(struct mp_config_intsrc *m)
+static void save_mp_irq(struct mpc_intsrc *m)
 {
 	int i;
@@ -1003,7 +1003,7 @@ void __init mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger, u32 gsi)
 {
 	int ioapic;
 	int pin;
-	struct mp_config_intsrc mp_irq;
+	struct mpc_intsrc mp_irq;

 	/*
 	 * Convert 'gsi' to 'ioapic.pin'.
@@ -1021,13 +1021,13 @@ void __init mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger, u32 gsi)
 	if ((bus_irq == 0) && (trigger == 3))
 		trigger = 1;

-	mp_irq.mp_type = MP_INTSRC;
-	mp_irq.mp_irqtype = mp_INT;
-	mp_irq.mp_irqflag = (trigger << 2) | polarity;
-	mp_irq.mp_srcbus = MP_ISA_BUS;
-	mp_irq.mp_srcbusirq = bus_irq;	/* IRQ */
-	mp_irq.mp_dstapic = mp_ioapics[ioapic].mp_apicid; /* APIC ID */
-	mp_irq.mp_dstirq = pin;	/* INTIN# */
+	mp_irq.type = MP_INTSRC;
+	mp_irq.irqtype = mp_INT;
+	mp_irq.irqflag = (trigger << 2) | polarity;
+	mp_irq.srcbus = MP_ISA_BUS;
+	mp_irq.srcbusirq = bus_irq;	/* IRQ */
+	mp_irq.dstapic = mp_ioapics[ioapic].apicid; /* APIC ID */
+	mp_irq.dstirq = pin;	/* INTIN# */

 	save_mp_irq(&mp_irq);
 }
@@ -1037,7 +1037,7 @@ void __init mp_config_acpi_legacy_irqs(void)
 	int i;
 	int ioapic;
 	unsigned int dstapic;
-	struct mp_config_intsrc mp_irq;
+	struct mpc_intsrc mp_irq;

 #if defined (CONFIG_MCA) || defined (CONFIG_EISA)
 	/*
@@ -1062,7 +1062,7 @@ void __init mp_config_acpi_legacy_irqs(void)
 	ioapic = mp_find_ioapic(0);
 	if (ioapic < 0)
 		return;
-	dstapic = mp_ioapics[ioapic].mp_apicid;
+	dstapic = mp_ioapics[ioapic].apicid;

 	/*
 	 * Use the default configuration for the IRQs 0-15.  Unless
@@ -1072,16 +1072,14 @@ void __init mp_config_acpi_legacy_irqs(void)
 		int idx;

 		for (idx = 0; idx < mp_irq_entries; idx++) {
-			struct mp_config_intsrc *irq = mp_irqs + idx;
+			struct mpc_intsrc *irq = mp_irqs + idx;

 			/* Do we already have a mapping for this ISA IRQ? */
-			if (irq->mp_srcbus == MP_ISA_BUS
-			    && irq->mp_srcbusirq == i)
+			if (irq->srcbus == MP_ISA_BUS && irq->srcbusirq == i)
 				break;

 			/* Do we already have a mapping for this IOAPIC pin */
-			if (irq->mp_dstapic == dstapic &&
-			    irq->mp_dstirq == i)
+			if (irq->dstapic == dstapic && irq->dstirq == i)
 				break;
 		}
@@ -1090,13 +1088,13 @@ void __init mp_config_acpi_legacy_irqs(void)
 			continue;	/* IRQ already used */
 		}

-		mp_irq.mp_type = MP_INTSRC;
-		mp_irq.mp_irqflag = 0;	/* Conforming */
-		mp_irq.mp_srcbus = MP_ISA_BUS;
-		mp_irq.mp_dstapic = dstapic;
-		mp_irq.mp_irqtype = mp_INT;
-		mp_irq.mp_srcbusirq = i; /* Identity mapped */
-		mp_irq.mp_dstirq = i;
+		mp_irq.type = MP_INTSRC;
+		mp_irq.irqflag = 0;	/* Conforming */
+		mp_irq.srcbus = MP_ISA_BUS;
+		mp_irq.dstapic = dstapic;
+		mp_irq.irqtype = mp_INT;
+		mp_irq.srcbusirq = i; /* Identity mapped */
+		mp_irq.dstirq = i;

 		save_mp_irq(&mp_irq);
 	}
@@ -1207,22 +1205,22 @@ int mp_config_acpi_gsi(unsigned char number, unsigned int devfn, u8 pin,
 			u32 gsi, int triggering, int polarity)
 {
 #ifdef CONFIG_X86_MPPARSE
-	struct mp_config_intsrc mp_irq;
+	struct mpc_intsrc mp_irq;
 	int ioapic;

 	if (!acpi_ioapic)
 		return 0;

 	/* print the entry should happen on mptable identically */
-	mp_irq.mp_type = MP_INTSRC;
-	mp_irq.mp_irqtype = mp_INT;
-	mp_irq.mp_irqflag = (triggering == ACPI_EDGE_SENSITIVE ? 4 : 0x0c) |
+	mp_irq.type = MP_INTSRC;
+	mp_irq.irqtype = mp_INT;
+	mp_irq.irqflag = (triggering == ACPI_EDGE_SENSITIVE ? 4 : 0x0c) |
 				(polarity == ACPI_ACTIVE_HIGH ? 1 : 3);
-	mp_irq.mp_srcbus = number;
-	mp_irq.mp_srcbusirq = (((devfn >> 3) & 0x1f) << 2) | ((pin - 1) & 3);
+	mp_irq.srcbus = number;
+	mp_irq.srcbusirq = (((devfn >> 3) & 0x1f) << 2) | ((pin - 1) & 3);
 	ioapic = mp_find_ioapic(gsi);
-	mp_irq.mp_dstapic = mp_ioapic_routing[ioapic].apic_id;
-	mp_irq.mp_dstirq = gsi - mp_ioapic_routing[ioapic].gsi_base;
+	mp_irq.dstapic = mp_ioapic_routing[ioapic].apic_id;
+	mp_irq.dstirq = gsi - mp_ioapic_routing[ioapic].gsi_base;

 	save_mp_irq(&mp_irq);
 #endif
arch/x86/kernel/apic.c
@@ -895,6 +895,10 @@ void disable_local_APIC(void)
 {
 	unsigned int value;

+	/* APIC hasn't been mapped yet */
+	if (!apic_phys)
+		return;
+
 	clear_local_APIC();

 	/*
@@ -1126,6 +1130,11 @@ void __cpuinit setup_local_APIC(void)
 	unsigned int value;
 	int i, j;

+	if (disable_apic) {
+		disable_ioapic_setup();
+		return;
+	}
+
 #ifdef CONFIG_X86_32
 	/* Pound the ESR really hard over the head with a big hammer - mbligh */
 	if (lapic_is_integrated() && esr_disable) {
@@ -1566,11 +1575,11 @@ int apic_version[MAX_APICS];
 int __init APIC_init_uniprocessor(void)
 {
-#ifdef CONFIG_X86_64
 	if (disable_apic) {
 		pr_info("Apic disabled\n");
 		return -1;
 	}
+#ifdef CONFIG_X86_64
 	if (!cpu_has_apic) {
 		disable_apic = 1;
 		pr_info("Apic disabled by BIOS\n");
arch/x86/kernel/cpu/common.c
@@ -21,6 +21,8 @@
 #include <asm/asm.h>
 #include <asm/numa.h>
 #include <asm/smp.h>
+#include <asm/cpu.h>
+#include <asm/cpumask.h>
 #ifdef CONFIG_X86_LOCAL_APIC
 #include <asm/mpspec.h>
 #include <asm/apic.h>
arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
@@ -235,8 +235,6 @@ static u32 get_cur_val(const struct cpumask *mask)
 		return 0;
 	}

-	cpumask_copy(cmd.mask, mask);
-
 	drv_read(&cmd);

 	dprintk("get_cur_val = %u\n", cmd.val);
arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -132,7 +132,16 @@ struct _cpuid4_info {
 	union _cpuid4_leaf_ecx ecx;
 	unsigned long size;
 	unsigned long can_disable;
-	cpumask_t shared_cpu_map;	/* future?: only cpus/node is needed */
+	DECLARE_BITMAP(shared_cpu_map, NR_CPUS);
+};
+
+/* subset of above _cpuid4_info w/o shared_cpu_map */
+struct _cpuid4_info_regs {
+	union _cpuid4_leaf_eax eax;
+	union _cpuid4_leaf_ebx ebx;
+	union _cpuid4_leaf_ecx ecx;
+	unsigned long size;
+	unsigned long can_disable;
 };

 #ifdef CONFIG_PCI
@@ -263,7 +272,7 @@ amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
 }

 static void __cpuinit
-amd_check_l3_disable(int index, struct _cpuid4_info *this_leaf)
+amd_check_l3_disable(int index, struct _cpuid4_info_regs *this_leaf)
 {
 	if (index < 3)
 		return;
@@ -271,7 +280,8 @@ amd_check_l3_disable(int index, struct _cpuid4_info *this_leaf)
 }

 static int
-__cpuinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_leaf)
+__cpuinit cpuid4_cache_lookup_regs(int index,
+				   struct _cpuid4_info_regs *this_leaf)
 {
 	union _cpuid4_leaf_eax eax;
 	union _cpuid4_leaf_ebx ebx;
@@ -299,6 +309,15 @@ __cpuinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_leaf)
 	return 0;
 }

+static int
+__cpuinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_leaf)
+{
+	struct _cpuid4_info_regs *leaf_regs =
+		(struct _cpuid4_info_regs *)this_leaf;
+
+	return cpuid4_cache_lookup_regs(index, leaf_regs);
+}
+
 static int __cpuinit find_num_cache_leaves(void)
 {
 	unsigned int		eax, ebx, ecx, edx;
@@ -338,11 +357,10 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
 		 * parameters cpuid leaf to find the cache details
 		 */
 		for (i = 0; i < num_cache_leaves; i++) {
-			struct _cpuid4_info this_leaf;
-
+			struct _cpuid4_info_regs this_leaf;
 			int retval;

-			retval = cpuid4_cache_lookup(i, &this_leaf);
+			retval = cpuid4_cache_lookup_regs(i, &this_leaf);
 			if (retval >= 0) {
 				switch (this_leaf.eax.split.level) {
 				case 1:
@@ -491,17 +509,20 @@ static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
 	num_threads_sharing = 1 + this_leaf->eax.split.num_threads_sharing;

 	if (num_threads_sharing == 1)
-		cpu_set(cpu, this_leaf->shared_cpu_map);
+		cpumask_set_cpu(cpu, to_cpumask(this_leaf->shared_cpu_map));
 	else {
 		index_msb = get_count_order(num_threads_sharing);

 		for_each_online_cpu(i) {
 			if (cpu_data(i).apicid >> index_msb ==
 			    c->apicid >> index_msb) {
-				cpu_set(i, this_leaf->shared_cpu_map);
+				cpumask_set_cpu(i,
+					to_cpumask(this_leaf->shared_cpu_map));
 				if (i != cpu && per_cpu(cpuid4_info, i))  {
-					sibling_leaf = CPUID4_INFO_IDX(i, index);
-					cpu_set(cpu, sibling_leaf->shared_cpu_map);
+					sibling_leaf =
+						CPUID4_INFO_IDX(i, index);
+					cpumask_set_cpu(cpu, to_cpumask(
+						sibling_leaf->shared_cpu_map));
 				}
 			}
 		}
@@ -513,9 +534,10 @@ static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index)
 	int sibling;

 	this_leaf = CPUID4_INFO_IDX(cpu, index);
-	for_each_cpu_mask_nr(sibling, this_leaf->shared_cpu_map) {
+	for_each_cpu(sibling, to_cpumask(this_leaf->shared_cpu_map)) {
 		sibling_leaf = CPUID4_INFO_IDX(sibling, index);
-		cpu_clear(cpu, sibling_leaf->shared_cpu_map);
+		cpumask_clear_cpu(cpu,
+				  to_cpumask(sibling_leaf->shared_cpu_map));
 	}
 }
 #else
@@ -620,8 +642,9 @@ static ssize_t show_shared_cpu_map_func(struct _cpuid4_info *this_leaf,
 	int n = 0;

 	if (len > 1) {
-		cpumask_t *mask = &this_leaf->shared_cpu_map;
+		const struct cpumask *mask;

+		mask = to_cpumask(this_leaf->shared_cpu_map);
 		n = type ?
 			cpulist_scnprintf(buf, len - 2, mask) :
 			cpumask_scnprintf(buf, len - 2, mask);
@@ -684,7 +707,8 @@ static struct pci_dev *get_k8_northbridge(int node)

 static ssize_t show_cache_disable(struct _cpuid4_info *this_leaf, char *buf)
 {
-	int node = cpu_to_node(first_cpu(this_leaf->shared_cpu_map));
+	const struct cpumask *mask = to_cpumask(this_leaf->shared_cpu_map);
+	int node = cpu_to_node(cpumask_first(mask));
 	struct pci_dev *dev = NULL;
 	ssize_t ret = 0;
 	int i;
@@ -718,7 +742,8 @@ static ssize_t
 store_cache_disable(struct _cpuid4_info *this_leaf, const char *buf,
 		    size_t count)
 {
-	int node = cpu_to_node(first_cpu(this_leaf->shared_cpu_map));
+	const struct cpumask *mask = to_cpumask(this_leaf->shared_cpu_map);
+	int node = cpu_to_node(cpumask_first(mask));
 	struct pci_dev *dev = NULL;
 	unsigned int ret, index, val;
@@ -863,7 +888,7 @@ static int __cpuinit cpuid4_cache_sysfs_init(unsigned int cpu)
 	return -ENOMEM;
 }

-static cpumask_t cache_dev_map = CPU_MASK_NONE;
+static DECLARE_BITMAP(cache_dev_map, NR_CPUS);

 /* Add/Remove cache interface for CPU device */
 static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
@@ -903,7 +928,7 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
 		}
 		kobject_uevent(&(this_object->kobj), KOBJ_ADD);
 	}
-	cpu_set(cpu, cache_dev_map);
+	cpumask_set_cpu(cpu, to_cpumask(cache_dev_map));

 	kobject_uevent(per_cpu(cache_kobject, cpu), KOBJ_ADD);
 	return 0;
@@ -916,9 +941,9 @@ static void __cpuinit cache_remove_dev(struct sys_device * sys_dev)

 	if (per_cpu(cpuid4_info, cpu) == NULL)
 		return;
-	if (!cpu_isset(cpu, cache_dev_map))
+	if (!cpumask_test_cpu(cpu, to_cpumask(cache_dev_map)))
 		return;
-	cpu_clear(cpu, cache_dev_map);
+	cpumask_clear_cpu(cpu, to_cpumask(cache_dev_map));

 	for (i = 0; i < num_cache_leaves; i++)
 		kobject_put(&(INDEX_KOBJECT_PTR(cpu, i)->kobj));
arch/x86/kernel/cpu/mcheck/mce_amd_64.c
@@ -67,7 +67,7 @@ static struct threshold_block threshold_defaults = {
 struct threshold_bank {
 	struct kobject *kobj;
 	struct threshold_block *blocks;
-	cpumask_t cpus;
+	cpumask_var_t cpus;
 };
 static DEFINE_PER_CPU(struct threshold_bank *, threshold_banks[NR_BANKS]);
@@ -481,7 +481,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)

 #ifdef CONFIG_SMP
 	if (cpu_data(cpu).cpu_core_id && shared_bank[bank]) {	/* symlink */
-		i = first_cpu(per_cpu(cpu_core_map, cpu));
+		i = cpumask_first(&per_cpu(cpu_core_map, cpu));

 		/* first core not up yet */
 		if (cpu_data(i).cpu_core_id)
@@ -501,7 +501,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
 		if (err)
 			goto out;

-		b->cpus = per_cpu(cpu_core_map, cpu);
+		cpumask_copy(b->cpus, &per_cpu(cpu_core_map, cpu));
 		per_cpu(threshold_banks, cpu)[bank] = b;
 		goto out;
 	}
@@ -512,15 +512,20 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
 		err = -ENOMEM;
 		goto out;
 	}
+	if (!alloc_cpumask_var(&b->cpus, GFP_KERNEL)) {
+		kfree(b);
+		err = -ENOMEM;
+		goto out;
+	}

 	b->kobj = kobject_create_and_add(name, &per_cpu(device_mce, cpu).kobj);
 	if (!b->kobj)
 		goto out_free;

 #ifndef CONFIG_SMP
-	b->cpus = CPU_MASK_ALL;
+	cpumask_setall(b->cpus);
 #else
-	b->cpus = per_cpu(cpu_core_map, cpu);
+	cpumask_copy(b->cpus, &per_cpu(cpu_core_map, cpu));
 #endif

 	per_cpu(threshold_banks, cpu)[bank] = b;
@@ -529,7 +534,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
 	if (err)
 		goto out_free;

-	for_each_cpu_mask_nr(i, b->cpus) {
+	for_each_cpu(i, b->cpus) {
 		if (i == cpu)
 			continue;
@@ -545,6 +550,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)

 out_free:
 	per_cpu(threshold_banks, cpu)[bank] = NULL;
+	free_cpumask_var(b->cpus);
 	kfree(b);
 out:
 	return err;
@@ -619,7 +625,7 @@ static void threshold_remove_bank(unsigned int cpu, int bank)
 #endif

 	/* remove all sibling symlinks before unregistering */
-	for_each_cpu_mask_nr(i, b->cpus) {
+	for_each_cpu(i, b->cpus) {
 		if (i == cpu)
 			continue;
@@ -632,6 +638,7 @@ static void threshold_remove_bank(unsigned int cpu, int bank)
 free_out:
 	kobject_del(b->kobj);
 	kobject_put(b->kobj);
+	free_cpumask_var(b->cpus);
 	kfree(b);
 	per_cpu(threshold_banks, cpu)[bank] = NULL;
 }
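The threshold_bank conversion above is the standard cpumask_var_t pattern: with CONFIG_CPUMASK_OFFSTACK=y the mask is allocated off-stack, otherwise it is embedded and alloc/free are effectively no-ops. A minimal sketch under those assumptions (my_bank and its helpers are hypothetical names, not taken from the patch):

	#include <linux/cpumask.h>
	#include <linux/percpu.h>
	#include <linux/slab.h>

	struct my_bank {
		cpumask_var_t cpus;	/* pointer or embedded mask, depending on config */
	};

	static int my_bank_init(struct my_bank *b, unsigned int cpu)
	{
		if (!alloc_cpumask_var(&b->cpus, GFP_KERNEL))
			return -ENOMEM;	/* can only fail with CPUMASK_OFFSTACK */
		cpumask_copy(b->cpus, &per_cpu(cpu_core_map, cpu));
		return 0;
	}

	static void my_bank_exit(struct my_bank *b)
	{
		free_cpumask_var(b->cpus);
	}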
arch/x86/kernel/crash.c
@@ -24,7 +24,7 @@
 #include <asm/apic.h>
 #include <asm/hpet.h>
 #include <linux/kdebug.h>
-#include <asm/smp.h>
+#include <asm/cpu.h>
 #include <asm/reboot.h>
 #include <asm/virtext.h>
arch/x86/kernel/io_apic.c (+92 -71)
(diff collapsed in the original page; contents not shown)
arch/x86/kernel/irq_32.c
@@ -248,7 +248,7 @@ void fixup_irqs(void)
 		if (irq == 2)
 			continue;

-		affinity = &desc->affinity;
+		affinity = desc->affinity;
 		if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
 			printk("Breaking affinity for irq %i\n", irq);
 			affinity = cpu_all_mask;
arch/x86/kernel/irq_64.c
@@ -100,7 +100,7 @@ void fixup_irqs(void)
 		/* interrupt's are disabled at this point */
 		spin_lock(&desc->lock);

-		affinity = &desc->affinity;
+		affinity = desc->affinity;
 		if (!irq_has_action(irq) ||
 		    cpumask_equal(affinity, cpu_online_mask)) {
 			spin_unlock(&desc->lock);
arch/x86/kernel/microcode_intel.c
@@ -87,9 +87,9 @@
 #include <linux/cpu.h>
 #include <linux/firmware.h>
 #include <linux/platform_device.h>
+#include <linux/uaccess.h>

 #include <asm/msr.h>
-#include <asm/uaccess.h>
 #include <asm/processor.h>
 #include <asm/microcode.h>
@@ -196,7 +196,7 @@ static inline int update_match_cpu(struct cpu_signature *csig, int sig, int pf)
 	return (!sigmatch(sig, csig->sig, pf, csig->pf)) ? 0 : 1;
 }

-static inline int
+static inline int
 update_match_revision(struct microcode_header_intel *mc_header, int rev)
 {
 	return (mc_header->rev <= rev) ? 0 : 1;
@@ -442,8 +442,8 @@ static int request_microcode_fw(int cpu, struct device *device)
 		return ret;
 	}

-	ret = generic_load_microcode(cpu, (void *)firmware->data, firmware->size,
-			&get_ucode_fw);
+	ret = generic_load_microcode(cpu, (void *)firmware->data,
+				     firmware->size, &get_ucode_fw);

 	release_firmware(firmware);
@@ -460,7 +460,7 @@ static int request_microcode_user(int cpu, const void __user *buf, size_t size)
 	/* We should bind the task to the CPU */
 	BUG_ON(cpu != raw_smp_processor_id());

-	return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
+	return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
 }

 static void microcode_fini_cpu(int cpu)
arch/x86/kernel/module_32.c
@@ -42,7 +42,7 @@ void module_free(struct module *mod, void *module_region)
 {
 	vfree(module_region);
 	/* FIXME: If module_region == mod->init_region, trim exception
 	   table entries. */
 }

 /* We don't need anything special. */
@@ -113,13 +113,13 @@ int module_finalize(const Elf_Ehdr *hdr,
 		*para = NULL;
 	char *secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;

 	for (s = sechdrs; s < sechdrs + hdr->e_shnum; s++) {
 		if (!strcmp(".text", secstrings + s->sh_name))
 			text = s;
 		if (!strcmp(".altinstructions", secstrings + s->sh_name))
 			alt = s;
 		if (!strcmp(".smp_locks", secstrings + s->sh_name))
-			locks= s;
+			locks = s;
 		if (!strcmp(".parainstructions", secstrings + s->sh_name))
 			para = s;
 	}
arch/x86/kernel/module_64.c
@@ -30,14 +30,14 @@
 #include <asm/page.h>
 #include <asm/pgtable.h>

 #define DEBUGP(fmt...)

 #ifndef CONFIG_UML
 void module_free(struct module *mod, void *module_region)
 {
 	vfree(module_region);
 	/* FIXME: If module_region == mod->init_region, trim exception
 	   table entries. */
 }

 void *module_alloc(unsigned long size)
@@ -77,7 +77,7 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
 	Elf64_Rela *rel = (void *)sechdrs[relsec].sh_addr;
 	Elf64_Sym *sym;
 	void *loc;
 	u64 val;

 	DEBUGP("Applying relocate section %u to %u\n", relsec,
 	       sechdrs[relsec].sh_info);
@@ -91,11 +91,11 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
 		sym = (Elf64_Sym *)sechdrs[symindex].sh_addr
 			+ ELF64_R_SYM(rel[i].r_info);

 		DEBUGP("type %d st_value %Lx r_addend %Lx loc %Lx\n",
 		       (int)ELF64_R_TYPE(rel[i].r_info),
 		       sym->st_value, rel[i].r_addend, (u64)loc);

 		val = sym->st_value + rel[i].r_addend;

 		switch (ELF64_R_TYPE(rel[i].r_info)) {
 		case R_X86_64_NONE:
@@ -113,16 +113,16 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
 			if ((s64)val != *(s32 *)loc)
 				goto overflow;
 			break;
 		case R_X86_64_PC32:
 			val -= (u64)loc;
 			*(u32 *)loc = val;
 #if 0
 			if ((s64)val != *(s32 *)loc)
 				goto overflow;
 #endif
 			break;
 		default:
-			printk(KERN_ERR "module %s: Unknown rela relocation: %Lu\n",
+			printk(KERN_ERR "module %s: Unknown rela relocation: %llu\n",
 			       me->name, ELF64_R_TYPE(rel[i].r_info));
 			return -ENOEXEC;
 		}
@@ -130,7 +130,7 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
 	return 0;

 overflow:
 	printk(KERN_ERR "overflow in relocation type %d val %Lx\n",
 	       (int)ELF64_R_TYPE(rel[i].r_info), val);
 	printk(KERN_ERR "`%s' likely not compiled with -mcmodel=kernel\n",
 	       me->name);
@@ -143,13 +143,13 @@ int apply_relocate(Elf_Shdr *sechdrs,
 		   unsigned int relsec,
 		   struct module *me)
 {
-	printk("non add relocation not supported\n");
+	printk(KERN_ERR "non add relocation not supported\n");
 	return -ENOSYS;
 }

 int module_finalize(const Elf_Ehdr *hdr,
 		    const Elf_Shdr *sechdrs,
 		    struct module *me)
 {
 	const Elf_Shdr *s, *text = NULL, *alt = NULL, *locks = NULL,
 		*para = NULL;
@@ -161,7 +161,7 @@ int module_finalize(const Elf_Ehdr *hdr,
 		if (!strcmp(".altinstructions", secstrings + s->sh_name))
 			alt = s;
 		if (!strcmp(".smp_locks", secstrings + s->sh_name))
-			locks= s;
+			locks = s;
 		if (!strcmp(".parainstructions", secstrings + s->sh_name))
 			para = s;
 	}
arch/x86/kernel/mpparse.c
...
@@ -144,11 +144,11 @@ static void __init MP_ioapic_info(struct mpc_ioapic *m)
 	if (bad_ioapic(m->apicaddr))
 		return;
 
-	mp_ioapics[nr_ioapics].mp_apicaddr = m->apicaddr;
-	mp_ioapics[nr_ioapics].mp_apicid = m->apicid;
-	mp_ioapics[nr_ioapics].mp_type = m->type;
-	mp_ioapics[nr_ioapics].mp_apicver = m->apicver;
-	mp_ioapics[nr_ioapics].mp_flags = m->flags;
+	mp_ioapics[nr_ioapics].apicaddr = m->apicaddr;
+	mp_ioapics[nr_ioapics].apicid = m->apicid;
+	mp_ioapics[nr_ioapics].type = m->type;
+	mp_ioapics[nr_ioapics].apicver = m->apicver;
+	mp_ioapics[nr_ioapics].flags = m->flags;
 	nr_ioapics++;
 }
...
@@ -160,55 +160,55 @@ static void print_MP_intsrc_info(struct mpc_intsrc *m)
 		m->srcbusirq, m->dstapic, m->dstirq);
 }
 
-static void __init print_mp_irq_info(struct mp_config_intsrc *mp_irq)
+static void __init print_mp_irq_info(struct mpc_intsrc *mp_irq)
 {
 	apic_printk(APIC_VERBOSE, "Int: type %d, pol %d, trig %d, bus %02x,"
 		" IRQ %02x, APIC ID %x, APIC INT %02x\n",
-		mp_irq->mp_irqtype, mp_irq->mp_irqflag & 3,
-		(mp_irq->mp_irqflag >> 2) & 3, mp_irq->mp_srcbus,
-		mp_irq->mp_srcbusirq, mp_irq->mp_dstapic, mp_irq->mp_dstirq);
+		mp_irq->irqtype, mp_irq->irqflag & 3,
+		(mp_irq->irqflag >> 2) & 3, mp_irq->srcbus,
+		mp_irq->srcbusirq, mp_irq->dstapic, mp_irq->dstirq);
 }
 
 static void __init assign_to_mp_irq(struct mpc_intsrc *m,
-				    struct mp_config_intsrc *mp_irq)
+				    struct mpc_intsrc *mp_irq)
 {
-	mp_irq->mp_dstapic = m->dstapic;
-	mp_irq->mp_type = m->type;
-	mp_irq->mp_irqtype = m->irqtype;
-	mp_irq->mp_irqflag = m->irqflag;
-	mp_irq->mp_srcbus = m->srcbus;
-	mp_irq->mp_srcbusirq = m->srcbusirq;
-	mp_irq->mp_dstirq = m->dstirq;
+	mp_irq->dstapic = m->dstapic;
+	mp_irq->type = m->type;
+	mp_irq->irqtype = m->irqtype;
+	mp_irq->irqflag = m->irqflag;
+	mp_irq->srcbus = m->srcbus;
+	mp_irq->srcbusirq = m->srcbusirq;
+	mp_irq->dstirq = m->dstirq;
 }
 
-static void __init assign_to_mpc_intsrc(struct mp_config_intsrc *mp_irq,
+static void __init assign_to_mpc_intsrc(struct mpc_intsrc *mp_irq,
 					struct mpc_intsrc *m)
 {
-	m->dstapic = mp_irq->mp_dstapic;
-	m->type = mp_irq->mp_type;
-	m->irqtype = mp_irq->mp_irqtype;
-	m->irqflag = mp_irq->mp_irqflag;
-	m->srcbus = mp_irq->mp_srcbus;
-	m->srcbusirq = mp_irq->mp_srcbusirq;
-	m->dstirq = mp_irq->mp_dstirq;
+	m->dstapic = mp_irq->dstapic;
+	m->type = mp_irq->type;
+	m->irqtype = mp_irq->irqtype;
+	m->irqflag = mp_irq->irqflag;
+	m->srcbus = mp_irq->srcbus;
+	m->srcbusirq = mp_irq->srcbusirq;
+	m->dstirq = mp_irq->dstirq;
 }
 
-static int __init mp_irq_mpc_intsrc_cmp(struct mp_config_intsrc *mp_irq,
+static int __init mp_irq_mpc_intsrc_cmp(struct mpc_intsrc *mp_irq,
 					struct mpc_intsrc *m)
 {
-	if (mp_irq->mp_dstapic != m->dstapic)
+	if (mp_irq->dstapic != m->dstapic)
 		return 1;
-	if (mp_irq->mp_type != m->type)
+	if (mp_irq->type != m->type)
 		return 2;
-	if (mp_irq->mp_irqtype != m->irqtype)
+	if (mp_irq->irqtype != m->irqtype)
 		return 3;
-	if (mp_irq->mp_irqflag != m->irqflag)
+	if (mp_irq->irqflag != m->irqflag)
 		return 4;
-	if (mp_irq->mp_srcbus != m->srcbus)
+	if (mp_irq->srcbus != m->srcbus)
 		return 5;
-	if (mp_irq->mp_srcbusirq != m->srcbusirq)
+	if (mp_irq->srcbusirq != m->srcbusirq)
 		return 6;
-	if (mp_irq->mp_dstirq != m->dstirq)
+	if (mp_irq->dstirq != m->dstirq)
 		return 7;
 
 	return 0;
...
@@ -417,7 +417,7 @@ static void __init construct_default_ioirq_mptable(int mpc_default_type)
 	intsrc.type = MP_INTSRC;
 	intsrc.irqflag = 0;	/* conforming */
 	intsrc.srcbus = 0;
-	intsrc.dstapic = mp_ioapics[0].mp_apicid;
+	intsrc.dstapic = mp_ioapics[0].apicid;
 
 	intsrc.irqtype = mp_INT;
...
@@ -570,14 +570,14 @@ static inline void __init construct_default_ISA_mptable(int mpc_default_type)
 	}
 }
 
-static struct intel_mp_floating *mpf_found;
+static struct mpf_intel *mpf_found;
 
 /*
  * Scan the memory blocks for an SMP configuration block.
  */
 static void __init __get_smp_config(unsigned int early)
 {
-	struct intel_mp_floating *mpf = mpf_found;
+	struct mpf_intel *mpf = mpf_found;
 
 	if (!mpf)
 		return;
...
@@ -598,9 +598,9 @@ static void __init __get_smp_config(unsigned int early)
 	}
 
 	printk(KERN_INFO "Intel MultiProcessor Specification v1.%d\n",
-	       mpf->mpf_specification);
+	       mpf->specification);
 #if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86_32)
-	if (mpf->mpf_feature2 & (1 << 7)) {
+	if (mpf->feature2 & (1 << 7)) {
 		printk(KERN_INFO "    IMCR and PIC compatibility mode.\n");
 		pic_mode = 1;
 	} else {
...
@@ -611,7 +611,7 @@ static void __init __get_smp_config(unsigned int early)
 	/*
 	 * Now see if we need to read further.
 	 */
-	if (mpf->mpf_feature1 != 0) {
+	if (mpf->feature1 != 0) {
 		if (early) {
 			/*
 			 * local APIC has default address
...
@@ -621,16 +621,16 @@ static void __init __get_smp_config(unsigned int early)
 		}
 
 		printk(KERN_INFO "Default MP configuration #%d\n",
-		       mpf->mpf_feature1);
-		construct_default_ISA_mptable(mpf->mpf_feature1);
+		       mpf->feature1);
+		construct_default_ISA_mptable(mpf->feature1);
 
-	} else if (mpf->mpf_physptr) {
+	} else if (mpf->physptr) {
 
 		/*
 		 * Read the physical hardware table.  Anything here will
 		 * override the defaults.
 		 */
-		if (!smp_read_mpc(phys_to_virt(mpf->mpf_physptr), early)) {
+		if (!smp_read_mpc(phys_to_virt(mpf->physptr), early)) {
 #ifdef CONFIG_X86_LOCAL_APIC
 			smp_found_config = 0;
 #endif
...
@@ -688,19 +688,19 @@ static int __init smp_scan_config(unsigned long base, unsigned long length,
 				  unsigned reserve)
 {
 	unsigned int *bp = phys_to_virt(base);
-	struct intel_mp_floating *mpf;
+	struct mpf_intel *mpf;
 
 	apic_printk(APIC_VERBOSE, "Scan SMP from %p for %ld bytes.\n",
 			bp, length);
 	BUILD_BUG_ON(sizeof(*mpf) != 16);
 
 	while (length > 0) {
-		mpf = (struct intel_mp_floating *)bp;
+		mpf = (struct mpf_intel *)bp;
 		if ((*bp == SMP_MAGIC_IDENT) &&
-		    (mpf->mpf_length == 1) &&
+		    (mpf->length == 1) &&
 		    !mpf_checksum((unsigned char *)bp, 16) &&
-		    ((mpf->mpf_specification == 1)
-		     || (mpf->mpf_specification == 4))) {
+		    ((mpf->specification == 1)
+		     || (mpf->specification == 4))) {
 #ifdef CONFIG_X86_LOCAL_APIC
 			smp_found_config = 1;
 #endif
...
@@ -713,7 +713,7 @@ static int __init smp_scan_config(unsigned long base, unsigned long length,
 				return 1;
 			reserve_bootmem_generic(virt_to_phys(mpf), PAGE_SIZE,
 					BOOTMEM_DEFAULT);
-			if (mpf->mpf_physptr) {
+			if (mpf->physptr) {
 				unsigned long size = PAGE_SIZE;
 #ifdef CONFIG_X86_32
 				/*
...
@@ -722,14 +722,14 @@ static int __init smp_scan_config(unsigned long base, unsigned long length,
 				 * the bottom is mapped now.
 				 * PC-9800's MPC table places on the very last
 				 * of physical memory; so that simply reserving
-				 * PAGE_SIZE from mpg->mpf_physptr yields BUG()
+				 * PAGE_SIZE from mpf->physptr yields BUG()
 				 * in reserve_bootmem.
 				 */
 				unsigned long end = max_low_pfn * PAGE_SIZE;
-				if (mpf->mpf_physptr + size > end)
-					size = end - mpf->mpf_physptr;
+				if (mpf->physptr + size > end)
+					size = end - mpf->physptr;
 #endif
-				reserve_bootmem_generic(mpf->mpf_physptr, size,
+				reserve_bootmem_generic(mpf->physptr, size,
 						BOOTMEM_DEFAULT);
 			}
...
@@ -809,15 +809,15 @@ static int __init get_MP_intsrc_index(struct mpc_intsrc *m)
 	/* not legacy */
 
 	for (i = 0; i < mp_irq_entries; i++) {
-		if (mp_irqs[i].mp_irqtype != mp_INT)
+		if (mp_irqs[i].irqtype != mp_INT)
 			continue;
 
-		if (mp_irqs[i].mp_irqflag != 0x0f)
+		if (mp_irqs[i].irqflag != 0x0f)
 			continue;
 
-		if (mp_irqs[i].mp_srcbus != m->srcbus)
+		if (mp_irqs[i].srcbus != m->srcbus)
 			continue;
-		if (mp_irqs[i].mp_srcbusirq != m->srcbusirq)
+		if (mp_irqs[i].srcbusirq != m->srcbusirq)
 			continue;
 		if (irq_used[i]) {
 			/* already claimed */
...
@@ -922,10 +922,10 @@ static int __init replace_intsrc_all(struct mpc_table *mpc,
 		if (irq_used[i])
 			continue;
 
-		if (mp_irqs[i].mp_irqtype != mp_INT)
+		if (mp_irqs[i].irqtype != mp_INT)
 			continue;
 
-		if (mp_irqs[i].mp_irqflag != 0x0f)
+		if (mp_irqs[i].irqflag != 0x0f)
 			continue;
 
 		if (nr_m_spare > 0) {
...
@@ -1001,7 +1001,7 @@ static int __init update_mp_table(void)
 {
 	char str[16];
 	char oem[10];
-	struct intel_mp_floating *mpf;
+	struct mpf_intel *mpf;
 	struct mpc_table *mpc, *mpc_new;
 
 	if (!enable_update_mptable)
...
@@ -1014,19 +1014,19 @@ static int __init update_mp_table(void)
 	/*
 	 * Now see if we need to go further.
 	 */
-	if (mpf->mpf_feature1 != 0)
+	if (mpf->feature1 != 0)
 		return 0;
 
-	if (!mpf->mpf_physptr)
+	if (!mpf->physptr)
 		return 0;
 
-	mpc = phys_to_virt(mpf->mpf_physptr);
+	mpc = phys_to_virt(mpf->physptr);
 
 	if (!smp_check_mpc(mpc, oem, str))
 		return 0;
 
 	printk(KERN_INFO "mpf: %lx\n", virt_to_phys(mpf));
-	printk(KERN_INFO "mpf_physptr: %x\n", mpf->mpf_physptr);
+	printk(KERN_INFO "physptr: %x\n", mpf->physptr);
 
 	if (mpc_new_phys && mpc->length > mpc_new_length) {
 		mpc_new_phys = 0;
...
@@ -1047,23 +1047,23 @@ static int __init update_mp_table(void)
 		}
 		printk(KERN_INFO "use in-positon replacing\n");
 	} else {
-		mpf->mpf_physptr = mpc_new_phys;
+		mpf->physptr = mpc_new_phys;
 		mpc_new = phys_to_virt(mpc_new_phys);
 		memcpy(mpc_new, mpc, mpc->length);
 		mpc = mpc_new;
 		/* check if we can modify that */
-		if (mpc_new_phys - mpf->mpf_physptr) {
-			struct intel_mp_floating *mpf_new;
+		if (mpc_new_phys - mpf->physptr) {
+			struct mpf_intel *mpf_new;
 			/* steal 16 bytes from [0, 1k) */
 			printk(KERN_INFO "mpf new: %x\n", 0x400 - 16);
 			mpf_new = phys_to_virt(0x400 - 16);
 			memcpy(mpf_new, mpf, 16);
 			mpf = mpf_new;
-			mpf->mpf_physptr = mpc_new_phys;
+			mpf->physptr = mpc_new_phys;
 		}
-		mpf->mpf_checksum = 0;
-		mpf->mpf_checksum -= mpf_checksum((unsigned char *)mpf, 16);
-		printk(KERN_INFO "mpf_physptr new: %x\n", mpf->mpf_physptr);
+		mpf->checksum = 0;
+		mpf->checksum -= mpf_checksum((unsigned char *)mpf, 16);
+		printk(KERN_INFO "physptr new: %x\n", mpf->physptr);
 	}
 
 	/*
...
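The re-checksumming above relies on the MP spec rule that all 16 bytes of the floating pointer structure must sum to zero modulo 256: clearing the checksum byte and then subtracting the byte sum makes the total wrap to zero. A minimal stand-alone sketch of that arithmetic; the helper name, the byte layout (checksum at offset 10), and the sample values are illustrative assumptions, not taken from this commit.

#include <stdio.h>

/* Sum of all 16 bytes must be 0 (mod 256), per the Intel MP spec. */
static unsigned char mp_checksum(const unsigned char *p, int len)
{
    unsigned char sum = 0;
    while (len--)
        sum += *p++;
    return sum;
}

int main(void)
{
    unsigned char mpf[16] = { '_', 'M', 'P', '_', 0x00, 0x01, 0x04 };

    mpf[10] = 0;                      /* checksum byte (assumed offset) */
    mpf[10] -= mp_checksum(mpf, 16);  /* now the 16 bytes sum to 0 */

    printf("checksum byte = 0x%02x, total = %u\n",
           mpf[10], mp_checksum(mpf, 16));
    return 0;
}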
arch/x86/kernel/msr.c
...
@@ -35,10 +35,10 @@
 #include <linux/device.h>
 #include <linux/cpu.h>
 #include <linux/notifier.h>
+#include <linux/uaccess.h>
 
 #include <asm/processor.h>
 #include <asm/msr.h>
-#include <asm/uaccess.h>
 #include <asm/system.h>
 
 static struct class *msr_class;
...
arch/x86/kernel/reboot.c
...
@@ -14,6 +14,7 @@
 #include <asm/reboot.h>
 #include <asm/pci_x86.h>
 #include <asm/virtext.h>
+#include <asm/cpu.h>
 
 #ifdef CONFIG_X86_32
 # include <linux/dmi.h>
...
arch/x86/kernel/setup.c
...
@@ -89,7 +89,7 @@
 #include <asm/system.h>
 #include <asm/vsyscall.h>
-#include <asm/smp.h>
+#include <asm/cpu.h>
 #include <asm/desc.h>
 #include <asm/dma.h>
 #include <asm/iommu.h>
...
arch/x86/kernel/setup_percpu.c
...
@@ -13,6 +13,7 @@
 #include <asm/mpspec.h>
 #include <asm/apicdef.h>
 #include <asm/highmem.h>
+#include <asm/cpumask.h>
 
 #ifdef CONFIG_X86_LOCAL_APIC
 unsigned int num_processors;
...
arch/x86/kernel/smpboot.c
...
@@ -53,7 +53,6 @@
 #include <asm/nmi.h>
 #include <asm/irq.h>
 #include <asm/idle.h>
-#include <asm/smp.h>
 #include <asm/trampoline.h>
 #include <asm/cpu.h>
 #include <asm/numa.h>
...
@@ -1125,6 +1124,7 @@ static int __init smp_sanity_check(unsigned max_cpus)
 		printk(KERN_ERR "... forcing use of dummy APIC emulation."
 				"(tell your hw vendor)\n");
 		smpboot_clear_io_apic();
+		disable_ioapic_setup();
 		return -1;
 	}
...
arch/x86/kernel/tlb_32.c
...
@@ -20,7 +20,7 @@ DEFINE_PER_CPU(struct tlb_state, cpu_tlbstate)
  *	Optimizations Manfred Spraul <manfred@colorfullife.com>
  */
 
-static cpumask_t flush_cpumask;
+static cpumask_var_t flush_cpumask;
 static struct mm_struct *flush_mm;
 static unsigned long flush_va;
 static DEFINE_SPINLOCK(tlbstate_lock);
...
@@ -92,7 +92,7 @@ void smp_invalidate_interrupt(struct pt_regs *regs)
 
 	cpu = get_cpu();
 
-	if (!cpu_isset(cpu, flush_cpumask))
+	if (!cpumask_test_cpu(cpu, flush_cpumask))
 		goto out;
 		/*
 		 * This was a BUG() but until someone can quote me the
...
@@ -114,35 +114,22 @@ void smp_invalidate_interrupt(struct pt_regs *regs)
 	}
 	ack_APIC_irq();
 	smp_mb__before_clear_bit();
-	cpu_clear(cpu, flush_cpumask);
+	cpumask_clear_cpu(cpu, flush_cpumask);
 	smp_mb__after_clear_bit();
 out:
 	put_cpu_no_resched();
 	inc_irq_stat(irq_tlb_count);
 }
 
-void native_flush_tlb_others(const cpumask_t *cpumaskp, struct mm_struct *mm,
-			     unsigned long va)
+void native_flush_tlb_others(const struct cpumask *cpumask,
+			     struct mm_struct *mm, unsigned long va)
 {
-	cpumask_t cpumask = *cpumaskp;
-
 	/*
 	 * A couple of (to be removed) sanity checks:
 	 *
 	 * - current CPU must not be in mask
 	 * - mask must exist :)
 	 */
-	BUG_ON(cpus_empty(cpumask));
-	BUG_ON(cpu_isset(smp_processor_id(), cpumask));
+	BUG_ON(cpumask_empty(cpumask));
 	BUG_ON(!mm);
 
-#ifdef CONFIG_HOTPLUG_CPU
-	/* If a CPU which we ran on has gone down, OK. */
-	cpus_and(cpumask, cpumask, cpu_online_map);
-	if (unlikely(cpus_empty(cpumask)))
-		return;
-#endif
-
 	/*
 	 * i'm not happy about this global shared spinlock in the
 	 * MM hot path, but we'll see how contended it is.
...
@@ -150,9 +137,17 @@ void native_flush_tlb_others(const cpumask_t *cpumaskp, struct mm_struct *mm,
 	 */
 	spin_lock(&tlbstate_lock);
 
+	cpumask_andnot(flush_cpumask, cpumask, cpumask_of(smp_processor_id()));
+#ifdef CONFIG_HOTPLUG_CPU
+	/* If a CPU which we ran on has gone down, OK. */
+	cpumask_and(flush_cpumask, flush_cpumask, cpu_online_mask);
+	if (unlikely(cpumask_empty(flush_cpumask))) {
+		spin_unlock(&tlbstate_lock);
+		return;
+	}
+#endif
 	flush_mm = mm;
 	flush_va = va;
-	cpus_or(flush_cpumask, cpumask, flush_cpumask);
 
 	/*
 	 * Make the above memory operations globally visible before
...
@@ -163,9 +158,9 @@ void native_flush_tlb_others(const cpumask_t *cpumaskp, struct mm_struct *mm,
 	 * We have to send the IPI only to
 	 * CPUs affected.
 	 */
-	send_IPI_mask(&cpumask, INVALIDATE_TLB_VECTOR);
+	send_IPI_mask(flush_cpumask, INVALIDATE_TLB_VECTOR);
 
-	while (!cpus_empty(flush_cpumask))
+	while (!cpumask_empty(flush_cpumask))
 		/* nothing. lockup detection does not belong here */
 		cpu_relax();
...
@@ -177,25 +172,19 @@ void native_flush_tlb_others(const cpumask_t *cpumaskp, struct mm_struct *mm,
 void flush_tlb_current_task(void)
 {
 	struct mm_struct *mm = current->mm;
-	cpumask_t cpu_mask;
 
 	preempt_disable();
-	cpu_mask = mm->cpu_vm_mask;
-	cpu_clear(smp_processor_id(), cpu_mask);
 
 	local_flush_tlb();
-	if (!cpus_empty(cpu_mask))
-		flush_tlb_others(cpu_mask, mm, TLB_FLUSH_ALL);
+	if (cpumask_any_but(&mm->cpu_vm_mask, smp_processor_id()) < nr_cpu_ids)
+		flush_tlb_others(&mm->cpu_vm_mask, mm, TLB_FLUSH_ALL);
 	preempt_enable();
 }
 
 void flush_tlb_mm(struct mm_struct *mm)
 {
-	cpumask_t cpu_mask;
 
 	preempt_disable();
-	cpu_mask = mm->cpu_vm_mask;
-	cpu_clear(smp_processor_id(), cpu_mask);
 
 	if (current->active_mm == mm) {
 		if (current->mm)
...
@@ -203,8 +192,8 @@ void flush_tlb_mm(struct mm_struct *mm)
 		else
 			leave_mm(smp_processor_id());
 	}
-	if (!cpus_empty(cpu_mask))
-		flush_tlb_others(cpu_mask, mm, TLB_FLUSH_ALL);
+	if (cpumask_any_but(&mm->cpu_vm_mask, smp_processor_id()) < nr_cpu_ids)
+		flush_tlb_others(&mm->cpu_vm_mask, mm, TLB_FLUSH_ALL);
 
 	preempt_enable();
 }
...
@@ -212,11 +201,8 @@ void flush_tlb_mm(struct mm_struct *mm)
 void flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
 {
 	struct mm_struct *mm = vma->vm_mm;
-	cpumask_t cpu_mask;
 
 	preempt_disable();
-	cpu_mask = mm->cpu_vm_mask;
-	cpu_clear(smp_processor_id(), cpu_mask);
 
 	if (current->active_mm == mm) {
 		if (current->mm)
...
@@ -225,9 +211,8 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
 			leave_mm(smp_processor_id());
 	}
 
-	if (!cpus_empty(cpu_mask))
-		flush_tlb_others(cpu_mask, mm, va);
+	if (cpumask_any_but(&mm->cpu_vm_mask, smp_processor_id()) < nr_cpu_ids)
+		flush_tlb_others(&mm->cpu_vm_mask, mm, va);
 
 	preempt_enable();
 }
 EXPORT_SYMBOL(flush_tlb_page);
...
@@ -254,3 +239,9 @@ void reset_lazy_tlbstate(void)
 	per_cpu(cpu_tlbstate, cpu).active_mm = &init_mm;
 }
 
+static int init_flush_cpumask(void)
+{
+	alloc_cpumask_var(&flush_cpumask, GFP_KERNEL);
+	return 0;
+}
+early_initcall(init_flush_cpumask);
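The flush_tlb_* helpers above now avoid an on-stack cpumask copy: instead of copying mm->cpu_vm_mask and clearing the current CPU, they ask whether any CPU other than the current one is in the mask. A toy user-space sketch of that "any but me" test, using a plain bitmask as a hypothetical stand-in for the kernel's cpumask_any_but():

#include <stdio.h>

#define NR_CPUS 8

/* Return the first set bit other than 'cpu', or NR_CPUS if none. */
static int mask_any_but(unsigned int mask, int cpu)
{
    int i;

    for (i = 0; i < NR_CPUS; i++)
        if (i != cpu && (mask & (1u << i)))
            return i;
    return NR_CPUS;
}

int main(void)
{
    unsigned int cpu_vm_mask = 0x05;  /* CPUs 0 and 2 ran this mm (assumed) */
    int self = 0;

    /* Mirrors: if (cpumask_any_but(&mm->cpu_vm_mask, self) < nr_cpu_ids)
     *              flush_tlb_others(...);                                 */
    if (mask_any_but(cpu_vm_mask, self) < NR_CPUS)
        printf("other CPUs have this mm cached: send flush IPIs\n");
    else
        printf("only this CPU: local flush is enough\n");
    return 0;
}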
arch/x86/kernel/tlb_64.c
...
@@ -43,10 +43,10 @@
 
 union smp_flush_state {
 	struct {
-		cpumask_t flush_cpumask;
 		struct mm_struct *flush_mm;
 		unsigned long flush_va;
 		spinlock_t tlbstate_lock;
+		DECLARE_BITMAP(flush_cpumask, NR_CPUS);
 	};
 	char pad[SMP_CACHE_BYTES];
 } ____cacheline_aligned;
...
@@ -131,7 +131,7 @@ asmlinkage void smp_invalidate_interrupt(struct pt_regs *regs)
 	sender = ~regs->orig_ax - INVALIDATE_TLB_VECTOR_START;
 	f = &per_cpu(flush_state, sender);
 
-	if (!cpu_isset(cpu, f->flush_cpumask))
+	if (!cpumask_test_cpu(cpu, to_cpumask(f->flush_cpumask)))
 		goto out;
 		/*
 		 * This was a BUG() but until someone can quote me the
...
@@ -153,19 +153,15 @@ asmlinkage void smp_invalidate_interrupt(struct pt_regs *regs)
 	}
 out:
 	ack_APIC_irq();
-	cpu_clear(cpu, f->flush_cpumask);
+	cpumask_clear_cpu(cpu, to_cpumask(f->flush_cpumask));
 	inc_irq_stat(irq_tlb_count);
 }
 
-void native_flush_tlb_others(const cpumask_t *cpumaskp, struct mm_struct *mm,
-			     unsigned long va)
+static void flush_tlb_others_ipi(const struct cpumask *cpumask,
+				 struct mm_struct *mm, unsigned long va)
 {
 	int sender;
 	union smp_flush_state *f;
-	cpumask_t cpumask = *cpumaskp;
-
-	if (is_uv_system() && uv_flush_tlb_others(&cpumask, mm, va))
-		return;
 
 	/* Caller has disabled preemption */
 	sender = smp_processor_id() % NUM_INVALIDATE_TLB_VECTORS;
...
@@ -180,7 +176,8 @@ void native_flush_tlb_others(const cpumask_t *cpumaskp, struct mm_struct *mm,
 	f->flush_mm = mm;
 	f->flush_va = va;
-	cpus_or(f->flush_cpumask, cpumask, f->flush_cpumask);
+	cpumask_andnot(to_cpumask(f->flush_cpumask),
+		       cpumask, cpumask_of(smp_processor_id()));
 
 	/*
 	 * Make the above memory operations globally visible before
...
@@ -191,9 +188,10 @@ void native_flush_tlb_others(const cpumask_t *cpumaskp, struct mm_struct *mm,
 	 * We have to send the IPI only to
 	 * CPUs affected.
 	 */
-	send_IPI_mask(&cpumask, INVALIDATE_TLB_VECTOR_START + sender);
+	send_IPI_mask(to_cpumask(f->flush_cpumask),
+		      INVALIDATE_TLB_VECTOR_START + sender);
 
-	while (!cpus_empty(f->flush_cpumask))
+	while (!cpumask_empty(to_cpumask(f->flush_cpumask)))
 		cpu_relax();
 
 	f->flush_mm = NULL;
...
@@ -201,6 +199,25 @@ void native_flush_tlb_others(const cpumask_t *cpumaskp, struct mm_struct *mm,
 	spin_unlock(&f->tlbstate_lock);
 }
 
+void native_flush_tlb_others(const struct cpumask *cpumask,
+			     struct mm_struct *mm, unsigned long va)
+{
+	if (is_uv_system()) {
+		/* FIXME: could be an percpu_alloc'd thing */
+		static DEFINE_PER_CPU(cpumask_t, flush_tlb_mask);
+		struct cpumask *after_uv_flush = &get_cpu_var(flush_tlb_mask);
+
+		cpumask_andnot(after_uv_flush,
+			       cpumask, cpumask_of(smp_processor_id()));
+		if (!uv_flush_tlb_others(after_uv_flush, mm, va))
+			flush_tlb_others_ipi(after_uv_flush, mm, va);
+		put_cpu_var(flush_tlb_uv_cpumask);
+		return;
+	}
+	flush_tlb_others_ipi(cpumask, mm, va);
+}
+
 static int __cpuinit init_smp_flush(void)
 {
 	int i;
...
@@ -215,25 +232,18 @@ core_initcall(init_smp_flush);
 void flush_tlb_current_task(void)
 {
 	struct mm_struct *mm = current->mm;
-	cpumask_t cpu_mask;
 
 	preempt_disable();
-	cpu_mask = mm->cpu_vm_mask;
-	cpu_clear(smp_processor_id(), cpu_mask);
 
 	local_flush_tlb();
-	if (!cpus_empty(cpu_mask))
-		flush_tlb_others(cpu_mask, mm, TLB_FLUSH_ALL);
+	if (cpumask_any_but(&mm->cpu_vm_mask, smp_processor_id()) < nr_cpu_ids)
+		flush_tlb_others(&mm->cpu_vm_mask, mm, TLB_FLUSH_ALL);
 	preempt_enable();
 }
 
 void flush_tlb_mm(struct mm_struct *mm)
 {
-	cpumask_t cpu_mask;
 
 	preempt_disable();
-	cpu_mask = mm->cpu_vm_mask;
-	cpu_clear(smp_processor_id(), cpu_mask);
 
 	if (current->active_mm == mm) {
 		if (current->mm)
...
@@ -241,8 +251,8 @@ void flush_tlb_mm(struct mm_struct *mm)
 		else
 			leave_mm(smp_processor_id());
 	}
-	if (!cpus_empty(cpu_mask))
-		flush_tlb_others(cpu_mask, mm, TLB_FLUSH_ALL);
+	if (cpumask_any_but(&mm->cpu_vm_mask, smp_processor_id()) < nr_cpu_ids)
+		flush_tlb_others(&mm->cpu_vm_mask, mm, TLB_FLUSH_ALL);
 
 	preempt_enable();
 }
...
@@ -250,11 +260,8 @@ void flush_tlb_mm(struct mm_struct *mm)
 void flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
 {
 	struct mm_struct *mm = vma->vm_mm;
-	cpumask_t cpu_mask;
 
 	preempt_disable();
-	cpu_mask = mm->cpu_vm_mask;
-	cpu_clear(smp_processor_id(), cpu_mask);
 
 	if (current->active_mm == mm) {
 		if (current->mm)
...
@@ -263,8 +270,8 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
 			leave_mm(smp_processor_id());
 	}
 
-	if (!cpus_empty(cpu_mask))
-		flush_tlb_others(cpu_mask, mm, va);
+	if (cpumask_any_but(&mm->cpu_vm_mask, smp_processor_id()) < nr_cpu_ids)
+		flush_tlb_others(&mm->cpu_vm_mask, mm, va);
 
 	preempt_enable();
 }
...
arch/x86/kernel/tlb_uv.c
...
@@ -212,11 +212,11 @@ static int uv_wait_completion(struct bau_desc *bau_desc,
  * The cpumaskp mask contains the cpus the broadcast was sent to.
  *
  * Returns 1 if all remote flushing was done. The mask is zeroed.
- * Returns 0 if some remote flushing remains to be done. The mask is left
- * unchanged.
+ * Returns 0 if some remote flushing remains to be done. The mask will have
+ * some bits still set.
  */
 int uv_flush_send_and_wait(int cpu, int this_blade, struct bau_desc *bau_desc,
-			   cpumask_t *cpumaskp)
+			   struct cpumask *cpumaskp)
 {
 	int completion_status = 0;
 	int right_shift;
...
@@ -263,13 +263,13 @@ int uv_flush_send_and_wait(int cpu, int this_blade, struct bau_desc *bau_desc,
 	 * Success, so clear the remote cpu's from the mask so we don't
 	 * use the IPI method of shootdown on them.
 	 */
-	for_each_cpu_mask(bit, *cpumaskp) {
+	for_each_cpu(bit, cpumaskp) {
 		blade = uv_cpu_to_blade_id(bit);
 		if (blade == this_blade)
 			continue;
-		cpu_clear(bit, *cpumaskp);
+		cpumask_clear_cpu(bit, cpumaskp);
 	}
-	if (!cpus_empty(*cpumaskp))
+	if (!cpumask_empty(cpumaskp))
 		return 0;
 	return 1;
 }
...
@@ -296,7 +296,7 @@ int uv_flush_send_and_wait(int cpu, int this_blade, struct bau_desc *bau_desc,
  * Returns 1 if all remote flushing was done.
  * Returns 0 if some remote flushing remains to be done.
  */
-int uv_flush_tlb_others(cpumask_t *cpumaskp, struct mm_struct *mm,
+int uv_flush_tlb_others(struct cpumask *cpumaskp, struct mm_struct *mm,
 			unsigned long va)
 {
 	int i;
...
@@ -315,7 +315,7 @@ int uv_flush_tlb_others(cpumask_t *cpumaskp, struct mm_struct *mm,
 	bau_nodes_clear(&bau_desc->distribution, UV_DISTRIBUTION_SIZE);
 
 	i = 0;
-	for_each_cpu_mask(bit, *cpumaskp) {
+	for_each_cpu(bit, cpumaskp) {
 		blade = uv_cpu_to_blade_id(bit);
 		BUG_ON(blade > (UV_DISTRIBUTION_SIZE - 1));
 		if (blade == this_blade) {
...
arch/x86/mach-voyager/setup.c
...
@@ -9,6 +9,7 @@
 #include <asm/e820.h>
 #include <asm/io.h>
 #include <asm/setup.h>
+#include <asm/cpu.h>
 
 void __init pre_intr_init_hook(void)
 {
...
arch/x86/mm/init_32.c
...
@@ -49,7 +49,6 @@
 #include <asm/paravirt.h>
 #include <asm/setup.h>
 #include <asm/cacheflush.h>
-#include <asm/smp.h>
 
 unsigned int __VMALLOC_RESERVE = 128 << 20;
...
arch/x86/mm/pat.c
...
@@ -333,11 +333,20 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
 			      req_type & _PAGE_CACHE_MASK);
 	}
 
-	is_range_ram = pagerange_is_ram(start, end);
-	if (is_range_ram == 1)
-		return reserve_ram_pages_type(start, end, req_type, new_type);
-	else if (is_range_ram < 0)
-		return -EINVAL;
+	/*
+	 * For legacy reasons, some parts of the physical address range in the
+	 * legacy 1MB region is treated as non-RAM (even when listed as RAM in
+	 * the e820 tables). So we will track the memory attributes of this
+	 * legacy 1MB region using the linear memtype_list always.
+	 */
+	if (end >= ISA_END_ADDRESS) {
+		is_range_ram = pagerange_is_ram(start, end);
+		if (is_range_ram == 1)
+			return reserve_ram_pages_type(start, end, req_type,
+						      new_type);
+		else if (is_range_ram < 0)
+			return -EINVAL;
+	}
 
 	new  = kmalloc(sizeof(struct memtype), GFP_KERNEL);
 	if (!new)
...
@@ -437,11 +446,19 @@ int free_memtype(u64 start, u64 end)
 	if (is_ISA_range(start, end - 1))
 		return 0;
 
-	is_range_ram = pagerange_is_ram(start, end);
-	if (is_range_ram == 1)
-		return free_ram_pages_type(start, end);
-	else if (is_range_ram < 0)
-		return -EINVAL;
+	/*
+	 * For legacy reasons, some parts of the physical address range in the
+	 * legacy 1MB region is treated as non-RAM (even when listed as RAM in
+	 * the e820 tables). So we will track the memory attributes of this
+	 * legacy 1MB region using the linear memtype_list always.
+	 */
+	if (end >= ISA_END_ADDRESS) {
+		is_range_ram = pagerange_is_ram(start, end);
+		if (is_range_ram == 1)
+			return free_ram_pages_type(start, end);
+		else if (is_range_ram < 0)
+			return -EINVAL;
+	}
 
 	spin_lock(&memtype_lock);
 	list_for_each_entry(entry, &memtype_list, nd) {
...
arch/x86/xen/enlighten.c
...
@@ -634,35 +634,27 @@ static void xen_flush_tlb_single(unsigned long addr)
 	preempt_enable();
 }
 
-static void xen_flush_tlb_others(const cpumask_t *cpus, struct mm_struct *mm,
-				 unsigned long va)
+static void xen_flush_tlb_others(const struct cpumask *cpus,
+				 struct mm_struct *mm, unsigned long va)
 {
 	struct {
 		struct mmuext_op op;
-		cpumask_t mask;
+		DECLARE_BITMAP(mask, NR_CPUS);
 	} *args;
-	cpumask_t cpumask = *cpus;
 	struct multicall_space mcs;
 
 	/*
 	 * A couple of (to be removed) sanity checks:
 	 *
 	 * - current CPU must not be in mask
 	 * - mask must exist :)
 	 */
-	BUG_ON(cpus_empty(cpumask));
-	BUG_ON(cpu_isset(smp_processor_id(), cpumask));
+	BUG_ON(cpumask_empty(cpus));
 	BUG_ON(!mm);
 
-	/* If a CPU which we ran on has gone down, OK. */
-	cpus_and(cpumask, cpumask, cpu_online_map);
-	if (cpus_empty(cpumask))
-		return;
-
 	mcs = xen_mc_entry(sizeof(*args));
 	args = mcs.args;
-	args->mask = cpumask;
-	args->op.arg2.vcpumask = &args->mask;
+	args->op.arg2.vcpumask = to_cpumask(args->mask);
+
+	/* Remove us, and any offline CPUS. */
+	cpumask_and(to_cpumask(args->mask), cpus, cpu_online_mask);
+	cpumask_clear_cpu(smp_processor_id(), to_cpumask(args->mask));
+	if (unlikely(cpumask_empty(to_cpumask(args->mask))))
+		goto issue;
 
 	if (va == TLB_FLUSH_ALL) {
 		args->op.cmd = MMUEXT_TLB_FLUSH_MULTI;
...
@@ -673,6 +665,7 @@ static void xen_flush_tlb_others(const cpumask_t *cpus, struct mm_struct *mm,
 	MULTI_mmuext_op(mcs.mc, &args->op, 1, NULL, DOMID_SELF);
 
+issue:
 	xen_mc_issue(PARAVIRT_LAZY_MMU);
 }
...
drivers/base/cpu.c
...
@@ -107,7 +107,7 @@ static SYSDEV_ATTR(crash_notes, 0400, show_crash_notes, NULL);
 /*
  * Print cpu online, possible, present, and system maps
  */
-static ssize_t print_cpus_map(char *buf, cpumask_t *map)
+static ssize_t print_cpus_map(char *buf, const struct cpumask *map)
 {
 	int n = cpulist_scnprintf(buf, PAGE_SIZE-2, map);
...
drivers/base/topology.c
...
@@ -31,7 +31,10 @@
 #include <linux/hardirq.h>
 #include <linux/topology.h>
 
-#define define_one_ro(_name)				\
+#define define_one_ro_named(_name, _func)			\
+static SYSDEV_ATTR(_name, 0444, _func, NULL)
+
+#define define_one_ro(_name)				\
 static SYSDEV_ATTR(_name, 0444, show_##_name, NULL)
 
 #define define_id_show_func(name)				\
...
@@ -42,8 +45,8 @@ static ssize_t show_##name(struct sys_device *dev,		\
 	return sprintf(buf, "%d\n", topology_##name(cpu));	\
 }
 
-#if defined(topology_thread_siblings) || defined(topology_core_siblings)
-static ssize_t show_cpumap(int type, cpumask_t *mask, char *buf)
+#if defined(topology_thread_cpumask) || defined(topology_core_cpumask)
+static ssize_t show_cpumap(int type, const struct cpumask *mask, char *buf)
 {
 	ptrdiff_t len = PTR_ALIGN(buf + PAGE_SIZE - 1, PAGE_SIZE) - buf;
 	int n = 0;
...
@@ -65,7 +68,7 @@ static ssize_t show_##name(struct sys_device *dev,		\
 			   struct sysdev_attribute *attr, char *buf)	\
 {									\
 	unsigned int cpu = dev->id;					\
-	return show_cpumap(0, &(topology_##name(cpu)), buf);		\
+	return show_cpumap(0, topology_##name(cpu), buf);		\
 }
 
 #define define_siblings_show_list(name)				\
...
@@ -74,7 +77,7 @@ static ssize_t show_##name##_list(struct sys_device *dev,	\
 				  char *buf)			\
 {								\
 	unsigned int cpu = dev->id;				\
-	return show_cpumap(1, &(topology_##name(cpu)), buf);	\
+	return show_cpumap(1, topology_##name(cpu), buf);	\
 }
 
 #else
...
@@ -82,9 +85,7 @@ static ssize_t show_##name##_list(struct sys_device *dev,	\
 static ssize_t show_##name(struct sys_device *dev,		\
 			   struct sysdev_attribute *attr, char *buf)	\
 {									\
-	unsigned int cpu = dev->id;					\
-	cpumask_t mask = topology_##name(cpu);				\
-	return show_cpumap(0, &mask, buf);				\
+	return show_cpumap(0, topology_##name(dev->id), buf);		\
 }
 
 #define define_siblings_show_list(name)				\
...
@@ -92,9 +93,7 @@ static ssize_t show_##name##_list(struct sys_device *dev,	\
 				  struct sysdev_attribute *attr, \
 				  char *buf)			\
 {								\
-	unsigned int cpu = dev->id;				\
-	cpumask_t mask = topology_##name(cpu);			\
-	return show_cpumap(1, &mask, buf);			\
+	return show_cpumap(1, topology_##name(dev->id), buf);	\
 }
 #endif
...
@@ -107,13 +106,13 @@ define_one_ro(physical_package_id);
 define_id_show_func(core_id);
 define_one_ro(core_id);
 
-define_siblings_show_func(thread_siblings);
-define_one_ro(thread_siblings);
-define_one_ro(thread_siblings_list);
+define_siblings_show_func(thread_cpumask);
+define_one_ro_named(thread_siblings, show_thread_cpumask);
+define_one_ro_named(thread_siblings_list, show_thread_cpumask_list);
 
-define_siblings_show_func(core_siblings);
-define_one_ro(core_siblings);
-define_one_ro(core_siblings_list);
+define_siblings_show_func(core_cpumask);
+define_one_ro_named(core_siblings, show_core_cpumask);
+define_one_ro_named(core_siblings_list, show_core_cpumask_list);
 
 static struct attribute *default_attrs[] = {
 	&attr_physical_package_id.attr,
...
drivers/firmware/dcdbas.c
...
@@ -244,7 +244,7 @@ static ssize_t host_control_on_shutdown_store(struct device *dev,
  */
 int dcdbas_smi_request(struct smi_cmd *smi_cmd)
 {
-	cpumask_t old_mask;
+	cpumask_var_t old_mask;
 	int ret = 0;
 
 	if (smi_cmd->magic != SMI_CMD_MAGIC) {
...
@@ -254,8 +254,11 @@ int dcdbas_smi_request(struct smi_cmd *smi_cmd)
 	}
 
 	/* SMI requires CPU 0 */
-	old_mask = current->cpus_allowed;
-	set_cpus_allowed_ptr(current, &cpumask_of_cpu(0));
+	if (!alloc_cpumask_var(&old_mask, GFP_KERNEL))
+		return -ENOMEM;
+
+	cpumask_copy(old_mask, &current->cpus_allowed);
+	set_cpus_allowed_ptr(current, cpumask_of(0));
 	if (smp_processor_id() != 0) {
 		dev_dbg(&dcdbas_pdev->dev, "%s: failed to get CPU 0\n",
 			__func__);
...
@@ -275,7 +278,8 @@ int dcdbas_smi_request(struct smi_cmd *smi_cmd)
 	);
 
 out:
-	set_cpus_allowed_ptr(current, &old_mask);
+	set_cpus_allowed_ptr(current, old_mask);
+	free_cpumask_var(old_mask);
 	return ret;
 }
...
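The dcdbas change follows the common "save affinity, pin to CPU 0, do the work, restore" sequence, now with a heap-allocated scratch mask. A user-space analogue of the same sequence using the glibc affinity API; this is an illustrative sketch, not kernel code, and the printed work step stands in for the actual SMI.

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

int main(void)
{
    cpu_set_t old_mask, cpu0;

    /* save the caller's affinity (the cpumask_copy(old_mask, ...) step) */
    if (sched_getaffinity(0, sizeof(old_mask), &old_mask))
        return 1;

    /* pin to CPU 0 (the set_cpus_allowed_ptr(current, cpumask_of(0)) step) */
    CPU_ZERO(&cpu0);
    CPU_SET(0, &cpu0);
    if (sched_setaffinity(0, sizeof(cpu0), &cpu0))
        return 1;

    printf("doing the CPU-0-only work on CPU %d\n", sched_getcpu());

    /* restore the original mask before returning */
    sched_setaffinity(0, sizeof(old_mask), &old_mask);
    return 0;
}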
drivers/misc/sgi-xp/xpc_main.c
...
@@ -318,7 +318,7 @@ xpc_hb_checker(void *ignore)
 
 	/* this thread was marked active by xpc_hb_init() */
-	set_cpus_allowed_ptr(current, &cpumask_of_cpu(XPC_HB_CHECK_CPU));
+	set_cpus_allowed_ptr(current, cpumask_of(XPC_HB_CHECK_CPU));
 
 	/* set our heartbeating to other partitions into motion */
 	xpc_hb_check_timeout = jiffies + (xpc_hb_check_interval * HZ);
...
drivers/net/sfc/efx.c
...
@@ -854,20 +854,27 @@ static void efx_fini_io(struct efx_nic *efx)
  * interrupts across them. */
 static int efx_wanted_rx_queues(void)
 {
-	cpumask_t core_mask;
+	cpumask_var_t core_mask;
 	int count;
 	int cpu;
 
-	cpus_clear(core_mask);
+	if (!alloc_cpumask_var(&core_mask, GFP_KERNEL)) {
+		printk(KERN_WARNING
+		       "efx.c: allocation failure, irq balancing hobbled\n");
+		return 1;
+	}
+
+	cpumask_clear(core_mask);
 	count = 0;
 	for_each_online_cpu(cpu) {
-		if (!cpu_isset(cpu, core_mask)) {
+		if (!cpumask_test_cpu(cpu, core_mask)) {
 			++count;
-			cpus_or(core_mask, core_mask,
-				topology_core_siblings(cpu));
+			cpumask_or(core_mask, core_mask,
+				   topology_core_cpumask(cpu));
 		}
 	}
 
+	free_cpumask_var(core_mask);
 	return count;
 }
...
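efx_wanted_rx_queues() counts one queue per physical core by OR-ing each new CPU's core-sibling mask into a scratch mask and skipping CPUs already covered. A toy sketch of the same counting loop over plain bitmasks; the sibling table below is a made-up topology, not anything from the sfc driver.

#include <stdio.h>

#define NR_CPUS 8

int main(void)
{
    /* Hypothetical topology: CPUs {0,4}, {1,5}, {2,6}, {3,7} share a core. */
    unsigned int core_siblings[NR_CPUS] = {
        0x11, 0x22, 0x44, 0x88, 0x11, 0x22, 0x44, 0x88
    };
    unsigned int seen = 0;
    int cpu, count = 0;

    for (cpu = 0; cpu < NR_CPUS; cpu++) {
        if (!(seen & (1u << cpu))) {        /* first CPU of this core */
            count++;
            seen |= core_siblings[cpu];     /* mark all of its siblings */
        }
    }
    printf("%d physical cores -> %d RX queues\n", count, count);
    return 0;
}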
drivers/oprofile/buffer_sync.c
...
@@ -38,7 +38,7 @@
 
 static LIST_HEAD(dying_tasks);
 static LIST_HEAD(dead_tasks);
-static cpumask_t marked_cpus = CPU_MASK_NONE;
+static cpumask_var_t marked_cpus;
 static DEFINE_SPINLOCK(task_mortuary);
 static void process_task_mortuary(void);
...
@@ -456,10 +456,10 @@ static void mark_done(int cpu)
 {
 	int i;
 
-	cpu_set(cpu, marked_cpus);
+	cpumask_set_cpu(cpu, marked_cpus);
 
 	for_each_online_cpu(i) {
-		if (!cpu_isset(i, marked_cpus))
+		if (!cpumask_test_cpu(i, marked_cpus))
 			return;
 	}
...
@@ -468,7 +468,7 @@ static void mark_done(int cpu)
 	 */
 	process_task_mortuary();
 
-	cpus_clear(marked_cpus);
+	cpumask_clear(marked_cpus);
 }
...
@@ -565,6 +565,20 @@ void sync_buffer(int cpu)
 	mutex_unlock(&buffer_mutex);
 }
 
+int __init buffer_sync_init(void)
+{
+	if (!alloc_cpumask_var(&marked_cpus, GFP_KERNEL))
+		return -ENOMEM;
+
+	cpumask_clear(marked_cpus);
+	return 0;
+}
+
+void __exit buffer_sync_cleanup(void)
+{
+	free_cpumask_var(marked_cpus);
+}
+
 /* The function can be used to add a buffer worth of data directly to
  * the kernel buffer. The buffer is assumed to be a circular buffer.
  * Take the entries from index start and end at index end, wrapping
...
drivers/oprofile/buffer_sync.h
...
@@ -19,4 +19,8 @@ void sync_stop(void);
 /* sync the given CPU's buffer */
 void sync_buffer(int cpu);
 
+/* initialize/destroy the buffer system. */
+int buffer_sync_init(void);
+void buffer_sync_cleanup(void);
+
 #endif /* OPROFILE_BUFFER_SYNC_H */
drivers/oprofile/oprof.c
...
@@ -183,6 +183,10 @@ static int __init oprofile_init(void)
 {
 	int err;
 
+	err = buffer_sync_init();
+	if (err)
+		return err;
+
 	err = oprofile_arch_init(&oprofile_ops);
 
 	if (err < 0 || timer) {
...
@@ -191,8 +195,10 @@ static int __init oprofile_init(void)
 	}
 
 	err = oprofilefs_register();
-	if (err)
+	if (err) {
 		oprofile_arch_exit();
+		buffer_sync_cleanup();
+	}
 
 	return err;
 }
...
@@ -202,6 +208,7 @@ static void __exit oprofile_exit(void)
 {
 	oprofilefs_unregister();
 	oprofile_arch_exit();
+	buffer_sync_cleanup();
 }
...
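Because buffer_sync_init() now runs first, every later failure path in oprofile_init() has to unwind it, and oprofile_exit() frees it last. A small stand-alone sketch of that reverse-order unwinding pattern; the step names are placeholders for buffer_sync_init(), oprofile_arch_init() and oprofilefs_register(), not the real functions.

#include <stdio.h>

static int  step_a_init(void) { puts("A init"); return 0; }
static void step_a_exit(void) { puts("A exit"); }
static int  step_b_init(void) { puts("B init"); return 0; }
static void step_b_exit(void) { puts("B exit"); }
static int  step_c_init(void) { puts("C init"); return -1; /* simulate failure */ }

/* Unwind already-completed steps in reverse order when a later step fails. */
static int module_init_like(void)
{
    int err;

    err = step_a_init();
    if (err)
        return err;
    err = step_b_init();
    if (err)
        goto out_a;
    err = step_c_init();
    if (err)
        goto out_b;
    return 0;
out_b:
    step_b_exit();
out_a:
    step_a_exit();
    return err;
}

int main(void) { return module_init_like() ? 1 : 0; }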
drivers/pci/intr_remapping.c
...
@@ -6,6 +6,7 @@
 #include <linux/irq.h>
 #include <asm/io_apic.h>
 #include <asm/smp.h>
+#include <asm/cpu.h>
 #include <linux/intel-iommu.h>
 #include "intr_remapping.h"
...
drivers/xen/events.c
...
@@ -26,6 +26,7 @@
 #include <linux/irq.h>
 #include <linux/module.h>
 #include <linux/string.h>
+#include <linux/bootmem.h>
 
 #include <asm/ptrace.h>
 #include <asm/irq.h>
...
@@ -75,7 +76,14 @@ enum {
 static int evtchn_to_irq[NR_EVENT_CHANNELS] = {
 	[0 ... NR_EVENT_CHANNELS-1] = -1
 };
-static unsigned long cpu_evtchn_mask[NR_CPUS][NR_EVENT_CHANNELS/BITS_PER_LONG];
+struct cpu_evtchn_s {
+	unsigned long bits[NR_EVENT_CHANNELS/BITS_PER_LONG];
+};
+static struct cpu_evtchn_s *cpu_evtchn_mask_p;
+static inline unsigned long *cpu_evtchn_mask(int cpu)
+{
+	return cpu_evtchn_mask_p[cpu].bits;
+}
 static u8 cpu_evtchn[NR_EVENT_CHANNELS];
 
 /* Reference counts for bindings to IRQs. */
...
@@ -115,7 +123,7 @@ static inline unsigned long active_evtchns(unsigned int cpu,
 					   unsigned int idx)
 {
 	return (sh->evtchn_pending[idx] &
-		cpu_evtchn_mask[cpu][idx] &
+		cpu_evtchn_mask(cpu)[idx] &
 		~sh->evtchn_mask[idx]);
 }
...
@@ -125,11 +133,11 @@ static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
 	BUG_ON(irq == -1);
 #ifdef CONFIG_SMP
-	irq_to_desc(irq)->affinity = cpumask_of_cpu(cpu);
+	cpumask_copy(irq_to_desc(irq)->affinity, cpumask_of(cpu));
 #endif
 
-	__clear_bit(chn, cpu_evtchn_mask[cpu_evtchn[chn]]);
-	__set_bit(chn, cpu_evtchn_mask[cpu]);
+	__clear_bit(chn, cpu_evtchn_mask(cpu_evtchn[chn]));
+	__set_bit(chn, cpu_evtchn_mask(cpu));
 
 	cpu_evtchn[chn] = cpu;
 }
...
@@ -142,12 +150,12 @@ static void init_evtchn_cpu_bindings(void)
 	/* By default all event channels notify CPU#0. */
 	for_each_irq_desc(i, desc) {
-		desc->affinity = cpumask_of_cpu(0);
+		cpumask_copy(desc->affinity, cpumask_of(0));
 	}
 #endif
 
 	memset(cpu_evtchn, 0, sizeof(cpu_evtchn));
-	memset(cpu_evtchn_mask[0], ~0, sizeof(cpu_evtchn_mask[0]));
+	memset(cpu_evtchn_mask(0), ~0, sizeof(cpu_evtchn_mask(0)));
 }
 
 static inline unsigned int cpu_from_evtchn(unsigned int evtchn)
...
@@ -822,6 +830,10 @@ static struct irq_chip xen_dynamic_chip __read_mostly = {
 void __init xen_init_IRQ(void)
 {
 	int i;
+	size_t size = nr_cpu_ids * sizeof(struct cpu_evtchn_s);
+
+	cpu_evtchn_mask_p = alloc_bootmem(size);
+	BUG_ON(cpu_evtchn_mask_p == NULL);
 
 	init_evtchn_cpu_bindings();
...
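The events.c change swaps a static [NR_CPUS]-sized 2D array for an nr_cpu_ids-sized allocation reached through a small accessor, so callers keep indexing a plain bitmap word array. A user-space sketch of the same accessor idiom with malloc standing in for alloc_bootmem; the CPU count and event-channel number here are made-up values.

#include <stdio.h>
#include <stdlib.h>

#define NR_EVENT_CHANNELS 1024
#define BITS_PER_LONG (8 * sizeof(unsigned long))

struct cpu_evtchn_s {
    unsigned long bits[NR_EVENT_CHANNELS / BITS_PER_LONG];
};

static struct cpu_evtchn_s *cpu_evtchn_mask_p;

/* Accessor mirroring cpu_evtchn_mask(cpu): hides the indirection so callers
 * still see a per-CPU array of unsigned long bitmap words. */
static unsigned long *cpu_evtchn_mask(int cpu)
{
    return cpu_evtchn_mask_p[cpu].bits;
}

int main(void)
{
    int nr_cpu_ids = 4;  /* stand-in for the boot-time CPU count */

    cpu_evtchn_mask_p = calloc(nr_cpu_ids, sizeof(struct cpu_evtchn_s));
    if (!cpu_evtchn_mask_p)
        return 1;

    /* Bind event channel 37 to CPU 2. */
    cpu_evtchn_mask(2)[37 / BITS_PER_LONG] |= 1UL << (37 % BITS_PER_LONG);

    printf("bit 37 on cpu2: %lu\n",
           (cpu_evtchn_mask(2)[37 / BITS_PER_LONG] >> (37 % BITS_PER_LONG)) & 1UL);
    free(cpu_evtchn_mask_p);
    return 0;
}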
drivers/xen/manage.c
...
@@ -100,7 +100,7 @@ static void do_suspend(void)
 	/* XXX use normal device tree? */
 	xenbus_suspend();
 
-	err = stop_machine(xen_suspend, &cancelled, &cpumask_of_cpu(0));
+	err = stop_machine(xen_suspend, &cancelled, cpumask_of(0));
 	if (err) {
 		printk(KERN_ERR "failed to start xen_suspend: %d\n", err);
 		goto out;
...
include/asm-generic/bitops/__ffs.h
...
@@ -9,7 +9,7 @@
 *
 * Undefined if no bit exists, so code should check against 0 first.
 */
-static inline unsigned long __ffs(unsigned long word)
+static __always_inline unsigned long __ffs(unsigned long word)
 {
	int num = 0;
...
include/asm-generic/bitops/__fls.h
...
@@ -9,7 +9,7 @@
 *
 * Undefined if no set bit exists, so code should check against 0 first.
 */
-static inline unsigned long __fls(unsigned long word)
+static __always_inline unsigned long __fls(unsigned long word)
 {
	int num = BITS_PER_LONG - 1;
...
include/asm-generic/bitops/fls.h
...
@@ -9,7 +9,7 @@
 * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
 */
-static inline int fls(int x)
+static __always_inline int fls(int x)
 {
	int r = 32;
...
include/asm-generic/bitops/fls64.h
...
@@ -15,7 +15,7 @@
 * at position 64.
 */
 #if BITS_PER_LONG == 32
-static inline int fls64(__u64 x)
+static __always_inline int fls64(__u64 x)
 {
	__u32 h = x >> 32;
	if (h)
...
@@ -23,7 +23,7 @@ static inline int fls64(__u64 x)
	return fls(x);
 }
 #elif BITS_PER_LONG == 64
-static inline int fls64(__u64 x)
+static __always_inline int fls64(__u64 x)
 {
	if (x == 0)
		return 0;
...
include/linux/interrupt.h
...
@@ -467,6 +467,7 @@ int show_interrupts(struct seq_file *p, void *v);
 struct irq_desc;
 
 extern int early_irq_init(void);
+extern int arch_probe_nr_irqs(void);
 extern int arch_early_irq_init(void);
 extern int arch_init_chip_data(struct irq_desc *desc, int cpu);
...
include/linux/irq.h
...
@@ -182,11 +182,11 @@ struct irq_desc {
 	unsigned int		irqs_unhandled;
 	spinlock_t		lock;
 #ifdef CONFIG_SMP
-	cpumask_t		affinity;
+	cpumask_var_t		affinity;
 	unsigned int		cpu;
-#endif
 #ifdef CONFIG_GENERIC_PENDING_IRQ
-	cpumask_t		pending_mask;
+	cpumask_var_t		pending_mask;
+#endif
 #endif
 #ifdef CONFIG_PROC_FS
 	struct proc_dir_entry	*dir;
...
@@ -422,4 +422,84 @@ extern int set_irq_msi(unsigned int irq, struct msi_desc *entry);
 
 #endif /* !CONFIG_S390 */
 
+#ifdef CONFIG_SMP
+/**
+ * init_alloc_desc_masks - allocate cpumasks for irq_desc
+ * @desc:	pointer to irq_desc struct
+ * @cpu:	cpu which will be handling the cpumasks
+ * @boot:	true if need bootmem
+ *
+ * Allocates affinity and pending_mask cpumask if required.
+ * Returns true if successful (or not required).
+ * Side effect: affinity has all bits set, pending_mask has all bits clear.
+ */
+static inline bool init_alloc_desc_masks(struct irq_desc *desc, int cpu,
+								bool boot)
+{
+	int node;
+
+	if (boot) {
+		alloc_bootmem_cpumask_var(&desc->affinity);
+		cpumask_setall(desc->affinity);
+
+#ifdef CONFIG_GENERIC_PENDING_IRQ
+		alloc_bootmem_cpumask_var(&desc->pending_mask);
+		cpumask_clear(desc->pending_mask);
+#endif
+		return true;
+	}
+
+	node = cpu_to_node(cpu);
+
+	if (!alloc_cpumask_var_node(&desc->affinity, GFP_ATOMIC, node))
+		return false;
+	cpumask_setall(desc->affinity);
+
+#ifdef CONFIG_GENERIC_PENDING_IRQ
+	if (!alloc_cpumask_var_node(&desc->pending_mask, GFP_ATOMIC, node)) {
+		free_cpumask_var(desc->affinity);
+		return false;
+	}
+	cpumask_clear(desc->pending_mask);
+#endif
+	return true;
+}
+
+/**
+ * init_copy_desc_masks - copy cpumasks for irq_desc
+ * @old_desc:	pointer to old irq_desc struct
+ * @new_desc:	pointer to new irq_desc struct
+ *
+ * Insures affinity and pending_masks are copied to new irq_desc.
+ * If !CONFIG_CPUMASKS_OFFSTACK the cpumasks are embedded in the
+ * irq_desc struct so the copy is redundant.
+ */
+static inline void init_copy_desc_masks(struct irq_desc *old_desc,
+					struct irq_desc *new_desc)
+{
+#ifdef CONFIG_CPUMASKS_OFFSTACK
+	cpumask_copy(new_desc->affinity, old_desc->affinity);
+
+#ifdef CONFIG_GENERIC_PENDING_IRQ
+	cpumask_copy(new_desc->pending_mask, old_desc->pending_mask);
+#endif
+#endif
+}
+
+#else /* !CONFIG_SMP */
+
+static inline bool init_alloc_desc_masks(struct irq_desc *desc, int cpu,
+								bool boot)
+{
+	return true;
+}
+
+static inline void init_copy_desc_masks(struct irq_desc *old_desc,
+					struct irq_desc *new_desc)
+{
+}
+
+#endif /* CONFIG_SMP */
+
 #endif /* _LINUX_IRQ_H */
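init_alloc_desc_masks() acquires both masks or neither: if the pending_mask allocation fails it releases the already-allocated affinity mask before reporting failure. A compileable user-space sketch of that all-or-nothing pattern; the struct and helper names below are invented for illustration, not kernel API.

#include <stdbool.h>
#include <stdlib.h>

struct desc_masks {
    unsigned long *affinity;
    unsigned long *pending;
};

/* Allocate both masks, and release the first if the second fails,
 * so the caller sees either a fully initialized pair or nothing. */
static bool alloc_desc_masks(struct desc_masks *d, size_t words)
{
    d->affinity = calloc(words, sizeof(unsigned long));
    if (!d->affinity)
        return false;

    d->pending = calloc(words, sizeof(unsigned long));
    if (!d->pending) {
        free(d->affinity);
        d->affinity = NULL;
        return false;
    }
    return true;
}

int main(void)
{
    struct desc_masks d;
    return alloc_desc_masks(&d, 4) ? 0 : 1;
}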
include/linux/irqnr.h
...
@@ -20,6 +20,7 @@
 # define for_each_irq_desc_reverse(irq, desc)		\
 	for (irq = nr_irqs - 1; irq >= 0; irq--)
-#else
+#else /* CONFIG_GENERIC_HARDIRQS */
+
 extern int nr_irqs;
...
include/linux/topology.h
...
@@ -193,5 +193,11 @@ int arch_update_cpu_topology(void);
 #ifndef topology_core_siblings
 #define topology_core_siblings(cpu)		cpumask_of_cpu(cpu)
 #endif
+#ifndef topology_thread_cpumask
+#define topology_thread_cpumask(cpu)		cpumask_of(cpu)
+#endif
+#ifndef topology_core_cpumask
+#define topology_core_cpumask(cpu)		cpumask_of(cpu)
+#endif
 
 #endif /* _LINUX_TOPOLOGY_H */
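The new topology_thread_cpumask()/topology_core_cpumask() macros are only defined when an architecture has not already provided them, falling back to treating each CPU as its own sibling group. A tiny sketch of that "define only if missing" idiom; the macro name used here is hypothetical.

#include <stdio.h>

/* An architecture header may already provide this; otherwise fall back to
 * treating every CPU as its own core (the cpumask_of(cpu) default above). */
#ifndef topology_core_id_of
#define topology_core_id_of(cpu)	(cpu)
#endif

int main(void)
{
    int cpu;

    for (cpu = 0; cpu < 4; cpu++)
        printf("cpu%d -> core %d\n", cpu, topology_core_id_of(cpu));
    return 0;
}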
kernel/irq/chip.c
...
@@ -46,7 +46,10 @@ void dynamic_irq_init(unsigned int irq)
 	desc->irq_count = 0;
 	desc->irqs_unhandled = 0;
 #ifdef CONFIG_SMP
-	cpumask_setall(&desc->affinity);
+	cpumask_setall(desc->affinity);
+#ifdef CONFIG_GENERIC_PENDING_IRQ
+	cpumask_clear(desc->pending_mask);
+#endif
 #endif
 	spin_unlock_irqrestore(&desc->lock, flags);
 }
...
kernel/irq/handle.c
...
@@ -17,6 +17,7 @@
 #include <linux/kernel_stat.h>
 #include <linux/rculist.h>
 #include <linux/hash.h>
+#include <linux/bootmem.h>
 
 #include "internals.h"
...
@@ -57,6 +58,7 @@ int nr_irqs = NR_IRQS;
 EXPORT_SYMBOL_GPL(nr_irqs);
 
 #ifdef CONFIG_SPARSE_IRQ
 static struct irq_desc irq_desc_init = {
 	.irq	    = -1,
 	.status	    = IRQ_DISABLED,
...
@@ -64,9 +66,6 @@ static struct irq_desc irq_desc_init = {
 	.handle_irq = handle_bad_irq,
 	.depth      = 1,
 	.lock       = __SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
-#ifdef CONFIG_SMP
-	.affinity   = CPU_MASK_ALL
-#endif
 };
 
 void init_kstat_irqs(struct irq_desc *desc, int cpu, int nr)
...
@@ -101,6 +100,10 @@ static void init_one_irq_desc(int irq, struct irq_desc *desc, int cpu)
 		printk(KERN_ERR "can not alloc kstat_irqs\n");
 		BUG_ON(1);
 	}
+	if (!init_alloc_desc_masks(desc, cpu, false)) {
+		printk(KERN_ERR "can not alloc irq_desc cpumasks\n");
+		BUG_ON(1);
+	}
 	arch_init_chip_data(desc, cpu);
 }
...
@@ -109,7 +112,7 @@ static void init_one_irq_desc(int irq, struct irq_desc *desc, int cpu)
 */
 DEFINE_SPINLOCK(sparse_irq_lock);
 
-struct irq_desc *irq_desc_ptrs[NR_IRQS] __read_mostly;
+struct irq_desc **irq_desc_ptrs __read_mostly;
 
 static struct irq_desc irq_desc_legacy[NR_IRQS_LEGACY] __cacheline_aligned_in_smp = {
 	[0 ... NR_IRQS_LEGACY-1] = {
...
@@ -119,14 +122,10 @@ static struct irq_desc irq_desc_legacy[NR_IRQS_LEGACY] __cacheline_aligned_in_sm
 		.handle_irq = handle_bad_irq,
 		.depth	    = 1,
 		.lock	    = __SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
-#ifdef CONFIG_SMP
-		.affinity   = CPU_MASK_ALL
-#endif
 	}
 };
 
-/* FIXME: use bootmem alloc ...*/
-static unsigned int kstat_irqs_legacy[NR_IRQS_LEGACY][NR_CPUS];
+static unsigned int *kstat_irqs_legacy;
 
 int __init early_irq_init(void)
 {
...
@@ -134,18 +133,30 @@ int __init early_irq_init(void)
 	int legacy_count;
 	int i;
 
+	/* initialize nr_irqs based on nr_cpu_ids */
+	arch_probe_nr_irqs();
+	printk(KERN_INFO "NR_IRQS:%d nr_irqs:%d\n", NR_IRQS, nr_irqs);
+
 	desc = irq_desc_legacy;
 	legacy_count = ARRAY_SIZE(irq_desc_legacy);
 
+	/* allocate irq_desc_ptrs array based on nr_irqs */
+	irq_desc_ptrs = alloc_bootmem(nr_irqs * sizeof(void *));
+
+	/* allocate based on nr_cpu_ids */
+	/* FIXME: invert kstat_irgs, and it'd be a per_cpu_alloc'd thing */
+	kstat_irqs_legacy = alloc_bootmem(NR_IRQS_LEGACY * nr_cpu_ids *
+					  sizeof(int));
+
 	for (i = 0; i < legacy_count; i++) {
 		desc[i].irq = i;
-		desc[i].kstat_irqs = kstat_irqs_legacy[i];
+		desc[i].kstat_irqs = kstat_irqs_legacy + i * nr_cpu_ids;
 		lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
+		init_alloc_desc_masks(&desc[i], 0, true);
 		irq_desc_ptrs[i] = desc + i;
 	}
 
-	for (i = legacy_count; i < NR_IRQS; i++)
+	for (i = legacy_count; i < nr_irqs; i++)
 		irq_desc_ptrs[i] = NULL;
 
 	return arch_early_irq_init();
...
@@ -153,7 +164,10 @@ int __init early_irq_init(void)
 
 struct irq_desc *irq_to_desc(unsigned int irq)
 {
-	return (irq < NR_IRQS) ? irq_desc_ptrs[irq] : NULL;
+	if (irq_desc_ptrs && irq < nr_irqs)
+		return irq_desc_ptrs[irq];
+
+	return NULL;
 }
 
 struct irq_desc *irq_to_desc_alloc_cpu(unsigned int irq, int cpu)
...
@@ -162,10 +176,9 @@ struct irq_desc *irq_to_desc_alloc_cpu(unsigned int irq, int cpu)
 	unsigned long flags;
 	int node;
 
-	if (irq >= NR_IRQS) {
-		printk(KERN_WARNING "irq >= NR_IRQS in irq_to_desc_alloc: %d %d\n",
-				irq, NR_IRQS);
-		WARN_ON(1);
+	if (irq >= nr_irqs) {
+		WARN(1, "irq (%d) >= nr_irqs (%d) in irq_to_desc_alloc\n",
+			irq, nr_irqs);
 		return NULL;
 	}
...
@@ -207,9 +220,6 @@ struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
 		.handle_irq = handle_bad_irq,
 		.depth = 1,
 		.lock = __SPIN_LOCK_UNLOCKED(irq_desc->lock),
-#ifdef CONFIG_SMP
-		.affinity = CPU_MASK_ALL
-#endif
 	}
 };
...
@@ -219,12 +229,15 @@ int __init early_irq_init(void)
 	int count;
 	int i;
 
+	printk(KERN_INFO "NR_IRQS:%d\n", NR_IRQS);
+
 	desc = irq_desc;
 	count = ARRAY_SIZE(irq_desc);
 
-	for (i = 0; i < count; i++)
+	for (i = 0; i < count; i++) {
 		desc[i].irq = i;
+		init_alloc_desc_masks(&desc[i], 0, true);
+	}
 
 	return arch_early_irq_init();
 }
...
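With irq_desc_ptrs now sized from nr_irqs at boot, irq_to_desc() has to guard both against lookups before the array exists and against out-of-range IRQ numbers. A user-space sketch of that lookup guard, with calloc standing in for alloc_bootmem and a made-up nr_irqs value:

#include <stdio.h>
#include <stdlib.h>

struct irq_desc { int irq; };

static struct irq_desc **irq_desc_ptrs;  /* allocated once nr_irqs is known */
static int nr_irqs;

/* Mirrors the new irq_to_desc(): NULL before the array exists or when the
 * IRQ number is out of range, instead of indexing a fixed NR_IRQS table. */
static struct irq_desc *irq_to_desc(unsigned int irq)
{
    if (irq_desc_ptrs && irq < (unsigned int)nr_irqs)
        return irq_desc_ptrs[irq];
    return NULL;
}

int main(void)
{
    printf("before init: %p\n", (void *)irq_to_desc(3));

    nr_irqs = 16;  /* stand-in for arch_probe_nr_irqs() */
    irq_desc_ptrs = calloc(nr_irqs, sizeof(*irq_desc_ptrs));

    printf("after init, unpopulated irq 3: %p\n", (void *)irq_to_desc(3));
    printf("out-of-range irq 99: %p\n", (void *)irq_to_desc(99));
    free(irq_desc_ptrs);
    return 0;
}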
kernel/irq/internals.h
...
@@ -16,7 +16,14 @@ extern int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
 extern struct lock_class_key irq_desc_lock_class;
 extern void init_kstat_irqs(struct irq_desc *desc, int cpu, int nr);
 extern spinlock_t sparse_irq_lock;
+
+#ifdef CONFIG_SPARSE_IRQ
+/* irq_desc_ptrs allocated at boot time */
+extern struct irq_desc **irq_desc_ptrs;
+#else
+/* irq_desc_ptrs is a fixed size array */
 extern struct irq_desc *irq_desc_ptrs[NR_IRQS];
+#endif
 
 #ifdef CONFIG_PROC_FS
 extern void register_irq_proc(unsigned int irq, struct irq_desc *desc);
...
kernel/irq/manage.c
...
@@ -98,14 +98,14 @@ int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
 
 #ifdef CONFIG_GENERIC_PENDING_IRQ
 	if (desc->status & IRQ_MOVE_PCNTXT || desc->status & IRQ_DISABLED) {
-		cpumask_copy(&desc->affinity, cpumask);
+		cpumask_copy(desc->affinity, cpumask);
 		desc->chip->set_affinity(irq, cpumask);
 	} else {
 		desc->status |= IRQ_MOVE_PENDING;
-		cpumask_copy(&desc->pending_mask, cpumask);
+		cpumask_copy(desc->pending_mask, cpumask);
 	}
 #else
-	cpumask_copy(&desc->affinity, cpumask);
+	cpumask_copy(desc->affinity, cpumask);
 	desc->chip->set_affinity(irq, cpumask);
 #endif
 	desc->status |= IRQ_AFFINITY_SET;
...
@@ -127,16 +127,16 @@ int do_irq_select_affinity(unsigned int irq, struct irq_desc *desc)
 	 * one of the targets is online.
 	 */
 	if (desc->status & (IRQ_AFFINITY_SET | IRQ_NO_BALANCING)) {
-		if (cpumask_any_and(&desc->affinity, cpu_online_mask)
+		if (cpumask_any_and(desc->affinity, cpu_online_mask)
 		    < nr_cpu_ids)
 			goto set_affinity;
 		else
 			desc->status &= ~IRQ_AFFINITY_SET;
 	}
 
-	cpumask_and(&desc->affinity, cpu_online_mask, irq_default_affinity);
+	cpumask_and(desc->affinity, cpu_online_mask, irq_default_affinity);
 set_affinity:
-	desc->chip->set_affinity(irq, &desc->affinity);
+	desc->chip->set_affinity(irq, desc->affinity);
 
 	return 0;
 }
...
kernel/irq/migration.c
...
@@ -18,7 +18,7 @@ void move_masked_irq(int irq)
 
 	desc->status &= ~IRQ_MOVE_PENDING;
 
-	if (unlikely(cpumask_empty(&desc->pending_mask)))
+	if (unlikely(cpumask_empty(desc->pending_mask)))
 		return;
 
 	if (!desc->chip->set_affinity)
...
@@ -38,13 +38,13 @@ void move_masked_irq(int irq)
 	 * For correct operation this depends on the caller
 	 * masking the irqs.
 	 */
-	if (likely(cpumask_any_and(&desc->pending_mask, cpu_online_mask)
+	if (likely(cpumask_any_and(desc->pending_mask, cpu_online_mask)
 		   < nr_cpu_ids)) {
-		cpumask_and(&desc->affinity,
-			    &desc->pending_mask, cpu_online_mask);
-		desc->chip->set_affinity(irq, &desc->affinity);
+		cpumask_and(desc->affinity,
+			    desc->pending_mask, cpu_online_mask);
+		desc->chip->set_affinity(irq, desc->affinity);
 	}
-	cpumask_clear(&desc->pending_mask);
+	cpumask_clear(desc->pending_mask);
 }
 
 void move_native_irq(int irq)
...
kernel/irq/numa_migrate.c
...
@@ -38,15 +38,22 @@ static void free_kstat_irqs(struct irq_desc *old_desc, struct irq_desc *desc)
 	old_desc->kstat_irqs = NULL;
 }
 
-static void init_copy_one_irq_desc(int irq, struct irq_desc *old_desc,
+static bool init_copy_one_irq_desc(int irq, struct irq_desc *old_desc,
 		 struct irq_desc *desc, int cpu)
 {
 	memcpy(desc, old_desc, sizeof(struct irq_desc));
+	if (!init_alloc_desc_masks(desc, cpu, false)) {
+		printk(KERN_ERR "irq %d: can not get new irq_desc cpumask "
+				"for migration.\n", irq);
+		return false;
+	}
 	spin_lock_init(&desc->lock);
 	desc->cpu = cpu;
 	lockdep_set_class(&desc->lock, &irq_desc_lock_class);
 	init_copy_kstat_irqs(old_desc, desc, cpu, nr_cpu_ids);
+	init_copy_desc_masks(old_desc, desc);
 	arch_init_copy_chip_data(old_desc, desc, cpu);
+	return true;
 }
 
 static void free_one_irq_desc(struct irq_desc *old_desc, struct irq_desc *desc)
...
@@ -76,12 +83,18 @@ static struct irq_desc *__real_move_irq_desc(struct irq_desc *old_desc,
 	node = cpu_to_node(cpu);
 	desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node);
 	if (!desc) {
-		printk(KERN_ERR "irq %d: can not get new irq_desc for migration.\n", irq);
+		printk(KERN_ERR "irq %d: can not get new irq_desc "
+				"for migration.\n", irq);
 		/* still use old one */
 		desc = old_desc;
 		goto out_unlock;
 	}
+	if (!init_copy_one_irq_desc(irq, old_desc, desc, cpu)) {
+		/* still use old one */
+		kfree(desc);
+		desc = old_desc;
+		goto out_unlock;
+	}
-	init_copy_one_irq_desc(irq, old_desc, desc, cpu);
 
 	irq_desc_ptrs[irq] = desc;
...
kernel/irq/proc.c
...
@@ -20,11 +20,11 @@ static struct proc_dir_entry *root_irq_dir;
 static int irq_affinity_proc_show(struct seq_file *m, void *v)
 {
 	struct irq_desc *desc = irq_to_desc((long)m->private);
-	const struct cpumask *mask = &desc->affinity;
+	const struct cpumask *mask = desc->affinity;
 
 #ifdef CONFIG_GENERIC_PENDING_IRQ
 	if (desc->status & IRQ_MOVE_PENDING)
-		mask = &desc->pending_mask;
+		mask = desc->pending_mask;
 #endif
 	seq_cpumask(m, mask);
 	seq_putc(m, '\n');
...
kernel/sched_rt.c
...
@@ -960,16 +960,17 @@ static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu)
 
 static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask);
 
-static inline int pick_optimal_cpu(int this_cpu, cpumask_t *mask)
+static inline int pick_optimal_cpu(int this_cpu,
+				   const struct cpumask *mask)
 {
 	int first;
 
 	/* "this_cpu" is cheaper to preempt than a remote processor */
-	if ((this_cpu != -1) && cpu_isset(this_cpu, *mask))
+	if ((this_cpu != -1) && cpumask_test_cpu(this_cpu, mask))
 		return this_cpu;
 
-	first = first_cpu(*mask);
-	if (first != NR_CPUS)
+	first = cpumask_first(mask);
+	if (first < nr_cpu_ids)
 		return first;
 
 	return -1;
...
@@ -981,6 +982,7 @@ static int find_lowest_rq(struct task_struct *task)
 	struct cpumask *lowest_mask = __get_cpu_var(local_cpu_mask);
 	int this_cpu = smp_processor_id();
 	int cpu      = task_cpu(task);
+	cpumask_var_t domain_mask;
 
 	if (task->rt.nr_cpus_allowed == 1)
 		return -1; /* No other targets possible */
...
@@ -1013,19 +1015,25 @@ static int find_lowest_rq(struct task_struct *task)
 	if (this_cpu == cpu)
 		this_cpu = -1; /* Skip this_cpu opt if the same */
 
-	for_each_domain(cpu, sd) {
-		if (sd->flags & SD_WAKE_AFFINE) {
-			cpumask_t domain_mask;
-			int       best_cpu;
+	if (alloc_cpumask_var(&domain_mask, GFP_ATOMIC)) {
+		for_each_domain(cpu, sd) {
+			if (sd->flags & SD_WAKE_AFFINE) {
+				int best_cpu;
 
-			cpumask_and(&domain_mask, sched_domain_span(sd),
-				    lowest_mask);
+				cpumask_and(domain_mask,
+					    sched_domain_span(sd),
+					    lowest_mask);
 
-			best_cpu = pick_optimal_cpu(this_cpu,
-						    &domain_mask);
-			if (best_cpu != -1)
-				return best_cpu;
+				best_cpu = pick_optimal_cpu(this_cpu,
+							    domain_mask);
+
+				if (best_cpu != -1) {
+					free_cpumask_var(domain_mask);
+					return best_cpu;
+				}
+			}
 		}
+		free_cpumask_var(domain_mask);
 	}
 
 	/*
...
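find_lowest_rq() now heap-allocates its scratch domain mask with GFP_ATOMIC and simply skips the per-domain narrowing if the allocation fails, rather than failing the lookup. A toy sketch of that "optional scratch buffer" shape using plain bitmasks; the mask values and helper are illustrative only.

#include <stdio.h>
#include <stdlib.h>

/* Lowest set bit index, or -1 if the mask is empty. */
static int first_cpu_in(unsigned int mask)
{
    int i;

    for (i = 0; i < 32; i++)
        if (mask & (1u << i))
            return i;
    return -1;
}

int main(void)
{
    unsigned int lowest_mask = 0x6;   /* candidate CPUs 1 and 2 (assumed) */
    unsigned int *domain_mask = malloc(sizeof(*domain_mask));
    int best = -1;

    if (domain_mask) {                /* only narrow by domain if we got scratch space */
        *domain_mask = lowest_mask & 0x2;   /* intersect with a domain span */
        best = first_cpu_in(*domain_mask);
        free(domain_mask);
    }

    printf("best: %d\n", best);
    return 0;
}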
kernel/softirq.c
...
@@ -795,6 +795,11 @@ int __init __weak early_irq_init(void)
 	return 0;
 }
 
+int __init __weak arch_probe_nr_irqs(void)
+{
+	return 0;
+}
+
 int __init __weak arch_early_irq_init(void)
 {
 	return 0;
...
lib/smp_processor_id.c
...
@@ -22,7 +22,7 @@ notrace unsigned int debug_smp_processor_id(void)
 	 * Kernel threads bound to a single CPU can safely use
 	 * smp_processor_id():
 	 */
-	if (cpus_equal(current->cpus_allowed, cpumask_of_cpu(this_cpu)))
+	if (cpumask_equal(&current->cpus_allowed, cpumask_of(this_cpu)))
 		goto out;
 
 	/*
...