Commit e22aa6d3 authored Dec 18, 2002 by Anton Blanchard

ppc64: support for > 32 CPUs (24 way RS64 with HMT shows up as 48 way)

parent 8d93229f
Showing 7 changed files with 37 additions and 41 deletions (+37 -41)
arch/ppc64/kernel/init_task.c    +1  -1
arch/ppc64/kernel/irq.c          +9  -12
arch/ppc64/kernel/smp.c          +20 -21
arch/ppc64/kernel/xics.c         +3  -3
include/asm-ppc64/smp.h          +1  -1
include/asm-ppc64/tlb.h          +2  -2
include/asm-ppc64/topology.h     +1  -1
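The theme running through all seven files: cpu masks and mask constants move from 32-bit int to 64-bit unsigned long, because an expression like 1 << cpu is evaluated as int and breaks once cpu numbers reach 32, which is exactly what happens when a 24-way RS64 with hardware multithreading enumerates 48 logical cpus. A minimal standalone illustration (not kernel code):

#include <stdio.h>

int main(void)
{
	int cpu = 40;	/* e.g. an HMT sibling on a 24-way RS64 */

	unsigned long bad  = 1 << cpu;		/* int shift: undefined once cpu >= 32 */
	unsigned long good = 1UL << cpu;	/* unsigned long shift: fine up to bit 63 */

	printf("1   << 40 = %#lx (garbage)\n", bad);
	printf("1UL << 40 = %#lx\n", good);
	return 0;
}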
arch/ppc64/kernel/init_task.c

@@ -13,7 +13,7 @@ struct mm_struct init_mm = INIT_MM(init_mm);
 /*
  * Initial thread structure.
  *
- * We need to make sure that this is 8192-byte aligned due to the
+ * We need to make sure that this is 16384-byte aligned due to the
  * way process stacks are handled. This is done by having a special
  * "init_task" linker map entry..
  */
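init_task's stack block doubles its required alignment because the kernel stack size on this tree grew from 8192 to 16384 bytes. A sketch of why the alignment must track the stack size, assuming the common scheme where per-task data is found by masking the stack pointer (illustrative names, not the actual ppc64 implementation):

#include <stdio.h>

#define THREAD_SIZE 16384UL	/* the new ppc64 kernel stack size */

/* With every stack THREAD_SIZE-aligned, any address within a stack
 * maps back to the stack base (where per-task data lives) by clearing
 * the low bits; misalign init_task and this breaks for the boot task. */
static unsigned long stack_base(unsigned long sp)
{
	return sp & ~(THREAD_SIZE - 1);
}

int main(void)
{
	printf("%#lx\n", stack_base(0xc00000000123f5f0UL));	/* 0xc00000000123c000 */
	return 0;
}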
arch/ppc64/kernel/irq.c

@@ -395,7 +395,7 @@ handle_irq_event(int irq, struct pt_regs *regs, struct irqaction *action)
 }
 
 #ifdef CONFIG_SMP
-extern unsigned int irq_affinity [NR_IRQS];
+extern unsigned long irq_affinity [NR_IRQS];
 
 typedef struct {
 	unsigned long cpu;
@@ -409,7 +409,7 @@ static irq_balance_t irq_balance[NR_IRQS] __cacheline_aligned
 	(idle_cpu(cpu) && ((now) - irq_stat[(cpu)].idle_timestamp > 1))
 
 #define IRQ_ALLOWED(cpu,allowed_mask)	\
-		((1 << cpu) & (allowed_mask))
+		((1UL << cpu) & (allowed_mask))
 
 #define IRQ_BALANCE_INTERVAL	(HZ/50)
@@ -461,7 +461,7 @@ static inline void balance_irq(int irq)
 	new_cpu = move(entry->cpu, allowed_mask, now, random_number);
 	if (entry->cpu != new_cpu) {
 		entry->cpu = new_cpu;
-		irq_desc[irq].handler->set_affinity(irq, 1 << new_cpu);
+		irq_desc[irq].handler->set_affinity(irq, 1UL << new_cpu);
 	}
 }
 }
@@ -649,19 +649,19 @@ static struct proc_dir_entry * irq_dir [NR_IRQS];
 static struct proc_dir_entry * smp_affinity_entry [NR_IRQS];
 
 #ifdef CONFIG_IRQ_ALL_CPUS
-unsigned int irq_affinity [NR_IRQS] = { [0 ... NR_IRQS-1] = 0xffffffff};
+unsigned long irq_affinity [NR_IRQS] = { [0 ... NR_IRQS-1] = -1UL};
 #else  /* CONFIG_IRQ_ALL_CPUS */
-unsigned int irq_affinity [NR_IRQS] = { [0 ... NR_IRQS-1] = 0x00000000};
+unsigned long irq_affinity [NR_IRQS] = { [0 ... NR_IRQS-1] = 0x0};
 #endif /* CONFIG_IRQ_ALL_CPUS */
 
-#define HEX_DIGITS 8
+#define HEX_DIGITS 16
 
 static int irq_affinity_read_proc (char *page, char **start, off_t off,
 			int count, int *eof, void *data)
 {
 	if (count < HEX_DIGITS+1)
 		return -EINVAL;
-	return sprintf (page, "%08x\n", irq_affinity[(int)(long)data]);
+	return sprintf (page, "%16lx\n", irq_affinity[(long)data]);
 }
 
 static unsigned int parse_hex_value (const char *buffer,
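Two details in this hunk are worth calling out: the default affinity becomes -1UL, which is all ones at whatever width unsigned long has, and the [0 ... N] form is GCC's range-designated initializer. A small standalone sketch:

#include <stdio.h>

#define NR_IRQS 4	/* stand-in; the kernel value is much larger */

/* GCC range-designated initializer: every element from index 0 to
 * NR_IRQS-1 gets the same value.  -1UL converts to an all-ones
 * unsigned long, so every cpu bit is set regardless of word size. */
static unsigned long irq_affinity[NR_IRQS] = { [0 ... NR_IRQS-1] = -1UL };

int main(void)
{
	printf("%016lx\n", irq_affinity[0]);	/* ffffffffffffffff on 64-bit */
	return 0;
}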
@@ -679,7 +679,7 @@ static unsigned int parse_hex_value (const char *buffer,
 		return -EFAULT;
 
 	/*
-	 * Parse the first 8 characters as a hex string, any non-hex char
+	 * Parse the first 16 characters as a hex string, any non-hex char
 	 * is end-of-string. '00e1', 'e1', '00E1', 'E1' are all the same.
 	 */
 	value = 0;
@@ -704,7 +704,7 @@ static unsigned int parse_hex_value (const char *buffer,
 static int irq_affinity_write_proc (struct file *file, const char *buffer,
 					unsigned long count, void *data)
 {
-	int irq = (int)(long) data, full_count = count, err;
+	int irq = (long) data, full_count = count, err;
 	unsigned long new_value;
 
 	if (!irq_desc[irq].handler->set_affinity)
@@ -712,8 +712,6 @@ static int irq_affinity_write_proc (struct file *file, const char *buffer,
 	err = parse_hex_value(buffer, count, &new_value);
 
-/* Why is this disabled ? --BenH */
-#if 0/*CONFIG_SMP*/
 	/*
 	 * Do not allow disabling IRQs completely - it's a too easy
 	 * way to make the system unusable accidentally :-) At least
@@ -721,7 +719,6 @@ static int irq_affinity_write_proc (struct file *file, const char *buffer,
 	 */
 	if (!(new_value & cpu_online_map))
 		return -EINVAL;
-#endif
 
 	irq_affinity[irq] = new_value;
 	irq_desc[irq].handler->set_affinity(irq, new_value);
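With unsigned long affinity words, HEX_DIGITS doubles from 8 to 16 so a full 64-bit mask survives the round trip through /proc/irq/N/smp_affinity. A hypothetical userspace sketch of the parsing rule the kernel comment describes (the kernel's parse_hex_value additionally copies from user space and reports errors):

#include <stdio.h>

/* Take up to 16 hex characters, stopping at the first non-hex
 * character, so '00e1', 'e1', '00E1' and 'E1' all parse alike. */
static unsigned long parse_hex16(const char *s)
{
	unsigned long value = 0;
	int i;

	for (i = 0; i < 16; i++) {
		unsigned int digit;

		if (s[i] >= '0' && s[i] <= '9')
			digit = s[i] - '0';
		else if (s[i] >= 'a' && s[i] <= 'f')
			digit = s[i] - 'a' + 10;
		else if (s[i] >= 'A' && s[i] <= 'F')
			digit = s[i] - 'A' + 10;
		else
			break;	/* any non-hex char is end-of-string */
		value = (value << 4) | digit;
	}
	return value;
}

int main(void)
{
	/* bits 0-47 set, e.g. all 48 logical cpus on an HMT RS64 box */
	printf("%lx\n", parse_hex16("ffffffffffff\n"));
	return 0;
}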
arch/ppc64/kernel/smp.c

@@ -56,7 +56,7 @@ unsigned long cpu_online_map = 0;
 static struct smp_ops_t *smp_ops;
 
-volatile unsigned long cpu_callin_map[NR_CPUS];
+volatile unsigned int cpu_callin_map[NR_CPUS];
 
 extern unsigned char stab_array[];
@@ -564,26 +564,8 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 	/* Fixup boot cpu */
 	smp_store_cpu_info(smp_processor_id());
 	cpu_callin_map[smp_processor_id()] = 1;
 
-	for (i = 0; i < NR_CPUS; i++) {
-		paca[i].prof_counter = 1;
-		paca[i].prof_multiplier = 1;
-		if (i != boot_cpuid) {
-			void *tmp;
-
-			/*
-			 * the boot cpu segment table is statically
-			 * initialized to real address 0x5000. The
-			 * Other processor's tables are created and
-			 * initialized here.
-			 */
-			tmp = &stab_array[PAGE_SIZE * (i-1)];
-			memset(tmp, 0, PAGE_SIZE);
-			paca[i].xStab_data.virt = (unsigned long)tmp;
-			paca[i].xStab_data.real = (unsigned long)__v2a(tmp);
-			paca[i].default_decr = tb_ticks_per_jiffy / decr_overclock;
-		}
-	}
+	paca[smp_processor_id()].prof_counter = 1;
+	paca[smp_processor_id()].prof_multiplier = 1;
 
 	/*
 	 * XXX very rough.
@@ -611,6 +593,23 @@ int __devinit __cpu_up(unsigned int cpu)
 	struct task_struct *p;
 	int c;
 
+	paca[cpu].prof_counter = 1;
+	paca[cpu].prof_multiplier = 1;
+	paca[cpu].default_decr = tb_ticks_per_jiffy / decr_overclock;
+
+	if (!cpu_has_slb()) {
+		void *tmp;
+
+		/* maximum of 48 CPUs on machines with a segment table */
+		if (cpu >= 48)
+			BUG();
+
+		tmp = &stab_array[PAGE_SIZE * cpu];
+		memset(tmp, 0, PAGE_SIZE);
+		paca[cpu].xStab_data.virt = (unsigned long)tmp;
+		paca[cpu].xStab_data.real = (unsigned long)__v2a(tmp);
+	}
+
 	/* create a process for the processor */
 	/* only regs.msr is actually used, and 0 is OK for it */
 	memset(&regs, 0, sizeof(struct pt_regs));
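The per-cpu segment-table setup moves out of smp_prepare_cpus() into __cpu_up(), and the stab_array slot is now indexed by cpu directly rather than by i-1 with the boot cpu skipped. A sketch of the layout this implies (the 4096-byte page size and the helper name are assumptions for illustration):

#include <string.h>

#define STAB_PAGE_SIZE	4096
#define MAX_STAB_CPUS	48	/* segment-table machines cap at 48 cpus */

/* One PAGE_SIZE slot per possible cpu; the boot cpu's slot is unused
 * because its segment table lives at a fixed real address instead. */
static unsigned char stab_array[STAB_PAGE_SIZE * MAX_STAB_CPUS];

static void *stab_for_cpu(unsigned int cpu)
{
	void *tmp;

	if (cpu >= MAX_STAB_CPUS)
		return NULL;	/* the kernel BUG()s here instead */

	tmp = &stab_array[STAB_PAGE_SIZE * cpu];
	memset(tmp, 0, STAB_PAGE_SIZE);
	return tmp;
}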
arch/ppc64/kernel/xics.c

@@ -437,7 +437,7 @@ void xics_set_affinity(unsigned int virq, unsigned long cpumask)
 	unsigned long flags;
 	long status;
 	unsigned long xics_status[2];
-	u32 newmask;
+	unsigned long newmask;
 
 	virq -= XICS_IRQ_OFFSET;
 	irq = virt_irq_to_real(virq);
@@ -455,12 +455,12 @@ void xics_set_affinity(unsigned int virq, unsigned long cpumask)
 	}
 
 	/* For the moment only implement delivery to all cpus or one cpu */
-	if (cpumask == 0xffffffff) {
+	if (cpumask == -1UL) {
 		newmask = default_distrib_server;
 	} else {
 		if (!(cpumask & cpu_online_map))
 			goto out;
-		newmask = find_first_bit(&cpumask, 32);
+		newmask = find_first_bit(&cpumask, 8*sizeof(unsigned long));
 	}
 
 	status = rtas_call(ibm_set_xive, 3, 1, NULL,
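The "all cpus" comparison is the subtle one: once cpumask is 64 bits wide, the literal 0xffffffff no longer equals an all-ones mask. A standalone check, assuming a 64-bit unsigned long as on ppc64:

#include <assert.h>

int main(void)
{
	unsigned long cpumask = ~0UL;	/* "deliver to all cpus" */

	/* 0xffffffff is an unsigned int constant; widened to 64 bits it
	 * is 0x00000000ffffffff and no longer matches an all-ones mask */
	assert(cpumask != 0xffffffff);
	assert(cpumask == -1UL);	/* the new test is width-independent */
	return 0;
}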
include/asm-ppc64/smp.h

@@ -52,7 +52,7 @@ static inline int num_online_cpus(void)
 	return nr;
 }
 
-extern volatile unsigned long cpu_callin_map[NR_CPUS];
+extern volatile unsigned int cpu_callin_map[NR_CPUS];
 
 #define smp_processor_id() (get_paca()->xPacaIndex)
include/asm-ppc64/tlb.h

@@ -62,7 +62,7 @@ static inline void __tlb_remove_tlb_entry(mmu_gather_t *tlb, pte_t *ptep,
 	if (i == PPC64_TLB_BATCH_NR) {
 		int local = 0;
 
-		if (tlb->mm->cpu_vm_mask == (1 << cpu))
+		if (tlb->mm->cpu_vm_mask == (1UL << cpu))
 			local = 1;
 
 		flush_hash_range(tlb->mm->context, i, local);
@@ -80,7 +80,7 @@ static inline void tlb_flush(struct free_pte_ctx *tlb)
 	struct ppc64_tlb_batch *batch = &ppc64_tlb_batch[cpu];
 	int local = 0;
 
-	if (tlb->mm->cpu_vm_mask == (1 << smp_processor_id()))
+	if (tlb->mm->cpu_vm_mask == (1UL << smp_processor_id()))
 		local = 1;
 
 	flush_hash_range(tlb->mm->context, batch->index, local);
include/asm-ppc64/topology.h

@@ -41,7 +41,7 @@ static inline unsigned long __node_to_cpu_mask(int node)
 	for (cpu = 0; cpu < NR_CPUS; cpu++)
 		if (numa_cpu_lookup_table[cpu] == node)
-			mask |= 1 << cpu;
+			mask |= 1UL << cpu;
 
 	return mask;
 }