Commit a07f95d8 authored by Linus Torvalds

Merge master.kernel.org:/home/davem/BK/sparc-2.5
into home.transmeta.com:/home/torvalds/v2.5/linux
parents 6dceb11b 37daf4ba
......@@ -26,9 +26,6 @@ struct cpu_fp_info {
char* fp_name;
};
/* In order to get the fpu type correct, you need to take the IDPROM's
* machine type value into consideration too. I will fix this.
*/
struct cpu_fp_info linux_sparc_fpu[] = {
{ 0x17, 0x10, 0, "UltraSparc I integrated FPU"},
{ 0x22, 0x10, 0, "UltraSparc II integrated FPU"},
......@@ -51,13 +48,8 @@ struct cpu_iu_info linux_sparc_chips[] = {
#define NSPARCCHIPS (sizeof(linux_sparc_chips)/sizeof(struct cpu_iu_info))
#ifdef CONFIG_SMP
char *sparc_cpu_type[64] = { "cpu-oops", "cpu-oops1", "cpu-oops2", "cpu-oops3" };
char *sparc_fpu_type[64] = { "fpu-oops", "fpu-oops1", "fpu-oops2", "fpu-oops3" };
#else
char *sparc_cpu_type[64] = { "cpu-oops", };
char *sparc_fpu_type[64] = { "fpu-oops", };
#endif
char *sparc_cpu_type[NR_CPUS] = { "cpu-oops", };
char *sparc_fpu_type[NR_CPUS] = { "fpu-oops", };
unsigned int fsr_storage;
......@@ -72,39 +64,47 @@ void __init cpu_probe(void)
fprs = fprs_read ();
fprs_write (FPRS_FEF);
__asm__ __volatile__ ("rdpr %%ver, %0; stx %%fsr, [%1]" : "=&r" (ver) : "r" (&fpu_vers));
__asm__ __volatile__ ("rdpr %%ver, %0; stx %%fsr, [%1]"
: "=&r" (ver)
: "r" (&fpu_vers));
fprs_write (fprs);
manuf = ((ver >> 48)&0xffff);
impl = ((ver >> 32)&0xffff);
manuf = ((ver >> 48) & 0xffff);
impl = ((ver >> 32) & 0xffff);
fpu_vers = ((fpu_vers>>17)&0x7);
fpu_vers = ((fpu_vers >> 17) & 0x7);
for(i = 0; i<NSPARCCHIPS; i++) {
if(linux_sparc_chips[i].manuf == manuf)
if(linux_sparc_chips[i].impl == impl) {
sparc_cpu_type[cpuid] = linux_sparc_chips[i].cpu_name;
for (i = 0; i < NSPARCCHIPS; i++) {
if (linux_sparc_chips[i].manuf == manuf) {
if (linux_sparc_chips[i].impl == impl) {
sparc_cpu_type[cpuid] =
linux_sparc_chips[i].cpu_name;
break;
}
}
}
if(i==NSPARCCHIPS) {
printk("DEBUG: manuf = 0x%x impl = 0x%x\n", manuf,
impl);
if (i == NSPARCCHIPS) {
printk("DEBUG: manuf = 0x%x impl = 0x%x\n",
manuf, impl);
sparc_cpu_type[cpuid] = "Unknown CPU";
}
for(i = 0; i<NSPARCFPU; i++) {
if(linux_sparc_fpu[i].manuf == manuf && linux_sparc_fpu[i].impl == impl)
if(linux_sparc_fpu[i].fpu_vers == fpu_vers) {
sparc_fpu_type[cpuid] = linux_sparc_fpu[i].fp_name;
for (i = 0; i < NSPARCFPU; i++) {
if (linux_sparc_fpu[i].manuf == manuf &&
linux_sparc_fpu[i].impl == impl) {
if (linux_sparc_fpu[i].fpu_vers == fpu_vers) {
sparc_fpu_type[cpuid] =
linux_sparc_fpu[i].fp_name;
break;
}
}
}
if(i == NSPARCFPU) {
printk("DEBUG: manuf = 0x%x impl = 0x%x fsr.vers = 0x%x\n", manuf, impl,
(unsigned)fpu_vers);
if (i == NSPARCFPU) {
printk("DEBUG: manuf = 0x%x impl = 0x%x fsr.vers = 0x%x\n",
manuf, impl,
(unsigned int) fpu_vers);
sparc_fpu_type[cpuid] = "Unknown FPU";
}
}
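
As a quick orientation to the probe above: the manufacturer and implementation codes come from the privileged %ver register (bits 63:48 and 47:32), the FPU version from bits 19:17 of %fsr, and the decoded pair indexes the linux_sparc_chips/linux_sparc_fpu tables. A minimal user-space sketch of the same bit arithmetic, with made-up register values (nothing here is kernel API):

```c
#include <stdio.h>

int main(void)
{
	/* hypothetical register contents, chosen to match the
	 * UltraSparc I table entry { 0x17, 0x10, ... } above */
	unsigned long long ver = 0x0017001000000000ULL;	/* %ver */
	unsigned long long fsr = 0x7ULL << 17;		/* %fsr */

	unsigned int manuf    = (ver >> 48) & 0xffff;	/* bits 63:48 */
	unsigned int impl     = (ver >> 32) & 0xffff;	/* bits 47:32 */
	unsigned int fpu_vers = (fsr >> 17) & 0x7;	/* FSR.ver, bits 19:17 */

	printf("manuf=0x%x impl=0x%x fpu_vers=0x%x\n", manuf, impl, fpu_vers);
	return 0;
}
```
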
......@@ -17,8 +17,8 @@
#include <asm/smp.h>
#include <asm/spitfire.h>
struct prom_cpuinfo linux_cpus[64] __initdata = { { 0 } };
unsigned prom_cpu_nodes[64];
struct prom_cpuinfo linux_cpus[NR_CPUS] __initdata = { { 0 } };
unsigned prom_cpu_nodes[NR_CPUS];
int linux_num_cpus = 0;
extern void cpu_probe(void);
......
......@@ -122,9 +122,12 @@ int show_interrupts(struct seq_file *p, void *v)
#ifndef CONFIG_SMP
seq_printf(p, "%10u ", kstat_irqs(i));
#else
for (j = 0; j < smp_num_cpus; j++)
for (j = 0; j < NR_CPUS; j++) {
if (!cpu_online(j))
continue;
seq_printf(p, "%10u ",
kstat.irqs[cpu_logical_map(j)][i]);
kstat.irqs[j][i]);
}
#endif
seq_printf(p, " %s:%lx", action->name,
get_ino_in_irqaction(action));
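
This hunk shows the conversion pattern used throughout the patch: loops bounded by smp_num_cpus and indirected through cpu_logical_map() become loops over every possible cpu id with an explicit online test. A stand-alone sketch of the new idiom, assuming a 64-bit bitmask just as the sparc64 code does (the map value and the per-cpu action are invented for illustration):

```c
#include <stdio.h>

#define NR_CPUS 64

static unsigned long cpu_online_map = 0x5UL;	/* hypothetical: cpus 0 and 2 */
#define cpu_online(cpu) ((cpu_online_map & (1UL << (cpu))) != 0)

int main(void)
{
	int j;

	for (j = 0; j < NR_CPUS; j++) {
		if (!cpu_online(j))
			continue;
		/* stands in for seq_printf(p, "%10u ", kstat.irqs[j][i]) */
		printf("cpu %d online\n", j);
	}
	return 0;
}
```
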
......@@ -574,12 +577,18 @@ static void show(char * str)
printk("\n%s, CPU %d:\n", str, cpu);
printk("irq: %d [ ", irqs_running());
for (i = 0; i < smp_num_cpus; i++)
for (i = 0; i < NR_CPUS; i++) {
if (!cpu_online(i))
continue;
printk("%u ", __brlock_array[i][BR_GLOBALIRQ_LOCK]);
}
printk("]\nbh: %d [ ",
(spin_is_locked(&global_bh_lock) ? 1 : 0));
for (i = 0; i < smp_num_cpus; i++)
for (i = 0; i < NR_CPUS; i++) {
if (!cpu_online(i))
continue;
printk("%u ", local_bh_count(i));
}
printk("]\n");
}
......@@ -743,8 +752,9 @@ static inline void redirect_intr(int cpu, struct ino_bucket *bp)
unsigned long cpu_mask = get_smpaff_in_irqaction(ap);
unsigned int buddy, ticks;
cpu_mask &= cpu_online_map;
if (cpu_mask == 0)
cpu_mask = ~0UL;
cpu_mask = cpu_online_map;
if (this_is_starfire != 0 ||
bp->pil >= 10 || current->pid == 0)
......@@ -753,28 +763,23 @@ static inline void redirect_intr(int cpu, struct ino_bucket *bp)
/* 'cpu' is the MID (ie. UPAID), calculate the MID
* of our buddy.
*/
buddy = cpu_number_map(cpu) + 1;
if (buddy >= NR_CPUS ||
cpu_logical_map(buddy) == -1)
buddy = cpu + 1;
if (buddy >= NR_CPUS)
buddy = 0;
ticks = 0;
while ((cpu_mask & (1UL << buddy)) == 0) {
buddy++;
if (buddy >= NR_CPUS ||
cpu_logical_map(buddy) == -1)
buddy = cpu_logical_map(0);
if (++buddy >= NR_CPUS)
buddy = 0;
if (++ticks > NR_CPUS) {
put_smpaff_in_irqaction(ap, 0);
goto out;
}
}
if (buddy == cpu_number_map(cpu))
if (buddy == cpu)
goto out;
buddy = cpu_logical_map(buddy);
/* Voo-doo programming. */
if (cpu_data[buddy].idle_volume < FORWARD_VOLUME)
goto out;
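
The buddy search above now walks hardware cpu ids directly: start at cpu + 1, wrap at NR_CPUS, and give up after NR_CPUS probes so an empty affinity mask cannot spin forever. A stand-alone sketch of that loop, with illustrative inputs:

```c
#include <stdio.h>

#define NR_CPUS 64

/* Returns the next cpu in cpu_mask after 'cpu', or -1 if none usable. */
static int find_buddy(int cpu, unsigned long cpu_mask)
{
	unsigned int buddy = cpu + 1;
	unsigned int ticks = 0;

	if (buddy >= NR_CPUS)
		buddy = 0;

	while ((cpu_mask & (1UL << buddy)) == 0) {
		if (++buddy >= NR_CPUS)
			buddy = 0;
		if (++ticks > NR_CPUS)
			return -1;	/* caller clears the irq affinity */
	}
	if (buddy == (unsigned int)cpu)
		return -1;		/* nobody else to forward to */
	return buddy;
}

int main(void)
{
	printf("buddy of cpu 1 in mask 0x5: %d\n", find_buddy(1, 0x5UL));
	return 0;
}
```
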
......@@ -1140,22 +1145,28 @@ static int retarget_one_irq(struct irqaction *p, int goal_cpu)
unsigned long imap = bucket->imap;
unsigned int tid;
while (!cpu_online(goal_cpu)) {
if (++goal_cpu >= NR_CPUS)
goal_cpu = 0;
}
if (tlb_type == cheetah) {
tid = __cpu_logical_map[goal_cpu] << 26;
tid = goal_cpu << 26;
tid &= IMAP_AID_SAFARI;
} else if (this_is_starfire == 0) {
tid = __cpu_logical_map[goal_cpu] << 26;
tid = goal_cpu << 26;
tid &= IMAP_TID_UPA;
} else {
tid = (starfire_translate(imap, __cpu_logical_map[goal_cpu]) << 26);
tid = (starfire_translate(imap, goal_cpu) << 26);
tid &= IMAP_TID_UPA;
}
upa_writel(tid | IMAP_VALID, imap);
goal_cpu++;
if(goal_cpu >= NR_CPUS ||
__cpu_logical_map[goal_cpu] == -1)
goal_cpu = 0;
while (!cpu_online(goal_cpu)) {
if (++goal_cpu >= NR_CPUS)
goal_cpu = 0;
}
return goal_cpu;
}
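
With the logical map gone, the target id programmed into the interrupt map register is the hardware cpu id itself, shifted into the TID field. A small sketch of that encoding; the IMAP_TID_UPA value is an assumption here, included only to make the masking concrete:

```c
#include <stdio.h>

#define IMAP_TID_UPA 0x7c000000U	/* assumed: TID field, bits 30:26 */

int main(void)
{
	unsigned int goal_cpu = 5;	/* hypothetical online target */
	unsigned int tid = (goal_cpu << 26) & IMAP_TID_UPA;

	printf("tid bits for cpu %u: 0x%08x\n", goal_cpu, tid);
	return 0;
}
```
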
......@@ -1326,38 +1337,6 @@ static unsigned int parse_hex_value (const char *buffer,
return 0;
}
static unsigned long hw_to_logical(unsigned long mask)
{
unsigned long new_mask = 0UL;
int i;
for (i = 0; i < NR_CPUS; i++) {
if (mask & (1UL << i)) {
int logical = cpu_number_map(i);
new_mask |= (1UL << logical);
}
}
return new_mask;
}
static unsigned long logical_to_hw(unsigned long mask)
{
unsigned long new_mask = 0UL;
int i;
for (i = 0; i < NR_CPUS; i++) {
if (mask & (1UL << i)) {
int hw = cpu_logical_map(i);
new_mask |= (1UL << hw);
}
}
return new_mask;
}
static int irq_affinity_read_proc (char *page, char **start, off_t off,
int count, int *eof, void *data)
{
......@@ -1365,8 +1344,6 @@ static int irq_affinity_read_proc (char *page, char **start, off_t off,
struct irqaction *ap = bp->irq_info;
unsigned long mask = get_smpaff_in_irqaction(ap);
mask = logical_to_hw(mask);
if (count < HEX_DIGITS+1)
return -EINVAL;
return sprintf (page, "%016lx\n", mask == 0 ? ~0UL : mask);
......@@ -1375,14 +1352,11 @@ static int irq_affinity_read_proc (char *page, char **start, off_t off,
static inline void set_intr_affinity(int irq, unsigned long hw_aff)
{
struct ino_bucket *bp = ivector_table + irq;
unsigned long aff = hw_to_logical(hw_aff);
/*
* Users specify affinity in terms of cpu ids, which is what
* is displayed via /proc/cpuinfo. As soon as we do this,
* handler_irq() might see and take action.
/* Users specify affinity in terms of hw cpu ids.
* As soon as we do this, handler_irq() might see and take action.
*/
put_smpaff_in_irqaction((struct irqaction *)bp->irq_info, aff);
put_smpaff_in_irqaction((struct irqaction *)bp->irq_info, hw_aff);
/* Migration is simply done by the next cpu to service this
* interrupt.
......@@ -1393,7 +1367,7 @@ static int irq_affinity_write_proc (struct file *file, const char *buffer,
unsigned long count, void *data)
{
int irq = (long) data, full_count = count, err;
unsigned long new_value;
unsigned long new_value, i;
err = parse_hex_value(buffer, count, &new_value);
......@@ -1402,7 +1376,12 @@ static int irq_affinity_write_proc (struct file *file, const char *buffer,
* way to make the system unusable accidentally :-) At least
* one online CPU still has to be targeted.
*/
new_value &= cpu_online_map;
for (i = 0; i < NR_CPUS; i++) {
if ((new_value & (1UL << i)) != 0 &&
!cpu_online(i))
new_value &= ~(1UL << i);
}
if (!new_value)
return -EINVAL;
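
The write handler now scrubs the user-supplied mask bit by bit rather than and-ing with cpu_online_map in one shot. A stand-alone sketch of that validation, with an invented online map (-22 stands in for -EINVAL):

```c
#include <stdio.h>

#define NR_CPUS 64

static unsigned long cpu_online_map = 0x3UL;	/* hypothetical: cpus 0 and 1 */
#define cpu_online(cpu) ((cpu_online_map & (1UL << (cpu))) != 0)

static int validate_affinity(unsigned long *mask)
{
	unsigned long i;

	for (i = 0; i < NR_CPUS; i++) {
		if ((*mask & (1UL << i)) != 0 && !cpu_online(i))
			*mask &= ~(1UL << i);	/* drop offline cpus */
	}
	return *mask ? 0 : -22;	/* at least one online cpu required */
}

int main(void)
{
	unsigned long m = 0x6UL;	/* user asked for cpus 1 and 2 */

	printf("rc=%d mask=0x%lx\n", validate_affinity(&m), m);
	return 0;
}
```
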
......
......@@ -649,7 +649,7 @@ static int show_cpuinfo(struct seq_file *m, void *__unused)
(prom_prev >> 8) & 0xff,
prom_prev & 0xff,
linux_num_cpus,
smp_num_cpus
num_online_cpus()
#ifndef CONFIG_SMP
, loops_per_jiffy/(500000/HZ),
(loops_per_jiffy/(5000/HZ)) % 100,
......
......@@ -40,13 +40,9 @@
extern int linux_num_cpus;
extern void calibrate_delay(void);
extern unsigned prom_cpu_nodes[];
cpuinfo_sparc cpu_data[NR_CPUS];
volatile int __cpu_number_map[NR_CPUS] __attribute__ ((aligned (SMP_CACHE_BYTES)));
volatile int __cpu_logical_map[NR_CPUS] __attribute__ ((aligned (SMP_CACHE_BYTES)));
/* Please don't make this stuff initdata!!! --DaveM */
static unsigned char boot_cpu_id;
static int smp_activated;
......@@ -55,8 +51,8 @@ static int smp_activated;
spinlock_t kernel_flag __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED;
volatile int smp_processors_ready = 0;
unsigned long cpu_present_map = 0;
int smp_num_cpus = 1;
atomic_t sparc64_num_cpus_online = ATOMIC_INIT(0);
unsigned long cpu_online_map = 0;
int smp_threads_ready = 0;
void __init smp_setup(char *str, int *ints)
......@@ -79,7 +75,7 @@ void smp_info(struct seq_file *m)
seq_printf(m, "State:\n");
for (i = 0; i < NR_CPUS; i++) {
if (cpu_present_map & (1UL << i))
if (cpu_online(i))
seq_printf(m,
"CPU%d:\t\tonline\n", i);
}
......@@ -90,7 +86,7 @@ void smp_bogo(struct seq_file *m)
int i;
for (i = 0; i < NR_CPUS; i++)
if (cpu_present_map & (1UL << i))
if (cpu_online(i))
seq_printf(m,
"Cpu%dBogo\t: %lu.%02lu\n"
"Cpu%dClkTck\t: %016lx\n",
......@@ -230,7 +226,7 @@ void cpu_panic(void)
panic("SMP bolixed\n");
}
extern struct prom_cpuinfo linux_cpus[64];
extern struct prom_cpuinfo linux_cpus[NR_CPUS];
extern unsigned long sparc64_cpu_startup;
......@@ -261,9 +257,11 @@ void __init smp_boot_cpus(void)
if ((cpucount + 1) == max_cpus)
goto ignorecpu;
if (cpu_present_map & (1UL << i)) {
unsigned long entry = (unsigned long)(&sparc64_cpu_startup);
unsigned long cookie = (unsigned long)(&cpu_new_thread);
if (cpu_online(i)) {
unsigned long entry =
(unsigned long)(&sparc64_cpu_startup);
unsigned long cookie =
(unsigned long)(&cpu_new_thread);
struct task_struct *p;
int timeout;
int no;
......@@ -291,8 +289,7 @@ void __init smp_boot_cpus(void)
udelay(100);
}
if (callin_flag) {
__cpu_number_map[i] = cpucount;
__cpu_logical_map[cpucount] = i;
atomic_inc(&sparc64_num_cpus_online);
prom_cpu_nodes[i] = linux_cpus[no].prom_node;
prom_printf("OK\n");
} else {
......@@ -300,31 +297,33 @@ void __init smp_boot_cpus(void)
printk("Processor %d is stuck.\n", i);
prom_printf("FAILED\n");
}
}
if (!callin_flag) {
if (!callin_flag) {
ignorecpu:
cpu_present_map &= ~(1UL << i);
__cpu_number_map[i] = -1;
clear_bit(i, &cpu_online_map);
}
}
}
cpu_new_thread = NULL;
if (cpucount == 0) {
if (max_cpus != 1)
printk("Error: only one processor found.\n");
cpu_present_map = (1UL << smp_processor_id());
memset(&cpu_online_map, 0, sizeof(cpu_online_map));
set_bit(smp_processor_id(), &cpu_online_map);
atomic_set(&sparc64_num_cpus_online, 1);
} else {
unsigned long bogosum = 0;
for (i = 0; i < NR_CPUS; i++) {
if (cpu_present_map & (1UL << i))
if (cpu_online(i))
bogosum += cpu_data[i].udelay_val;
}
printk("Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
printk("Total of %d processors activated "
"(%lu.%02lu BogoMIPS).\n",
cpucount + 1,
bogosum/(500000/HZ),
(bogosum/(5000/HZ))%100);
smp_activated = 1;
smp_num_cpus = cpucount + 1;
}
/* We want to run this with all the other cpus spinning
......@@ -372,8 +371,9 @@ static void spitfire_xcall_helper(u64 data0, u64 data1, u64 data2, u64 pstate, u
membar #Sync"
: "=r" (tmp)
: "r" (pstate), "i" (PSTATE_IE), "i" (ASI_INTR_W),
"r" (data0), "r" (data1), "r" (data2), "r" (target), "r" (0x10), "0" (tmp)
: "g1");
"r" (data0), "r" (data1), "r" (data2), "r" (target),
"r" (0x10), "0" (tmp)
: "g1");
/* NOTE: PSTATE_IE is still clear. */
stuck = 100000;
......@@ -403,15 +403,16 @@ static void spitfire_xcall_helper(u64 data0, u64 data1, u64 data2, u64 pstate, u
static __inline__ void spitfire_xcall_deliver(u64 data0, u64 data1, u64 data2, unsigned long mask)
{
int ncpus = smp_num_cpus - 1;
int i;
u64 pstate;
int i;
__asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
for (i = 0; (i < NR_CPUS) && ncpus; i++) {
for (i = 0; i < NR_CPUS; i++) {
if (mask & (1UL << i)) {
spitfire_xcall_helper(data0, data1, data2, pstate, i);
ncpus--;
mask &= ~(1UL << i);
if (!mask)
break;
}
}
}
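
Note the new loop termination: instead of counting down an online-cpu total, each serviced target is cleared from a working copy of the mask and the walk stops when the mask empties. A stand-alone sketch of the idiom (deliver() stands in for spitfire_xcall_helper()):

```c
#include <stdio.h>

#define NR_CPUS 64

static void deliver(int cpu)
{
	printf("xcall -> cpu %d\n", cpu);
}

static void xcall_deliver_sketch(unsigned long mask)
{
	int i;

	for (i = 0; i < NR_CPUS; i++) {
		if (mask & (1UL << i)) {
			deliver(i);
			mask &= ~(1UL << i);
			if (!mask)
				break;	/* every target served */
		}
	}
}

int main(void)
{
	xcall_deliver_sketch(0x15UL);	/* hypothetical: cpus 0, 2, 4 */
	return 0;
}
```
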
......@@ -449,18 +450,22 @@ static void cheetah_xcall_deliver(u64 data0, u64 data1, u64 data2, unsigned long
nack_busy_id = 0;
{
int i, ncpus = smp_num_cpus - 1;
unsigned long work_mask = mask;
int i;
for (i = 0; (i < NR_CPUS) && ncpus; i++) {
if (mask & (1UL << i)) {
for (i = 0; i < NR_CPUS; i++) {
if (work_mask & (1UL << i)) {
u64 target = (i << 14) | 0x70;
target |= (nack_busy_id++ << 24);
__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
"membar #Sync\n\t"
: /* no outputs */
: "r" (target), "i" (ASI_INTR_W));
ncpus--;
__asm__ __volatile__(
"stxa %%g0, [%0] %1\n\t"
"membar #Sync\n\t"
: /* no outputs */
: "r" (target), "i" (ASI_INTR_W));
work_mask &= ~(1UL << i);
if (!work_mask)
break;
}
}
}
......@@ -494,6 +499,7 @@ static void cheetah_xcall_deliver(u64 data0, u64 data1, u64 data2, unsigned long
printk("CPU[%d]: mondo stuckage result[%016lx]\n",
smp_processor_id(), dispatch_stat);
} else {
unsigned long work_mask = mask;
int i, this_busy_nack = 0;
/* Delay some random time with interrupts enabled
......@@ -505,10 +511,14 @@ static void cheetah_xcall_deliver(u64 data0, u64 data1, u64 data2, unsigned long
* NACK us.
*/
for (i = 0; i < NR_CPUS; i++) {
if (mask & (1UL << i)) {
if ((dispatch_stat & (0x2 << this_busy_nack)) == 0)
if (work_mask & (1UL << i)) {
if ((dispatch_stat &
(0x2 << this_busy_nack)) == 0)
mask &= ~(1UL << i);
this_busy_nack += 2;
work_mask &= ~(1UL << i);
if (!work_mask)
break;
}
}
......@@ -525,6 +535,7 @@ static void smp_cross_call_masked(unsigned long *func, u32 ctx, u64 data1, u64 d
if (smp_processors_ready) {
u64 data0 = (((u64)ctx)<<32 | (((u64)func) & 0xffffffff));
mask &= cpu_online_map;
mask &= ~(1UL<<smp_processor_id());
if (tlb_type == spitfire)
......@@ -538,7 +549,7 @@ static void smp_cross_call_masked(unsigned long *func, u32 ctx, u64 data1, u64 d
/* Send cross call to all processors except self. */
#define smp_cross_call(func, ctx, data1, data2) \
smp_cross_call_masked(func, ctx, data1, data2, cpu_present_map)
smp_cross_call_masked(func, ctx, data1, data2, cpu_online_map)
struct call_data_struct {
void (*func) (void *info);
......@@ -560,7 +571,7 @@ int smp_call_function(void (*func)(void *info), void *info,
int nonatomic, int wait)
{
struct call_data_struct data;
int cpus = smp_num_cpus - 1;
int cpus = num_online_cpus() - 1;
long timeout;
if (!cpus)
......@@ -596,7 +607,7 @@ int smp_call_function(void (*func)(void *info), void *info,
out_timeout:
spin_unlock(&call_lock);
printk("XCALL: Remote cpus not responding, ncpus=%d finished=%d\n",
smp_num_cpus - 1, atomic_read(&data.finished));
num_online_cpus() - 1, atomic_read(&data.finished));
return 0;
}
......@@ -657,11 +668,12 @@ void smp_flush_dcache_page_impl(struct page *page, int cpu)
#endif
if (cpu == smp_processor_id()) {
__local_flush_dcache_page(page);
} else if ((cpu_present_map & mask) != 0) {
} else if ((cpu_online_map & mask) != 0) {
u64 data0;
if (tlb_type == spitfire) {
data0 = ((u64)&xcall_flush_dcache_page_spitfire);
data0 =
((u64)&xcall_flush_dcache_page_spitfire);
if (page->mapping != NULL)
data0 |= ((u64)1 << 32);
spitfire_xcall_deliver(data0,
......@@ -669,7 +681,8 @@ void smp_flush_dcache_page_impl(struct page *page, int cpu)
(u64) page->virtual,
mask);
} else {
data0 = ((u64)&xcall_flush_dcache_page_cheetah);
data0 =
((u64)&xcall_flush_dcache_page_cheetah);
cheetah_xcall_deliver(data0,
__pa(page->virtual),
0, mask);
......@@ -684,7 +697,8 @@ void smp_flush_dcache_page_impl(struct page *page, int cpu)
void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
{
if (smp_processors_ready) {
unsigned long mask = cpu_present_map & ~(1UL << smp_processor_id());
unsigned long mask =
cpu_online_map & ~(1UL << smp_processor_id());
u64 data0;
#ifdef CONFIG_DEBUG_DCFLUSH
......@@ -719,8 +733,9 @@ void smp_receive_signal(int cpu)
if (smp_processors_ready) {
unsigned long mask = 1UL << cpu;
if ((cpu_present_map & mask) != 0) {
u64 data0 = (((u64)&xcall_receive_signal) & 0xffffffff);
if ((cpu_online_map & mask) != 0) {
u64 data0 =
(((u64)&xcall_receive_signal) & 0xffffffff);
if (tlb_type == spitfire)
spitfire_xcall_deliver(data0, 0, 0, mask);
......@@ -848,7 +863,8 @@ void smp_flush_tlb_range(struct mm_struct *mm, unsigned long start,
mm->cpu_vm_mask);
local_flush_and_out:
__flush_tlb_range(ctx, start, SECONDARY_CONTEXT, end, PAGE_SIZE, (end-start));
__flush_tlb_range(ctx, start, SECONDARY_CONTEXT,
end, PAGE_SIZE, (end-start));
}
void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end)
......@@ -870,31 +886,32 @@ void smp_flush_tlb_page(struct mm_struct *mm, unsigned long page)
int cpu = smp_processor_id();
page &= PAGE_MASK;
if (mm == current->active_mm && atomic_read(&mm->mm_users) == 1) {
if (mm == current->active_mm &&
atomic_read(&mm->mm_users) == 1) {
/* By virtue of being the current address space, and
* having the only reference to it, the following operation
* is safe.
* having the only reference to it, the following
* operation is safe.
*
* It would not be a win to perform the xcall tlb flush in
* this case, because even if we switch back to one of the
* other processors in cpu_vm_mask it is almost certain that
* all TLB entries for this context will be replaced by the
* time that happens.
* It would not be a win to perform the xcall tlb
* flush in this case, because even if we switch back
* to one of the other processors in cpu_vm_mask it
* is almost certain that all TLB entries for this
* context will be replaced by the time that happens.
*/
mm->cpu_vm_mask = (1UL << cpu);
goto local_flush_and_out;
} else {
/* By virtue of running under the mm->page_table_lock,
* and mmu_context.h:switch_mm doing the same, the following
* operation is safe.
* and mmu_context.h:switch_mm doing the same, the
* following operation is safe.
*/
if (mm->cpu_vm_mask == (1UL << cpu))
goto local_flush_and_out;
}
/* OK, we have to actually perform the cross call. Most likely
* this is a cloned mm or kswapd is kicking out pages for a task
* which has run recently on another cpu.
/* OK, we have to actually perform the cross call. Most
* likely this is a cloned mm or kswapd is kicking out pages
* for a task which has run recently on another cpu.
*/
smp_cross_call_masked(&xcall_flush_tlb_page,
ctx, page, 0,
......@@ -922,7 +939,7 @@ void smp_capture(void)
membar("#StoreStore | #LoadStore");
if (result == 1) {
int ncpus = smp_num_cpus;
int ncpus = num_online_cpus();
#ifdef CAPTURE_DEBUG
printk("CPU[%d]: Sending penguins to jail...",
......@@ -946,7 +963,8 @@ void smp_release(void)
if (smp_processors_ready) {
if (atomic_dec_and_test(&smp_capture_depth)) {
#ifdef CAPTURE_DEBUG
printk("CPU[%d]: Giving pardon to imprisoned penguins\n",
printk("CPU[%d]: Giving pardon to "
"imprisoned penguins\n",
smp_processor_id());
#endif
penguins_are_doing_time = 0;
......@@ -1027,7 +1045,8 @@ void smp_percpu_timer_interrupt(struct pt_regs *regs)
do {
if (!user)
sparc64_do_profile(regs->tpc, regs->u_regs[UREG_RETPC]);
sparc64_do_profile(regs->tpc,
regs->u_regs[UREG_RETPC]);
if (!--prof_counter(cpu)) {
if (cpu == boot_cpu_id) {
irq_enter(cpu, 0);
......@@ -1151,16 +1170,20 @@ void __init smp_tick_init(void)
boot_cpu_id = hard_smp_processor_id();
current_tick_offset = timer_tick_offset;
cpu_present_map = 0;
for (i = 0; i < linux_num_cpus; i++)
cpu_present_map |= (1UL << linux_cpus[i].mid);
for (i = 0; i < NR_CPUS; i++) {
__cpu_number_map[i] = -1;
__cpu_logical_map[i] = -1;
if (boot_cpu_id >= NR_CPUS) {
prom_printf("Serious problem, boot cpu id >= NR_CPUS\n");
prom_halt();
}
atomic_set(&sparc64_num_cpus_online, 1);
memset(&cpu_online_map, 0, sizeof(cpu_online_map));
for (i = 0; i < linux_num_cpus; i++) {
if (linux_cpus[i].mid < NR_CPUS)
set_bit(linux_cpus[i].mid, &cpu_online_map);
}
__cpu_number_map[boot_cpu_id] = 0;
prom_cpu_nodes[boot_cpu_id] = linux_cpus[0].prom_node;
__cpu_logical_map[0] = boot_cpu_id;
prof_counter(boot_cpu_id) = prof_multiplier(boot_cpu_id) = 1;
}
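
smp_tick_init() now builds cpu_online_map straight from the probed hardware MIDs, discarding any that fall outside NR_CPUS. A user-space sketch of that construction, with invented probe data:

```c
#include <stdio.h>
#include <string.h>

#define NR_CPUS 64

struct prom_cpuinfo { int mid; };

int main(void)
{
	struct prom_cpuinfo linux_cpus[] = { { 0 }, { 2 }, { 5 } };	/* invented */
	int linux_num_cpus = 3;
	unsigned long cpu_online_map;
	int i;

	memset(&cpu_online_map, 0, sizeof(cpu_online_map));
	for (i = 0; i < linux_num_cpus; i++) {
		if (linux_cpus[i].mid < NR_CPUS)
			cpu_online_map |= 1UL << linux_cpus[i].mid;
	}
	printf("cpu_online_map = 0x%lx\n", cpu_online_map);	/* 0x25 */
	return 0;
}
```
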
......@@ -1223,8 +1246,10 @@ static void __init smp_tune_scheduling(void)
"bne,pt %%xcc, 1b\n\t"
" nop\n\t"
"rd %%tick, %1\n\t"
: "=&r" (tick1), "=&r" (tick2), "=&r" (flush_base)
: "2" (flush_base), "r" (flush_base + ecache_size)
: "=&r" (tick1), "=&r" (tick2),
"=&r" (flush_base)
: "2" (flush_base),
"r" (flush_base + ecache_size)
: "g1", "g2", "g3", "g5");
} else {
__asm__ __volatile__("b,pt %%xcc, 1f\n\t"
......@@ -1239,8 +1264,10 @@ static void __init smp_tune_scheduling(void)
"bne,pt %%xcc, 1b\n\t"
" nop\n\t"
"rd %%asr24, %1\n\t"
: "=&r" (tick1), "=&r" (tick2), "=&r" (flush_base)
: "2" (flush_base), "r" (flush_base + ecache_size)
: "=&r" (tick1), "=&r" (tick2),
"=&r" (flush_base)
: "2" (flush_base),
"r" (flush_base + ecache_size)
: "g1", "g2", "g3", "g5");
}
......@@ -1276,10 +1303,8 @@ int setup_profiling_timer(unsigned int multiplier)
return -EINVAL;
save_and_cli(flags);
for (i = 0; i < NR_CPUS; i++) {
if (cpu_present_map & (1UL << i))
prof_multiplier(i) = multiplier;
}
for (i = 0; i < NR_CPUS; i++)
prof_multiplier(i) = multiplier;
current_tick_offset = (timer_tick_offset / multiplier);
restore_flags(flags);
......
......@@ -59,7 +59,6 @@ struct poll {
short revents;
};
extern unsigned prom_cpu_nodes[64];
extern void die_if_kernel(char *str, struct pt_regs *regs);
extern pid_t kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
void _sigpause_common (unsigned int set, struct pt_regs *);
......@@ -103,7 +102,6 @@ extern int dump_fpu (struct pt_regs * regs, elf_fpregset_t * fpregs);
#ifdef CONFIG_SMP
extern spinlock_t kernel_flag;
extern int smp_num_cpus;
#ifdef CONFIG_DEBUG_SPINLOCK
extern void _do_spin_lock (spinlock_t *lock, char *str);
extern void _do_spin_unlock (spinlock_t *lock);
......@@ -149,12 +147,9 @@ EXPORT_SYMBOL_NOVERS(mcount);
/* Per-CPU information table */
EXPORT_SYMBOL(cpu_data);
/* Misc SMP information */
#ifdef CONFIG_SMP
EXPORT_SYMBOL(smp_num_cpus);
#endif
EXPORT_SYMBOL(__cpu_number_map);
EXPORT_SYMBOL(__cpu_logical_map);
/* CPU online map and active count. */
EXPORT_SYMBOL(cpu_online_map);
EXPORT_SYMBOL(sparc64_num_cpus_online);
/* Spinlock debugging library, optional. */
#ifdef CONFIG_DEBUG_SPINLOCK
......
......@@ -24,37 +24,13 @@ int this_is_starfire = 0;
void check_if_starfire(void)
{
int ssnode = prom_finddevice("/ssp-serial");
if(ssnode != 0 && ssnode != -1)
if (ssnode != 0 && ssnode != -1)
this_is_starfire = 1;
}
void starfire_cpu_setup(void)
{
if (this_is_starfire) {
/*
* We do this in starfire_translate and xcall_deliver. When we fix our cpu
* arrays to support > 64 processors we can use the real upaid instead
* of the logical cpuid in __cpu_number_map etc, then we can get rid of
* the translations everywhere. - Anton
*/
#if 0
int i;
/*
* Now must fixup cpu MIDs. OBP gave us a logical
* linear cpuid number, not the real upaid.
*/
for(i = 0; i < linux_num_cpus; i++) {
unsigned int mid = linux_cpus[i].mid;
mid = (((mid & 0x3c) << 1) |
((mid & 0x40) >> 4) |
(mid & 0x3));
linux_cpus[i].mid = mid;
}
#endif
}
/* Currently, nothing to do. */
}
int starfire_hard_smp_processor_id(void)
......@@ -84,7 +60,7 @@ void *starfire_hookup(int upaid)
unsigned long treg_base, hwmid, i;
p = kmalloc(sizeof(*p), GFP_KERNEL);
if(!p) {
if (!p) {
prom_printf("starfire_hookup: No memory, this is insane.\n");
prom_halt();
}
......@@ -95,7 +71,7 @@ void *starfire_hookup(int upaid)
p->hwmid = hwmid;
treg_base += (hwmid << 33UL);
treg_base += 0x200UL;
for(i = 0; i < 32; i++) {
for (i = 0; i < 32; i++) {
p->imap_slots[i] = 0UL;
p->tregs[i] = treg_base + (i * 0x10UL);
/* Lets play it safe and not overwrite existing mappings */
......@@ -117,20 +93,20 @@ unsigned int starfire_translate(unsigned long imap,
unsigned int i;
bus_hwmid = (((unsigned long)imap) >> 33) & 0x7f;
for(p = sflist; p != NULL; p = p->next)
if(p->hwmid == bus_hwmid)
for (p = sflist; p != NULL; p = p->next)
if (p->hwmid == bus_hwmid)
break;
if(p == NULL) {
if (p == NULL) {
prom_printf("XFIRE: Cannot find irqinfo for imap %016lx\n",
((unsigned long)imap));
prom_halt();
}
for(i = 0; i < 32; i++) {
if(p->imap_slots[i] == imap ||
p->imap_slots[i] == 0UL)
for (i = 0; i < 32; i++) {
if (p->imap_slots[i] == imap ||
p->imap_slots[i] == 0UL)
break;
}
if(i == 32) {
if (i == 32) {
printk("starfire_translate: Are you kidding me?\n");
panic("Lucy in the sky....");
}
......@@ -138,8 +114,8 @@ unsigned int starfire_translate(unsigned long imap,
/* map to real upaid */
upaid = (((upaid & 0x3c) << 1) |
((upaid & 0x40) >> 4) |
(upaid & 0x3));
((upaid & 0x40) >> 4) |
(upaid & 0x3));
upa_writel(upaid, p->tregs[i]);
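
The swizzle above converts a logical Starfire port number to the real UPA id: bits 5:2 shift up to 6:3, bit 6 drops down to bit 2, and bits 1:0 pass through. A worked stand-alone example (the input value is arbitrary):

```c
#include <stdio.h>

static unsigned int starfire_swizzle(unsigned int upaid)
{
	return ((upaid & 0x3c) << 1) |	/* bits 5:2 -> 6:3 */
	       ((upaid & 0x40) >> 4) |	/* bit 6 -> bit 2 */
	       (upaid & 0x3);		/* bits 1:0 unchanged */
}

int main(void)
{
	unsigned int in = 0x47;	/* hypothetical logical id */

	printf("0x%02x -> 0x%02x\n", in, starfire_swizzle(in));	/* 0x0f */
	return 0;
}
```
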
......
......@@ -402,7 +402,7 @@ void __init cheetah_ecache_flush_init(void)
{
unsigned long largest_size, smallest_linesize, order;
char type[16];
int node, highest_cpu, i;
int node, i;
/* Scan all cpu device tree nodes, note two values:
* 1) largest E-cache size
......@@ -458,15 +458,7 @@ void __init cheetah_ecache_flush_init(void)
}
/* Now allocate error trap reporting scoreboard. */
highest_cpu = 0;
#ifdef CONFIG_SMP
for (i = 0; i < NR_CPUS; i++) {
if ((1UL << i) & cpu_present_map)
highest_cpu = i;
}
#endif
highest_cpu++;
node = highest_cpu * (2 * sizeof(struct cheetah_err_info));
node = NR_CPUS * (2 * sizeof(struct cheetah_err_info));
for (order = 0; order < MAX_ORDER; order++) {
if ((PAGE_SIZE << order) >= node)
break;
......@@ -483,7 +475,7 @@ void __init cheetah_ecache_flush_init(void)
/* Mark all AFSRs as invalid so that the trap handler will
* log new information there.
*/
for (i = 0; i < 2 * highest_cpu; i++)
for (i = 0; i < 2 * NR_CPUS; i++)
cheetah_error_log[i].afsr = CHAFSR_INVALID;
/* Now patch trap tables. */
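
Sizing the scoreboard by NR_CPUS instead of the highest probed cpu simplifies the code at the cost of a slightly larger allocation; the page order is then the smallest power-of-two span covering it. A sketch of that computation, with assumed PAGE_SIZE/MAX_ORDER values and an illustrative per-cpu record size:

```c
#include <stdio.h>

#define PAGE_SIZE 8192UL	/* sparc64 base page size */
#define MAX_ORDER 11		/* assumed buddy-allocator limit */
#define NR_CPUS 64

int main(void)
{
	/* two error-info records per possible cpu; 64 bytes each is illustrative */
	unsigned long node = NR_CPUS * (2 * 64UL);
	unsigned long order;

	for (order = 0; order < MAX_ORDER; order++) {
		if ((PAGE_SIZE << order) >= node)
			break;
	}
	printf("need order %lu (%lu bytes)\n", order, PAGE_SIZE << order);
	return 0;
}
```
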
......
......@@ -21,6 +21,7 @@
#include <asm/string.h>
#include <asm/oplib.h>
#include <asm/idprom.h>
#include <asm/smp.h>
#include "conv.h"
......@@ -336,8 +337,6 @@ asmlinkage int solaris_sysinfo(int cmd, u32 buf, s32 count)
#define SOLARIS_CONFIG_PHYS_PAGES 26
#define SOLARIS_CONFIG_AVPHYS_PAGES 27
extern unsigned prom_cpu_nodes[NR_CPUS];
asmlinkage int solaris_sysconf(int id)
{
switch (id) {
......@@ -353,7 +352,7 @@ asmlinkage int solaris_sysconf(int id)
"clock-frequency", 167000000);
#ifdef CONFIG_SMP
case SOLARIS_CONFIG_NPROC_CONF: return NR_CPUS;
case SOLARIS_CONFIG_NPROC_ONLN: return smp_num_cpus;
case SOLARIS_CONFIG_NPROC_ONLN: return num_online_cpus();
#else
case SOLARIS_CONFIG_NPROC_CONF: return 1;
case SOLARIS_CONFIG_NPROC_ONLN: return 1;
......
......@@ -41,6 +41,7 @@
#include <linux/ethtool.h>
#include <linux/crc32.h>
#include <linux/random.h>
#include <linux/tqueue.h>
#include <asm/system.h>
#include <asm/bitops.h>
......
......@@ -64,9 +64,12 @@ static __inline__ int irqs_running(void)
{
int i;
for (i = 0; i < smp_num_cpus; i++)
if (local_irq_count(cpu_logical_map(i)))
for (i = 0; i < NR_CPUS; i++) {
if (!cpu_online(i))
continue;
if (local_irq_count(i))
return 1;
}
return 0;
}
......
......@@ -24,7 +24,8 @@ struct prom_cpuinfo {
};
extern int linux_num_cpus; /* number of CPUs probed */
extern struct prom_cpuinfo linux_cpus[64];
extern struct prom_cpuinfo linux_cpus[NR_CPUS];
extern unsigned int prom_cpu_nodes[NR_CPUS];
#endif /* !(__ASSEMBLY__) */
......@@ -60,9 +61,21 @@ extern cpuinfo_sparc cpu_data[NR_CPUS];
* Private routines/data
*/
#include <asm/bitops.h>
#include <asm/atomic.h>
extern unsigned char boot_cpu_id;
extern unsigned long cpu_present_map;
#define cpu_online_map cpu_present_map
extern unsigned long cpu_online_map;
#define cpu_online(cpu) (cpu_online_map & (1UL << (cpu)))
extern atomic_t sparc64_num_cpus_online;
#define num_online_cpus() (atomic_read(&sparc64_num_cpus_online))
static inline int any_online_cpu(unsigned long mask)
{
if ((mask &= cpu_online_map) != 0UL)
return __ffs(mask);
return -1;
}
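
These are the primitives the rest of the patch leans on: cpu_online() tests one bit of cpu_online_map, num_online_cpus() reads a maintained counter, and any_online_cpu() picks the lowest online cpu in a mask. A user-space sketch, modelling __ffs() with the gcc builtin __builtin_ctzl():

```c
#include <stdio.h>

static unsigned long cpu_online_map = 0xcUL;	/* hypothetical: cpus 2 and 3 */

#define cpu_online(cpu) ((cpu_online_map & (1UL << (cpu))) != 0)

static int any_online_cpu(unsigned long mask)
{
	if ((mask &= cpu_online_map) != 0UL)
		return __builtin_ctzl(mask);	/* stands in for __ffs() */
	return -1;
}

int main(void)
{
	printf("cpu 1 online? %d\n", cpu_online(1));
	printf("first online cpu in 0xa: %d\n", any_online_cpu(0xaUL));	/* 3 */
	return 0;
}
```
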
/*
* General functions that each host system must provide.
......@@ -72,18 +85,6 @@ extern void smp_callin(void);
extern void smp_boot_cpus(void);
extern void smp_store_cpu_info(int id);
extern __volatile__ int __cpu_number_map[NR_CPUS];
extern __volatile__ int __cpu_logical_map[NR_CPUS];
extern __inline__ int cpu_logical_map(int cpu)
{
return __cpu_logical_map[cpu];
}
extern __inline__ int cpu_number_map(int cpu)
{
return __cpu_number_map[cpu];
}
extern __inline__ int hard_smp_processor_id(void)
{
if (tlb_type == cheetah) {
......