Commit 2fa182f2 authored by David S. Miller's avatar David S. Miller

Sparc64: Update for CPU hotplugging changes.

parent e170a233
......@@ -26,9 +26,6 @@ struct cpu_fp_info {
char* fp_name;
};
/* In order to get the fpu type correct, you need to take the IDPROM's
* machine type value into consideration too. I will fix this.
*/
struct cpu_fp_info linux_sparc_fpu[] = {
{ 0x17, 0x10, 0, "UltraSparc I integrated FPU"},
{ 0x22, 0x10, 0, "UltraSparc II integrated FPU"},
......@@ -51,13 +48,8 @@ struct cpu_iu_info linux_sparc_chips[] = {
#define NSPARCCHIPS (sizeof(linux_sparc_chips)/sizeof(struct cpu_iu_info))
#ifdef CONFIG_SMP
char *sparc_cpu_type[64] = { "cpu-oops", "cpu-oops1", "cpu-oops2", "cpu-oops3" };
char *sparc_fpu_type[64] = { "fpu-oops", "fpu-oops1", "fpu-oops2", "fpu-oops3" };
#else
char *sparc_cpu_type[64] = { "cpu-oops", };
char *sparc_fpu_type[64] = { "fpu-oops", };
#endif
char *sparc_cpu_type[NR_CPUS] = { "cpu-oops", };
char *sparc_fpu_type[NR_CPUS] = { "fpu-oops", };
unsigned int fsr_storage;
......@@ -72,39 +64,47 @@ void __init cpu_probe(void)
fprs = fprs_read ();
fprs_write (FPRS_FEF);
__asm__ __volatile__ ("rdpr %%ver, %0; stx %%fsr, [%1]" : "=&r" (ver) : "r" (&fpu_vers));
__asm__ __volatile__ ("rdpr %%ver, %0; stx %%fsr, [%1]"
: "=&r" (ver)
: "r" (&fpu_vers));
fprs_write (fprs);
manuf = ((ver >> 48)&0xffff);
impl = ((ver >> 32)&0xffff);
manuf = ((ver >> 48) & 0xffff);
impl = ((ver >> 32) & 0xffff);
fpu_vers = ((fpu_vers>>17)&0x7);
fpu_vers = ((fpu_vers >> 17) & 0x7);
for(i = 0; i<NSPARCCHIPS; i++) {
if(linux_sparc_chips[i].manuf == manuf)
if(linux_sparc_chips[i].impl == impl) {
sparc_cpu_type[cpuid] = linux_sparc_chips[i].cpu_name;
for (i = 0; i < NSPARCCHIPS; i++) {
if (linux_sparc_chips[i].manuf == manuf) {
if (linux_sparc_chips[i].impl == impl) {
sparc_cpu_type[cpuid] =
linux_sparc_chips[i].cpu_name;
break;
}
}
}
if(i==NSPARCCHIPS) {
printk("DEBUG: manuf = 0x%x impl = 0x%x\n", manuf,
impl);
if (i == NSPARCCHIPS) {
printk("DEBUG: manuf = 0x%x impl = 0x%x\n",
manuf, impl);
sparc_cpu_type[cpuid] = "Unknown CPU";
}
for(i = 0; i<NSPARCFPU; i++) {
if(linux_sparc_fpu[i].manuf == manuf && linux_sparc_fpu[i].impl == impl)
if(linux_sparc_fpu[i].fpu_vers == fpu_vers) {
sparc_fpu_type[cpuid] = linux_sparc_fpu[i].fp_name;
for (i = 0; i < NSPARCFPU; i++) {
if (linux_sparc_fpu[i].manuf == manuf &&
linux_sparc_fpu[i].impl == impl) {
if (linux_sparc_fpu[i].fpu_vers == fpu_vers) {
sparc_fpu_type[cpuid] =
linux_sparc_fpu[i].fp_name;
break;
}
}
}
if(i == NSPARCFPU) {
printk("DEBUG: manuf = 0x%x impl = 0x%x fsr.vers = 0x%x\n", manuf, impl,
(unsigned)fpu_vers);
if (i == NSPARCFPU) {
printk("DEBUG: manuf = 0x%x impl = 0x%x fsr.vers = 0x%x\n",
manuf, impl,
(unsigned int) fpu_vers);
sparc_fpu_type[cpuid] = "Unknown FPU";
}
}
......@@ -17,8 +17,8 @@
#include <asm/smp.h>
#include <asm/spitfire.h>
struct prom_cpuinfo linux_cpus[64] __initdata = { { 0 } };
unsigned prom_cpu_nodes[64];
struct prom_cpuinfo linux_cpus[NR_CPUS] __initdata = { { 0 } };
unsigned prom_cpu_nodes[NR_CPUS];
int linux_num_cpus = 0;
extern void cpu_probe(void);
......
......@@ -122,9 +122,12 @@ int show_interrupts(struct seq_file *p, void *v)
#ifndef CONFIG_SMP
seq_printf(p, "%10u ", kstat_irqs(i));
#else
for (j = 0; j < smp_num_cpus; j++)
for (j = 0; j < NR_CPUS; j++) {
if (!cpu_online(j))
continue;
seq_printf(p, "%10u ",
kstat.irqs[cpu_logical_map(j)][i]);
kstat.irqs[j][i]);
}
#endif
seq_printf(p, " %s:%lx", action->name,
get_ino_in_irqaction(action));
......@@ -574,12 +577,18 @@ static void show(char * str)
printk("\n%s, CPU %d:\n", str, cpu);
printk("irq: %d [ ", irqs_running());
for (i = 0; i < smp_num_cpus; i++)
for (i = 0; i < NR_CPUS; i++) {
if (!cpu_online(i))
continue;
printk("%u ", __brlock_array[i][BR_GLOBALIRQ_LOCK]);
}
printk("]\nbh: %d [ ",
(spin_is_locked(&global_bh_lock) ? 1 : 0));
for (i = 0; i < smp_num_cpus; i++)
for (i = 0; i < NR_CPUS; i++) {
if (!cpu_online(i))
continue;
printk("%u ", local_bh_count(i));
}
printk("]\n");
}
......@@ -743,8 +752,9 @@ static inline void redirect_intr(int cpu, struct ino_bucket *bp)
unsigned long cpu_mask = get_smpaff_in_irqaction(ap);
unsigned int buddy, ticks;
cpu_mask &= cpu_online_map;
if (cpu_mask == 0)
cpu_mask = ~0UL;
cpu_mask = cpu_online_map;
if (this_is_starfire != 0 ||
bp->pil >= 10 || current->pid == 0)
......@@ -753,28 +763,23 @@ static inline void redirect_intr(int cpu, struct ino_bucket *bp)
/* 'cpu' is the MID (ie. UPAID), calculate the MID
* of our buddy.
*/
buddy = cpu_number_map(cpu) + 1;
if (buddy >= NR_CPUS ||
cpu_logical_map(buddy) == -1)
buddy = cpu + 1;
if (buddy >= NR_CPUS)
buddy = 0;
ticks = 0;
while ((cpu_mask & (1UL << buddy)) == 0) {
buddy++;
if (buddy >= NR_CPUS ||
cpu_logical_map(buddy) == -1)
buddy = cpu_logical_map(0);
if (++buddy >= NR_CPUS)
buddy = 0;
if (++ticks > NR_CPUS) {
put_smpaff_in_irqaction(ap, 0);
goto out;
}
}
if (buddy == cpu_number_map(cpu))
if (buddy == cpu)
goto out;
buddy = cpu_logical_map(buddy);
/* Voo-doo programming. */
if (cpu_data[buddy].idle_volume < FORWARD_VOLUME)
goto out;
......@@ -1140,22 +1145,28 @@ static int retarget_one_irq(struct irqaction *p, int goal_cpu)
unsigned long imap = bucket->imap;
unsigned int tid;
while (!cpu_online(goal_cpu)) {
if (++goal_cpu >= NR_CPUS)
goal_cpu = 0;
}
if (tlb_type == cheetah) {
tid = __cpu_logical_map[goal_cpu] << 26;
tid = goal_cpu << 26;
tid &= IMAP_AID_SAFARI;
} else if (this_is_starfire == 0) {
tid = __cpu_logical_map[goal_cpu] << 26;
tid = goal_cpu << 26;
tid &= IMAP_TID_UPA;
} else {
tid = (starfire_translate(imap, __cpu_logical_map[goal_cpu]) << 26);
tid = (starfire_translate(imap, goal_cpu) << 26);
tid &= IMAP_TID_UPA;
}
upa_writel(tid | IMAP_VALID, imap);
goal_cpu++;
if(goal_cpu >= NR_CPUS ||
__cpu_logical_map[goal_cpu] == -1)
while (!cpu_online(goal_cpu)) {
if (++goal_cpu >= NR_CPUS)
goal_cpu = 0;
}
return goal_cpu;
}
......@@ -1326,38 +1337,6 @@ static unsigned int parse_hex_value (const char *buffer,
return 0;
}
/* Translate a bitmask of hardware (UPA) cpu ids into the
 * corresponding bitmask of logical cpu numbers.
 */
static unsigned long hw_to_logical(unsigned long mask)
{
	unsigned long result = 0UL;
	int bit;

	for (bit = 0; bit < NR_CPUS; bit++) {
		if (!(mask & (1UL << bit)))
			continue;
		/* cpu_number_map(): hw id -> logical cpu number. */
		result |= 1UL << cpu_number_map(bit);
	}

	return result;
}
/* Translate a bitmask of logical cpu numbers into the
 * corresponding bitmask of hardware (UPA) cpu ids.
 */
static unsigned long logical_to_hw(unsigned long mask)
{
	unsigned long result = 0UL;
	int bit;

	for (bit = 0; bit < NR_CPUS; bit++) {
		if (!(mask & (1UL << bit)))
			continue;
		/* cpu_logical_map(): logical cpu number -> hw id. */
		result |= 1UL << cpu_logical_map(bit);
	}

	return result;
}
static int irq_affinity_read_proc (char *page, char **start, off_t off,
int count, int *eof, void *data)
{
......@@ -1365,8 +1344,6 @@ static int irq_affinity_read_proc (char *page, char **start, off_t off,
struct irqaction *ap = bp->irq_info;
unsigned long mask = get_smpaff_in_irqaction(ap);
mask = logical_to_hw(mask);
if (count < HEX_DIGITS+1)
return -EINVAL;
return sprintf (page, "%016lx\n", mask == 0 ? ~0UL : mask);
......@@ -1375,14 +1352,11 @@ static int irq_affinity_read_proc (char *page, char **start, off_t off,
static inline void set_intr_affinity(int irq, unsigned long hw_aff)
{
struct ino_bucket *bp = ivector_table + irq;
unsigned long aff = hw_to_logical(hw_aff);
/*
* Users specify affinity in terms of cpu ids, which is what
* is displayed via /proc/cpuinfo. As soon as we do this,
* handler_irq() might see and take action.
/* Users specify affinity in terms of hw cpu ids.
* As soon as we do this, handler_irq() might see and take action.
*/
put_smpaff_in_irqaction((struct irqaction *)bp->irq_info, aff);
put_smpaff_in_irqaction((struct irqaction *)bp->irq_info, hw_aff);
/* Migration is simply done by the next cpu to service this
* interrupt.
......@@ -1393,7 +1367,7 @@ static int irq_affinity_write_proc (struct file *file, const char *buffer,
unsigned long count, void *data)
{
int irq = (long) data, full_count = count, err;
unsigned long new_value;
unsigned long new_value, i;
err = parse_hex_value(buffer, count, &new_value);
......@@ -1402,7 +1376,12 @@ static int irq_affinity_write_proc (struct file *file, const char *buffer,
* way to make the system unusable accidentally :-) At least
* one online CPU still has to be targeted.
*/
new_value &= cpu_online_map;
for (i = 0; i < NR_CPUS; i++) {
if ((new_value & (1UL << i)) != 0 &&
!cpu_online(i))
new_value &= ~(1UL << i);
}
if (!new_value)
return -EINVAL;
......
......@@ -649,7 +649,7 @@ static int show_cpuinfo(struct seq_file *m, void *__unused)
(prom_prev >> 8) & 0xff,
prom_prev & 0xff,
linux_num_cpus,
smp_num_cpus
num_online_cpus()
#ifndef CONFIG_SMP
, loops_per_jiffy/(500000/HZ),
(loops_per_jiffy/(5000/HZ)) % 100,
......
This diff is collapsed.
......@@ -59,7 +59,6 @@ struct poll {
short revents;
};
extern unsigned prom_cpu_nodes[64];
extern void die_if_kernel(char *str, struct pt_regs *regs);
extern pid_t kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
void _sigpause_common (unsigned int set, struct pt_regs *);
......@@ -103,7 +102,6 @@ extern int dump_fpu (struct pt_regs * regs, elf_fpregset_t * fpregs);
#ifdef CONFIG_SMP
extern spinlock_t kernel_flag;
extern int smp_num_cpus;
#ifdef CONFIG_DEBUG_SPINLOCK
extern void _do_spin_lock (spinlock_t *lock, char *str);
extern void _do_spin_unlock (spinlock_t *lock);
......@@ -149,12 +147,9 @@ EXPORT_SYMBOL_NOVERS(mcount);
/* Per-CPU information table */
EXPORT_SYMBOL(cpu_data);
/* Misc SMP information */
#ifdef CONFIG_SMP
EXPORT_SYMBOL(smp_num_cpus);
#endif
EXPORT_SYMBOL(__cpu_number_map);
EXPORT_SYMBOL(__cpu_logical_map);
/* CPU online map and active count. */
EXPORT_SYMBOL(cpu_online_map);
EXPORT_SYMBOL(sparc64_num_cpus_online);
/* Spinlock debugging library, optional. */
#ifdef CONFIG_DEBUG_SPINLOCK
......
......@@ -24,37 +24,13 @@ int this_is_starfire = 0;
/* Detect a Starfire (Sun E10000) system by probing the PROM device
 * tree for the "/ssp-serial" node, which only exists on that platform.
 * Sets the global this_is_starfire flag on a match.
 */
void check_if_starfire(void)
{
	int ssnode = prom_finddevice("/ssp-serial");

	/* prom_finddevice() returns 0 or -1 when the node is absent.
	 * The original span repeated this condition twice (an apparent
	 * diff artifact); one test suffices.
	 */
	if (ssnode != 0 && ssnode != -1)
		this_is_starfire = 1;
}
/* Per-cpu setup hook for Starfire (E10000) systems.
 *
 * NOTE(review): this span appears to interleave the old and new
 * versions of the function from a diff — the MID-fixup loop is
 * compiled out under "#if 0" and the trailing comment says there is
 * nothing to do, so the function is effectively a no-op. Confirm
 * against the real tree before relying on either half.
 */
void starfire_cpu_setup(void)
{
if (this_is_starfire) {
/*
 * We do this in starfire_translate and xcall_deliver. When we fix our cpu
 * arrays to support > 64 processors we can use the real upaid instead
 * of the logical cpuid in __cpu_number_map etc, then we can get rid of
 * the translations everywhere. - Anton
 */
#if 0
int i;
/*
 * Now must fixup cpu MIDs. OBP gave us a logical
 * linear cpuid number, not the real upaid.
 */
for(i = 0; i < linux_num_cpus; i++) {
unsigned int mid = linux_cpus[i].mid;
mid = (((mid & 0x3c) << 1) |
((mid & 0x40) >> 4) |
(mid & 0x3));
linux_cpus[i].mid = mid;
}
#endif
}
/* Currently, nothing to do. */
}
int starfire_hard_smp_processor_id(void)
......@@ -84,7 +60,7 @@ void *starfire_hookup(int upaid)
unsigned long treg_base, hwmid, i;
p = kmalloc(sizeof(*p), GFP_KERNEL);
if(!p) {
if (!p) {
prom_printf("starfire_hookup: No memory, this is insane.\n");
prom_halt();
}
......@@ -95,7 +71,7 @@ void *starfire_hookup(int upaid)
p->hwmid = hwmid;
treg_base += (hwmid << 33UL);
treg_base += 0x200UL;
for(i = 0; i < 32; i++) {
for (i = 0; i < 32; i++) {
p->imap_slots[i] = 0UL;
p->tregs[i] = treg_base + (i * 0x10UL);
/* Let's play it safe and not overwrite existing mappings */
......@@ -117,20 +93,20 @@ unsigned int starfire_translate(unsigned long imap,
unsigned int i;
bus_hwmid = (((unsigned long)imap) >> 33) & 0x7f;
for(p = sflist; p != NULL; p = p->next)
if(p->hwmid == bus_hwmid)
for (p = sflist; p != NULL; p = p->next)
if (p->hwmid == bus_hwmid)
break;
if(p == NULL) {
if (p == NULL) {
prom_printf("XFIRE: Cannot find irqinfo for imap %016lx\n",
((unsigned long)imap));
prom_halt();
}
for(i = 0; i < 32; i++) {
if(p->imap_slots[i] == imap ||
for (i = 0; i < 32; i++) {
if (p->imap_slots[i] == imap ||
p->imap_slots[i] == 0UL)
break;
}
if(i == 32) {
if (i == 32) {
printk("starfire_translate: Are you kidding me?\n");
panic("Lucy in the sky....");
}
......
......@@ -402,7 +402,7 @@ void __init cheetah_ecache_flush_init(void)
{
unsigned long largest_size, smallest_linesize, order;
char type[16];
int node, highest_cpu, i;
int node, i;
/* Scan all cpu device tree nodes, note two values:
* 1) largest E-cache size
......@@ -458,15 +458,7 @@ void __init cheetah_ecache_flush_init(void)
}
/* Now allocate error trap reporting scoreboard. */
highest_cpu = 0;
#ifdef CONFIG_SMP
for (i = 0; i < NR_CPUS; i++) {
if ((1UL << i) & cpu_present_map)
highest_cpu = i;
}
#endif
highest_cpu++;
node = highest_cpu * (2 * sizeof(struct cheetah_err_info));
node = NR_CPUS * (2 * sizeof(struct cheetah_err_info));
for (order = 0; order < MAX_ORDER; order++) {
if ((PAGE_SIZE << order) >= node)
break;
......@@ -483,7 +475,7 @@ void __init cheetah_ecache_flush_init(void)
/* Mark all AFSRs as invalid so that the trap handler will
* log new information there.
*/
for (i = 0; i < 2 * highest_cpu; i++)
for (i = 0; i < 2 * NR_CPUS; i++)
cheetah_error_log[i].afsr = CHAFSR_INVALID;
/* Now patch trap tables. */
......
......@@ -21,6 +21,7 @@
#include <asm/string.h>
#include <asm/oplib.h>
#include <asm/idprom.h>
#include <asm/smp.h>
#include "conv.h"
......@@ -336,8 +337,6 @@ asmlinkage int solaris_sysinfo(int cmd, u32 buf, s32 count)
#define SOLARIS_CONFIG_PHYS_PAGES 26
#define SOLARIS_CONFIG_AVPHYS_PAGES 27
extern unsigned prom_cpu_nodes[NR_CPUS];
asmlinkage int solaris_sysconf(int id)
{
switch (id) {
......@@ -353,7 +352,7 @@ asmlinkage int solaris_sysconf(int id)
"clock-frequency", 167000000);
#ifdef CONFIG_SMP
case SOLARIS_CONFIG_NPROC_CONF: return NR_CPUS;
case SOLARIS_CONFIG_NPROC_ONLN: return smp_num_cpus;
case SOLARIS_CONFIG_NPROC_ONLN: return num_online_cpus();
#else
case SOLARIS_CONFIG_NPROC_CONF: return 1;
case SOLARIS_CONFIG_NPROC_ONLN: return 1;
......
......@@ -64,9 +64,12 @@ static __inline__ int irqs_running(void)
{
int i;
for (i = 0; i < smp_num_cpus; i++)
if (local_irq_count(cpu_logical_map(i)))
for (i = 0; i < NR_CPUS; i++) {
if (!cpu_online(i))
continue;
if (local_irq_count(i))
return 1;
}
return 0;
}
......
......@@ -24,7 +24,8 @@ struct prom_cpuinfo {
};
extern int linux_num_cpus; /* number of CPUs probed */
extern struct prom_cpuinfo linux_cpus[64];
extern struct prom_cpuinfo linux_cpus[NR_CPUS];
extern unsigned int prom_cpu_nodes[NR_CPUS];
#endif /* !(__ASSEMBLY__) */
......@@ -60,9 +61,21 @@ extern cpuinfo_sparc cpu_data[NR_CPUS];
* Private routines/data
*/
#include <asm/bitops.h>
#include <asm/atomic.h>
extern unsigned char boot_cpu_id;
extern unsigned long cpu_present_map;
#define cpu_online_map cpu_present_map
extern unsigned long cpu_online_map;
#define cpu_online(cpu) (cpu_online_map & (1UL << (cpu)))
extern atomic_t sparc64_num_cpus_online;
#define num_online_cpus() (atomic_read(&sparc64_num_cpus_online))
/* Return the lowest-numbered online cpu present in 'mask',
 * or -1 when no cpu in the mask is online.
 */
static inline int any_online_cpu(unsigned long mask)
{
	unsigned long online = mask & cpu_online_map;

	if (online == 0UL)
		return -1;

	return __ffs(online);
}
/*
* General functions that each host system must provide.
......@@ -72,18 +85,6 @@ extern void smp_callin(void);
extern void smp_boot_cpus(void);
extern void smp_store_cpu_info(int id);
extern __volatile__ int __cpu_number_map[NR_CPUS];
extern __volatile__ int __cpu_logical_map[NR_CPUS];
extern __inline__ int cpu_logical_map(int cpu)
{
return __cpu_logical_map[cpu];
}
extern __inline__ int cpu_number_map(int cpu)
{
return __cpu_number_map[cpu];
}
extern __inline__ int hard_smp_processor_id(void)
{
if (tlb_type == cheetah) {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment