/*
 * SMP support for ppc.
 *
 * Written by Cort Dougan (cort@cs.nmt.edu) borrowing a great
 * deal of code from the sparc and intel versions.
 *
 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
 *
 * PowerPC-64 Support added by Dave Engebretsen, Peter Bergner, and
 * Mike Corrigan {engebret|bergner|mikec}@us.ibm.com
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/cache.h>
#include <linux/err.h>
#include <linux/sysdev.h>
#include <linux/cpu.h>

#include <asm/ptrace.h>
#include <asm/atomic.h>
#include <asm/irq.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/hardirq.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/smp.h>
#include <asm/naca.h>
#include <asm/paca.h>
#include <asm/iSeries/LparData.h>
#include <asm/iSeries/HvCall.h>
#include <asm/iSeries/HvCallCfg.h>
#include <asm/time.h>
#include <asm/ppcdebug.h>
#include "open_pic.h"
#include <asm/machdep.h>
#include <asm/xics.h>
#include <asm/cputable.h>
#include <asm/system.h>

int smp_threads_ready;
unsigned long cache_decay_ticks;

cpumask_t cpu_possible_map = CPU_MASK_NONE;
cpumask_t cpu_online_map = CPU_MASK_NONE;
cpumask_t cpu_available_map = CPU_MASK_NONE;
cpumask_t cpu_present_at_boot = CPU_MASK_NONE;

EXPORT_SYMBOL(cpu_online_map);
EXPORT_SYMBOL(cpu_possible_map);

struct smp_ops_t *smp_ops;

static volatile unsigned int cpu_callin_map[NR_CPUS];

extern unsigned char stab_array[];

extern int cpu_idle(void *unused);
void smp_call_function_interrupt(void);
extern long register_vpa(unsigned long flags, unsigned long proc,
			 unsigned long vpa);

/* Low level assembly function used to backup CPU 0 state */
extern void __save_cpu_setup(void);

#ifdef CONFIG_PPC_ISERIES
static unsigned long iSeries_smp_message[NR_CPUS];

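/*
 * One word of pending-message bits per cpu: a sender sets the bit for
 * its message and fires an HvCall IPI, and the receiver below clears
 * and dispatches each bit through smp_message_recv().
 */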
void iSeries_smp_message_recv(struct pt_regs *regs)
{
	int cpu = smp_processor_id();
	int msg;

	if (num_online_cpus() < 2)
		return;

	for (msg = 0; msg < 4; ++msg)
		if (test_and_clear_bit(msg, &iSeries_smp_message[cpu]))
			smp_message_recv(msg, regs);
}

static inline void smp_iSeries_do_message(int cpu, int msg)
{
	set_bit(msg, &iSeries_smp_message[cpu]);
	HvCall_sendIPI(&(paca[cpu]));
}

static void smp_iSeries_message_pass(int target, int msg)
{
	int i;

	if (target < NR_CPUS)
		smp_iSeries_do_message(target, msg);
	else {
		for_each_online_cpu(i) {
			if (target == MSG_ALL_BUT_SELF
			    && i == smp_processor_id())
				continue;
			smp_iSeries_do_message(i, msg);
		}
	}
}

static int smp_iSeries_numProcs(void)
{
	unsigned np, i;
	struct ItLpPaca *lpPaca;

	np = 0;
	for (i = 0; i < NR_CPUS; ++i) {
		lpPaca = paca[i].xLpPacaPtr;
		if (lpPaca->xDynProcStatus < 2) {
			cpu_set(i, cpu_available_map);
			cpu_set(i, cpu_possible_map);
			cpu_set(i, cpu_present_at_boot);
			++np;
		}
	}
	return np;
}

static int smp_iSeries_probe(void)
{
	unsigned i;
	unsigned np = 0;
	struct ItLpPaca *lpPaca;

	for (i = 0; i < NR_CPUS; ++i) {
		lpPaca = paca[i].xLpPacaPtr;
		if (lpPaca->xDynProcStatus < 2) {
			/*paca[i].active = 1;*/
			++np;
		}
	}

	return np;
}

static void smp_iSeries_kick_cpu(int nr)
{
	struct ItLpPaca *lpPaca;

	BUG_ON(nr < 0 || nr >= NR_CPUS);

	/* Verify that our partition has a processor nr */
	lpPaca = paca[nr].xLpPacaPtr;
	if (lpPaca->xDynProcStatus >= 2)
		return;

	/* The processor is currently spinning, waiting
	 * for the xProcStart field to become non-zero.
	 * After we set xProcStart, the processor will
	 * continue on to secondary_start in iSeries_head.S.
	 */
	paca[nr].xProcStart = 1;
}

static void __devinit smp_iSeries_setup_cpu(int nr)
{
}

static struct smp_ops_t iSeries_smp_ops = {
	.message_pass = smp_iSeries_message_pass,
	.probe        = smp_iSeries_probe,
	.kick_cpu     = smp_iSeries_kick_cpu,
	.setup_cpu    = smp_iSeries_setup_cpu,
};

/* This is called very early. */
void __init smp_init_iSeries(void)
{
	smp_ops = &iSeries_smp_ops;
	systemcfg->processorCount = smp_iSeries_numProcs();
}
#endif

#ifdef CONFIG_PPC_PSERIES
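/*
 * OpenPIC IPIs are delivered to a physical-cpu bitmask; MSG_ALL_BUT_SELF
 * just clears the sender's own bit out of the all-ones mask.
 */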
void smp_openpic_message_pass(int target, int msg)
{
	/* make sure we're sending something that translates to an IPI */
	if (msg > 0x3) {
		printk("SMP %d: smp_message_pass: unknown msg %d\n",
		       smp_processor_id(), msg);
		return;
	}
	switch (target) {
	case MSG_ALL:
		openpic_cause_IPI(msg, 0xffffffff);
		break;
	case MSG_ALL_BUT_SELF:
		openpic_cause_IPI(msg,
				  0xffffffff & ~(1 << smp_processor_id()));
		break;
	default:
		openpic_cause_IPI(msg, 1 << target);
		break;
	}
}

static int __init smp_openpic_probe(void)
{
	int nr_cpus;

	nr_cpus = cpus_weight(cpu_possible_map);

	if (nr_cpus > 1)
		openpic_request_IPIs();

	return nr_cpus;
}

static void __devinit smp_openpic_setup_cpu(int cpu)
{
	do_openpic_setup_cpu();
}

static void smp_pSeries_kick_cpu(int nr)
{
	BUG_ON(nr < 0 || nr >= NR_CPUS);

	/* The processor is currently spinning, waiting
	 * for the xProcStart field to become non-zero.
	 * After we set xProcStart, the processor will
	 * continue on to secondary_start.
	 */
	paca[nr].xProcStart = 1;
}
#endif

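/*
 * Stagger the cpus' first jiffy updates evenly across one jiffy of
 * timebase ticks so their decrementer interrupts don't all land at
 * the same instant.
 */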
static void __init smp_space_timers(unsigned int max_cpus)
{
	int i;
	unsigned long offset = tb_ticks_per_jiffy / max_cpus;
	unsigned long previous_tb = paca[boot_cpuid].next_jiffy_update_tb;

	for_each_cpu(i) {
		if (i != boot_cpuid) {
			paca[i].next_jiffy_update_tb =
				previous_tb + offset;
			previous_tb = paca[i].next_jiffy_update_tb;
		}
	}
}

#ifdef CONFIG_PPC_PSERIES
void vpa_init(int cpu)
{
	unsigned long flags;

	/* Register the Virtual Processor Area (VPA) */
	printk(KERN_INFO "register_vpa: cpu 0x%x\n", cpu);
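	/* 1UL << (63 - 18) sets bit 18 counting from the MSB, the IBM
	 * bit-numbering convention the hypervisor calls are specified in */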
	flags = 1UL << (63 - 18);
	paca[cpu].xLpPaca.xSLBCount = 64; /* SLB restore highwater mark */
	register_vpa(flags, cpu, __pa((unsigned long)&(paca[cpu].xLpPaca))); 
}

static inline void smp_xics_do_message(int cpu, int msg)
{
	set_bit(msg, &xics_ipi_message[cpu].value);
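	/* make sure the message bit is visible before the IPI is raised */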
	mb();
	xics_cause_IPI(cpu);
}

static void smp_xics_message_pass(int target, int msg)
{
	unsigned int i;

	if (target < NR_CPUS) {
		smp_xics_do_message(target, msg);
	} else {
		for_each_online_cpu(i) {
			if (target == MSG_ALL_BUT_SELF
			    && i == smp_processor_id())
				continue;
			smp_xics_do_message(i, msg);
		}
	}
}

extern void xics_request_IPIs(void);

static int __init smp_xics_probe(void)
{
#ifdef CONFIG_SMP
	xics_request_IPIs();
#endif

	return cpus_weight(cpu_possible_map);
}

static void __devinit smp_xics_setup_cpu(int cpu)
{
	if (cpu != boot_cpuid)
		xics_setup_cpu();
}

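/*
 * Timebase handshake for non-LPAR pSeries: the giver freezes the
 * timebase via RTAS and publishes its value in `timebase'; the taker
 * spins for it, copies it into its own timebase registers and writes
 * zero back, which releases the giver to thaw the timebase.
 */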
static spinlock_t timebase_lock = SPIN_LOCK_UNLOCKED;
static unsigned long timebase = 0;

static void __devinit pSeries_give_timebase(void)
{
	spin_lock(&timebase_lock);
	rtas_call(rtas_token("freeze-time-base"), 0, 1, NULL);
	timebase = get_tb();
	spin_unlock(&timebase_lock);

	while (timebase)
		barrier();
	rtas_call(rtas_token("thaw-time-base"), 0, 1, NULL);
}

static void __devinit pSeries_take_timebase(void)
{
	while (!timebase)
		barrier();
	spin_lock(&timebase_lock);
	set_tb(timebase >> 32, timebase & 0xffffffff);
	timebase = 0;
	spin_unlock(&timebase_lock);
}

static struct smp_ops_t pSeries_openpic_smp_ops = {
	.message_pass	= smp_openpic_message_pass,
	.probe		= smp_openpic_probe,
	.kick_cpu	= smp_pSeries_kick_cpu,
	.setup_cpu	= smp_openpic_setup_cpu,
};

static struct smp_ops_t pSeries_xics_smp_ops = {
	.message_pass	= smp_xics_message_pass,
	.probe		= smp_xics_probe,
	.kick_cpu	= smp_pSeries_kick_cpu,
	.setup_cpu	= smp_xics_setup_cpu,
};

/* This is called very early */
void __init smp_init_pSeries(void)
{
	if (naca->interrupt_controller == IC_OPEN_PIC)
		smp_ops = &pSeries_openpic_smp_ops;
	else
		smp_ops = &pSeries_xics_smp_ops;

	/* Non-lpar has additional take/give timebase */
	if (systemcfg->platform == PLATFORM_PSERIES) {
		smp_ops->give_timebase = pSeries_give_timebase;
		smp_ops->take_timebase = pSeries_take_timebase;
	}
}
#endif

void smp_local_timer_interrupt(struct pt_regs * regs)
{
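	/* run update_process_times() once every prof_multiplier ticks,
	 * counted down in the per-cpu prof_counter */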
	if (!--(get_paca()->prof_counter)) {
		update_process_times(user_mode(regs));
		get_paca()->prof_counter = get_paca()->prof_multiplier;
	}
}

void smp_message_recv(int msg, struct pt_regs *regs)
{
	switch (msg) {
	case PPC_MSG_CALL_FUNCTION:
		smp_call_function_interrupt();
		break;
	case PPC_MSG_RESCHEDULE: 
		/* XXX Do we have to do this? */
		set_need_resched();
		break;
#if 0
	case PPC_MSG_MIGRATE_TASK:
		/* spare */
		break;
#endif
#ifdef CONFIG_DEBUGGER
	case PPC_MSG_DEBUGGER_BREAK:
		debugger(regs);
		break;
#endif
	default:
		printk("SMP %d: smp_message_recv(): unknown msg %d\n",
		       smp_processor_id(), msg);
		break;
	}
}

void smp_send_reschedule(int cpu)
{
	smp_ops->message_pass(cpu, PPC_MSG_RESCHEDULE);
}

#ifdef CONFIG_DEBUGGER
void smp_send_debugger_break(int cpu)
{
	smp_ops->message_pass(cpu, PPC_MSG_DEBUGGER_BREAK);
}
#endif

static void stop_this_cpu(void *dummy)
{
	local_irq_disable();
	while (1)
		;
}

void smp_send_stop(void)
{
	smp_call_function(stop_this_cpu, NULL, 1, 0);
}

/*
 * Structure and data for smp_call_function(). This is designed to minimise
 * static memory requirements. It also looks cleaner.
 * Stolen from the i386 version.
 */
static spinlock_t call_lock __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED;

static struct call_data_struct {
	void (*func) (void *info);
	void *info;
	atomic_t started;
	atomic_t finished;
	int wait;
} *call_data;

/* delay of at least 8 seconds on 1GHz cpu */
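/* (2^33 spin iterations, each costing at least one cycle) */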
#define SMP_CALL_TIMEOUT (1UL << (30 + 3))

/*
 * This function sends a 'generic call function' IPI to all other CPUs
 * in the system.
 *
 * [SUMMARY] Run a function on all other CPUs.
 * <func> The function to run. This must be fast and non-blocking.
 * <info> An arbitrary pointer to pass to the function.
 * <nonatomic> currently unused.
 * <wait> If true, wait (atomically) until function has completed on other CPUs.
 * [RETURNS] 0 on success, else a negative status code. Does not return until
 * remote CPUs are nearly ready to execute <func> or have already executed it.
 *
 * You must not call this function with interrupts disabled, from a
 * hardware interrupt handler, or from a bottom half handler.
 */
int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
		       int wait)
{
	struct call_data_struct data;
	int ret = -1, cpus = num_online_cpus() - 1;
	unsigned long timeout;

	if (!cpus)
		return 0;

	data.func = func;
	data.info = info;
	atomic_set(&data.started, 0);
	data.wait = wait;
	if (wait)
		atomic_set(&data.finished, 0);

	spin_lock(&call_lock);
	call_data = &data;
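	/* publish call_data before the IPI so receivers see a full record */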
	wmb();
	/* Send a message to all other CPUs and wait for them to respond */
	smp_ops->message_pass(MSG_ALL_BUT_SELF, PPC_MSG_CALL_FUNCTION);

	/* Wait for response */
	timeout = SMP_CALL_TIMEOUT;
	while (atomic_read(&data.started) != cpus) {
		HMT_low();
		if (--timeout == 0) {
			printk("smp_call_function on cpu %d: other cpus not "
			       "responding (%d)\n", smp_processor_id(),
			       atomic_read(&data.started));
			debugger(0);
			goto out;
		}
	}

	if (wait) {
		timeout = SMP_CALL_TIMEOUT;
		while (atomic_read(&data.finished) != cpus) {
			HMT_low();
			if (--timeout == 0) {
				printk("smp_call_function on cpu %d: other "
				       "cpus not finishing (%d/%d)\n",
				       smp_processor_id(),
				       atomic_read(&data.finished),
				       atomic_read(&data.started));
				debugger(0);
				goto out;
			}
		}
	}

	ret = 0;

out:
	call_data = NULL;
	HMT_medium();
	spin_unlock(&call_lock);
	return ret;
}

void smp_call_function_interrupt(void)
{
	void (*func) (void *info);
	void *info;
	int wait;

	/* call_data will be NULL if the sender timed out while
	 * waiting on us to receive the call.
	 */
	if (!call_data)
		return;

	func = call_data->func;
	info = call_data->info;
	wait = call_data->wait;

	if (!wait)
		smp_mb__before_atomic_inc();

	/*
	 * Notify initiating CPU that I've grabbed the data and am
	 * about to execute the function
	 */
	atomic_inc(&call_data->started);
	/*
	 * At this point the info structure may be out of scope unless wait==1
	 */
	(*func)(info);
	if (wait) {
		smp_mb__before_atomic_inc();
		atomic_inc(&call_data->finished);
	}
}

extern unsigned long decr_overclock;
extern struct gettimeofday_struct do_gtod;

struct thread_info *current_set[NR_CPUS];

DECLARE_PER_CPU(unsigned int, pvr);

static void __devinit smp_store_cpu_info(int id)
{
	per_cpu(pvr, id) = _get_PVR();
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
	/*
	 * setup_cpu may need to be called on the boot cpu. We haven't
	 * spun any cpus up but let's be paranoid.
	 */
	BUG_ON(boot_cpuid != smp_processor_id());

	/* Fixup boot cpu */
	smp_store_cpu_info(boot_cpuid);
	cpu_callin_map[boot_cpuid] = 1;
	paca[boot_cpuid].prof_counter = 1;
	paca[boot_cpuid].prof_multiplier = 1;

	/*
	 * XXX very rough.
	 */
	cache_decay_ticks = HZ/100;

#ifndef CONFIG_PPC_ISERIES
	paca[boot_cpuid].next_jiffy_update_tb = tb_last_stamp = get_tb();

	/*
	 * Should update do_gtod.stamp_xsec.
	 * For now we leave it, which means the time can be off by some
	 * number of msecs until someone does a settimeofday().
	 */
	do_gtod.tb_orig_stamp = tb_last_stamp;
#endif

	max_cpus = smp_ops->probe();

	/* Backup CPU 0 state if necessary */
	__save_cpu_setup();

	smp_space_timers(max_cpus);
}

void __devinit smp_prepare_boot_cpu(void)
{
	cpu_set(smp_processor_id(), cpu_online_map);
	/* FIXME: what about cpu_possible()? */
}

int __devinit __cpu_up(unsigned int cpu)
{
	struct pt_regs regs;
	struct task_struct *p;
	int c;

	paca[cpu].prof_counter = 1;
	paca[cpu].prof_multiplier = 1;
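	/* decr_overclock is a boot-time tunable that raises the
	 * decrementer rate above the default of once per jiffy */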
	paca[cpu].default_decr = tb_ticks_per_jiffy / decr_overclock;

	if (!(cur_cpu_spec->cpu_features & CPU_FTR_SLB)) {
		void *tmp;

		/* maximum of 48 CPUs on machines with a segment table */
		if (cpu >= 48)
			BUG();

		tmp = &stab_array[PAGE_SIZE * cpu];
		memset(tmp, 0, PAGE_SIZE); 
		paca[cpu].xStab_data.virt = (unsigned long)tmp;
		paca[cpu].xStab_data.real = virt_to_abs(tmp);
	}

	/* create a process for the processor */
	/* only regs.msr is actually used, and 0 is OK for it */
	memset(&regs, 0, sizeof(struct pt_regs));
	p = copy_process(CLONE_VM|CLONE_IDLETASK, 0, &regs, 0, NULL, NULL);
	if (IS_ERR(p))
		panic("failed fork for CPU %u: %li", cpu, PTR_ERR(p));

	wake_up_forked_process(p);
	init_idle(p, cpu);
	unhash_process(p);

	paca[cpu].xCurrent = (u64)p;
	current_set[cpu] = p->thread_info;

	/* The information for processor bringup must
	 * be written out to main store before we release
	 * the processor.
	 */
	mb();

	/* wake up cpus */
	smp_ops->kick_cpu(cpu);

	/*
	 * wait to see if the cpu made a callin (is actually up).
	 * use this value that I found through experimentation.
	 * -- Cort
	 */
	for (c = 5000; c && !cpu_callin_map[cpu]; c--)
		udelay(100);

	if (!cpu_callin_map[cpu]) {
		printk("Processor %u is stuck.\n", cpu);
		return -ENOENT;
	}

	printk("Processor %u found.\n", cpu);

	if (smp_ops->give_timebase)
		smp_ops->give_timebase();
	cpu_set(cpu, cpu_online_map);
	return 0;
}

/* Activate a secondary processor. */
int __devinit start_secondary(void *unused)
{
	unsigned int cpu = smp_processor_id();

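	/* lazy TLB: run on init_mm's page tables until this cpu
	 * first switches to a real user mm */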
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;

	smp_store_cpu_info(cpu);
	set_dec(paca[cpu].default_decr);
	cpu_callin_map[cpu] = 1;

	smp_ops->setup_cpu(cpu);
	if (smp_ops->take_timebase)
		smp_ops->take_timebase();

	get_paca()->yielded = 0;

#ifdef CONFIG_PPC_PSERIES
	if (cur_cpu_spec->firmware_features & FW_FEATURE_SPLPAR) {
		vpa_init(cpu); 
	}
#endif

	local_irq_enable();

	return cpu_idle(NULL);
}

int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}

void __init smp_cpus_done(unsigned int max_cpus)
{
	cpumask_t old_mask;

	/* We want the setup_cpu() here to be called from CPU 0, but our
	 * init thread may have been "borrowed" by another CPU in the meantime,
	 * so we pin ourselves to CPU 0 for a short while.
	 */
	old_mask = current->cpus_allowed;
	set_cpus_allowed(current, cpumask_of_cpu(boot_cpuid));

	smp_ops->setup_cpu(boot_cpuid);

	/* XXX fix this, xics currently relies on it - Anton */
	smp_threads_ready = 1;

	set_cpus_allowed(current, old_mask);
}

#ifdef CONFIG_NUMA
static struct node node_devices[MAX_NUMNODES];

static void register_nodes(void)
{
	int i;
	int ret;

	for (i = 0; i < MAX_NUMNODES; i++) {
		if (node_online(i)) {
			int p_node = parent_node(i);
			struct node *parent = NULL;

			if (p_node != i)
				parent = &node_devices[p_node];

			ret = register_node(&node_devices[i], i, parent);
			if (ret)
				printk(KERN_WARNING "register_nodes: "
				       "register_node %d failed (%d)", i, ret);
		}
	}
}
#else
static void register_nodes(void)
{
	return;
}
#endif

/* Only valid if CPU is online. */
static ssize_t show_physical_id(struct sys_device *dev, char *buf)
{
	struct cpu *cpu = container_of(dev, struct cpu, sysdev);

	return sprintf(buf, "%u\n", get_hard_smp_processor_id(cpu->sysdev.id));
}
static SYSDEV_ATTR(physical_id, 0444, show_physical_id, NULL);

static DEFINE_PER_CPU(struct cpu, cpu_devices);

static int __init topology_init(void)
{
	int cpu;
	struct node *parent = NULL;
	int ret;

	register_nodes();

	for_each_cpu(cpu) {
#ifdef CONFIG_NUMA
		parent = &node_devices[cpu_to_node(cpu)];
#endif
		ret = register_cpu(&per_cpu(cpu_devices, cpu), cpu, parent);
		if (ret)
			printk(KERN_WARNING "topology_init: register_cpu %d "
			       "failed (%d)\n", cpu, ret);

		ret = sysdev_create_file(&per_cpu(cpu_devices, cpu).sysdev,
					 &attr_physical_id);
		if (ret)
			printk(KERN_WARNING "toplogy_init: sysdev_create_file "
			       "%d failed (%d)\n", cpu, ret);
	}
	return 0;
}
__initcall(topology_init);