tlb.c 19.5 KB
Newer Older
1
// SPDX-License-Identifier: GPL-2.0-or-later
2 3 4 5 6 7 8 9 10
/*
 * This file contains the routines for TLB flushing.
 * On machines where the MMU does not use a hash table to store virtual to
 * physical translations (ie, SW loaded TLBs or Book3E compliant processors,
 * this does -not- include 603 however which shares the implementation with
 * hash based processors)
 *
 *  -- BenH
 *
11 12
 * Copyright 2008,2009 Ben Herrenschmidt <benh@kernel.crashing.org>
 *                     IBM Corp.
13 14 15 16 17 18 19 20 21 22 23 24 25
 *
 *  Derived from arch/ppc/mm/init.c:
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 */

#include <linux/kernel.h>
26
#include <linux/export.h>
27 28 29 30 31 32
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/preempt.h>
#include <linux/spinlock.h>
Yinghai Lu's avatar
Yinghai Lu committed
33
#include <linux/memblock.h>
34
#include <linux/of_fdt.h>
Becky Bruce's avatar
Becky Bruce committed
35
#include <linux/hugetlb.h>
36

37
#include <asm/pgalloc.h>
38 39
#include <asm/tlbflush.h>
#include <asm/tlb.h>
40
#include <asm/code-patching.h>
41
#include <asm/cputhreads.h>
Becky Bruce's avatar
Becky Bruce committed
42
#include <asm/hugetlb.h>
43
#include <asm/paca.h>
44

45
#include <mm/mmu_decl.h>
46

Becky Bruce's avatar
Becky Bruce committed
47 48 49 50 51
/*
 * This struct lists the sw-supported page sizes.  The hardware MMU may support
 * other sizes not listed here.   The .ind field is only used on MMUs that have
 * indirect page table entries.
 */
52
#if defined(CONFIG_PPC_BOOK3E_MMU) || defined(CONFIG_PPC_8xx)
53
#ifdef CONFIG_PPC_FSL_BOOK3E
Becky Bruce's avatar
Becky Bruce committed
54 55 56 57 58
/*
 * Freescale Book3E variant: direct page sizes only; .enc carries the
 * hardware page-size encoding (BOOK3E_PAGESZ_*) for each size.
 */
struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = {
	[MMU_PAGE_4K] = {
		.shift	= 12,
		.enc	= BOOK3E_PAGESZ_4K,
	},
	[MMU_PAGE_2M] = {
		.shift	= 21,
		.enc	= BOOK3E_PAGESZ_2M,
	},
	[MMU_PAGE_4M] = {
		.shift	= 22,
		.enc	= BOOK3E_PAGESZ_4M,
	},
	[MMU_PAGE_16M] = {
		.shift	= 24,
		.enc	= BOOK3E_PAGESZ_16M,
	},
	[MMU_PAGE_64M] = {
		.shift	= 26,
		.enc	= BOOK3E_PAGESZ_64M,
	},
	[MMU_PAGE_256M] = {
		.shift	= 28,
		.enc	= BOOK3E_PAGESZ_256M,
	},
	[MMU_PAGE_1G] = {
		.shift	= 30,
		.enc	= BOOK3E_PAGESZ_1GB,
	},
};
84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102
#elif defined(CONFIG_PPC_8xx)
/* 8xx variant: only .shift is filled in; no hardware encoding needed. */
struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = {
	/* we only manage 4k and 16k pages as normal pages */
#ifdef CONFIG_PPC_4K_PAGES
	[MMU_PAGE_4K] = {
		.shift	= 12,
	},
#else
	[MMU_PAGE_16K] = {
		.shift	= 14,
	},
#endif
	[MMU_PAGE_512K] = {
		.shift	= 19,
	},
	[MMU_PAGE_8M] = {
		.shift	= 23,
	},
};
Becky Bruce's avatar
Becky Bruce committed
103
#else
104 105 106
/*
 * Generic Book3E variant.  .ind is only set on sizes that can back
 * indirect page-table entries (see the comment above this #if block);
 * setup_page_sizes() may adjust it from the probed hardware config.
 */
struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = {
	[MMU_PAGE_4K] = {
		.shift	= 12,
		.ind	= 20,
		.enc	= BOOK3E_PAGESZ_4K,
	},
	[MMU_PAGE_16K] = {
		.shift	= 14,
		.enc	= BOOK3E_PAGESZ_16K,
	},
	[MMU_PAGE_64K] = {
		.shift	= 16,
		.ind	= 28,
		.enc	= BOOK3E_PAGESZ_64K,
	},
	[MMU_PAGE_1M] = {
		.shift	= 20,
		.enc	= BOOK3E_PAGESZ_1M,
	},
	[MMU_PAGE_16M] = {
		.shift	= 24,
		.ind	= 36,
		.enc	= BOOK3E_PAGESZ_16M,
	},
	[MMU_PAGE_256M] = {
		.shift	= 28,
		.enc	= BOOK3E_PAGESZ_256M,
	},
	[MMU_PAGE_1G] = {
		.shift	= 30,
		.enc	= BOOK3E_PAGESZ_1GB,
	},
};
Becky Bruce's avatar
Becky Bruce committed
137 138
#endif /* CONFIG_PPC_FSL_BOOK3E */

139 140 141 142 143 144 145 146 147 148
/* Return the hardware page-size encoding for a software psize index. */
static inline int mmu_get_tsize(int psize)
{
	return mmu_psize_defs[psize].enc;
}
#else
/* Stub for MMUs without a page-size encoding field. */
static inline int mmu_get_tsize(int psize)
{
	/* This isn't used on !Book3E for now */
	return 0;
}
Becky Bruce's avatar
Becky Bruce committed
149
#endif /* CONFIG_PPC_BOOK3E_MMU */
150 151 152 153 154 155 156 157 158

/* The variables below are currently only used on 64-bit Book3E
 * though this will probably be made common with other nohash
 * implementations at some point
 */
#ifdef CONFIG_PPC64

int mmu_linear_psize;		/* Page size used for the linear mapping */
int mmu_pte_psize;		/* Page size used for PTE pages */
159
int mmu_vmemmap_psize;		/* Page size used for the virtual mem map */
160
int book3e_htw_mode;		/* HW tablewalk?  Value is PPC_HTW_* */
161 162
unsigned long linear_map_top;	/* Top of linear mapping */

163 164 165 166 167 168 169 170 171

/*
 * Number of bytes to add to SPRN_SPRG_TLB_EXFRAME on crit/mcheck/debug
 * exceptions.  This is used for bolted and e6500 TLB miss handlers which
 * do not modify this SPRG in the TLB miss code; for other TLB miss handlers,
 * this is set to zero.
 */
int extlb_level_exc;

172 173
#endif /* CONFIG_PPC64 */

174 175 176 177 178 179
#ifdef CONFIG_PPC_FSL_BOOK3E
/* next_tlbcam_idx is used to round-robin tlbcam entry assignment */
DEFINE_PER_CPU(int, next_tlbcam_idx);
EXPORT_PER_CPU_SYMBOL(next_tlbcam_idx);
#endif

180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206
/*
 * Base TLB flushing operations:
 *
 *  - flush_tlb_mm(mm) flushes the specified mm context TLB's
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes kernel pages
 *
 *  - local_* variants of page and mm only apply to the current
 *    processor
 */

/*
 * These are the base non-SMP variants of page and mm flushing
 */
/*
 * Invalidate, on the local CPU only, every TLB entry tagged with the
 * given mm's context id.  A no-op when the mm has no context allocated
 * yet (MMU_NO_CONTEXT).
 */
void local_flush_tlb_mm(struct mm_struct *mm)
{
	unsigned int ctx_id;

	preempt_disable();
	ctx_id = mm->context.id;
	if (ctx_id == MMU_NO_CONTEXT)
		goto out;
	_tlbil_pid(ctx_id);
out:
	preempt_enable();
}
EXPORT_SYMBOL(local_flush_tlb_mm);

207 208
void __local_flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
			    int tsize, int ind)
209 210 211 212
{
	unsigned int pid;

	preempt_disable();
213
	pid = mm ? mm->context.id : 0;
214
	if (pid != MMU_NO_CONTEXT)
215
		_tlbil_va(vmaddr, pid, tsize, ind);
216 217 218
	preempt_enable();
}

219 220 221
void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
	__local_flush_tlb_page(vma ? vma->vm_mm : NULL, vmaddr,
222
			       mmu_get_tsize(mmu_virtual_psize), 0);
223 224
}
EXPORT_SYMBOL(local_flush_tlb_page);
225 226 227 228 229 230

/*
 * And here are the SMP non-local implementations
 */
#ifdef CONFIG_SMP

231
static DEFINE_RAW_SPINLOCK(tlbivax_lock);
232 233 234 235

/* Argument bundle handed to the TLB flush IPI handlers below. */
struct tlb_flush_param {
	unsigned long addr;	/* virtual address to invalidate (page flush) */
	unsigned int pid;	/* context id to flush */
	unsigned int tsize;	/* hardware page-size encoding */
	unsigned int ind;	/* non-zero to target indirect entries */
};

/*
 * IPI handler: flush all local TLB entries for the context id carried
 * in @param; a NULL param means context id 0 (kernel).
 */
static void do_flush_tlb_mm_ipi(void *param)
{
	struct tlb_flush_param *p = param;
	unsigned int pid = 0;

	if (p)
		pid = p->pid;
	_tlbil_pid(pid);
}

/*
 * IPI handler: invalidate a single VA on the local CPU.  Unlike
 * do_flush_tlb_mm_ipi(), no NULL check: the caller in
 * __flush_tlb_page() always passes a valid tlb_flush_param.
 */
static void do_flush_tlb_page_ipi(void *param)
{
	struct tlb_flush_param *p = param;

	_tlbil_va(p->addr, p->pid, p->tsize, p->ind);
}


/* Note on invalidations and PID:
 *
 * We snapshot the PID with preempt disabled. At this point, it can still
 * change either because:
 * - our context is being stolen (PID -> NO_CONTEXT) on another CPU
 * - we are invaliating some target that isn't currently running here
 *   and is concurrently acquiring a new PID on another CPU
 * - some other CPU is re-acquiring a lost PID for this mm
 * etc...
 *
 * However, this shouldn't be a problem as we only guarantee
 * invalidation of TLB entries present prior to this call, so we
 * don't care about the PID changing, and invalidating a stale PID
 * is generally harmless.
 */

void flush_tlb_mm(struct mm_struct *mm)
{
	unsigned int pid;

	preempt_disable();
	pid = mm->context.id;
	if (unlikely(pid == MMU_NO_CONTEXT))
		goto no_context;
279
	if (!mm_is_core_local(mm)) {
280
		struct tlb_flush_param p = { .pid = pid };
281 282 283
		/* Ignores smp_processor_id() even if set. */
		smp_call_function_many(mm_cpumask(mm),
				       do_flush_tlb_mm_ipi, &p, 1);
284 285 286 287 288 289 290
	}
	_tlbil_pid(pid);
 no_context:
	preempt_enable();
}
EXPORT_SYMBOL(flush_tlb_mm);

291 292
/*
 * SMP flush of a single page for a user mm.  Prefers a broadcast
 * tlbivax (serialized by tlbivax_lock on parts that need it) and
 * falls back to IPIs to the mm's CPU set; the local invalidation is
 * always performed last.
 */
void __flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
		      int tsize, int ind)
{
	struct cpumask *cpu_mask;
	unsigned int pid;

	/*
	 * This function as well as __local_flush_tlb_page() must only be called
	 * for user contexts.
	 */
	if (WARN_ON(!mm))
		return;

	preempt_disable();
	pid = mm->context.id;
	if (unlikely(pid == MMU_NO_CONTEXT))
		goto bail;
	cpu_mask = mm_cpumask(mm);
	if (!mm_is_core_local(mm)) {
		/* If broadcast tlbivax is supported, use it */
		if (mmu_has_feature(MMU_FTR_USE_TLBIVAX_BCAST)) {
			int lock = mmu_has_feature(MMU_FTR_LOCK_BCAST_INVAL);
			/* Some parts require broadcast invalidations
			 * to be serialized; take the lock only then. */
			if (lock)
				raw_spin_lock(&tlbivax_lock);
			_tlbivax_bcast(vmaddr, pid, tsize, ind);
			if (lock)
				raw_spin_unlock(&tlbivax_lock);
			/* Broadcast reached the local CPU too -- skip
			 * the explicit local _tlbil_va() below. */
			goto bail;
		} else {
			struct tlb_flush_param p = {
				.pid = pid,
				.addr = vmaddr,
				.tsize = tsize,
				.ind = ind,
			};
			/* Ignores smp_processor_id() even if set in cpu_mask */
			smp_call_function_many(cpu_mask,
					       do_flush_tlb_page_ipi, &p, 1);
		}
	}
	_tlbil_va(vmaddr, pid, tsize, ind);
 bail:
	preempt_enable();
}
335 336 337

void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
Becky Bruce's avatar
Becky Bruce committed
338
#ifdef CONFIG_HUGETLB_PAGE
339
	if (vma && is_vm_hugetlb_page(vma))
Becky Bruce's avatar
Becky Bruce committed
340 341 342
		flush_hugetlb_page(vma, vmaddr);
#endif

343
	__flush_tlb_page(vma ? vma->vm_mm : NULL, vmaddr,
344
			 mmu_get_tsize(mmu_virtual_psize), 0);
345
}
346 347 348 349
EXPORT_SYMBOL(flush_tlb_page);

#endif /* CONFIG_SMP */

350 351 352 353 354 355 356 357 358 359 360
#ifdef CONFIG_PPC_47x
/*
 * 476 variant: disable broadcast tlbivax when the device tree marks
 * the partition as cooperative (shared TLB invalidation not usable).
 */
void __init early_init_mmu_47x(void)
{
#ifdef CONFIG_SMP
	unsigned long root = of_get_flat_dt_root();
	if (of_get_flat_dt_prop(root, "cooperative-partition", NULL))
		mmu_clear_feature(MMU_FTR_USE_TLBIVAX_BCAST);
#endif /* CONFIG_SMP */
}
#endif /* CONFIG_PPC_47x */

361 362 363 364 365 366 367 368 369 370
/*
 * Flush kernel TLB entries in the given range
 */
/*
 * Flush kernel TLB entries in the given range.  The range arguments
 * are currently unused: kernel mappings live under context id 0, so
 * we simply flush the whole of PID 0 everywhere.
 */
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
#ifdef CONFIG_SMP
	preempt_disable();
	/* NULL param -> the IPI handler flushes PID 0 on remote CPUs */
	smp_call_function(do_flush_tlb_mm_ipi, NULL, 1);
	_tlbil_pid(0);
	preempt_enable();
#else
	_tlbil_pid(0);
#endif
}
EXPORT_SYMBOL(flush_tlb_kernel_range);

/*
 * Currently, for range flushing, we just do a full mm flush. This should
 * be optimized based on a threshold on the size of the range, since
 * some implementation can stack multiple tlbivax before a tlbsync but
 * for now, we keep it that way
 */
void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
		     unsigned long end)

{
387 388 389 390
	if (end - start == PAGE_SIZE && !(start & ~PAGE_MASK))
		flush_tlb_page(vma, start);
	else
		flush_tlb_mm(vma->vm_mm);
391 392
}
EXPORT_SYMBOL(flush_tlb_range);
393 394 395 396 397

/* mmu_gather finalization: just flush the whole mm (see note above). */
void tlb_flush(struct mmu_gather *tlb)
{
	flush_tlb_mm(tlb->mm);
}
398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413

/*
 * Below are functions specific to the 64-bit variant of Book3E though that
 * may change in the future
 */

#ifdef CONFIG_PPC64

/*
 * Handling of virtual linear page tables or indirect TLB entries
 * flushing when PTE pages are freed
 */
/*
 * Invalidate cached translations that reference a PTE page being
 * freed.  With HW tablewalk enabled, flush the indirect entries
 * covering the PMD-sized region; otherwise flush the virtual linear
 * page table (VPTE) entry that maps the PTE page.
 */
void tlb_flush_pgtable(struct mmu_gather *tlb, unsigned long address)
{
	int tsize = mmu_psize_defs[mmu_pte_psize].enc;

	if (book3e_htw_mode != PPC_HTW_NONE) {
		unsigned long start = address & PMD_MASK;
		unsigned long end = address + PMD_SIZE;
		unsigned long size = 1UL << mmu_psize_defs[mmu_pte_psize].shift;

		/* This isn't the most optimal, ideally we would factor out the
		 * while preempt & CPU mask mucking around, or even the IPI but
		 * it will do for now
		 */
		while (start < end) {
			__flush_tlb_page(tlb->mm, start, tsize, 1);
			start += size;
		}
	} else {
		/* Build the VPTE address for this PTE page: keep the
		 * top region bits, force region id 1, and scale the
		 * offset down by PAGE_SHIFT-3 (8-byte PTEs per page).
		 * NOTE(review): constants assume a 4-bit region field
		 * at the top of the VA -- confirm against the Book3E
		 * virtual linear page table layout.
		 */
		unsigned long rmask = 0xf000000000000000ul;
		unsigned long rid = (address & rmask) | 0x1000000000000000ul;
		unsigned long vpte = address & ~rmask;

		vpte = (vpte >> (PAGE_SHIFT - 3)) & ~0xffful;
		vpte |= rid;
		__flush_tlb_page(tlb->mm, vpte, tsize, 0);
	}
}

438 439
/*
 * Probe the TLB configuration registers and mark each entry of
 * mmu_psize_defs[] as direct and/or indirect-capable.  Three paths:
 * Freescale MMU v1, Freescale MMU v2 (e6500-style), and the generic
 * Book3E layout.  Sizes left unsupported get their shift cleared so
 * later code treats them as absent.
 */
static void setup_page_sizes(void)
{
	unsigned int tlb0cfg;
	unsigned int tlb0ps;
	unsigned int eptcfg;
	int i, psize;

#ifdef CONFIG_PPC_FSL_BOOK3E
	unsigned int mmucfg = mfspr(SPRN_MMUCFG);
	int fsl_mmu = mmu_has_feature(MMU_FTR_TYPE_FSL_E);

	if (fsl_mmu && (mmucfg & MMUCFG_MAVN) == MMUCFG_MAVN_V1) {
		unsigned int tlb1cfg = mfspr(SPRN_TLB1CFG);
		unsigned int min_pg, max_pg;

		min_pg = (tlb1cfg & TLBnCFG_MINSIZE) >> TLBnCFG_MINSIZE_SHIFT;
		max_pg = (tlb1cfg & TLBnCFG_MAXSIZE) >> TLBnCFG_MAXSIZE_SHIFT;

		for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
			struct mmu_psize_def *def;
			unsigned int shift;

			def = &mmu_psize_defs[psize];
			shift = def->shift;

			/* MMU v1 only supports power-of-4 page sizes */
			if (shift == 0 || shift & 1)
				continue;

			/* adjust to be in terms of 4^shift Kb */
			shift = (shift - 10) >> 1;

			if ((shift >= min_pg) && (shift <= max_pg))
				def->flags |= MMU_PAGE_SIZE_DIRECT;
		}

		goto out;
	}

	if (fsl_mmu && (mmucfg & MMUCFG_MAVN) == MMUCFG_MAVN_V2) {
		u32 tlb1cfg, tlb1ps;

		tlb0cfg = mfspr(SPRN_TLB0CFG);
		tlb1cfg = mfspr(SPRN_TLB1CFG);
		tlb1ps = mfspr(SPRN_TLB1PS);
		eptcfg = mfspr(SPRN_EPTCFG);

		/* HW tablewalk needs indirect TLB1 entries + page tables */
		if ((tlb1cfg & TLBnCFG_IND) && (tlb0cfg & TLBnCFG_PT))
			book3e_htw_mode = PPC_HTW_E6500;

		/*
		 * We expect 4K subpage size and unrestricted indirect size.
		 * The lack of a restriction on indirect size is a Freescale
		 * extension, indicated by PSn = 0 but SPSn != 0.
		 */
		if (eptcfg != 2)
			book3e_htw_mode = PPC_HTW_NONE;

		for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
			struct mmu_psize_def *def = &mmu_psize_defs[psize];

			if (!def->shift)
				continue;

			/* TLB1PS has one bit per supported size (in KB) */
			if (tlb1ps & (1U << (def->shift - 10))) {
				def->flags |= MMU_PAGE_SIZE_DIRECT;

				/* e6500 tablewalk uses 2M indirect entries */
				if (book3e_htw_mode && psize == MMU_PAGE_2M)
					def->flags |= MMU_PAGE_SIZE_INDIRECT;
			}
		}

		goto out;
	}
#endif

	/* Generic Book3E path: probe TLB0 capabilities */
	tlb0cfg = mfspr(SPRN_TLB0CFG);
	tlb0ps = mfspr(SPRN_TLB0PS);
	eptcfg = mfspr(SPRN_EPTCFG);

	/* Look for supported direct sizes */
	for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
		struct mmu_psize_def *def = &mmu_psize_defs[psize];

		if (tlb0ps & (1U << (def->shift - 10)))
			def->flags |= MMU_PAGE_SIZE_DIRECT;
	}

	/* Indirect page sizes supported ? */
	if ((tlb0cfg & TLBnCFG_IND) == 0 ||
	    (tlb0cfg & TLBnCFG_PT) == 0)
		goto out;

	book3e_htw_mode = PPC_HTW_IBM;

	/* Now, we only deal with one IND page size for each
	 * direct size. Hopefully all implementations today are
	 * unambiguous, but we might want to be careful in the
	 * future.
	 */
	for (i = 0; i < 3; i++) {
		unsigned int ps, sps;

		/* EPTCFG packs (sub-page size, page size) in 5-bit pairs */
		sps = eptcfg & 0x1f;
		eptcfg >>= 5;
		ps = eptcfg & 0x1f;
		eptcfg >>= 5;
		if (!ps || !sps)
			continue;
		for (psize = 0; psize < MMU_PAGE_COUNT; psize++) {
			struct mmu_psize_def *def = &mmu_psize_defs[psize];

			if (ps == (def->shift - 10))
				def->flags |= MMU_PAGE_SIZE_INDIRECT;
			if (sps == (def->shift - 10))
				def->ind = ps + 10;
		}
	}

out:
	/* Cleanup array and print summary */
	pr_info("MMU: Supported page sizes\n");
	for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
		struct mmu_psize_def *def = &mmu_psize_defs[psize];
		const char *__page_type_names[] = {
			"unsupported",
			"direct",
			"indirect",
			"direct & indirect"
		};
		if (def->flags == 0) {
			def->shift = 0;
			continue;
		}
		pr_info("  %8ld KB as %s\n", 1ul << (def->shift - 10),
			__page_type_names[def->flags & 0x3]);
	}
}

576 577
/*
 * Enable hardware tablewalk (if setup_page_sizes() selected a mode)
 * by patching the 0x1c0/0x1e0 TLB miss vectors to the dedicated
 * handlers; PPC_HTW_NONE leaves the default handlers in place.
 */
static void setup_mmu_htw(void)
{
	/*
	 * If we want to use HW tablewalk, enable it by patching the TLB miss
	 * handlers to branch to the one dedicated to it.
	 */

	switch (book3e_htw_mode) {
	case PPC_HTW_IBM:
		patch_exception(0x1c0, exc_data_tlb_miss_htw_book3e);
		patch_exception(0x1e0, exc_instruction_tlb_miss_htw_book3e);
		break;
#ifdef CONFIG_PPC_FSL_BOOK3E
	case PPC_HTW_E6500:
		/* e6500 handlers don't reset SPRG_TLB_EXFRAME themselves */
		extlb_level_exc = EX_TLB_SIZE;
		patch_exception(0x1c0, exc_data_tlb_miss_e6500_book3e);
		patch_exception(0x1e0, exc_instruction_tlb_miss_e6500_book3e);
		break;
#endif
	}
	pr_info("MMU: Book3E HW tablewalk %s\n",
		book3e_htw_mode != PPC_HTW_NONE ? "enabled" : "not supported");
}

/*
 * Early initialization of the MMU TLB code
 */
603
/*
 * Per-CPU MMU setup: program MAS4 defaults according to the selected
 * tablewalk mode and, on Freescale parts, bolt the linear mapping
 * into TLBCAM entries (once per core).
 */
static void early_init_this_mmu(void)
{
	unsigned int mas4;

	/* Set MAS4 based on page table setting */

	mas4 = 0x4 << MAS4_WIMGED_SHIFT;
	switch (book3e_htw_mode) {
	case PPC_HTW_E6500:
		mas4 |= MAS4_INDD;
		mas4 |= BOOK3E_PAGESZ_2M << MAS4_TSIZED_SHIFT;
		mas4 |= MAS4_TLBSELD(1);
		mmu_pte_psize = MMU_PAGE_2M;
		break;

	case PPC_HTW_IBM:
		mas4 |= MAS4_INDD;
		mas4 |=	BOOK3E_PAGESZ_1M << MAS4_TSIZED_SHIFT;
		mmu_pte_psize = MMU_PAGE_1M;
		break;

	case PPC_HTW_NONE:
		mas4 |=	BOOK3E_PAGESZ_4K << MAS4_TSIZED_SHIFT;
		mmu_pte_psize = mmu_virtual_psize;
		break;
	}
	mtspr(SPRN_MAS4, mas4);

#ifdef CONFIG_PPC_FSL_BOOK3E
	if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
		unsigned int num_cams;
		bool map = true;

		/* use a quarter of the TLBCAM for bolted linear map */
		num_cams = (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) / 4;

		/*
		 * Only do the mapping once per core, or else the
		 * transient mapping would cause problems.
		 */
#ifdef CONFIG_SMP
		/* more than one thread already up on this core -> skip */
		if (hweight32(get_tensr()) > 1)
			map = false;
#endif

		if (map)
			linear_map_top = map_mem_in_cams(linear_map_top,
							 num_cams, false);
	}
#endif

	/* A sync won't hurt us after mucking around with
	 * the MMU configuration
	 */
	mb();
}
659

660 661 662 663 664 665 666 667 668 669 670 671 672 673 674 675 676 677 678 679 680 681 682 683 684 685 686 687 688 689 690 691 692 693
/*
 * One-time (boot CPU only) global MMU setup: choose linear and
 * vmemmap page sizes, probe supported page sizes, select and patch
 * the tablewalk mode, and record the top of the linear mapping.
 */
static void __init early_init_mmu_global(void)
{
	/* XXX This will have to be decided at runtime, but right
	 * now our boot and TLB miss code hard wires it. Ideally
	 * we should find out a suitable page size and patch the
	 * TLB miss code (either that or use the PACA to store
	 * the value we want)
	 */
	mmu_linear_psize = MMU_PAGE_1G;

	/* XXX This should be decided at runtime based on supported
	 * page sizes in the TLB, but for now let's assume 16M is
	 * always there and a good fit (which it probably is)
	 *
	 * Freescale booke only supports 4K pages in TLB0, so use that.
	 */
	if (mmu_has_feature(MMU_FTR_TYPE_FSL_E))
		mmu_vmemmap_psize = MMU_PAGE_4K;
	else
		mmu_vmemmap_psize = MMU_PAGE_16M;

	/* XXX This code only checks for TLB 0 capabilities and doesn't
	 *     check what page size combos are supported by the HW. It
	 *     also doesn't handle the case where a separate array holds
	 *     the IND entries from the array loaded by the PT.
	 */
	/* Look for supported page sizes */
	setup_page_sizes();

	/* Look for HW tablewalk support */
	setup_mmu_htw();

#ifdef CONFIG_PPC_FSL_BOOK3E
	if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
		/* Without tablewalk, use the bolted miss handlers */
		if (book3e_htw_mode == PPC_HTW_NONE) {
			extlb_level_exc = EX_TLB_SIZE;
			patch_exception(0x1c0, exc_data_tlb_miss_bolted_book3e);
			patch_exception(0x1e0,
				exc_instruction_tlb_miss_bolted_book3e);
		}
	}
#endif

	/* Set the global containing the top of the linear mapping
	 * for use by the TLB miss code
	 */
	linear_map_top = memblock_end_of_DRAM();

	ioremap_bot = IOREMAP_BASE;
}

/*
 * Cap usable memory at the top of the bolted linear mapping: a hard
 * limit on Freescale parts (unmapped RAM would fault), and an early
 * allocation limit for everyone.
 */
static void __init early_mmu_set_memory_limit(void)
{
#ifdef CONFIG_PPC_FSL_BOOK3E
	if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
		/*
		 * Limit memory so we dont have linear faults.
		 * Unlike memblock_set_current_limit, which limits
		 * memory available during early boot, this permanently
		 * reduces the memory available to Linux.  We need to
		 * do this because highmem is not supported on 64-bit.
		 */
		memblock_enforce_memory_limit(linear_map_top);
	}
#endif

	memblock_set_current_limit(linear_map_top);
}

729
/* boot cpu only */
730 731
/*
 * Boot-CPU MMU bring-up: global setup once, then this CPU's setup,
 * then clamp early memory to the mapped range.  Secondary CPUs go
 * through early_init_mmu_secondary() instead.
 */
void __init early_init_mmu(void)
{
	early_init_mmu_global();
	early_init_this_mmu();
	early_mmu_set_memory_limit();
}

737
/* Secondary CPUs only need the per-CPU part of the MMU setup. */
void early_init_mmu_secondary(void)
{
	early_init_this_mmu();
}

742 743 744
/*
 * Compute ppc64_rma_size (capped at 1G) from the first memblock and
 * restrict early memblock allocations to that region.
 */
void setup_initial_memory_limit(phys_addr_t first_memblock_base,
				phys_addr_t first_memblock_size)
{
	/* On non-FSL Embedded 64-bit, we adjust the RMA size to match
	 * the bolted TLB entry. We know for now that only 1G
	 * entries are supported though that may eventually
	 * change.
	 *
	 * on FSL Embedded 64-bit, usually all RAM is bolted, but with
	 * unusual memory sizes it's possible for some RAM to not be mapped
	 * (such RAM is not used at all by Linux, since we don't support
	 * highmem on 64-bit).  We limit ppc64_rma_size to what would be
	 * mappable if this memblock is the only one.  Additional memblocks
	 * can only increase, not decrease, the amount that ends up getting
	 * mapped.  We still limit max to 1G even if we'll eventually map
	 * more.  This is due to what the early init code is set up to do.
	 *
	 * We crop it to the size of the first MEMBLOCK to
	 * avoid going over total available memory just in case...
	 */
#ifdef CONFIG_PPC_FSL_BOOK3E
	if (early_mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
		unsigned long linear_sz;
		unsigned int num_cams;

		/* use a quarter of the TLBCAM for bolted linear map */
		num_cams = (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) / 4;

		linear_sz = map_mem_in_cams(first_memblock_size, num_cams,
					    true);

		ppc64_rma_size = min_t(u64, linear_sz, 0x40000000);
	} else
#endif
		ppc64_rma_size = min_t(u64, first_memblock_size, 0x40000000);

	/* Finally limit subsequent allocations */
	memblock_set_current_limit(first_memblock_base + ppc64_rma_size);
}
781 782 783 784 785 786
#else /* ! CONFIG_PPC64 */
/* 32-bit variant: only the 47x quirk and slice setup are needed. */
void __init early_init_mmu(void)
{
#ifdef CONFIG_PPC_47x
	early_init_mmu_47x();
#endif

#ifdef CONFIG_PPC_MM_SLICES
	mm_ctx_set_slb_addr_limit(&init_mm.context, SLB_ADDR_LIMIT_DEFAULT);
#endif
}
792
#endif /* CONFIG_PPC64 */