Commit dad0dc2e authored by David Gibson, committed by Paul Mackerras

PPC32: clean up the initial mapping of RAM, allow for large-page mappings.

parent a321a55f
@@ -25,7 +25,6 @@
 #include <asm/tlbflush.h>
 extern void mapin_ram(void);
-extern void bat_mapin_ram(void);
 extern int map_page(unsigned long va, unsigned long pa, int flags);
 extern void setbat(int index, unsigned long virt, unsigned long phys,
 		   unsigned int size, int flags);
@@ -49,14 +48,17 @@ extern unsigned long Hash_size, Hash_mask;
 #if defined(CONFIG_8xx)
 #define flush_HPTE(X, va, pg)	_tlbie(va)
 #define MMU_init_hw()		do { } while(0)
+#define mmu_mapin_ram()		(0UL)
 #elif defined(CONFIG_4xx)
 #define flush_HPTE(X, va, pg)	_tlbie(va)
 extern void MMU_init_hw(void);
+#define mmu_mapin_ram()		(0UL)
 #else
 /* anything except 4xx or 8xx */
 extern void MMU_init_hw(void);
+extern unsigned long mmu_mapin_ram(void);
 /* Be careful....this needs to be updated if we ever encounter 603 SMPs,
  * which includes all new 82xx processors.  We need tlbie/tlbsync here
......
@@ -252,31 +252,14 @@ void __init mapin_ram(void)
 {
 	unsigned long v, p, s, f;
 
-#ifdef HAVE_BATS
-	if (!__map_without_bats)
-		bat_mapin_ram();
-#endif /* HAVE_BATS */
-
-	v = KERNELBASE;
-	p = PPC_MEMSTART;
-	for (s = 0; s < total_lowmem; s += PAGE_SIZE) {
-		/* On the MPC8xx, we want the page shared so we
-		 * don't get ASID compares on kernel space.
-		 */
-		f = _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_SHARED | _PAGE_HWEXEC;
-#if defined(CONFIG_KGDB) || defined(CONFIG_XMON)
-		/* Allows stub to set breakpoints everywhere */
-		f |= _PAGE_WRENABLE;
-#else	/* !CONFIG_KGDB && !CONFIG_XMON */
-		if ((char *) v < _stext || (char *) v >= etext)
-			f |= _PAGE_WRENABLE;
-#ifdef CONFIG_PPC_STD_MMU
+	s = mmu_mapin_ram();
+	v = KERNELBASE + s;
+	p = PPC_MEMSTART + s;
+	for (; s < total_lowmem; s += PAGE_SIZE) {
+		if ((char *) v >= _stext && (char *) v < etext)
+			f = _PAGE_RAM_TEXT;
 		else
-			/* On the powerpc (not all), no user access
-			   forces R/W kernel access */
-			f |= _PAGE_USER;
-#endif /* CONFIG_PPC_STD_MMU */
-#endif /* CONFIG_KGDB || CONFIG_XMON */
+			f = _PAGE_RAM;
 		map_page(v, p, f);
 		v += PAGE_SIZE;
 		p += PAGE_SIZE;
......
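For readability, here is mapin_ram() as it reads once the hunk above is applied, reconstructed from the + lines (the loop's closing braces fall in the elided context): the MMU-specific large-page work is delegated to mmu_mapin_ram(), and only the remainder of lowmem is mapped with ordinary 4k pages.

void __init mapin_ram(void)
{
	unsigned long v, p, s, f;

	/* Bytes of lowmem already covered by large-page (BAT) mappings;
	 * the 4xx/8xx stubs return 0, so everything is mapped below. */
	s = mmu_mapin_ram();
	v = KERNELBASE + s;
	p = PPC_MEMSTART + s;
	for (; s < total_lowmem; s += PAGE_SIZE) {
		if ((char *) v >= _stext && (char *) v < etext)
			f = _PAGE_RAM_TEXT;	/* text: write-protected unless KGDB/XMON */
		else
			f = _PAGE_RAM;
		map_page(v, p, f);
		v += PAGE_SIZE;
		p += PAGE_SIZE;
	}
}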
@@ -87,12 +87,15 @@ unsigned long p_mapped_by_bats(unsigned long pa)
 	return 0;
 }
 
-void __init bat_mapin_ram(void)
+unsigned long __init mmu_mapin_ram(void)
 {
 	unsigned long tot, bl, done;
 	unsigned long max_size = (256<<20);
 	unsigned long align;
 
+	if (__map_without_bats)
+		return 0;
+
 	/* Set up BAT2 and if necessary BAT3 to cover RAM. */
 
 	/* Make sure we don't map a block larger than the
@@ -119,7 +122,10 @@ void __init bat_mapin_ram(void)
 			break;
 		setbat(3, KERNELBASE+done, PPC_MEMSTART+done, bl,
 		       _PAGE_KERNEL);
+		done = (unsigned long)bat_addrs[3].limit - KERNELBASE + 1;
 	}
+
+	return done;
 }
 
 /*
......
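The elided middle of mmu_mapin_ram() chooses the BAT block sizes. Since the classic PPC BATs can only map naturally aligned power-of-two blocks from 128KB up to 256MB (hence the max_size = (256<<20) cap above), the selection amounts to taking the largest such block that still fits the remaining lowmem. A minimal sketch of that calculation follows; bat_block_size() is a hypothetical helper for illustration, not part of the patch:

/* Hypothetical illustration, not in the patch: largest BAT-mappable
 * power-of-two block that fits in the remaining lowmem. */
static unsigned long bat_block_size(unsigned long remaining)
{
	unsigned long bl;

	/* walk down from the 256MB cap; 128KB is the smallest BAT block */
	for (bl = (256 << 20); bl > (128 << 10); bl >>= 1)
		if (bl <= remaining)
			break;
	return bl;
}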
@@ -246,6 +246,23 @@ extern unsigned long ioremap_bot, ioremap_base;
 #define _PAGE_KERNEL	_PAGE_BASE | _PAGE_WRENABLE | _PAGE_SHARED | _PAGE_HWEXEC
 #define _PAGE_IO	_PAGE_KERNEL | _PAGE_NO_CACHE | _PAGE_GUARDED
 
+#define _PAGE_RAM	_PAGE_KERNEL
+
+#if defined(CONFIG_KGDB) || defined(CONFIG_XMON)
+/* We want the debuggers to be able to set breakpoints anywhere, so
+ * don't write protect the kernel text */
+#define _PAGE_RAM_TEXT	_PAGE_RAM
+#else
+#ifdef CONFIG_PPC_STD_MMU
+/* On standard PPC MMU, no user access implies kernel read/write
+ * access, so to write-protect the kernel text we must turn on user
+ * access */
+#define _PAGE_RAM_TEXT	(_PAGE_RAM & ~_PAGE_WRENABLE) | _PAGE_USER
+#else
+#define _PAGE_RAM_TEXT	(_PAGE_RAM & ~_PAGE_WRENABLE)
+#endif
+#endif
+
 #define PAGE_NONE	__pgprot(_PAGE_BASE)
 #define PAGE_READONLY	__pgprot(_PAGE_BASE | _PAGE_USER)
 #define PAGE_READONLY_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
......
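To make the standard-MMU case concrete, here is how the new definitions expand when neither CONFIG_KGDB nor CONFIG_XMON is set. This is a worked expansion derived from the hunk above, not code from the patch:

/* Illustration only: effective flags under CONFIG_PPC_STD_MMU.
 *
 *   _PAGE_RAM      = _PAGE_KERNEL
 *                  = _PAGE_BASE | _PAGE_WRENABLE | _PAGE_SHARED | _PAGE_HWEXEC
 *   _PAGE_RAM_TEXT = (_PAGE_RAM & ~_PAGE_WRENABLE) | _PAGE_USER
 *
 * Clearing _PAGE_WRENABLE alone would not protect the text: on the
 * hashed MMU, pages with no user access are always kernel read/write
 * (PP=00).  Setting _PAGE_USER moves the page to a PP encoding that
 * honours write protection, at the cost of granting user read access.
 */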