Commit b0da4f67 authored by Paul Mackerras

Merge samba.org:/home/paulus/kernel/linux-2.5

into samba.org:/home/paulus/kernel/for-linus-ppc
parents 4872eacc dad0dc2e
@@ -25,7 +25,6 @@
 #include <asm/tlbflush.h>
 
 extern void mapin_ram(void);
-extern void bat_mapin_ram(void);
 extern int map_page(unsigned long va, unsigned long pa, int flags);
 extern void setbat(int index, unsigned long virt, unsigned long phys,
                    unsigned int size, int flags);
@@ -49,14 +48,17 @@ extern unsigned long Hash_size, Hash_mask;
 #if defined(CONFIG_8xx)
 #define flush_HPTE(X, va, pg)   _tlbie(va)
 #define MMU_init_hw()           do { } while(0)
+#define mmu_mapin_ram()         (0UL)
 
 #elif defined(CONFIG_4xx)
 #define flush_HPTE(X, va, pg)   _tlbie(va)
 extern void MMU_init_hw(void);
+#define mmu_mapin_ram()         (0UL)
 
 #else
 /* anything except 4xx or 8xx */
 extern void MMU_init_hw(void);
+extern unsigned long mmu_mapin_ram(void);
 
 /* Be careful....this needs to be updated if we ever encounter 603 SMPs,
  * which includes all new 82xx processors.  We need tlbie/tlbsync here
@@ -252,31 +252,14 @@ void __init mapin_ram(void)
 {
         unsigned long v, p, s, f;
 
-#ifdef HAVE_BATS
-        if (!__map_without_bats)
-                bat_mapin_ram();
-#endif /* HAVE_BATS */
-
-        v = KERNELBASE;
-        p = PPC_MEMSTART;
-        for (s = 0; s < total_lowmem; s += PAGE_SIZE) {
-                /* On the MPC8xx, we want the page shared so we
-                 * don't get ASID compares on kernel space.
-                 */
-                f = _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_SHARED | _PAGE_HWEXEC;
-#if defined(CONFIG_KGDB) || defined(CONFIG_XMON)
-                /* Allows stub to set breakpoints everywhere */
-                f |= _PAGE_WRENABLE;
-#else /* !CONFIG_KGDB && !CONFIG_XMON */
-                if ((char *) v < _stext || (char *) v >= etext)
-                        f |= _PAGE_WRENABLE;
-#ifdef CONFIG_PPC_STD_MMU
+        s = mmu_mapin_ram();
+        v = KERNELBASE + s;
+        p = PPC_MEMSTART + s;
+        for (; s < total_lowmem; s += PAGE_SIZE) {
+                if ((char *) v >= _stext && (char *) v < etext)
+                        f = _PAGE_RAM_TEXT;
                 else
-                        /* On the powerpc (not all), no user access
-                           forces R/W kernel access */
-                        f |= _PAGE_USER;
-#endif /* CONFIG_PPC_STD_MMU */
-#endif /* CONFIG_KGDB || CONFIG_XMON */
+                        f = _PAGE_RAM;
                 map_page(v, p, f);
                 v += PAGE_SIZE;
                 p += PAGE_SIZE;
@@ -87,12 +87,15 @@ unsigned long p_mapped_by_bats(unsigned long pa)
         return 0;
 }
 
-void __init bat_mapin_ram(void)
+unsigned long __init mmu_mapin_ram(void)
 {
         unsigned long tot, bl, done;
         unsigned long max_size = (256<<20);
         unsigned long align;
 
+        if (__map_without_bats)
+                return 0;
+
         /* Set up BAT2 and if necessary BAT3 to cover RAM. */
 
         /* Make sure we don't map a block larger than the
@@ -119,7 +122,10 @@ void __init bat_mapin_ram(void)
                         break;
                 setbat(3, KERNELBASE+done, PPC_MEMSTART+done, bl,
                        _PAGE_KERNEL);
+                done = (unsigned long)bat_addrs[3].limit - KERNELBASE + 1;
         }
+
+        return done;
 }
 
 /*
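Editorial aside: a minimal re-assembly of the new code paths shown in the hunks above may help when reading the diff piecewise. mmu_mapin_ram() returns how many bytes of low memory it has already covered with BATs (0 on 4xx/8xx, or when __map_without_bats is set), and mapin_ram() page-maps only the remainder, choosing _PAGE_RAM_TEXT for kernel text and _PAGE_RAM for everything else. The sketch below is not part of the commit; it only restates the code introduced above.

/* Sketch only -- restates the commit's new code paths, nothing new.
 * mmu_mapin_ram() reports how much low memory the BATs already map;
 * mapin_ram() page-maps the rest starting at that offset.
 */
void __init mapin_ram(void)
{
        unsigned long v, p, s, f;

        s = mmu_mapin_ram();            /* 0UL on 4xx/8xx or when __map_without_bats is set */
        v = KERNELBASE + s;
        p = PPC_MEMSTART + s;
        for (; s < total_lowmem; s += PAGE_SIZE) {
                if ((char *) v >= _stext && (char *) v < etext)
                        f = _PAGE_RAM_TEXT;     /* see the _PAGE_RAM_TEXT hunk below */
                else
                        f = _PAGE_RAM;
                map_page(v, p, f);
                v += PAGE_SIZE;
                p += PAGE_SIZE;
        }
}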
@@ -246,6 +246,23 @@ extern unsigned long ioremap_bot, ioremap_base;
 #define _PAGE_KERNEL    _PAGE_BASE | _PAGE_WRENABLE | _PAGE_SHARED | _PAGE_HWEXEC
 #define _PAGE_IO        _PAGE_KERNEL | _PAGE_NO_CACHE | _PAGE_GUARDED
 
+#define _PAGE_RAM       _PAGE_KERNEL
+
+#if defined(CONFIG_KGDB) || defined(CONFIG_XMON)
+/* We want the debuggers to be able to set breakpoints anywhere, so
+ * don't write protect the kernel text */
+#define _PAGE_RAM_TEXT  _PAGE_RAM
+#else
+#ifdef CONFIG_PPC_STD_MMU
+/* On standard PPC MMU, no user access implies kernel read/write
+ * access, so to write-protect the kernel text we must turn on user
+ * access */
+#define _PAGE_RAM_TEXT  (_PAGE_RAM & ~_PAGE_WRENABLE) | _PAGE_USER
+#else
+#define _PAGE_RAM_TEXT  (_PAGE_RAM & ~_PAGE_WRENABLE)
+#endif
+#endif
+
 #define PAGE_NONE       __pgprot(_PAGE_BASE)
 #define PAGE_READONLY   __pgprot(_PAGE_BASE | _PAGE_USER)
 #define PAGE_READONLY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
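One detail worth restating from the comments above: on the standard (hash) PPC MMU, a page with no user access is implicitly kernel read/write, so write-protecting the kernel text requires setting _PAGE_USER in addition to clearing the write-enable bits. The block below is an illustrative summary only, using a hypothetical macro name (_PAGE_RAM_TEXT_RESOLVED) so it cannot be mistaken for the real definition in the hunk.

/* Sketch only: how _PAGE_RAM_TEXT resolves in each configuration,
 * restated from the hunk above (hypothetical macro name).
 */
#if defined(CONFIG_KGDB) || defined(CONFIG_XMON)
/* debuggers must be able to plant breakpoints in kernel text */
#define _PAGE_RAM_TEXT_RESOLVED  _PAGE_RAM
#elif defined(CONFIG_PPC_STD_MMU)
/* hash MMU: clearing write-enable alone still leaves kernel R/W,
 * so user access is turned on to make the text read-only */
#define _PAGE_RAM_TEXT_RESOLVED  ((_PAGE_RAM & ~_PAGE_WRENABLE) | _PAGE_USER)
#else
/* everything else: just drop the write-enable bits */
#define _PAGE_RAM_TEXT_RESOLVED  (_PAGE_RAM & ~_PAGE_WRENABLE)
#endif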