Commit 0a4a7535 authored by Keith M. Wesolowski

Merge foobazco.org:/sources/2.5-bk
into foobazco.org:/sources/2.5-sparc-todave
parents 3c3daf31 21469996
@@ -22,6 +22,6 @@ EXPORT_SYMBOL(init_task);
* in etrap.S which assumes it.
*/
union thread_union init_thread_union
__attribute__((section (".text")))
__attribute__((section (".text,#alloc")))
__attribute__((aligned (THREAD_SIZE)))
= { INIT_THREAD_INFO(init_task) };
@@ -174,7 +174,7 @@ static inline int srmmu_pmd_present(pmd_t pmd)
static inline void srmmu_pmd_clear(pmd_t *pmdp) {
int i;
for (i = 0; i < SRMMU_PTRS_PER_PTE_SOFT/SRMMU_PTRS_PER_PTE; i++)
for (i = 0; i < PTRS_PER_PTE/SRMMU_REAL_PTRS_PER_PTE; i++)
srmmu_set_pte((pte_t *)&pmdp->pmdv[i], __pte(0));
}
@@ -234,9 +234,9 @@ static void srmmu_pmd_set(pmd_t *pmdp, pte_t *ptep)
int i;
ptp = __nocache_pa((unsigned long) ptep) >> 4;
for (i = 0; i < SRMMU_PTRS_PER_PTE_SOFT/SRMMU_PTRS_PER_PTE; i++) {
for (i = 0; i < PTRS_PER_PTE/SRMMU_REAL_PTRS_PER_PTE; i++) {
srmmu_set_pte((pte_t *)&pmdp->pmdv[i], SRMMU_ET_PTD | ptp);
ptp += (SRMMU_PTRS_PER_PTE*sizeof(pte_t) >> 4);
ptp += (SRMMU_REAL_PTRS_PER_PTE*sizeof(pte_t) >> 4);
}
}
@@ -246,9 +246,9 @@ static void srmmu_pmd_populate(pmd_t *pmdp, struct page *ptep)
int i;
ptp = page_to_pfn(ptep) << (PAGE_SHIFT-4); /* watch for overflow */
for (i = 0; i < SRMMU_PTRS_PER_PTE_SOFT/SRMMU_PTRS_PER_PTE; i++) {
for (i = 0; i < PTRS_PER_PTE/SRMMU_REAL_PTRS_PER_PTE; i++) {
srmmu_set_pte((pte_t *)&pmdp->pmdv[i], SRMMU_ET_PTD | ptp);
ptp += (SRMMU_PTRS_PER_PTE*sizeof(pte_t) >> 4);
ptp += (SRMMU_REAL_PTRS_PER_PTE*sizeof(pte_t) >> 4);
}
}
@@ -263,7 +263,7 @@ extern inline pgd_t *srmmu_pgd_offset(struct mm_struct * mm, unsigned long addre
static inline pmd_t *srmmu_pmd_offset(pgd_t * dir, unsigned long address)
{
return (pmd_t *) srmmu_pgd_page(*dir) +
((address >> SRMMU_PMD_SHIFT_SOFT) & (SRMMU_PTRS_PER_PMD_SOFT - 1));
((address >> PMD_SHIFT) & (PTRS_PER_PMD - 1));
}
/* Find an entry in the third-level page table.. */
@@ -273,7 +273,7 @@ static inline pte_t *srmmu_pte_offset(pmd_t * dir, unsigned long address)
pte = __nocache_va((dir->pmdv[0] & SRMMU_PTD_PMASK) << 4);
return (pte_t *) pte +
((address >> PAGE_SHIFT) & (SRMMU_PTRS_PER_PTE_SOFT - 1));
((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));
}
static unsigned long srmmu_swp_type(swp_entry_t entry)
@@ -487,7 +487,7 @@ static void srmmu_pmd_free(pmd_t * pmd)
static pte_t *
srmmu_pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
return (pte_t *)srmmu_get_nocache(SRMMU_PTE_SZ_SOFT, SRMMU_PTE_SZ_SOFT);
return (pte_t *)srmmu_get_nocache(PTE_SIZE, PTE_SIZE);
}
static struct page *
@@ -502,7 +502,7 @@ srmmu_pte_alloc_one(struct mm_struct *mm, unsigned long address)
static void srmmu_free_pte_fast(pte_t *pte)
{
srmmu_free_nocache((unsigned long)pte, SRMMU_PTE_SZ_SOFT);
srmmu_free_nocache((unsigned long)pte, PTE_SIZE);
}
static void srmmu_pte_free(struct page *pte)
@@ -514,7 +514,7 @@ static void srmmu_pte_free(struct page *pte)
BUG();
p = page_to_pfn(pte) << PAGE_SHIFT; /* Physical address */
p = (unsigned long) __nocache_va(p); /* Nocached virtual */
srmmu_free_nocache(p, SRMMU_PTE_SZ_SOFT);
srmmu_free_nocache(p, PTE_SIZE);
}
/*
@@ -829,7 +829,7 @@ static void cypress_flush_cache_range(struct vm_area_struct *vma, unsigned long
a = 0x20; b = 0x40; c = 0x60;
d = 0x80; e = 0xa0; f = 0xc0; g = 0xe0;
start &= SRMMU_PMD_MASK;
start &= SRMMU_REAL_PMD_MASK;
while(start < end) {
faddr = (start + (0x10000 - 0x100));
goto inside;
@@ -849,7 +849,7 @@ static void cypress_flush_cache_range(struct vm_area_struct *vma, unsigned long
"r" (a), "r" (b), "r" (c), "r" (d),
"r" (e), "r" (f), "r" (g));
} while (faddr != start);
start += SRMMU_PMD_SIZE;
start += SRMMU_REAL_PMD_SIZE;
}
srmmu_set_context(octx);
local_irq_restore(flags);
@@ -1067,16 +1067,15 @@ void __init srmmu_early_allocate_ptable_skeleton(unsigned long start, unsigned l
}
pmdp = srmmu_pmd_offset(__nocache_fix(pgdp), start);
if(srmmu_pmd_none(*(pmd_t *)__nocache_fix(pmdp))) {
ptep = (pte_t *)__srmmu_get_nocache(SRMMU_PTE_SZ_SOFT,
SRMMU_PTE_SZ_SOFT);
ptep = (pte_t *)__srmmu_get_nocache(PTE_SIZE, PTE_SIZE);
if (ptep == NULL)
early_pgtable_allocfail("pte");
memset(__nocache_fix(ptep), 0, SRMMU_PTE_SZ_SOFT);
memset(__nocache_fix(ptep), 0, PTE_SIZE);
srmmu_pmd_set(__nocache_fix(pmdp), ptep);
}
if (start > (0xffffffffUL - SRMMU_PMD_SIZE_SOFT))
if (start > (0xffffffffUL - PMD_SIZE))
break;
start = (start + SRMMU_PMD_SIZE_SOFT) & SRMMU_PMD_MASK_SOFT;
start = (start + PMD_SIZE) & PMD_MASK;
}
}
@@ -1097,16 +1096,16 @@ void __init srmmu_allocate_ptable_skeleton(unsigned long start, unsigned long en
}
pmdp = srmmu_pmd_offset(pgdp, start);
if(srmmu_pmd_none(*pmdp)) {
ptep = (pte_t *) __srmmu_get_nocache(SRMMU_PTE_SZ_SOFT,
SRMMU_PTE_SZ_SOFT);
ptep = (pte_t *) __srmmu_get_nocache(PTE_SIZE,
PTE_SIZE);
if (ptep == NULL)
early_pgtable_allocfail("pte");
memset(ptep, 0, SRMMU_PTE_SZ_SOFT);
memset(ptep, 0, PTE_SIZE);
srmmu_pmd_set(pmdp, ptep);
}
if (start > (0xffffffffUL - SRMMU_PMD_SIZE_SOFT))
if (start > (0xffffffffUL - PMD_SIZE))
break;
start = (start + SRMMU_PMD_SIZE_SOFT) & SRMMU_PMD_MASK_SOFT;
start = (start + PMD_SIZE) & PMD_MASK;
}
}
@@ -1136,8 +1135,8 @@ void __init srmmu_inherit_prom_mappings(unsigned long start,unsigned long end)
/* A red snapper, see what it really is. */
what = 0;
if(!(start & ~(SRMMU_PMD_MASK))) {
if(srmmu_hwprobe((start-PAGE_SIZE) + SRMMU_PMD_SIZE) == prompte)
if(!(start & ~(SRMMU_REAL_PMD_MASK))) {
if(srmmu_hwprobe((start-PAGE_SIZE) + SRMMU_REAL_PMD_SIZE) == prompte)
what = 1;
}
@@ -1162,11 +1161,11 @@ void __init srmmu_inherit_prom_mappings(unsigned long start,unsigned long end)
}
pmdp = srmmu_pmd_offset(__nocache_fix(pgdp), start);
if(srmmu_pmd_none(*(pmd_t *)__nocache_fix(pmdp))) {
ptep = (pte_t *) __srmmu_get_nocache(SRMMU_PTE_SZ_SOFT,
SRMMU_PTE_SZ_SOFT);
ptep = (pte_t *) __srmmu_get_nocache(PTE_SIZE,
PTE_SIZE);
if (ptep == NULL)
early_pgtable_allocfail("pte");
memset(__nocache_fix(ptep), 0, SRMMU_PTE_SZ_SOFT);
memset(__nocache_fix(ptep), 0, PTE_SIZE);
srmmu_pmd_set(__nocache_fix(pmdp), ptep);
}
if(what == 1) {
@@ -1176,9 +1175,9 @@ void __init srmmu_inherit_prom_mappings(unsigned long start,unsigned long end)
* good hardware PTE piece. Alternatives seem worse.
*/
unsigned int x; /* Index of HW PMD in soft cluster */
x = (start >> SRMMU_PMD_SHIFT) & 15;
x = (start >> PMD_SHIFT) & 15;
*(unsigned long *)__nocache_fix(&pmdp->pmdv[x]) = prompte;
start += SRMMU_PMD_SIZE;
start += SRMMU_REAL_PMD_SIZE;
continue;
}
ptep = srmmu_pte_offset(__nocache_fix(pmdp), start);
@@ -2139,16 +2138,11 @@ void __init ld_mmu_srmmu(void)
extern void ld_mmu_iounit(void);
extern void ___xchg32_sun4md(void);
BTFIXUPSET_SIMM13(pmd_shift, SRMMU_PMD_SHIFT_SOFT);
BTFIXUPSET_SETHI(pmd_size, SRMMU_PMD_SIZE_SOFT);
BTFIXUPSET_SETHI(pmd_mask, SRMMU_PMD_MASK_SOFT);
BTFIXUPSET_SIMM13(pgdir_shift, SRMMU_PGDIR_SHIFT);
BTFIXUPSET_SETHI(pgdir_size, SRMMU_PGDIR_SIZE);
BTFIXUPSET_SETHI(pgdir_mask, SRMMU_PGDIR_MASK);
BTFIXUPSET_SIMM13(ptrs_per_pte, SRMMU_PTRS_PER_PTE_SOFT);
BTFIXUPSET_SIMM13(ptrs_per_pmd, SRMMU_PTRS_PER_PMD_SOFT);
BTFIXUPSET_SIMM13(ptrs_per_pmd, SRMMU_PTRS_PER_PMD);
BTFIXUPSET_SIMM13(ptrs_per_pgd, SRMMU_PTRS_PER_PGD);
BTFIXUPSET_INT(page_none, pgprot_val(SRMMU_PAGE_NONE));
......
@@ -2137,14 +2137,10 @@ void __init ld_mmu_sun4c(void)
printk("Loading sun4c MMU routines\n");
/* First the constants */
BTFIXUPSET_SIMM13(pmd_shift, SUN4C_PMD_SHIFT);
BTFIXUPSET_SETHI(pmd_size, SUN4C_PMD_SIZE);
BTFIXUPSET_SETHI(pmd_mask, SUN4C_PMD_MASK);
BTFIXUPSET_SIMM13(pgdir_shift, SUN4C_PGDIR_SHIFT);
BTFIXUPSET_SETHI(pgdir_size, SUN4C_PGDIR_SIZE);
BTFIXUPSET_SETHI(pgdir_mask, SUN4C_PGDIR_MASK);
BTFIXUPSET_SIMM13(ptrs_per_pte, SUN4C_PTRS_PER_PTE);
BTFIXUPSET_SIMM13(ptrs_per_pmd, SUN4C_PTRS_PER_PMD);
BTFIXUPSET_SIMM13(ptrs_per_pgd, SUN4C_PTRS_PER_PGD);
BTFIXUPSET_SIMM13(user_ptrs_per_pgd, KERNBASE / SUN4C_PGDIR_SIZE);
......
@@ -2,6 +2,7 @@
#define _ASM_SPARC_DMA_MAPPING_H
#include <linux/config.h>
#include <linux/device.h>
#ifdef CONFIG_PCI
#include <asm-generic/dma-mapping.h>
......
@@ -24,7 +24,7 @@
#include <asm/fixmap.h>
#include <asm/vaddrs.h>
#include <asm/kmap_types.h>
#include <asm/pgtsrmmu.h>
#include <asm/pgtable.h>
/* declarations for highmem.c */
extern unsigned long highstart_pfn, highend_pfn;
@@ -43,7 +43,7 @@ extern void kmap_init(void) __init;
*/
#define LAST_PKMAP 1024
#define PKMAP_SIZE (LAST_PKMAP << PAGE_SHIFT)
#define PKMAP_BASE SRMMU_PMD_ALIGN_SOFT(SRMMU_NOCACHE_VADDR + (SRMMU_MAX_NOCACHE_PAGES << PAGE_SHIFT))
#define PKMAP_BASE PMD_ALIGN(SRMMU_NOCACHE_VADDR + (SRMMU_MAX_NOCACHE_PAGES << PAGE_SHIFT))
#define LAST_PKMAP_MASK (LAST_PKMAP - 1)
#define PKMAP_NR(virt) ((virt - PKMAP_BASE) >> PAGE_SHIFT)
......
@@ -75,29 +75,10 @@ BTFIXUPDEF_CALL(void, mmu_unmap_dma_area, unsigned long busa, int len)
#define mmu_unmap_dma_area(ba,len) BTFIXUP_CALL(mmu_unmap_dma_area)(ba,len)
#define mmu_translate_dvma(ba) BTFIXUP_CALL(mmu_translate_dvma)(ba)
/*
*/
BTFIXUPDEF_SIMM13(pmd_shift)
BTFIXUPDEF_SETHI(pmd_size)
BTFIXUPDEF_SETHI(pmd_mask)
extern unsigned int pmd_align(unsigned int addr) __attribute_const__;
extern __inline__ unsigned int pmd_align(unsigned int addr)
{
return ((addr + ~BTFIXUP_SETHI(pmd_mask)) & BTFIXUP_SETHI(pmd_mask));
}
BTFIXUPDEF_SIMM13(pgdir_shift)
BTFIXUPDEF_SETHI(pgdir_size)
BTFIXUPDEF_SETHI(pgdir_mask)
extern unsigned int pgdir_align(unsigned int addr) __attribute_const__;
extern __inline__ unsigned int pgdir_align(unsigned int addr)
{
return ((addr + ~BTFIXUP_SETHI(pgdir_mask)) & BTFIXUP_SETHI(pgdir_mask));
}
BTFIXUPDEF_SIMM13(ptrs_per_pte)
BTFIXUPDEF_SIMM13(ptrs_per_pmd)
BTFIXUPDEF_SIMM13(ptrs_per_pgd)
BTFIXUPDEF_SIMM13(user_ptrs_per_pgd)
@@ -112,19 +93,19 @@ BTFIXUPDEF_INT(page_copy)
BTFIXUPDEF_INT(page_readonly)
BTFIXUPDEF_INT(page_kernel)
#define PMD_SHIFT BTFIXUP_SIMM13(pmd_shift)
#define PMD_SIZE BTFIXUP_SETHI(pmd_size)
#define PMD_MASK BTFIXUP_SETHI(pmd_mask)
#define PMD_ALIGN(addr) pmd_align(addr)
#define PMD_SHIFT SUN4C_PMD_SHIFT
#define PMD_SIZE (1UL << PMD_SHIFT)
#define PMD_MASK (~(PMD_SIZE-1))
#define PMD_ALIGN(__addr) (((__addr) + ~PMD_MASK) & PMD_MASK)
#define PGDIR_SHIFT BTFIXUP_SIMM13(pgdir_shift)
#define PGDIR_SIZE BTFIXUP_SETHI(pgdir_size)
#define PGDIR_MASK BTFIXUP_SETHI(pgdir_mask)
#define PGDIR_ALIGN pgdir_align(addr)
#define PTRS_PER_PTE BTFIXUP_SIMM13(ptrs_per_pte)
#define PTRS_PER_PTE 1024
#define PTRS_PER_PMD BTFIXUP_SIMM13(ptrs_per_pmd)
#define PTRS_PER_PGD BTFIXUP_SIMM13(ptrs_per_pgd)
#define USER_PTRS_PER_PGD BTFIXUP_SIMM13(user_ptrs_per_pgd)
#define FIRST_USER_PGD_NR 0
#define PTE_SIZE (PTRS_PER_PTE*4)
#define PAGE_NONE __pgprot(BTFIXUP_INT(page_none))
#define PAGE_SHARED __pgprot(BTFIXUP_INT(page_shared))
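For reference, the pgtable.h macros introduced in the hunk above are plain compile-time expressions rather than BTFIXUP run-time patched values. The following is a minimal standalone sketch (illustrative only, not part of the commit) of what the new PMD_ALIGN() computes, assuming the sun4c/srmmu value PMD_SHIFT = 22 taken from this patch; the pgtsun4.h variant below uses 23.

/* Illustrative only: evaluate the new constant PMD macros in isolation,
 * assuming PMD_SHIFT = 22 as defined via SUN4C_PMD_SHIFT in this patch. */
#include <stdio.h>

#define PMD_SHIFT        22
#define PMD_SIZE         (1UL << PMD_SHIFT)           /* 4 MB per soft pmd */
#define PMD_MASK         (~(PMD_SIZE - 1))
#define PMD_ALIGN(addr)  (((addr) + ~PMD_MASK) & PMD_MASK)

int main(void)
{
        unsigned long addr = 0x00500123UL;

        /* Rounds the address up to the next 4 MB boundary: 0x00800000 here. */
        printf("PMD_ALIGN(0x%08lx) = 0x%08lx\n", addr, PMD_ALIGN(addr));
        return 0;
}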
......
@@ -17,10 +17,10 @@
#define SRMMU_MAX_CONTEXTS 65536
/* PMD_SHIFT determines the size of the area a second-level page table entry can map */
#define SRMMU_PMD_SHIFT 18
#define SRMMU_PMD_SIZE (1UL << SRMMU_PMD_SHIFT)
#define SRMMU_PMD_MASK (~(SRMMU_PMD_SIZE-1))
/* #define SRMMU_PMD_ALIGN(addr) (((addr)+SRMMU_PMD_SIZE-1)&SRMMU_PMD_MASK) */
#define SRMMU_REAL_PMD_SHIFT 18
#define SRMMU_REAL_PMD_SIZE (1UL << SRMMU_REAL_PMD_SHIFT)
#define SRMMU_REAL_PMD_MASK (~(SRMMU_REAL_PMD_SIZE-1))
#define SRMMU_REAL_PMD_ALIGN(__addr) (((__addr)+SRMMU_REAL_PMD_SIZE-1)&SRMMU_REAL_PMD_MASK)
/* PGDIR_SHIFT determines what a third-level page table entry can map */
#define SRMMU_PGDIR_SHIFT 24
@@ -28,13 +28,13 @@
#define SRMMU_PGDIR_MASK (~(SRMMU_PGDIR_SIZE-1))
#define SRMMU_PGDIR_ALIGN(addr) (((addr)+SRMMU_PGDIR_SIZE-1)&SRMMU_PGDIR_MASK)
#define SRMMU_PTRS_PER_PTE 64
#define SRMMU_PTRS_PER_PMD 64
#define SRMMU_PTRS_PER_PGD 256
#define SRMMU_REAL_PTRS_PER_PTE 64
#define SRMMU_REAL_PTRS_PER_PMD 64
#define SRMMU_PTRS_PER_PGD 256
#define SRMMU_PTE_TABLE_SIZE 0x100 /* 64 entries, 4 bytes a piece */
#define SRMMU_PMD_TABLE_SIZE 0x100 /* 64 entries, 4 bytes a piece */
#define SRMMU_PGD_TABLE_SIZE 0x400 /* 256 entries, 4 bytes a piece */
#define SRMMU_REAL_PTE_TABLE_SIZE (SRMMU_REAL_PTRS_PER_PTE*4)
#define SRMMU_PMD_TABLE_SIZE (SRMMU_REAL_PTRS_PER_PMD*4)
#define SRMMU_PGD_TABLE_SIZE (SRMMU_PTRS_PER_PGD*4)
/*
* To support pagetables in highmem, Linux introduces APIs which
@@ -44,16 +44,11 @@
* software tables.
*
* PMD_SHIFT determines the size of the area a second-level page table entry
* can map, and our pmd_t is 16 times larger than normal.
* can map, and our pmd_t is 16 times larger than normal. The values which
* were once defined here are now generic for 4c and srmmu, so they're
* found in pgtable.h.
*/
#define SRMMU_PTRS_PER_PTE_SOFT (PAGE_SIZE/4) /* 16 hard tables per 4K page */
#define SRMMU_PTRS_PER_PMD_SOFT 4 /* Each pmd_t contains 16 hard PTPs */
#define SRMMU_PTE_SZ_SOFT PAGE_SIZE /* same as above, in bytes */
#define SRMMU_PMD_SHIFT_SOFT 22
#define SRMMU_PMD_SIZE_SOFT (1UL << SRMMU_PMD_SHIFT_SOFT)
#define SRMMU_PMD_MASK_SOFT (~(SRMMU_PMD_SIZE_SOFT-1))
#define SRMMU_PMD_ALIGN_SOFT(addr) (((addr)+SRMMU_PMD_SIZE_SOFT-1)&SRMMU_PMD_MASK_SOFT)
#define SRMMU_PTRS_PER_PMD 4
/* Definition of the values in the ET field of PTD's and PTE's */
#define SRMMU_ET_MASK 0x3
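The comment block in the hunk above notes that each sparc32 pmd_t now bundles 16 hardware page tables, with the generic PTRS_PER_* values moved to pgtable.h and the hardware geometry kept under the SRMMU_REAL_* names. As a sanity check of the arithmetic this rename relies on, here is a small standalone C sketch (illustrative only; PAGE_SIZE of 4096 and the shift values are copied from the hunks in this commit, not pulled from the kernel headers):

/*
 * Illustrative only: the soft/real page-table ratios used by this commit,
 * with the constants hard-coded from the hunks above (PAGE_SIZE assumed 4096).
 */
#include <stdio.h>

#define PAGE_SIZE                4096
#define PTRS_PER_PTE             (PAGE_SIZE / 4)    /* 1024 soft entries per page */
#define SRMMU_REAL_PTRS_PER_PTE  64                 /* one hardware PTE table */
#define PMD_SHIFT                22                 /* soft pmd maps 4 MB */
#define SRMMU_REAL_PMD_SHIFT     18                 /* hardware pmd maps 256 KB */

int main(void)
{
        /* How many hardware PTE tables fit in one soft PTE page... */
        int hw_tables_per_page = PTRS_PER_PTE / SRMMU_REAL_PTRS_PER_PTE;

        /* ...and how many 256 KB hardware regions make up one 4 MB soft pmd. */
        int hw_pmds_per_soft = 1 << (PMD_SHIFT - SRMMU_REAL_PMD_SHIFT);

        printf("hardware PTE tables per soft page: %d\n", hw_tables_per_page);
        printf("hardware PMD pieces per soft pmd:  %d\n", hw_pmds_per_soft);

        /* Both print 16, matching the loop bounds in srmmu_pmd_clear() and
         * srmmu_pmd_set() and the "& 15" index used when inheriting PROM
         * mappings in the srmmu.c hunks above. */
        return 0;
}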
@@ -255,7 +250,7 @@ extern __inline__ void srmmu_flush_tlb_region(unsigned long addr)
extern __inline__ void srmmu_flush_tlb_segment(unsigned long addr)
{
addr &= SRMMU_PMD_MASK;
addr &= SRMMU_REAL_PMD_MASK;
__asm__ __volatile__("sta %%g0, [%0] %1\n\t": :
"r" (addr | 0x100), /* Flush TLB segment.. */
"i" (ASI_M_FLUSH_PROBE) : "memory");
......
@@ -11,9 +11,6 @@
/* PMD_SHIFT determines the size of the area a second-level page table can map */
#define SUN4C_PMD_SHIFT 23
#define SUN4C_PMD_SIZE (1UL << SUN4C_PMD_SHIFT)
#define SUN4C_PMD_MASK (~(SUN4C_PMD_SIZE-1))
#define SUN4C_PMD_ALIGN(addr) (((addr)+SUN4C_PMD_SIZE-1)&SUN4C_PMD_MASK)
/* PGDIR_SHIFT determines what a third-level page table entry can map */
#define SUN4C_PGDIR_SHIFT 23
......
@@ -10,9 +10,6 @@
/* PMD_SHIFT determines the size of the area a second-level page table can map */
#define SUN4C_PMD_SHIFT 22
#define SUN4C_PMD_SIZE (1UL << SUN4C_PMD_SHIFT)
#define SUN4C_PMD_MASK (~(SUN4C_PMD_SIZE-1))
#define SUN4C_PMD_ALIGN(addr) (((addr)+SUN4C_PMD_SIZE-1)&SUN4C_PMD_MASK)
/* PGDIR_SHIFT determines what a third-level page table entry can map */
#define SUN4C_PGDIR_SHIFT 22
......
@@ -236,7 +236,7 @@ static inline unsigned long viking_hwprobe(unsigned long vaddr)
: "=r" (val)
: "r" (vaddr | 0x100), "i" (ASI_M_FLUSH_PROBE));
if ((val & SRMMU_ET_MASK) == SRMMU_ET_PTE) {
vaddr &= ~SRMMU_PMD_MASK;
vaddr &= ~SRMMU_REAL_PMD_MASK;
vaddr >>= PAGE_SHIFT;
return val | (vaddr << 8);
}
......