Commit 15c5cc6e authored by Pete Zaitcev, committed by David S. Miller

SPARC: More work to get sparc32 working in 2.5.x

- page-size PTE directory with 16-word pmd_t as suggested by RMK and Riel
- support for 2.5.x softirq infrastructure
- other miscellanea
parent 80e4e144
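
A note on the central data-structure change (an editorial sketch, derived from the asm-sparc/page.h and srmmu.c hunks below): sparc32 hardware page tables are only 256 bytes, so this commit groups 16 of them into one page-sized software pmd_t. The visible API change is that pmd_t becomes an aggregate and pmd_val() reads its first slot:

	/* From the page.h hunk below; pmdv[0] stands in for the whole group. */
	typedef struct { unsigned long pmdv[16]; } pmd_t;
	#define pmd_val(x)	((x).pmdv[0])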
@@ -24,9 +24,6 @@
 struct linux_ebus *ebus_chain = 0;
-#ifdef CONFIG_SUN_AUXIO
-extern void auxio_probe(void);
-#endif
 extern void rs_init(void);
 /* We are together with pcic.c under CONFIG_PCI. */
@@ -366,7 +363,4 @@ void __init ebus_init(void)
 	}
 	rs_init();
-#ifdef CONFIG_SUN_AUXIO
-	auxio_probe();
-#endif
 }
@@ -415,7 +415,7 @@ void handler_irq(int irq, struct pt_regs * regs)
 	extern void smp4m_irq_rotate(int cpu);
 #endif
-	irq_enter(cpu, irq);
+	irq_enter();
 	disable_pil_irq(irq);
 #ifdef CONFIG_SMP
 	/* Only rotate on lower priority IRQ's (scsi, ethernet, etc.). */
@@ -431,9 +431,7 @@ void handler_irq(int irq, struct pt_regs * regs)
 		action = action->next;
 	} while (action);
 	enable_pil_irq(irq);
-	irq_exit(cpu, irq);
-	if (softirq_pending(cpu))
-		do_softirq();
+	irq_exit();
 }
 #ifdef CONFIG_BLK_DEV_FD
@@ -444,13 +442,14 @@ void sparc_floppy_irq(int irq, void *dev_id, struct pt_regs *regs)
 	int cpu = smp_processor_id();
 	disable_pil_irq(irq);
-	irq_enter(cpu, irq);
+	irq_enter();
 	kstat.irqs[cpu][irq]++;
 	floppy_interrupt(irq, dev_id, regs);
-	irq_exit(cpu, irq);
+	irq_exit();
 	enable_pil_irq(irq);
-	if (softirq_pending(cpu))
-		do_softirq();
+	// XXX Eek, it's totally changed with preempt_count() and such
+	// if (softirq_pending(cpu))
+	//	do_softirq();
 }
 #endif
...
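
The irq_enter()/irq_exit() conversion above recurs throughout this commit; a hedged sketch of a converted handler (names are illustrative, not from the commit):

	void example_handler(int irq, struct pt_regs *regs)
	{
		irq_enter();	/* adds HARDIRQ_OFFSET to preempt_count() */
		/* ... walk the irqaction chain for this irq ... */
		irq_exit();	/* may run do_softirq() when safe */
	}

The explicit softirq_pending()/do_softirq() calls disappear because the new irq_exit(), defined in the hardirq.h hunk near the end of this diff, performs that check itself.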
@@ -55,10 +55,6 @@ asmlinkage int sys_pciconfig_write(unsigned long bus,
 #else
-#ifdef CONFIG_SUN_JSFLASH
-extern int jsflash_init(void);
-#endif
 struct pci_fixup pcibios_fixups[] = {
 	{ 0 }
 };
@@ -435,7 +431,7 @@ static void __init pcic_pbm_scan_bus(struct linux_pcic *pcic)
 /*
  * Main entry point from the PCI subsystem.
  */
-void __init pcibios_init(void)
+static int __init pcibios_init(void)
 {
 	struct linux_pcic *pcic;
@@ -444,7 +440,7 @@ void __init pcibios_init(void)
 	 * So, here we report the presence of PCIC and do some magic passes.
 	 */
 	if(!pcic0_up)
-		return;
+		return 0;
 	pcic = &pcic0;
 	/*
@@ -465,9 +461,7 @@ void __init pcibios_init(void)
 	pcic_pbm_scan_bus(pcic);
 	ebus_init();
-#ifdef CONFIG_SUN_JSFLASH
-	jsflash_init();
-#endif
+	return 0;
 }
 int pcic_present(void)
@@ -1037,3 +1031,5 @@ void insl(unsigned long addr, void *dst, unsigned long count) {
 }
 #endif
+
+subsys_initcall(pcibios_init);
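
The pcibios_init() change above is the standard 2.5 initcall conversion; a minimal sketch of the pattern (assuming the usual init/main.c initcall ordering, which this commit does not spell out):

	static int __init pcibios_init(void)
	{
		if (!pcic0_up)
			return 0;	/* no PCIC: nothing to do, not an error */
		/* ... probe, scan buses, ebus_init() ... */
		return 0;
	}
	subsys_initcall(pcibios_init);

Returning int lets the initcall machinery report failures, and registering via subsys_initcall() replaces the old direct call from the PCI core. It also makes the CONFIG_SUN_JSFLASH hook unnecessary, since jsflash now registers its own entry point (see the jsflash hunk below).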
@@ -86,7 +86,8 @@ ret_trap_continue:
 	wr	%t_psr, 0x0, %psr
 	WRITE_PAUSE
-	ld	[%curptr + AOFF_task_thread + AOFF_thread_w_saved], %twin_tmp1
+	ld	[%curptr + TI_TASK], %o5
+	ld	[%o5 + AOFF_task_thread + AOFF_thread_w_saved], %twin_tmp1
 	orcc	%g0, %twin_tmp1, %g0
 	be	ret_trap_nobufwins
 	 nop
...
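
What the two-instruction sequence above encodes (editorial note; the C rendering is illustrative): with the 2.5 thread_info split, %curptr holds a struct thread_info pointer rather than the task itself, so task fields need one extra load:

	/* Roughly: w_saved = current_thread_info()->task->thread.w_saved; */
	struct task_struct *tsk = current_thread_info()->task;	/* ld [%curptr + TI_TASK], %o5 */
	unsigned long w_saved = tsk->thread.w_saved;		/* ld [%o5 + AOFF_...], %twin_tmp1 */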
@@ -198,7 +198,7 @@ void sun4d_handler_irq(int irq, struct pt_regs * regs)
 	cc_set_iclr(1 << irq);
-	irq_enter(cpu, irq);
+	irq_enter();
 	kstat.irqs[cpu][irq]++;
 	if (!sbusl) {
 		action = *(irq + irq_action);
@@ -239,9 +239,7 @@ void sun4d_handler_irq(int irq, struct pt_regs * regs)
 			}
 		}
 	}
-	irq_exit(cpu, irq);
-	if (softirq_pending(cpu))
-		do_softirq();
+	irq_exit();
 }
 unsigned int sun4d_build_irq(struct sbus_dev *sdev, int irq)
...
@@ -458,9 +458,9 @@ void smp4d_percpu_timer_interrupt(struct pt_regs *regs)
 	if(!--prof_counter[cpu]) {
 		int user = user_mode(regs);
-		irq_enter(cpu, 0);
+		irq_enter();
 		update_process_times(user);
-		irq_exit(cpu, 0);
+		irq_exit();
 		prof_counter[cpu] = prof_multiplier[cpu];
 	}
...
@@ -445,9 +445,9 @@ void smp4m_percpu_timer_interrupt(struct pt_regs *regs)
 	if(!--prof_counter[cpu]) {
 		int user = user_mode(regs);
-		irq_enter(cpu, 0);
+		irq_enter();
 		update_process_times(user);
-		irq_exit(cpu, 0);
+		irq_exit();
 		prof_counter[cpu] = prof_multiplier[cpu];
 	}
...
@@ -49,6 +49,26 @@
 #include <asm/btfixup.h>
+/*
+ * To support pagetables in highmem, Linux introduces APIs which
+ * return struct page* and generally manipulate page tables when
+ * they are not mapped into kernel space. Our hardware page tables
+ * are smaller than pages. We lump hardware tables into big, page sized
+ * software tables.
+ *
+ * PMD_SHIFT determines the size of the area a second-level page table entry
+ * can map, and our pmd_t is 16 times larger than normal.
+ */
+#define SRMMU_PTRS_PER_PMD_SOFT	0x4	/* Each pmd_t contains 16 hard PTPs */
+#define SRMMU_PTRS_PER_PTE_SOFT	0x400	/* 16 hard tables per 4K page */
+#define SRMMU_PTE_SZ_SOFT	0x1000	/* same as above, in bytes */
+
+#define SRMMU_PMD_SHIFT_SOFT	22
+#define SRMMU_PMD_SIZE_SOFT	(1UL << SRMMU_PMD_SHIFT_SOFT)
+#define SRMMU_PMD_MASK_SOFT	(~(SRMMU_PMD_SIZE_SOFT-1))
+// #define SRMMU_PMD_ALIGN(addr) (((addr)+SRMMU_PMD_SIZE-1)&SRMMU_PMD_MASK)
 enum mbus_module srmmu_modtype;
 unsigned int hwbug_bitmask;
 int vac_cache_size;
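
The arithmetic behind the *_SOFT constants above, as a worked check (assuming 4K pages and 4-byte PTEs, as on sparc32):

	/*
	 * hard PTE table:  SRMMU_PTRS_PER_PTE (64) entries * 4 bytes = 256 bytes
	 * 16 tables/page:  16 * 256  = 4096 = SRMMU_PTE_SZ_SOFT (0x1000)
	 * soft entries:    16 * 64   = 1024 = SRMMU_PTRS_PER_PTE_SOFT (0x400)
	 * area per pmd_t:  1024 * 4K = 4 MB = 1 << 22 = 1 << SRMMU_PMD_SHIFT_SOFT
	 * soft pmds/pgd:   a pgdir covers 1 << 24, so 4 = SRMMU_PTRS_PER_PMD_SOFT
	 */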
@@ -129,36 +149,30 @@ spinlock_t srmmu_nocache_spinlock;
 #define __nocache_va(PADDR) (__va((unsigned long)PADDR) - (unsigned long)srmmu_nocache_pool + SRMMU_NOCACHE_VADDR)
 #define __nocache_fix(VADDR) __va(__nocache_pa(VADDR))
-static inline unsigned long srmmu_pgd_page(pgd_t pgd)
-{ return srmmu_device_memory(pgd_val(pgd))?~0:(unsigned long)__nocache_va((pgd_val(pgd) & SRMMU_PTD_PMASK) << 4); }
-
-static inline unsigned long srmmu_pmd_page_kernel(pmd_t pmd)
-{
-	return (unsigned long)
-	    __nocache_va((pmd_val(pmd) & SRMMU_PTD_PMASK) << 4);
-}
-
-static struct page *srmmu_pmd_page(pmd_t pmd)
+static inline unsigned long srmmu_pte_pfn(pte_t pte)
 {
-	if (srmmu_device_memory(pmd_val(pmd))) {
+	if (srmmu_device_memory(pte_val(pte))) {
 		/* XXX Anton obviously had something in mind when he did this.
 		 * But what?
 		 */
 		/* return (struct page *)~0; */
-		BUG();	/* NO WAY */
+		BUG();
 	}
-	return virt_to_page(srmmu_pmd_page_kernel(pmd));
+	return (pte_val(pte) & SRMMU_PTE_PMASK) >> (PAGE_SHIFT-4);
 }
-static inline unsigned long srmmu_pte_pfn(pte_t pte)
+static struct page *srmmu_pmd_page(pmd_t pmd)	/* XXX inline later */
 {
-	if (srmmu_device_memory(pte_val(pte)))
+	if (srmmu_device_memory(pmd_val(pmd)))
 		BUG();
-	return (unsigned long)
-	    (((pte_val(pte) & SRMMU_PTE_PMASK) << 4) >> PAGE_SHIFT);
+	return pfn_to_page((pmd_val(pmd) & SRMMU_PTD_PMASK) >> (PAGE_SHIFT-4));
 }
+
+static inline unsigned long srmmu_pgd_page(pgd_t pgd)
+{ return srmmu_device_memory(pgd_val(pgd))?~0:(unsigned long)__nocache_va((pgd_val(pgd) & SRMMU_PTD_PMASK) << 4); }
 static inline int srmmu_pte_none(pte_t pte)
 { return !(pte_val(pte) & 0xFFFFFFF); }
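
The new pfn helpers above fold two shifts into one, since SRMMU table entries store a physical address already shifted right by 4:

	/* paddr = (pte_val(pte) & SRMMU_PTE_PMASK) << 4;
	 * pfn   = paddr >> PAGE_SHIFT;
	 * hence: pfn = (pte_val(pte) & SRMMU_PTE_PMASK) >> (PAGE_SHIFT - 4);
	 */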
@@ -177,8 +191,11 @@ static inline int srmmu_pmd_bad(pmd_t pmd)
 static inline int srmmu_pmd_present(pmd_t pmd)
 { return ((pmd_val(pmd) & SRMMU_ET_MASK) == SRMMU_ET_PTD); }
-static inline void srmmu_pmd_clear(pmd_t *pmdp)
-{ srmmu_set_pte((pte_t *)pmdp, __pte(0)); }
+static inline void srmmu_pmd_clear(pmd_t *pmdp) {
+	int i;
+	for (i = 0; i < SRMMU_PTRS_PER_PTE_SOFT/SRMMU_PTRS_PER_PTE; i++)
+		srmmu_set_pte((pte_t *)&pmdp->pmdv[i], __pte(0));
+}
 static inline int srmmu_pgd_none(pgd_t pgd)
 { return !(pgd_val(pgd) & 0xFFFFFFF); }
@@ -224,7 +241,7 @@ static inline pte_t srmmu_pte_mkyoung(pte_t pte)
  * and a page entry and page directory to the page they refer to.
  */
 static pte_t srmmu_mk_pte(struct page *page, pgprot_t pgprot)
-{ return __pte((((page - mem_map) << PAGE_SHIFT) >> 4) | pgprot_val(pgprot)); }
+{ return __pte(((page - mem_map) << (PAGE_SHIFT-4)) | pgprot_val(pgprot)); }
 static pte_t srmmu_mk_pte_phys(unsigned long page, pgprot_t pgprot)
 { return __pte(((page) >> 4) | pgprot_val(pgprot)); }
@@ -239,16 +256,28 @@ static inline void srmmu_ctxd_set(ctxd_t *ctxp, pgd_t *pgdp)
 static inline void srmmu_pgd_set(pgd_t * pgdp, pmd_t * pmdp)
 { srmmu_set_pte((pte_t *)pgdp, (SRMMU_ET_PTD | (__nocache_pa((unsigned long) pmdp) >> 4))); }
-static inline void srmmu_pmd_set(pmd_t * pmdp, pte_t * ptep)
+static void srmmu_pmd_set(pmd_t *pmdp, pte_t *ptep)
 {
-	srmmu_set_pte((pte_t *)pmdp,
-	    (SRMMU_ET_PTD | (__nocache_pa((unsigned long) ptep) >> 4)));
+	unsigned long ptp;	/* Physical address, shifted right by 4 */
+	int i;
+
+	ptp = __nocache_pa((unsigned long) ptep) >> 4;
+	for (i = 0; i < SRMMU_PTRS_PER_PTE_SOFT/SRMMU_PTRS_PER_PTE; i++) {
+		srmmu_set_pte((pte_t *)&pmdp->pmdv[i], SRMMU_ET_PTD | ptp);
+		ptp += (SRMMU_PTRS_PER_PTE*sizeof(pte_t) >> 4);
+	}
 }
-static inline void srmmu_pmd_populate(pmd_t * pmdp, struct page * ptep)
+static void srmmu_pmd_populate(pmd_t *pmdp, struct page *ptep)
 {
-	srmmu_set_pte((pte_t *)pmdp,
-	    (SRMMU_ET_PTD | (((ptep - mem_map) << PAGE_SHIFT) >> 4)));
+	unsigned long ptp;	/* Physical address, shifted right by 4 */
+	int i;
+
+	ptp = (ptep - mem_map) << (PAGE_SHIFT-4);	/* watch for overflow */
+	for (i = 0; i < SRMMU_PTRS_PER_PTE_SOFT/SRMMU_PTRS_PER_PTE; i++) {
+		srmmu_set_pte((pte_t *)&pmdp->pmdv[i], SRMMU_ET_PTD | ptp);
+		ptp += (SRMMU_PTRS_PER_PTE*sizeof(pte_t) >> 4);
+	}
 }
 static inline pte_t srmmu_pte_modify(pte_t pte, pgprot_t newprot)
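
The loop step in srmmu_pmd_set()/srmmu_pmd_populate() above works out as:

	/* 16 iterations: SRMMU_PTRS_PER_PTE_SOFT / SRMMU_PTRS_PER_PTE = 1024/64.
	 * Each hard table is 64 * sizeof(pte_t) = 256 bytes, and PTPs store
	 * paddr >> 4, so the per-step increment is 256 >> 4 = 16: each hard
	 * PTP points one 256-byte table further into the shared PTE page.
	 */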
@@ -260,19 +289,27 @@ extern inline pgd_t *srmmu_pgd_offset(struct mm_struct * mm, unsigned long addre
 /* Find an entry in the second-level page table.. */
 static inline pmd_t *srmmu_pmd_offset(pgd_t * dir, unsigned long address)
-{ return (pmd_t *) srmmu_pgd_page(*dir) + ((address >> SRMMU_PMD_SHIFT) & (SRMMU_PTRS_PER_PMD - 1)); }
+{
+	return (pmd_t *) srmmu_pgd_page(*dir) +
+	    ((address >> SRMMU_PMD_SHIFT_SOFT) & (SRMMU_PTRS_PER_PMD_SOFT - 1));
+}
 /* Find an entry in the third-level page table.. */
 static inline pte_t *srmmu_pte_offset(pmd_t * dir, unsigned long address)
 {
-	unsigned long pte;
-	pte = srmmu_pmd_page_kernel(*dir);
+	void *pte;
+	pte = __nocache_va((dir->pmdv[0] & SRMMU_PTD_PMASK) << 4);
 	return (pte_t *) pte +
-	    ((address >> PAGE_SHIFT) & (SRMMU_PTRS_PER_PTE - 1));
+	    ((address >> PAGE_SHIFT) & (SRMMU_PTRS_PER_PTE_SOFT - 1));
 }
-unsigned long __srmmu_get_nocache(int size, int align)
+/*
+ * size: bytes to allocate in the nocache area.
+ * align: bytes, number to align at.
+ * Returns the virtual address of the allocated area.
+ */
+static unsigned long __srmmu_get_nocache(int size, int align)
 {
 	int offset = srmmu_nocache_low;
 	int i;
@@ -423,36 +460,55 @@ static void srmmu_free_pgd_fast(pgd_t *pgd)
 	srmmu_free_nocache((unsigned long)pgd, SRMMU_PGD_TABLE_SIZE);
 }
-static pte_t *srmmu_pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
+static pmd_t *srmmu_pmd_alloc_one(struct mm_struct *mm, unsigned long address)
 {
-	return (pte_t *)srmmu_get_nocache(SRMMU_PTE_TABLE_SIZE, SRMMU_PTE_TABLE_SIZE);
+	return (pmd_t *)srmmu_get_nocache(SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE);
 }
-static struct page *srmmu_pte_alloc_one(struct mm_struct *mm, unsigned long address)
+static void srmmu_pmd_free(pmd_t * pmd)
 {
-	return virt_to_page(srmmu_pte_alloc_one_kernel(mm, address));
+	srmmu_free_nocache((unsigned long)pmd, SRMMU_PMD_TABLE_SIZE);
 }
-static void srmmu_free_pte_fast(pte_t *pte)
+/*
+ * Hardware needs alignment to 256 only, but we align to whole page size
+ * to reduce fragmentation problems due to the buddy principle.
+ * XXX Provide actual fragmentation statistics in /proc.
+ *
+ * Alignments up to the page size are the same for physical and virtual
+ * addresses of the nocache area.
+ */
+static pte_t *
+srmmu_pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
 {
-	srmmu_free_nocache((unsigned long)pte, SRMMU_PTE_TABLE_SIZE);
+	return (pte_t *)srmmu_get_nocache(SRMMU_PTE_SZ_SOFT, SRMMU_PTE_SZ_SOFT);
 }
-static void srmmu_pte_free(struct page *pte)
+static struct page *
+srmmu_pte_alloc_one(struct mm_struct *mm, unsigned long address)
 {
-	srmmu_free_nocache((unsigned long)page_address(pte), SRMMU_PTE_TABLE_SIZE);
+	unsigned long pte;
+
+	if ((pte = (unsigned long)srmmu_pte_alloc_one_kernel(mm, address)) == 0)
+		return NULL;
+	return mem_map + (__nocache_pa(pte) >> PAGE_SHIFT);
 }
-static pmd_t *srmmu_pmd_alloc_one_fast(struct mm_struct *mm, unsigned long address)
+static void srmmu_free_pte_fast(pte_t *pte)
 {
-	return (pmd_t *)srmmu_get_nocache(SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE);
+	srmmu_free_nocache((unsigned long)pte, SRMMU_PTE_SZ_SOFT);
 }
-static void srmmu_free_pmd_fast(pmd_t * pmd)
+static void srmmu_pte_free(struct page *pte)
 {
-	srmmu_free_nocache((unsigned long)pmd, SRMMU_PMD_TABLE_SIZE);
+	unsigned long p = (unsigned long)page_address(pte);
+
+	if (p == 0)
+		BUG();
+	srmmu_free_nocache(p, SRMMU_PTE_SZ_SOFT);
 }
+/*
+ */
 static inline void alloc_context(struct mm_struct *old_mm, struct mm_struct *mm)
 {
 	struct ctx_list *ctxp;
@@ -966,7 +1022,8 @@ void __init srmmu_early_allocate_ptable_skeleton(unsigned long start, unsigned l
 	while(start < end) {
 		pgdp = pgd_offset_k(start);
 		if(srmmu_pgd_none(*(pgd_t *)__nocache_fix(pgdp))) {
-			pmdp = (pmd_t *)__srmmu_get_nocache(SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE);
+			pmdp = (pmd_t *) __srmmu_get_nocache(
+			    SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE);
 			if (pmdp == NULL)
 				early_pgtable_allocfail("pmd");
 			memset(__nocache_fix(pmdp), 0, SRMMU_PMD_TABLE_SIZE);
@@ -974,10 +1031,11 @@ void __init srmmu_early_allocate_ptable_skeleton(unsigned long start, unsigned l
 		}
 		pmdp = srmmu_pmd_offset(__nocache_fix(pgdp), start);
 		if(srmmu_pmd_none(*(pmd_t *)__nocache_fix(pmdp))) {
-			ptep = (pte_t *)__srmmu_get_nocache(SRMMU_PTE_TABLE_SIZE, SRMMU_PTE_TABLE_SIZE);
+			ptep = (pte_t *)__srmmu_get_nocache(SRMMU_PTE_SZ_SOFT,
+			    SRMMU_PTE_SZ_SOFT);
 			if (ptep == NULL)
 				early_pgtable_allocfail("pte");
-			memset(__nocache_fix(ptep), 0, SRMMU_PTE_TABLE_SIZE);
+			memset(__nocache_fix(ptep), 0, SRMMU_PTE_SZ_SOFT);
 			srmmu_pmd_set(__nocache_fix(pmdp), ptep);
 		}
 		start = (start + SRMMU_PMD_SIZE) & SRMMU_PMD_MASK;
@@ -1001,10 +1059,11 @@ void __init srmmu_allocate_ptable_skeleton(unsigned long start, unsigned long en
 		}
 		pmdp = srmmu_pmd_offset(pgdp, start);
 		if(srmmu_pmd_none(*pmdp)) {
-			ptep = (pte_t *)__srmmu_get_nocache(SRMMU_PTE_TABLE_SIZE, SRMMU_PTE_TABLE_SIZE);
+			ptep = (pte_t *) __srmmu_get_nocache(SRMMU_PTE_SZ_SOFT,
+			    SRMMU_PTE_SZ_SOFT);
 			if (ptep == NULL)
 				early_pgtable_allocfail("pte");
-			memset(ptep, 0, SRMMU_PTE_TABLE_SIZE);
+			memset(ptep, 0, SRMMU_PTE_SZ_SOFT);
 			srmmu_pmd_set(pmdp, ptep);
 		}
 		start = (start + SRMMU_PMD_SIZE) & SRMMU_PMD_MASK;
@@ -1062,18 +1121,26 @@ void __init srmmu_inherit_prom_mappings(unsigned long start,unsigned long end)
 			srmmu_pgd_set(__nocache_fix(pgdp), pmdp);
 		}
 		pmdp = srmmu_pmd_offset(__nocache_fix(pgdp), start);
-		if(what == 1) {
-			*(pmd_t *)__nocache_fix(pmdp) = __pmd(prompte);
-			start += SRMMU_PMD_SIZE;
-			continue;
-		}
 		if(srmmu_pmd_none(*(pmd_t *)__nocache_fix(pmdp))) {
-			ptep = (pte_t *)__srmmu_get_nocache(SRMMU_PTE_TABLE_SIZE, SRMMU_PTE_TABLE_SIZE);
+			ptep = (pte_t *) __srmmu_get_nocache(SRMMU_PTE_SZ_SOFT,
+			    SRMMU_PTE_SZ_SOFT);
 			if (ptep == NULL)
 				early_pgtable_allocfail("pte");
-			memset(__nocache_fix(ptep), 0, SRMMU_PTE_TABLE_SIZE);
+			memset(__nocache_fix(ptep), 0, SRMMU_PTE_SZ_SOFT);
 			srmmu_pmd_set(__nocache_fix(pmdp), ptep);
 		}
+		if(what == 1) {
+			/*
+			 * We bend the rule where all 16 PTPs in a pmd_t point
+			 * inside the same PTE page, and we leak a perfectly
+			 * good hardware PTE piece. Alternatives seem worse.
+			 */
+			unsigned int x;	/* Index of HW PMD in soft cluster */
+			x = (start >> SRMMU_PMD_SHIFT) & 15;
+			*(ulong *)__nocache_fix(&pmdp->pmdv[x]) = prompte;
+			start += SRMMU_PMD_SIZE;
+			continue;
+		}
 		ptep = srmmu_pte_offset(__nocache_fix(pmdp), start);
 		*(pte_t *)__nocache_fix(ptep) = __pte(prompte);
 		start += PAGE_SIZE;
@@ -2033,16 +2100,16 @@ void __init ld_mmu_srmmu(void)
 	extern void ld_mmu_iounit(void);
 	extern void ___xchg32_sun4md(void);
-	/* First the constants */
-	BTFIXUPSET_SIMM13(pmd_shift, SRMMU_PMD_SHIFT);
-	BTFIXUPSET_SETHI(pmd_size, SRMMU_PMD_SIZE);
-	BTFIXUPSET_SETHI(pmd_mask, SRMMU_PMD_MASK);
+	BTFIXUPSET_SIMM13(pmd_shift, SRMMU_PMD_SHIFT_SOFT);
+	BTFIXUPSET_SETHI(pmd_size, SRMMU_PMD_SIZE_SOFT);
+	BTFIXUPSET_SETHI(pmd_mask, SRMMU_PMD_MASK_SOFT);
 	BTFIXUPSET_SIMM13(pgdir_shift, SRMMU_PGDIR_SHIFT);
 	BTFIXUPSET_SETHI(pgdir_size, SRMMU_PGDIR_SIZE);
 	BTFIXUPSET_SETHI(pgdir_mask, SRMMU_PGDIR_MASK);
-	BTFIXUPSET_SIMM13(ptrs_per_pte, SRMMU_PTRS_PER_PTE);
-	BTFIXUPSET_SIMM13(ptrs_per_pmd, SRMMU_PTRS_PER_PMD);
+	BTFIXUPSET_SIMM13(ptrs_per_pte, SRMMU_PTRS_PER_PTE_SOFT);
+	BTFIXUPSET_SIMM13(ptrs_per_pmd, SRMMU_PTRS_PER_PMD_SOFT);
 	BTFIXUPSET_SIMM13(ptrs_per_pgd, SRMMU_PTRS_PER_PGD);
 	BTFIXUPSET_INT(page_none, pgprot_val(SRMMU_PAGE_NONE));
@@ -2090,12 +2157,13 @@ void __init ld_mmu_srmmu(void)
 	BTFIXUPSET_INT(pte_modify_mask, SRMMU_CHG_MASK);
 	BTFIXUPSET_CALL(pmd_offset, srmmu_pmd_offset, BTFIXUPCALL_NORM);
 	BTFIXUPSET_CALL(pte_offset_kernel, srmmu_pte_offset, BTFIXUPCALL_NORM);
+
 	BTFIXUPSET_CALL(free_pte_fast, srmmu_free_pte_fast, BTFIXUPCALL_NORM);
 	BTFIXUPSET_CALL(pte_free, srmmu_pte_free, BTFIXUPCALL_NORM);
 	BTFIXUPSET_CALL(pte_alloc_one_kernel, srmmu_pte_alloc_one_kernel, BTFIXUPCALL_NORM);
 	BTFIXUPSET_CALL(pte_alloc_one, srmmu_pte_alloc_one, BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(free_pmd_fast, srmmu_free_pmd_fast, BTFIXUPCALL_NORM);
-	BTFIXUPSET_CALL(pmd_alloc_one_fast, srmmu_pmd_alloc_one_fast, BTFIXUPCALL_NORM);
+	BTFIXUPSET_CALL(free_pmd_fast, srmmu_pmd_free, BTFIXUPCALL_NORM);
+	BTFIXUPSET_CALL(pmd_alloc_one, srmmu_pmd_alloc_one, BTFIXUPCALL_NORM);
 	BTFIXUPSET_CALL(free_pgd_fast, srmmu_free_pgd_fast, BTFIXUPCALL_NORM);
 	BTFIXUPSET_CALL(get_pgd_fast, srmmu_get_pgd_fast, BTFIXUPCALL_NORM);
...
@@ -1710,13 +1710,13 @@ static void sun4c_pgd_set(pgd_t * pgdp, pmd_t * pmdp)
 static void sun4c_pmd_set(pmd_t * pmdp, pte_t * ptep)
 {
-	*pmdp = __pmd(PGD_TABLE | (unsigned long) ptep);
+	pmdp->pmdv[0] = PGD_TABLE | (unsigned long) ptep;
 }
 static void sun4c_pmd_populate(pmd_t * pmdp, struct page * ptep)
 {
 	if (page_address(ptep) == NULL) BUG();	/* No highmem on sun4c */
-	*pmdp = __pmd(PGD_TABLE | (unsigned long) page_address(ptep));
+	pmdp->pmdv[0] = PGD_TABLE | (unsigned long) page_address(ptep);
 }
 static int sun4c_pte_present(pte_t pte)
@@ -1735,7 +1735,14 @@ static int sun4c_pmd_present(pmd_t pmd)
 {
 	return ((pmd_val(pmd) & PGD_PRESENT) != 0);
 }
+#if 0 /* if PMD takes one word */
 static void sun4c_pmd_clear(pmd_t *pmdp) { *pmdp = __pmd(0); }
+#else /* if pmd_t is a longish aggregate */
+static void sun4c_pmd_clear(pmd_t *pmdp) {
+	memset((void *)pmdp, 0, sizeof(pmd_t));
+}
+#endif
 static int sun4c_pgd_none(pgd_t pgd)	{ return 0; }
 static int sun4c_pgd_bad(pgd_t pgd)	{ return 0; }
@@ -1913,7 +1920,7 @@ static void sun4c_pte_free(struct page *pte)
  * allocating and freeing a pmd is trivial: the 1-entry pmd is
  * inside the pgd, so has no extra memory associated with it.
  */
-static pmd_t *sun4c_pmd_alloc_one_fast(struct mm_struct *mm, unsigned long address)
+static pmd_t *sun4c_pmd_alloc_one(struct mm_struct *mm, unsigned long address)
 {
 	BUG();
 	return NULL;
@@ -2176,7 +2183,7 @@ void __init ld_mmu_sun4c(void)
 	BTFIXUPSET_CALL(pte_alloc_one_kernel, sun4c_pte_alloc_one_kernel, BTFIXUPCALL_NORM);
 	BTFIXUPSET_CALL(pte_alloc_one, sun4c_pte_alloc_one, BTFIXUPCALL_NORM);
 	BTFIXUPSET_CALL(free_pmd_fast, sun4c_free_pmd_fast, BTFIXUPCALL_NOP);
-	BTFIXUPSET_CALL(pmd_alloc_one_fast, sun4c_pmd_alloc_one_fast, BTFIXUPCALL_RETO0);
+	BTFIXUPSET_CALL(pmd_alloc_one, sun4c_pmd_alloc_one, BTFIXUPCALL_RETO0);
 	BTFIXUPSET_CALL(free_pgd_fast, sun4c_free_pgd_fast, BTFIXUPCALL_NORM);
 	BTFIXUPSET_CALL(get_pgd_fast, sun4c_get_pgd_fast, BTFIXUPCALL_NORM);
...
@@ -674,10 +674,9 @@ int jsfd_init(void) {
 	return 0;
 }
-#ifdef MODULE
 MODULE_LICENSE("GPL");
-int init_module(void) {
+static int __init jsflash_init_module(void) {
 	int rc;
 	if ((rc = jsflash_init()) == 0) {
@@ -687,7 +686,7 @@ int init_module(void) {
 	return rc;
 }
-void cleanup_module(void) {
+static void __exit jsflash_cleanup_module(void) {
 	/* for (all probed units) { } */
 	if (jsf0.busy)
@@ -700,4 +699,6 @@ void cleanup_module(void) {
 		printk("jsfd: cleanup_module failed\n");
 	blk_cleanup_queue(BLK_DEFAULT_QUEUE(MAJOR_NR));
 }
-#endif
+
+module_init(jsflash_init_module);
+module_exit(jsflash_cleanup_module);
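
The jsflash change above is the 2.5 module-entry conversion: explicit init_module()/cleanup_module() under #ifdef MODULE become static __init/__exit functions registered with module_init()/module_exit(), which works both as a module and built in (where module_init() degrades to an ordinary initcall, replacing the CONFIG_SUN_JSFLASH hook removed from pcic.c above). A minimal sketch of the pattern:

	static int __init jsflash_init_module(void)
	{
		return jsflash_init();	/* the real driver also chains jsfd_init() */
	}
	static void __exit jsflash_cleanup_module(void)
	{
		/* unregister devices, blk_cleanup_queue(...) */
	}
	module_init(jsflash_init_module);
	module_exit(jsflash_cleanup_module);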
@@ -12,47 +12,108 @@
 #include <linux/brlock.h>
 #include <linux/spinlock.h>
-/* entry.S is sensitive to the offsets of these fields */
+/* entry.S is sensitive to the offsets of these fields */ /* XXX P3 Is it? */
 typedef struct {
 	unsigned int __softirq_pending;
 	unsigned int __unused_1;
 #ifndef CONFIG_SMP
-	unsigned int __local_irq_count;
+	unsigned int WAS__local_irq_count;
 #else
 	unsigned int __unused_on_SMP;	/* DaveM says use brlock for SMP irq. KAO */
 #endif
-	unsigned int __local_bh_count;
+	unsigned int WAS__local_bh_count;
 	unsigned int __syscall_count;
 	struct task_struct * __ksoftirqd_task;
 } ____cacheline_aligned irq_cpustat_t;
 #include <linux/irq_cpustat.h>	/* Standard mappings for irq_cpustat_t above */
-/* Note that local_irq_count() is replaced by sparc64 specific version for SMP */
-#ifndef CONFIG_SMP
-#define irq_enter(cpu, irq)	((void)(irq), local_irq_count(cpu)++)
-#define irq_exit(cpu, irq)	((void)(irq), local_irq_count(cpu)--)
-#else
-#undef local_irq_count
-#define local_irq_count(cpu)	(__brlock_array[cpu][BR_GLOBALIRQ_LOCK])
-#define irq_enter(cpu, irq)	br_read_lock(BR_GLOBALIRQ_LOCK)
-#define irq_exit(cpu, irq)	br_read_unlock(BR_GLOBALIRQ_LOCK)
-#endif
+/*
+ * We put the hardirq and softirq counter into the preemption
+ * counter. The bitmask has the following meaning:
+ *
+ * - bits 0-7 are the preemption count (max preemption depth: 256)
+ * - bits 8-15 are the softirq count (max # of softirqs: 256)
+ * - bits 16-23 are the hardirq count (max # of hardirqs: 256)
+ *
+ * - ( bit 26 is the PREEMPT_ACTIVE flag. )
+ *
+ * PREEMPT_MASK: 0x000000ff
+ * SOFTIRQ_MASK: 0x0000ff00
+ * HARDIRQ_MASK: 0x00ff0000
+ */
+
+#define PREEMPT_BITS	8
+#define SOFTIRQ_BITS	8
+#define HARDIRQ_BITS	8
+
+#define PREEMPT_SHIFT	0
+#define SOFTIRQ_SHIFT	(PREEMPT_SHIFT + PREEMPT_BITS)
+#define HARDIRQ_SHIFT	(SOFTIRQ_SHIFT + SOFTIRQ_BITS)
+
+#define __MASK(x)	((1UL << (x))-1)
+
+#define PREEMPT_MASK	(__MASK(PREEMPT_BITS) << PREEMPT_SHIFT)
+#define HARDIRQ_MASK	(__MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT)
+#define SOFTIRQ_MASK	(__MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT)
+
+#define hardirq_count()	(preempt_count() & HARDIRQ_MASK)
+#define softirq_count()	(preempt_count() & SOFTIRQ_MASK)
+#define irq_count()	(preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK))
+
+#define PREEMPT_OFFSET	(1UL << PREEMPT_SHIFT)
+#define SOFTIRQ_OFFSET	(1UL << SOFTIRQ_SHIFT)
+#define HARDIRQ_OFFSET	(1UL << HARDIRQ_SHIFT)
+
+/*
+ * The hardirq mask has to be large enough to have
+ * space for potentially all IRQ sources in the system
+ * nesting on a single CPU:
+ */
+#if (1 << HARDIRQ_BITS) < NR_IRQS
+# error HARDIRQ_BITS is too low!
+#endif
 /*
- * Are we in an interrupt context? Either doing bottom half
- * or hardware interrupt processing?
+ * Are we doing bottom half or hardware interrupt processing?
+ * Are we in a softirq context? Interrupt context?
  */
-#define in_interrupt() ((local_irq_count(smp_processor_id()) + \
-		local_bh_count(smp_processor_id())) != 0)
-
-/* This tests only the local processors hw IRQ context disposition. */
-#define in_irq() (local_irq_count(smp_processor_id()) != 0)
+#define in_irq()		(hardirq_count())
+#define in_softirq()		(softirq_count())
+#define in_interrupt()		(irq_count())
+
+#define hardirq_trylock()	(!in_interrupt())
+#define hardirq_endlock()	do { } while (0)
 #ifndef CONFIG_SMP
-#define hardirq_trylock(cpu)	((void)(cpu), local_irq_count(smp_processor_id()) == 0)
-#define hardirq_endlock(cpu)	do { (void)(cpu); } while(0)
+#define irq_enter()		(preempt_count() += HARDIRQ_OFFSET)
+
+#if CONFIG_PREEMPT
+# define IRQ_EXIT_OFFSET (HARDIRQ_OFFSET-1)
+#else
+# define IRQ_EXIT_OFFSET HARDIRQ_OFFSET
+#endif
+
+#define irq_exit()							\
+do {									\
+	preempt_count() -= IRQ_EXIT_OFFSET;				\
+	if (!in_interrupt() && softirq_pending(smp_processor_id()))	\
+		do_softirq();						\
+	preempt_enable_no_resched();					\
+} while (0)
+
+#else
+
+/* Note that local_irq_count() is replaced by sparc64 specific version for SMP */
+/* XXX This is likely to be broken by the above preempt-based IRQs */
+
+#define irq_enter()		br_read_lock(BR_GLOBALIRQ_LOCK)
+#undef local_irq_count
+#define local_irq_count(cpu)	(__brlock_array[cpu][BR_GLOBALIRQ_LOCK])
+#define irq_exit()		br_read_unlock(BR_GLOBALIRQ_LOCK)
+#endif
+
+#ifndef CONFIG_SMP
 #define synchronize_irq()	barrier()
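
Two details of the hardirq.h block above are worth spelling out. First, a worked example of the preempt_count() encoding (the value is invented for illustration):

	/* preempt_count() == 0x00010102:
	 *   hardirq_count() == 0x00010000  -> one hardirq nesting level
	 *   softirq_count() == 0x00000100  -> softirq processing active
	 *   preempt depth   == 2           -> preemption disabled twice
	 */

Second, the IRQ_EXIT_OFFSET trick (my reading, not stated in the commit): under CONFIG_PREEMPT, irq_exit() subtracts HARDIRQ_OFFSET-1, deliberately leaving one preemption count so that do_softirq() runs with preemption disabled; the trailing preempt_enable_no_resched() then drops that last count.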
@@ -79,17 +140,19 @@ static inline void release_irqlock(int cpu)
 	}
 }
+#if 0
 static inline int hardirq_trylock(int cpu)
 {
 	spinlock_t *lock = &__br_write_locks[BR_GLOBALIRQ_LOCK].lock;
 	return (!local_irq_count(cpu) && !spin_is_locked(lock));
 }
+#endif
-#define hardirq_endlock(cpu)	do { (void)(cpu); } while (0)
-extern void synchronize_irq(void);
+extern void synchronize_irq(unsigned int irq);
 #endif /* CONFIG_SMP */
+// extern void show_stack(unsigned long * esp);
 #endif /* __SPARC_HARDIRQ_H */
@@ -24,7 +24,7 @@
 #ifdef __KERNEL__
-#include <asm/head.h>	/* for KERNBASE */
+/* #include <asm/head.h> XXX */	/* for KERNBASE */
 #include <asm/btfixup.h>
 #ifndef __ASSEMBLY__
@@ -89,7 +89,7 @@ extern struct cache_palias *sparc_aliases;
  */
 typedef struct { unsigned long pte; } pte_t;
 typedef struct { unsigned long iopte; } iopte_t;
-typedef struct { unsigned long pmd; } pmd_t;
+typedef struct { unsigned long pmdv[16]; } pmd_t;
 typedef struct { unsigned long pgd; } pgd_t;
 typedef struct { unsigned long ctxd; } ctxd_t;
 typedef struct { unsigned long pgprot; } pgprot_t;
@@ -97,7 +97,7 @@ typedef struct { unsigned long iopgprot; } iopgprot_t;
 #define pte_val(x)	((x).pte)
 #define iopte_val(x)	((x).iopte)
-#define pmd_val(x)	((x).pmd)
+#define pmd_val(x)	((x).pmdv[0])
 #define pgd_val(x)	((x).pgd)
 #define ctxd_val(x)	((x).ctxd)
 #define pgprot_val(x)	((x).pgprot)
@@ -105,7 +105,7 @@ typedef struct { unsigned long iopgprot; } iopgprot_t;
 #define __pte(x)	((pte_t) { (x) } )
 #define __iopte(x)	((iopte_t) { (x) } )
-#define __pmd(x)	((pmd_t) { (x) } )
+/* #define __pmd(x)	((pmd_t) { (x) } ) */ /* XXX procedure with loop */
 #define __pgd(x)	((pgd_t) { (x) } )
 #define __ctxd(x)	((ctxd_t) { (x) } )
 #define __pgprot(x)	((pgprot_t) { (x) } )
@@ -117,7 +117,7 @@ typedef struct { unsigned long iopgprot; } iopgprot_t;
  */
 typedef unsigned long pte_t;
 typedef unsigned long iopte_t;
-typedef unsigned long pmd_t;
+typedef struct { unsigned long pmdv[16]; } pmd_t;
 typedef unsigned long pgd_t;
 typedef unsigned long ctxd_t;
 typedef unsigned long pgprot_t;
@@ -125,7 +125,7 @@ typedef unsigned long iopgprot_t;
 #define pte_val(x)	(x)
 #define iopte_val(x)	(x)
-#define pmd_val(x)	(x)
+#define pmd_val(x)	((x).pmdv[0])
 #define pgd_val(x)	(x)
 #define ctxd_val(x)	(x)
 #define pgprot_val(x)	(x)
@@ -133,7 +133,7 @@ typedef unsigned long iopgprot_t;
 #define __pte(x)	(x)
 #define __iopte(x)	(x)
-#define __pmd(x)	(x)
+/* #define __pmd(x)	(x) */ /* XXX later */
 #define __pgd(x)	(x)
 #define __ctxd(x)	(x)
 #define __pgprot(x)	(x)
...
@@ -40,13 +40,8 @@ BTFIXUPDEF_CALL(void, pgd_set, pgd_t *, pmd_t *)
 #define pgd_set(pgdp,pmdp) BTFIXUP_CALL(pgd_set)(pgdp,pmdp)
 #define pgd_populate(MM, PGD, PMD)	pgd_set(PGD, PMD)
-static __inline__ pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
-{
-	return 0;
-}
-
-BTFIXUPDEF_CALL(pmd_t *, pmd_alloc_one_fast, struct mm_struct *, unsigned long)
-#define pmd_alloc_one_fast(mm, address) BTFIXUP_CALL(pmd_alloc_one_fast)(mm, address)
+BTFIXUPDEF_CALL(pmd_t *, pmd_alloc_one, struct mm_struct *, unsigned long)
+#define pmd_alloc_one(mm, address) BTFIXUP_CALL(pmd_alloc_one)(mm, address)
 BTFIXUPDEF_CALL(void, free_pmd_fast, pmd_t *)
 #define free_pmd_fast(pmd)	BTFIXUP_CALL(free_pmd_fast)(pmd)
@@ -65,8 +60,7 @@ BTFIXUPDEF_CALL(pte_t *, pte_alloc_one_kernel, struct mm_struct *, unsigned long
 #define pte_alloc_one_kernel(mm, addr) BTFIXUP_CALL(pte_alloc_one_kernel)(mm, addr)
 BTFIXUPDEF_CALL(void, free_pte_fast, pte_t *)
-#define free_pte_fast(pte)	BTFIXUP_CALL(free_pte_fast)(pte)
-#define pte_free_kernel(pte)	free_pte_fast(pte)
+#define pte_free_kernel(pte)	BTFIXUP_CALL(free_pte_fast)(pte)
 BTFIXUPDEF_CALL(void, pte_free, struct page *)
 #define pte_free(pte)		BTFIXUP_CALL(pte_free)(pte)
...
@@ -11,9 +11,7 @@
 #include <linux/config.h>
 #include <linux/spinlock.h>
-/* XXX This creates many nasty warnings. */
-/* #include <linux/highmem.h> */	/* kmap_atomic in pte_offset_map */
-#include <asm/asi.h>
+/* #include <asm/asi.h> */	/* doesn't seem like being used XXX */
 #ifdef CONFIG_SUN4
 #include <asm/pgtsun4.h>
 #else
@@ -33,10 +31,6 @@ struct vm_area_struct;
 extern void load_mmu(void);
 extern unsigned long calc_highpages(void);
-BTFIXUPDEF_CALL(void, quick_kernel_fault, unsigned long)
-#define quick_kernel_fault(addr) BTFIXUP_CALL(quick_kernel_fault)(addr)
 /* Routines for data transfer buffers. */
 BTFIXUPDEF_CALL(char *, mmu_lockarea, char *, unsigned long)
 BTFIXUPDEF_CALL(void, mmu_unlockarea, char *, unsigned long)
@@ -189,14 +183,8 @@ extern unsigned long empty_zero_page;
 #define BAD_PAGE __bad_page()
 #define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
-/* number of bits that fit into a memory pointer */
-#define BITS_PER_PTR	(8*sizeof(unsigned long))
-
-/* to align the pointer to a pointer address */
-#define PTR_MASK	(~(sizeof(void*)-1))
-
-#define SIZEOF_PTR_LOG2	2
+/*
+ */
 BTFIXUPDEF_CALL_CONST(struct page *, pmd_page, pmd_t)
 BTFIXUPDEF_CALL_CONST(unsigned long, pgd_page, pgd_t)
@@ -340,20 +328,15 @@ BTFIXUPDEF_CALL(pmd_t *, pmd_offset, pgd_t *, unsigned long)
 BTFIXUPDEF_CALL(pte_t *, pte_offset_kernel, pmd_t *, unsigned long)
 #define pte_offset_kernel(dir,addr) BTFIXUP_CALL(pte_offset_kernel)(dir,addr)
-/* __pte_offset is not BTFIXUP-ed, but PTRS_PER_PTE is, so it's ok. */
-#define __pte_offset(address) \
-	(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
-
-#if 0 /* XXX Should we expose pmd_page_kernel? */
-#define pte_offset_kernel(dir, addr) \
-	((pte_t *) pmd_page_kernel(*(dir)) + __pte_offset(addr))
-#endif
-#define pte_offset_map(dir, addr) \
-	((pte_t *) kmap_atomic(pmd_page(*(dir)), KM_PTE0) + __pte_offset(addr))
-#define pte_offset_map_nested(dir, addr) \
-	((pte_t *) kmap_atomic(pmd_page(*(dir)), KM_PTE1) + __pte_offset(addr))
-
-#define pte_unmap(pte)		kunmap_atomic(pte, KM_PTE0)
-#define pte_unmap_nested(pte)	kunmap_atomic(pte, KM_PTE1)
+/*
+ * This shortcut works on sun4m (and sun4d) because the nocache area is static,
+ * and sun4c is guaranteed to have no highmem anyway.
+ */
+#define pte_offset_map(d, a)		pte_offset_kernel(d,a)
+#define pte_offset_map_nested(d, a)	pte_offset_kernel(d,a)
+
+#define pte_unmap(pte)		do{}while(0)
+#define pte_unmap_nested(pte)	do{}while(0)
 /* The permissions for pgprot_val to make a page mapped on the obio space */
 extern unsigned int pg_iobits;
...
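
Usage-wise, the pte_offset_map() shortcut above means a generic page-table walk costs nothing extra on sparc32 (a sketch; the walker shown is illustrative):

	pte_t *pte = pte_offset_map(pmdp, address);	/* just pte_offset_kernel() here */
	/* ... inspect or update *pte ... */
	pte_unmap(pte);					/* do{}while(0): nothing to unmap */

On architectures with highmem page tables these macros kmap_atomic() the PTE page; sparc32 can skip that because its page tables live in the static nocache area (sun4m/sun4d) or there is no highmem at all (sun4c).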
@@ -10,11 +10,11 @@
 #include <asm/page.h>
 #include <asm/thread_info.h>	/* TI_UWINMASK for WINDOW_FLUSH */
-/* PMD_SHIFT determines the size of the area a second-level page table can map */
+/* PMD_SHIFT determines the size of the area a second-level page table entry can map */
 #define SRMMU_PMD_SHIFT		18
 #define SRMMU_PMD_SIZE		(1UL << SRMMU_PMD_SHIFT)
 #define SRMMU_PMD_MASK		(~(SRMMU_PMD_SIZE-1))
-#define SRMMU_PMD_ALIGN(addr)	(((addr)+SRMMU_PMD_SIZE-1)&SRMMU_PMD_MASK)
+/* #define SRMMU_PMD_ALIGN(addr)	(((addr)+SRMMU_PMD_SIZE-1)&SRMMU_PMD_MASK) */
 /* PGDIR_SHIFT determines what a third-level page table entry can map */
 #define SRMMU_PGDIR_SHIFT	24
...
@@ -7,21 +7,25 @@
 #ifndef __SPARC_SOFTIRQ_H
 #define __SPARC_SOFTIRQ_H
-#include <linux/threads.h>	/* For NR_CPUS */
-#include <asm/atomic.h>
+// #include <linux/threads.h>	/* For NR_CPUS */
+// #include <asm/atomic.h>
 #include <asm/smp.h>
 #include <asm/hardirq.h>
-#define local_bh_disable()	(local_bh_count(smp_processor_id())++)
-#define __local_bh_enable()	(local_bh_count(smp_processor_id())--)
+#define local_bh_disable() \
+		do { preempt_count() += SOFTIRQ_OFFSET; barrier(); } while (0)
+#define __local_bh_enable() \
+		do { barrier(); preempt_count() -= SOFTIRQ_OFFSET; } while (0)
 #define local_bh_enable() \
 do { \
-	if (!--local_bh_count(smp_processor_id()) && \
-	    softirq_pending(smp_processor_id())) { \
+	__local_bh_enable(); \
+	if (!in_interrupt() && \
+	    softirq_pending(smp_processor_id())) { \
 		do_softirq(); \
-		local_irq_enable(); \
 	} \
+	preempt_check_resched(); \
 } while (0)
-#define in_softirq() (local_bh_count(smp_processor_id()) != 0)
 #endif /* __SPARC_SOFTIRQ_H */
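
A hedged usage sketch for the preempt-based bottom-half macros above (the data being protected is illustrative):

	local_bh_disable();	/* preempt_count() += SOFTIRQ_OFFSET */
	/* touch data shared with softirq context; softirqs cannot run here */
	local_bh_enable();	/* may run pending softirqs, then check resched */

Because the count now lives in preempt_count(), in_softirq()/in_interrupt() in hardirq.h see bh-disabled sections too, which is what lets the new local_bh_enable() gate do_softirq() on !in_interrupt().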
@@ -104,7 +104,7 @@ extern void fpsave(unsigned long *fpregs, unsigned long *fsr,
  * XXX prepare_arch_switch() is much smarter than this in sparc64, are we sure?
  * XXX Consider if doing it the flush_user_windows way is faster (by uwinmask).
  */
-#define prepare_arch_switch(rq) do { \
+#define prepare_arch_switch(rq, next) do { \
 	__asm__ __volatile__( \
 	".globl\tflush_patch_switch\nflush_patch_switch:\n\t" \
 	"save %sp, -0x40, %sp; save %sp, -0x40, %sp; save %sp, -0x40, %sp\n\t" \
@@ -112,7 +112,7 @@ extern void fpsave(unsigned long *fpregs, unsigned long *fsr,
 	"save %sp, -0x40, %sp\n\t" \
 	"restore; restore; restore; restore; restore; restore; restore"); \
 } while(0)
-#define finish_arch_switch(rq) do{ }while(0)
+#define finish_arch_switch(rq, next) do{ }while(0)
 /* Much care has gone into this code, do not touch it.
  *
...