Commit cd6f2953 authored by Linus Torvalds

Merge bk://bk.arm.linux.org.uk/linux-2.6-rmk
into ppc970.osdl.org:/home/torvalds/v2.6/linux
parents 8419e5fe 70cc3dfa
@@ -636,7 +636,7 @@ dmabounce_unregister_dev(struct device *dev)
 	}
 	if (!list_empty(&device_info->safe_buffers)) {
-		printk(KERN_ERR,
+		printk(KERN_ERR
 		       "%s: Removing from dmabounce with pending buffers!\n",
 		       dev->bus_id);
 		BUG();
...
This diff is collapsed.
This diff is collapsed.
@@ -219,9 +219,7 @@ static inline void dump_cache(const char *prefix, unsigned int cache)
 static void __init dump_cpu_info(void)
 {
-	unsigned int info;
-	asm("mrc p15, 0, %0, c0, c0, 1" : "=r" (info));
+	unsigned int info = read_cpuid(CPUID_CACHETYPE);
 	if (info != processor_id) {
 		printk("CPU: D %s cache\n", cache_types[CACHE_TYPE(info)]);
@@ -803,9 +801,7 @@ static int c_show(struct seq_file *m, void *v)
 	seq_printf(m, "CPU revision\t: %d\n", processor_id & 15);
 	{
-		unsigned int cache_info;
-		asm("mrc p15, 0, %0, c0, c0, 1" : "=r" (cache_info));
+		unsigned int cache_info = read_cpuid(CPUID_CACHETYPE);
 		if (cache_info != processor_id) {
 			seq_printf(m, "Cache type\t: %s\n"
 				   "Cache clean\t: %s\n"
...
@@ -40,7 +40,7 @@ ENTRY(_find_first_zero_bit_le)
  */
 ENTRY(_find_next_zero_bit_le)
 		teq	r1, #0
-		beq	2b
+		beq	3b
 		ands	ip, r2, #7
 		beq	1b			@ If new byte, goto old routine
 		ldrb	r3, [r0, r2, lsr #3]
@@ -74,7 +74,7 @@ ENTRY(_find_first_bit_le)
  */
 ENTRY(_find_next_bit_le)
 		teq	r1, #0
-		beq	2b
+		beq	3b
 		ands	ip, r2, #7
 		beq	1b			@ If new byte, goto old routine
 		ldrb	r3, [r0, r2, lsr #3]
@@ -101,15 +101,18 @@ ENTRY(_find_first_zero_bit_be)
 		RETINSTR(mov,pc,lr)
 ENTRY(_find_next_zero_bit_be)
+		teq	r1, #0
+		beq	3b
 		ands	ip, r2, #7
 		beq	1b			@ If new byte, goto old routine
 		eor	r3, r2, #0x18		@ big endian byte ordering
 		ldrb	r3, [r0, r3, lsr #3]
 		eor	r3, r3, #0xff		@ now looking for a 1 bit
 		movs	r3, r3, lsr ip		@ shift off unused bits
-		orreq	r2, r2, #7		@ if zero, then no bits here
-		addeq	r2, r2, #1		@ align bit pointer
-		beq	2b			@ loop for next bit
+		bne	.found
+		orr	r2, r2, #7		@ if zero, then no bits here
+		add	r2, r2, #1		@ align bit pointer
+		b	2b			@ loop for next bit
 ENTRY(_find_first_bit_be)
 		teq	r1, #0
@@ -126,14 +129,17 @@ ENTRY(_find_first_bit_be)
 		RETINSTR(mov,pc,lr)
 ENTRY(_find_next_bit_be)
+		teq	r1, #0
+		beq	3b
 		ands	ip, r2, #7
 		beq	1b			@ If new byte, goto old routine
 		eor	r3, r2, #0x18		@ big endian byte ordering
 		ldrb	r3, [r0, r3, lsr #3]
 		movs	r3, r3, lsr ip		@ shift off unused bits
-		orreq	r2, r2, #7		@ if zero, then no bits here
-		addeq	r2, r2, #1		@ align bit pointer
-		beq	2b			@ loop for next bit
+		bne	.found
+		orr	r2, r2, #7		@ if zero, then no bits here
+		add	r2, r2, #1		@ align bit pointer
+		b	2b			@ loop for next bit
 #endif
...
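For readers who do not want to trace the assembly, the contract these routines implement can be sketched in C. This is an illustrative model only, not the kernel implementation: the base pointer, the size in bits and the starting offset correspond to r0, r1 and r2 above, and the early return covers the zero-size case that the teq r1, #0 / beq 3b guards added here handle.

/* Illustrative C model of the find_next_zero_bit contract (LE bit order). */
static unsigned long find_next_zero_bit_model(const unsigned char *p,
					      unsigned long size,
					      unsigned long offset)
{
	if (offset >= size)		/* includes the size == 0 case */
		return size;

	while (offset < size) {
		unsigned char byte = p[offset >> 3];

		if (!(byte & (1u << (offset & 7))))
			return offset;	/* found a zero bit */
		offset++;
	}
	return size;			/* no zero bit below 'size' */
}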
@@ -3,7 +3,7 @@
 #
 obj-y		:= consistent.o extable.o fault-armv.o \
-		   fault-common.o init.o ioremap.o mm-armv.o
+		   fault.o init.o ioremap.o mmap.o mm-armv.o
 obj-$(CONFIG_MODULES) += proc-syms.o
...
@@ -10,124 +10,15 @@
  */
 #include <linux/sched.h>
 #include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/ptrace.h>
 #include <linux/mm.h>
 #include <linux/bitops.h>
 #include <linux/vmalloc.h>
 #include <linux/init.h>
 #include <asm/cacheflush.h>
-#include <asm/io.h>
 #include <asm/pgtable.h>
 #include <asm/tlbflush.h>
-#include "fault.h"
-
-/*
- * Some section permission faults need to be handled gracefully.
- * They can happen due to a __{get,put}_user during an oops.
- */
-static int
-do_sect_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
-{
-	struct task_struct *tsk = current;
-	do_bad_area(tsk, tsk->active_mm, addr, fsr, regs);
-	return 0;
-}
-
-/*
- * This abort handler always returns "fault".
- */
-static int
-do_bad(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
-{
-	return 1;
-}
-
-static struct fsr_info {
-	int (*fn)(unsigned long addr, unsigned int fsr, struct pt_regs *regs);
-	int sig;
-	const char *name;
-} fsr_info[] = {
-	/*
-	 * The following are the standard ARMv3 and ARMv4 aborts.  ARMv5
-	 * defines these to be "precise" aborts.
-	 */
-	{ do_bad,		SIGSEGV, "vector exception" },
-	{ do_bad,		SIGILL,  "alignment exception" },
-	{ do_bad,		SIGKILL, "terminal exception" },
-	{ do_bad,		SIGILL,  "alignment exception" },
-	{ do_bad,		SIGBUS,  "external abort on linefetch" },
-	{ do_translation_fault,	SIGSEGV, "section translation fault" },
-	{ do_bad,		SIGBUS,  "external abort on linefetch" },
-	{ do_page_fault,	SIGSEGV, "page translation fault" },
-	{ do_bad,		SIGBUS,  "external abort on non-linefetch" },
-	{ do_bad,		SIGSEGV, "section domain fault" },
-	{ do_bad,		SIGBUS,  "external abort on non-linefetch" },
-	{ do_bad,		SIGSEGV, "page domain fault" },
-	{ do_bad,		SIGBUS,  "external abort on translation" },
-	{ do_sect_fault,	SIGSEGV, "section permission fault" },
-	{ do_bad,		SIGBUS,  "external abort on translation" },
-	{ do_page_fault,	SIGSEGV, "page permission fault" },
-	/*
-	 * The following are "imprecise" aborts, which are signalled by bit
-	 * 10 of the FSR, and may not be recoverable.  These are only
-	 * supported if the CPU abort handler supports bit 10.
-	 */
-	{ do_bad,		SIGBUS,  "unknown 16" },
-	{ do_bad,		SIGBUS,  "unknown 17" },
-	{ do_bad,		SIGBUS,  "unknown 18" },
-	{ do_bad,		SIGBUS,  "unknown 19" },
-	{ do_bad,		SIGBUS,  "lock abort" },		/* xscale */
-	{ do_bad,		SIGBUS,  "unknown 21" },
-	{ do_bad,		SIGBUS,  "imprecise external abort" },	/* xscale */
-	{ do_bad,		SIGBUS,  "unknown 23" },
-	{ do_bad,		SIGBUS,  "dcache parity error" },	/* xscale */
-	{ do_bad,		SIGBUS,  "unknown 25" },
-	{ do_bad,		SIGBUS,  "unknown 26" },
-	{ do_bad,		SIGBUS,  "unknown 27" },
-	{ do_bad,		SIGBUS,  "unknown 28" },
-	{ do_bad,		SIGBUS,  "unknown 29" },
-	{ do_bad,		SIGBUS,  "unknown 30" },
-	{ do_bad,		SIGBUS,  "unknown 31" }
-};
-
-void __init
-hook_fault_code(int nr, int (*fn)(unsigned long, unsigned int, struct pt_regs *),
-		int sig, const char *name)
-{
-	if (nr >= 0 && nr < ARRAY_SIZE(fsr_info)) {
-		fsr_info[nr].fn = fn;
-		fsr_info[nr].sig = sig;
-		fsr_info[nr].name = name;
-	}
-}
-
-/*
- * Dispatch a data abort to the relevant handler.
- */
-asmlinkage void
-do_DataAbort(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
-{
-	const struct fsr_info *inf = fsr_info + (fsr & 15) + ((fsr & (1 << 10)) >> 6);
-
-	if (!inf->fn(addr, fsr, regs))
-		return;
-
-	printk(KERN_ALERT "Unhandled fault: %s (0x%03x) at 0x%08lx\n",
-		inf->name, fsr, addr);
-	force_sig(inf->sig, current);
-	show_pte(current->mm, addr);
-	die_if_kernel("Oops", regs, 0);
-}
-
-asmlinkage void
-do_PrefetchAbort(unsigned long addr, struct pt_regs *regs)
-{
-	do_translation_fault(addr, 0, regs);
-}
 static unsigned long shared_pte_mask = L_PTE_CACHEABLE;
 /*
...
 /*
- * linux/arch/arm/mm/fault-common.c
+ * linux/arch/arm/mm/fault.c
  *
  * Copyright (C) 1995 Linus Torvalds
- * Modifications for ARM processor (c) 1995-2001 Russell King
+ * Modifications for ARM processor (c) 1995-2004 Russell King
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -11,11 +11,8 @@
 #include <linux/config.h>
 #include <linux/module.h>
 #include <linux/signal.h>
-#include <linux/sched.h>
-#include <linux/string.h>
 #include <linux/ptrace.h>
 #include <linux/mm.h>
-#include <linux/interrupt.h>
 #include <linux/init.h>
 #include <asm/system.h>
@@ -25,20 +22,6 @@
 #include "fault.h"
-
-#ifdef CONFIG_CPU_26
-#define FAULT_CODE_WRITE	0x02
-#define FAULT_CODE_FORCECOW	0x01
-#define DO_COW(m)		((m) & (FAULT_CODE_WRITE|FAULT_CODE_FORCECOW))
-#define READ_FAULT(m)		(!((m) & FAULT_CODE_WRITE))
-#else
-/*
- * "code" is actually the FSR register.  Bit 11 set means the
- * instruction was performing a write.
- */
-#define DO_COW(code)		((code) & (1 << 11))
-#define READ_FAULT(code)	(!DO_COW(code))
-#endif
 /*
  * This is useful to dump out the page tables associated with
  * 'addr' in mm 'mm'.
@@ -186,10 +169,10 @@ __do_page_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
 	 * memory access, so we can handle it.
 	 */
 good_area:
-	if (READ_FAULT(fsr)) /* read? */
-		mask = VM_READ|VM_EXEC;
-	else
-		mask = VM_WRITE;
+	if (fsr & (1 << 11)) /* write? */
+		mask = VM_WRITE;
+	else
+		mask = VM_READ|VM_EXEC;
 	fault = VM_FAULT_BADACCESS;
 	if (!(vma->vm_flags & mask))
@@ -201,7 +184,7 @@ __do_page_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
 	 * than endlessly redo the fault.
 	 */
 survive:
-	fault = handle_mm_fault(mm, vma, addr & PAGE_MASK, DO_COW(fsr));
+	fault = handle_mm_fault(mm, vma, addr & PAGE_MASK, fsr & (1 << 11));
 	/*
 	 * Handle the "normal" cases first - successful and sigbus
@@ -233,7 +216,8 @@ __do_page_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
 	return fault;
 }
-int do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
+static int
+do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 {
 	struct task_struct *tsk;
 	struct mm_struct *mm;
@@ -332,8 +316,9 @@ int do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 	 * interrupt or a critical region, and should only copy the information
 	 * from the master page table, nothing more.
 	 */
-int do_translation_fault(unsigned long addr, unsigned int fsr,
-			 struct pt_regs *regs)
+static int
+do_translation_fault(unsigned long addr, unsigned int fsr,
+		     struct pt_regs *regs)
 {
 	struct task_struct *tsk;
 	unsigned int index;
@@ -372,3 +357,108 @@ int do_translation_fault(unsigned long addr, unsigned int fsr,
 	do_bad_area(tsk, tsk->active_mm, addr, fsr, regs);
 	return 0;
 }
+
+/*
+ * Some section permission faults need to be handled gracefully.
+ * They can happen due to a __{get,put}_user during an oops.
+ */
+static int
+do_sect_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
+{
+	struct task_struct *tsk = current;
+	do_bad_area(tsk, tsk->active_mm, addr, fsr, regs);
+	return 0;
+}
+
+/*
+ * This abort handler always returns "fault".
+ */
+static int
+do_bad(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
+{
+	return 1;
+}
+
+static struct fsr_info {
+	int (*fn)(unsigned long addr, unsigned int fsr, struct pt_regs *regs);
+	int sig;
+	const char *name;
+} fsr_info[] = {
+	/*
+	 * The following are the standard ARMv3 and ARMv4 aborts.  ARMv5
+	 * defines these to be "precise" aborts.
+	 */
+	{ do_bad,		SIGSEGV, "vector exception" },
+	{ do_bad,		SIGILL,  "alignment exception" },
+	{ do_bad,		SIGKILL, "terminal exception" },
+	{ do_bad,		SIGILL,  "alignment exception" },
+	{ do_bad,		SIGBUS,  "external abort on linefetch" },
+	{ do_translation_fault,	SIGSEGV, "section translation fault" },
+	{ do_bad,		SIGBUS,  "external abort on linefetch" },
+	{ do_page_fault,	SIGSEGV, "page translation fault" },
+	{ do_bad,		SIGBUS,  "external abort on non-linefetch" },
+	{ do_bad,		SIGSEGV, "section domain fault" },
+	{ do_bad,		SIGBUS,  "external abort on non-linefetch" },
+	{ do_bad,		SIGSEGV, "page domain fault" },
+	{ do_bad,		SIGBUS,  "external abort on translation" },
+	{ do_sect_fault,	SIGSEGV, "section permission fault" },
+	{ do_bad,		SIGBUS,  "external abort on translation" },
+	{ do_page_fault,	SIGSEGV, "page permission fault" },
+	/*
+	 * The following are "imprecise" aborts, which are signalled by bit
+	 * 10 of the FSR, and may not be recoverable.  These are only
+	 * supported if the CPU abort handler supports bit 10.
+	 */
+	{ do_bad,		SIGBUS,  "unknown 16" },
+	{ do_bad,		SIGBUS,  "unknown 17" },
+	{ do_bad,		SIGBUS,  "unknown 18" },
+	{ do_bad,		SIGBUS,  "unknown 19" },
+	{ do_bad,		SIGBUS,  "lock abort" },		/* xscale */
+	{ do_bad,		SIGBUS,  "unknown 21" },
+	{ do_bad,		SIGBUS,  "imprecise external abort" },	/* xscale */
+	{ do_bad,		SIGBUS,  "unknown 23" },
+	{ do_bad,		SIGBUS,  "dcache parity error" },	/* xscale */
+	{ do_bad,		SIGBUS,  "unknown 25" },
+	{ do_bad,		SIGBUS,  "unknown 26" },
+	{ do_bad,		SIGBUS,  "unknown 27" },
+	{ do_bad,		SIGBUS,  "unknown 28" },
+	{ do_bad,		SIGBUS,  "unknown 29" },
+	{ do_bad,		SIGBUS,  "unknown 30" },
+	{ do_bad,		SIGBUS,  "unknown 31" }
+};
+
+void __init
+hook_fault_code(int nr, int (*fn)(unsigned long, unsigned int, struct pt_regs *),
+		int sig, const char *name)
+{
+	if (nr >= 0 && nr < ARRAY_SIZE(fsr_info)) {
+		fsr_info[nr].fn = fn;
+		fsr_info[nr].sig = sig;
+		fsr_info[nr].name = name;
+	}
+}
+
+/*
+ * Dispatch a data abort to the relevant handler.
+ */
+asmlinkage void
+do_DataAbort(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
+{
+	const struct fsr_info *inf = fsr_info + (fsr & 15) + ((fsr & (1 << 10)) >> 6);
+
+	if (!inf->fn(addr, fsr, regs))
+		return;
+
+	printk(KERN_ALERT "Unhandled fault: %s (0x%03x) at 0x%08lx\n",
+		inf->name, fsr, addr);
+	force_sig(inf->sig, current);
+	show_pte(current->mm, addr);
+	die_if_kernel("Oops", regs, 0);
+}
+
+asmlinkage void
+do_PrefetchAbort(unsigned long addr, struct pt_regs *regs)
+{
+	do_translation_fault(addr, 0, regs);
+}
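A side note on the table lookup in do_DataAbort() above (explanation only, not part of the change): the handler index is the low four status bits of the FSR, pushed into the upper half of the 32-entry fsr_info[] table when bit 10 (the "imprecise abort" bit) is set, because (fsr & (1 << 10)) >> 6 evaluates to exactly 16 in that case. A small standalone check with made-up FSR values:

#include <assert.h>

/* Mirrors the index computation used by do_DataAbort(). */
static unsigned int fsr_index(unsigned int fsr)
{
	return (fsr & 15) + ((fsr & (1 << 10)) >> 6);
}

int main(void)
{
	assert(fsr_index(0x005) == 5);		/* section translation fault      */
	assert(fsr_index(0x80f) == 15);		/* bit 11 (write) does not shift  */
	assert(fsr_index(0x406) == 22);		/* bit 10 -> "imprecise" half     */
	return 0;
}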
@@ -3,8 +3,4 @@ void do_bad_area(struct task_struct *tsk, struct mm_struct *mm,
 void show_pte(struct mm_struct *mm, unsigned long addr);
-int do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs);
-
-int do_translation_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs);
-
 unsigned long search_exception_table(unsigned long addr);
@@ -28,8 +28,9 @@
 #include <asm/io.h>
 #include <asm/tlbflush.h>
-static inline void remap_area_pte(pte_t * pte, unsigned long address, unsigned long size,
-	unsigned long phys_addr, pgprot_t pgprot)
+static inline void
+remap_area_pte(pte_t * pte, unsigned long address, unsigned long size,
+	       unsigned long phys_addr, pgprot_t pgprot)
 {
 	unsigned long end;
@@ -37,22 +38,26 @@ static inline void remap_area_pte(pte_t * pte, unsigned long address, unsigned l
 	end = address + size;
 	if (end > PMD_SIZE)
 		end = PMD_SIZE;
-	if (address >= end)
-		BUG();
+	BUG_ON(address >= end);
 	do {
-		if (!pte_none(*pte)) {
-			printk("remap_area_pte: page already exists\n");
-			BUG();
-		}
+		if (!pte_none(*pte))
+			goto bad;
 		set_pte(pte, pfn_pte(phys_addr >> PAGE_SHIFT, pgprot));
 		address += PAGE_SIZE;
 		phys_addr += PAGE_SIZE;
 		pte++;
 	} while (address && (address < end));
+	return;
+
+ bad:
+	printk("remap_area_pte: page already exists\n");
+	BUG();
 }
-static inline int remap_area_pmd(pmd_t * pmd, unsigned long address, unsigned long size,
-	unsigned long phys_addr, unsigned long flags)
+static inline int
+remap_area_pmd(pmd_t * pmd, unsigned long address, unsigned long size,
+	       unsigned long phys_addr, unsigned long flags)
 {
 	unsigned long end;
 	pgprot_t pgprot;
@@ -64,8 +69,7 @@ static inline int remap_area_pmd(pmd_t * pmd, unsigned long address, unsigned lo
 	end = PGDIR_SIZE;
 	phys_addr -= address;
-	if (address >= end)
-		BUG();
+	BUG_ON(address >= end);
 	pgprot = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | L_PTE_WRITE | flags);
 	do {
@@ -79,35 +83,38 @@ static inline int remap_area_pmd(pmd_t * pmd, unsigned long address, unsigned lo
 	return 0;
 }
-static int remap_area_pages(unsigned long address, unsigned long phys_addr,
-	unsigned long size, unsigned long flags)
+static int
+remap_area_pages(unsigned long start, unsigned long phys_addr,
+		 unsigned long size, unsigned long flags)
 {
-	int error;
+	unsigned long address = start;
+	unsigned long end = start + size;
+	int err = 0;
 	pgd_t * dir;
-	unsigned long end = address + size;
 	phys_addr -= address;
 	dir = pgd_offset(&init_mm, address);
-	flush_cache_all();
-	if (address >= end)
-		BUG();
+	BUG_ON(address >= end);
 	spin_lock(&init_mm.page_table_lock);
 	do {
-		pmd_t *pmd;
-		pmd = pmd_alloc(&init_mm, dir, address);
-		error = -ENOMEM;
-		if (!pmd)
+		pmd_t *pmd = pmd_alloc(&init_mm, dir, address);
+		if (!pmd) {
+			err = -ENOMEM;
 			break;
+		}
 		if (remap_area_pmd(pmd, address, end - address,
-				   phys_addr + address, flags))
+				   phys_addr + address, flags)) {
+			err = -ENOMEM;
 			break;
-		error = 0;
+		}
 		address = (address + PGDIR_SIZE) & PGDIR_MASK;
 		dir++;
 	} while (address && (address < end));
 	spin_unlock(&init_mm.page_table_lock);
-	flush_tlb_all();
-	return error;
+	flush_cache_vmap(start, end);
+	return err;
 }
 /*
...
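For context, a minimal sketch of how a driver would consume this remapping code through the usual ioremap()/iounmap() interface. The physical base, window size and register offset are made-up example values, not taken from the patch:

#include <linux/errno.h>
#include <asm/io.h>

static void *example_regs;

static int example_map_device(void)
{
	example_regs = ioremap(0xc0000000, 0x1000);	/* hypothetical device window */
	if (!example_regs)
		return -ENOMEM;

	writel(1, example_regs + 0x04);			/* hypothetical enable register */
	return 0;
}

static void example_unmap_device(void)
{
	iounmap(example_regs);
}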
/*
 * linux/arch/arm/mm/mmap.c
 */
#include <linux/config.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/shm.h>

#include <asm/system.h>

#define COLOUR_ALIGN(addr,pgoff)		\
	((((addr)+SHMLBA-1)&~(SHMLBA-1)) +	\
	 (((pgoff)<<PAGE_SHIFT) & (SHMLBA-1)))

/*
 * We need to ensure that shared mappings are correctly aligned to
 * avoid aliasing issues with VIPT caches.  We need to ensure that
 * a specific page of an object is always mapped at a multiple of
 * SHMLBA bytes.
 *
 * We unconditionally provide this function for all cases, however
 * in the VIVT case, we optimise out the alignment rules.
 */
unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long start_addr;
#ifdef CONFIG_CPU_V6
	unsigned int cache_type;
	int do_align = 0, aliasing = 0;

	/*
	 * We only need to do colour alignment if either the I or D
	 * caches alias.  This is indicated by bits 9 and 21 of the
	 * cache type register.
	 */
	cache_type = read_cpuid(CPUID_CACHETYPE);
	if (cache_type != read_cpuid(CPUID_ID)) {
		aliasing = (cache_type | cache_type >> 12) & (1 << 9);
		if (aliasing)
			do_align = filp || flags & MAP_SHARED;
	}
#else
#define do_align 0
#define aliasing 0
#endif

	/*
	 * We should enforce the MAP_FIXED case.  However, currently
	 * the generic kernel code doesn't allow us to handle this.
	 */
	if (flags & MAP_FIXED) {
		if (aliasing && flags & MAP_SHARED && addr & (SHMLBA - 1))
			return -EINVAL;
		return addr;
	}

	if (len > TASK_SIZE)
		return -ENOMEM;

	if (addr) {
		if (do_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}

	start_addr = addr = mm->free_area_cache;

full_search:
	if (do_align)
		addr = COLOUR_ALIGN(addr, pgoff);
	else
		addr = PAGE_ALIGN(addr);

	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
		/* At this point: (!vma || addr < vma->vm_end). */
		if (TASK_SIZE - len < addr) {
			/*
			 * Start a new search - just in case we missed
			 * some holes.
			 */
			if (start_addr != TASK_UNMAPPED_BASE) {
				start_addr = addr = TASK_UNMAPPED_BASE;
				goto full_search;
			}
			return -ENOMEM;
		}
		if (!vma || addr + len <= vma->vm_start) {
			/*
			 * Remember the place where we stopped the search:
			 */
			mm->free_area_cache = addr + len;
			return addr;
		}
		addr = vma->vm_end;
		if (do_align)
			addr = COLOUR_ALIGN(addr, pgoff);
	}
}
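To make the colour arithmetic concrete, here is a small user-space check of the same rounding COLOUR_ALIGN() performs, assuming 4 KB pages and the 16 KB SHMLBA introduced in the shmparam.h hunk further down; names with an _EX suffix are local to this example and not part of the patch:

#include <assert.h>

#define SHMLBA_EX	0x4000UL	/* 4 * 4 KB pages (assumed) */
#define PAGE_SHIFT_EX	12		/* 4 KB pages (assumed) */
#define COLOUR_ALIGN_EX(addr, pgoff)				\
	((((addr) + SHMLBA_EX - 1) & ~(SHMLBA_EX - 1)) +	\
	 (((pgoff) << PAGE_SHIFT_EX) & (SHMLBA_EX - 1)))

int main(void)
{
	unsigned long addr = COLOUR_ALIGN_EX(0x40005000UL, 3);

	/* Rounded up to 0x40008000, then offset by page colour 3. */
	assert(addr == 0x4000b000UL);
	/* The returned address keeps the same colour as the file offset. */
	assert((addr & (SHMLBA_EX - 1)) ==
	       ((3UL << PAGE_SHIFT_EX) & (SHMLBA_EX - 1)));
	return 0;
}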
@@ -20,7 +20,8 @@
 #include <linux/sched.h>
 #include <linux/oprofile.h>
 #include <linux/interrupt.h>
-#include <asm/arch/irqs.h>
+#include <asm/irq.h>
+#include <asm/system.h>
 #include "op_counter.h"
 #include "op_arm_model.h"
@@ -399,8 +400,7 @@ static int xscale_detect_pmu(void)
 	int ret = 0;
 	u32 id;
-	__asm__ __volatile__ ("mrc p15, 0, %0, c0, c0, 0" : "=r" (id));
-	id = (id >> 13) & 0x7;
+	id = (read_cpuid(CPUID_ID) >> 13) & 0x7;
 	switch (id) {
 	case 1:
...
@@ -80,7 +80,7 @@ config SERIO_PARKBD
 config SERIO_RPCKBD
 	tristate "Acorn RiscPC keyboard controller"
-	depends on ARCH_ACORN && SERIO
+	depends on (ARCH_ACORN || ARCH_CLPS7500) && SERIO
 	default y
 	help
 	  Say Y here if you have the Acorn RiscPC and want to use an AT
@@ -91,7 +91,7 @@ config SERIO_RPCKBD
 config SERIO_AMBAKMI
 	tristate "AMBA KMI keyboard controller"
-	depends on ARCH_INTEGRATOR && SERIO
+	depends on ARM_AMBA && SERIO
 config SERIO_SA1111
 	tristate "Intel SA1111 keyboard controller"
...
@@ -7,6 +7,8 @@
  * Deep Blue Solutions Ltd.
  * Copyright 2001 Altera Corporation
  *
+ * Update for 2.6.4 by Dirk Behme <dirk.behme@de.bosch.com>
+ *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
  * the Free Software Foundation; either version 2 of the License, or
@@ -32,7 +34,6 @@
 #include <linux/serial.h>
 #include <linux/console.h>
 #include <linux/sysrq.h>
-#include <linux/pld/pld_hotswap.h>
 #include <asm/io.h>
 #include <asm/irq.h>
@@ -251,7 +252,7 @@ static void uart00_modem_status(struct uart_port *port)
 	wake_up_interruptible(&port->info->delta_msr_wait);
 }
-static void uart00_int(int irq, void *dev_id, struct pt_regs *regs)
+static irqreturn_t uart00_int(int irq, void *dev_id, struct pt_regs *regs)
 {
 	struct uart_port *port = dev_id;
 	unsigned int status, pass_counter = 0;
@@ -269,6 +270,8 @@ static void uart00_int(int irq, void *dev_id, struct pt_regs *regs)
 		status = UART_GET_INT_STATUS(port);
 	} while (status);
+
+	return IRQ_HANDLED;
 }
 static unsigned int uart00_tx_empty(struct uart_port *port)
@@ -613,7 +616,7 @@ uart00_console_get_options(struct uart_port *port, int *baud,
 static int __init uart00_console_setup(struct console *co, char *options)
 {
 	struct uart_port *port;
-	int baud = 38400;
+	int baud = 115200;
 	int bits = 8;
 	int parity = 'n';
 	int flow = 'n';
@@ -639,7 +642,7 @@ static struct console uart00_console = {
 	.setup = uart00_console_setup,
 	.flags = CON_PRINTBUFFER,
 	.index = 0,
-	.data = &uart00_reg;
+	.data = &uart00_reg,
 };
 static int __init uart00_console_init(void)
@@ -669,9 +672,10 @@ struct dev_port_entry{
 	struct uart_port *port;
 };
-#ifdef CONFIG_PLD_HOTSWAP
 static struct dev_port_entry dev_port_map[UART_NR];
+#ifdef CONFIG_PLD_HOTSWAP
 /*
  * Keep a mapping of dev_info addresses -> port lines to use when
  * removing ports dev==NULL indicates unused entry
...
@@ -67,12 +67,37 @@
  */
 #define NR_MONTYPES	6
 static struct fb_monspecs monspecs[NR_MONTYPES] __initdata = {
-	{ 15469, 15781, 49, 51, 0 },	/* TV		*/
-	{ 0, 99999, 0, 199, 0 },	/* Multi Freq	*/
-	{ 58608, 58608, 64, 64, 0 },	/* Hi-res mono	*/
-	{ 30000, 70000, 60, 60, 0 },	/* VGA		*/
-	{ 30000, 70000, 56, 75, 0 },	/* SVGA		*/
-	{ 30000, 70000, 60, 60, 0 }
+	{	/* TV */
+		.hfmin = 15469,
+		.hfmax = 15781,
+		.vfmin = 49,
+		.vfmax = 51,
+	}, {	/* Multi Freq */
+		.hfmin = 0,
+		.hfmax = 99999,
+		.vfmin = 0,
+		.vfmax = 199,
+	}, {	/* Hi-res mono */
+		.hfmin = 58608,
+		.hfmax = 58608,
+		.vfmin = 64,
+		.vfmax = 64,
+	}, {	/* VGA */
+		.hfmin = 30000,
+		.hfmax = 70000,
+		.vfmin = 60,
+		.vfmax = 60,
+	}, {	/* SVGA */
+		.hfmin = 30000,
+		.hfmax = 70000,
+		.vfmin = 56,
+		.vfmax = 75,
+	}, {
+		.hfmin = 30000,
+		.hfmax = 70000,
+		.vfmin = 60,
+		.vfmax = 60,
+	}
 };
 static struct fb_info fb_info;
...
@@ -1615,7 +1615,10 @@ static int __init sa1100fb_map_video_memory(struct sa1100fb_info *fbi)
 /* Fake monspecs to fill in fbinfo structure */
 static struct fb_monspecs monspecs __initdata = {
-	30000, 70000, 50, 65, 0		/* Generic */
+	.hfmin = 30000,
+	.hfmax = 70000,
+	.vfmin = 50,
+	.vfmax = 65,
 };
...
@@ -88,7 +88,7 @@ static inline int atomic_dec_and_test(atomic_t *v)
 "	strex	%1, %0, [%2]\n"
 "	teq	%1, #0\n"
 "	bne	1b"
-	: "=&r" (result), "=r" (tmp)
+	: "=&r" (result), "=&r" (tmp)
 	: "r" (&v->counter)
 	: "cc");
@@ -106,7 +106,7 @@ static inline int atomic_add_negative(int i, atomic_t *v)
 "	strex	%1, %0, [%2]\n"
 "	teq	%1, #0\n"
 "	bne	1b"
-	: "=&r" (result), "=r" (tmp)
+	: "=&r" (result), "=&r" (tmp)
 	: "r" (&v->counter), "Ir" (i)
 	: "cc");
...
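For background on the constraint change above (explanation only): the & marks an output operand as earlyclobber, telling GCC that the output register is written before the asm has finished using its inputs, so it must not share a register with any input operand. A minimal sketch of the same ldrex/strex pattern under that constraint, assuming ARMv6 and GCC inline assembly; the function name is illustrative, not taken from the header:

static inline int atomic_add_return_sketch(int i, int *counter)
{
	int result, tmp;

	__asm__ __volatile__(
"1:	ldrex	%0, [%2]\n"		/* result = *counter */
"	add	%0, %0, %3\n"
"	strex	%1, %0, [%2]\n"		/* tmp = 0 on success */
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp)	/* both earlyclobber: written before */
	: "r" (counter), "Ir" (i)	/* the input registers are last read */
	: "cc");

	return result;
}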
@@ -395,6 +395,11 @@ extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
 #include <asm-generic/pgtable.h>
+/*
+ * We provide our own arch_get_unmapped_area to cope with VIPT caches.
+ */
+#define HAVE_ARCH_UNMAPPED_AREA
+
 /*
  * remap a physical address `phys' of size `size' with page protection `prot'
  * into virtual address `from'
...
@@ -6,10 +6,11 @@
  * or page size, whichever is greater since the cache aliases
  * every size/ways bytes.
  */
-#if __LINUX_ARM_ARCH__ > 5
-#define SHMLBA (4 * PAGE_SIZE)
-#else
-#define SHMLBA PAGE_SIZE	 /* attach addr a multiple of this */
-#endif
+#define SHMLBA	(4 * PAGE_SIZE)	 /* attach addr a multiple of this */
+
+/*
+ * Enforce SHMLBA in shmat
+ */
+#define __ARCH_FORCE_SHMLBA
 #endif /* _ASMARM_SHMPARAM_H */
@@ -42,6 +42,19 @@
 #define CR_XP	(1 << 23)	/* Extended page tables		*/
 #define CR_VE	(1 << 24)	/* Vectored interrupts		*/
+#define CPUID_ID	0
+#define CPUID_CACHETYPE	1
+#define CPUID_TCM	2
+#define CPUID_TLBTYPE	3
+
+#define read_cpuid(reg)							\
+	({								\
+		unsigned int __val;					\
+		asm("mrc%? p15, 0, %0, c0, c0, " __stringify(reg)	\
+		    : "=r" (__val));					\
+		__val;							\
+	})
+
 /*
  * This is used to ensure the compiler did actually allocate the register we
  * asked it for some inline assembly sequences.  Apparently we can't trust
...
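The setup.c and op_model_xscale.c hunks earlier in this commit are the consumers of this new helper. A minimal usage sketch; the function and the printk formatting are illustrative, not part of the patch:

/* Read and report two of the CP15 ID registers via the new accessor. */
static void report_cpuid_example(void)
{
	unsigned int id    = read_cpuid(CPUID_ID);
	unsigned int cache = read_cpuid(CPUID_CACHETYPE);

	printk("CPU ID: 0x%08x, cache type: 0x%08x\n", id, cache);
}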