Commit 8a0aa9f7 authored by David S. Miller

Merge davem@nuts.ninka.net:/home/davem/src/BK/net-2.5

into kernel.bkbits.net:/home/davem/net-2.5
parents 67b81891 a58e4d1d
@@ -31,13 +31,15 @@ apcs-$(CONFIG_CPU_32)	:=-mapcs-32
 apcs-$(CONFIG_CPU_26)	:=-mapcs-26 -mcpu=arm3
 # This selects which instruction set is used.
-# Note that GCC is lame - it doesn't numerically define an
-# architecture version macro, but instead defines a whole
-# series of macros.
-arch-$(CONFIG_CPU_32v3)	:=-D__LINUX_ARM_ARCH__=3 -march=armv3
+# Note that GCC does not numerically define an architecture version
+# macro, but instead defines a whole series of macros which makes
+# testing for a specific architecture or later rather impossible.
+#
+# Note - GCC does accept -march=armv5te, but someone messed up the assembler or the
+# gcc specs file - this needs fixing properly - ie in gcc and/or binutils.
+arch-$(CONFIG_CPU_32v5)	:=-D__LINUX_ARM_ARCH__=5 -march=armv5t
 arch-$(CONFIG_CPU_32v4)	:=-D__LINUX_ARM_ARCH__=4 -march=armv4
-arch-$(CONFIG_CPU_32v5)	:=-D__LINUX_ARM_ARCH__=5 -march=armv5te
-arch-$(CONFIG_CPU_XSCALE)	:=-D__LINUX_ARM_ARCH__=5 -march=armv4 -Wa,-mxscale #-march=armv5te
+arch-$(CONFIG_CPU_32v3)	:=-D__LINUX_ARM_ARCH__=3 -march=armv3
 # This selects how we optimise for the processor.
 tune-$(CONFIG_CPU_ARM610)	:=-mtune=arm610
@@ -48,13 +50,13 @@ tune-$(CONFIG_CPU_ARM922T)	:=-mtune=arm9tdmi
 tune-$(CONFIG_CPU_ARM926T)	:=-mtune=arm9tdmi
 tune-$(CONFIG_CPU_SA110)	:=-mtune=strongarm110
 tune-$(CONFIG_CPU_SA1100)	:=-mtune=strongarm1100
-tune-$(CONFIG_CPU_XSCALE)	:=-mtune=strongarm #-mtune=xscale
+tune-$(CONFIG_CPU_XSCALE)	:=-mtune=strongarm -Wa,-mxscale #-mtune=xscale
 # Force -mno-fpu to be passed to the assembler.  Some versions of gcc don't
 # do this with -msoft-float
 CFLAGS_BOOT	:=$(apcs-y) $(arch-y) $(tune-y) -mshort-load-bytes -msoft-float -Wa,-mno-fpu -Uarm
 CFLAGS		+=$(apcs-y) $(arch-y) $(tune-y) -mshort-load-bytes -msoft-float -Wa,-mno-fpu -Uarm
-AFLAGS		+=$(apcs-y) $(arch-y) -mno-fpu -msoft-float -Wa,-mno-fpu
+AFLAGS		+=$(apcs-y) $(arch-y) $(tune-y) -mno-fpu -msoft-float -Wa,-mno-fpu
 #Default value
 DATAADDR	:= .
@@ -208,6 +210,7 @@ zi:;	$(Q)$(MAKE) $(build)=$(boot) zinstall
 	)
 
 arch/$(ARCH)/kernel/asm-offsets.s: include/asm include/linux/version.h \
+				   include/asm-arm/.arch include/asm-arm/.proc \
 				   include/config/MARKER
 
 include/asm-$(ARCH)/constants.h: arch/$(ARCH)/kernel/asm-offsets.s
...
@@ -50,6 +50,10 @@ __SA1100_start:
 10:
 #endif
+		mrc	p15, 0, r0, c1, c0, 0	@ read control reg
+		ands	r0, r0, #0x0d
+		beq	99f
+
 		@ Data cache might be active.
 		@ Be sure to flush kernel binary out of the cache,
 		@ whatever state it is, before it is turned off.
@@ -68,11 +72,4 @@ __SA1100_start:
 		bic	r0, r0, #0x0d		@ clear WB, DC, MMU
 		bic	r0, r0, #0x1000		@ clear Icache
 		mcr	p15, 0, r0, c1, c0, 0
+99:
-
-/*
- * Pause for a short time so that we give enough time
- * for the host to start a terminal up.
- */
-		mov	r0, #0x00200000
-1:		subs	r0, r0, #1
-		bne	1b
@@ -418,7 +418,7 @@ unsigned long get_wchan(struct task_struct *p)
 	if (!p || p == current || p->state == TASK_RUNNING)
 		return 0;
 
-	stack_page = 4096 + (unsigned long)p;
+	stack_page = 4096 + (unsigned long)p->thread_info;
 	fp = thread_saved_fp(p);
 	do {
 		if (fp < stack_page || fp > 4092+stack_page)
...
@@ -74,6 +74,9 @@ struct cpu_tlb_fns cpu_tlb;
 #ifdef MULTI_USER
 struct cpu_user_fns cpu_user;
 #endif
+#ifdef MULTI_CACHE
+struct cpu_cache_fns cpu_cache;
+#endif
 
 unsigned char aux_device_present;
 char elf_platform[ELF_PLATFORM_SIZE];
@@ -282,6 +285,9 @@ static void __init setup_processor(void)
 #ifdef MULTI_USER
 	cpu_user = *list->user;
 #endif
+#ifdef MULTI_CACHE
+	cpu_cache = *list->cache;
+#endif
 
 	printk("CPU: %s [%08x] revision %d (ARMv%s)\n",
 	       cpu_name, processor_id, (int)processor_id & 15,
@@ -323,58 +329,77 @@ static struct machine_desc * __init setup_machine(unsigned int nr)
 	return list;
 }
+static void __init early_initrd(char **p)
+{
+	unsigned long start, size;
+
+	start = memparse(*p, p);
+	if (**p == ',') {
+		size = memparse((*p) + 1, p);
+
+		phys_initrd_start = start;
+		phys_initrd_size = size;
+	}
+}
+__early_param("initrd=", early_initrd);
+
 /*
- * Initial parsing of the command line.  We need to pick out the
- * memory size.  We look for mem=size@start, where start and size
- * are "size[KkMm]"
+ * Pick out the memory size.  We look for mem=size@start,
+ * where start and size are "size[KkMm]"
  */
-static void __init
-parse_cmdline(struct meminfo *mi, char **cmdline_p, char *from)
+static void __init early_mem(char **p)
+{
+	static int usermem __initdata = 0;
+	unsigned long size, start;
+
+	/*
+	 * If the user specifies memory size, we
+	 * blow away any automatically generated
+	 * size.
+	 */
+	if (usermem == 0) {
+		usermem = 1;
+		meminfo.nr_banks = 0;
+	}
+
+	start = PHYS_OFFSET;
+	size  = memparse(*p, p);
+	if (**p == '@')
+		start = memparse(*p + 1, p);
+
+	meminfo.bank[meminfo.nr_banks].start = start;
+	meminfo.bank[meminfo.nr_banks].size  = size;
+	meminfo.bank[meminfo.nr_banks].node  = PHYS_TO_NID(start);
+	meminfo.nr_banks += 1;
+}
+__early_param("mem=", early_mem);
+
+/*
+ * Initial parsing of the command line.
+ */
+static void __init parse_cmdline(char **cmdline_p, char *from)
 {
 	char c = ' ', *to = command_line;
-	int usermem = 0, len = 0;
+	int len = 0;
 
 	for (;;) {
-		if (c == ' ' && !memcmp(from, "mem=", 4)) {
-			unsigned long size, start;
-
-			if (to != command_line)
-				to -= 1;
-
-			/*
-			 * If the user specifies memory size, we
-			 * blow away any automatically generated
-			 * size.
-			 */
-			if (usermem == 0) {
-				usermem = 1;
-				mi->nr_banks = 0;
-			}
-
-			start = PHYS_OFFSET;
-			size  = memparse(from + 4, &from);
-			if (*from == '@')
-				start = memparse(from + 1, &from);
-
-			mi->bank[mi->nr_banks].start = start;
-			mi->bank[mi->nr_banks].size  = size;
-			mi->bank[mi->nr_banks].node  = PHYS_TO_NID(start);
-			mi->nr_banks += 1;
-		} else if (c == ' ' && !memcmp(from, "initrd=", 7)) {
-			unsigned long start, size;
-
-			/*
-			 * Remove space character
-			 */
-			if (to != command_line)
-				to -= 1;
-
-			start = memparse(from + 7, &from);
-			if (*from == ',') {
-				size = memparse(from + 1, &from);
-
-				phys_initrd_start = start;
-				phys_initrd_size = size;
+		if (c == ' ') {
+			extern struct early_params __early_begin, __early_end;
+			struct early_params *p;
+
+			for (p = &__early_begin; p < &__early_end; p++) {
+				int len = strlen(p->arg);
+
+				if (memcmp(from, p->arg, len) == 0) {
+					if (to != command_line)
+						to -= 1;
+					from += len;
+					p->fn(&from);
+
+					while (*from != ' ' && *from != '\0')
+						from++;
+					break;
+				}
 			}
 		}
 		c = *from++;
@@ -536,6 +561,8 @@ __tagtable(ATAG_RAMDISK, parse_tag_ramdisk);
 
 static int __init parse_tag_initrd(const struct tag *tag)
 {
+	printk(KERN_WARNING "ATAG_INITRD is deprecated; "
+		"please update your bootloader.\n");
 	phys_initrd_start = __virt_to_phys(tag->u.initrd.start);
 	phys_initrd_size = tag->u.initrd.size;
 	return 0;
@@ -668,7 +695,7 @@ void __init setup_arch(char **cmdline_p)
 	memcpy(saved_command_line, from, COMMAND_LINE_SIZE);
 	saved_command_line[COMMAND_LINE_SIZE-1] = '\0';
-	parse_cmdline(&meminfo, cmdline_p, from);
+	parse_cmdline(cmdline_p, from);
 	bootmem_init(&meminfo);
 	paging_init(&meminfo, mdesc);
 	request_standard_resources(&meminfo, mdesc);
...
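For reference, the hunks above replace open-coded "mem=" and "initrd=" parsing with a table-driven scheme: each __early_param() use drops a struct early_params record into a dedicated linker section (the vmlinux.lds.S hunk near the end of this commit collects it between __early_begin and __early_end), and the new parse_cmdline() simply walks that table. The macro itself is not part of this excerpt; the following is a sketch inferred from the fields and section name used here, not a verbatim copy of the real asm-arm header:

struct early_params {
	const char *arg;
	void (*fn)(char **p);
};

/* Emit one record per parameter into the "__early_param" section. */
#define __early_param(name, fn)						\
	static struct early_params __early_##fn			\
	__attribute__((__used__, __section__("__early_param"))) =	\
	{ name, fn }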
@@ -12,6 +12,7 @@
  *  CPU support functions
  */
 #include <linux/config.h>
+#include <linux/module.h>
 #include <linux/types.h>
 #include <linux/kernel.h>
 #include <linux/cpufreq.h>
@@ -173,9 +174,9 @@ static int integrator_set_target(struct cpufreq_policy *policy,
 	return 0;
 }
 
-static int integrator_cpufreq_init(struct cpufreq *policy)
+static int integrator_cpufreq_init(struct cpufreq_policy *policy)
 {
-	unsigned long cus_allowed;
+	unsigned long cpus_allowed;
 	unsigned int cpu = policy->cpu;
 	u_int cm_osc, cm_stat, mem_freq_khz;
 	struct vco vco;
...
@@ -18,6 +18,7 @@
  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  */
 #include <linux/init.h>
+#include <linux/list.h>
 
 #include <asm/hardware.h>
 #include <asm/irq.h>
...
@@ -15,6 +15,7 @@
 #include <linux/kernel.h>
 #include <linux/tty.h>
 #include <linux/module.h>
+#include <linux/mm.h>
 #include <linux/errno.h>
 #include <linux/serial_core.h>
 #include <linux/delay.h>
@@ -25,6 +26,7 @@
 #include <asm/setup.h>
 #include <asm/page.h>
 #include <asm/pgtable.h>
+#include <asm/tlbflush.h>
 
 #include <asm/mach/arch.h>
 #include <asm/mach/map.h>
...
@@ -20,16 +20,16 @@ obj-$(CONFIG_DISCONTIGMEM) += discontig.o
 p-$(CONFIG_CPU_26)	+= proc-arm2_3.o
 
 # ARMv3
-p-$(CONFIG_CPU_ARM610)	+= proc-arm6_7.o  tlb-v3.o    copypage-v3.o
-p-$(CONFIG_CPU_ARM710)	+= proc-arm6_7.o  tlb-v3.o    copypage-v3.o
+p-$(CONFIG_CPU_ARM610)	+= proc-arm6_7.o  tlb-v3.o    cache-v3.o   copypage-v3.o
+p-$(CONFIG_CPU_ARM710)	+= proc-arm6_7.o  tlb-v3.o    cache-v3.o   copypage-v3.o
 
 # ARMv4
-p-$(CONFIG_CPU_ARM720T)	+= proc-arm720.o  tlb-v4.o    copypage-v4wt.o abort-lv4t.o
-p-$(CONFIG_CPU_ARM920T)	+= proc-arm920.o  tlb-v4wbi.o copypage-v4wb.o abort-ev4t.o
-p-$(CONFIG_CPU_ARM922T)	+= proc-arm922.o  tlb-v4wbi.o copypage-v4wb.o abort-ev4t.o
-p-$(CONFIG_CPU_ARM1020)	+= proc-arm1020.o tlb-v4wbi.o copypage-v4wb.o abort-ev4t.o
-p-$(CONFIG_CPU_SA110)	+= proc-sa110.o   tlb-v4wb.o  copypage-v4wb.o abort-ev4.o minicache.o
-p-$(CONFIG_CPU_SA1100)	+= proc-sa110.o   tlb-v4wb.o  copypage-v4mc.o abort-ev4.o minicache.o
+p-$(CONFIG_CPU_ARM720T)	+= proc-arm720.o  tlb-v4.o    cache-v4.o   copypage-v4wt.o abort-lv4t.o
+p-$(CONFIG_CPU_ARM920T)	+= proc-arm920.o  tlb-v4wbi.o cache-v4wt.o copypage-v4wb.o abort-ev4t.o
+p-$(CONFIG_CPU_ARM922T)	+= proc-arm922.o  tlb-v4wbi.o cache-v4wt.o copypage-v4wb.o abort-ev4t.o
+p-$(CONFIG_CPU_ARM1020)	+= proc-arm1020.o tlb-v4wbi.o cache-v4wt.o copypage-v4wb.o abort-ev4t.o
+p-$(CONFIG_CPU_SA110)	+= proc-sa110.o   tlb-v4wb.o  cache-v4wb.o copypage-v4wb.o abort-ev4.o
+p-$(CONFIG_CPU_SA1100)	+= proc-sa1100.o  tlb-v4wb.o  cache-v4wb.o copypage-v4mc.o abort-ev4.o minicache.o
 
 # ARMv5
 p-$(CONFIG_CPU_ARM926T)	+= proc-arm926.o  tlb-v4wbi.o copypage-v4wb.o abort-ev5tej.o
...
/*
* linux/arch/arm/mm/cache-v3.S
*
* Copyright (C) 1997-2002 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/linkage.h>
#include <asm/hardware.h>
#include <asm/page.h>
#include "proc-macros.S"
/*
* flush_user_cache_all()
*
* Invalidate all cache entries in a particular address
* space.
*
* - mm - mm_struct describing address space
*/
ENTRY(v3_flush_user_cache_all)
/* FALLTHROUGH */
/*
* flush_kern_cache_all()
*
* Clean and invalidate the entire cache.
*/
ENTRY(v3_flush_kern_cache_all)
/* FALLTHROUGH */
/*
* flush_user_cache_range(start, end, vm_flags)
*
* Invalidate a range of cache entries in the specified
* address space.
*
* - start - start address (may not be aligned)
* - end - end address (exclusive, may not be aligned)
* - vma - vma_area_struct describing address space
*/
ENTRY(v3_flush_user_cache_range)
mov ip, #0
mcreq p15, 0, ip, c7, c0, 0 @ flush ID cache
mov pc, lr
/*
* coherent_kern_range(start, end)
*
* Ensure coherency between the Icache and the Dcache in the
* region described by start. If you have non-snooping
* Harvard caches, you need to implement this function.
*
* - start - virtual start address
* - end - virtual end address
*/
ENTRY(v3_coherent_kern_range)
mov pc, lr
/*
* flush_kern_dcache_page(void *page)
*
* Ensure no D cache aliasing occurs, either with itself or
* the I cache
*
* - addr - page aligned address
*/
ENTRY(v3_flush_kern_dcache_page)
/* FALLTHROUGH */
/*
* dma_inv_range(start, end)
*
* Invalidate (discard) the specified virtual address range.
* May not write back any entries. If 'start' or 'end'
* are not cache line aligned, those lines must be written
* back.
*
* - start - virtual start address
* - end - virtual end address
*/
ENTRY(v3_dma_inv_range)
/* FALLTHROUGH */
/*
* dma_flush_range(start, end)
*
* Clean and invalidate the specified virtual address range.
*
* - start - virtual start address
* - end - virtual end address
*/
ENTRY(v3_dma_flush_range)
mov r0, #0
mcr p15, 0, r0, c7, c0, 0 @ flush ID cache
/* FALLTHROUGH */
/*
* dma_clean_range(start, end)
*
* Clean (write back) the specified virtual address range.
*
* - start - virtual start address
* - end - virtual end address
*/
ENTRY(v3_dma_clean_range)
mov pc, lr
ENTRY(v3_cache_fns)
.long v3_flush_kern_cache_all
.long v3_flush_user_cache_all
.long v3_flush_user_cache_range
.long v3_coherent_kern_range
.long v3_flush_kern_dcache_page
.long v3_dma_inv_range
.long v3_dma_clean_range
.long v3_dma_flush_range
/*
* linux/arch/arm/mm/cache-v4.S
*
* Copyright (C) 1997-2002 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/linkage.h>
#include <asm/hardware.h>
#include <asm/page.h>
#include "proc-macros.S"
/*
* flush_user_cache_all()
*
* Invalidate all cache entries in a particular address
* space.
*
* - mm - mm_struct describing address space
*/
ENTRY(v4_flush_user_cache_all)
/* FALLTHROUGH */
/*
* flush_kern_cache_all()
*
* Clean and invalidate the entire cache.
*/
ENTRY(v4_flush_kern_cache_all)
mov r0, #0
mcr p15, 0, r0, c7, c7, 0 @ flush ID cache
mov pc, lr
/*
* flush_user_cache_range(start, end, vma)
*
* Invalidate a range of cache entries in the specified
* address space.
*
* - start - start address (may not be aligned)
* - end - end address (exclusive, may not be aligned)
* - vma - vma_area_struct describing address space
*/
ENTRY(v4_flush_user_cache_range)
mov ip, #0
mcreq p15, 0, ip, c7, c7, 0 @ flush ID cache
mov pc, lr
/*
* coherent_kern_range(start, end)
*
* Ensure coherency between the Icache and the Dcache in the
* region described by start. If you have non-snooping
* Harvard caches, you need to implement this function.
*
* - start - virtual start address
* - end - virtual end address
*/
ENTRY(v4_coherent_kern_range)
mov pc, lr
/*
* flush_kern_dcache_page(void *page)
*
* Ensure no D cache aliasing occurs, either with itself or
* the I cache
*
* - addr - page aligned address
*/
ENTRY(v4_flush_kern_dcache_page)
/* FALLTHROUGH */
/*
* dma_inv_range(start, end)
*
* Invalidate (discard) the specified virtual address range.
* May not write back any entries. If 'start' or 'end'
* are not cache line aligned, those lines must be written
* back.
*
* - start - virtual start address
* - end - virtual end address
*/
ENTRY(v4_dma_inv_range)
/* FALLTHROUGH */
/*
* dma_flush_range(start, end)
*
* Clean and invalidate the specified virtual address range.
*
* - start - virtual start address
* - end - virtual end address
*/
ENTRY(v4_dma_flush_range)
mov r0, #0
mcr p15, 0, r0, c7, c7, 0 @ flush ID cache
/* FALLTHROUGH */
/*
* dma_clean_range(start, end)
*
* Clean (write back) the specified virtual address range.
*
* - start - virtual start address
* - end - virtual end address
*/
ENTRY(v4_dma_clean_range)
mov pc, lr
ENTRY(v4_cache_fns)
.long v4_flush_kern_cache_all
.long v4_flush_user_cache_all
.long v4_flush_user_cache_range
.long v4_coherent_kern_range
.long v4_flush_kern_dcache_page
.long v4_dma_inv_range
.long v4_dma_clean_range
.long v4_dma_flush_range
/*
* linux/arch/arm/mm/cache-v4wb.S
*
* Copyright (C) 1997-2002 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/linkage.h>
#include <asm/hardware.h>
#include <asm/page.h>
#include "proc-macros.S"
/*
* The size of one data cache line.
*/
#define CACHE_DLINESIZE 32
/*
* The total size of the data cache.
*/
#if defined(CONFIG_CPU_SA110)
# define CACHE_DSIZE 16384
#elif defined(CONFIG_CPU_SA1100)
# define CACHE_DSIZE 8192
#else
# error Unknown cache size
#endif
/*
* This is the size at which it becomes more efficient to
* clean the whole cache, rather than using the individual
* cache line maintenance instructions.
*
* Size Clean (ticks) Dirty (ticks)
* 4096 21 20 21 53 55 54
* 8192 40 41 40 106 100 102
* 16384 77 77 76 140 140 138
* 32768 150 149 150 214 216 212 <---
* 65536 296 297 296 351 358 361
* 131072 591 591 591 656 657 651
* Whole 132 136 132 221 217 207 <---
*/
#define CACHE_DLIMIT (CACHE_DSIZE * 4)
/*
* flush_user_cache_all()
*
* Clean and invalidate all cache entries in a particular address
* space.
*/
ENTRY(v4wb_flush_user_cache_all)
/* FALLTHROUGH */
/*
* flush_kern_cache_all()
*
* Clean and invalidate the entire cache.
*/
ENTRY(v4wb_flush_kern_cache_all)
mov ip, #0
mcr p15, 0, ip, c7, c5, 0 @ invalidate I cache
__flush_whole_cache:
mov r0, #FLUSH_BASE
add r1, r0, #CACHE_DSIZE
1: ldr r2, [r0], #32
cmp r0, r1
blo 1b
mcr p15, 0, ip, c7, c10, 4 @ drain write buffer
mov pc, lr
/*
* flush_user_cache_range(start, end, vm_flags)
*
* Invalidate a range of cache entries in the specified
* address space.
*
* - start - start address (inclusive, page aligned)
* - end - end address (exclusive, page aligned)
* - vma - vma_area_struct describing address space
*/
ENTRY(v4wb_flush_user_cache_range)
sub r3, r1, r0 @ calculate total size
tst r2, #VM_EXEC @ executable region?
mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache
cmp r3, #CACHE_DLIMIT @ total size >= limit?
bhs __flush_whole_cache @ flush whole D cache
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry
add r0, r0, #CACHE_DLINESIZE
cmp r0, r1
blo 1b
tst r2, #VM_EXEC
mcrne p15, 0, ip, c7, c10, 4 @ drain write buffer
mov pc, lr
/*
* flush_kern_dcache_page(void *page)
*
* Ensure no D cache aliasing occurs, either with itself or
* the I cache
*
* - addr - page aligned address
*/
ENTRY(v4wb_flush_kern_dcache_page)
add r1, r0, #PAGE_SZ
/* fall through */
/*
* coherent_kern_range(start, end)
*
* Ensure coherency between the Icache and the Dcache in the
* region described by start. If you have non-snooping
* Harvard caches, you need to implement this function.
*
* - start - virtual start address
* - end - virtual end address
*/
ENTRY(v4wb_coherent_kern_range)
bic r0, r0, #CACHE_DLINESIZE - 1
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry
add r0, r0, #CACHE_DLINESIZE
cmp r0, r1
blo 1b
mov ip, #0
mcr p15, 0, ip, c7, c5, 0 @ invalidate I cache
mcr p15, 0, ip, c7, c10, 4 @ drain WB
mov pc, lr
/*
* dma_inv_range(start, end)
*
* Invalidate (discard) the specified virtual address range.
* May not write back any entries. If 'start' or 'end'
* are not cache line aligned, those lines must be written
* back.
*
* - start - virtual start address
* - end - virtual end address
*/
ENTRY(v4wb_dma_inv_range)
tst r0, #CACHE_DLINESIZE - 1
bic r0, r0, #CACHE_DLINESIZE - 1
mcrne p15, 0, r0, c7, c10, 1 @ clean D entry
tst r1, #CACHE_DLINESIZE - 1
mcrne p15, 0, r1, c7, c10, 1 @ clean D entry
1: mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry
add r0, r0, #CACHE_DLINESIZE
cmp r0, r1
blo 1b
mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
mov pc, lr
/*
* dma_clean_range(start, end)
*
* Clean (write back) the specified virtual address range.
*
* - start - virtual start address
* - end - virtual end address
*/
ENTRY(v4wb_dma_clean_range)
bic r0, r0, #CACHE_DLINESIZE - 1
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
add r0, r0, #CACHE_DLINESIZE
cmp r0, r1
blo 1b
mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
mov pc, lr
/*
* dma_flush_range(start, end)
*
* Clean and invalidate the specified virtual address range.
*
* - start - virtual start address
* - end - virtual end address
*
* This is actually the same as v4wb_coherent_kern_range()
*/
.globl v4wb_dma_flush_range
.set v4wb_dma_flush_range, v4wb_coherent_kern_range
ENTRY(v4wb_cache_fns)
.long v4wb_flush_kern_cache_all
.long v4wb_flush_user_cache_all
.long v4wb_flush_user_cache_range
.long v4wb_coherent_kern_range
.long v4wb_flush_kern_dcache_page
.long v4wb_dma_inv_range
.long v4wb_dma_clean_range
.long v4wb_dma_flush_range
/*
* linux/arch/arm/mm/cache-v4wt.S
*
* Copyright (C) 1997-2002 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* ARMv4 write through cache operations support.
*
* We assume that the write buffer is not enabled.
*/
#include <linux/linkage.h>
#include <asm/hardware.h>
#include <asm/page.h>
#include "proc-macros.S"
/*
* The size of one data cache line.
*/
#define CACHE_DLINESIZE 32
/*
* The number of data cache segments.
*/
#define CACHE_DSEGMENTS 8
/*
* The number of lines in a cache segment.
*/
#define CACHE_DENTRIES 64
/*
* This is the size at which it becomes more efficient to
* clean the whole cache, rather than using the individual
* cache line maintenance instructions.
*
* *** This needs benchmarking
*/
#define CACHE_DLIMIT 16384
/*
* flush_user_cache_all()
*
* Invalidate all cache entries in a particular address
* space.
*/
ENTRY(v4wt_flush_user_cache_all)
/* FALLTHROUGH */
/*
* flush_kern_cache_all()
*
* Clean and invalidate the entire cache.
*/
ENTRY(v4wt_flush_kern_cache_all)
mov r2, #VM_EXEC
mov ip, #0
__flush_whole_cache:
tst r2, #VM_EXEC
mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache
mcr p15, 0, ip, c7, c6, 0 @ invalidate D cache
mov pc, lr
/*
* flush_user_cache_range(start, end, vm_flags)
*
* Clean and invalidate a range of cache entries in the specified
* address space.
*
* - start - start address (inclusive, page aligned)
* - end - end address (exclusive, page aligned)
* - vma - vma_area_struct describing address space
*/
ENTRY(v4wt_flush_user_cache_range)
sub r3, r1, r0 @ calculate total size
cmp r3, #CACHE_DLIMIT
bhs __flush_whole_cache
1: mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry
tst r2, #VM_EXEC
mcrne p15, 0, r0, c7, c5, 1 @ invalidate I entry
add r0, r0, #CACHE_DLINESIZE
cmp r0, r1
blo 1b
mov pc, lr
/*
* coherent_kern_range(start, end)
*
* Ensure coherency between the Icache and the Dcache in the
* region described by start. If you have non-snooping
* Harvard caches, you need to implement this function.
*
* - start - virtual start address
* - end - virtual end address
*/
ENTRY(v4wt_coherent_kern_range)
bic r0, r0, #CACHE_DLINESIZE - 1
1: mcr p15, 0, r0, c7, c5, 1 @ invalidate I entry
add r0, r0, #CACHE_DLINESIZE
cmp r0, r1
blo 1b
mov pc, lr
/*
* flush_kern_dcache_page(void *page)
*
* Ensure no D cache aliasing occurs, either with itself or
* the I cache
*
* - addr - page aligned address
*/
ENTRY(v4wt_flush_kern_dcache_page)
mov r2, #0
mcr p15, 0, r2, c7, c5, 0 @ invalidate I cache
add r1, r0, #PAGE_SZ
/* fallthrough */
/*
* dma_inv_range(start, end)
*
* Invalidate (discard) the specified virtual address range.
* May not write back any entries. If 'start' or 'end'
* are not cache line aligned, those lines must be written
* back.
*
* - start - virtual start address
* - end - virtual end address
*/
ENTRY(v4wt_dma_inv_range)
bic r0, r0, #CACHE_DLINESIZE - 1
1: mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry
add r0, r0, #CACHE_DLINESIZE
cmp r0, r1
blo 1b
/* FALLTHROUGH */
/*
* dma_clean_range(start, end)
*
* Clean the specified virtual address range.
*
* - start - virtual start address
* - end - virtual end address
*/
ENTRY(v4wt_dma_clean_range)
mov pc, lr
/*
* dma_flush_range(start, end)
*
* Clean and invalidate the specified virtual address range.
*
* - start - virtual start address
* - end - virtual end address
*/
.globl v4wt_dma_flush_range
.equ v4wt_dma_flush_range, v4wt_dma_inv_range
ENTRY(v4wt_cache_fns)
.long v4wt_flush_kern_cache_all
.long v4wt_flush_user_cache_all
.long v4wt_flush_user_cache_range
.long v4wt_coherent_kern_range
.long v4wt_flush_kern_dcache_page
.long v4wt_dma_inv_range
.long v4wt_dma_clean_range
.long v4wt_dma_flush_range
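
The four *_cache_fns vectors above (v3, v4, v4wb, v4wt) list the same eight entry points in the same order, and the setup.c hunk earlier copies the selected vector into the global cpu_cache on MULTI_CACHE builds. The struct definition is not shown in this excerpt; a sketch inferred from the .long tables and the callers' arguments (field names are assumptions):

struct cpu_cache_fns {
	void (*flush_kern_all)(void);
	void (*flush_user_all)(void);
	void (*flush_user_range)(unsigned long start, unsigned long end,
				 unsigned int vm_flags);
	void (*coherent_kern_range)(unsigned long start, unsigned long end);
	void (*flush_kern_dcache_page)(void *page);
	void (*dma_inv_range)(unsigned long start, unsigned long end);
	void (*dma_clean_range)(unsigned long start, unsigned long end);
	void (*dma_flush_range)(unsigned long start, unsigned long end);
};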
@@ -161,11 +161,11 @@ void *consistent_alloc(int gfp, size_t size, dma_addr_t *handle,
 	/*
 	 * Invalidate any data that might be lurking in the
-	 * kernel direct-mapped region.
+	 * kernel direct-mapped region for device DMA.
 	 */
 	{
 		unsigned long kaddr = (unsigned long)page_address(page);
-		invalidate_dcache_range(kaddr, kaddr + size);
+		dmac_inv_range(kaddr, kaddr + size);
 	}
 
 /*
@@ -330,7 +330,7 @@ static int __init consistent_init(void)
 core_initcall(consistent_init);
 
 /*
- * make an area consistent.
+ * make an area consistent for devices.
  */
 void consistent_sync(void *vaddr, size_t size, int direction)
 {
@@ -339,13 +339,13 @@ void consistent_sync(void *vaddr, size_t size, int direction)
 	switch (direction) {
 	case DMA_FROM_DEVICE:		/* invalidate only */
-		invalidate_dcache_range(start, end);
+		dmac_inv_range(start, end);
 		break;
 	case DMA_TO_DEVICE:		/* writeback only */
-		clean_dcache_range(start, end);
+		dmac_clean_range(start, end);
 		break;
 	case DMA_BIDIRECTIONAL:		/* writeback and invalidate */
-		flush_dcache_range(start, end);
+		dmac_flush_range(start, end);
 		break;
 	default:
 		BUG();
...
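The dmac_* calls above are the new DMA cache-maintenance names. How they bind is not shown in this excerpt, but given the cpu_cache_fns sketch above, a plausible (hedged) reading is:

#ifdef MULTI_CACHE
/* Indirect through the per-CPU vector copied in setup_processor(). */
#define dmac_inv_range(s, e)	cpu_cache.dma_inv_range(s, e)
#define dmac_clean_range(s, e)	cpu_cache.dma_clean_range(s, e)
#define dmac_flush_range(s, e)	cpu_cache.dma_flush_range(s, e)
#else
/* A single-CPU build, e.g. StrongARM-only, can bind at compile time. */
#define dmac_inv_range		v4wb_dma_inv_range
#define dmac_clean_range	v4wb_dma_clean_range
#define dmac_flush_range	v4wb_dma_flush_range
#endif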
@@ -184,9 +184,8 @@ void __flush_dcache_page(struct page *page)
 {
 	struct mm_struct *mm = current->active_mm;
 	struct list_head *l;
-	unsigned long kaddr = (unsigned long)page_address(page);
 
-	cpu_cache_clean_invalidate_range(kaddr, kaddr + PAGE_SIZE, 0);
+	__cpuc_flush_dcache_page(page_address(page));
 
 	if (!page->mapping)
 		return;
@@ -291,10 +290,9 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
 	page = pfn_to_page(pfn);
 	if (page->mapping) {
 		int dirty = test_and_clear_bit(PG_dcache_dirty, &page->flags);
-		unsigned long kaddr = (unsigned long)page_address(page);
 
 		if (dirty)
-			cpu_cache_clean_invalidate_range(kaddr, kaddr + PAGE_SIZE, 0);
+			__cpuc_flush_dcache_page(page_address(page));
 
 		make_coherent(vma, addr, page, dirty);
 	}
...
@@ -12,19 +12,15 @@
 #include <linux/module.h>
 #include <linux/signal.h>
 #include <linux/sched.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
 #include <linux/string.h>
-#include <linux/types.h>
 #include <linux/ptrace.h>
-#include <linux/mman.h>
 #include <linux/mm.h>
 #include <linux/interrupt.h>
-#include <linux/proc_fs.h>
 #include <linux/init.h>
 
 #include <asm/system.h>
 #include <asm/pgtable.h>
+#include <asm/tlbflush.h>
 #include <asm/uaccess.h>
 
 #include "fault.h"
...
@@ -24,30 +24,82 @@
 #include <asm/mach/map.h>
 
+static unsigned int cachepolicy __initdata = PMD_SECT_WB;
+static unsigned int ecc_mask __initdata = 0;
+
+struct cachepolicy {
+	char		*policy;
+	unsigned int	cr_mask;
+	unsigned int	pmd;
+};
+
+static struct cachepolicy cache_policies[] __initdata = {
+	{ "uncached",		CR1_W|CR1_C,	PMD_SECT_UNCACHED },
+	{ "buffered",		CR1_C,		PMD_SECT_BUFFERED },
+	{ "writethrough",	0,		PMD_SECT_WT },
+#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
+	{ "writeback",		0,		PMD_SECT_WB },
+	{ "writealloc",		0,		PMD_SECT_WBWA }
+#endif
+};
+
 /*
  * These are useful for identifying cache coherency
  * problems by allowing the cache or the cache and
  * writebuffer to be turned off.  (Note: the write
  * buffer should not be on and the cache off).
  */
-static int __init nocache_setup(char *__unused)
+static void __init early_cachepolicy(char **p)
 {
-	cr_alignment &= ~CR1_C;
-	cr_no_alignment &= ~CR1_C;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(cache_policies); i++) {
+		int len = strlen(cache_policies[i].policy);
+
+		if (memcmp(*p, cache_policies[i].policy, len) == 0) {
+			cachepolicy = cache_policies[i].pmd;
+			cr_alignment &= ~cache_policies[i].cr_mask;
+			cr_no_alignment &= ~cache_policies[i].cr_mask;
+			*p += len;
+			break;
+		}
+	}
+	if (i == ARRAY_SIZE(cache_policies))
+		printk(KERN_ERR "ERROR: unknown or unsupported cache policy\n");
 	flush_cache_all();
 	set_cr(cr_alignment);
-	return 1;
 }
 
-static int __init nowrite_setup(char *__unused)
+static void __init early_nocache(char **__unused)
 {
-	cr_alignment &= ~(CR1_W|CR1_C);
-	cr_no_alignment &= ~(CR1_W|CR1_C);
-	flush_cache_all();
-	set_cr(cr_alignment);
-	return 1;
+	char *p = "buffered";
+	printk(KERN_WARNING "nocache is deprecated; use cachepolicy=%s\n", p);
+	early_cachepolicy(&p);
+}
+
+static void __init early_nowrite(char **__unused)
+{
+	char *p = "uncached";
+	printk(KERN_WARNING "nowb is deprecated; use cachepolicy=%s\n", p);
+	early_cachepolicy(&p);
+}
+
+static void __init early_ecc(char **p)
+{
+	if (memcmp(*p, "on", 2) == 0) {
+		ecc_mask = PMD_PROTECTION;
+		*p += 2;
+	} else if (memcmp(*p, "off", 3) == 0) {
+		ecc_mask = 0;
+		*p += 3;
+	}
 }
 
+__early_param("nocache", early_nocache);
+__early_param("nowb", early_nowrite);
+__early_param("cachepolicy=", early_cachepolicy);
+__early_param("ecc=", early_ecc);
+
 static int __init noalign_setup(char *__unused)
 {
 	cr_alignment &= ~CR1_A;
@@ -57,8 +109,6 @@ static int __init noalign_setup(char *__unused)
 }
 
 __setup("noalign", noalign_setup);
-__setup("nocache", nocache_setup);
-__setup("nowb", nowrite_setup);
 
 #define FIRST_KERNEL_PGD_NR	(FIRST_USER_PGD_NR + USER_PTRS_PER_PGD)
@@ -197,7 +247,7 @@ alloc_init_page(unsigned long virt, unsigned long phys, unsigned int prot_l1, pg
 		pmdval = __pa(ptep) | prot_l1;
 		pmdp[0] = __pmd(pmdval);
 		pmdp[1] = __pmd(pmdval + 256 * sizeof(pte_t));
-		cpu_flush_pmd(pmdp);
+		flush_pmd_entry(pmdp);
 	}
 	ptep = pte_offset_kernel(pmdp, virt);
@@ -231,32 +281,20 @@ static struct mem_types mem_types[] __initdata = {
 		.domain    = DOMAIN_IO,
 	},
 	[MT_CACHECLEAN] = {
-		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
-				L_PTE_CACHEABLE | L_PTE_BUFFERABLE,
-		.prot_l1   = PMD_TYPE_TABLE | PMD_BIT4,
 		.prot_sect = PMD_TYPE_SECT | PMD_BIT4,
 		.domain    = DOMAIN_KERNEL,
 	},
 	[MT_MINICLEAN] = {
-		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
-				L_PTE_CACHEABLE,
-		.prot_l1   = PMD_TYPE_TABLE | PMD_BIT4,
 		.prot_sect = PMD_TYPE_SECT | PMD_BIT4 | PMD_SECT_MINICACHE,
 		.domain    = DOMAIN_KERNEL,
 	},
 	[MT_VECTORS] = {
 		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
-				L_PTE_CACHEABLE | L_PTE_BUFFERABLE |
 				L_PTE_EXEC,
 		.prot_l1   = PMD_TYPE_TABLE | PMD_BIT4,
-		.prot_sect = PMD_TYPE_SECT | PMD_BIT4,
 		.domain    = DOMAIN_USER,
 	},
 	[MT_MEMORY] = {
-		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
-				L_PTE_CACHEABLE | L_PTE_BUFFERABLE |
-				L_PTE_EXEC | L_PTE_WRITE,
-		.prot_l1   = PMD_TYPE_TABLE | PMD_BIT4,
 		.prot_sect = PMD_TYPE_SECT | PMD_BIT4 | PMD_SECT_AP_WRITE,
 		.domain    = DOMAIN_KERNEL,
 	}
@@ -268,37 +306,50 @@ static struct mem_types mem_types[] __initdata = {
 static void __init build_mem_type_table(void)
 {
 	int cpu_arch = cpu_architecture();
-#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
-	int writethrough = 1;
-#else
-	int writethrough = 0;
-#endif
-	int writealloc = 0, ecc = 0;
+	const char *policy;
 
-	if (cpu_arch < CPU_ARCH_ARMv5) {
-		writealloc = 0;
-		ecc = 0;
+	/*
+	 * ARMv5 can use ECC memory.
+	 */
+	if (cpu_arch == CPU_ARCH_ARMv5) {
+		mem_types[MT_VECTORS].prot_l1 |= ecc_mask;
+		mem_types[MT_MEMORY].prot_sect |= ecc_mask;
+	} else {
 		mem_types[MT_MINICLEAN].prot_sect &= ~PMD_SECT_TEX(1);
+		if (cachepolicy == PMD_SECT_WBWA)
+			cachepolicy = PMD_SECT_WB;
+		ecc_mask = 0;
 	}
 
-	if (writethrough) {
+	mem_types[MT_MEMORY].prot_sect |= cachepolicy;
+
+	switch (cachepolicy) {
+	default:
+	case PMD_SECT_UNCACHED:
+		policy = "uncached";
+		break;
+	case PMD_SECT_BUFFERED:
+		mem_types[MT_VECTORS].prot_pte |= PTE_BUFFERABLE;
+		policy = "buffered";
+		break;
+	case PMD_SECT_WT:
+		mem_types[MT_VECTORS].prot_pte |= PTE_BUFFERABLE|PTE_CACHEABLE;
 		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WT;
-		mem_types[MT_VECTORS].prot_sect |= PMD_SECT_WT;
-		mem_types[MT_MEMORY].prot_sect |= PMD_SECT_WT;
-	} else {
+		policy = "write through";
+		break;
+	case PMD_SECT_WB:
+		mem_types[MT_VECTORS].prot_pte |= PTE_BUFFERABLE|PTE_CACHEABLE;
 		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WB;
-		mem_types[MT_VECTORS].prot_sect |= PMD_SECT_WB;
-
-		if (writealloc)
-			mem_types[MT_MEMORY].prot_sect |= PMD_SECT_WBWA;
-		else
-			mem_types[MT_MEMORY].prot_sect |= PMD_SECT_WB;
-	}
-
-	if (ecc) {
-		mem_types[MT_VECTORS].prot_sect |= PMD_PROTECTION;
-		mem_types[MT_MEMORY].prot_sect |= PMD_PROTECTION;
+		policy = "write back";
+		break;
+	case PMD_SECT_WBWA:
+		mem_types[MT_VECTORS].prot_pte |= PTE_BUFFERABLE|PTE_CACHEABLE;
+		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WB;
+		policy = "write back, write allocate";
+		break;
 	}
+
+	printk("Memory policy: ECC %sabled, Data cache %s\n",
+		ecc_mask ? "en" : "dis", policy);
 }
 /*
@@ -330,6 +381,14 @@ static void __init create_mapping(struct map_desc *md)
 	off = md->physical - virt;
 	length = md->length;
 
+	if (mem_types[md->type].prot_l1 == 0 &&
+	    (virt & 0xfffff || (virt + off) & 0xfffff || (virt + length) & 0xfffff)) {
+		printk(KERN_WARNING "MM: map for 0x%08lx at 0x%08lx can not "
+		       "be mapped using pages, ignoring.\n",
+		       md->physical, md->virtual);
+		return;
+	}
+
 	while ((virt & 0xfffff || (virt + off) & 0xfffff) && length >= PAGE_SIZE) {
 		alloc_init_page(virt, virt + off, prot_l1, prot_pte);
...
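A worked example ties the early-param handlers to build_mem_type_table(); the command line here is hypothetical:

/*
 * Hypothetical boot: "mem=64M@0xc0000000 cachepolicy=writethrough"
 *
 * parse_cmdline() matches "mem=" in the early-param table and calls
 * early_mem() with *p just past the '=':
 *     size  = memparse("64M@0xc0000000", p);   // 64M -> 0x04000000
 *     start = memparse("0xc0000000", p);       // taken after the '@'
 * so meminfo.bank[0] becomes { start 0xc0000000, size 64MB }.
 *
 * "cachepolicy=writethrough" selects PMD_SECT_WT from cache_policies[],
 * which build_mem_type_table() ORs into mem_types[MT_MEMORY].prot_sect
 * and reports as "Memory policy: ECC disabled, Data cache write through".
 */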
@@ -162,7 +162,7 @@ memc_phys_table_32:
 *		 and inaccessible (0x01f00000).
 * Params : r0 = page table pointer
 */
-clear_tables:	ldr	r1, _arm3_set_pgd - 4
+clear_tables:	ldr	r1, _arm3_switch_mm - 4
		ldr	r2, [r1]
		sub	r1, r0, #256 * 4		@ start of MEMC tables
		add	r2, r1, r2, lsl #2		@ end of tables
@@ -186,14 +186,16 @@ clear_tables:	ldr	r1, _arm3_set_pgd - 4
		mov	pc, lr
 
 /*
- * Function: *_set_pgd(pgd_t *pgd)
+ * Function: *_switch_mm(pgd_t *pgd)
 * Params : pgd	New page tables/MEMC mapping
 * Purpose : update MEMC hardware with new mapping
 */
		.word	page_nr
-_arm3_set_pgd:	mcr	p15, 0, r1, c1, c0, 0		@ flush cache
-_arm2_set_pgd:	stmfd	sp!, {lr}
-		ldr	r1, _arm3_set_pgd - 4
+_arm3_switch_mm:
+		mcr	p15, 0, r1, c1, c0, 0		@ flush cache
+_arm2_switch_mm:
+		stmfd	sp!, {lr}
+		ldr	r1, _arm3_switch_mm - 4
		ldr	r2, [r1]
		sub	r0, r0, #256 * 4		@ start of MEMC tables
		add	r1, r0, r2, lsl #2		@ end of tables
@@ -273,9 +275,6 @@ _arm2_xchg_4:	mov	r2, pc
 _arm3_xchg_4:	swp	r0, r0, [r1]
		movs	pc, lr
 
-_arm2_3_check_bugs:
-		bics	pc, lr, #0x04000000		@ Clear FIQ disable bit
-
 cpu_arm2_name:
		.asciz	"ARM 2"
 cpu_arm250_name:
@@ -290,28 +289,25 @@ cpu_arm3_name:
 */
		.globl	arm2_processor_functions
 arm2_processor_functions:
-		.word	_arm2_3_check_bugs
		.word	_arm2_proc_init
		.word	_arm2_proc_fin
-		.word	_arm2_set_pgd
+		.word	_arm2_switch_mm
		.word	_arm2_xchg_1
		.word	_arm2_xchg_4
 
		.globl	arm250_processor_functions
 arm250_processor_functions:
-		.word	_arm2_3_check_bugs
		.word	_arm2_proc_init
		.word	_arm2_proc_fin
-		.word	_arm2_set_pgd
+		.word	_arm2_switch_mm
		.word	_arm3_xchg_1
		.word	_arm3_xchg_4
 
		.globl	arm3_processor_functions
 arm3_processor_functions:
-		.word	_arm2_3_check_bugs
		.word	_arm3_proc_init
		.word	_arm3_proc_fin
-		.word	_arm3_set_pgd
+		.word	_arm3_switch_mm
		.word	_arm3_xchg_1
		.word	_arm3_xchg_4
...
@@ -188,20 +188,6 @@ Ldata_lateldrpostreg:
		addeq	r7, r0, r2
		b	Ldata_saver7
 
-/*
- * Function: arm6_7_check_bugs (void)
- *	   : arm6_7_proc_init (void)
- *	   : arm6_7_proc_fin (void)
- *
- * Notes   : This processor does not require these
- */
-ENTRY(cpu_arm6_check_bugs)
-ENTRY(cpu_arm7_check_bugs)
-		mrs	ip, cpsr
-		bic	ip, ip, #PSR_F_BIT
-		msr	cpsr, ip
-		mov	pc, lr
-
 ENTRY(cpu_arm6_proc_init)
 ENTRY(cpu_arm7_proc_init)
		mov	pc, lr
@@ -220,30 +206,19 @@ ENTRY(cpu_arm7_do_idle)
		mov	pc, lr
 
 /*
- * Function: arm6_7_set_pgd(unsigned long pgd_phys)
+ * Function: arm6_7_switch_mm(unsigned long pgd_phys)
 * Params : pgd_phys	Physical address of page table
 * Purpose : Perform a task switch, saving the old processes state, and restoring
 *	     the new.
 */
-ENTRY(cpu_arm6_set_pgd)
-ENTRY(cpu_arm7_set_pgd)
+ENTRY(cpu_arm6_switch_mm)
+ENTRY(cpu_arm7_switch_mm)
		mov	r1, #0
		mcr	p15, 0, r1, c7, c0, 0		@ flush cache
		mcr	p15, 0, r0, c2, c0, 0		@ update page table ptr
		mcr	p15, 0, r1, c5, c0, 0		@ flush TLBs
		mov	pc, lr
 
-/*
- * Function: arm6_flush_pmd(pmdp)
- *
- * Params  : r0 = Address to set
- *
- * Purpose : Set a PMD and flush it out of any WB cache
- */
-ENTRY(cpu_arm6_flush_pmd)
-ENTRY(cpu_arm7_flush_pmd)
-		mov	pc, lr
-
 /*
 * Function: arm6_7_set_pte(pte_t *ptep, pte_t pte)
 * Params : r0 = Address to set
@@ -324,7 +299,6 @@ __arm7_setup:	mov	r0, #0
		.type	arm6_processor_functions, #object
 ENTRY(arm6_processor_functions)
		.word	cpu_arm6_data_abort
-		.word	cpu_arm6_check_bugs
		.word	cpu_arm6_proc_init
		.word	cpu_arm6_proc_fin
		.word	cpu_arm6_reset
@@ -345,8 +319,7 @@ ENTRY(arm6_processor_functions)
		.word	cpu_arm6_icache_invalidate_page
 
		/* pgtable */
-		.word	cpu_arm6_set_pgd
-		.word	cpu_arm6_flush_pmd
+		.word	cpu_arm6_switch_mm
		.word	cpu_arm6_set_pte
 
		.size	arm6_processor_functions, . - arm6_processor_functions
@@ -358,7 +331,6 @@ ENTRY(arm6_processor_functions)
		.type	arm7_processor_functions, #object
 ENTRY(arm7_processor_functions)
		.word	cpu_arm7_data_abort
-		.word	cpu_arm7_check_bugs
		.word	cpu_arm7_proc_init
		.word	cpu_arm7_proc_fin
		.word	cpu_arm7_reset
@@ -379,8 +351,7 @@ ENTRY(arm7_processor_functions)
		.word	cpu_arm7_icache_invalidate_page
 
		/* pgtable */
-		.word	cpu_arm7_set_pgd
-		.word	cpu_arm7_flush_pmd
+		.word	cpu_arm7_switch_mm
		.word	cpu_arm7_set_pte
 
		.size	arm7_processor_functions, . - arm7_processor_functions
...
@@ -38,47 +38,12 @@
 #include <asm/hardware.h>
 
 /*
- * Function: arm720_cache_clean_invalidate_all (void)
- *	   : arm720_cache_clean_invalidate_page (unsigned long address, int size,
- *						 int flags)
- *
- * Params  : address	Area start address
- *	   : size	size of area
- *	   : flags	b0 = I cache as well
- *
- * Purpose : Flush all cache lines
- */
-ENTRY(cpu_arm720_cache_clean_invalidate_all)
-ENTRY(cpu_arm720_cache_clean_invalidate_range)
-ENTRY(cpu_arm720_icache_invalidate_range)
-ENTRY(cpu_arm720_icache_invalidate_page)
-ENTRY(cpu_arm720_dcache_invalidate_range)
-		mov	r0, #0
-		mcr	p15, 0, r0, c7, c7, 0		@ flush cache
-		mov	pc, lr
-
-/*
- * These just expect cache lines to be cleaned.  Since we have a writethrough
- * cache, we never have any dirty cachelines to worry about.
- */
-ENTRY(cpu_arm720_dcache_clean_range)
-ENTRY(cpu_arm720_dcache_clean_page)
-ENTRY(cpu_arm720_dcache_clean_entry)
-		mov	pc, lr
-
-/*
- * Function: arm720_check_bugs (void)
- *	   : arm720_proc_init (void)
+ * Function: arm720_proc_init (void)
 *	   : arm720_proc_fin (void)
 *
 * Notes : This processor does not require these
 */
-ENTRY(cpu_arm720_check_bugs)
-		mrs	ip, cpsr
-		bic	ip, ip, #PSR_F_BIT
-		msr	cpsr, ip
-		mov	pc, lr
-
+ENTRY(cpu_arm720_dcache_clean_area)
 ENTRY(cpu_arm720_proc_init)
		mov	pc, lr
@@ -102,28 +67,18 @@ ENTRY(cpu_arm720_do_idle)
		mov	pc, lr
 
 /*
- * Function: arm720_set_pgd(unsigned long pgd_phys)
+ * Function: arm720_switch_mm(unsigned long pgd_phys)
 * Params : pgd_phys	Physical address of page table
 * Purpose : Perform a task switch, saving the old process' state and restoring
 *	     the new.
 */
-ENTRY(cpu_arm720_set_pgd)
+ENTRY(cpu_arm720_switch_mm)
		mov	r1, #0
		mcr	p15, 0, r1, c7, c7, 0		@ invalidate cache
		mcr	p15, 0, r0, c2, c0, 0		@ update page table ptr
		mcr	p15, 0, r1, c8, c7, 0		@ flush TLB (v4)
		mov	pc, lr
 
-/*
- * Function: arm720_flush_pmd(pmdp)
- *
- * Params  : r0 = Address to set
- *
- * Purpose : Set a PMD and flush it out of any WB cache
- */
-ENTRY(cpu_arm720_flush_pmd)
-		mov	pc, lr
-
 /*
 * Function: arm720_set_pte(pte_t *ptep, pte_t pte)
 * Params : r0 = Address to set
@@ -140,7 +95,7 @@ ENTRY(cpu_arm720_set_pte)
		bic	r2, r2, #3
		orr	r2, r2, #HPTE_TYPE_SMALL
 
-		tst	r1, #LPTE_USER | LPTE_EXEC	@ User or Exec?
+		tst	r1, #LPTE_USER			@ User?
		orrne	r2, r2, #HPTE_AP_READ
 
		tst	r1, #LPTE_WRITE | LPTE_DIRTY	@ Write and Dirty?
@@ -194,31 +149,13 @@ __arm720_setup:	mov	r0, #0
		.type	arm720_processor_functions, #object
 ENTRY(arm720_processor_functions)
		.word	v4t_late_abort
-		.word	cpu_arm720_check_bugs
		.word	cpu_arm720_proc_init
		.word	cpu_arm720_proc_fin
		.word	cpu_arm720_reset
		.word	cpu_arm720_do_idle
-
-		/* cache */
-		.word	cpu_arm720_cache_clean_invalidate_all
-		.word	cpu_arm720_cache_clean_invalidate_range
-
-		/* dcache */
-		.word	cpu_arm720_dcache_invalidate_range
-		.word	cpu_arm720_dcache_clean_range
-		.word	cpu_arm720_dcache_clean_page
-		.word	cpu_arm720_dcache_clean_entry
-
-		/* icache */
-		.word	cpu_arm720_icache_invalidate_range
-		.word	cpu_arm720_icache_invalidate_page
-
-		/* pgtable */
-		.word	cpu_arm720_set_pgd
-		.word	cpu_arm720_flush_pmd
+		.word	cpu_arm720_dcache_clean_area
+		.word	cpu_arm720_switch_mm
		.word	cpu_arm720_set_pte
 
		.size	arm720_processor_functions, . - arm720_processor_functions
 
		.type	cpu_arch_name, #object
@@ -249,4 +186,5 @@ __arm720_proc_info:
		.long	arm720_processor_functions
		.long	v4_tlb_fns
		.long	v4wt_user_fns
+		.long	v4_cache_fns
		.size	__arm720_proc_info, . - __arm720_proc_info
/*
* linux/arch/arm/mm/proc-sa1100.S
*
* Copyright (C) 1997-2002 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* MMU functions for SA110
*
* These are the low level assembler for performing cache and TLB
* functions on the StrongARM-1100 and StrongARM-1110.
*
* Note that SA1100 and SA1110 share everything but their name and CPU ID.
*
* 12-jun-2000, Erik Mouw (J.A.K.Mouw@its.tudelft.nl):
* Flush the read buffer at context switches
*/
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/constants.h>
#include <asm/procinfo.h>
#include <asm/hardware.h>
#include <asm/proc/pgtable.h>
/*
* the cache line size of the I and D cache
*/
#define DCACHELINESIZE 32
#define FLUSH_OFFSET 32768
.macro flush_1100_dcache rd, ra, re
ldr \rd, =flush_base
ldr \ra, [\rd]
eor \ra, \ra, #FLUSH_OFFSET
str \ra, [\rd]
add \re, \ra, #8192 @ only necessary for 8k
1001: ldr \rd, [\ra], #DCACHELINESIZE
teq \re, \ra
bne 1001b
#ifdef FLUSH_BASE_MINICACHE
add \ra, \ra, #FLUSH_BASE_MINICACHE - FLUSH_BASE
add \re, \ra, #512 @ only 512 bytes
1002: ldr \rd, [\ra], #DCACHELINESIZE
teq \re, \ra
bne 1002b
#endif
.endm
.data
flush_base:
.long FLUSH_BASE
.text
__INIT
/*
* cpu_sa1100_proc_init()
*/
ENTRY(cpu_sa1100_proc_init)
mov r0, #0
mcr p15, 0, r0, c15, c1, 2 @ Enable clock switching
mcr p15, 0, r0, c9, c0, 5 @ Allow read-buffer operations from userland
mov pc, lr
.previous
/*
* cpu_sa1100_proc_fin()
*
* Prepare the CPU for reset:
* - Disable interrupts
* - Clean and turn off caches.
*/
ENTRY(cpu_sa1100_proc_fin)
stmfd sp!, {lr}
mov ip, #PSR_F_BIT | PSR_I_BIT | SVC_MODE
msr cpsr_c, ip
flush_1100_dcache r0, r1, r2 @ clean caches
mov r0, #0
mcr p15, 0, r0, c15, c2, 2 @ Disable clock switching
mrc p15, 0, r0, c1, c0, 0 @ ctrl register
bic r0, r0, #0x1000 @ ...i............
bic r0, r0, #0x000e @ ............wca.
mcr p15, 0, r0, c1, c0, 0 @ disable caches
ldmfd sp!, {pc}
/*
* cpu_sa1100_reset(loc)
*
* Perform a soft reset of the system. Put the CPU into the
* same state as it would be if it had been reset, and branch
* to what would be the reset vector.
*
* loc: location to jump to for soft reset
*/
.align 5
ENTRY(cpu_sa1100_reset)
mov ip, #0
mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches
mcr p15, 0, ip, c7, c10, 4 @ drain WB
mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs
mrc p15, 0, ip, c1, c0, 0 @ ctrl register
bic ip, ip, #0x000f @ ............wcam
bic ip, ip, #0x1100 @ ...i...s........
mcr p15, 0, ip, c1, c0, 0 @ ctrl register
mov pc, r0
/*
* cpu_sa1100_do_idle(type)
*
* Cause the processor to idle
*
* type: call type:
* 0 = slow idle
* 1 = fast idle
* 2 = switch to slow processor clock
* 3 = switch to fast processor clock
*/
.align 5
ENTRY(cpu_sa1100_do_idle)
mov r0, r0 @ 4 nop padding
mov r0, r0
mov r0, r0
mov r0, r0 @ 4 nop padding
mov r0, r0
mov r0, r0
mov r0, #0
ldr r1, =UNCACHEABLE_ADDR @ ptr to uncacheable address
@ --- aligned to a cache line
mcr p15, 0, r0, c15, c2, 2 @ disable clock switching
ldr r1, [r1, #0] @ force switch to MCLK
mcr p15, 0, r0, c15, c8, 2 @ wait for interrupt
mov r0, r0 @ safety
mcr p15, 0, r0, c15, c1, 2 @ enable clock switching
mov pc, lr
/* ================================= CACHE ================================ */
/*
* cpu_sa1100_dcache_clean_area(addr,sz)
*
* Clean the specified entry of any caches such that the MMU
* translation fetches will obtain correct data.
*
* addr: cache-unaligned virtual address
*/
.align 5
ENTRY(cpu_sa1100_dcache_clean_area)
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
add r0, r0, #DCACHELINESIZE
subs r1, r1, #DCACHELINESIZE
bhi 1b
mov pc, lr
/* =============================== PageTable ============================== */
/*
* cpu_sa1100_switch_mm(pgd)
*
* Set the translation base pointer to be as described by pgd.
*
* pgd: new page tables
*/
.align 5
ENTRY(cpu_sa1100_switch_mm)
flush_1100_dcache r3, ip, r1
mov ip, #0
mcr p15, 0, ip, c7, c5, 0 @ invalidate I cache
mcr p15, 0, ip, c9, c0, 0 @ invalidate RB
mcr p15, 0, ip, c7, c10, 4 @ drain WB
mcr p15, 0, r0, c2, c0, 0 @ load page table pointer
mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs
mov pc, lr
/*
* cpu_sa1100_set_pte(ptep, pte)
*
* Set a PTE and flush it out
*/
.align 5
ENTRY(cpu_sa1100_set_pte)
str r1, [r0], #-2048 @ linux version
eor r1, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY
bic r2, r1, #0xff0
bic r2, r2, #3
orr r2, r2, #PTE_TYPE_SMALL
tst r1, #L_PTE_USER @ User or Exec?
orrne r2, r2, #PTE_SMALL_AP_URO_SRW
tst r1, #L_PTE_WRITE | L_PTE_DIRTY @ Write and Dirty?
orreq r2, r2, #PTE_SMALL_AP_UNO_SRW
tst r1, #L_PTE_PRESENT | L_PTE_YOUNG @ Present and Young?
movne r2, #0
str r2, [r0] @ hardware version
mov r0, r0
mcr p15, 0, r0, c7, c10, 1 @ clean D entry
mcr p15, 0, r0, c7, c10, 4 @ drain WB
mov pc, lr
cpu_sa1100_name:
.asciz "StrongARM-1100"
cpu_sa1110_name:
.asciz "StrongARM-1110"
.align
__INIT
__sa1100_setup:
mov r10, #0
mcr p15, 0, r10, c7, c7 @ invalidate I,D caches on v4
mcr p15, 0, r10, c7, c10, 4 @ drain write buffer on v4
mcr p15, 0, r10, c8, c7 @ invalidate I,D TLBs on v4
mov r0, #0x1f @ Domains 0, 1 = client
mcr p15, 0, r0, c3, c0 @ load domain access register
mcr p15, 0, r4, c2, c0 @ load page table pointer
mrc p15, 0, r0, c1, c0 @ get control register v4
bic r0, r0, #0x0e00 @ ..VI ZFRS BLDP WCAM
bic r0, r0, #0x0002 @ .... 000. .... ..0.
orr r0, r0, #0x003d
orr r0, r0, #0x3100 @ ..11 ...1 ..11 11.1
mov pc, lr
.text
/*
* Purpose : Function pointers used to access above functions - all calls
* come through these
*/
/*
* SA1100 and SA1110 share the same function calls
*/
.type sa1100_processor_functions, #object
ENTRY(sa1100_processor_functions)
.word v4_early_abort
.word cpu_sa1100_proc_init
.word cpu_sa1100_proc_fin
.word cpu_sa1100_reset
.word cpu_sa1100_do_idle
.word cpu_sa1100_dcache_clean_area
.word cpu_sa1100_switch_mm
.word cpu_sa1100_set_pte
.size sa1100_processor_functions, . - sa1100_processor_functions
.type cpu_arch_name, #object
cpu_arch_name:
.asciz "armv4"
.size cpu_arch_name, . - cpu_arch_name
.type cpu_elf_name, #object
cpu_elf_name:
.asciz "v4"
.size cpu_elf_name, . - cpu_elf_name
.align
.section ".proc.info", #alloc, #execinstr
.type __sa1100_proc_info,#object
__sa1100_proc_info:
.long 0x4401a110
.long 0xfffffff0
.long 0x00000c0e
b __sa1100_setup
.long cpu_arch_name
.long cpu_elf_name
.long HWCAP_SWP | HWCAP_HALF | HWCAP_26BIT | HWCAP_FAST_MULT
.long cpu_sa1100_name
.long sa1100_processor_functions
.long v4wb_tlb_fns
.long v4_mc_user_fns
.long v4wb_cache_fns
.size __sa1100_proc_info, . - __sa1100_proc_info
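@ (Added note, not in the original source: each .proc.info record is,
@ in order: CPU id value, id mask, MMU section flags, a branch to the
@ setup function, architecture and ELF name pointers, HWCAP flags, the
@ CPU name, then the processor, TLB, user-space and cache function
@ tables selected at boot.)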
.type __sa1110_proc_info,#object
__sa1110_proc_info:
.long 0x6901b110
.long 0xfffffff0
.long 0x00000c0e
b __sa1100_setup
.long cpu_arch_name
.long cpu_elf_name
.long HWCAP_SWP | HWCAP_HALF | HWCAP_26BIT | HWCAP_FAST_MULT
.long cpu_sa1110_name
.long sa1100_processor_functions
.long v4wb_tlb_fns
.long v4_mc_user_fns
.long v4wb_cache_fns
.size __sa1110_proc_info, . - __sa1110_proc_info
@@ -27,7 +27,6 @@ EXPORT_SYMBOL(cpu_dcache_invalidate_range);
 EXPORT_SYMBOL(cpu_icache_invalidate_range);
 EXPORT_SYMBOL(cpu_icache_invalidate_page);
 EXPORT_SYMBOL(cpu_set_pgd);
-EXPORT_SYMBOL(cpu_flush_pmd);
 EXPORT_SYMBOL(cpu_set_pte);
 #else
 EXPORT_SYMBOL(processor);
...
@@ -35,6 +35,9 @@ SECTIONS
 	__setup_start = .;
 	*(.init.setup)
 	__setup_end = .;
+	__early_begin = .;
+	*(__early_param)
+	__early_end = .;
 	__start___param = .;
 	*(__param)
 	__stop___param = .;
...
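The new markers bracket an __early_param section. A hedged sketch of the convention they support (struct layout and walker reconstructed from the ARM boot code of this era; not part of this hunk):

/* Sketch only: an early command-line handler is registered by placing
 * an entry in the __early_param section... */
struct early_params {
	const char *arg;
	void (*fn)(char **p);
};

#define __early_param(name, fn)					\
	static struct early_params __early_##fn			\
	__attribute__((__section__("__early_param"))) = { name, fn }

extern struct early_params __early_begin[], __early_end[];

/* ...and, early in boot, the command line is matched against each entry. */
static void __init check_early_params(char *from)
{
	struct early_params *p;

	for (p = __early_begin; p < __early_end; p++)
		if (memcmp(from, p->arg, strlen(p->arg)) == 0)
			p->fn(&from);
}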
@@ -3885,7 +3885,7 @@ static int floppy_read_block_0(struct gendisk *disk)
 	struct block_device *bdev;
 	int ret;
-	bdev = bdget(MKDEV(disk->major, disk->first_minor));
+	bdev = bdget_disk(disk, 0);
 	if (!bdev) {
 		printk("No block device for %s\n", disk->disk_name);
 		BUG();
...
@@ -3995,7 +3995,7 @@ static int floppy_read_block_0(struct gendisk *disk)
 	struct block_device *bdev;
 	int ret;
-	bdev = bdget(MKDEV(disk->major, disk->first_minor));
+	bdev = bdget_disk(disk, 0);
 	if (!bdev) {
 		printk("No block device for %s\n", disk->disk_name);
 		BUG();
...
@@ -721,6 +721,17 @@ int bdev_read_only(struct block_device *bdev)
 	return disk->policy;
 }

+int invalidate_partition(struct gendisk *disk, int index)
+{
+	int res = 0;
+	struct block_device *bdev = bdget_disk(disk, index);
+	if (bdev)
+		res = __invalidate_device(bdev, 1);
+	bdput(bdev);
+	return res;
+}
+
 EXPORT_SYMBOL(bdev_read_only);
 EXPORT_SYMBOL(set_device_ro);
 EXPORT_SYMBOL(set_disk_ro);
+EXPORT_SYMBOL(invalidate_partition);
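A hedged usage sketch of the new export (the caller below is hypothetical, not from this commit): dropping cached state for every partition of a disk, e.g. before rereading its partition table.

/* Hypothetical helper, for illustration only. */
static void invalidate_all_partitions(struct gendisk *disk)
{
	int part;

	for (part = 1; part < disk->minors; part++)
		invalidate_partition(disk, part);
}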
@@ -62,7 +62,7 @@ static int blkpg_ioctl(struct block_device *bdev, struct blkpg_ioctl_arg *arg)
 	if (disk->part[part - 1]->nr_sects == 0)
 		return -ENXIO;
 	/* partition in use? Incomplete check for now. */
-	bdevp = bdget(MKDEV(disk->major, disk->first_minor) + part);
+	bdevp = bdget_disk(disk, part);
 	if (!bdevp)
 		return -ENOMEM;
 	if (bd_claim(bdevp, &holder) < 0) {
...
@@ -153,39 +153,17 @@ static struct file_system_type capifs_fs_type = {
 	.kill_sb = kill_anon_super,
 };

-static spinlock_t entries_lock = SPIN_LOCK_UNLOCKED;
 static struct vfsmount *capifs_mnt;
-static int entry_count = 0;
+static int entry_count;

-static struct vfsmount *grab_instance(void)
+static int grab_instance(void)
 {
-	struct vfsmount *mnt = NULL;
-	spin_lock(&entries_lock);
-	if (!capifs_mnt) {
-		spin_unlock(&entries_lock);
-		mnt = kern_mount(&capifs_fs_type);
-		if (IS_ERR(mnt))
-			return NULL;
-		spin_lock(&entries_lock);
-		if (!capifs_mnt)
-			capifs_mnt = mnt;
-	}
-	mntget(capifs_mnt);
-	entry_count++;
-	spin_unlock(&entries_lock);
-	mntput(mnt);
-	return capifs_mnt;
+	return simple_pin_fs("capifs", &capifs_mnt, &entry_count);
 }

 static void drop_instance(void)
 {
-	struct vfsmount *mnt;
-	spin_lock(&entries_lock);
-	mnt = capifs_mnt;
-	if (!--entry_count)
-		capifs_mnt = NULL;
-	spin_unlock(&entries_lock);
-	mntput(mnt);
+	return simple_release_fs(&capifs_mnt, &entry_count);
 }

 static struct dentry *get_node(int type, int num)
@@ -207,7 +185,7 @@ void capifs_new_ncci(char type, unsigned int num, dev_t device)
 	struct dentry *dentry;
 	struct inode *inode;

-	if (!grab_instance())
+	if (grab_instance() < 0)
 		return;
 	sb = capifs_mnt->mnt_sb;
 	inode = new_inode(sb);
@@ -232,7 +210,7 @@ void capifs_new_ncci(char type, unsigned int num, dev_t device)
 void capifs_free_ncci(char type, unsigned int num)
 {
-	if (grab_instance()) {
+	if (grab_instance() == 0) {
 		struct dentry *dentry = get_node(type, num);
 		if (!IS_ERR(dentry)) {
 			struct inode *inode = dentry->d_inode;
...
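The hand-rolled mount pinning above is replaced by the generic libfs helpers. A hedged sketch of what those helpers do, reconstructed from the removed capifs code rather than quoted from fs/libfs.c:

/* Sketch only: mirrors the logic the capifs-private code used to have. */
static spinlock_t pin_fs_lock = SPIN_LOCK_UNLOCKED;

int simple_pin_fs(char *name, struct vfsmount **mount, int *count)
{
	struct vfsmount *mnt = NULL;

	spin_lock(&pin_fs_lock);
	if (!*mount) {
		spin_unlock(&pin_fs_lock);
		mnt = do_kern_mount(name, 0, name, NULL);
		if (IS_ERR(mnt))
			return PTR_ERR(mnt);
		spin_lock(&pin_fs_lock);
		if (!*mount)
			*mount = mnt;
	}
	mntget(*mount);
	++*count;
	spin_unlock(&pin_fs_lock);
	mntput(mnt);	/* drops the extra ref if we lost the race */
	return 0;
}

void simple_release_fs(struct vfsmount **mount, int *count)
{
	struct vfsmount *mnt;

	spin_lock(&pin_fs_lock);
	mnt = *mount;
	if (!--*count)
		*mount = NULL;
	spin_unlock(&pin_fs_lock);
	mntput(mnt);
}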
@@ -450,11 +450,11 @@ static int __info(struct mapped_device *md, struct dm_ioctl *param)
 	if (dm_suspended(md))
 		param->flags |= DM_SUSPEND_FLAG;
-	param->dev = MKDEV(disk->major, disk->first_minor);
-	bdev = bdget(param->dev);
+	bdev = bdget_disk(disk, 0);
 	if (!bdev)
 		return -ENXIO;
+	param->dev = bdev->bd_dev;
 	param->open_count = bdev->bd_openers;
 	bdput(bdev);
...
@@ -345,26 +345,21 @@ static struct dm_dev *find_device(struct list_head *l, dev_t dev)
 static int open_dev(struct dm_dev *d, dev_t dev)
 {
 	static char *_claim_ptr = "I belong to device-mapper";
+	struct block_device *bdev;
 	int r;

 	if (d->bdev)
 		BUG();

-	d->bdev = bdget(dev);
-	if (!d->bdev)
-		return -ENOMEM;
-	r = blkdev_get(d->bdev, d->mode, 0, BDEV_RAW);
+	bdev = open_by_devnum(dev, d->mode, BDEV_RAW);
+	if (IS_ERR(bdev))
+		return PTR_ERR(bdev);
+	r = bd_claim(bdev, _claim_ptr);
 	if (r)
-		return r;
-	r = bd_claim(d->bdev, _claim_ptr);
-	if (r) {
-		blkdev_put(d->bdev, BDEV_RAW);
-		d->bdev = NULL;
-	}
+		blkdev_put(bdev, BDEV_RAW);
+	else
+		d->bdev = bdev;
 	return r;
 }
...
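For symmetry, the matching teardown under the new scheme releases the claim before dropping the reference; a hedged sketch (the real close_dev in dm-table.c is not shown in this excerpt):

/* Sketch: undo open_dev() above -- release the bd_claim, then the
 * reference that open_by_devnum() took. */
static void close_dev(struct dm_dev *d)
{
	if (!d->bdev)
		return;

	bd_release(d->bdev);
	blkdev_put(d->bdev, BDEV_RAW);
	d->bdev = NULL;
}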
@@ -55,6 +55,10 @@
 #include <asm/system.h>
 #include <asm/uaccess.h>

+#ifdef __arm__
+#include <asm/mach-types.h>
+#endif
+
 #include "cyber2000fb.h"

 struct cfb_info {
...
@@ -367,6 +367,11 @@ extern void blk_register_region(dev_t dev, unsigned long range,
 		void *data);
 extern void blk_unregister_region(dev_t dev, unsigned long range);

+static inline struct block_device *bdget_disk(struct gendisk *disk, int index)
+{
+	return bdget(MKDEV(disk->major, disk->first_minor) + index);
+}
+
 #endif
 #endif
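The conversions earlier in this commit all follow the same pattern; an illustrative before/after using the identifiers shown above:

/* before: each caller spells out the minor arithmetic itself */
bdev = bdget(MKDEV(disk->major, disk->first_minor) + part);

/* after: the helper hides the dev_t layout */
bdev = bdget_disk(disk, part);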