Commit ac7ebfb2 authored by Russell King

[ARM] Remove more remnants of 26-bit ARM support.

This removes include/asm-arm/proc-armv entirely, merging the
contents into the relevant include files in include/asm-arm.
We also update various files in arch/arm which reference
definitions in the now non-existent directory.
parent de950cef
......@@ -30,12 +30,6 @@ endif
check_gcc = $(shell if $(CC) $(1) -S -o /dev/null -xc /dev/null > /dev/null 2>&1; then echo "$(1)"; else echo "$(2)"; fi)
comma = ,
# Select CPU dependent flags. Note that order of declaration is important;
# the options further down the list override previous items.
#
apcs-$(CONFIG_CPU_32) :=-mapcs-32
apcs-$(CONFIG_CPU_26) :=-mapcs-26 -mcpu=arm3
# This selects which instruction set is used.
# Note that GCC does not numerically define an architecture version
# macro, but instead defines a whole series of macros which makes
......@@ -55,11 +49,9 @@ tune-$(CONFIG_CPU_SA110) :=-mtune=strongarm110
tune-$(CONFIG_CPU_SA1100) :=-mtune=strongarm1100
tune-$(CONFIG_CPU_XSCALE) :=$(call check_gcc,-mtune=xscale,-mtune=strongarm110)
# Force -mno-fpu to be passed to the assembler. Some versions of gcc don't
# do this with -msoft-float
CFLAGS_BOOT :=$(apcs-y) $(arch-y) $(tune-y) -mshort-load-bytes -msoft-float -Wa,-mno-fpu -Uarm
CFLAGS +=$(apcs-y) $(arch-y) $(tune-y) -mshort-load-bytes -msoft-float -Wa,-mno-fpu -Uarm
AFLAGS +=$(apcs-y) $(arch-y) $(tune-y) -msoft-float -Wa,-mno-fpu
CFLAGS_BOOT :=-mapcs-32 $(arch-y) $(tune-y) -mshort-load-bytes -msoft-float -Wa,-mno-fpu -Uarm
CFLAGS +=-mapcs-32 $(arch-y) $(tune-y) -mshort-load-bytes -msoft-float -Wa,-mno-fpu -Uarm
AFLAGS +=-mapcs-32 $(arch-y) $(tune-y) -msoft-float -Wa,-mno-fpu
#Default value
DATAADDR := .
......@@ -146,16 +138,10 @@ include/asm-arm/.arch: $(wildcard include/config/arch/*.h)
@ln -sf arch-$(INCDIR) include/asm-arm/arch
@touch $@
include/asm-arm/.proc: $(wildcard include/config/cpu/32.h)
@echo ' Making asm-arm/proc -> asm-arm/proc-armv symlink'
@rm -f include/asm-arm/proc
@ln -sf proc-armv include/asm-arm/proc
@touch $@
prepare: maketools
.PHONY: maketools FORCE
maketools: include/asm-arm/.arch include/asm-arm/.proc \
maketools: include/asm-arm/.arch \
include/asm-arm/constants.h include/linux/version.h FORCE
$(Q)$(MAKE) $(build)=arch/arm/tools include/asm-arm/mach-types.h
......@@ -171,7 +157,6 @@ zinstall install: vmlinux
MRPROPER_FILES += \
include/asm-arm/arch include/asm-arm/.arch \
include/asm-arm/proc include/asm-arm/.proc \
include/asm-arm/constants.h* \
include/asm-arm/mach-types.h
......@@ -203,7 +188,7 @@ zi:; $(Q)$(MAKE) $(build)=$(boot) zinstall
)
arch/$(ARCH)/kernel/asm-offsets.s: include/asm include/linux/version.h \
include/asm-arm/.arch include/asm-arm/.proc \
include/asm-arm/.arch \
include/config/MARKER
include/asm-$(ARCH)/constants.h: arch/$(ARCH)/kernel/asm-offsets.s
......
......@@ -15,10 +15,12 @@
*/
#include <linux/config.h>
#include <linux/init.h>
#include "entry-header.S"
#include <asm/thread_info.h>
#include <asm/glue.h>
#include <asm/ptrace.h>
#include "entry-header.S"
#ifdef IOC_BASE
/* IOC / IOMD based hardware */
......
......@@ -8,8 +8,11 @@
* published by the Free Software Foundation.
*/
#include <linux/config.h>
#include "entry-header.S"
#include <asm/thread_info.h>
#include <asm/ptrace.h>
#include "entry-header.S"
/*
* We rely on the fact that R0 is at the bottom of the stack (due to
......
......@@ -16,6 +16,7 @@
#include <asm/assembler.h>
#include <asm/mach-types.h>
#include <asm/procinfo.h>
#include <asm/ptrace.h>
#include <asm/mach/arch.h>
/*
......
......@@ -11,7 +11,6 @@
#include <asm/hardware.h>
#include <asm/page.h>
#include <asm/proc/domain.h>
#include <asm/mach/map.h>
#include <asm/arch/hardware.h>
......
......@@ -21,7 +21,7 @@
#include <asm/mach-types.h>
#include <asm/hardware.h>
#include <asm/page.h>
#include <asm/proc/domain.h>
#include <asm/domain.h>
#include <asm/setup.h>
#include <asm/mach/map.h>
......
......@@ -34,8 +34,8 @@ struct cachepolicy {
};
static struct cachepolicy cache_policies[] __initdata = {
{ "uncached", CR1_W|CR1_C, PMD_SECT_UNCACHED },
{ "buffered", CR1_C, PMD_SECT_BUFFERED },
{ "uncached", CR_W|CR_C, PMD_SECT_UNCACHED },
{ "buffered", CR_C, PMD_SECT_BUFFERED },
{ "writethrough", 0, PMD_SECT_WT },
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
{ "writeback", 0, PMD_SECT_WB },
......@@ -102,8 +102,8 @@ __early_param("ecc=", early_ecc);
static int __init noalign_setup(char *__unused)
{
cr_alignment &= ~CR1_A;
cr_no_alignment &= ~CR1_A;
cr_alignment &= ~CR_A;
cr_no_alignment &= ~CR_A;
set_cr(cr_alignment);
return 1;
}
......
......@@ -30,6 +30,7 @@
#include <asm/assembler.h>
#include <asm/constants.h>
#include <asm/procinfo.h>
#include <asm/ptrace.h>
#include <asm/hardware.h>
/*
......
......@@ -15,6 +15,7 @@
#include <asm/assembler.h>
#include <asm/constants.h>
#include <asm/procinfo.h>
#include <asm/ptrace.h>
ENTRY(cpu_arm6_dcache_clean_area)
ENTRY(cpu_arm7_dcache_clean_area)
......
......@@ -35,6 +35,7 @@
#include <asm/assembler.h>
#include <asm/constants.h>
#include <asm/procinfo.h>
#include <asm/ptrace.h>
#include <asm/hardware.h>
/*
......
......@@ -31,6 +31,7 @@
#include <asm/procinfo.h>
#include <asm/hardware.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include "proc-macros.S"
/*
......
......@@ -32,6 +32,7 @@
#include <asm/procinfo.h>
#include <asm/hardware.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include "proc-macros.S"
/*
......
......@@ -31,6 +31,7 @@
#include <asm/procinfo.h>
#include <asm/hardware.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include "proc-macros.S"
/*
......
......@@ -18,7 +18,8 @@
#include <asm/constants.h>
#include <asm/procinfo.h>
#include <asm/hardware.h>
#include <asm/proc/pgtable.h>
#include <asm/pgtable.h>
#include <asm/ptrace.h>
/*
* the cache line size of the I and D cache
......
......@@ -23,7 +23,8 @@
#include <asm/constants.h>
#include <asm/procinfo.h>
#include <asm/hardware.h>
#include <asm/proc/pgtable.h>
#include <asm/pgtable.h>
#include <asm/ptrace.h>
/*
* the cache line size of the I and D cache
......
......@@ -25,8 +25,9 @@
#include <asm/assembler.h>
#include <asm/procinfo.h>
#include <asm/hardware.h>
#include <asm/proc/pgtable.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include "proc-macros.S"
/*
......
/*
* linux/asm/assembler.h
* linux/include/asm-arm/assembler.h
*
* This file contains arm architecture specific defines
* for the different processors.
* Copyright (C) 1996-2000 Russell King
*
* Do not include any C declarations in this file - it is included by
* assembler source.
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This file contains arm architecture specific defines
* for the different processors.
*
* Do not include any C declarations in this file - it is included by
* assembler source.
*/
#ifndef __ASSEMBLY__
#error "Only include this from assembly code"
#endif
#include <asm/proc/ptrace.h>
#include <asm/proc/assembler.h>
#include <asm/ptrace.h>
/*
* Endian independent macros for shifting bytes within registers.
......@@ -36,3 +41,63 @@
#define PLD(code...)
#endif
#define MODE_USR USR_MODE
#define MODE_FIQ FIQ_MODE
#define MODE_IRQ IRQ_MODE
#define MODE_SVC SVC_MODE
#define DEFAULT_FIQ MODE_FIQ
/*
* LOADREGS - ldm with PC in register list (eg, ldmfd sp!, {pc})
*/
#ifdef __STDC__
#define LOADREGS(cond, base, reglist...)\
ldm##cond base,reglist
#else
#define LOADREGS(cond, base, reglist...)\
ldm/**/cond base,reglist
#endif
/*
* Build a return instruction for this processor type.
*/
#define RETINSTR(instr, regs...)\
instr regs
/*
* Save the current IRQ state and disable IRQs. Note that this macro
* assumes FIQs are enabled, and that the processor is in SVC mode.
*/
.macro save_and_disable_irqs, oldcpsr, temp
mrs \oldcpsr, cpsr
mov \temp, #PSR_I_BIT | MODE_SVC
msr cpsr_c, \temp
.endm
/*
* Restore interrupt state previously stored in a register. We don't
* guarantee that this will preserve the flags.
*/
.macro restore_irqs, oldcpsr
msr cpsr_c, \oldcpsr
.endm
/*
* These two are used to save LR/restore PC over a user-based access.
* The old 26-bit architecture requires that we do. On 32-bit
* architecture, we can safely ignore this requirement.
*/
.macro save_lr
.endm
.macro restore_pc
mov pc, lr
.endm
#define USER(x...) \
9999: x; \
.section __ex_table,"a"; \
.align 3; \
.long 9999b,9001f; \
.previous
......@@ -27,7 +27,7 @@ typedef struct { volatile int counter; } atomic_t;
#define ATOMIC_INIT(i) { (i) }
#ifdef __KERNEL__
#include <asm/proc/system.h>
#include <asm/system.h>
#define atomic_read(v) ((v)->counter)
#define atomic_set(v,i) (((v)->counter) = (i))
......
/*
* linux/include/asm-arm/cacheflush.h
*
* Copyright (C) 2000-2002 Russell King
* Copyright (C) 1999-2002 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
......@@ -12,6 +12,275 @@
#include <linux/sched.h>
#include <linux/mm.h>
#include <asm/proc/cache.h>
#include <asm/mman.h>
#include <asm/glue.h>
/*
* Cache Model
* ===========
*/
#undef _CACHE
#undef MULTI_CACHE
#if defined(CONFIG_CPU_ARM610) || defined(CONFIG_CPU_ARM710)
# ifdef _CACHE
# define MULTI_CACHE 1
# else
# define _CACHE v3
# endif
#endif
#if defined(CONFIG_CPU_ARM720T)
# ifdef _CACHE
# define MULTI_CACHE 1
# else
# define _CACHE v4
# endif
#endif
#if defined(CONFIG_CPU_ARM920T) || defined(CONFIG_CPU_ARM922T) || \
defined(CONFIG_CPU_ARM1020)
# define MULTI_CACHE 1
#endif
#if defined(CONFIG_CPU_ARM926T)
# ifdef _CACHE
# define MULTI_CACHE 1
# else
# define _CACHE arm926
# endif
#endif
#if defined(CONFIG_CPU_SA110) || defined(CONFIG_CPU_SA1100)
# ifdef _CACHE
# define MULTI_CACHE 1
# else
# define _CACHE v4wb
# endif
#endif
#if defined(CONFIG_CPU_XSCALE)
# ifdef _CACHE
# define MULTI_CACHE 1
# else
# define _CACHE xscale
# endif
#endif
#if !defined(_CACHE) && !defined(MULTI_CACHE)
#error Unknown cache maintenance model
#endif
/*
* This flag is used to indicate that the page pointed to by a pte
* is dirty and requires cleaning before returning it to the user.
*/
#define PG_dcache_dirty PG_arch_1
/*
* MM Cache Management
* ===================
*
* The arch/arm/mm/cache-*.S and arch/arm/mm/proc-*.S files
* implement these methods.
*
* Start addresses are inclusive and end addresses are exclusive;
* start addresses should be rounded down, end addresses up.
*
* See linux/Documentation/cachetlb.txt for more information.
* Please note that the implementation of these, and the required
* effects are cache-type (VIVT/VIPT/PIPT) specific.
*
* flush_cache_kern_all()
*
* Unconditionally clean and invalidate the entire cache.
*
* flush_cache_user_mm(mm)
*
* Clean and invalidate all user space cache entries
* before a change of page tables.
*
* flush_cache_user_range(start, end, flags)
*
* Clean and invalidate a range of cache entries in the
* specified address space before a change of page tables.
* - start - user start address (inclusive, page aligned)
* - end - user end address (exclusive, page aligned)
* - flags - vma->vm_flags field
*
* coherent_kern_range(start, end)
*
* Ensure coherency between the Icache and the Dcache in the
* region described by start, end. If you have non-snooping
* Harvard caches, you need to implement this function.
* - start - virtual start address
* - end - virtual end address
*
* DMA Cache Coherency
* ===================
*
* dma_inv_range(start, end)
*
* Invalidate (discard) the specified virtual address range.
* May not write back any entries. If 'start' or 'end'
* are not cache line aligned, those lines must be written
* back.
* - start - virtual start address
* - end - virtual end address
*
* dma_clean_range(start, end)
*
* Clean (write back) the specified virtual address range.
* - start - virtual start address
* - end - virtual end address
*
* dma_flush_range(start, end)
*
* Clean and invalidate the specified virtual address range.
* - start - virtual start address
* - end - virtual end address
*/
struct cpu_cache_fns {
void (*flush_kern_all)(void);
void (*flush_user_all)(void);
void (*flush_user_range)(unsigned long, unsigned long, unsigned int);
void (*coherent_kern_range)(unsigned long, unsigned long);
void (*flush_kern_dcache_page)(void *);
void (*dma_inv_range)(unsigned long, unsigned long);
void (*dma_clean_range)(unsigned long, unsigned long);
void (*dma_flush_range)(unsigned long, unsigned long);
};
/*
* Select the calling method
*/
#ifdef MULTI_CACHE
extern struct cpu_cache_fns cpu_cache;
#define __cpuc_flush_kern_all cpu_cache.flush_kern_all
#define __cpuc_flush_user_all cpu_cache.flush_user_all
#define __cpuc_flush_user_range cpu_cache.flush_user_range
#define __cpuc_coherent_kern_range cpu_cache.coherent_kern_range
#define __cpuc_flush_dcache_page cpu_cache.flush_kern_dcache_page
/*
* These are private to the dma-mapping API. Do not use directly.
* Their sole purpose is to ensure that data held in the cache
* is visible to DMA, or data written by DMA to system memory is
* visible to the CPU.
*/
#define dmac_inv_range cpu_cache.dma_inv_range
#define dmac_clean_range cpu_cache.dma_clean_range
#define dmac_flush_range cpu_cache.dma_flush_range
#else
#define __cpuc_flush_kern_all __glue(_CACHE,_flush_kern_cache_all)
#define __cpuc_flush_user_all __glue(_CACHE,_flush_user_cache_all)
#define __cpuc_flush_user_range __glue(_CACHE,_flush_user_cache_range)
#define __cpuc_coherent_kern_range __glue(_CACHE,_coherent_kern_range)
#define __cpuc_flush_dcache_page __glue(_CACHE,_flush_kern_dcache_page)
extern void __cpuc_flush_kern_all(void);
extern void __cpuc_flush_user_all(void);
extern void __cpuc_flush_user_range(unsigned long, unsigned long, unsigned int);
extern void __cpuc_coherent_kern_range(unsigned long, unsigned long);
extern void __cpuc_flush_dcache_page(void *);
/*
* These are private to the dma-mapping API. Do not use directly.
* Their sole purpose is to ensure that data held in the cache
* is visible to DMA, or data written by DMA to system memory is
* visible to the CPU.
*/
#define dmac_inv_range __glue(_CACHE,_dma_inv_range)
#define dmac_clean_range __glue(_CACHE,_dma_clean_range)
#define dmac_flush_range __glue(_CACHE,_dma_flush_range)
extern void dmac_inv_range(unsigned long, unsigned long);
extern void dmac_clean_range(unsigned long, unsigned long);
extern void dmac_flush_range(unsigned long, unsigned long);
#endif
/*
* Convert calls to our calling convention.
*/
#define flush_cache_all() __cpuc_flush_kern_all()
static inline void flush_cache_mm(struct mm_struct *mm)
{
if (current->active_mm == mm)
__cpuc_flush_user_all();
}
static inline void
flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
if (current->active_mm == vma->vm_mm)
__cpuc_flush_user_range(start & PAGE_MASK, PAGE_ALIGN(end),
vma->vm_flags);
}
static inline void
flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr)
{
if (current->active_mm == vma->vm_mm) {
unsigned long addr = user_addr & PAGE_MASK;
__cpuc_flush_user_range(addr, addr + PAGE_SIZE, vma->vm_flags);
}
}
/*
* Perform necessary cache operations to ensure that data previously
* stored within this range of addresses can be executed by the CPU.
*/
#define flush_icache_range(s,e) __cpuc_coherent_kern_range(s,e)
/*
* Perform necessary cache operations to ensure that the TLB will
* see data written in the specified area.
*/
#define clean_dcache_area(start,size) cpu_dcache_clean_area(start, size)
/*
* flush_dcache_page is used when the kernel has written to the page
* cache page at virtual address page->virtual.
*
* If this page isn't mapped (ie, page->mapping = NULL), or it has
* userspace mappings (page->mapping->i_mmap or page->mapping->i_mmap_shared)
* then we _must_ always clean + invalidate the dcache entries associated
* with the kernel mapping.
*
* Otherwise we can defer the operation, and clean the cache when we are
* about to change to user space. This is the same method as used on SPARC64.
* See update_mmu_cache for the user space part.
*/
#define mapping_mapped(map) (!list_empty(&(map)->i_mmap) || \
!list_empty(&(map)->i_mmap_shared))
extern void __flush_dcache_page(struct page *);
static inline void flush_dcache_page(struct page *page)
{
if (page->mapping && !mapping_mapped(page->mapping))
set_bit(PG_dcache_dirty, &page->flags);
else
__flush_dcache_page(page);
}
#define flush_icache_user_range(vma,page,addr,len) \
flush_dcache_page(page)
/*
* We don't appear to need to do anything here. In fact, if we did, we'd
* duplicate cache flushing elsewhere performed by flush_dcache_page().
*/
#define flush_icache_page(vma,page) do { } while (0)
#endif
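As an illustrative aside (not something this patch adds), a caller that writes instructions into a buffer would use the coherency interface documented above roughly as follows; buf, insns and len are hypothetical:

	/* sketch: make freshly written code visible to the instruction cache */
	memcpy(buf, insns, len);
	flush_icache_range((unsigned long)buf, (unsigned long)buf + len);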
/*
* linux/include/asm-arm/proc-armv/domain.h
* linux/include/asm-arm/domain.h
*
* Copyright (C) 1999 Russell King.
*
......
......@@ -7,7 +7,6 @@
#include <asm/ptrace.h>
#include <asm/user.h>
#include <asm/proc/elf.h>
#include <asm/procinfo.h>
typedef unsigned long elf_greg_t;
......@@ -42,6 +41,7 @@ typedef struct user_fp elf_fpregset_t;
#define ELF_ARCH EM_ARM
#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE 4096
/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
use of this is to invoke "./ld.so someprog" to test out a new version of
......@@ -76,4 +76,29 @@ typedef struct user_fp elf_fpregset_t;
extern char elf_platform[];
#define ELF_PLATFORM (elf_platform)
#ifdef __KERNEL__
/*
* 32-bit code is always OK. Some cpus can do 26-bit, some can't.
*/
#define ELF_PROC_OK(x) (ELF_THUMB_OK(x) && ELF_26BIT_OK(x))
#define ELF_THUMB_OK(x) \
(( (elf_hwcap & HWCAP_THUMB) && ((x)->e_entry & 1) == 1) || \
((x)->e_entry & 3) == 0)
#define ELF_26BIT_OK(x) \
(( (elf_hwcap & HWCAP_26BIT) && (x)->e_flags & EF_ARM_APCS26) || \
((x)->e_flags & EF_ARM_APCS26) == 0)
/* Old NetWinder binaries were compiled in such a way that the iBCS
heuristic always trips on them. Until these binaries become uncommon
enough not to care, don't trust the `ibcs' flag here. In any case
there is no other ELF system currently supported by iBCS.
@@ Could print a warning message to encourage users to upgrade. */
#define SET_PERSONALITY(ex,ibcs2) \
set_personality(((ex).e_flags&EF_ARM_APCS26 ?PER_LINUX :PER_LINUX_32BIT))
#endif
#endif
/*
* linux/include/asm-arm/proc-armv/locks.h
* linux/include/asm-arm/locks.h
*
* Copyright (C) 2000 Russell King
*
......
......@@ -15,6 +15,8 @@
#include <linux/config.h>
#include <asm/arch/memory.h>
#ifndef __ASSEMBLY__
/*
* PFNs are used to describe any physical page; this means
* PFN 0 == physical address 0.
......@@ -120,3 +122,5 @@ static inline void *phys_to_virt(unsigned long x)
#define page_to_bus(page) (virt_to_bus(page_address(page)))
#endif
#endif
/*
* linux/include/asm-arm/page.h
*
* Copyright (C) 1995-2003 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef _ASMARM_PAGE_H
#define _ASMARM_PAGE_H
#include <linux/config.h>
/* PAGE_SHIFT determines the page size */
#define PAGE_SHIFT 12
#define PAGE_SIZE (1UL << PAGE_SHIFT)
#define PAGE_MASK (~(PAGE_SIZE-1))
#ifdef __KERNEL__
/* to align the pointer to the (next) page boundary */
#define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK)
#ifndef __ASSEMBLY__
#include <asm/glue.h>
......@@ -119,10 +137,12 @@ extern void copy_page(void *to, void *from);
*/
typedef struct { unsigned long pte; } pte_t;
typedef struct { unsigned long pmd; } pmd_t;
typedef struct { unsigned long pgd[2]; } pgd_t;
typedef struct { unsigned long pgprot; } pgprot_t;
#define pte_val(x) ((x).pte)
#define pmd_val(x) ((x).pmd)
#define pgd_val(x) ((x).pgd[0])
#define pgprot_val(x) ((x).pgprot)
#define __pte(x) ((pte_t) { (x) } )
......@@ -135,10 +155,12 @@ typedef struct { unsigned long pgprot; } pgprot_t;
*/
typedef unsigned long pte_t;
typedef unsigned long pmd_t;
typedef unsigned long pgd_t[2];
typedef unsigned long pgprot_t;
#define pte_val(x) (x)
#define pmd_val(x) (x)
#define pgd_val(x) ((x)[0])
#define pgprot_val(x) (x)
#define __pte(x) (x)
......@@ -146,19 +168,6 @@ typedef unsigned long pgprot_t;
#define __pgprot(x) (x)
#endif /* STRICT_MM_TYPECHECKS */
#endif /* !__ASSEMBLY__ */
#endif /* __KERNEL__ */
#include <asm/proc/page.h>
#define PAGE_SIZE (1UL << PAGE_SHIFT)
#define PAGE_MASK (~(PAGE_SIZE-1))
/* to align the pointer to the (next) page boundary */
#define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK)
#ifdef __KERNEL__
#ifndef __ASSEMBLY__
/* Pure 2^n version of get_order */
static inline int get_order(unsigned long size)
......
......@@ -11,7 +11,6 @@
#define __ASM_PARAM_H
#include <asm/arch/param.h> /* for HZ */
#include <asm/proc/page.h> /* for EXEC_PAGE_SIZE */
#ifndef __KERNEL_HZ
#define __KERNEL_HZ 100
......@@ -25,6 +24,8 @@
# define HZ 100
#endif
#define EXEC_PAGESIZE 4096
#ifndef NGROUPS
#define NGROUPS 32
#endif
......
......@@ -11,7 +11,8 @@
#define _ASMARM_PGALLOC_H
#include <asm/processor.h>
#include <asm/proc/pgalloc.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
/*
* Since we have only two-level page tables, these are trivial
......@@ -28,4 +29,104 @@ extern void free_pgd_slow(pgd_t *pgd);
#define check_pgt_cache() do { } while (0)
/*
* Allocate one PTE table.
*
* This actually allocates two hardware PTE tables, but we wrap this up
* into one table thus:
*
* +------------+
* | h/w pt 0 |
* +------------+
* | h/w pt 1 |
* +------------+
* | Linux pt 0 |
* +------------+
* | Linux pt 1 |
* +------------+
*/
static inline pte_t *
pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr)
{
pte_t *pte;
pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
if (pte) {
clear_page(pte);
clean_dcache_area(pte, sizeof(pte_t) * PTRS_PER_PTE);
pte += PTRS_PER_PTE;
}
return pte;
}
static inline struct page *
pte_alloc_one(struct mm_struct *mm, unsigned long addr)
{
struct page *pte;
pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT, 0);
if (pte) {
void *page = page_address(pte);
clear_page(page);
clean_dcache_area(page, sizeof(pte_t) * PTRS_PER_PTE);
}
return pte;
}
/*
* Free one PTE table.
*/
static inline void pte_free_kernel(pte_t *pte)
{
if (pte) {
pte -= PTRS_PER_PTE;
free_page((unsigned long)pte);
}
}
static inline void pte_free(struct page *pte)
{
__free_page(pte);
}
/*
* Populate the pmdp entry with a pointer to the pte. This pmd is part
* of the mm address space.
*
* Ensure that we always set both PMD entries.
*/
static inline void
pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep)
{
unsigned long pte_ptr = (unsigned long)ptep;
unsigned long pmdval;
BUG_ON(mm != &init_mm);
/*
* The pmd must be loaded with the physical
* address of the PTE table
*/
pte_ptr -= PTRS_PER_PTE * sizeof(void *);
pmdval = __pa(pte_ptr) | _PAGE_KERNEL_TABLE;
pmdp[0] = __pmd(pmdval);
pmdp[1] = __pmd(pmdval + 256 * sizeof(pte_t));
flush_pmd_entry(pmdp);
}
static inline void
pmd_populate(struct mm_struct *mm, pmd_t *pmdp, struct page *ptep)
{
unsigned long pmdval;
BUG_ON(mm == &init_mm);
pmdval = page_to_pfn(ptep) << PAGE_SHIFT | _PAGE_USER_TABLE;
pmdp[0] = __pmd(pmdval);
pmdp[1] = __pmd(pmdval + 256 * sizeof(pte_t));
flush_pmd_entry(pmdp);
}
#endif
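A rough usage sketch of the helpers above (illustrative only, not from the patch); pgd_offset_k() and pmd_offset() are assumed from the usual page-table walkers, and addr is a hypothetical kernel virtual address:

	/* sketch: allocate a kernel PTE table and hook it into both hardware PMD slots */
	pmd_t *pmd = pmd_offset(pgd_offset_k(addr), addr);
	pte_t *pte = pte_alloc_one_kernel(&init_mm, addr);

	if (pte)
		pmd_populate_kernel(&init_mm, pmd, pte);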
/*
* linux/include/asm-arm/pgtable.h
*
* Copyright (C) 2000-2002 Russell King
* Copyright (C) 1995-2002 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
......@@ -15,16 +15,25 @@
#include <asm/proc-fns.h>
#include <asm/arch/vmalloc.h>
/*
* We pull a couple of tricks here:
* 1. We wrap the PMD into the PGD.
* 2. We lie about the size of the PTE and PGD.
* Even though we have 256 PTE entries and 4096 PGD entries, we tell
* Linux that we actually have 512 PTE entries and 2048 PGD entries.
* Each "Linux" PGD entry is made up of two hardware PGD entries, and
* each PTE table is actually two hardware PTE tables.
*/
#define PTRS_PER_PTE 512
#define PTRS_PER_PMD 1
#define PTRS_PER_PGD 2048
/*
* PMD_SHIFT determines the size of the area a second-level page table can map
* PGDIR_SHIFT determines what a third-level page table entry can map
*/
#define PMD_SHIFT 20
#ifdef CONFIG_CPU_32
#define PGDIR_SHIFT 21
#else
#define PGDIR_SHIFT 20
#endif
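To spell out the arithmetic behind the sizes above (an illustrative note, not patch content): with PGDIR_SHIFT = 21 on CPU_32, each "Linux" PGD entry covers 2MB, built from two 1MB hardware first-level entries, and each "Linux" PTE table covers that same 2MB:

	2048 PGD entries * (1 << 21) bytes = 4GB of virtual address space
	 512 PTE entries * (1 << 12) bytes = 2MB = one Linux PGD entry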
#define LIBRARY_TEXT_START 0x0c000000
......@@ -46,6 +55,117 @@ extern void __pgd_error(const char *file, int line, unsigned long val);
#define FIRST_USER_PGD_NR 1
#define USER_PTRS_PER_PGD ((TASK_SIZE/PGDIR_SIZE) - FIRST_USER_PGD_NR)
/*
* Hardware page table definitions.
*
* + Level 1 descriptor (PMD)
* - common
*/
#define PMD_TYPE_MASK (3 << 0)
#define PMD_TYPE_FAULT (0 << 0)
#define PMD_TYPE_TABLE (1 << 0)
#define PMD_TYPE_SECT (2 << 0)
#define PMD_BIT4 (1 << 4)
#define PMD_DOMAIN(x) ((x) << 5)
#define PMD_PROTECTION (1 << 9) /* v5 */
/*
* - section
*/
#define PMD_SECT_BUFFERABLE (1 << 2)
#define PMD_SECT_CACHEABLE (1 << 3)
#define PMD_SECT_AP_WRITE (1 << 10)
#define PMD_SECT_AP_READ (1 << 11)
#define PMD_SECT_TEX(x) ((x) << 12) /* v5 */
#define PMD_SECT_APX (1 << 15) /* v6 */
#define PMD_SECT_S (1 << 16) /* v6 */
#define PMD_SECT_nG (1 << 17) /* v6 */
#define PMD_SECT_UNCACHED (0)
#define PMD_SECT_BUFFERED (PMD_SECT_BUFFERABLE)
#define PMD_SECT_WT (PMD_SECT_CACHEABLE)
#define PMD_SECT_WB (PMD_SECT_CACHEABLE | PMD_SECT_BUFFERABLE)
#define PMD_SECT_MINICACHE (PMD_SECT_TEX(1) | PMD_SECT_CACHEABLE)
#define PMD_SECT_WBWA (PMD_SECT_TEX(1) | PMD_SECT_CACHEABLE | PMD_SECT_BUFFERABLE)
/*
* - coarse table (not used)
*/
/*
* + Level 2 descriptor (PTE)
* - common
*/
#define PTE_TYPE_MASK (3 << 0)
#define PTE_TYPE_FAULT (0 << 0)
#define PTE_TYPE_LARGE (1 << 0)
#define PTE_TYPE_SMALL (2 << 0)
#define PTE_TYPE_EXT (3 << 0) /* v5 */
#define PTE_BUFFERABLE (1 << 2)
#define PTE_CACHEABLE (1 << 3)
/*
* - extended small page/tiny page
*/
#define PTE_EXT_AP_UNO_SRO (0 << 4)
#define PTE_EXT_AP_UNO_SRW (1 << 4)
#define PTE_EXT_AP_URO_SRW (2 << 4)
#define PTE_EXT_AP_URW_SRW (3 << 4)
#define PTE_EXT_TEX(x) ((x) << 6) /* v5 */
/*
* - small page
*/
#define PTE_SMALL_AP_UNO_SRO (0x00 << 4)
#define PTE_SMALL_AP_UNO_SRW (0x55 << 4)
#define PTE_SMALL_AP_URO_SRW (0xaa << 4)
#define PTE_SMALL_AP_URW_SRW (0xff << 4)
#define PTE_AP_READ PTE_SMALL_AP_URO_SRW
#define PTE_AP_WRITE PTE_SMALL_AP_UNO_SRW
/*
* "Linux" PTE definitions.
*
* We keep two sets of PTEs - the hardware and the linux version.
* This allows greater flexibility in the way we map the Linux bits
* onto the hardware tables, and allows us to have YOUNG and DIRTY
* bits.
*
* The PTE table pointer refers to the hardware entries; the "Linux"
* entries are stored 1024 bytes below.
*/
#define L_PTE_PRESENT (1 << 0)
#define L_PTE_FILE (1 << 1) /* only when !PRESENT */
#define L_PTE_YOUNG (1 << 1)
#define L_PTE_BUFFERABLE (1 << 2) /* matches PTE */
#define L_PTE_CACHEABLE (1 << 3) /* matches PTE */
#define L_PTE_USER (1 << 4)
#define L_PTE_WRITE (1 << 5)
#define L_PTE_EXEC (1 << 6)
#define L_PTE_DIRTY (1 << 7)
#ifndef __ASSEMBLY__
#include <asm/domain.h>
#define _PAGE_USER_TABLE (PMD_TYPE_TABLE | PMD_BIT4 | PMD_DOMAIN(DOMAIN_USER))
#define _PAGE_KERNEL_TABLE (PMD_TYPE_TABLE | PMD_BIT4 | PMD_DOMAIN(DOMAIN_KERNEL))
/*
* The following macros handle the cache and bufferable bits...
*/
#define _L_PTE_DEFAULT L_PTE_PRESENT | L_PTE_YOUNG
#define _L_PTE_READ L_PTE_USER | L_PTE_EXEC | L_PTE_CACHEABLE | L_PTE_BUFFERABLE
#define PAGE_NONE __pgprot(_L_PTE_DEFAULT)
#define PAGE_COPY __pgprot(_L_PTE_DEFAULT | _L_PTE_READ)
#define PAGE_SHARED __pgprot(_L_PTE_DEFAULT | _L_PTE_READ | L_PTE_WRITE)
#define PAGE_READONLY __pgprot(_L_PTE_DEFAULT | _L_PTE_READ)
#define PAGE_KERNEL __pgprot(_L_PTE_DEFAULT | L_PTE_CACHEABLE | L_PTE_BUFFERABLE | L_PTE_DIRTY | L_PTE_WRITE | L_PTE_EXEC)
#define _PAGE_CHG_MASK (PAGE_MASK | L_PTE_DIRTY | L_PTE_YOUNG)
#endif /* __ASSEMBLY__ */
/*
* The table below defines the page protection levels that we insert into our
* Linux page table version. These get translated into the best that the
......@@ -86,9 +206,82 @@ extern struct page *empty_zero_page;
#define pte_none(pte) (!pte_val(pte))
#define pte_clear(ptep) set_pte((ptep), __pte(0))
#define pte_page(pte) (pfn_to_page(pte_pfn(pte)))
#define pte_offset_kernel(dir,addr) (pmd_page_kernel(*(dir)) + __pte_index(addr))
#define pte_offset_map(dir,addr) (pmd_page_kernel(*(dir)) + __pte_index(addr))
#define pte_offset_map_nested(dir,addr) (pmd_page_kernel(*(dir)) + __pte_index(addr))
#define pte_unmap(pte) do { } while (0)
#define pte_unmap_nested(pte) do { } while (0)
#define set_pte(ptep, pte) cpu_set_pte(ptep,pte)
/*
* The following only work if pte_present() is true.
* Undefined behaviour if not..
*/
#define pte_present(pte) (pte_val(pte) & L_PTE_PRESENT)
#define pte_read(pte) (pte_val(pte) & L_PTE_USER)
#define pte_write(pte) (pte_val(pte) & L_PTE_WRITE)
#define pte_exec(pte) (pte_val(pte) & L_PTE_EXEC)
#define pte_dirty(pte) (pte_val(pte) & L_PTE_DIRTY)
#define pte_young(pte) (pte_val(pte) & L_PTE_YOUNG)
/*
* The following only works if pte_present() is not true.
*/
#define pte_file(pte) (pte_val(pte) & L_PTE_FILE)
#define pte_to_pgoff(x) (pte_val(x) >> 2)
#define pgoff_to_pte(x) __pte(((x) << 2) | L_PTE_FILE)
#define PTE_FILE_MAX_BITS 30
#define PTE_BIT_FUNC(fn,op) \
static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; }
/*PTE_BIT_FUNC(rdprotect, &= ~L_PTE_USER);*/
/*PTE_BIT_FUNC(mkread, |= L_PTE_USER);*/
PTE_BIT_FUNC(wrprotect, &= ~L_PTE_WRITE);
PTE_BIT_FUNC(mkwrite, |= L_PTE_WRITE);
PTE_BIT_FUNC(exprotect, &= ~L_PTE_EXEC);
PTE_BIT_FUNC(mkexec, |= L_PTE_EXEC);
PTE_BIT_FUNC(mkclean, &= ~L_PTE_DIRTY);
PTE_BIT_FUNC(mkdirty, |= L_PTE_DIRTY);
PTE_BIT_FUNC(mkold, &= ~L_PTE_YOUNG);
PTE_BIT_FUNC(mkyoung, |= L_PTE_YOUNG);
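For clarity (an expansion sketch, not part of the patch), PTE_BIT_FUNC() above generates, for example:

	static inline pte_t pte_wrprotect(pte_t pte) { pte_val(pte) &= ~L_PTE_WRITE; return pte; }
	static inline pte_t pte_mkdirty(pte_t pte) { pte_val(pte) |= L_PTE_DIRTY; return pte; }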
/*
* Mark the prot value as uncacheable and unbufferable.
*/
#define pgprot_noncached(prot) __pgprot(pgprot_val(prot) & ~(L_PTE_CACHEABLE | L_PTE_BUFFERABLE))
#define pgprot_writecombine(prot) __pgprot(pgprot_val(prot) & ~L_PTE_CACHEABLE)
#define pmd_none(pmd) (!pmd_val(pmd))
#define pmd_present(pmd) (pmd_val(pmd))
#define pmd_bad(pmd) (pmd_val(pmd) & 2)
#define set_pmd(pmdp,pmd) \
do { \
*pmdp = pmd; \
flush_pmd_entry(pmdp); \
} while (0)
#define pmd_clear(pmdp) \
do { \
pmdp[0] = __pmd(0); \
pmdp[1] = __pmd(0); \
clean_pmd_entry(pmdp); \
} while (0)
static inline pte_t *pmd_page_kernel(pmd_t pmd)
{
unsigned long ptr;
ptr = pmd_val(pmd) & ~(PTRS_PER_PTE * sizeof(void *) - 1);
ptr += PTRS_PER_PTE * sizeof(void *);
return __va(ptr);
}
#define pmd_page(pmd) virt_to_page(__va(pmd_val(pmd)))
/*
* Permanent address of a page. We never have highmem, so this is trivial.
......@@ -129,8 +322,6 @@ extern struct page *empty_zero_page;
/* Find an entry in the third-level page table.. */
#define __pte_index(addr) (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#include <asm/proc/pgtable.h>
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot);
......@@ -164,6 +355,8 @@ extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
typedef pte_t *pte_addr_t;
#define pgtable_cache_init() do { } while (0)
#endif /* !__ASSEMBLY__ */
#endif /* _ASMARM_PGTABLE_H */
/*
* linux/asm-arm/proc-armv/assembler.h
*
* Copyright (C) 1996-2000 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This file contains ARM processor specifics for
* the ARM6 and better processors.
*/
#define MODE_USR USR_MODE
#define MODE_FIQ FIQ_MODE
#define MODE_IRQ IRQ_MODE
#define MODE_SVC SVC_MODE
#define DEFAULT_FIQ MODE_FIQ
/*
* LOADREGS - ldm with PC in register list (eg, ldmfd sp!, {pc})
*/
#ifdef __STDC__
#define LOADREGS(cond, base, reglist...)\
ldm##cond base,reglist
#else
#define LOADREGS(cond, base, reglist...)\
ldm/**/cond base,reglist
#endif
/*
* Build a return instruction for this processor type.
*/
#define RETINSTR(instr, regs...)\
instr regs
/*
* Save the current IRQ state and disable IRQs. Note that this macro
* assumes FIQs are enabled, and that the processor is in SVC mode.
*/
.macro save_and_disable_irqs, oldcpsr, temp
mrs \oldcpsr, cpsr
mov \temp, #PSR_I_BIT | MODE_SVC
msr cpsr_c, \temp
.endm
/*
* Restore interrupt state previously stored in a register. We don't
* guarantee that this will preserve the flags.
*/
.macro restore_irqs, oldcpsr
msr cpsr_c, \oldcpsr
.endm
/*
* These two are used to save LR/restore PC over a user-based access.
* The old 26-bit architecture requires that we do. On 32-bit
* architecture, we can safely ignore this requirement.
*/
.macro save_lr
.endm
.macro restore_pc
mov pc, lr
.endm
#define USER(x...) \
9999: x; \
.section __ex_table,"a"; \
.align 3; \
.long 9999b,9001f; \
.previous
/*
* linux/include/asm-arm/proc-armv/cache.h
*
* Copyright (C) 1999-2002 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <asm/mman.h>
#include <asm/glue.h>
/*
* Cache Model
* ===========
*/
#undef _CACHE
#undef MULTI_CACHE
#if defined(CONFIG_CPU_ARM610) || defined(CONFIG_CPU_ARM710)
# ifdef _CACHE
# define MULTI_CACHE 1
# else
# define _CACHE v3
# endif
#endif
#if defined(CONFIG_CPU_ARM720T)
# ifdef _CACHE
# define MULTI_CACHE 1
# else
# define _CACHE v4
# endif
#endif
#if defined(CONFIG_CPU_ARM920T) || defined(CONFIG_CPU_ARM922T) || \
defined(CONFIG_CPU_ARM1020)
# define MULTI_CACHE 1
#endif
#if defined(CONFIG_CPU_ARM926T)
# ifdef _CACHE
# define MULTI_CACHE 1
# else
# define _CACHE arm926
# endif
#endif
#if defined(CONFIG_CPU_SA110) || defined(CONFIG_CPU_SA1100)
# ifdef _CACHE
# define MULTI_CACHE 1
# else
# define _CACHE v4wb
# endif
#endif
#if defined(CONFIG_CPU_XSCALE)
# ifdef _CACHE
# define MULTI_CACHE 1
# else
# define _CACHE xscale
# endif
#endif
#if !defined(_CACHE) && !defined(MULTI_CACHE)
#error Unknown cache maintenance model
#endif
/*
* This flag is used to indicate that the page pointed to by a pte
* is dirty and requires cleaning before returning it to the user.
*/
#define PG_dcache_dirty PG_arch_1
/*
* MM Cache Management
* ===================
*
* The arch/arm/mm/cache-*.S and arch/arm/mm/proc-*.S files
* implement these methods.
*
* Start addresses are inclusive and end addresses are exclusive;
* start addresses should be rounded down, end addresses up.
*
* See linux/Documentation/cachetlb.txt for more information.
* Please note that the implementation of these, and the required
* effects are cache-type (VIVT/VIPT/PIPT) specific.
*
* flush_cache_kern_all()
*
* Unconditionally clean and invalidate the entire cache.
*
* flush_cache_user_mm(mm)
*
* Clean and invalidate all user space cache entries
* before a change of page tables.
*
* flush_cache_user_range(start, end, flags)
*
* Clean and invalidate a range of cache entries in the
* specified address space before a change of page tables.
* - start - user start address (inclusive, page aligned)
* - end - user end address (exclusive, page aligned)
* - flags - vma->vm_flags field
*
* coherent_kern_range(start, end)
*
* Ensure coherency between the Icache and the Dcache in the
* region described by start, end. If you have non-snooping
* Harvard caches, you need to implement this function.
* - start - virtual start address
* - end - virtual end address
*
* DMA Cache Coherency
* ===================
*
* dma_inv_range(start, end)
*
* Invalidate (discard) the specified virtual address range.
* May not write back any entries. If 'start' or 'end'
* are not cache line aligned, those lines must be written
* back.
* - start - virtual start address
* - end - virtual end address
*
* dma_clean_range(start, end)
*
* Clean (write back) the specified virtual address range.
* - start - virtual start address
* - end - virtual end address
*
* dma_flush_range(start, end)
*
* Clean and invalidate the specified virtual address range.
* - start - virtual start address
* - end - virtual end address
*/
struct cpu_cache_fns {
void (*flush_kern_all)(void);
void (*flush_user_all)(void);
void (*flush_user_range)(unsigned long, unsigned long, unsigned int);
void (*coherent_kern_range)(unsigned long, unsigned long);
void (*flush_kern_dcache_page)(void *);
void (*dma_inv_range)(unsigned long, unsigned long);
void (*dma_clean_range)(unsigned long, unsigned long);
void (*dma_flush_range)(unsigned long, unsigned long);
};
/*
* Select the calling method
*/
#ifdef MULTI_CACHE
extern struct cpu_cache_fns cpu_cache;
#define __cpuc_flush_kern_all cpu_cache.flush_kern_all
#define __cpuc_flush_user_all cpu_cache.flush_user_all
#define __cpuc_flush_user_range cpu_cache.flush_user_range
#define __cpuc_coherent_kern_range cpu_cache.coherent_kern_range
#define __cpuc_flush_dcache_page cpu_cache.flush_kern_dcache_page
/*
* These are private to the dma-mapping API. Do not use directly.
* Their sole purpose is to ensure that data held in the cache
* is visible to DMA, or data written by DMA to system memory is
* visible to the CPU.
*/
#define dmac_inv_range cpu_cache.dma_inv_range
#define dmac_clean_range cpu_cache.dma_clean_range
#define dmac_flush_range cpu_cache.dma_flush_range
#else
#define __cpuc_flush_kern_all __glue(_CACHE,_flush_kern_cache_all)
#define __cpuc_flush_user_all __glue(_CACHE,_flush_user_cache_all)
#define __cpuc_flush_user_range __glue(_CACHE,_flush_user_cache_range)
#define __cpuc_coherent_kern_range __glue(_CACHE,_coherent_kern_range)
#define __cpuc_flush_dcache_page __glue(_CACHE,_flush_kern_dcache_page)
extern void __cpuc_flush_kern_all(void);
extern void __cpuc_flush_user_all(void);
extern void __cpuc_flush_user_range(unsigned long, unsigned long, unsigned int);
extern void __cpuc_coherent_kern_range(unsigned long, unsigned long);
extern void __cpuc_flush_dcache_page(void *);
/*
* These are private to the dma-mapping API. Do not use directly.
* Their sole purpose is to ensure that data held in the cache
* is visible to DMA, or data written by DMA to system memory is
* visible to the CPU.
*/
#define dmac_inv_range __glue(_CACHE,_dma_inv_range)
#define dmac_clean_range __glue(_CACHE,_dma_clean_range)
#define dmac_flush_range __glue(_CACHE,_dma_flush_range)
extern void dmac_inv_range(unsigned long, unsigned long);
extern void dmac_clean_range(unsigned long, unsigned long);
extern void dmac_flush_range(unsigned long, unsigned long);
#endif
/*
* Convert calls to our calling convention.
*/
#define flush_cache_all() __cpuc_flush_kern_all()
static inline void flush_cache_mm(struct mm_struct *mm)
{
if (current->active_mm == mm)
__cpuc_flush_user_all();
}
static inline void
flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
if (current->active_mm == vma->vm_mm)
__cpuc_flush_user_range(start & PAGE_MASK, PAGE_ALIGN(end),
vma->vm_flags);
}
static inline void
flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr)
{
if (current->active_mm == vma->vm_mm) {
unsigned long addr = user_addr & PAGE_MASK;
__cpuc_flush_user_range(addr, addr + PAGE_SIZE, vma->vm_flags);
}
}
/*
* Perform necessary cache operations to ensure that data previously
* stored within this range of addresses can be executed by the CPU.
*/
#define flush_icache_range(s,e) __cpuc_coherent_kern_range(s,e)
/*
* Perform necessary cache operations to ensure that the TLB will
* see data written in the specified area.
*/
#define clean_dcache_area(start,size) cpu_dcache_clean_area(start, size)
/*
* flush_dcache_page is used when the kernel has written to the page
* cache page at virtual address page->virtual.
*
* If this page isn't mapped (ie, page->mapping = NULL), or it has
* userspace mappings (page->mapping->i_mmap or page->mapping->i_mmap_shared)
* then we _must_ always clean + invalidate the dcache entries associated
* with the kernel mapping.
*
* Otherwise we can defer the operation, and clean the cache when we are
* about to change to user space. This is the same method as used on SPARC64.
* See update_mmu_cache for the user space part.
*/
#define mapping_mapped(map) (!list_empty(&(map)->i_mmap) || \
!list_empty(&(map)->i_mmap_shared))
extern void __flush_dcache_page(struct page *);
static inline void flush_dcache_page(struct page *page)
{
if (page->mapping && !mapping_mapped(page->mapping))
set_bit(PG_dcache_dirty, &page->flags);
else
__flush_dcache_page(page);
}
#define flush_icache_user_range(vma,page,addr,len) \
flush_dcache_page(page)
/*
* We don't appear to need to do anything here. In fact, if we did, we'd
* duplicate cache flushing elsewhere performed by flush_dcache_page().
*/
#define flush_icache_page(vma,page) do { } while (0)
/*
* ELF definitions for 32-bit CPUs
*/
#define ELF_EXEC_PAGESIZE 4096
#ifdef __KERNEL__
/*
* 32-bit code is always OK. Some cpus can do 26-bit, some can't.
*/
#define ELF_PROC_OK(x) (ELF_THUMB_OK(x) && ELF_26BIT_OK(x))
#define ELF_THUMB_OK(x) \
(( (elf_hwcap & HWCAP_THUMB) && ((x)->e_entry & 1) == 1) || \
((x)->e_entry & 3) == 0)
#define ELF_26BIT_OK(x) \
(( (elf_hwcap & HWCAP_26BIT) && (x)->e_flags & EF_ARM_APCS26) || \
((x)->e_flags & EF_ARM_APCS26) == 0)
/* Old NetWinder binaries were compiled in such a way that the iBCS
heuristic always trips on them. Until these binaries become uncommon
enough not to care, don't trust the `ibcs' flag here. In any case
there is no other ELF system currently supported by iBCS.
@@ Could print a warning message to encourage users to upgrade. */
#define SET_PERSONALITY(ex,ibcs2) \
set_personality(((ex).e_flags&EF_ARM_APCS26 ?PER_LINUX :PER_LINUX_32BIT))
#endif
/*
* linux/include/asm-arm/proc-armv/page.h
*
* Copyright (C) 1995-2002 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef __ASM_PROC_PAGE_H
#define __ASM_PROC_PAGE_H
/* PAGE_SHIFT determines the page size */
#define PAGE_SHIFT 12
#define EXEC_PAGESIZE 4096
#ifndef __ASSEMBLY__
#ifdef STRICT_MM_TYPECHECKS
typedef struct {
unsigned long pgd0;
unsigned long pgd1;
} pgd_t;
#define pgd_val(x) ((x).pgd0)
#else
typedef unsigned long pgd_t[2];
#define pgd_val(x) ((x)[0])
#endif
#endif /* __ASSEMBLY__ */
#endif /* __ASM_PROC_PAGE_H */
/*
* linux/include/asm-arm/proc-armv/pgalloc.h
*
* Copyright (C) 2001-2002 Russell King
*
* Page table allocation/freeing primitives for 32-bit ARM processors.
*/
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include "pgtable.h"
/*
* Allocate one PTE table.
*
* This actually allocates two hardware PTE tables, but we wrap this up
* into one table thus:
*
* +------------+
* | h/w pt 0 |
* +------------+
* | h/w pt 1 |
* +------------+
* | Linux pt 0 |
* +------------+
* | Linux pt 1 |
* +------------+
*/
static inline pte_t *
pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr)
{
pte_t *pte;
pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
if (pte) {
clear_page(pte);
clean_dcache_area(pte, sizeof(pte_t) * PTRS_PER_PTE);
pte += PTRS_PER_PTE;
}
return pte;
}
static inline struct page *
pte_alloc_one(struct mm_struct *mm, unsigned long addr)
{
struct page *pte;
pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT, 0);
if (pte) {
void *page = page_address(pte);
clear_page(page);
clean_dcache_area(page, sizeof(pte_t) * PTRS_PER_PTE);
}
return pte;
}
/*
* Free one PTE table.
*/
static inline void pte_free_kernel(pte_t *pte)
{
if (pte) {
pte -= PTRS_PER_PTE;
free_page((unsigned long)pte);
}
}
static inline void pte_free(struct page *pte)
{
__free_page(pte);
}
/*
* Populate the pmdp entry with a pointer to the pte. This pmd is part
* of the mm address space.
*
* Ensure that we always set both PMD entries.
*/
static inline void
pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep)
{
unsigned long pte_ptr = (unsigned long)ptep;
unsigned long pmdval;
BUG_ON(mm != &init_mm);
/*
* The pmd must be loaded with the physical
* address of the PTE table
*/
pte_ptr -= PTRS_PER_PTE * sizeof(void *);
pmdval = __pa(pte_ptr) | _PAGE_KERNEL_TABLE;
pmdp[0] = __pmd(pmdval);
pmdp[1] = __pmd(pmdval + 256 * sizeof(pte_t));
flush_pmd_entry(pmdp);
}
static inline void
pmd_populate(struct mm_struct *mm, pmd_t *pmdp, struct page *ptep)
{
unsigned long pmdval;
BUG_ON(mm == &init_mm);
pmdval = page_to_pfn(ptep) << PAGE_SHIFT | _PAGE_USER_TABLE;
pmdp[0] = __pmd(pmdval);
pmdp[1] = __pmd(pmdval + 256 * sizeof(pte_t));
flush_pmd_entry(pmdp);
}
/*
* linux/include/asm-arm/proc-armv/pgtable.h
*
* Copyright (C) 1995-2002 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* 12-Jan-1997 RMK Altered flushing routines to use function pointers
* now possible to combine ARM6, ARM7 and StrongARM versions.
* 17-Apr-1999 RMK Now pass an area size to clean_cache_area and
* flush_icache_area.
*/
#ifndef __ASM_PROC_PGTABLE_H
#define __ASM_PROC_PGTABLE_H
/*
* We pull a couple of tricks here:
* 1. We wrap the PMD into the PGD.
* 2. We lie about the size of the PTE and PGD.
* Even though we have 256 PTE entries and 4096 PGD entries, we tell
* Linux that we actually have 512 PTE entries and 2048 PGD entries.
* Each "Linux" PGD entry is made up of two hardware PGD entries, and
* each PTE table is actually two hardware PTE tables.
*/
#define PTRS_PER_PTE 512
#define PTRS_PER_PMD 1
#define PTRS_PER_PGD 2048
/*
* Hardware page table definitions.
*
* + Level 1 descriptor (PMD)
* - common
*/
#define PMD_TYPE_MASK (3 << 0)
#define PMD_TYPE_FAULT (0 << 0)
#define PMD_TYPE_TABLE (1 << 0)
#define PMD_TYPE_SECT (2 << 0)
#define PMD_BIT4 (1 << 4)
#define PMD_DOMAIN(x) ((x) << 5)
#define PMD_PROTECTION (1 << 9) /* v5 */
/*
* - section
*/
#define PMD_SECT_BUFFERABLE (1 << 2)
#define PMD_SECT_CACHEABLE (1 << 3)
#define PMD_SECT_AP_WRITE (1 << 10)
#define PMD_SECT_AP_READ (1 << 11)
#define PMD_SECT_TEX(x) ((x) << 12) /* v5 */
#define PMD_SECT_APX (1 << 15) /* v6 */
#define PMD_SECT_S (1 << 16) /* v6 */
#define PMD_SECT_nG (1 << 17) /* v6 */
#define PMD_SECT_UNCACHED (0)
#define PMD_SECT_BUFFERED (PMD_SECT_BUFFERABLE)
#define PMD_SECT_WT (PMD_SECT_CACHEABLE)
#define PMD_SECT_WB (PMD_SECT_CACHEABLE | PMD_SECT_BUFFERABLE)
#define PMD_SECT_MINICACHE (PMD_SECT_TEX(1) | PMD_SECT_CACHEABLE)
#define PMD_SECT_WBWA (PMD_SECT_TEX(1) | PMD_SECT_CACHEABLE | PMD_SECT_BUFFERABLE)
/*
* - coarse table (not used)
*/
/*
* + Level 2 descriptor (PTE)
* - common
*/
#define PTE_TYPE_MASK (3 << 0)
#define PTE_TYPE_FAULT (0 << 0)
#define PTE_TYPE_LARGE (1 << 0)
#define PTE_TYPE_SMALL (2 << 0)
#define PTE_TYPE_EXT (3 << 0) /* v5 */
#define PTE_BUFFERABLE (1 << 2)
#define PTE_CACHEABLE (1 << 3)
/*
* - extended small page/tiny page
*/
#define PTE_EXT_AP_UNO_SRO (0 << 4)
#define PTE_EXT_AP_UNO_SRW (1 << 4)
#define PTE_EXT_AP_URO_SRW (2 << 4)
#define PTE_EXT_AP_URW_SRW (3 << 4)
#define PTE_EXT_TEX(x) ((x) << 6) /* v5 */
/*
* - small page
*/
#define PTE_SMALL_AP_UNO_SRO (0x00 << 4)
#define PTE_SMALL_AP_UNO_SRW (0x55 << 4)
#define PTE_SMALL_AP_URO_SRW (0xaa << 4)
#define PTE_SMALL_AP_URW_SRW (0xff << 4)
#define PTE_AP_READ PTE_SMALL_AP_URO_SRW
#define PTE_AP_WRITE PTE_SMALL_AP_UNO_SRW
/*
* "Linux" PTE definitions.
*
* We keep two sets of PTEs - the hardware and the linux version.
* This allows greater flexibility in the way we map the Linux bits
* onto the hardware tables, and allows us to have YOUNG and DIRTY
* bits.
*
* The PTE table pointer refers to the hardware entries; the "Linux"
* entries are stored 1024 bytes below.
*/
#define L_PTE_PRESENT (1 << 0)
#define L_PTE_FILE (1 << 1) /* only when !PRESENT */
#define L_PTE_YOUNG (1 << 1)
#define L_PTE_BUFFERABLE (1 << 2) /* matches PTE */
#define L_PTE_CACHEABLE (1 << 3) /* matches PTE */
#define L_PTE_USER (1 << 4)
#define L_PTE_WRITE (1 << 5)
#define L_PTE_EXEC (1 << 6)
#define L_PTE_DIRTY (1 << 7)
#ifndef __ASSEMBLY__
#include <asm/proc/domain.h>
#define _PAGE_USER_TABLE (PMD_TYPE_TABLE | PMD_BIT4 | PMD_DOMAIN(DOMAIN_USER))
#define _PAGE_KERNEL_TABLE (PMD_TYPE_TABLE | PMD_BIT4 | PMD_DOMAIN(DOMAIN_KERNEL))
#define pmd_bad(pmd) (pmd_val(pmd) & 2)
#define set_pmd(pmdp,pmd) \
do { \
*pmdp = pmd; \
flush_pmd_entry(pmdp); \
} while (0)
#define pmd_clear(pmdp) \
do { \
pmdp[0] = __pmd(0); \
pmdp[1] = __pmd(0); \
clean_pmd_entry(pmdp); \
} while (0)
static inline pte_t *pmd_page_kernel(pmd_t pmd)
{
unsigned long ptr;
ptr = pmd_val(pmd) & ~(PTRS_PER_PTE * sizeof(void *) - 1);
ptr += PTRS_PER_PTE * sizeof(void *);
return __va(ptr);
}
#define pmd_page(pmd) virt_to_page(__va(pmd_val(pmd)))
#define pte_offset_kernel(dir,addr) (pmd_page_kernel(*(dir)) + __pte_index(addr))
#define pte_offset_map(dir,addr) (pmd_page_kernel(*(dir)) + __pte_index(addr))
#define pte_offset_map_nested(dir,addr) (pmd_page_kernel(*(dir)) + __pte_index(addr))
#define pte_unmap(pte) do { } while (0)
#define pte_unmap_nested(pte) do { } while (0)
#define set_pte(ptep, pte) cpu_set_pte(ptep,pte)
/*
* The following macros handle the cache and bufferable bits...
*/
#define _L_PTE_DEFAULT L_PTE_PRESENT | L_PTE_YOUNG
#define _L_PTE_READ L_PTE_USER | L_PTE_EXEC | L_PTE_CACHEABLE | L_PTE_BUFFERABLE
#define PAGE_NONE __pgprot(_L_PTE_DEFAULT)
#define PAGE_COPY __pgprot(_L_PTE_DEFAULT | _L_PTE_READ)
#define PAGE_SHARED __pgprot(_L_PTE_DEFAULT | _L_PTE_READ | L_PTE_WRITE)
#define PAGE_READONLY __pgprot(_L_PTE_DEFAULT | _L_PTE_READ)
#define PAGE_KERNEL __pgprot(_L_PTE_DEFAULT | L_PTE_CACHEABLE | L_PTE_BUFFERABLE | L_PTE_DIRTY | L_PTE_WRITE | L_PTE_EXEC)
#define _PAGE_CHG_MASK (PAGE_MASK | L_PTE_DIRTY | L_PTE_YOUNG)
/*
* The following only work if pte_present() is true.
* Undefined behaviour if not..
*/
#define pte_present(pte) (pte_val(pte) & L_PTE_PRESENT)
#define pte_read(pte) (pte_val(pte) & L_PTE_USER)
#define pte_write(pte) (pte_val(pte) & L_PTE_WRITE)
#define pte_exec(pte) (pte_val(pte) & L_PTE_EXEC)
#define pte_dirty(pte) (pte_val(pte) & L_PTE_DIRTY)
#define pte_young(pte) (pte_val(pte) & L_PTE_YOUNG)
#define pte_file(pte) (pte_val(pte) & L_PTE_FILE)
#define PTE_BIT_FUNC(fn,op) \
static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; }
/*PTE_BIT_FUNC(rdprotect, &= ~L_PTE_USER);*/
/*PTE_BIT_FUNC(mkread, |= L_PTE_USER);*/
PTE_BIT_FUNC(wrprotect, &= ~L_PTE_WRITE);
PTE_BIT_FUNC(mkwrite, |= L_PTE_WRITE);
PTE_BIT_FUNC(exprotect, &= ~L_PTE_EXEC);
PTE_BIT_FUNC(mkexec, |= L_PTE_EXEC);
PTE_BIT_FUNC(mkclean, &= ~L_PTE_DIRTY);
PTE_BIT_FUNC(mkdirty, |= L_PTE_DIRTY);
PTE_BIT_FUNC(mkold, &= ~L_PTE_YOUNG);
PTE_BIT_FUNC(mkyoung, |= L_PTE_YOUNG);
/*
* Mark the prot value as uncacheable and unbufferable.
*/
#define pgprot_noncached(prot) __pgprot(pgprot_val(prot) & ~(L_PTE_CACHEABLE | L_PTE_BUFFERABLE))
#define pgprot_writecombine(prot) __pgprot(pgprot_val(prot) & ~L_PTE_CACHEABLE)
#define pgtable_cache_init() do { } while (0)
#define pte_to_pgoff(x) (pte_val(x) >> 2)
#define pgoff_to_pte(x) __pte(((x) << 2) | L_PTE_FILE)
#define PTE_FILE_MAX_BITS 30
#endif /* __ASSEMBLY__ */
#endif /* __ASM_PROC_PGTABLE_H */
/*
* linux/include/asm-arm/proc-armv/processor.h
*
* Copyright (C) 1996-1999 Russell King.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* Changelog:
* 20-09-1996 RMK Created
* 26-09-1996 RMK Added 'EXTRA_THREAD_STRUCT*'
* 28-09-1996 RMK Moved start_thread into the processor dependencies
* 09-09-1998 PJB Delete redundant `wp_works_ok'
* 30-05-1999 PJB Save sl across context switches
* 31-07-1999 RMK Added 'domain' stuff
*/
#ifndef __ASM_PROC_PROCESSOR_H
#define __ASM_PROC_PROCESSOR_H
#include <asm/proc/domain.h>
#define KERNEL_STACK_SIZE PAGE_SIZE
#define INIT_EXTRA_THREAD_INFO \
.cpu_domain = domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
domain_val(DOMAIN_IO, DOMAIN_CLIENT)
#define start_thread(regs,pc,sp) \
({ \
unsigned long *stack = (unsigned long *)sp; \
set_fs(USER_DS); \
memzero(regs->uregs, sizeof(regs->uregs)); \
if (current->personality & ADDR_LIMIT_32BIT) \
regs->ARM_cpsr = USR_MODE; \
else \
regs->ARM_cpsr = USR26_MODE; \
if (elf_hwcap & HWCAP_THUMB && pc & 1) \
regs->ARM_cpsr |= PSR_T_BIT; \
regs->ARM_pc = pc & ~1; /* pc */ \
regs->ARM_sp = sp; /* sp */ \
regs->ARM_r2 = stack[2]; /* r2 (envp) */ \
regs->ARM_r1 = stack[1]; /* r1 (argv) */ \
regs->ARM_r0 = stack[0]; /* r0 (argc) */ \
})
#define KSTK_EIP(tsk) (((unsigned long *)(4096+(unsigned long)(tsk)->thread_info))[1019])
#define KSTK_ESP(tsk) (((unsigned long *)(4096+(unsigned long)(tsk)->thread_info))[1017])
#endif
/*
* linux/include/asm-arm/proc-armv/ptrace.h
*
* Copyright (C) 1996-1999 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef __ASM_PROC_PTRACE_H
#define __ASM_PROC_PTRACE_H
#include <linux/config.h>
/*
* PSR bits
*/
#define USR26_MODE 0x00000000
#define FIQ26_MODE 0x00000001
#define IRQ26_MODE 0x00000002
#define SVC26_MODE 0x00000003
#define USR_MODE 0x00000010
#define FIQ_MODE 0x00000011
#define IRQ_MODE 0x00000012
#define SVC_MODE 0x00000013
#define ABT_MODE 0x00000017
#define UND_MODE 0x0000001b
#define SYSTEM_MODE 0x0000001f
#define MODE32_BIT 0x00000010
#define MODE_MASK 0x0000001f
#define PSR_T_BIT 0x00000020
#define PSR_F_BIT 0x00000040
#define PSR_I_BIT 0x00000080
#define PSR_J_BIT 0x01000000
#define PSR_Q_BIT 0x08000000
#define PSR_V_BIT 0x10000000
#define PSR_C_BIT 0x20000000
#define PSR_Z_BIT 0x40000000
#define PSR_N_BIT 0x80000000
#define PCMASK 0
/*
* Groups of PSR bits
*/
#define PSR_f 0xff000000 /* Flags */
#define PSR_s 0x00ff0000 /* Status */
#define PSR_x 0x0000ff00 /* Extension */
#define PSR_c 0x000000ff /* Control */
/*
* CR1 bits
*/
#define CR1_M 0x00000001 /* MMU */
#define CR1_A 0x00000002 /* Alignment fault */
#define CR1_C 0x00000004 /* Dcache */
#define CR1_W 0x00000008 /* Write buffer */
#define CR1_P 0x00000010 /* Prog32 */
#define CR1_D 0x00000020 /* Data32 */
#define CR1_L 0x00000040 /* Late abort */
#define CR1_B 0x00000080 /* Big endian */
#define CR1_S 0x00000100 /* System protection */
#define CR1_R 0x00000200 /* ROM protection */
#define CR1_F 0x00000400
#define CR1_Z 0x00000800 /* BTB enable */
#define CR1_I 0x00001000 /* Icache */
#define CR1_V 0x00002000 /* Vector relocation */
#define CR1_RR 0x00004000 /* Round Robin */
#ifndef __ASSEMBLY__
/* this struct defines the way the registers are stored on the
stack during a system call. */
struct pt_regs {
long uregs[18];
};
#define ARM_cpsr uregs[16]
#define ARM_pc uregs[15]
#define ARM_lr uregs[14]
#define ARM_sp uregs[13]
#define ARM_ip uregs[12]
#define ARM_fp uregs[11]
#define ARM_r10 uregs[10]
#define ARM_r9 uregs[9]
#define ARM_r8 uregs[8]
#define ARM_r7 uregs[7]
#define ARM_r6 uregs[6]
#define ARM_r5 uregs[5]
#define ARM_r4 uregs[4]
#define ARM_r3 uregs[3]
#define ARM_r2 uregs[2]
#define ARM_r1 uregs[1]
#define ARM_r0 uregs[0]
#define ARM_ORIG_r0 uregs[17]
#ifdef __KERNEL__
#define user_mode(regs) \
(((regs)->ARM_cpsr & 0xf) == 0)
#ifdef CONFIG_ARM_THUMB
#define thumb_mode(regs) \
(((regs)->ARM_cpsr & PSR_T_BIT))
#else
#define thumb_mode(regs) (0)
#endif
#define processor_mode(regs) \
((regs)->ARM_cpsr & MODE_MASK)
#define interrupts_enabled(regs) \
(!((regs)->ARM_cpsr & PSR_I_BIT))
#define fast_interrupts_enabled(regs) \
(!((regs)->ARM_cpsr & PSR_F_BIT))
#define condition_codes(regs) \
((regs)->ARM_cpsr & (PSR_V_BIT|PSR_C_BIT|PSR_Z_BIT|PSR_N_BIT))
/* Are the current registers suitable for user mode?
* (used to maintain security in signal handlers)
*/
static inline int valid_user_regs(struct pt_regs *regs)
{
if (user_mode(regs) &&
(regs->ARM_cpsr & (PSR_F_BIT|PSR_I_BIT)) == 0)
return 1;
/*
* Force CPSR to something logical...
*/
regs->ARM_cpsr &= PSR_f | PSR_s | PSR_x | PSR_T_BIT | MODE32_BIT;
return 0;
}
#endif /* __KERNEL__ */
#endif /* __ASSEMBLY__ */
#endif
/*
* linux/include/asm-arm/proc-armv/shmparam.h
*
* Copyright (C) 1996 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* definitions for the shared process memory on ARM v3 or v4
* processors
*/
#ifndef __ASM_PROC_SHMPARAM_H
#define __ASM_PROC_SHMPARAM_H
#ifndef SHMMAX
#define SHMMAX 0x01000000
#endif
#endif
/*
* linux/include/asm-arm/proc-armv/system.h
*
* Copyright (C) 1996 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef __ASM_PROC_SYSTEM_H
#define __ASM_PROC_SYSTEM_H
#include <linux/config.h>
#define set_cr(x) \
__asm__ __volatile__( \
"mcr p15, 0, %0, c1, c0, 0 @ set CR" \
: : "r" (x) : "cc")
#define get_cr() \
({ \
unsigned int __val; \
__asm__ __volatile__( \
"mrc p15, 0, %0, c1, c0, 0 @ get CR" \
: "=r" (__val) : : "cc"); \
__val; \
})
#define CR_M (1 << 0) /* MMU enable */
#define CR_A (1 << 1) /* Alignment abort enable */
#define CR_C (1 << 2) /* Dcache enable */
#define CR_W (1 << 3) /* Write buffer enable */
#define CR_P (1 << 4) /* 32-bit exception handler */
#define CR_D (1 << 5) /* 32-bit data address range */
#define CR_L (1 << 6) /* Implementation defined */
#define CR_B (1 << 7) /* Big endian */
#define CR_S (1 << 8) /* System MMU protection */
#define CR_R (1 << 9) /* ROM MMU protection */
#define CR_F (1 << 10) /* Implementation defined */
#define CR_Z (1 << 11) /* Implementation defined */
#define CR_I (1 << 12) /* Icache enable */
#define CR_V (1 << 13) /* Vectors relocated to 0xffff0000 */
#define CR_RR (1 << 14) /* Round Robin cache replacement */
#define CR_L4 (1 << 15) /* LDR pc can set T bit */
#define CR_DT (1 << 16)
#define CR_IT (1 << 18)
#define CR_ST (1 << 19)
#define CR_FI (1 << 21)
#define CR_U (1 << 22) /* Unaligned access operation */
#define CR_XP (1 << 23) /* Extended page tables */
#define CR_VE (1 << 24) /* Vectored interrupts */
extern unsigned long cr_no_alignment; /* defined in entry-armv.S */
extern unsigned long cr_alignment; /* defined in entry-armv.S */
#if __LINUX_ARM_ARCH__ >= 4
#define vectors_base() ((cr_alignment & CR_V) ? 0xffff0000 : 0)
#else
#define vectors_base() (0)
#endif
/*
* Save the current interrupt enable state & disable IRQs
*/
#define local_irq_save(x) \
({ \
unsigned long temp; \
(void) (&temp == &x); \
__asm__ __volatile__( \
"mrs %0, cpsr @ local_irq_save\n" \
" orr %1, %0, #128\n" \
" msr cpsr_c, %1" \
: "=r" (x), "=r" (temp) \
: \
: "memory", "cc"); \
})
/*
* Enable IRQs
*/
#define local_irq_enable() \
({ \
unsigned long temp; \
__asm__ __volatile__( \
"mrs %0, cpsr @ local_irq_enable\n" \
" bic %0, %0, #128\n" \
" msr cpsr_c, %0" \
: "=r" (temp) \
: \
: "memory", "cc"); \
})
/*
* Disable IRQs
*/
#define local_irq_disable() \
({ \
unsigned long temp; \
__asm__ __volatile__( \
"mrs %0, cpsr @ local_irq_disable\n" \
" orr %0, %0, #128\n" \
" msr cpsr_c, %0" \
: "=r" (temp) \
: \
: "memory", "cc"); \
})
/*
* Enable FIQs
*/
#define __stf() \
({ \
unsigned long temp; \
__asm__ __volatile__( \
"mrs %0, cpsr @ stf\n" \
" bic %0, %0, #64\n" \
" msr cpsr_c, %0" \
: "=r" (temp) \
: \
: "memory", "cc"); \
})
/*
* Disable FIQs
*/
#define __clf() \
({ \
unsigned long temp; \
__asm__ __volatile__( \
"mrs %0, cpsr @ clf\n" \
" orr %0, %0, #64\n" \
" msr cpsr_c, %0" \
: "=r" (temp) \
: \
: "memory", "cc"); \
})
/*
* Save the current interrupt enable state.
*/
#define local_save_flags(x) \
({ \
__asm__ __volatile__( \
"mrs %0, cpsr @ local_save_flags" \
: "=r" (x) : : "memory", "cc"); \
})
/*
* restore saved IRQ & FIQ state
*/
#define local_irq_restore(x) \
__asm__ __volatile__( \
"msr cpsr_c, %0 @ local_irq_restore\n" \
: \
: "r" (x) \
: "memory", "cc")
#if defined(CONFIG_CPU_SA1100) || defined(CONFIG_CPU_SA110)
/*
* On the StrongARM, "swp" is terminally broken since it bypasses the
* cache totally. This means that the cache becomes inconsistent, and,
* since we use normal loads/stores as well, this is really bad.
* Typically, this causes oopsen in filp_close, but could have other,
 * more disastrous effects. There are two work-arounds:
* 1. Disable interrupts and emulate the atomic swap
* 2. Clean the cache, perform atomic swap, flush the cache
*
 * We choose (1) since it's the "easiest" to achieve here and is not
* dependent on the processor type.
*/
#define swp_is_buggy
#endif
static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
{
extern void __bad_xchg(volatile void *, int);
unsigned long ret;
#ifdef swp_is_buggy
unsigned long flags;
#endif
switch (size) {
#ifdef swp_is_buggy
case 1:
local_irq_save(flags);
ret = *(volatile unsigned char *)ptr;
*(volatile unsigned char *)ptr = x;
local_irq_restore(flags);
break;
case 4:
local_irq_save(flags);
ret = *(volatile unsigned long *)ptr;
*(volatile unsigned long *)ptr = x;
local_irq_restore(flags);
break;
#else
case 1: __asm__ __volatile__ ("swpb %0, %1, [%2]"
: "=&r" (ret)
: "r" (x), "r" (ptr)
: "memory", "cc");
break;
case 4: __asm__ __volatile__ ("swp %0, %1, [%2]"
: "=&r" (ret)
: "r" (x), "r" (ptr)
: "memory", "cc");
break;
#endif
default: __bad_xchg(ptr, size), ret = 0;
}
return ret;
}
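/*
 * Illustrative sketch (assumed usage, not part of the original file):
 * a simple test-and-set built directly on __xchg().  Callers normally
 * reach this through the xchg() wrapper macro instead.
 */
static inline int example_test_and_set(volatile unsigned long *lock)
{
	/* atomically store 1 and report whether the lock was already held */
	return __xchg(1, (volatile void *)lock, sizeof(*lock)) != 0;
}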
#endif
/*
* linux/include/asm-arm/proc-armv/tlbflush.h
*
* Copyright (C) 1999-2003 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/config.h>
#include <asm/glue.h>
#define TLB_V3_PAGE (1 << 0)
#define TLB_V4_U_PAGE (1 << 1)
#define TLB_V4_D_PAGE (1 << 2)
#define TLB_V4_I_PAGE (1 << 3)
#define TLB_V6_U_PAGE (1 << 4)
#define TLB_V6_D_PAGE (1 << 5)
#define TLB_V6_I_PAGE (1 << 6)
#define TLB_V3_FULL (1 << 8)
#define TLB_V4_U_FULL (1 << 9)
#define TLB_V4_D_FULL (1 << 10)
#define TLB_V4_I_FULL (1 << 11)
#define TLB_V6_U_FULL (1 << 12)
#define TLB_V6_D_FULL (1 << 13)
#define TLB_V6_I_FULL (1 << 14)
#define TLB_V6_U_ASID (1 << 16)
#define TLB_V6_D_ASID (1 << 17)
#define TLB_V6_I_ASID (1 << 18)
#define TLB_DCLEAN (1 << 30)
#define TLB_WB (1 << 31)
/*
* MMU TLB Model
* =============
*
* We have the following to choose from:
* v3 - ARMv3
* v4 - ARMv4 without write buffer
* v4wb - ARMv4 with write buffer without I TLB flush entry instruction
* v4wbi - ARMv4 with write buffer with I TLB flush entry instruction
* v6wbi - ARMv6 with write buffer with I TLB flush entry instruction
*/
#undef _TLB
#undef MULTI_TLB
#define v3_tlb_flags (TLB_V3_FULL | TLB_V3_PAGE)
#if defined(CONFIG_CPU_ARM610) || defined(CONFIG_CPU_ARM710)
# define v3_possible_flags v3_tlb_flags
# define v3_always_flags v3_tlb_flags
# ifdef _TLB
# define MULTI_TLB 1
# else
# define _TLB v3
# endif
#else
# define v3_possible_flags 0
# define v3_always_flags (-1UL)
#endif
#define v4_tlb_flags (TLB_V4_U_FULL | TLB_V4_U_PAGE)
#if defined(CONFIG_CPU_ARM720T)
# define v4_possible_flags v4_tlb_flags
# define v4_always_flags v4_tlb_flags
# ifdef _TLB
# define MULTI_TLB 1
# else
# define _TLB v4
# endif
#else
# define v4_possible_flags 0
# define v4_always_flags (-1UL)
#endif
#define v4wbi_tlb_flags (TLB_WB | TLB_DCLEAN | \
TLB_V4_I_FULL | TLB_V4_D_FULL | \
TLB_V4_I_PAGE | TLB_V4_D_PAGE)
#if defined(CONFIG_CPU_ARM920T) || defined(CONFIG_CPU_ARM922T) || \
defined(CONFIG_CPU_ARM926T) || defined(CONFIG_CPU_ARM1020) || \
defined(CONFIG_CPU_XSCALE)
# define v4wbi_possible_flags v4wbi_tlb_flags
# define v4wbi_always_flags v4wbi_tlb_flags
# ifdef _TLB
# define MULTI_TLB 1
# else
# define _TLB v4wbi
# endif
#else
# define v4wbi_possible_flags 0
# define v4wbi_always_flags (-1UL)
#endif
#define v4wb_tlb_flags (TLB_WB | TLB_DCLEAN | \
TLB_V4_I_FULL | TLB_V4_D_FULL | \
TLB_V4_D_PAGE)
#if defined(CONFIG_CPU_SA110) || defined(CONFIG_CPU_SA1100)
# define v4wb_possible_flags v4wb_tlb_flags
# define v4wb_always_flags v4wb_tlb_flags
# ifdef _TLB
# define MULTI_TLB 1
# else
# define _TLB v4wb
# endif
#else
# define v4wb_possible_flags 0
# define v4wb_always_flags (-1UL)
#endif
#define v6wbi_tlb_flags (TLB_WB | TLB_DCLEAN | \
TLB_V6_I_FULL | TLB_V6_D_FULL | \
TLB_V6_I_PAGE | TLB_V6_D_PAGE | \
TLB_V6_I_ASID | TLB_V6_D_ASID)
#if defined(CONFIG_CPU_V6)
# define v6wbi_possible_flags v6wbi_tlb_flags
# define v6wbi_always_flags v6wbi_tlb_flags
# ifdef _TLB
# define MULTI_TLB 1
# else
# define _TLB v6wbi
# endif
#else
# define v6wbi_possible_flags 0
# define v6wbi_always_flags (-1UL)
#endif
#ifndef _TLB
#error Unknown TLB model
#endif
#ifndef __ASSEMBLY__
struct cpu_tlb_fns {
void (*flush_user_range)(unsigned long, unsigned long, struct vm_area_struct *);
void (*flush_kern_range)(unsigned long, unsigned long);
unsigned long tlb_flags;
};
/*
* Select the calling method
*/
#ifdef MULTI_TLB
#define __cpu_flush_user_tlb_range cpu_tlb.flush_user_range
#define __cpu_flush_kern_tlb_range cpu_tlb.flush_kern_range
#else
#define __cpu_flush_user_tlb_range __glue(_TLB,_flush_user_tlb_range)
#define __cpu_flush_kern_tlb_range __glue(_TLB,_flush_kern_tlb_range)
extern void __cpu_flush_user_tlb_range(unsigned long, unsigned long, struct vm_area_struct *);
extern void __cpu_flush_kern_tlb_range(unsigned long, unsigned long);
#endif
extern struct cpu_tlb_fns cpu_tlb;
#define __cpu_tlb_flags cpu_tlb.tlb_flags
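/*
 * Illustrative sketch (hypothetical helper; the real copy is done by
 * the boot-time CPU setup code): in a MULTI_TLB build every TLB
 * operation dispatches through cpu_tlb, which only needs to be filled
 * in once for the CPU that was detected.
 */
#ifdef MULTI_TLB
static inline void example_install_tlb_fns(const struct cpu_tlb_fns *fns)
{
	cpu_tlb = *fns;		/* e.g. the v4wb entry on a StrongARM */
}
#endif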
/*
* TLB Management
* ==============
*
* The arch/arm/mm/tlb-*.S files implement these methods.
*
* The TLB specific code is expected to perform whatever tests it
* needs to determine if it should invalidate the TLB for each
* call. Start addresses are inclusive and end addresses are
* exclusive; it is safe to round these addresses down.
*
* flush_tlb_all()
*
* Invalidate the entire TLB.
*
* flush_tlb_mm(mm)
*
* Invalidate all TLB entries in a particular address
* space.
* - mm - mm_struct describing address space
*
* flush_tlb_range(mm,start,end)
*
* Invalidate a range of TLB entries in the specified
* address space.
* - mm - mm_struct describing address space
* - start - start address (may not be aligned)
* - end - end address (exclusive, may not be aligned)
*
* flush_tlb_page(vaddr,vma)
*
* Invalidate the specified page in the specified address range.
* - vaddr - virtual address (may not be aligned)
* - vma - vma_struct describing address range
*
 *	flush_tlb_kernel_page(kaddr)
*
* Invalidate the TLB entry for the specified page. The address
 *		will be in the kernel's virtual memory space. Current uses
* only require the D-TLB to be invalidated.
* - kaddr - Kernel virtual memory address
*/
/*
* We optimise the code below by:
* - building a set of TLB flags that might be set in __cpu_tlb_flags
* - building a set of TLB flags that will always be set in __cpu_tlb_flags
* - if we're going to need __cpu_tlb_flags, access it once and only once
*
* This allows us to build optimal assembly for the single-CPU type case,
 * and as close to optimal as possible for the multi-CPU case, given the
 * compiler constraints. We could do better for the multi-CPU case if the compiler
* implemented the "%?" method, but this has been discontinued due to too
* many people getting it wrong.
*/
#define possible_tlb_flags (v3_possible_flags | \
v4_possible_flags | \
v4wbi_possible_flags | \
v4wb_possible_flags | \
v6wbi_possible_flags)
#define always_tlb_flags (v3_always_flags & \
v4_always_flags & \
v4wbi_always_flags & \
v4wb_always_flags & \
v6wbi_always_flags)
#define tlb_flag(f) ((always_tlb_flags & (f)) || (__tlb_flag & possible_tlb_flags & (f)))
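/*
 * Worked example (illustrative only, not part of the original file):
 * on a kernel configured for a single TLB model, always_tlb_flags and
 * possible_tlb_flags both collapse to that model's flags, so tlb_flag()
 * folds to a compile-time constant and dead branches disappear.
 */
static inline unsigned int example_tlb_flag_folding(void)
{
	const unsigned int __tlb_flag = __cpu_tlb_flags;
	unsigned int hits = 0;

	if (tlb_flag(TLB_WB))		/* if (1) on a v4wb-only build */
		hits++;
	if (tlb_flag(TLB_V6_I_ASID))	/* if (0); the block is discarded */
		hits++;

	return hits;
}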
static inline void flush_tlb_all(void)
{
const int zero = 0;
const unsigned int __tlb_flag = __cpu_tlb_flags;
if (tlb_flag(TLB_WB))
asm("mcr%? p15, 0, %0, c7, c10, 4" : : "r" (zero));
if (tlb_flag(TLB_V3_FULL))
asm("mcr%? p15, 0, %0, c6, c0, 0" : : "r" (zero));
if (tlb_flag(TLB_V4_U_FULL | TLB_V6_U_FULL))
asm("mcr%? p15, 0, %0, c8, c7, 0" : : "r" (zero));
if (tlb_flag(TLB_V4_D_FULL | TLB_V6_D_FULL))
asm("mcr%? p15, 0, %0, c8, c6, 0" : : "r" (zero));
if (tlb_flag(TLB_V4_I_FULL | TLB_V6_I_FULL))
asm("mcr%? p15, 0, %0, c8, c5, 0" : : "r" (zero));
}
static inline void flush_tlb_mm(struct mm_struct *mm)
{
const int zero = 0;
const int asid = ASID(mm);
const unsigned int __tlb_flag = __cpu_tlb_flags;
if (tlb_flag(TLB_WB))
asm("mcr%? p15, 0, %0, c7, c10, 4" : : "r" (zero));
if (mm == current->active_mm) {
if (tlb_flag(TLB_V3_FULL))
asm("mcr%? p15, 0, %0, c6, c0, 0" : : "r" (zero));
if (tlb_flag(TLB_V4_U_FULL))
asm("mcr%? p15, 0, %0, c8, c7, 0" : : "r" (zero));
if (tlb_flag(TLB_V4_D_FULL))
asm("mcr%? p15, 0, %0, c8, c6, 0" : : "r" (zero));
if (tlb_flag(TLB_V4_I_FULL))
asm("mcr%? p15, 0, %0, c8, c5, 0" : : "r" (zero));
}
if (tlb_flag(TLB_V6_U_ASID))
asm("mcr%? p15, 0, %0, c8, c7, 2" : : "r" (asid));
if (tlb_flag(TLB_V6_D_ASID))
asm("mcr%? p15, 0, %0, c8, c6, 2" : : "r" (asid));
if (tlb_flag(TLB_V6_I_ASID))
asm("mcr%? p15, 0, %0, c8, c5, 2" : : "r" (asid));
}
static inline void
flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
{
const int zero = 0;
const unsigned int __tlb_flag = __cpu_tlb_flags;
uaddr = (uaddr & PAGE_MASK) | ASID(vma->vm_mm);
if (tlb_flag(TLB_WB))
asm("mcr%? p15, 0, %0, c7, c10, 4" : : "r" (zero));
if (vma->vm_mm == current->active_mm) {
if (tlb_flag(TLB_V3_PAGE))
asm("mcr%? p15, 0, %0, c6, c0, 0" : : "r" (uaddr));
if (tlb_flag(TLB_V4_U_PAGE))
asm("mcr%? p15, 0, %0, c8, c7, 1" : : "r" (uaddr));
if (tlb_flag(TLB_V4_D_PAGE))
asm("mcr%? p15, 0, %0, c8, c6, 1" : : "r" (uaddr));
if (tlb_flag(TLB_V4_I_PAGE))
asm("mcr%? p15, 0, %0, c8, c5, 1" : : "r" (uaddr));
if (!tlb_flag(TLB_V4_I_PAGE) && tlb_flag(TLB_V4_I_FULL))
asm("mcr%? p15, 0, %0, c8, c5, 0" : : "r" (zero));
}
if (tlb_flag(TLB_V6_U_PAGE))
asm("mcr%? p15, 0, %0, c8, c7, 1" : : "r" (uaddr));
if (tlb_flag(TLB_V6_D_PAGE))
asm("mcr%? p15, 0, %0, c8, c6, 1" : : "r" (uaddr));
if (tlb_flag(TLB_V6_I_PAGE))
asm("mcr%? p15, 0, %0, c8, c5, 1" : : "r" (uaddr));
}
static inline void flush_tlb_kernel_page(unsigned long kaddr)
{
const int zero = 0;
const unsigned int __tlb_flag = __cpu_tlb_flags;
kaddr &= PAGE_MASK;
if (tlb_flag(TLB_WB))
asm("mcr%? p15, 0, %0, c7, c10, 4" : : "r" (zero));
if (tlb_flag(TLB_V3_PAGE))
asm("mcr%? p15, 0, %0, c6, c0, 0" : : "r" (kaddr));
if (tlb_flag(TLB_V4_U_PAGE))
asm("mcr%? p15, 0, %0, c8, c7, 1" : : "r" (kaddr));
if (tlb_flag(TLB_V4_D_PAGE))
asm("mcr%? p15, 0, %0, c8, c6, 1" : : "r" (kaddr));
if (tlb_flag(TLB_V4_I_PAGE))
asm("mcr%? p15, 0, %0, c8, c5, 1" : : "r" (kaddr));
if (!tlb_flag(TLB_V4_I_PAGE) && tlb_flag(TLB_V4_I_FULL))
asm("mcr%? p15, 0, %0, c8, c5, 0" : : "r" (zero));
if (tlb_flag(TLB_V6_U_PAGE))
asm("mcr%? p15, 0, %0, c8, c7, 1" : : "r" (kaddr));
if (tlb_flag(TLB_V6_D_PAGE))
asm("mcr%? p15, 0, %0, c8, c6, 1" : : "r" (kaddr));
if (tlb_flag(TLB_V6_I_PAGE))
asm("mcr%? p15, 0, %0, c8, c5, 1" : : "r" (kaddr));
}
/*
* flush_pmd_entry
*
* Flush a PMD entry (word aligned, or double-word aligned) to
* RAM if the TLB for the CPU we are running on requires this.
* This is typically used when we are creating PMD entries.
*
* clean_pmd_entry
*
* Clean (but don't drain the write buffer) if the CPU requires
* these operations. This is typically used when we are removing
* PMD entries.
*/
static inline void flush_pmd_entry(pmd_t *pmd)
{
const unsigned int zero = 0;
const unsigned int __tlb_flag = __cpu_tlb_flags;
if (tlb_flag(TLB_DCLEAN))
asm("mcr%? p15, 0, %0, c7, c10, 1 @ flush_pmd"
: : "r" (pmd));
if (tlb_flag(TLB_WB))
asm("mcr%? p15, 0, %0, c7, c10, 4 @ flush_pmd"
: : "r" (zero));
}
static inline void clean_pmd_entry(pmd_t *pmd)
{
const unsigned int __tlb_flag = __cpu_tlb_flags;
if (tlb_flag(TLB_DCLEAN))
asm("mcr%? p15, 0, %0, c7, c10, 1 @ flush_pmd"
: : "r" (pmd));
}
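/*
 * Illustrative sketch (hypothetical helper, not from this file):
 * page-table code cleans a PMD entry back to RAM right after writing
 * it, so the hardware table walk sees the new value.
 */
static inline void example_set_pmd(pmd_t *pmdp, pmd_t pmdval)
{
	*pmdp = pmdval;		/* update the in-memory entry */
	flush_pmd_entry(pmdp);	/* clean the cache line and drain the WB */
}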
#undef tlb_flag
#undef always_tlb_flags
#undef possible_tlb_flags
/*
* Convert calls to our calling convention.
*/
#define flush_tlb_range(vma,start,end) __cpu_flush_user_tlb_range(start,end,vma)
#define flush_tlb_kernel_range(s,e) __cpu_flush_kern_tlb_range(s,e)
/*
* if PG_dcache_dirty is set for the page, we need to ensure that any
 * cache entries for the kernel's virtual memory range are written
* back to the page.
*/
extern void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t pte);
/*
* ARM processors do not cache TLB tables in RAM.
*/
#define flush_tlb_pgtables(mm,start,end) do { } while (0)
/*
* Old ARM MEMC stuff. This supports the reversed mapping handling that
* we have on the older 26-bit machines. We don't have a MEMC chip, so...
*/
#define memc_update_all() do { } while (0)
#define memc_update_mm(mm) do { } while (0)
#define memc_update_addr(mm,pte,log) do { } while (0)
#define memc_clear(mm,physaddr) do { } while (0)
#endif
/*
* linux/include/asm-arm/proc-armv/uaccess.h
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <asm/arch/memory.h>
#include <asm/proc/domain.h>
/*
 * Note that this is actually 0x1,0000,0000: under the 33-bit arithmetic
 * used by __range_ok(), an addr_limit of zero passes every range check.
*/
#define KERNEL_DS 0x00000000
#define USER_DS TASK_SIZE
static inline void set_fs (mm_segment_t fs)
{
current_thread_info()->addr_limit = fs;
modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_CLIENT : DOMAIN_MANAGER);
}
/* We use 33-bit arithmetic here... */
#define __range_ok(addr,size) ({ \
unsigned long flag, sum; \
__asm__("adds %1, %2, %3; sbcccs %1, %1, %0; movcc %0, #0" \
: "=&r" (flag), "=&r" (sum) \
: "r" (addr), "Ir" (size), "0" (current_thread_info()->addr_limit) \
: "cc"); \
flag; })
#define __addr_ok(addr) ({ \
unsigned long flag; \
__asm__("cmp %2, %0; movlo %0, #0" \
: "=&r" (flag) \
: "0" (current_thread_info()->addr_limit), "r" (addr) \
: "cc"); \
(flag == 0); })
#define __put_user_asm_byte(x,__pu_addr,err) \
__asm__ __volatile__( \
"1: strbt %1,[%2],#0\n" \
"2:\n" \
" .section .fixup,\"ax\"\n" \
" .align 2\n" \
"3: mov %0, %3\n" \
" b 2b\n" \
" .previous\n" \
" .section __ex_table,\"a\"\n" \
" .align 3\n" \
" .long 1b, 3b\n" \
" .previous" \
: "+r" (err) \
: "r" (x), "r" (__pu_addr), "i" (-EFAULT) \
: "cc")
#ifndef __ARMEB__
#define __put_user_asm_half(x,__pu_addr,err) \
({ \
unsigned long __temp = (unsigned long)(x); \
__put_user_asm_byte(__temp, __pu_addr, err); \
__put_user_asm_byte(__temp >> 8, __pu_addr + 1, err); \
})
#else
#define __put_user_asm_half(x,__pu_addr,err) \
({ \
unsigned long __temp = (unsigned long)(x); \
__put_user_asm_byte(__temp >> 8, __pu_addr, err); \
__put_user_asm_byte(__temp, __pu_addr + 1, err); \
})
#endif
#define __put_user_asm_word(x,__pu_addr,err) \
__asm__ __volatile__( \
"1: strt %1,[%2],#0\n" \
"2:\n" \
" .section .fixup,\"ax\"\n" \
" .align 2\n" \
"3: mov %0, %3\n" \
" b 2b\n" \
" .previous\n" \
" .section __ex_table,\"a\"\n" \
" .align 3\n" \
" .long 1b, 3b\n" \
" .previous" \
: "+r" (err) \
: "r" (x), "r" (__pu_addr), "i" (-EFAULT) \
: "cc")
#ifndef __ARMEB__
#define __reg_oper0 "%R2"
#define __reg_oper1 "%Q2"
#else
#define __reg_oper0 "%Q2"
#define __reg_oper1 "%R2"
#endif
#define __put_user_asm_dword(x,__pu_addr,err) \
__asm__ __volatile__( \
"1: strt " __reg_oper1 ", [%1], #4\n" \
"2: strt " __reg_oper0 ", [%1], #0\n" \
"3:\n" \
" .section .fixup,\"ax\"\n" \
" .align 2\n" \
"4: mov %0, %3\n" \
" b 3b\n" \
" .previous\n" \
" .section __ex_table,\"a\"\n" \
" .align 3\n" \
" .long 1b, 4b\n" \
" .long 2b, 4b\n" \
" .previous" \
: "+r" (err), "+r" (__pu_addr) \
: "r" (x), "i" (-EFAULT) \
: "cc")
#define __get_user_asm_byte(x,addr,err) \
__asm__ __volatile__( \
"1: ldrbt %1,[%2],#0\n" \
"2:\n" \
" .section .fixup,\"ax\"\n" \
" .align 2\n" \
"3: mov %0, %3\n" \
" mov %1, #0\n" \
" b 2b\n" \
" .previous\n" \
" .section __ex_table,\"a\"\n" \
" .align 3\n" \
" .long 1b, 3b\n" \
" .previous" \
: "+r" (err), "=&r" (x) \
: "r" (addr), "i" (-EFAULT) \
: "cc")
#ifndef __ARMEB__
#define __get_user_asm_half(x,__gu_addr,err) \
({ \
unsigned long __b1, __b2; \
__get_user_asm_byte(__b1, __gu_addr, err); \
__get_user_asm_byte(__b2, __gu_addr + 1, err); \
(x) = __b1 | (__b2 << 8); \
})
#else
#define __get_user_asm_half(x,__gu_addr,err) \
({ \
unsigned long __b1, __b2; \
__get_user_asm_byte(__b1, __gu_addr, err); \
__get_user_asm_byte(__b2, __gu_addr + 1, err); \
(x) = (__b1 << 8) | __b2; \
})
#endif
#define __get_user_asm_word(x,addr,err) \
__asm__ __volatile__( \
"1: ldrt %1,[%2],#0\n" \
"2:\n" \
" .section .fixup,\"ax\"\n" \
" .align 2\n" \
"3: mov %0, %3\n" \
" mov %1, #0\n" \
" b 2b\n" \
" .previous\n" \
" .section __ex_table,\"a\"\n" \
" .align 3\n" \
" .long 1b, 3b\n" \
" .previous" \
: "+r" (err), "=&r" (x) \
: "r" (addr), "i" (-EFAULT) \
: "cc")
extern unsigned long __arch_copy_from_user(void *to, const void *from, unsigned long n);
#define __do_copy_from_user(to,from,n) \
(n) = __arch_copy_from_user(to,from,n)
extern unsigned long __arch_copy_to_user(void *to, const void *from, unsigned long n);
#define __do_copy_to_user(to,from,n) \
(n) = __arch_copy_to_user(to,from,n)
extern unsigned long __arch_clear_user(void *addr, unsigned long n);
#define __do_clear_user(addr,sz) \
(sz) = __arch_clear_user(addr,sz)
extern unsigned long __arch_strncpy_from_user(char *to, const char *from, unsigned long count);
#define __do_strncpy_from_user(dst,src,count,res) \
(res) = __arch_strncpy_from_user(dst,src,count)
extern unsigned long __arch_strnlen_user(const char *s, long n);
#define __do_strnlen_user(s,n,res) \
(res) = __arch_strnlen_user(s,n)
/*
* linux/include/asm-arm/processor.h
*
* Copyright (C) 1995 Russell King
* Copyright (C) 1995-1999 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
......@@ -27,9 +27,10 @@
#include <asm/ptrace.h>
#include <asm/procinfo.h>
#include <asm/arch/memory.h>
#include <asm/proc/processor.h>
#include <asm/types.h>
#define KERNEL_STACK_SIZE PAGE_SIZE
union debug_insn {
u32 arm;
u16 thumb;
......@@ -56,6 +57,24 @@ struct thread_struct {
#define INIT_THREAD { }
#define start_thread(regs,pc,sp) \
({ \
unsigned long *stack = (unsigned long *)sp; \
set_fs(USER_DS); \
memzero(regs->uregs, sizeof(regs->uregs)); \
if (current->personality & ADDR_LIMIT_32BIT) \
regs->ARM_cpsr = USR_MODE; \
else \
regs->ARM_cpsr = USR26_MODE; \
if (elf_hwcap & HWCAP_THUMB && pc & 1) \
regs->ARM_cpsr |= PSR_T_BIT; \
regs->ARM_pc = pc & ~1; /* pc */ \
regs->ARM_sp = sp; /* sp */ \
regs->ARM_r2 = stack[2]; /* r2 (envp) */ \
regs->ARM_r1 = stack[1]; /* r1 (argv) */ \
regs->ARM_r0 = stack[0]; /* r0 (argc) */ \
})
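/*
 * Illustrative usage (not part of this header): a binary-format
 * loader finishes exec roughly as
 *
 *	start_thread(regs, elf_entry, bprm->p);
 *
 * where elf_entry and bprm->p (names taken as assumptions from the
 * generic loaders) are the new image's entry point and initial user
 * stack pointer.
 */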
/* Forward declaration, a strange C thing */
struct task_struct;
......@@ -74,6 +93,9 @@ unsigned long get_wchan(struct task_struct *p);
*/
extern int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
#define KSTK_EIP(tsk) (((unsigned long *)(4096+(unsigned long)(tsk)->thread_info))[1019])
#define KSTK_ESP(tsk) (((unsigned long *)(4096+(unsigned long)(tsk)->thread_info))[1017])
/*
* Prefetching support - only ARMv5.
*/
......
/*
* linux/include/asm-arm/ptrace.h
*
* Copyright (C) 1996-2003 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef __ASM_ARM_PTRACE_H
#define __ASM_ARM_PTRACE_H
#include <linux/config.h>
#define PTRACE_GETREGS 12
#define PTRACE_SETREGS 13
#define PTRACE_GETFPREGS 14
......@@ -8,9 +19,112 @@
#define PTRACE_OLDSETOPTIONS 21
#include <asm/proc/ptrace.h>
/*
* PSR bits
*/
#define USR26_MODE 0x00000000
#define FIQ26_MODE 0x00000001
#define IRQ26_MODE 0x00000002
#define SVC26_MODE 0x00000003
#define USR_MODE 0x00000010
#define FIQ_MODE 0x00000011
#define IRQ_MODE 0x00000012
#define SVC_MODE 0x00000013
#define ABT_MODE 0x00000017
#define UND_MODE 0x0000001b
#define SYSTEM_MODE 0x0000001f
#define MODE32_BIT 0x00000010
#define MODE_MASK 0x0000001f
#define PSR_T_BIT 0x00000020
#define PSR_F_BIT 0x00000040
#define PSR_I_BIT 0x00000080
#define PSR_J_BIT 0x01000000
#define PSR_Q_BIT 0x08000000
#define PSR_V_BIT 0x10000000
#define PSR_C_BIT 0x20000000
#define PSR_Z_BIT 0x40000000
#define PSR_N_BIT 0x80000000
#define PCMASK 0
/*
* Groups of PSR bits
*/
#define PSR_f 0xff000000 /* Flags */
#define PSR_s 0x00ff0000 /* Status */
#define PSR_x 0x0000ff00 /* Extension */
#define PSR_c 0x000000ff /* Control */
#ifndef __ASSEMBLY__
/* this struct defines the way the registers are stored on the
stack during a system call. */
struct pt_regs {
long uregs[18];
};
#define ARM_cpsr uregs[16]
#define ARM_pc uregs[15]
#define ARM_lr uregs[14]
#define ARM_sp uregs[13]
#define ARM_ip uregs[12]
#define ARM_fp uregs[11]
#define ARM_r10 uregs[10]
#define ARM_r9 uregs[9]
#define ARM_r8 uregs[8]
#define ARM_r7 uregs[7]
#define ARM_r6 uregs[6]
#define ARM_r5 uregs[5]
#define ARM_r4 uregs[4]
#define ARM_r3 uregs[3]
#define ARM_r2 uregs[2]
#define ARM_r1 uregs[1]
#define ARM_r0 uregs[0]
#define ARM_ORIG_r0 uregs[17]
#ifdef __KERNEL__
#define user_mode(regs) \
(((regs)->ARM_cpsr & 0xf) == 0)
#ifdef CONFIG_ARM_THUMB
#define thumb_mode(regs) \
(((regs)->ARM_cpsr & PSR_T_BIT))
#else
#define thumb_mode(regs) (0)
#endif
#define processor_mode(regs) \
((regs)->ARM_cpsr & MODE_MASK)
#define interrupts_enabled(regs) \
(!((regs)->ARM_cpsr & PSR_I_BIT))
#define fast_interrupts_enabled(regs) \
(!((regs)->ARM_cpsr & PSR_F_BIT))
#define condition_codes(regs) \
((regs)->ARM_cpsr & (PSR_V_BIT|PSR_C_BIT|PSR_Z_BIT|PSR_N_BIT))
/* Are the current registers suitable for user mode?
* (used to maintain security in signal handlers)
*/
static inline int valid_user_regs(struct pt_regs *regs)
{
if (user_mode(regs) &&
(regs->ARM_cpsr & (PSR_F_BIT|PSR_I_BIT)) == 0)
return 1;
/*
* Force CPSR to something logical...
*/
regs->ARM_cpsr &= PSR_f | PSR_s | PSR_x | PSR_T_BIT | MODE32_BIT;
return 0;
}
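/*
 * Illustrative sketch (hypothetical caller, not from this header):
 * the signal-return path uses valid_user_regs() to refuse a restored
 * frame that would re-enter a privileged mode or mask interrupts.
 */
static inline int example_check_sigframe(struct pt_regs *regs)
{
	if (!valid_user_regs(regs))
		return -1;	/* CPSR has already been forced back to user mode */
	return 0;
}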
#endif /* __KERNEL__ */
#define pc_pointer(v) \
((v) & ~PCMASK)
......
......@@ -10,7 +10,7 @@
#include <linux/rwsem.h>
#include <asm/atomic.h>
#include <asm/proc/locks.h>
#include <asm/locks.h>
struct semaphore {
atomic_t count;
......
#ifndef _ASMARM_SHMPARAM_H
#define _ASMARM_SHMPARAM_H
#include <asm/proc/shmparam.h>
/*
* This should be the size of the virtually indexed cache/ways,
* or page size, whichever is greater since the cache aliases
......
......@@ -4,6 +4,45 @@
#ifdef __KERNEL__
#include <linux/config.h>
#define CPU_ARCH_UNKNOWN 0
#define CPU_ARCH_ARMv3 1
#define CPU_ARCH_ARMv4 2
#define CPU_ARCH_ARMv4T 3
#define CPU_ARCH_ARMv5 4
#define CPU_ARCH_ARMv5T 5
#define CPU_ARCH_ARMv5TE 6
#define CPU_ARCH_ARMv6 7
/*
* CR1 bits (CP#15 CR1)
*/
#define CR_M (1 << 0) /* MMU enable */
#define CR_A (1 << 1) /* Alignment abort enable */
#define CR_C (1 << 2) /* Dcache enable */
#define CR_W (1 << 3) /* Write buffer enable */
#define CR_P (1 << 4) /* 32-bit exception handler */
#define CR_D (1 << 5) /* 32-bit data address range */
#define CR_L (1 << 6) /* Implementation defined */
#define CR_B (1 << 7) /* Big endian */
#define CR_S (1 << 8) /* System MMU protection */
#define CR_R (1 << 9) /* ROM MMU protection */
#define CR_F (1 << 10) /* Implementation defined */
#define CR_Z (1 << 11) /* Implementation defined */
#define CR_I (1 << 12) /* Icache enable */
#define CR_V (1 << 13) /* Vectors relocated to 0xffff0000 */
#define CR_RR (1 << 14) /* Round Robin cache replacement */
#define CR_L4 (1 << 15) /* LDR pc can set T bit */
#define CR_DT (1 << 16)
#define CR_IT (1 << 18)
#define CR_ST (1 << 19)
#define CR_FI (1 << 21)
#define CR_U (1 << 22) /* Unaligned access operation */
#define CR_XP (1 << 23) /* Extended page tables */
#define CR_VE (1 << 24) /* Vectored interrupts */
#ifndef __ASSEMBLY__
#include <linux/kernel.h>
struct thread_info;
......@@ -34,21 +73,30 @@ void hook_fault_code(int nr, int (*fn)(unsigned long, unsigned int,
extern asmlinkage void __backtrace(void);
#define CPU_ARCH_UNKNOWN 0
#define CPU_ARCH_ARMv3 1
#define CPU_ARCH_ARMv4 2
#define CPU_ARCH_ARMv4T 3
#define CPU_ARCH_ARMv5 4
#define CPU_ARCH_ARMv5T 5
#define CPU_ARCH_ARMv5TE 6
#define CPU_ARCH_ARMv6 7
extern int cpu_architecture(void);
/*
* Include processor dependent parts
*/
#include <asm/proc/system.h>
#define set_cr(x) \
__asm__ __volatile__( \
"mcr p15, 0, %0, c1, c0, 0 @ set CR" \
: : "r" (x) : "cc")
#define get_cr() \
({ \
unsigned int __val; \
__asm__ __volatile__( \
"mrc p15, 0, %0, c1, c0, 0 @ get CR" \
: "=r" (__val) : : "cc"); \
__val; \
})
extern unsigned long cr_no_alignment; /* defined in entry-armv.S */
extern unsigned long cr_alignment; /* defined in entry-armv.S */
#if __LINUX_ARM_ARCH__ >= 4
#define vectors_base() ((cr_alignment & CR_V) ? 0xffff0000 : 0)
#else
#define vectors_base() (0)
#endif
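/*
 * Illustrative sketch (assumed helper, not part of the original file):
 * the control register is normally updated read-modify-write through
 * get_cr()/set_cr(), for example to enable alignment faults.
 */
static inline void example_enable_alignment_faults(void)
{
	unsigned int cr = get_cr();	/* read CP15 register 1 */

	cr |= CR_A;			/* request alignment aborts */
	set_cr(cr);			/* write the new value back */
}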
#define mb() __asm__ __volatile__ ("" : : : "memory")
#define rmb() mb()
......@@ -75,6 +123,102 @@ extern struct task_struct *__switch_to(struct task_struct *, struct thread_info
mb(); \
} while (0)
/*
* Save the current interrupt enable state & disable IRQs
*/
#define local_irq_save(x) \
({ \
unsigned long temp; \
(void) (&temp == &x); \
__asm__ __volatile__( \
"mrs %0, cpsr @ local_irq_save\n" \
" orr %1, %0, #128\n" \
" msr cpsr_c, %1" \
: "=r" (x), "=r" (temp) \
: \
: "memory", "cc"); \
})
/*
* Enable IRQs
*/
#define local_irq_enable() \
({ \
unsigned long temp; \
__asm__ __volatile__( \
"mrs %0, cpsr @ local_irq_enable\n" \
" bic %0, %0, #128\n" \
" msr cpsr_c, %0" \
: "=r" (temp) \
: \
: "memory", "cc"); \
})
/*
* Disable IRQs
*/
#define local_irq_disable() \
({ \
unsigned long temp; \
__asm__ __volatile__( \
"mrs %0, cpsr @ local_irq_disable\n" \
" orr %0, %0, #128\n" \
" msr cpsr_c, %0" \
: "=r" (temp) \
: \
: "memory", "cc"); \
})
/*
* Enable FIQs
*/
#define __stf() \
({ \
unsigned long temp; \
__asm__ __volatile__( \
"mrs %0, cpsr @ stf\n" \
" bic %0, %0, #64\n" \
" msr cpsr_c, %0" \
: "=r" (temp) \
: \
: "memory", "cc"); \
})
/*
* Disable FIQs
*/
#define __clf() \
({ \
unsigned long temp; \
__asm__ __volatile__( \
"mrs %0, cpsr @ clf\n" \
" orr %0, %0, #64\n" \
" msr cpsr_c, %0" \
: "=r" (temp) \
: \
: "memory", "cc"); \
})
/*
* Save the current interrupt enable state.
*/
#define local_save_flags(x) \
({ \
__asm__ __volatile__( \
"mrs %0, cpsr @ local_save_flags" \
: "=r" (x) : : "memory", "cc"); \
})
/*
* restore saved IRQ & FIQ state
*/
#define local_irq_restore(x) \
__asm__ __volatile__( \
"msr cpsr_c, %0 @ local_irq_restore\n" \
: \
: "r" (x) \
: "memory", "cc")
#ifdef CONFIG_SMP
#error SMP not supported
......@@ -100,8 +244,67 @@ extern struct task_struct *__switch_to(struct task_struct *, struct thread_info
flags & PSR_I_BIT; \
})
#if defined(CONFIG_CPU_SA1100) || defined(CONFIG_CPU_SA110)
/*
* On the StrongARM, "swp" is terminally broken since it bypasses the
* cache totally. This means that the cache becomes inconsistent, and,
* since we use normal loads/stores as well, this is really bad.
* Typically, this causes oopsen in filp_close, but could have other,
 * more disastrous effects. There are two work-arounds:
* 1. Disable interrupts and emulate the atomic swap
* 2. Clean the cache, perform atomic swap, flush the cache
*
 * We choose (1) since it's the "easiest" to achieve here and is not
* dependent on the processor type.
*/
#define swp_is_buggy
#endif
static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
{
extern void __bad_xchg(volatile void *, int);
unsigned long ret;
#ifdef swp_is_buggy
unsigned long flags;
#endif
switch (size) {
#ifdef swp_is_buggy
case 1:
local_irq_save(flags);
ret = *(volatile unsigned char *)ptr;
*(volatile unsigned char *)ptr = x;
local_irq_restore(flags);
break;
case 4:
local_irq_save(flags);
ret = *(volatile unsigned long *)ptr;
*(volatile unsigned long *)ptr = x;
local_irq_restore(flags);
break;
#else
case 1: __asm__ __volatile__ ("swpb %0, %1, [%2]"
: "=&r" (ret)
: "r" (x), "r" (ptr)
: "memory", "cc");
break;
case 4: __asm__ __volatile__ ("swp %0, %1, [%2]"
: "=&r" (ret)
: "r" (x), "r" (ptr)
: "memory", "cc");
break;
#endif
default: __bad_xchg(ptr, size), ret = 0;
}
return ret;
}
#endif /* CONFIG_SMP */
#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif
......@@ -18,9 +18,9 @@ struct task_struct;
struct exec_domain;
#include <asm/fpstate.h>
#include <asm/proc/processor.h>
#include <asm/ptrace.h>
#include <asm/types.h>
#include <asm/domain.h>
typedef unsigned long mm_segment_t;
......@@ -55,17 +55,19 @@ struct thread_info {
union fp_state fpstate;
};
#define INIT_THREAD_INFO(tsk) \
{ \
.task = &tsk, \
.exec_domain = &default_exec_domain, \
.flags = 0, \
.preempt_count = 1, \
.addr_limit = KERNEL_DS, \
.restart_block = { \
.fn = do_no_restart_syscall, \
}, \
INIT_EXTRA_THREAD_INFO, \
#define INIT_THREAD_INFO(tsk) \
{ \
.task = &tsk, \
.exec_domain = &default_exec_domain, \
.flags = 0, \
.preempt_count = 1, \
.addr_limit = KERNEL_DS, \
.cpu_domain = domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
domain_val(DOMAIN_IO, DOMAIN_CLIENT), \
.restart_block = { \
.fn = do_no_restart_syscall, \
}, \
}
#define init_thread_info (init_thread_union.thread_info)
......
/*
* linux/include/asm-arm/tlbflush.h
*
* Copyright (C) 2000-2002 Russell King
* Copyright (C) 1999-2003 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
......@@ -10,6 +10,397 @@
#ifndef _ASMARM_TLBFLUSH_H
#define _ASMARM_TLBFLUSH_H
#include <asm-arm/proc/tlbflush.h>
#include <linux/config.h>
#include <asm/glue.h>
#define TLB_V3_PAGE (1 << 0)
#define TLB_V4_U_PAGE (1 << 1)
#define TLB_V4_D_PAGE (1 << 2)
#define TLB_V4_I_PAGE (1 << 3)
#define TLB_V6_U_PAGE (1 << 4)
#define TLB_V6_D_PAGE (1 << 5)
#define TLB_V6_I_PAGE (1 << 6)
#define TLB_V3_FULL (1 << 8)
#define TLB_V4_U_FULL (1 << 9)
#define TLB_V4_D_FULL (1 << 10)
#define TLB_V4_I_FULL (1 << 11)
#define TLB_V6_U_FULL (1 << 12)
#define TLB_V6_D_FULL (1 << 13)
#define TLB_V6_I_FULL (1 << 14)
#define TLB_V6_U_ASID (1 << 16)
#define TLB_V6_D_ASID (1 << 17)
#define TLB_V6_I_ASID (1 << 18)
#define TLB_DCLEAN (1 << 30)
#define TLB_WB (1 << 31)
/*
* MMU TLB Model
* =============
*
* We have the following to choose from:
* v3 - ARMv3
* v4 - ARMv4 without write buffer
* v4wb - ARMv4 with write buffer without I TLB flush entry instruction
* v4wbi - ARMv4 with write buffer with I TLB flush entry instruction
* v6wbi - ARMv6 with write buffer with I TLB flush entry instruction
*/
#undef _TLB
#undef MULTI_TLB
#define v3_tlb_flags (TLB_V3_FULL | TLB_V3_PAGE)
#if defined(CONFIG_CPU_ARM610) || defined(CONFIG_CPU_ARM710)
# define v3_possible_flags v3_tlb_flags
# define v3_always_flags v3_tlb_flags
# ifdef _TLB
# define MULTI_TLB 1
# else
# define _TLB v3
# endif
#else
# define v3_possible_flags 0
# define v3_always_flags (-1UL)
#endif
#define v4_tlb_flags (TLB_V4_U_FULL | TLB_V4_U_PAGE)
#if defined(CONFIG_CPU_ARM720T)
# define v4_possible_flags v4_tlb_flags
# define v4_always_flags v4_tlb_flags
# ifdef _TLB
# define MULTI_TLB 1
# else
# define _TLB v4
# endif
#else
# define v4_possible_flags 0
# define v4_always_flags (-1UL)
#endif
#define v4wbi_tlb_flags (TLB_WB | TLB_DCLEAN | \
TLB_V4_I_FULL | TLB_V4_D_FULL | \
TLB_V4_I_PAGE | TLB_V4_D_PAGE)
#if defined(CONFIG_CPU_ARM920T) || defined(CONFIG_CPU_ARM922T) || \
defined(CONFIG_CPU_ARM926T) || defined(CONFIG_CPU_ARM1020) || \
defined(CONFIG_CPU_XSCALE)
# define v4wbi_possible_flags v4wbi_tlb_flags
# define v4wbi_always_flags v4wbi_tlb_flags
# ifdef _TLB
# define MULTI_TLB 1
# else
# define _TLB v4wbi
# endif
#else
# define v4wbi_possible_flags 0
# define v4wbi_always_flags (-1UL)
#endif
#define v4wb_tlb_flags (TLB_WB | TLB_DCLEAN | \
TLB_V4_I_FULL | TLB_V4_D_FULL | \
TLB_V4_D_PAGE)
#if defined(CONFIG_CPU_SA110) || defined(CONFIG_CPU_SA1100)
# define v4wb_possible_flags v4wb_tlb_flags
# define v4wb_always_flags v4wb_tlb_flags
# ifdef _TLB
# define MULTI_TLB 1
# else
# define _TLB v4wb
# endif
#else
# define v4wb_possible_flags 0
# define v4wb_always_flags (-1UL)
#endif
#define v6wbi_tlb_flags (TLB_WB | TLB_DCLEAN | \
TLB_V6_I_FULL | TLB_V6_D_FULL | \
TLB_V6_I_PAGE | TLB_V6_D_PAGE | \
TLB_V6_I_ASID | TLB_V6_D_ASID)
#if defined(CONFIG_CPU_V6)
# define v6wbi_possible_flags v6wbi_tlb_flags
# define v6wbi_always_flags v6wbi_tlb_flags
# ifdef _TLB
# define MULTI_TLB 1
# else
# define _TLB v6wbi
# endif
#else
# define v6wbi_possible_flags 0
# define v6wbi_always_flags (-1UL)
#endif
#ifndef _TLB
#error Unknown TLB model
#endif
#ifndef __ASSEMBLY__
struct cpu_tlb_fns {
void (*flush_user_range)(unsigned long, unsigned long, struct vm_area_struct *);
void (*flush_kern_range)(unsigned long, unsigned long);
unsigned long tlb_flags;
};
/*
* Select the calling method
*/
#ifdef MULTI_TLB
#define __cpu_flush_user_tlb_range cpu_tlb.flush_user_range
#define __cpu_flush_kern_tlb_range cpu_tlb.flush_kern_range
#else
#define __cpu_flush_user_tlb_range __glue(_TLB,_flush_user_tlb_range)
#define __cpu_flush_kern_tlb_range __glue(_TLB,_flush_kern_tlb_range)
extern void __cpu_flush_user_tlb_range(unsigned long, unsigned long, struct vm_area_struct *);
extern void __cpu_flush_kern_tlb_range(unsigned long, unsigned long);
#endif
extern struct cpu_tlb_fns cpu_tlb;
#define __cpu_tlb_flags cpu_tlb.tlb_flags
/*
* TLB Management
* ==============
*
* The arch/arm/mm/tlb-*.S files implement these methods.
*
* The TLB specific code is expected to perform whatever tests it
* needs to determine if it should invalidate the TLB for each
* call. Start addresses are inclusive and end addresses are
* exclusive; it is safe to round these addresses down.
*
* flush_tlb_all()
*
* Invalidate the entire TLB.
*
* flush_tlb_mm(mm)
*
* Invalidate all TLB entries in a particular address
* space.
* - mm - mm_struct describing address space
*
* flush_tlb_range(mm,start,end)
*
* Invalidate a range of TLB entries in the specified
* address space.
* - mm - mm_struct describing address space
* - start - start address (may not be aligned)
* - end - end address (exclusive, may not be aligned)
*
* flush_tlb_page(vaddr,vma)
*
* Invalidate the specified page in the specified address range.
* - vaddr - virtual address (may not be aligned)
* - vma - vma_struct describing address range
*
 *	flush_tlb_kernel_page(kaddr)
*
* Invalidate the TLB entry for the specified page. The address
 *		will be in the kernel's virtual memory space. Current uses
* only require the D-TLB to be invalidated.
* - kaddr - Kernel virtual memory address
*/
/*
* We optimise the code below by:
* - building a set of TLB flags that might be set in __cpu_tlb_flags
* - building a set of TLB flags that will always be set in __cpu_tlb_flags
* - if we're going to need __cpu_tlb_flags, access it once and only once
*
* This allows us to build optimal assembly for the single-CPU type case,
 * and as close to optimal as possible for the multi-CPU case, given the
 * compiler constraints. We could do better for the multi-CPU case if the compiler
* implemented the "%?" method, but this has been discontinued due to too
* many people getting it wrong.
*/
#define possible_tlb_flags (v3_possible_flags | \
v4_possible_flags | \
v4wbi_possible_flags | \
v4wb_possible_flags | \
v6wbi_possible_flags)
#define always_tlb_flags (v3_always_flags & \
v4_always_flags & \
v4wbi_always_flags & \
v4wb_always_flags & \
v6wbi_always_flags)
#define tlb_flag(f) ((always_tlb_flags & (f)) || (__tlb_flag & possible_tlb_flags & (f)))
static inline void flush_tlb_all(void)
{
const int zero = 0;
const unsigned int __tlb_flag = __cpu_tlb_flags;
if (tlb_flag(TLB_WB))
asm("mcr%? p15, 0, %0, c7, c10, 4" : : "r" (zero));
if (tlb_flag(TLB_V3_FULL))
asm("mcr%? p15, 0, %0, c6, c0, 0" : : "r" (zero));
if (tlb_flag(TLB_V4_U_FULL | TLB_V6_U_FULL))
asm("mcr%? p15, 0, %0, c8, c7, 0" : : "r" (zero));
if (tlb_flag(TLB_V4_D_FULL | TLB_V6_D_FULL))
asm("mcr%? p15, 0, %0, c8, c6, 0" : : "r" (zero));
if (tlb_flag(TLB_V4_I_FULL | TLB_V6_I_FULL))
asm("mcr%? p15, 0, %0, c8, c5, 0" : : "r" (zero));
}
static inline void flush_tlb_mm(struct mm_struct *mm)
{
const int zero = 0;
const int asid = ASID(mm);
const unsigned int __tlb_flag = __cpu_tlb_flags;
if (tlb_flag(TLB_WB))
asm("mcr%? p15, 0, %0, c7, c10, 4" : : "r" (zero));
if (mm == current->active_mm) {
if (tlb_flag(TLB_V3_FULL))
asm("mcr%? p15, 0, %0, c6, c0, 0" : : "r" (zero));
if (tlb_flag(TLB_V4_U_FULL))
asm("mcr%? p15, 0, %0, c8, c7, 0" : : "r" (zero));
if (tlb_flag(TLB_V4_D_FULL))
asm("mcr%? p15, 0, %0, c8, c6, 0" : : "r" (zero));
if (tlb_flag(TLB_V4_I_FULL))
asm("mcr%? p15, 0, %0, c8, c5, 0" : : "r" (zero));
}
if (tlb_flag(TLB_V6_U_ASID))
asm("mcr%? p15, 0, %0, c8, c7, 2" : : "r" (asid));
if (tlb_flag(TLB_V6_D_ASID))
asm("mcr%? p15, 0, %0, c8, c6, 2" : : "r" (asid));
if (tlb_flag(TLB_V6_I_ASID))
asm("mcr%? p15, 0, %0, c8, c5, 2" : : "r" (asid));
}
static inline void
flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
{
const int zero = 0;
const unsigned int __tlb_flag = __cpu_tlb_flags;
uaddr = (uaddr & PAGE_MASK) | ASID(vma->vm_mm);
if (tlb_flag(TLB_WB))
asm("mcr%? p15, 0, %0, c7, c10, 4" : : "r" (zero));
if (vma->vm_mm == current->active_mm) {
if (tlb_flag(TLB_V3_PAGE))
asm("mcr%? p15, 0, %0, c6, c0, 0" : : "r" (uaddr));
if (tlb_flag(TLB_V4_U_PAGE))
asm("mcr%? p15, 0, %0, c8, c7, 1" : : "r" (uaddr));
if (tlb_flag(TLB_V4_D_PAGE))
asm("mcr%? p15, 0, %0, c8, c6, 1" : : "r" (uaddr));
if (tlb_flag(TLB_V4_I_PAGE))
asm("mcr%? p15, 0, %0, c8, c5, 1" : : "r" (uaddr));
if (!tlb_flag(TLB_V4_I_PAGE) && tlb_flag(TLB_V4_I_FULL))
asm("mcr%? p15, 0, %0, c8, c5, 0" : : "r" (zero));
}
if (tlb_flag(TLB_V6_U_PAGE))
asm("mcr%? p15, 0, %0, c8, c7, 1" : : "r" (uaddr));
if (tlb_flag(TLB_V6_D_PAGE))
asm("mcr%? p15, 0, %0, c8, c6, 1" : : "r" (uaddr));
if (tlb_flag(TLB_V6_I_PAGE))
asm("mcr%? p15, 0, %0, c8, c5, 1" : : "r" (uaddr));
}
static inline void flush_tlb_kernel_page(unsigned long kaddr)
{
const int zero = 0;
const unsigned int __tlb_flag = __cpu_tlb_flags;
kaddr &= PAGE_MASK;
if (tlb_flag(TLB_WB))
asm("mcr%? p15, 0, %0, c7, c10, 4" : : "r" (zero));
if (tlb_flag(TLB_V3_PAGE))
asm("mcr%? p15, 0, %0, c6, c0, 0" : : "r" (kaddr));
if (tlb_flag(TLB_V4_U_PAGE))
asm("mcr%? p15, 0, %0, c8, c7, 1" : : "r" (kaddr));
if (tlb_flag(TLB_V4_D_PAGE))
asm("mcr%? p15, 0, %0, c8, c6, 1" : : "r" (kaddr));
if (tlb_flag(TLB_V4_I_PAGE))
asm("mcr%? p15, 0, %0, c8, c5, 1" : : "r" (kaddr));
if (!tlb_flag(TLB_V4_I_PAGE) && tlb_flag(TLB_V4_I_FULL))
asm("mcr%? p15, 0, %0, c8, c5, 0" : : "r" (zero));
if (tlb_flag(TLB_V6_U_PAGE))
asm("mcr%? p15, 0, %0, c8, c7, 1" : : "r" (kaddr));
if (tlb_flag(TLB_V6_D_PAGE))
asm("mcr%? p15, 0, %0, c8, c6, 1" : : "r" (kaddr));
if (tlb_flag(TLB_V6_I_PAGE))
asm("mcr%? p15, 0, %0, c8, c5, 1" : : "r" (kaddr));
}
/*
* flush_pmd_entry
*
* Flush a PMD entry (word aligned, or double-word aligned) to
* RAM if the TLB for the CPU we are running on requires this.
* This is typically used when we are creating PMD entries.
*
* clean_pmd_entry
*
* Clean (but don't drain the write buffer) if the CPU requires
* these operations. This is typically used when we are removing
* PMD entries.
*/
static inline void flush_pmd_entry(pmd_t *pmd)
{
const unsigned int zero = 0;
const unsigned int __tlb_flag = __cpu_tlb_flags;
if (tlb_flag(TLB_DCLEAN))
asm("mcr%? p15, 0, %0, c7, c10, 1 @ flush_pmd"
: : "r" (pmd));
if (tlb_flag(TLB_WB))
asm("mcr%? p15, 0, %0, c7, c10, 4 @ flush_pmd"
: : "r" (zero));
}
static inline void clean_pmd_entry(pmd_t *pmd)
{
const unsigned int __tlb_flag = __cpu_tlb_flags;
if (tlb_flag(TLB_DCLEAN))
asm("mcr%? p15, 0, %0, c7, c10, 1 @ flush_pmd"
: : "r" (pmd));
}
#undef tlb_flag
#undef always_tlb_flags
#undef possible_tlb_flags
/*
* Convert calls to our calling convention.
*/
#define flush_tlb_range(vma,start,end) __cpu_flush_user_tlb_range(start,end,vma)
#define flush_tlb_kernel_range(s,e) __cpu_flush_kern_tlb_range(s,e)
/*
* if PG_dcache_dirty is set for the page, we need to ensure that any
 * cache entries for the kernel's virtual memory range are written
* back to the page.
*/
extern void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t pte);
/*
* ARM processors do not cache TLB tables in RAM.
*/
#define flush_tlb_pgtables(mm,start,end) do { } while (0)
#endif
#endif
/*
* linux/include/asm-arm/uaccess.h
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef _ASMARM_UACCESS_H
#define _ASMARM_UACCESS_H
......@@ -6,6 +13,8 @@
*/
#include <linux/sched.h>
#include <asm/errno.h>
#include <asm/arch/memory.h>
#include <asm/domain.h>
#define VERIFY_READ 0
#define VERIFY_WRITE 1
......@@ -30,11 +39,39 @@ struct exception_table_entry
extern int fixup_exception(struct pt_regs *regs);
/*
 * Note that this is actually 0x1,0000,0000: under the 33-bit arithmetic
 * used by __range_ok(), an addr_limit of zero passes every range check.
*/
#define KERNEL_DS 0x00000000
#define USER_DS TASK_SIZE
#define get_ds() (KERNEL_DS)
#define get_fs() (current_thread_info()->addr_limit)
static inline void set_fs (mm_segment_t fs)
{
current_thread_info()->addr_limit = fs;
modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_CLIENT : DOMAIN_MANAGER);
}
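/*
 * Illustrative sketch (hypothetical helper, not from this header) of
 * the usual get_fs()/set_fs() pattern: widen the limit so a *_user
 * accessor may be aimed at a kernel buffer, then always restore it.
 */
static inline void example_with_kernel_ds(void)
{
	mm_segment_t old_fs = get_fs();

	set_fs(KERNEL_DS);	/* KERNEL_DS == 0 also makes DOMAIN_KERNEL manager */
	/* ... call a *_user accessor on a kernel pointer here ... */
	set_fs(old_fs);		/* restore the caller's limit */
}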
#define segment_eq(a,b) ((a) == (b))
#include <asm/proc/uaccess.h>
#define __addr_ok(addr) ({ \
unsigned long flag; \
__asm__("cmp %2, %0; movlo %0, #0" \
: "=&r" (flag) \
: "0" (current_thread_info()->addr_limit), "r" (addr) \
: "cc"); \
(flag == 0); })
/* We use 33-bit arithmetic here... */
#define __range_ok(addr,size) ({ \
unsigned long flag, sum; \
__asm__("adds %1, %2, %3; sbcccs %1, %1, %0; movcc %0, #0" \
: "=&r" (flag), "=&r" (sum) \
: "r" (addr), "Ir" (size), "0" (current_thread_info()->addr_limit) \
: "cc"); \
flag; })
#define access_ok(type,addr,size) (__range_ok(addr,size) == 0)
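/*
 * Illustrative sketch (hypothetical helper, not part of the original
 * file): validate a whole user range up front with access_ok() before
 * touching it with the cheaper __xxx accessors defined further down.
 */
static inline int example_verify_write_range(const void *uptr, unsigned long size)
{
	/* 0 if every byte of [uptr, uptr + size) passes __range_ok() */
	return access_ok(VERIFY_WRITE, uptr, size) ? 0 : -EFAULT;
}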
......@@ -125,17 +162,71 @@ do { \
(x) = (__typeof__(*(ptr)))__gu_val; \
} while (0)
#define __get_user_asm_byte(x,addr,err) \
__asm__ __volatile__( \
"1: ldrbt %1,[%2],#0\n" \
"2:\n" \
" .section .fixup,\"ax\"\n" \
" .align 2\n" \
"3: mov %0, %3\n" \
" mov %1, #0\n" \
" b 2b\n" \
" .previous\n" \
" .section __ex_table,\"a\"\n" \
" .align 3\n" \
" .long 1b, 3b\n" \
" .previous" \
: "+r" (err), "=&r" (x) \
: "r" (addr), "i" (-EFAULT) \
: "cc")
#ifndef __ARMEB__
#define __get_user_asm_half(x,__gu_addr,err) \
({ \
unsigned long __b1, __b2; \
__get_user_asm_byte(__b1, __gu_addr, err); \
__get_user_asm_byte(__b2, __gu_addr + 1, err); \
(x) = __b1 | (__b2 << 8); \
})
#else
#define __get_user_asm_half(x,__gu_addr,err) \
({ \
unsigned long __b1, __b2; \
__get_user_asm_byte(__b1, __gu_addr, err); \
__get_user_asm_byte(__b2, __gu_addr + 1, err); \
(x) = (__b1 << 8) | __b2; \
})
#endif
#define __get_user_asm_word(x,addr,err) \
__asm__ __volatile__( \
"1: ldrt %1,[%2],#0\n" \
"2:\n" \
" .section .fixup,\"ax\"\n" \
" .align 2\n" \
"3: mov %0, %3\n" \
" mov %1, #0\n" \
" b 2b\n" \
" .previous\n" \
" .section __ex_table,\"a\"\n" \
" .align 3\n" \
" .long 1b, 3b\n" \
" .previous" \
: "+r" (err), "=&r" (x) \
: "r" (addr), "i" (-EFAULT) \
: "cc")
extern int __put_user_1(void *, unsigned int);
extern int __put_user_2(void *, unsigned int);
extern int __put_user_4(void *, unsigned int);
extern int __put_user_8(void *, unsigned long long);
extern int __put_user_bad(void);
#define __put_user_x(__r1,__p,__e,__s,__i...) \
#define __put_user_x(__r1,__p,__e,__s) \
__asm__ __volatile__ ("bl __put_user_" #__s \
: "=&r" (__e) \
: "0" (__p), "r" (__r1) \
: __i, "cc")
: "ip", "lr", "cc")
#define put_user(x,p) \
({ \
......@@ -144,16 +235,16 @@ extern int __put_user_bad(void);
register int __e asm("r0"); \
switch (sizeof(*(__p))) { \
case 1: \
__put_user_x(__r1, __p, __e, 1, "ip", "lr"); \
__put_user_x(__r1, __p, __e, 1); \
break; \
case 2: \
__put_user_x(__r1, __p, __e, 2, "ip", "lr"); \
__put_user_x(__r1, __p, __e, 2); \
break; \
case 4: \
__put_user_x(__r1, __p, __e, 4, "ip", "lr"); \
__put_user_x(__r1, __p, __e, 4); \
break; \
case 8: \
__put_user_x(__r1, __p, __e, 8, "ip", "lr"); \
__put_user_x(__r1, __p, __e, 8); \
break; \
default: __e = __put_user_bad(); break; \
} \
......@@ -186,10 +277,93 @@ do { \
} \
} while (0)
#define __put_user_asm_byte(x,__pu_addr,err) \
__asm__ __volatile__( \
"1: strbt %1,[%2],#0\n" \
"2:\n" \
" .section .fixup,\"ax\"\n" \
" .align 2\n" \
"3: mov %0, %3\n" \
" b 2b\n" \
" .previous\n" \
" .section __ex_table,\"a\"\n" \
" .align 3\n" \
" .long 1b, 3b\n" \
" .previous" \
: "+r" (err) \
: "r" (x), "r" (__pu_addr), "i" (-EFAULT) \
: "cc")
#ifndef __ARMEB__
#define __put_user_asm_half(x,__pu_addr,err) \
({ \
unsigned long __temp = (unsigned long)(x); \
__put_user_asm_byte(__temp, __pu_addr, err); \
__put_user_asm_byte(__temp >> 8, __pu_addr + 1, err); \
})
#else
#define __put_user_asm_half(x,__pu_addr,err) \
({ \
unsigned long __temp = (unsigned long)(x); \
__put_user_asm_byte(__temp >> 8, __pu_addr, err); \
__put_user_asm_byte(__temp, __pu_addr + 1, err); \
})
#endif
#define __put_user_asm_word(x,__pu_addr,err) \
__asm__ __volatile__( \
"1: strt %1,[%2],#0\n" \
"2:\n" \
" .section .fixup,\"ax\"\n" \
" .align 2\n" \
"3: mov %0, %3\n" \
" b 2b\n" \
" .previous\n" \
" .section __ex_table,\"a\"\n" \
" .align 3\n" \
" .long 1b, 3b\n" \
" .previous" \
: "+r" (err) \
: "r" (x), "r" (__pu_addr), "i" (-EFAULT) \
: "cc")
#ifndef __ARMEB__
#define __reg_oper0 "%R2"
#define __reg_oper1 "%Q2"
#else
#define __reg_oper0 "%Q2"
#define __reg_oper1 "%R2"
#endif
#define __put_user_asm_dword(x,__pu_addr,err) \
__asm__ __volatile__( \
"1: strt " __reg_oper1 ", [%1], #4\n" \
"2: strt " __reg_oper0 ", [%1], #0\n" \
"3:\n" \
" .section .fixup,\"ax\"\n" \
" .align 2\n" \
"4: mov %0, %3\n" \
" b 3b\n" \
" .previous\n" \
" .section __ex_table,\"a\"\n" \
" .align 3\n" \
" .long 1b, 4b\n" \
" .long 2b, 4b\n" \
" .previous" \
: "+r" (err), "+r" (__pu_addr) \
: "r" (x), "i" (-EFAULT) \
: "cc")
extern unsigned long __arch_copy_from_user(void *to, const void *from, unsigned long n);
extern unsigned long __arch_copy_to_user(void *to, const void *from, unsigned long n);
extern unsigned long __arch_clear_user(void *addr, unsigned long n);
extern unsigned long __arch_strncpy_from_user(char *to, const char *from, unsigned long count);
extern unsigned long __arch_strnlen_user(const char *s, long n);
static __inline__ unsigned long copy_from_user(void *to, const void *from, unsigned long n)
{
if (access_ok(VERIFY_READ, from, n))
__do_copy_from_user(to, from, n);
n = __arch_copy_from_user(to, from, n);
else /* security hole - plug it */
memzero(to, n);
return n;
......@@ -197,49 +371,44 @@ static __inline__ unsigned long copy_from_user(void *to, const void *from, unsig
static __inline__ unsigned long __copy_from_user(void *to, const void *from, unsigned long n)
{
__do_copy_from_user(to, from, n);
return n;
return __arch_copy_from_user(to, from, n);
}
static __inline__ unsigned long copy_to_user(void *to, const void *from, unsigned long n)
{
if (access_ok(VERIFY_WRITE, to, n))
__do_copy_to_user(to, from, n);
n = __arch_copy_to_user(to, from, n);
return n;
}
static __inline__ unsigned long __copy_to_user(void *to, const void *from, unsigned long n)
{
__do_copy_to_user(to, from, n);
return n;
return __arch_copy_to_user(to, from, n);
}
static __inline__ unsigned long clear_user (void *to, unsigned long n)
{
if (access_ok(VERIFY_WRITE, to, n))
__do_clear_user(to, n);
n = __arch_clear_user(to, n);
return n;
}
static __inline__ unsigned long __clear_user (void *to, unsigned long n)
{
__do_clear_user(to, n);
return n;
return __arch_clear_user(to, n);
}
static __inline__ long strncpy_from_user (char *dst, const char *src, long count)
{
long res = -EFAULT;
if (access_ok(VERIFY_READ, src, 1))
__do_strncpy_from_user(dst, src, count, res);
res = __arch_strncpy_from_user(dst, src, count);
return res;
}
static __inline__ long __strncpy_from_user (char *dst, const char *src, long count)
{
long res;
__do_strncpy_from_user(dst, src, count, res);
return res;
return __arch_strncpy_from_user(dst, src, count);
}
#define strlen_user(s) strnlen_user(s, ~0UL >> 1)
......@@ -249,7 +418,7 @@ static inline long strnlen_user(const char *s, long n)
unsigned long res = 0;
if (__addr_ok(s))
__do_strnlen_user(s, n, res);
res = __arch_strnlen_user(s, n);
return res;
}
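/*
 * Illustrative sketch (names are assumptions, not part of the original
 * file): a read()-style handler built on the helpers above.
 */
static inline long example_read(char *ubuf, const char *kernel_buf,
				unsigned long count)
{
	unsigned long not_copied = copy_to_user(ubuf, kernel_buf, count);

	/* copy_to_user() returns the number of bytes left uncopied */
	return not_copied ? -EFAULT : (long)count;
}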
......