Commit ed29a809 authored by Russell King

[ARM] Add ARM architecture version 6 support.

This cset adds support for ARM architecture version 6.
parent fb01354b
......@@ -31,6 +31,7 @@ comma = ,
# Note that GCC does not numerically define an architecture version
# macro, but instead defines a whole series of macros which makes
# testing for a specific architecture or later rather impossible.
arch-$(CONFIG_CPU_32v6) :=-D__LINUX_ARM_ARCH__=6 -march=armv5t -Wa,-march=armv6
arch-$(CONFIG_CPU_32v5) :=-D__LINUX_ARM_ARCH__=5 $(call check_gcc,-march=armv5te,-march=armv4)
arch-$(CONFIG_CPU_32v4) :=-D__LINUX_ARM_ARCH__=4 -march=armv4
arch-$(CONFIG_CPU_32v3) :=-D__LINUX_ARM_ARCH__=3 -march=armv3
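Because the compiler provides no numeric architecture macro, the __LINUX_ARM_ARCH__ value defined above is what version-conditional code tests against. A minimal sketch of the pattern (the helper name is hypothetical; only the #if guard style comes from this patch, and the same guard is used by the headers further down):

/* Hypothetical helper, for illustration only. */
#if __LINUX_ARM_ARCH__ >= 6
static inline void irq_off(void)
{
	asm volatile("cpsid i" ::: "memory", "cc");	/* ARMv6 CPS instruction */
}
#else
static inline void irq_off(void)
{
	unsigned long tmp;

	asm volatile(
	"mrs	%0, cpsr\n"
	"orr	%0, %0, #128\n"			/* set PSR_I_BIT */
	"msr	cpsr_c, %0"
	: "=r" (tmp) : : "memory", "cc");
}
#endif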
......@@ -45,6 +46,7 @@ tune-$(CONFIG_CPU_ARM926T) :=-mtune=arm9tdmi
tune-$(CONFIG_CPU_SA110) :=-mtune=strongarm110
tune-$(CONFIG_CPU_SA1100) :=-mtune=strongarm1100
tune-$(CONFIG_CPU_XSCALE) :=$(call check_gcc,-mtune=xscale,-mtune=strongarm110) -Wa,-mcpu=xscale
tune-$(CONFIG_CPU_V6) :=-mtune=strongarm
# Need -Uarm for gcc < 3.x
CFLAGS_BOOT :=-mapcs-32 $(arch-y) $(tune-y) -mshort-load-bytes -msoft-float -Wa,-mno-fpu -Uarm
......
......@@ -47,6 +47,10 @@ int main(void)
{
DEFINE(TSK_ACTIVE_MM, offsetof(struct task_struct, active_mm));
BLANK();
#if __LINUX_ARM_ARCH__ >= 6
DEFINE(MM_CONTEXT_ID, offsetof(struct mm_struct, context.id));
#endif
BLANK();
DEFINE(VMA_VM_MM, offsetof(struct vm_area_struct, vm_mm));
DEFINE(VMA_VM_FLAGS, offsetof(struct vm_area_struct, vm_flags));
BLANK();
......
......@@ -66,6 +66,15 @@
msr cpsr_c, \mode
.endm
#if __LINUX_ARM_ARCH__ >= 6
.macro disable_irq, temp
cpsid i
.endm
.macro enable_irq, temp
cpsie i
.endm
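@ Note: on ARMv6 the \temp argument is unused (cpsid/cpsie need no scratch
@ register); it is kept so existing callers work with either variant.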
#else
.macro disable_irq, temp
set_cpsr_c \temp, #PSR_I_BIT | MODE_SVC
.endm
......@@ -73,6 +82,7 @@
.macro enable_irq, temp
set_cpsr_c \temp, #MODE_SVC
.endm
#endif
.macro save_user_regs
sub sp, sp, #S_FRAME_SIZE
......
......@@ -118,21 +118,21 @@ static struct resource io_res[] = {
#define lp2 io_res[2]
static const char *cache_types[16] = {
"write-through",
"write-back",
"write-back",
"VIVT write-through",
"VIVT write-back",
"VIVT write-back",
"undefined 3",
"undefined 4",
"undefined 5",
"write-back",
"write-back",
"VIVT write-back",
"VIVT write-back",
"undefined 8",
"undefined 9",
"undefined 10",
"undefined 11",
"undefined 12",
"undefined 13",
"undefined 14",
"VIPT write-back",
"undefined 15",
};
......@@ -151,7 +151,7 @@ static const char *cache_clean[16] = {
"undefined 11",
"undefined 12",
"undefined 13",
"undefined 14",
"cp15 c7 ops",
"undefined 15",
};
......@@ -170,7 +170,7 @@ static const char *cache_lockdown[16] = {
"undefined 11",
"undefined 12",
"undefined 13",
"undefined 14",
"format C",
"undefined 15",
};
......@@ -183,7 +183,7 @@ static const char *proc_arch[] = {
"5T",
"5TE",
"5TEJ",
"?(9)",
"6TEJ",
"?(10)",
"?(11)",
"?(12)",
......
......@@ -223,6 +223,17 @@ config CPU_XSCALE
select CPU_TLB_V4WBI
select CPU_MINICACHE
# ARMv6
config CPU_V6
bool "Support ARM V6 processor"
depends on ARCH_INTEGRATOR
select CPU_32v6
select CPU_ABRT_EV6
select CPU_CACHE_V6
select CPU_COPY_V6
select CPU_TLB_V6
# Figure out what processor architecture version we should be using.
# This defines the compiler instruction set which depends on the machine type.
config CPU_32v3
bool
......@@ -233,6 +244,9 @@ config CPU_32v4
config CPU_32v5
bool
config CPU_32v6
bool
# The abort model
config CPU_ABRT_EV4
bool
......@@ -249,6 +263,9 @@ config CPU_ABRT_EV5T
config CPU_ABRT_EV5TJ
bool
config CPU_ABRT_EV6
bool
# The cache model
config CPU_CACHE_V3
bool
......@@ -262,6 +279,9 @@ config CPU_CACHE_V4WT
config CPU_CACHE_V4WB
bool
config CPU_CACHE_V6
bool
# The copy-page model
config CPU_COPY_V3
bool
......@@ -272,6 +292,9 @@ config CPU_COPY_V4WT
config CPU_COPY_V4WB
bool
config CPU_COPY_V6
bool
# This selects the TLB model
config CPU_TLB_V3
bool
......@@ -306,7 +329,7 @@ comment "Processor Features"
config ARM_THUMB
bool "Support Thumb user binaries"
depends on CPU_ARM720T || CPU_ARM920T || CPU_ARM922T || CPU_ARM925T || CPU_ARM926T || CPU_ARM1020 || CPU_ARM1020E || CPU_ARM1022 || CPU_ARM1026 || CPU_XSCALE
depends on CPU_ARM720T || CPU_ARM920T || CPU_ARM922T || CPU_ARM925T || CPU_ARM926T || CPU_ARM1020 || CPU_ARM1020E || CPU_ARM1022 || CPU_ARM1026 || CPU_XSCALE || CPU_V6
default y
help
Say Y if you want to include kernel support for running user space
......
......@@ -15,15 +15,18 @@ obj-$(CONFIG_CPU_ABRT_EV4T) += abort-ev4t.o
obj-$(CONFIG_CPU_ABRT_LV4T) += abort-lv4t.o
obj-$(CONFIG_CPU_ABRT_EV5T) += abort-ev5t.o
obj-$(CONFIG_CPU_ABRT_EV5TJ) += abort-ev5tj.o
obj-$(CONFIG_CPU_ABRT_EV6) += abort-ev6.o
obj-$(CONFIG_CPU_CACHE_V3) += cache-v3.o
obj-$(CONFIG_CPU_CACHE_V4) += cache-v4.o
obj-$(CONFIG_CPU_CACHE_V4WT) += cache-v4wt.o
obj-$(CONFIG_CPU_CACHE_V4WB) += cache-v4wb.o
obj-$(CONFIG_CPU_CACHE_V6) += cache-v6.o
obj-$(CONFIG_CPU_COPY_V3) += copypage-v3.o
obj-$(CONFIG_CPU_COPY_V4WT) += copypage-v4wt.o
obj-$(CONFIG_CPU_COPY_V4WB) += copypage-v4wb.o
obj-$(CONFIG_CPU_COPY_V6) += copypage-v6.o mmu.o
obj-$(CONFIG_CPU_SA1100) += copypage-v4mc.o
obj-$(CONFIG_CPU_XSCALE) += copypage-xscale.o
......@@ -33,6 +36,7 @@ obj-$(CONFIG_CPU_TLB_V3) += tlb-v3.o
obj-$(CONFIG_CPU_TLB_V4WT) += tlb-v4.o
obj-$(CONFIG_CPU_TLB_V4WB) += tlb-v4wb.o
obj-$(CONFIG_CPU_TLB_V4WBI) += tlb-v4wbi.o
obj-$(CONFIG_CPU_TLB_V6) += tlb-v6.o
obj-$(CONFIG_CPU_ARM610) += proc-arm6_7.o
obj-$(CONFIG_CPU_ARM710) += proc-arm6_7.o
......@@ -48,3 +52,4 @@ obj-$(CONFIG_CPU_ARM1026) += proc-arm1026.o
obj-$(CONFIG_CPU_SA110) += proc-sa110.o
obj-$(CONFIG_CPU_SA1100) += proc-sa1100.o
obj-$(CONFIG_CPU_XSCALE) += proc-xscale.o
obj-$(CONFIG_CPU_V6) += proc-v6.o blockops.o
#include <linux/linkage.h>
#include <asm/assembler.h>
/*
* Function: v6_early_abort
*
* Params : r2 = address of aborted instruction
* : r3 = saved SPSR
*
* Returns : r0 = address of abort
* : r1 = FSR, bit 11 = write
* : r2-r8 = corrupted
* : r9 = preserved
* : sp = pointer to registers
*
* Purpose : obtain information about current aborted instruction.
*/
.align 5
ENTRY(v6_early_abort)
mrc p15, 0, r1, c5, c0, 0 @ get FSR
mrc p15, 0, r0, c6, c0, 0 @ get FAR
mov pc, lr
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <asm/memory.h>
#include <asm/ptrace.h>
#include <asm/cacheflush.h>
#include <asm/traps.h>
extern struct cpu_cache_fns blk_cache_fns;
#define HARVARD_CACHE
/*
* blk_flush_kern_dcache_page(kaddr)
*
* Ensure that the data held in the page kaddr is written back
* to the page in question.
*
* - kaddr - kernel address (guaranteed to be page aligned)
*/
static void __attribute__((naked))
blk_flush_kern_dcache_page(void *kaddr)
{
asm(
"add r1, r0, %0 \n\
1: .word 0xec401f0e @ mcrr p15, 0, r0, r1, c14, 0 @ blocking \n\
mov r0, #0 \n\
mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache and BTB \n\
mcr p15, 0, r0, c7, c10, 4 @ drain write buffer \n\
mov pc, lr"
:
: "I" (PAGE_SIZE));
}
/*
* blk_dma_inv_range(start,end)
*
* Invalidate the data cache within the specified region; we will
* be performing a DMA operation in this region and we want to
* purge old data in the cache.
*
* - start - virtual start address of region
* - end - virtual end address of region
*/
static void __attribute__((naked))
blk_dma_inv_range_unified(unsigned long start, unsigned long end)
{
asm(
"tst r0, %0 \n\
mcrne p15, 0, r0, c7, c11, 1 @ clean unified line \n\
tst r1, %0 \n\
mcrne p15, 0, r1, c7, c15, 1 @ clean & invalidate unified line\n\
.word 0xec401f06 @ mcrr p15, 0, r1, r0, c6, 0 @ blocking \n\
mov r0, #0 \n\
mcr p15, 0, r0, c7, c10, 4 @ drain write buffer \n\
mov pc, lr"
:
: "I" (L1_CACHE_BYTES - 1));
}
static void __attribute__((naked))
blk_dma_inv_range_harvard(unsigned long start, unsigned long end)
{
asm(
"tst r0, %0 \n\
mcrne p15, 0, r0, c7, c10, 1 @ clean D line \n\
tst r1, %0 \n\
mcrne p15, 0, r1, c7, c14, 1 @ clean & invalidate D line \n\
.word 0xec401f06 @ mcrr p15, 0, r1, r0, c6, 0 @ blocking \n\
mov r0, #0 \n\
mcr p15, 0, r0, c7, c10, 4 @ drain write buffer \n\
mov pc, lr"
:
: "I" (L1_CACHE_BYTES - 1));
}
/*
* blk_dma_clean_range(start,end)
* - start - virtual start address of region
* - end - virtual end address of region
*/
static void __attribute__((naked))
blk_dma_clean_range(unsigned long start, unsigned long end)
{
asm(
".word 0xec401f0c @ mcrr p15, 0, r1, r0, c12, 0 @ blocking \n\
mov r0, #0 \n\
mcr p15, 0, r0, c7, c10, 4 @ drain write buffer \n\
mov pc, lr");
}
/*
* blk_dma_flush_range(start,end)
* - start - virtual start address of region
* - end - virtual end address of region
*/
static void __attribute__((naked))
blk_dma_flush_range(unsigned long start, unsigned long end)
{
asm(
".word 0xec401f0e @ mcrr p15, 0, r1, r0, c14, 0 @ blocking \n\
mov pc, lr");
}
static int blockops_trap(struct pt_regs *regs, unsigned int instr)
{
regs->ARM_r4 |= regs->ARM_r2;
regs->ARM_pc += 4;
return 0;
}
static char *func[] = {
"Prefetch data range",
"Clean+Invalidate data range",
"Clean data range",
"Invalidate data range",
"Invalidate instr range"
};
static struct undef_hook blockops_hook __initdata = {
.instr_mask = 0x0fffffd0,
.instr_val = 0x0c401f00,
.cpsr_mask = PSR_T_BIT,
.cpsr_val = 0,
.fn = blockops_trap,
};
static int __init blockops_check(void)
{
register unsigned int err asm("r4") = 0;
unsigned int cache_type;
int i;
asm("mrc p15, 0, %0, c0, c0, 1" : "=r" (cache_type));
printk("Checking V6 block cache operations:\n");
register_undef_hook(&blockops_hook);
__asm__ ("mov r0, %0\n\t"
"mov r1, %1\n\t"
"mov r2, #1\n\t"
".word 0xec401f2c @ mcrr p15, 0, r1, r0, c12, 2\n\t"
"mov r2, #2\n\t"
".word 0xec401f0e @ mcrr p15, 0, r1, r0, c14, 0\n\t"
"mov r2, #4\n\t"
".word 0xec401f0c @ mcrr p15, 0, r1, r0, c12, 0\n\t"
"mov r2, #8\n\t"
".word 0xec401f06 @ mcrr p15, 0, r1, r0, c6, 0\n\t"
"mov r2, #16\n\t"
".word 0xec401f05 @ mcrr p15, 0, r1, r0, c5, 0\n\t"
:
: "r" (PAGE_OFFSET), "r" (PAGE_OFFSET + 128)
: "r0", "r1", "r2");
unregister_undef_hook(&blockops_hook);
for (i = 0; i < ARRAY_SIZE(func); i++, err >>= 1)
printk("%30s: %ssupported\n", func[i], err & 1 ? "not " : "");
if ((err & 8) == 0) {
printk(" --> Using %s block cache invalidate\n",
cache_type & (1 << 24) ? "harvard" : "unified");
if (cache_type & (1 << 24))
cpu_cache.dma_inv_range = blk_dma_inv_range_harvard;
else
cpu_cache.dma_inv_range = blk_dma_inv_range_unified;
}
if ((err & 4) == 0) {
printk(" --> Using block cache clean\n");
cpu_cache.dma_clean_range = blk_dma_clean_range;
}
if ((err & 2) == 0) {
printk(" --> Using block cache clean+invalidate\n");
cpu_cache.dma_flush_range = blk_dma_flush_range;
cpu_cache.flush_kern_dcache_page = blk_flush_kern_dcache_page;
}
return 0;
}
__initcall(blockops_check);
/*
* linux/arch/arm/mm/cache-v6.S
*
* Copyright (C) 2001 Deep Blue Solutions Ltd.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This is the "shell" of the ARMv6 processor support.
*/
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
#include "proc-macros.S"
#define HARVARD_CACHE
#define CACHE_LINE_SIZE 32
#define D_CACHE_LINE_SIZE 32
/*
* v6_flush_kern_cache_all()
*
* Flush the entire cache.
*
* It is assumed that:
*/
ENTRY(v6_flush_kern_cache_all)
mov r0, #0
#ifdef HARVARD_CACHE
mcr p15, 0, r0, c7, c14, 0 @ D cache clean+invalidate
mcr p15, 0, r0, c7, c5, 0 @ I+BTB cache invalidate
#else
mcr p15, 0, r0, c7, c15, 0 @ Cache clean+invalidate
#endif
mov pc, lr
/*
* v6_flush_user_cache_all(mm)
*
* Flush all cache entries in a particular address space
*
* - mm - mm_struct describing address space
*/
ENTRY(v6_flush_user_cache_all)
/*FALLTHROUGH*/
/*
* v6_flush_user_cache_range(start, end, flags)
*
* Flush a range of cache entries in the specified address space.
*
* - start - start address (may not be aligned)
* - end - end address (exclusive, may not be aligned)
* - flags - vm_area_struct flags describing address space
*
* It is assumed that:
* - we have a VIPT cache.
*/
ENTRY(v6_flush_user_cache_range)
mov pc, lr
/*
* v6_coherent_kern_range(start,end)
*
* Ensure that the I and D caches are coherent within specified
* region. This is typically used when code has been written to
* a memory region, and will be executed.
*
* - start - virtual start address of region
* - end - virtual end address of region
*
* It is assumed that:
* - the Icache does not read data from the write buffer
*/
ENTRY(v6_coherent_kern_range)
bic r0, r0, #CACHE_LINE_SIZE - 1
1:
#ifdef HARVARD_CACHE
mcr p15, 0, r0, c7, c10, 1 @ clean D line
mcr p15, 0, r0, c7, c5, 1 @ invalidate I line
#endif
mcr p15, 0, r0, c7, c5, 7 @ invalidate BTB entry
add r0, r0, #CACHE_LINE_SIZE
cmp r0, r1
blo 1b
#ifdef HARVARD_CACHE
mov r0, #0
mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
#endif
mov pc, lr
/*
* v6_flush_kern_dcache_page(kaddr)
*
* Ensure that the data held in the page kaddr is written back
* to the page in question.
*
* - kaddr - kernel address (guaranteed to be page aligned)
*/
ENTRY(v6_flush_kern_dcache_page)
add r1, r0, #PAGE_SZ
1:
#ifdef HARVARD_CACHE
mcr p15, 0, r0, c7, c14, 1 @ clean & invalidate D line
#else
mcr p15, 0, r0, c7, c15, 1 @ clean & invalidate unified line
#endif
add r0, r0, #D_CACHE_LINE_SIZE
cmp r0, r1
blo 1b
#ifdef HARVARD_CACHE
mov r0, #0
mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
#endif
mov pc, lr
/*
* v6_dma_inv_range(start,end)
*
* Invalidate the data cache within the specified region; we will
* be performing a DMA operation in this region and we want to
* purge old data in the cache.
*
* - start - virtual start address of region
* - end - virtual end address of region
*/
ENTRY(v6_dma_inv_range)
tst r0, #D_CACHE_LINE_SIZE - 1
bic r0, r0, #D_CACHE_LINE_SIZE - 1
#ifdef HARVARD_CACHE
mcrne p15, 0, r0, c7, c10, 1 @ clean D line
#else
mcrne p15, 0, r0, c7, c11, 1 @ clean unified line
#endif
tst r1, #D_CACHE_LINE_SIZE - 1
bic r1, r1, #D_CACHE_LINE_SIZE - 1
#ifdef HARVARD_CACHE
mcrne p15, 0, r1, c7, c14, 1 @ clean & invalidate D line
#else
mcrne p15, 0, r1, c7, c15, 1 @ clean & invalidate unified line
#endif
1:
#ifdef HARVARD_CACHE
mcr p15, 0, r0, c7, c6, 1 @ invalidate D line
#else
mcr p15, 0, r0, c7, c7, 1 @ invalidate unified line
#endif
add r0, r0, #D_CACHE_LINE_SIZE
cmp r0, r1
blo 1b
mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
mov pc, lr
/*
* v6_dma_clean_range(start,end)
* - start - virtual start address of region
* - end - virtual end address of region
*/
ENTRY(v6_dma_clean_range)
bic r0, r0, #D_CACHE_LINE_SIZE - 1
1:
#ifdef HARVARD_CACHE
mcr p15, 0, r0, c7, c10, 1 @ clean D line
#else
mcr p15, 0, r0, c7, c11, 1 @ clean unified line
#endif
add r0, r0, #D_CACHE_LINE_SIZE
cmp r0, r1
blo 1b
mov r0, #0
mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
mov pc, lr
/*
* v6_dma_flush_range(start,end)
* - start - virtual start address of region
* - end - virtual end address of region
*/
ENTRY(v6_dma_flush_range)
bic r0, r0, #D_CACHE_LINE_SIZE - 1
1:
#ifdef HARVARD_CACHE
mcr p15, 0, r0, c7, c14, 1 @ clean & invalidate D line
#else
mcr p15, 0, r0, c7, c15, 1 @ clean & invalidate line
#endif
add r0, r0, #D_CACHE_LINE_SIZE
cmp r0, r1
blo 1b
mov r0, #0
mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
mov pc, lr
__INITDATA
.type v6_cache_fns, #object
ENTRY(v6_cache_fns)
.long v6_flush_kern_cache_all
.long v6_flush_user_cache_all
.long v6_flush_user_cache_range
.long v6_coherent_kern_range
.long v6_flush_kern_dcache_page
.long v6_dma_inv_range
.long v6_dma_clean_range
.long v6_dma_flush_range
.size v6_cache_fns, . - v6_cache_fns
/*
* linux/arch/arm/mm/copypage-v6.c
*
* Copyright (C) 2002 Deep Blue Solutions Ltd, All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/shmparam.h>
#include <asm/tlbflush.h>
#if SHMLBA > 16384
#error FIX ME
#endif
#define from_address (0xffff8000)
#define from_pgprot PAGE_KERNEL
#define to_address (0xffffc000)
#define to_pgprot PAGE_KERNEL
static pte_t *from_pte;
static pte_t *to_pte;
static spinlock_t v6_lock = SPIN_LOCK_UNLOCKED;
#define DCACHE_COLOUR(vaddr) ((vaddr & (SHMLBA - 1)) >> PAGE_SHIFT)
/*
* Copy the page, taking account of the cache colour.
*/
void v6_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr)
{
unsigned int offset = DCACHE_COLOUR(vaddr);
unsigned long from, to;
spin_lock(&v6_lock);
set_pte(from_pte + offset, pfn_pte(__pa(kfrom) >> PAGE_SHIFT, from_pgprot));
set_pte(to_pte + offset, pfn_pte(__pa(kto) >> PAGE_SHIFT, to_pgprot));
from = from_address + (offset << PAGE_SHIFT);
to = to_address + (offset << PAGE_SHIFT);
flush_tlb_kernel_page(from);
flush_tlb_kernel_page(to);
copy_page((void *)to, (void *)from);
spin_unlock(&v6_lock);
}
void v6_clear_user_page(void *kaddr, unsigned long vaddr)
{
unsigned int offset = DCACHE_COLOUR(vaddr);
unsigned long to = to_address + (offset << PAGE_SHIFT);
spin_lock(&v6_lock);
set_pte(to_pte + offset, pfn_pte(__pa(kaddr) >> PAGE_SHIFT, to_pgprot));
flush_tlb_kernel_page(to);
clear_page((void *)to);
spin_unlock(&v6_lock);
}
struct cpu_user_fns v6_user_fns __initdata = {
.cpu_clear_user_page = v6_clear_user_page,
.cpu_copy_user_page = v6_copy_user_page,
};
static int __init v6_userpage_init(void)
{
pgd_t *pgd;
pmd_t *pmd;
pgd = pgd_offset_k(from_address);
pmd = pmd_alloc(&init_mm, pgd, from_address);
if (!pmd)
BUG();
from_pte = pte_alloc_kernel(&init_mm, pmd, from_address);
if (!from_pte)
BUG();
to_pte = pte_alloc_kernel(&init_mm, pmd, to_address);
if (!to_pte)
BUG();
return 0;
}
__initcall(v6_userpage_init);
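To make the colouring arithmetic concrete: with the ARMv6 SHMLBA of 4 * PAGE_SIZE added to shmparam.h later in this patch and 4 KB pages, DCACHE_COLOUR() reduces to bits 13:12 of the user address, giving four colours. A stand-alone sketch with assumed values (not part of the patch):

#include <stdio.h>

/* Assumed values: 4 KB pages and the ARMv6 SHMLBA from shmparam.h. */
#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define SHMLBA		(4 * PAGE_SIZE)
#define DCACHE_COLOUR(vaddr)	(((vaddr) & (SHMLBA - 1)) >> PAGE_SHIFT)

int main(void)
{
	unsigned long vaddr;

	/* User pages 16 KB apart share a colour, and hence a cache alias. */
	for (vaddr = 0x40000000UL; vaddr < 0x40000000UL + 2 * SHMLBA; vaddr += PAGE_SIZE)
		printf("vaddr %#lx -> colour %lu\n", vaddr, DCACHE_COLOUR(vaddr));
	return 0;
}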
......@@ -35,3 +35,17 @@
ldr \rd, [\rd, #TI_TASK]
ldr \rd, [\rd, #TSK_ACTIVE_MM]
.endm
/*
* mmid - get context id from mm pointer (mm->context.id)
*/
.macro mmid, rd, rn
ldr \rd, [\rn, #MM_CONTEXT_ID]
.endm
/*
* asid - mask the ASID out of the context ID
*/
.macro asid, rd, rn
and \rd, \rn, #255
.endm
/*
* linux/arch/arm/mm/proc-v6.S
*
* Copyright (C) 2001 Deep Blue Solutions Ltd.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This is the "shell" of the ARMv6 processor support.
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/constants.h>
#include <asm/procinfo.h>
#include <asm/pgtable.h>
#include "proc-macros.S"
#define D_CACHE_LINE_SIZE 32
.macro cpsie, flags
.ifc \flags, f
.long 0xf1080040
.exitm
.endif
.ifc \flags, i
.long 0xf1080080
.exitm
.endif
.ifc \flags, if
.long 0xf10800c0
.exitm
.endif
.err
.endm
.macro cpsid, flags
.ifc \flags, f
.long 0xf10c0040
.exitm
.endif
.ifc \flags, i
.long 0xf10c0080
.exitm
.endif
.ifc \flags, if
.long 0xf10c00c0
.exitm
.endif
.err
.endm
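/*
 * The .long values above are the raw encodings of the ARMv6 CPSIE/CPSID
 * instructions; emitting them directly keeps this file assembling even
 * with toolchains that do not yet know the v6 CPS mnemonics.
 */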
ENTRY(cpu_v6_proc_init)
mov pc, lr
ENTRY(cpu_v6_proc_fin)
mov pc, lr
/*
* cpu_v6_reset(loc)
*
* Perform a soft reset of the system. Put the CPU into the
* same state as it would be if it had been reset, and branch
* to what would be the reset vector.
*
* - loc - location to jump to for soft reset
*
* It is assumed that:
*/
.align 5
ENTRY(cpu_v6_reset)
mov pc, r0
/*
* cpu_v6_do_idle()
*
* Idle the processor (eg, wait for interrupt).
*
* IRQs are already disabled.
*/
ENTRY(cpu_v6_do_idle)
mcr p15, 0, r1, c7, c0, 4 @ wait for interrupt
mov pc, lr
ENTRY(cpu_v6_dcache_clean_area)
#ifndef TLB_CAN_READ_FROM_L1_CACHE
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
add r0, r0, #D_CACHE_LINE_SIZE
subs r1, r1, #D_CACHE_LINE_SIZE
bhi 1b
#endif
mov pc, lr
/*
* cpu_v6_switch_mm(pgd_phys, tsk)
*
* Set the translation table base pointer to be pgd_phys
*
* - pgd_phys - physical address of new TTB
*
* It is assumed that:
* - we are not using split page tables
*/
ENTRY(cpu_v6_switch_mm)
mov r2, #0
ldr r1, [r1, #MM_CONTEXT_ID] @ get mm->context.id
mcr p15, 0, r2, c7, c10, 4 @ drain write buffer
mcr p15, 0, r0, c2, c0, 0 @ set TTB 0
mcr p15, 0, r1, c13, c0, 1 @ set context ID
mov pc, lr
#define nG (1 << 11)
#define APX (1 << 9)
#define AP1 (1 << 5)
#define AP0 (1 << 4)
#define XN (1 << 0)
/*
* cpu_v6_set_pte(ptep, pte)
*
* Set a level 2 translation table entry.
*
* - ptep - pointer to level 2 translation table entry
* (hardware version is stored at -1024 bytes)
* - pte - PTE value to store
*
* Permissions:
* YUWD APX AP1 AP0 SVC User
* 0xxx 0 0 0 no acc no acc
* 100x 1 0 1 r/o no acc
* 10x0 1 0 1 r/o no acc
* 1011 0 0 1 r/w no acc
* 110x 1 1 0 r/o r/o
* 11x0 1 1 0 r/o r/o
* 1111 0 1 1 r/w r/w
*/
ENTRY(cpu_v6_set_pte)
str r1, [r0], #-2048 @ linux version
bic r2, r1, #0x00000ff0
bic r2, r2, #0x00000003
orr r2, r2, #AP0 | 2
tst r1, #L_PTE_WRITE
tstne r1, #L_PTE_DIRTY
orreq r2, r2, #APX
tst r1, #L_PTE_USER
orrne r2, r2, #AP1 | nG
tstne r2, #APX
eorne r2, r2, #AP0
tst r1, #L_PTE_YOUNG
biceq r2, r2, #APX | AP1 | AP0
@ tst r1, #L_PTE_EXEC
@ orreq r2, r2, #XN
tst r1, #L_PTE_PRESENT
moveq r2, #0
str r2, [r0]
mcr p15, 0, r0, c7, c10, 1 @ flush_pte
mov pc, lr
cpu_v6_name:
.asciz "Some Random V6 Processor"
.align
.section ".text.init", #alloc, #execinstr
/*
* __v6_setup
*
* Initialise TLB, Caches, and MMU state ready to switch the MMU
* on. Return in r0 the new CP15 C1 control register setting.
*
* We automatically detect if we have a Harvard cache, and use the
* Harvard cache control instructions instead of the unified cache
* control instructions.
*
* This should be able to cover all ARMv6 cores.
*
* It is assumed that:
* - cache type register is implemented
*/
__v6_setup:
mrc p15, 0, r10, c0, c0, 1 @ read cache type register
tst r10, #1 << 24 @ Harvard cache?
mov r10, #0
mcrne p15, 0, r10, c7, c14, 0 @ clean+invalidate D cache
mcrne p15, 0, r10, c7, c5, 0 @ invalidate I cache
mcreq p15, 0, r10, c7, c15, 0 @ clean+invalidate cache
mcr p15, 0, r10, c7, c10, 4 @ drain write buffer
mcr p15, 0, r10, c8, c7, 0 @ invalidate I + D TLBs
mcr p15, 0, r10, c2, c0, 2 @ TTB control register
mcr p15, 0, r4, c2, c0, 0 @ load TTB0
mcr p15, 0, r4, c2, c0, 1 @ load TTB1
mov r10, #0x1f @ domains 0, 1 = manager
mcr p15, 0, r10, c3, c0, 0 @ load domain access register
mrc p15, 0, r0, c1, c0, 0 @ read control register
ldr r10, cr1_clear @ get mask for bits to clear
bic r0, r0, r10 @ clear them
ldr r10, cr1_set @ get mask for bits to set
orr r0, r0, r10 @ set them
mov pc, lr @ return to head.S:__ret
/*
* V X F I D LR
* .... ...E PUI. .T.T 4RVI ZFRS BLDP WCAM
* rrrr rrrx xxx0 0101 xxxx xxxx x111 xxxx < forced
* 0 110 0011 1.00 .111 1101 < we want
*/
.type cr1_clear, #object
.type cr1_set, #object
cr1_clear:
.word 0x0120c302
cr1_set:
.word 0x00c0387d
.type v6_processor_functions, #object
ENTRY(v6_processor_functions)
.word v6_early_abort
.word cpu_v6_proc_init
.word cpu_v6_proc_fin
.word cpu_v6_reset
.word cpu_v6_do_idle
.word cpu_v6_dcache_clean_area
.word cpu_v6_switch_mm
.word cpu_v6_set_pte
.size v6_processor_functions, . - v6_processor_functions
.type cpu_arch_name, #object
cpu_arch_name:
.asciz "armv6"
.size cpu_arch_name, . - cpu_arch_name
.type cpu_elf_name, #object
cpu_elf_name:
.asciz "v6"
.size cpu_elf_name, . - cpu_elf_name
.align
.section ".proc.info", #alloc, #execinstr
/*
* Match any ARMv6 processor core.
*/
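/*
 * The layout below follows struct proc_info_list (asm/procinfo.h): CPU ID
 * value and mask, the MMU flags used for the initial section mappings,
 * the setup function, and pointers to the per-CPU operation tables.
 */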
.type __v6_proc_info, #object
__v6_proc_info:
.long 0x00070000
.long 0x00ff0000
.long 0x00000c0e
b __v6_setup
.long cpu_arch_name
.long cpu_elf_name
.long HWCAP_SWP | HWCAP_HALF | HWCAP_FAST_MULT | HWCAP_VFP
.long cpu_v6_name
.long v6_processor_functions
.long v6wbi_tlb_fns
.long v6_user_fns
.long v6_cache_fns
.size __v6_proc_info, . - __v6_proc_info
/*
* linux/arch/arm/mm/tlb-v6.S
*
* Copyright (C) 1997-2002 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* ARM architecture version 6 TLB handling functions.
* These assume a split I/D TLB.
*/
#include <linux/linkage.h>
#include <asm/constants.h>
#include <asm/page.h>
#include <asm/tlbflush.h>
#include "proc-macros.S"
#define HARVARD_TLB
/*
* v6wbi_flush_user_tlb_range(start, end, vma)
*
* Invalidate a range of TLB entries in the specified address space.
*
* - start - start address (may not be aligned)
* - end - end address (exclusive, may not be aligned)
* - vma - vma_struct describing address range
*
* It is assumed that:
* - the "Invalidate single entry" instruction will invalidate
* both the I and the D TLBs on Harvard-style TLBs
*/
ENTRY(v6wbi_flush_user_tlb_range)
vma_vm_mm r3, r2 @ get vma->vm_mm
mov ip, #0
mmid r3, r3 @ get vm_mm->context.id
mcr p15, 0, ip, c7, c10, 4 @ drain write buffer
mov r0, r0, lsr #PAGE_SHIFT @ align address
mov r1, r1, lsr #PAGE_SHIFT
asid r3, r3 @ mask ASID
orr r0, r3, r0, lsl #PAGE_SHIFT @ Create initial MVA
mov r1, r1, lsl #PAGE_SHIFT
vma_vm_flags r2, r2 @ get vma->vm_flags
1:
#ifdef HARVARD_TLB
mcr p15, 0, r0, c8, c6, 1 @ TLB invalidate D MVA (was 1)
tst r2, #VM_EXEC @ Executable area ?
mcrne p15, 0, r0, c8, c5, 1 @ TLB invalidate I MVA (was 1)
#else
mcr p15, 0, r0, c8, c7, 1 @ TLB invalidate MVA (was 1)
#endif
add r0, r0, #PAGE_SZ
cmp r0, r1
blo 1b
mov pc, lr
/*
* v6wbi_flush_kern_tlb_range(start,end)
*
* Invalidate a range of kernel TLB entries
*
* - start - start address (may not be aligned)
* - end - end address (exclusive, may not be aligned)
*/
ENTRY(v6wbi_flush_kern_tlb_range)
mov r2, #0
mcr p15, 0, r2, c7, c10, 4 @ drain write buffer
mov r0, r0, lsr #PAGE_SHIFT @ align address
mov r1, r1, lsr #PAGE_SHIFT
mov r0, r0, lsl #PAGE_SHIFT
mov r1, r1, lsl #PAGE_SHIFT
1:
#ifdef HARVARD_TLB
mcr p15, 0, r0, c8, c6, 1 @ TLB invalidate D MVA
mcr p15, 0, r0, c8, c5, 1 @ TLB invalidate I MVA
#else
mcr p15, 0, r0, c8, c7, 1 @ TLB invalidate MVA
#endif
add r0, r0, #PAGE_SZ
cmp r0, r1
blo 1b
mov pc, lr
.section ".text.init", #alloc, #execinstr
.type v6wbi_tlb_fns, #object
ENTRY(v6wbi_tlb_fns)
.long v6wbi_flush_user_tlb_range
.long v6wbi_flush_kern_tlb_range
.long v6wbi_tlb_flags
.size v6wbi_tlb_fns, . - v6wbi_tlb_fns
/*
* linux/include/asm-arm/atomic.h
*
* Copyright (c) 1996 Russell King.
* Copyright (C) 1996 Russell King.
* Copyright (C) 2002 Deep Blue Solutions Ltd.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* Changelog:
* 27-06-1996 RMK Created
* 13-04-1997 RMK Made functions atomic!
* 07-12-1997 RMK Upgraded for v2.1.
* 26-08-1998 PJB Added #ifdef __KERNEL__
*/
#ifndef __ASM_ARM_ATOMIC_H
#define __ASM_ARM_ATOMIC_H
#include <linux/config.h>
#ifdef CONFIG_SMP
#error SMP not supported
#endif
typedef struct { volatile int counter; } atomic_t;
#define ATOMIC_INIT(i) { (i) }
#ifdef __KERNEL__
#include <asm/system.h>
#define atomic_read(v) ((v)->counter)
#if __LINUX_ARM_ARCH__ >= 6
/*
* ARMv6 UP and SMP safe atomic ops. We use load exclusive and
* store exclusive to ensure that these are atomic. We may loop
* to ensure that the update happens. Writing to 'v->counter'
* without using the following operations WILL break the atomic
* nature of these ops.
*/
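/*
 * Note: strex writes 0 to its status operand when the store succeeds and
 * 1 when the exclusive monitor was lost, which is why each routine below
 * loops on "teq ..., #0; bne 1b" until the update actually lands.
 */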
static inline void atomic_set(atomic_t *v, int i)
{
unsigned long tmp;
__asm__ __volatile__("@ atomic_set\n"
"1: ldrex %0, [%1]\n"
" strex %0, %2, [%1]\n"
" teq %0, #0\n"
" bne 1b"
: "=&r" (tmp)
: "r" (&v->counter), "r" (i)
: "cc");
}
static inline void atomic_add(int i, volatile atomic_t *v)
{
unsigned long tmp, tmp2;
__asm__ __volatile__("@ atomic_add\n"
"1: ldrex %0, [%2]\n"
" add %0, %0, %3\n"
" strex %1, %0, [%2]\n"
" teq %1, #0\n"
" bne 1b"
: "=&r" (tmp), "=&r" (tmp2)
: "r" (&v->counter), "Ir" (i)
: "cc");
}
static inline void atomic_sub(int i, volatile atomic_t *v)
{
unsigned long tmp, tmp2;
__asm__ __volatile__("@ atomic_sub\n"
"1: ldrex %0, [%2]\n"
" sub %0, %0, %3\n"
" strex %1, %0, [%2]\n"
" teq %1, #0\n"
" bne 1b"
: "=&r" (tmp), "=&r" (tmp2)
: "r" (&v->counter), "Ir" (i)
: "cc");
}
#define atomic_inc(v) atomic_add(1, v)
#define atomic_dec(v) atomic_sub(1, v)
static inline int atomic_dec_and_test(volatile atomic_t *v)
{
unsigned long tmp;
int result;
__asm__ __volatile__("@ atomic_dec_and_test\n"
"1: ldrex %0, [%2]\n"
" sub %0, %0, #1\n"
" strex %1, %0, [%2]\n"
" teq %1, #0\n"
" bne 1b"
: "=&r" (result), "=r" (tmp)
: "r" (&v->counter)
: "cc");
return result == 0;
}
static inline int atomic_add_negative(int i, volatile atomic_t *v)
{
unsigned long tmp;
int result;
__asm__ __volatile__("@ atomic_add_negative\n"
"1: ldrex %0, [%2]\n"
" add %0, %0, %3\n"
" strex %1, %0, [%2]\n"
" teq %1, #0\n"
" bne 1b"
: "=&r" (result), "=r" (tmp)
: "r" (&v->counter), "Ir" (i)
: "cc");
return result < 0;
}
static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
{
unsigned long tmp, tmp2;
__asm__ __volatile__("@ atomic_clear_mask\n"
"1: ldrex %0, %2\n"
" bic %0, %0, %3\n"
" strex %1, %0, %2\n"
" teq %1, #0\n"
" bne 1b"
: "=&r" (tmp), "=&r" (tmp2)
: "r" (addr), "Ir" (mask)
: "cc");
}
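A minimal usage sketch of the operations defined above (the counter and function names are hypothetical; only the API comes from this header):

static atomic_t pending = ATOMIC_INIT(0);

static void producer(void)
{
	atomic_add(1, &pending);	/* retried internally until the strex succeeds */
}

static int consumer_done(void)
{
	/* true once the counter has dropped back to zero */
	return atomic_dec_and_test(&pending);
}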
#else /* ARM_ARCH_6 */
#include <asm/system.h>
#ifdef CONFIG_SMP
#error SMP not supported on pre-ARMv6 CPUs
#endif
#define atomic_set(v,i) (((v)->counter) = (i))
static inline void atomic_add(int i, volatile atomic_t *v)
......@@ -103,6 +209,8 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
local_irq_restore(flags);
}
#endif /* __LINUX_ARM_ARCH__ */
/* Atomic operations are already serializing on ARM */
#define smp_mb__before_atomic_dec() barrier()
#define smp_mb__after_atomic_dec() barrier()
......
......@@ -69,6 +69,14 @@
# endif
#endif
#if defined(CONFIG_CPU_V6)
//# ifdef _CACHE
# define MULTI_CACHE 1
//# else
//# define _CACHE v6
//# endif
#endif
#if !defined(_CACHE) && !defined(MULTI_CACHE)
#error Unknown cache maintenance model
#endif
......
......@@ -38,6 +38,7 @@
* v4t_early - ARMv4 with Thumb early abort handler
* v5tej_early - ARMv5 with Thumb and Java early abort handler
* xscale - ARMv5 with Thumb with Xscale extensions
* v6_early - ARMv6 generic early abort handler
*/
#undef CPU_ABORT_HANDLER
#undef MULTI_ABORT
......@@ -98,6 +99,14 @@
# endif
#endif
#ifdef CONFIG_CPU_ABRT_EV6
# ifdef CPU_ABORT_HANDLER
# define MULTI_ABORT 1
# else
# define CPU_ABORT_HANDLER v6_early_abort
# endif
#endif
#ifndef CPU_ABORT_HANDLER
#error Unknown data abort handler type
#endif
......
......@@ -12,6 +12,127 @@
#ifndef __ASM_PROC_LOCKS_H
#define __ASM_PROC_LOCKS_H
#if __LINUX_ARM_ARCH__ >= 6
#define __down_op(ptr,fail) \
({ \
__asm__ __volatile__( \
"@ down_op\n" \
"1: ldrex lr, [%0]\n" \
" sub lr, lr, %1\n" \
" strex ip, lr, [%0]\n" \
" teq ip, #0\n" \
" bne 1b\n" \
" teq lr, #0\n" \
" movmi ip, %0\n" \
" blmi " #fail \
: \
: "r" (ptr), "I" (1) \
: "ip", "lr", "cc", "memory"); \
})
#define __down_op_ret(ptr,fail) \
({ \
unsigned int ret; \
__asm__ __volatile__( \
"@ down_op_ret\n" \
"1: ldrex lr, [%1]\n" \
" sub lr, lr, %2\n" \
" strex ip, lr, [%1]\n" \
" teq ip, #0\n" \
" bne 1b\n" \
" teq lr, #0\n" \
" movmi ip, %1\n" \
" movpl ip, #0\n" \
" blmi " #fail "\n" \
" mov %0, ip" \
: "=&r" (ret) \
: "r" (ptr), "I" (1) \
: "ip", "lr", "cc", "memory"); \
ret; \
})
#define __up_op(ptr,wake) \
({ \
__asm__ __volatile__( \
"@ up_op\n" \
"1: ldrex lr, [%0]\n" \
" add lr, lr, %1\n" \
" strex ip, lr, [%0]\n" \
" teq ip, #0\n" \
" bne 1b\n" \
" teq lr, #0\n" \
" movle ip, %0\n" \
" blle " #wake \
: \
: "r" (ptr), "I" (1) \
: "ip", "lr", "cc", "memory"); \
})
/*
* The value 0x01000000 supports up to 128 processors and
* lots of processes. BIAS must be chosen such that sub'ing
* BIAS once per CPU will result in the long remaining
* negative.
*/
#define RW_LOCK_BIAS 0x01000000
#define RW_LOCK_BIAS_STR "0x01000000"
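The effect of the bias is easiest to see as plain arithmetic. The following C model is purely illustrative (the real operations are the ldrex/strex macros in this header), but it captures the success tests used by __down_op_write below and __down_op above:

/* Hypothetical model of the biased counter, for illustration only. */
static int model_down_write(long *count)
{
	*count -= RW_LOCK_BIAS;		/* a writer claims the whole bias */
	return *count == 0;		/* succeeds only with no readers or writers */
}

static int model_down_read(long *count)
{
	*count -= 1;			/* each reader claims one unit */
	return *count >= 0;		/* negative means a writer holds the lock */
}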
#define __down_op_write(ptr,fail) \
({ \
__asm__ __volatile__( \
"@ down_op_write\n" \
"1: ldrex lr, [%0]\n" \
" sub lr, lr, %1\n" \
" strex ip, lr, [%0]\n" \
" teq ip, #0\n" \
" bne 1b\n" \
" teq lr, #0\n" \
" movne ip, %0\n" \
" blne " #fail \
: \
: "r" (ptr), "I" (RW_LOCK_BIAS) \
: "ip", "lr", "cc", "memory"); \
})
#define __up_op_write(ptr,wake) \
({ \
__asm__ __volatile__( \
"@ up_op_write\n" \
"1: ldrex lr, [%0]\n" \
" adds lr, lr, %1\n" \
" strex ip, lr, [%0]\n" \
" teq ip, #0\n" \
" bne 1b\n" \
" movcs ip, %0\n" \
" blcs " #wake \
: \
: "r" (ptr), "I" (RW_LOCK_BIAS) \
: "ip", "lr", "cc", "memory"); \
})
#define __down_op_read(ptr,fail) \
__down_op(ptr, fail)
#define __up_op_read(ptr,wake) \
({ \
__asm__ __volatile__( \
"@ up_op_read\n" \
"1: ldrex lr, [%0]\n" \
" add lr, lr, %1\n" \
" strex ip, lr, [%0]\n" \
" teq ip, #0\n" \
" bne 1b\n" \
" teq lr, #0\n" \
" moveq ip, %0\n" \
" bleq " #wake \
: \
: "r" (ptr), "I" (1) \
: "ip", "lr", "cc", "memory"); \
})
#else
#define __down_op(ptr,fail) \
({ \
__asm__ __volatile__( \
......@@ -137,3 +258,5 @@
})
#endif
#endif
......@@ -84,6 +84,14 @@
# endif
#endif
#ifdef CONFIG_CPU_COPY_V6
# ifdef _USER
# define MULTI_USER 1
# else
# define _USER v6
# endif
#endif
#ifndef _USER
#error Unknown user operations model
#endif
......
......@@ -130,6 +130,14 @@
# define CPU_NAME cpu_xscale
# endif
# endif
# ifdef CONFIG_CPU_V6
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_v6
# endif
# endif
#endif
#ifndef MULTI_CPU
......
......@@ -6,6 +6,10 @@
* or page size, whichever is greater since the cache aliases
* every size/ways bytes.
*/
#if __LINUX_ARM_ARCH__ > 5
#define SHMLBA (4 * PAGE_SIZE)
#else
#define SHMLBA PAGE_SIZE /* attach addr a multiple of this */
#endif
#endif /* _ASMARM_SHMPARAM_H */
#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H
#error ARM architecture does not support SMP spin locks
#if __LINUX_ARM_ARCH__ < 6
#error SMP not supported on pre-ARMv6 CPUs
#endif
/*
* ARMv6 Spin-locking.
*
* We exclusively read the lock value; if it is zero (unlocked) we try to
* exclusively store the locked value. If the exclusive store fails because
* another CPU touched the word first, we loop and try again.
*
* Unlocked value: 0
* Locked value: 1
*/
typedef struct {
volatile unsigned int lock;
} spinlock_t;
#define SPIN_LOCK_UNLOCKED (spinlock_t) { 0 }
#define spin_lock_init(x) do { *(x) = SPIN_LOCK_UNLOCKED; } while (0)
#define spin_is_locked(x) ((x)->lock != 0)
#define spin_unlock_wait(x) do { barrier(); } while (spin_is_locked(x))
static inline void _raw_spin_lock(spinlock_t *lock)
{
unsigned long tmp;
__asm__ __volatile__(
"1: ldrex %0, [%1]\n"
" teq %0, #0\n"
" strexeq %0, %2, [%1]\n"
" teqeq %0, #0\n"
" bne 1b"
: "=&r" (tmp)
: "r" (&lock->lock), "r" (1)
: "cc", "memory");
}
static inline int _raw_spin_trylock(spinlock_t *lock)
{
unsigned long tmp;
__asm__ __volatile__(
" ldrex %0, [%1]\n"
" teq %0, #0\n"
" strexeq %0, %2, [%1]"
: "=&r" (tmp)
: "r" (&lock->lock), "r" (1)
: "cc", "memory");
return tmp == 0;
}
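/*
 * Unlock is a plain store: only the current lock holder writes here, so
 * there is no competing update to lose and no ldrex/strex retry is needed.
 */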
static inline void _raw_spin_unlock(spinlock_t *lock)
{
__asm__ __volatile__(
" str %1, [%0]"
:
: "r" (&lock->lock), "r" (0)
: "cc", "memory");
}
/*
* RWLOCKS
*/
typedef struct {
volatile unsigned int lock;
} rwlock_t;
#define RW_LOCK_UNLOCKED (rwlock_t) { 0 }
#define rwlock_init(x) do { *(x) = RW_LOCK_UNLOCKED; } while (0)
/*
* Write locks are easy - we just set bit 31. When unlocking, we can
* just write zero since the lock is exclusively held.
*/
static inline void _raw_write_lock(rwlock_t *rw)
{
unsigned long tmp;
__asm__ __volatile__(
"1: ldrex %0, [%1]\n"
" teq %0, #0\n"
" strexeq %0, %2, [%1]\n"
" teq %0, #0\n"
" bne 1b"
: "=r" (tmp)
: "r" (&rw->lock), "r" (0x80000000)
: "cc", "memory");
}
static inline void _raw_write_unlock(rwlock_t *rw)
{
__asm__ __volatile__(
"str %1, [%0]"
:
: "r" (&rw->lock), "r" (0)
: "cc", "memory");
}
/*
* Read locks are a bit more hairy:
* - Exclusively load the lock value.
* - Increment it.
* - Store new lock value if positive, and we still own this location.
* If the value is negative, we've already failed.
* - If we failed to store the value, we want a negative result.
* - If we failed, try again.
* Unlocking is similarly hairy. We may have multiple read locks
* currently active. However, we know we won't have any write
* locks.
*/
static inline void _raw_read_lock(rwlock_t *rw)
{
unsigned long tmp, tmp2;
__asm__ __volatile__(
"1: ldrex %0, [%2]\n"
" adds %0, %0, #1\n"
" strexpl %1, %0, [%2]\n"
" rsbpls %0, %1, #0\n"
" bmi 1b"
: "=&r" (tmp), "=&r" (tmp2)
: "r" (&rw->lock)
: "cc", "memory");
}
static inline void _raw_read_unlock(rwlock_t *rw)
{
unsigned long tmp, tmp2;
__asm__ __volatile__(
"1: ldrex %0, [%2]\n"
" sub %0, %0, #1\n"
" strex %1, %0, [%2]\n"
" teq %1, #0\n"
" bne 1b"
: "=&r" (tmp), "=&r" (tmp2)
: "r" (&rw->lock)
: "cc", "memory");
}
static inline int _raw_write_trylock(rwlock_t *rw)
{
unsigned long tmp;
__asm__ __volatile__(
"1: ldrex %0, [%1]\n"
" teq %0, #0\n"
" strexeq %0, %2, [%1]"
: "=r" (tmp)
: "r" (&rw->lock), "r" (0x80000000)
: "cc", "memory");
return tmp == 0;
}
#endif /* __ASM_SPINLOCK_H */
......@@ -12,7 +12,8 @@
#define CPU_ARCH_ARMv5 4
#define CPU_ARCH_ARMv5T 5
#define CPU_ARCH_ARMv5TE 6
#define CPU_ARCH_ARMv6 7
#define CPU_ARCH_ARMv5TEJ 7
#define CPU_ARCH_ARMv6 8
/*
* CR1 bits (CP#15 CR1)
......@@ -123,6 +124,26 @@ extern struct task_struct *__switch_to(struct task_struct *, struct thread_info
mb(); \
} while (0)
/*
* CPU interrupt mask handling.
*/
#if __LINUX_ARM_ARCH__ >= 6
#define local_irq_save(x) \
({ \
__asm__ __volatile__( \
"mrs %0, cpsr @ local_irq_save\n" \
"cpsid i" \
: "=r" (x) : : "memory", "cc"); \
})
#define local_irq_enable() __asm__("cpsie i @ __sti" : : : "memory", "cc")
#define local_irq_disable() __asm__("cpsid i @ __cli" : : : "memory", "cc")
#define local_fiq_enable() __asm__("cpsie f @ __stf" : : : "memory", "cc")
#define local_fiq_disable() __asm__("cpsid f @ __clf" : : : "memory", "cc")
#else
/*
* Save the current interrupt enable state & disable IRQs
*/
......@@ -199,6 +220,8 @@ extern struct task_struct *__switch_to(struct task_struct *, struct thread_info
: "memory", "cc"); \
})
#endif
/*
* Save the current interrupt enable state.
*/
......