Commit 0822977d authored by Russell King

2.5.8 ARM updates:

 - preempt updates
 - build fixes
 - new tlb flush macro
 - add asm/cacheflush.h and asm/tlbflush.h
parent f582a9c3
@@ -17,6 +17,7 @@
#include <linux/mman.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/errno.h>
#include <asm/dma.h>
......
@@ -756,7 +756,6 @@ __irq_svc: sub sp, sp, #S_FRAME_SIZE
#ifdef CONFIG_PREEMPT
ldr r0, [r8, #TI_FLAGS] @ get flags
tst r0, #_TIF_NEED_RESCHED
ldrne r6, .LCirq_stat
blne svc_preempt
preempt_return:
ldr r0, [r8, #TI_PREEMPT] @ read preempt value
@@ -770,20 +769,20 @@ preempt_return:
#ifdef CONFIG_PREEMPT
svc_preempt: teq r9, #0 @ was preempt count = 0
ldreq r6, .LCirq_stat
movne pc, lr @ no
ldr r0, [r6, #4] @ local_irq_count
ldr r1, [r6, #8] @ local_bh_count
adds r0, r0, r1
movne pc, lr
ldr r1, [r8, #TI_TASK]
set_cpsr_c r2, #MODE_SVC @ enable IRQs
str r0, [r1, #0] @ current->state = TASK_RUNNING
1: bl SYMBOL_NAME(schedule)
mov r7, #PREEMPT_ACTIVE
str r7, [r8, #TI_PREEMPT] @ set PREEMPT_ACTIVE
1: set_cpsr_c r2, #MODE_SVC @ enable IRQs
bl SYMBOL_NAME(schedule)
set_cpsr_c r0, #PSR_I_BIT | MODE_SVC @ disable IRQs
ldr r0, [r8, #TI_FLAGS]
ldr r0, [r8, #TI_FLAGS] @ get new task's TI_FLAGS
tst r0, #_TIF_NEED_RESCHED
beq preempt_return
set_cpsr_c r0, #MODE_SVC @ enable IRQs
beq preempt_return @ go again
b 1b
#endif
......
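For readers untangling the interleaved old and new lines above: svc_preempt now bails out unless the preempt count is zero and no interrupt or bottom-half processing is in flight, sets PREEMPT_ACTIVE once, and then loops on schedule() until the freshly scheduled task no longer has _TIF_NEED_RESCHED set. In rough C terms (a sketch only; irq_stat and the field names are illustrative stand-ins for the r6/r8-relative loads in the assembly):

    /* illustrative sketch of the reworked svc_preempt control flow */
    static void svc_preempt_sketch(struct thread_info *ti)
    {
            if (ti->preempt_count != 0)
                    return;                          /* teq r9, #0; movne pc, lr */
            if (irq_stat.local_irq_count + irq_stat.local_bh_count != 0)
                    return;                          /* adds r0, r0, r1; movne pc, lr */
            ti->preempt_count = PREEMPT_ACTIVE;      /* str r7, [r8, #TI_PREEMPT] */
            do {
                    local_irq_enable();              /* set_cpsr_c r2, #MODE_SVC */
                    schedule();                      /* bl SYMBOL_NAME(schedule) */
                    local_irq_disable();             /* set_cpsr_c r0, #PSR_I_BIT | MODE_SVC */
            } while (ti->flags & _TIF_NEED_RESCHED); /* tst ...; b 1b */
    }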
@@ -76,6 +76,9 @@ __do_notify_resume:
* This is how we return from a fork.
*/
ENTRY(ret_from_fork)
#ifdef CONFIG_PREEMPT
bl schedule_tail
#endif
get_thread_info tsk
ldr ip, [tsk, #TI_FLAGS] @ check for syscall tracing
mov why, #1
......
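The one functional change here is the schedule_tail() call: under CONFIG_PREEMPT a newly forked task's first instructions run while the scheduler still has the context switch in flight, and schedule_tail() is what completes it (presumably dropping the runqueue lock and the elevated preempt count, as in the generic scheduler). Schematically (a sketch; prev stands for the task switched away from, however the architecture hands it over):

    /* sketch: the forked child's first steps under CONFIG_PREEMPT */
    schedule_tail(prev);    /* finish the in-flight context switch */
    /* ...then the usual syscall-trace check and return to user space */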
@@ -29,6 +29,7 @@
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/seq_file.h>
#include <linux/errno.h>
#include <asm/irq.h>
#include <asm/system.h>
......
@@ -73,7 +73,7 @@ void (*pm_power_off)(void);
* This is our default idle handler. We need to disable
* interrupts here to ensure we don't miss a wakeup call.
*/
static void default_idle(void)
void default_idle(void)
{
__cli();
if (!need_resched() && !hlt_counter)
......
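The comment above carries the whole argument: both the need_resched() test and the halt must happen with interrupts disabled, otherwise a wakeup interrupt landing between the two is lost until something else wakes the CPU. The resulting shape of the handler (a sketch; arch_idle() is an assumed stand-in for the machine-specific halt, which wakes on any interrupt):

    void default_idle(void)
    {
            __cli();                /* IRQs off: a wakeup cannot sneak past the test */
            if (!need_resched() && !hlt_counter)
                    arch_idle();    /* halt; any interrupt resumes execution */
            __sti();                /* IRQs back on */
    }

The static qualifier is dropped at the same time, presumably so that machine-specific code can now refer to default_idle by name.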
@@ -13,6 +13,7 @@
*/
#include <linux/config.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <asm/semaphore.h>
......
@@ -628,14 +628,12 @@ int do_signal(sigset_t *oldset, struct pt_regs *regs, int syscall)
case SIGSTOP: {
struct signal_struct *sig;
current->state = TASK_STOPPED;
current->exit_code = signr;
sig = current->parent->sig;
preempt_disable();
current->state = TASK_STOPPED;
if (sig && !(sig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
notify_parent(current, SIGCHLD);
schedule();
preempt_enable();
continue;
}
......
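The point of this reshuffle: if the task sets current->state = TASK_STOPPED and is then preempted before reaching schedule(), a nominally stopped task is put back on the runqueue. Moving the state change inside the preempt_disable()/preempt_enable() pair makes the stop sequence atomic with respect to kernel preemption. The fixed ordering, pulled out of the hunk above:

    preempt_disable();
    current->state = TASK_STOPPED;          /* cannot be preempted from here on */
    if (sig && !(sig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
            notify_parent(current, SIGCHLD);
    schedule();                             /* actually stop */
    preempt_enable();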
@@ -25,6 +25,7 @@
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/timex.h>
#include <linux/errno.h>
#include <asm/hardware.h>
#include <asm/io.h>
......
@@ -44,7 +44,7 @@ static pte_t *minicache_pte;
unsigned long map_page_minicache(unsigned long virt)
{
set_pte(minicache_pte, mk_pte_phys(__pa(virt), minicache_pgprot));
flush_kern_tlb_page(minicache_address);
flush_tlb_kernel_page(minicache_address);
return minicache_address;
}
......
@@ -723,7 +723,6 @@ ENTRY(xscale_processor_functions)
.word cpu_xscale_set_pgd
.word cpu_xscale_set_pmd
.word cpu_xscale_set_pte
.size xscale_processor_functions, . - xscale_processor_functions
.type cpu_80200_info, #object
@@ -779,6 +778,5 @@ __pxa250_proc_info:
.long xscale_processor_functions
.long v4wbi_tlb_fns
.long v5te_mc_user_fns
.size __cotulla_proc_info, . - __cotulla_proc_info
.size __pxa250_proc_info, . - __pxa250_proc_info
@@ -53,6 +53,7 @@ ENTRY(v3_flush_user_tlb_range)
act_mm r3 @ get current->active_mm
teq r2, r3 @ == mm ?
movne pc, lr @ no, we don't do anything
ENTRY(v3_flush_kern_tlb_range)
bic r0, r0, #0x0ff
bic r0, r0, #0xf00
1: mcr p15, 0, r0, c6, c0, 0 @ invalidate TLB entry
@@ -87,5 +88,6 @@ ENTRY(v3_tlb_fns)
.long v3_flush_user_tlb_mm
.long v3_flush_user_tlb_range
.long v3_flush_user_tlb_page
.long v3_flush_kern_tlb_range
.long v3_flush_kern_tlb_page
.size v3_tlb_fns, . - v3_tlb_fns
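These .long tables are consumed through struct cpu_tlb_fns (added later in this commit, in proc-armv/tlbflush.h), so the entry order in the assembly must match the field order in C. Written out as the equivalent C initializer (a sketch; the flush_kern_all entry sits just above the visible part of the hunk and is assumed):

    static struct cpu_tlb_fns v3_tlb_fns_equiv = {
            .flush_kern_all   = v3_flush_kern_tlb_all,   /* assumed, off-screen */
            .flush_user_mm    = v3_flush_user_tlb_mm,
            .flush_user_range = v3_flush_user_tlb_range,
            .flush_user_page  = v3_flush_user_tlb_page,
            .flush_kern_range = v3_flush_kern_tlb_range, /* new in this commit */
            .flush_kern_page  = v3_flush_kern_tlb_page,
    };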
@@ -42,7 +42,7 @@ ENTRY(v4_flush_kern_tlb_all)
/*
* v4_flush_user_tlb_range(start, end, mm)
*
* Invalidate a range of TLB entries in the specified address space.
* Invalidate a range of TLB entries in the specified user address space.
*
* - start - range start address
* - end - range end address
@@ -85,6 +85,27 @@ ENTRY(v4_flush_user_tlb_page)
mcrne p15, 0, r3, c8, c5, 0 @ invalidate I TLB
mov pc, lr
/*
* v4_flush_kern_tlb_range(start, end)
*
* Invalidate a range of TLB entries in the specified kernel
* address range.
*
* - start - virtual address (may not be aligned)
* - end - virtual address (may not be aligned)
*/
.align 5
ENTRY(v4_flush_kern_tlb_range)
bic r0, r0, #0x0ff
bic r0, r0, #0xf00
1: mcr p15, 0, r0, c8, c6, 1 @ invalidate D TLB entry
add r0, r0, #PAGE_SZ
cmp r0, r1
blo 1b
mcr p15, 0, r3, c8, c5, 0 @ invalidate I TLB
mov pc, lr
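The loop is the usual page-at-a-time invalidate; in C terms (a sketch; the helper names are illustrative labels for the coprocessor writes shown in the @-comments):

    start &= ~(PAGE_SIZE - 1);              /* the two bic instructions page-align start */
    do {
            invalidate_dtlb_entry(start);   /* mcr p15, 0, r0, c8, c6, 1 */
            start += PAGE_SIZE;
    } while (start < end);                  /* cmp r0, r1; blo 1b */
    invalidate_itlb();                      /* mcr p15, 0, r3, c8, c5, 0 */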
/*
* v4_flush_kern_tlb_page(kaddr)
*
@@ -106,5 +127,6 @@ ENTRY(v4_tlb_fns)
.long v4_flush_user_tlb_mm
.long v4_flush_user_tlb_range
.long v4_flush_user_tlb_page
.long v4_flush_kern_tlb_range
.long v4_flush_kern_tlb_page
.size v4_tlb_fns, . - v4_tlb_fns
@@ -88,7 +88,41 @@ ENTRY(v4wb_flush_user_tlb_page)
mcr p15, 0, r3, c7, c10, 4 @ drain WB
tst r2, #VM_EXEC
mcrne p15, 0, r3, c8, c5, 0 @ invalidate I TLB
mcr p15, 0, r0, c8, c6, 1 @ invalidate D TLB entry
mov pc, lr
/*
* v4wb_flush_kern_tlb_range(start, end)
*
* Invalidate a range of TLB entries in the specified kernel
* address range.
*
* - start - virtual address (may not be aligned)
* - end - virtual address (may not be aligned)
*/
ENTRY(v4wb_flush_kern_tlb_range)
mov r3, #0
mcr p15, 0, r3, c7, c10, 4 @ drain WB
bic r0, r0, #0x0ff
bic r0, r0, #0xf00
mcr p15, 0, r3, c8, c5, 0 @ invalidate I TLB
1: mcr p15, 0, r0, c8, c6, 1 @ invalidate D TLB entry
add r0, r0, #PAGE_SZ
cmp r0, r1
blo 1b
mov pc, lr
/*
* v4wb_flush_kern_tlb_page(kaddr)
*
* Invalidate the TLB entry for the specified page. The address
* will be in the kernel's virtual memory space. Current uses
* only require the D-TLB to be invalidated.
*
* - kaddr - Kernel virtual memory address
*/
ENTRY(v4wb_flush_kern_tlb_page)
mcr p15, 0, r3, c8, c5, 0 @ invalidate I TLB
mcr p15, 0, r0, c8, c6, 1 @ invalidate D TLB entry
mov pc, lr
@@ -107,14 +141,17 @@ ENTRY(v4wb_flush_kern_tlb_page)
*/
.align 5
ENTRY(v4wbi_flush_user_tlb_range)
vma_vm_mm ip, r2
act_mm r3 @ get current->active_mm
teq r2, r3 @ == mm ?
eors r3, ip, r3 @ == mm ?
movne pc, lr @ no, we don't do anything
mov r3, #0
mcr p15, 0, r3, c7, c10, 4 @ drain WB
vma_vm_flags r2, r2
bic r0, r0, #0x0ff
bic r0, r0, #0xf00
1: mcr p15, 0, r0, c8, c5, 1 @ invalidate I TLB entry
1: tst r2, #VM_EXEC
mcrne p15, 0, r0, c8, c5, 1 @ invalidate I TLB entry
mcr p15, 0, r0, c8, c6, 1 @ invalidate D TLB entry
add r0, r0, #PAGE_SZ
cmp r0, r1
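Two behavioural tweaks hide in this hunk (which is truncated before the loop's branch-back): the mm comparison now goes through vma_vm_mm plus eors rather than a plain teq against r2, and the per-page I-TLB invalidate becomes conditional on the mapping being executable. Per page, the new loop body is effectively (illustrative helper names):

    if (vma->vm_flags & VM_EXEC)            /* tst r2, #VM_EXEC */
            invalidate_itlb_entry(addr);    /* mcrne p15, 0, r0, c8, c5, 1 */
    invalidate_dtlb_entry(addr);            /* mcr  p15, 0, r0, c8, c6, 1 */
    addr += PAGE_SIZE;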
@@ -140,7 +177,23 @@ ENTRY(v4wbi_flush_user_tlb_page)
mcr p15, 0, r3, c7, c10, 4 @ drain WB
tst r2, #VM_EXEC
mcrne p15, 0, r0, c8, c5, 1 @ invalidate I TLB entry
mcr p15, 0, r0, c8, c6, 1 @ invalidate D TLB entry
mov pc, lr
ENTRY(v4wbi_flush_kern_tlb_range)
mov r3, #0
mcr p15, 0, r3, c7, c10, 4 @ drain WB
bic r0, r0, #0x0ff
bic r0, r0, #0xf00
1: mcr p15, 0, r0, c8, c5, 1 @ invalidate I TLB entry
mcr p15, 0, r0, c8, c6, 1 @ invalidate D TLB entry
add r0, r0, #PAGE_SZ
cmp r0, r1
blo 1b
mov pc, lr
ENTRY(v4wbi_flush_kern_tlb_page)
mcr p15, 0, r0, c8, c5, 1 @ invalidate I TLB entry
mcr p15, 0, r0, c8, c6, 1 @ invalidate D TLB entry
mov pc, lr
@@ -152,6 +205,7 @@ ENTRY(v4wb_tlb_fns)
.long v4wb_flush_user_tlb_mm
.long v4wb_flush_user_tlb_range
.long v4wb_flush_user_tlb_page
.long v4wb_flush_kern_tlb_range
.long v4wb_flush_kern_tlb_page
.size v4wb_tlb_fns, . - v4wb_tlb_fns
@@ -161,5 +215,6 @@ ENTRY(v4wbi_tlb_fns)
.long v4wbi_flush_user_tlb_mm
.long v4wbi_flush_user_tlb_range
.long v4wbi_flush_user_tlb_page
.long v4wbi_flush_kern_tlb_range
.long v4wbi_flush_kern_tlb_page
.size v4wbi_tlb_fns, . - v4wbi_tlb_fns
@@ -28,6 +28,7 @@
#include <linux/config.h>
/* XXX */
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/signal.h>
......
/*
* linux/include/asm-arm/cacheflush.h
*
* Copyright (C) 2000-2002 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef _ASMARM_CACHEFLUSH_H
#define _ASMARM_CACHEFLUSH_H
#include <asm/proc/cache.h>
#endif
@@ -272,7 +272,7 @@ extern void consistent_sync(void *vaddr, size_t size, int rw);
/*
* Change "struct page" to physical address.
*/
#ifdef CONFIG_DISCONTIG
#ifdef CONFIG_DISCONTIGMEM
#define page_to_phys(page) \
((((page) - page_zone(page)->zone_mem_map) << PAGE_SHIFT) \
+ page_zone(page)->zone_start_paddr)
......
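A quick worked example of the DISCONTIGMEM arithmetic, with hypothetical values: a page sitting five entries into its zone's mem_map, in a zone whose zone_start_paddr is 0xc0000000, resolves (PAGE_SHIFT = 12) to 0xc0000000 + (5 << 12) = 0xc0005000:

    unsigned long idx  = page - page_zone(page)->zone_mem_map;  /* e.g. 5 */
    unsigned long phys = (idx << PAGE_SHIFT)                    /* 5 << 12 = 0x5000 */
                       + page_zone(page)->zone_start_paddr;     /* + 0xc0000000 */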
@@ -10,11 +10,10 @@
#ifndef _ASMARM_PGALLOC_H
#define _ASMARM_PGALLOC_H
#include <linux/config.h>
#include <asm/processor.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/proc/cache.h>
#include <asm/proc/pgalloc.h>
/*
......
@@ -27,66 +27,3 @@
/* DAG: ARM3 will flush cache on MEMC updates anyway? so don't bother */
#define clean_cache_area(_start,_size) do { } while (0)
/*
* TLB flushing:
*
* - flush_tlb_all() flushes all processes TLBs
* - flush_tlb_mm(mm) flushes the specified mm context TLB's
* - flush_tlb_page(vma, vmaddr) flushes one page
* - flush_tlb_range(vma, start, end) flushes a range of pages
*/
#define flush_tlb_all() memc_update_all()
#define flush_tlb_mm(mm) memc_update_mm(mm)
#define flush_tlb_range(vma,start,end) \
do { memc_update_mm(vma->vm_mm); (void)(start); (void)(end); } while (0)
#define flush_tlb_page(vma, vmaddr) do { } while (0)
/*
* The following handle the weird MEMC chip
*/
static inline void memc_update_all(void)
{
struct task_struct *p;
cpu_memc_update_all(init_mm.pgd);
for_each_task(p) {
if (!p->mm)
continue;
cpu_memc_update_all(p->mm->pgd);
}
processor._set_pgd(current->active_mm->pgd);
}
static inline void memc_update_mm(struct mm_struct *mm)
{
cpu_memc_update_all(mm->pgd);
if (mm == current->active_mm)
processor._set_pgd(mm->pgd);
}
static inline void
memc_clear(struct mm_struct *mm, struct page *page)
{
cpu_memc_update_entry(mm->pgd, (unsigned long) page_address(page), 0);
if (mm == current->active_mm)
processor._set_pgd(mm->pgd);
}
static inline void
memc_update_addr(struct mm_struct *mm, pte_t pte, unsigned long vaddr)
{
cpu_memc_update_entry(mm->pgd, pte_val(pte), vaddr);
if (mm == current->active_mm)
processor._set_pgd(mm->pgd);
}
static inline void
update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
{
struct mm_struct *mm = vma->vm_mm;
memc_update_addr(mm, pte, addr);
}
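On these MEMC-based 26-bit machines there is no conventional TLB: the MEMC chip holds the reversed mapping itself, so every flush above degenerates into rewriting the MEMC entries from the page tables and, when the mm is the active one, reloading the translation base. flush_tlb_mm(mm), for instance, boils down to the body of memc_update_mm():

    cpu_memc_update_all(mm->pgd);           /* rewrite the MEMC's entries */
    if (mm == current->active_mm)
            processor._set_pgd(mm->pgd);    /* reload the translation base */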
@@ -132,108 +132,3 @@ static inline void flush_dcache_page(struct page *page)
do { \
cpu_icache_invalidate_range((_s), (_e)); \
} while (0)
/*
* TLB Management
* ==============
*
* The arch/arm/mm/tlb-*.S files implement these methods.
*
* The TLB specific code is expected to perform whatever tests it
* needs to determine if it should invalidate the TLB for each
* call. Start addresses are inclusive and end addresses are
* exclusive; it is safe to round these addresses down.
*
* flush_tlb_all()
*
* Invalidate the entire TLB.
*
* flush_tlb_mm(mm)
*
* Invalidate all TLB entries in a particular address
* space.
* - mm - mm_struct describing address space
*
* flush_tlb_range(mm,start,end)
*
* Invalidate a range of TLB entries in the specified
* address space.
* - mm - mm_struct describing address space
* - start - start address (may not be aligned)
* - end - end address (exclusive, may not be aligned)
*
* flush_tlb_page(vaddr,vma)
*
* Invalidate the specified page in the specified address range.
* - vaddr - virtual address (may not be aligned)
* - vma - vma_struct describing address range
*
* flush_kern_tlb_page(kaddr)
*
* Invalidate the TLB entry for the specified page. The address
* will be in the kernel's virtual memory space. Current uses
* only require the D-TLB to be invalidated.
* - kaddr - Kernel virtual memory address
*/
struct cpu_tlb_fns {
void (*flush_kern_all)(void);
void (*flush_user_mm)(struct mm_struct *);
void (*flush_user_range)(unsigned long, unsigned long, struct vm_area_struct *);
void (*flush_user_page)(unsigned long, struct vm_area_struct *);
void (*flush_kern_page)(unsigned long);
};
/*
* Convert calls to our calling convention.
*/
#define flush_tlb_all() __cpu_flush_kern_tlb_all()
#define flush_tlb_mm(mm) __cpu_flush_user_tlb_mm(mm)
#define flush_tlb_range(vma,start,end) __cpu_flush_user_tlb_range(start,end,vma)
#define flush_tlb_page(vma,vaddr) __cpu_flush_user_tlb_page(vaddr,vma)
#define flush_kern_tlb_page(kaddr) __cpu_flush_kern_tlb_page(kaddr)
/*
* Now select the calling method
*/
#ifdef MULTI_TLB
extern struct cpu_tlb_fns cpu_tlb;
#define __cpu_flush_kern_tlb_all cpu_tlb.flush_kern_all
#define __cpu_flush_user_tlb_mm cpu_tlb.flush_user_mm
#define __cpu_flush_user_tlb_range cpu_tlb.flush_user_range
#define __cpu_flush_user_tlb_page cpu_tlb.flush_user_page
#define __cpu_flush_kern_tlb_page cpu_tlb.flush_kern_page
#else
#define __cpu_flush_kern_tlb_all __glue(_TLB,_flush_kern_tlb_all)
#define __cpu_flush_user_tlb_mm __glue(_TLB,_flush_user_tlb_mm)
#define __cpu_flush_user_tlb_range __glue(_TLB,_flush_user_tlb_range)
#define __cpu_flush_user_tlb_page __glue(_TLB,_flush_user_tlb_page)
#define __cpu_flush_kern_tlb_page __glue(_TLB,_flush_kern_tlb_page)
extern void __cpu_flush_kern_tlb_all(void);
extern void __cpu_flush_user_tlb_mm(struct mm_struct *);
extern void __cpu_flush_user_tlb_range(unsigned long, unsigned long, struct vm_area_struct *);
extern void __cpu_flush_user_tlb_page(unsigned long, struct vm_area_struct *);
extern void __cpu_flush_kern_tlb_page(unsigned long);
#endif
/*
* if PG_dcache_dirty is set for the page, we need to ensure that any
* cache entries for the kernel's virtual memory range are written
* back to the page.
*/
extern void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t pte);
/*
* Old ARM MEMC stuff. This supports the reversed mapping handling that
* we have on the older 26-bit machines. We don't have a MEMC chip, so...
*/
#define memc_update_all() do { } while (0)
#define memc_update_mm(mm) do { } while (0)
#define memc_update_addr(mm,pte,log) do { } while (0)
#define memc_clear(mm,physaddr) do { } while (0)
/*
* linux/include/asm-arm/proc-armv/tlbflush.h
*
* Copyright (C) 1999-2002 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
/*
* TLB Management
* ==============
*
* The arch/arm/mm/tlb-*.S files implement these methods.
*
* The TLB specific code is expected to perform whatever tests it
* needs to determine if it should invalidate the TLB for each
* call. Start addresses are inclusive and end addresses are
* exclusive; it is safe to round these addresses down.
*
* flush_tlb_all()
*
* Invalidate the entire TLB.
*
* flush_tlb_mm(mm)
*
* Invalidate all TLB entries in a particular address
* space.
* - mm - mm_struct describing address space
*
* flush_tlb_range(mm,start,end)
*
* Invalidate a range of TLB entries in the specified
* address space.
* - mm - mm_struct describing address space
* - start - start address (may not be aligned)
* - end - end address (exclusive, may not be aligned)
*
* flush_tlb_page(vaddr,vma)
*
* Invalidate the specified page in the specified address range.
* - vaddr - virtual address (may not be aligned)
* - vma - vma_struct describing address range
*
* flush_kern_tlb_page(kaddr)
*
* Invalidate the TLB entry for the specified page. The address
* will be in the kernel's virtual memory space. Current uses
* only require the D-TLB to be invalidated.
* - kaddr - Kernel virtual memory address
*/
struct cpu_tlb_fns {
void (*flush_kern_all)(void);
void (*flush_user_mm)(struct mm_struct *);
void (*flush_user_range)(unsigned long, unsigned long, struct vm_area_struct *);
void (*flush_user_page)(unsigned long, struct vm_area_struct *);
void (*flush_kern_range)(unsigned long, unsigned long);
void (*flush_kern_page)(unsigned long);
};
/*
* Convert calls to our calling convention.
*/
#define flush_tlb_all() __cpu_flush_kern_tlb_all()
#define flush_tlb_mm(mm) __cpu_flush_user_tlb_mm(mm)
#define flush_tlb_range(vma,start,end) __cpu_flush_user_tlb_range(start,end,vma)
#define flush_tlb_page(vma,vaddr) __cpu_flush_user_tlb_page(vaddr,vma)
#define flush_tlb_kernel_range(s,e) __cpu_flush_kern_tlb_range(s,e)
#define flush_tlb_kernel_page(kaddr) __cpu_flush_kern_tlb_page(kaddr)
/*
* Now select the calling method
*/
#ifdef MULTI_TLB
extern struct cpu_tlb_fns cpu_tlb;
#define __cpu_flush_kern_tlb_all cpu_tlb.flush_kern_all
#define __cpu_flush_user_tlb_mm cpu_tlb.flush_user_mm
#define __cpu_flush_user_tlb_range cpu_tlb.flush_user_range
#define __cpu_flush_user_tlb_page cpu_tlb.flush_user_page
#define __cpu_flush_kern_tlb_range cpu_tlb.flush_kern_range
#define __cpu_flush_kern_tlb_page cpu_tlb.flush_kern_page
#else
#define __cpu_flush_kern_tlb_all __glue(_TLB,_flush_kern_tlb_all)
#define __cpu_flush_user_tlb_mm __glue(_TLB,_flush_user_tlb_mm)
#define __cpu_flush_user_tlb_range __glue(_TLB,_flush_user_tlb_range)
#define __cpu_flush_user_tlb_page __glue(_TLB,_flush_user_tlb_page)
#define __cpu_flush_kern_tlb_range __glue(_TLB,_flush_kern_tlb_range)
#define __cpu_flush_kern_tlb_page __glue(_TLB,_flush_kern_tlb_page)
extern void __cpu_flush_kern_tlb_all(void);
extern void __cpu_flush_user_tlb_mm(struct mm_struct *);
extern void __cpu_flush_user_tlb_range(unsigned long, unsigned long, struct vm_area_struct *);
extern void __cpu_flush_user_tlb_page(unsigned long, struct vm_area_struct *);
extern void __cpu_flush_kern_tlb_range(unsigned long, unsigned long);
extern void __cpu_flush_kern_tlb_page(unsigned long);
#endif
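Under MULTI_TLB the indirection is resolved at boot: each processor's info record carries a pointer to its table (the __pxa250_proc_info hunk earlier, for example, lists .long v4wbi_tlb_fns), and the setup code presumably copies the selected table into cpu_tlb along these lines (assumed mechanism and names):

    cpu_tlb = *list->tlb;   /* e.g. copies v4wbi_tlb_fns into cpu_tlb */

From then on, flush_tlb_kernel_page(kaddr) expands to cpu_tlb.flush_kern_page(kaddr) through the macros above.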
/*
* if PG_dcache_dirty is set for the page, we need to ensure that any
* cache entries for the kernel's virtual memory range are written
* back to the page.
*/
extern void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t pte);
/*
* ARM processors do not cache TLB tables in RAM.
*/
#define flush_tlb_pgtables(mm,start,end) do { } while (0)
/*
* Old ARM MEMC stuff. This supports the reversed mapping handling that
* we have on the older 26-bit machines. We don't have a MEMC chip, so...
*/
#define memc_update_all() do { } while (0)
#define memc_update_mm(mm) do { } while (0)
#define memc_update_addr(mm,pte,log) do { } while (0)
#define memc_clear(mm,physaddr) do { } while (0)
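Tying the new names back to their first user in this commit: the minicache hunk earlier rewrites a kernel PTE and then invalidates it through the renamed entry point, which resolves via the machinery above:

    set_pte(minicache_pte, mk_pte_phys(__pa(virt), minicache_pgprot));
    flush_tlb_kernel_page(minicache_address);   /* -> __cpu_flush_kern_tlb_page */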
@@ -102,6 +102,8 @@ static inline unsigned long __thread_saved_fp(struct thread_info *thread)
#endif
#define PREEMPT_ACTIVE 0x04000000
/*
* thread information flags:
* TIF_SYSCALL_TRACE - syscall trace active
......
/*
* linux/include/asm-arm/tlbflush.h
*
* Copyright (C) 2000-2002 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef _ASMARM_TLBFLUSH_H
#define _ASMARM_TLBFLUSH_H
#include <asm/proc/tlbflush.h>
#endif