Commit c9e35b4a authored by Linus Torvalds

Merge tag 'arc-6.9-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/vgupta/arc

Pull ARC fixes from Vineet Gupta:

 - Incorrect VIPT aliasing assumption

 - Misc build warning fixes and some typos

* tag 'arc-6.9-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/vgupta/arc:
  ARC: [plat-hsdk]: Remove misplaced interrupt-cells property
  ARC: Fix typos
  ARC: mm: fix new code about cache aliasing
  ARC: Fix -Wmissing-prototypes warnings
parents bbacf717 61231eb8
@@ -6,7 +6,6 @@
 config ARC
 	def_bool y
 	select ARC_TIMERS
-	select ARCH_HAS_CPU_CACHE_ALIASING
 	select ARCH_HAS_CACHE_LINE_SIZE
 	select ARCH_HAS_DEBUG_VM_PGTABLE
 	select ARCH_HAS_DMA_PREP_COHERENT
......
@@ -1,5 +1,5 @@
 # SPDX-License-Identifier: GPL-2.0
-# uImage build relies on mkimage being availble on your host for ARC target
+# uImage build relies on mkimage being available on your host for ARC target
 # You will need to build u-boot for ARC, rename mkimage to arc-elf32-mkimage
-# and make sure it's reacable from your PATH
+# and make sure it's reachable from your PATH
 OBJCOPYFLAGS= -O binary -R .note -R .note.gnu.build-id -R .comment -S
......
@@ -119,9 +119,9 @@ mmc@15000 {
 	/*
 	 * The DW APB ICTL intc on MB is connected to CPU intc via a
 	 * DT "invisible" DW APB GPIO block, configured to simply pass thru
-	 * interrupts - setup accordinly in platform init (plat-axs10x/ax10x.c)
+	 * interrupts - setup accordingly in platform init (plat-axs10x/ax10x.c)
 	 *
-	 * So here we mimic a direct connection betwen them, ignoring the
+	 * So here we mimic a direct connection between them, ignoring the
 	 * ABPG GPIO. Thus set "interrupts = <24>" (DW APB GPIO to core)
 	 * instead of "interrupts = <12>" (DW APB ICTL to DW APB GPIO)
 	 *
......
@@ -205,7 +205,6 @@ dmac_cfg_clk: dmac-gpu-cfg-clk {
 	};

 	gmac: ethernet@8000 {
-		#interrupt-cells = <1>;
 		compatible = "snps,dwmac";
 		reg = <0x8000 0x2000>;
 		interrupts = <10>;
......
@@ -113,7 +113,7 @@ mmc@15000 {
 	/*
 	 * Embedded Vision subsystem UIO mappings; only relevant for EV VDK
 	 *
-	 * This node is intentionally put outside of MB above becase
+	 * This node is intentionally put outside of MB above because
 	 * it maps areas outside of MB's 0xez-0xfz.
 	 */
 	uio_ev: uio@d0000000 {
......
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __ASM_ARC_CACHETYPE_H
-#define __ASM_ARC_CACHETYPE_H
-#include <linux/types.h>
-#define cpu_dcache_is_aliasing()	true
-#endif
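
The header deleted above pinned `cpu_dcache_is_aliasing()` to true, declaring every ARC D-cache aliasing, which only holds for some cache geometries. With the `ARCH_HAS_CPU_CACHE_ALIASING` select dropped from Kconfig, ARC gets the generic fallback instead. A minimal sketch of that fallback pattern, assuming the usual include/linux/cacheinfo.h wiring (illustrative, not a verbatim copy):

```c
/*
 * Sketch of the generic fallback (assumption, not copied verbatim):
 * without ARCH_HAS_CPU_CACHE_ALIASING, callers see a constant "false"
 * instead of a per-arch answer from <asm/cachetype.h>.
 */
#ifdef CONFIG_ARCH_HAS_CPU_CACHE_ALIASING
#include <asm/cachetype.h>
#else
#define cpu_dcache_is_aliasing()	false
#endif
```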
@@ -12,7 +12,7 @@
 /*
  * DSP-related saved registers - need to be saved only when you are
  * scheduled out.
- * structure fields name must correspond to aux register defenitions for
+ * structure fields name must correspond to aux register definitions for
  * automatic offset calculation in DSP_AUX_SAVE_RESTORE macros
  */
 struct dsp_callee_regs {
......
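
The naming rule called out in the hunk above lets a macro derive a save-slot offset mechanically from a register name. A minimal sketch of the idea using `offsetof`; the field and macro names here are hypothetical, not the kernel's:

```c
#include <stddef.h>

/* Hypothetical stand-in for dsp_callee_regs: each field is named
 * after the aux register it shadows. */
struct dsp_regs_sketch {
	unsigned long ACC0_LO;
	unsigned long ACC0_HI;
};

/* One macro maps a register name to its save-slot offset - the
 * "automatic offset calculation" the comment refers to. */
#define DSP_SLOT_OFFSET(reg)	offsetof(struct dsp_regs_sketch, reg)
/* e.g. DSP_SLOT_OFFSET(ACC0_HI) == sizeof(unsigned long) */
```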
@@ -7,7 +7,7 @@
  * Stack switching code can no longer reliably rely on the fact that
  * if we are NOT in user mode, stack is switched to kernel mode.
  * e.g. L2 IRQ interrupted a L1 ISR which had not yet completed
- * it's prologue including stack switching from user mode
+ * its prologue including stack switching from user mode
  *
  * Vineetg: Aug 28th 2008: Bug #94984
  * -Zero Overhead Loop Context shd be cleared when entering IRQ/EXcp/Trap
@@ -143,7 +143,7 @@
  * 2. L1 IRQ taken, ISR starts (CPU auto-switched to KERNEL mode)
  * 3. But before it could switch SP from USER to KERNEL stack
  *    a L2 IRQ "Interrupts" L1
- * Thay way although L2 IRQ happened in Kernel mode, stack is still
+ * That way although L2 IRQ happened in Kernel mode, stack is still
  * not switched.
  * To handle this, we may need to switch stack even if in kernel mode
  * provided SP has values in range of USER mode stack ( < 0x7000_0000 )
@@ -173,7 +173,7 @@
 	GET_CURR_TASK_ON_CPU   r9

-	/* With current tsk in r9, get it's kernel mode stack base */
+	/* With current tsk in r9, get its kernel mode stack base */
 	GET_TSK_STACK_BASE  r9, r9

 	/* save U mode SP @ pt_regs->sp */
@@ -282,7 +282,7 @@
  * NOTE:
  *
  * It is recommended that lp_count/ilink1/ilink2 not be used as a dest reg
- * for memory load operations. If used in that way interrupts are deffered
+ * for memory load operations. If used in that way interrupts are deferred
  * by hardware and that is not good.
  *-------------------------------------------------------------*/
 .macro EXCEPTION_EPILOGUE
@@ -350,7 +350,7 @@
  * NOTE:
  *
  * It is recommended that lp_count/ilink1/ilink2 not be used as a dest reg
- * for memory load operations. If used in that way interrupts are deffered
+ * for memory load operations. If used in that way interrupts are deferred
  * by hardware and that is not good.
  *-------------------------------------------------------------*/
 .macro INTERRUPT_EPILOGUE  LVL
......
@@ -7,7 +7,7 @@
 #ifndef __ASM_ARC_ENTRY_H
 #define __ASM_ARC_ENTRY_H

-#include <asm/unistd.h>		/* For NR_syscalls defination */
+#include <asm/unistd.h>		/* For NR_syscalls definition */
 #include <asm/arcregs.h>
 #include <asm/ptrace.h>
 #include <asm/processor.h>	/* For VMALLOC_START */
@@ -56,7 +56,7 @@
 .endm

 /*-------------------------------------------------------------
- * given a tsk struct, get to the base of it's kernel mode stack
+ * given a tsk struct, get to the base of its kernel mode stack
  * tsk->thread_info is really a PAGE, whose bottom hoists stack
  * which grows upwards towards thread_info
  *------------------------------------------------------------*/
......
@@ -10,7 +10,7 @@
 /*
  * ARCv2 can support 240 interrupts in the core interrupts controllers and
  * 128 interrupts in IDU. Thus 512 virtual IRQs must be enough for most
  * configurations of boards.
- * This doesnt affect ARCompact, but we change it to same value
+ * This doesn't affect ARCompact, but we change it to same value
  */
 #define NR_IRQS 512
......
@@ -46,7 +46,7 @@
  * IRQ Control Macros
  *
  * All of them have "memory" clobber (compiler barrier) which is needed to
- * ensure that LD/ST requiring irq safetly (R-M-W when LLSC is not available)
+ * ensure that LD/ST requiring irq safety (R-M-W when LLSC is not available)
  * are redone after IRQs are re-enabled (and gcc doesn't reuse stale register)
  *
  * Noted at the time of Abilis Timer List corruption
......
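
To see what the "memory" clobber buys in the comment above: it is a compiler barrier, so a value loaded before the IRQ-state change cannot be kept in a register and reused after it. A minimal sketch of the pattern (the asm body is a stand-in, not the real ARCompact STATUS32 sequence):

```c
/* Sketch only: an empty asm with a "memory" clobber forces the
 * compiler to re-load memory operands afterwards - exactly the
 * property the IRQ macros rely on for non-LLSC R-M-W sequences. */
static inline void compiler_barrier_sketch(void)
{
	asm volatile("" : : : "memory");
}
```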
@@ -165,7 +165,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
  * for retiring-mm. However destroy_context( ) still needs to do that because
  * between mm_release( ) = >deactive_mm( ) and
  * mmput => .. => __mmdrop( ) => destroy_context( )
- * there is a good chance that task gets sched-out/in, making it's ASID valid
+ * there is a good chance that task gets sched-out/in, making its ASID valid
  * again (this teased me for a whole day).
  */
......
@@ -66,7 +66,7 @@
  * Other rules which cause the divergence from 1:1 mapping
  *
  * 1. Although ARC700 can do exclusive execute/write protection (meaning R
- *    can be tracked independet of X/W unlike some other CPUs), still to
+ *    can be tracked independently of X/W unlike some other CPUs), still to
  *    keep things consistent with other archs:
  *      -Write implies Read:   W => R
  *      -Execute implies Read: X => R
......
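
The two implications above amount to a small normalization over the permission bits, sketched here with made-up bit values:

```c
/* Illustrative bit values only (not ARC's PTE layout). */
#define PROT_R_SK	0x1
#define PROT_W_SK	0x2
#define PROT_X_SK	0x4

/* Apply the rules from the comment: W => R and X => R. */
static unsigned int normalize_prot_sketch(unsigned int prot)
{
	if (prot & (PROT_W_SK | PROT_X_SK))
		prot |= PROT_R_SK;
	return prot;
}
```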
@@ -169,7 +169,7 @@ static inline unsigned long regs_get_register(struct pt_regs *regs,
 	return *(unsigned long *)((unsigned long)regs + offset);
 }

-extern int syscall_trace_entry(struct pt_regs *);
+extern int syscall_trace_enter(struct pt_regs *);
 extern void syscall_trace_exit(struct pt_regs *);

 #endif /* !__ASSEMBLY__ */
......
@@ -6,7 +6,7 @@
 #ifndef __ARC_ASM_SHMPARAM_H
 #define __ARC_ASM_SHMPARAM_H

-/* Handle upto 2 cache bins */
+/* Handle up to 2 cache bins */
 #define SHMLBA	(2 * PAGE_SIZE)

 /* Enforce SHMLBA in shmat */
......
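
With two cache bins, all mappings of a shared page must land in the same bin or they alias; rounding attach addresses to SHMLBA guarantees that. A small sketch, with the page size assumed purely for illustration:

```c
/* Assumed values for the example only. */
#define PAGE_SIZE_SK	8192UL
#define SHMLBA_SK	(2 * PAGE_SIZE_SK)	/* two cache bins */

/* Round a candidate mapping address up to the colour boundary, so
 * every mapping of the page falls into the same bin. */
static unsigned long shm_align_sketch(unsigned long addr)
{
	return (addr + SHMLBA_SK - 1) & ~(SHMLBA_SK - 1);
}
```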
@@ -77,7 +77,7 @@ static inline const char *arc_platform_smp_cpuinfo(void)
 /*
  * ARC700 doesn't support atomic Read-Modify-Write ops.
- * Originally Interrupts had to be disabled around code to gaurantee atomicity.
+ * Originally Interrupts had to be disabled around code to guarantee atomicity.
  * The LLOCK/SCOND insns allow writing interrupt-hassle-free based atomic ops
  * based on retry-if-irq-in-atomic (with hardware assist).
  * However despite these, we provide the IRQ disabling variant
@@ -86,7 +86,7 @@ static inline const char *arc_platform_smp_cpuinfo(void)
  *    support needed.
  *
  * (2) In a SMP setup, the LLOCK/SCOND atomicity across CPUs needs to be
- *     gaurantted by the platform (not something which core handles).
+ *     guaranteed by the platform (not something which core handles).
  *     Assuming a platform won't, SMP Linux needs to use spinlocks + local IRQ
  *     disabling for atomicity.
  *
......
@@ -38,7 +38,7 @@
 struct thread_info {
 	unsigned long flags;		/* low level flags */
 	unsigned long ksp;		/* kernel mode stack top in __switch_to */
-	int preempt_count;		/* 0 => preemptable, <0 => BUG */
+	int preempt_count;		/* 0 => preemptible, <0 => BUG */
 	int cpu;			/* current CPU */
 	unsigned long thr_ptr;		/* TLS ptr */
 	struct task_struct *task;	/* main task structure */
......
@@ -62,7 +62,7 @@
  *  8051fdc4:	st	r2,[r1,20]	; Mem op : save result back to mem
  *
  * Joern suggested a better "C" algorithm which is great since
- * (1) It is portable to any architecure
+ * (1) It is portable to any architecture
  * (2) At the same time it takes advantage of ARC ISA (rotate intrns)
  */
......
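
For flavor, the kind of portable C being praised above: the classic fold of a 32-bit running checksum into 16 bits, which compilers can lower to rotate instructions. This is a generic sketch, not claimed to be Joern's exact algorithm:

```c
/* Generic 32->16 checksum fold (illustrative). */
static inline unsigned short csum_fold_sketch(unsigned int sum)
{
	sum = (sum & 0xffff) + (sum >> 16);	/* fold high half once */
	sum = (sum & 0xffff) + (sum >> 16);	/* absorb any new carry */
	return (unsigned short)~sum;
}
```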
@@ -5,7 +5,7 @@
  * Copyright (C) 2013 Synopsys, Inc. (www.synopsys.com)
  */

-#include <linux/linkage.h>   /* ARC_{EXTRY,EXIT} */
+#include <linux/linkage.h>   /* ARC_{ENTRY,EXIT} */
 #include <asm/entry.h>       /* SAVE_ALL_{INT1,INT2,TRAP...} */
 #include <asm/errno.h>
 #include <asm/arcregs.h>
@@ -31,7 +31,7 @@ VECTOR	res_service		; Reset Vector
 VECTOR	mem_service		; Mem exception
 VECTOR	instr_service		; Instrn Error
 VECTOR	EV_MachineCheck		; Fatal Machine check
-VECTOR	EV_TLBMissI		; Intruction TLB miss
+VECTOR	EV_TLBMissI		; Instruction TLB miss
 VECTOR	EV_TLBMissD		; Data TLB miss
 VECTOR	EV_TLBProtV		; Protection Violation
 VECTOR	EV_PrivilegeV		; Privilege Violation
@@ -76,11 +76,11 @@ ENTRY(handle_interrupt)
 	# query in hard ISR path would return false (since .IE is set) which would
 	# trips genirq interrupt handling asserts.
 	#
-	# So do a "soft" disable of interrutps here.
+	# So do a "soft" disable of interrupts here.
 	#
 	# Note this disable is only for consistent book-keeping as further interrupts
 	# will be disabled anyways even w/o this. Hardware tracks active interrupts
-	# seperately in AUX_IRQ_ACT.active and will not take new interrupts
+	# separately in AUX_IRQ_ACT.active and will not take new interrupts
 	# unless this one returns (or higher prio becomes pending in 2-prio scheme)

 	IRQ_DISABLE
......
@@ -95,7 +95,7 @@ ENTRY(EV_MachineCheck)
 	lr  r0, [efa]
 	mov r1, sp

-	; MC excpetions disable MMU
+	; MC exceptions disable MMU
 	ARC_MMU_REENABLE r3

 	lsr	r3, r10, 8
@@ -209,7 +209,7 @@ trap_with_param:

 ; ---------------------------------------------
 ; syscall TRAP
-; ABI: (r0-r7) upto 8 args, (r8) syscall number
+; ABI: (r0-r7) up to 8 args, (r8) syscall number
 ; ---------------------------------------------

 ENTRY(EV_Trap)
......
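
The ABI comment above is the contract userspace follows as well. A hedged sketch of a one-argument syscall under that convention; the trap mnemonic and register constraints are assumptions for illustration, not a drop-in implementation:

```c
/* Sketch: syscall number in r8, args in r0..r7, result back in r0.
 * "trap_s 0" is assumed here for the trap instruction. */
static inline long arc_syscall1_sketch(long nr, long arg0)
{
	register long r8 asm("r8") = nr;
	register long r0 asm("r0") = arg0;

	asm volatile("trap_s 0" : "+r"(r0) : "r"(r8) : "memory");
	return r0;
}
```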
@@ -165,7 +165,7 @@ ENTRY(first_lines_of_secondary)
 	; setup stack (fp, sp)
 	mov	fp, 0

-	; set it's stack base to tsk->thread_info bottom
+	; set its stack base to tsk->thread_info bottom
 	GET_TSK_STACK_BASE r0, sp

 	j	start_kernel_secondary
......
@@ -56,7 +56,7 @@ void arc_init_IRQ(void)
 	WRITE_AUX(AUX_IRQ_CTRL, ictrl);

 	/*
-	 * ARCv2 core intc provides multiple interrupt priorities (upto 16).
+	 * ARCv2 core intc provides multiple interrupt priorities (up to 16).
 	 * Typical builds though have only two levels (0-high, 1-low)
 	 * Linux by default uses lower prio 1 for most irqs, reserving 0 for
 	 * NMI style interrupts in future (say perf)
......
@@ -190,7 +190,8 @@ static void __kprobes setup_singlestep(struct kprobe *p, struct pt_regs *regs)
 	}
 }

-int __kprobes arc_kprobe_handler(unsigned long addr, struct pt_regs *regs)
+static int
+__kprobes arc_kprobe_handler(unsigned long addr, struct pt_regs *regs)
 {
 	struct kprobe *p;
 	struct kprobe_ctlblk *kcb;
@@ -241,8 +242,8 @@ int __kprobes arc_kprobe_handler(unsigned long addr, struct pt_regs *regs)
 	return 0;
 }

-static int __kprobes arc_post_kprobe_handler(unsigned long addr,
-					     struct pt_regs *regs)
+static int
+__kprobes arc_post_kprobe_handler(unsigned long addr, struct pt_regs *regs)
 {
 	struct kprobe *cur = kprobe_running();
 	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
......
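
The reshuffle above is one of the -Wmissing-prototypes fixes from this pull: a definition with external linkage and no prior prototype trips the warning, while internal linkage does not. A minimal reproduction, outside the kernel:

```c
/* With -Wmissing-prototypes:
 *   int helper(int x) { ... }   -> "no previous prototype" warning
 * Making the file-local function static silences it. */
static int helper(int x)
{
	return x + 1;
}

int api_entry(void);		/* prototype, so no warning below */
int api_entry(void)
{
	return helper(41);
}
```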
@@ -38,7 +38,7 @@
  * (based on a specific RTL build)
  * Below is the static map between perf generic/arc specific event_id and
  * h/w condition names.
- * At the time of probe, we loop thru each index and find it's name to
+ * At the time of probe, we loop thru each index and find its name to
  * complete the mapping of perf event_id to h/w index as latter is needed
  * to program the counter really
  */
......
@@ -390,7 +390,7 @@ static void arc_chk_core_config(struct cpuinfo_arc *info)
 #ifdef CONFIG_ARC_HAS_DCCM
 	/*
 	 * DCCM can be arbit placed in hardware.
-	 * Make sure it's placement/sz matches what Linux is built with
+	 * Make sure its placement/sz matches what Linux is built with
 	 */
 	if ((unsigned int)__arc_dccm_base != info->dccm.base)
 		panic("Linux built with incorrect DCCM Base address\n");
......
@@ -8,15 +8,16 @@
  *
  * vineetg: Nov 2009 (Everything needed for TIF_RESTORE_SIGMASK)
  *  -do_signal() supports TIF_RESTORE_SIGMASK
- *  -do_signal() no loner needs oldset, required by OLD sys_sigsuspend
- *  -sys_rt_sigsuspend() now comes from generic code, so discard arch implemen
+ *  -do_signal() no longer needs oldset, required by OLD sys_sigsuspend
+ *  -sys_rt_sigsuspend() now comes from generic code, so discard arch
+ *   implementation
  *  -sys_sigsuspend() no longer needs to fudge ptregs, hence that arg removed
  *  -sys_sigsuspend() no longer loops for do_signal(), sets TIF_xxx and leaves
  *   the job to do_signal()
  *
  * vineetg: July 2009
  *  -Modified Code to support the uClibc provided userland sigreturn stub
- *  to avoid kernel synthesing it on user stack at runtime, costing TLB
+ *  to avoid kernel synthesizing it on user stack at runtime, costing TLB
  *  probes and Cache line flushes.
  *
  * vineetg: July 2009
......
@@ -89,7 +89,7 @@ int do_misaligned_access(unsigned long address, struct pt_regs *regs,
 /*
  * Entry point for miscll errors such as Nested Exceptions
- *  -Duplicate TLB entry is handled seperately though
+ *  -Duplicate TLB entry is handled separately though
  */
 void do_machine_check_fault(unsigned long address, struct pt_regs *regs)
 {
......
@@ -41,8 +41,8 @@ SECTIONS
 #endif

 	/*
-	 * The reason for having a seperate subsection .init.ramfs is to
-	 * prevent objump from including it in kernel dumps
+	 * The reason for having a separate subsection .init.ramfs is to
+	 * prevent objdump from including it in kernel dumps
 	 *
 	 * Reason for having .init.ramfs above .init is to make sure that the
 	 * binary blob is tucked away to one side, reducing the displacement
......
@@ -212,7 +212,7 @@ void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
 	unsigned long flags;

 	/* If range @start to @end is more than 32 TLB entries deep,
-	 * its better to move to a new ASID rather than searching for
+	 * it's better to move to a new ASID rather than searching for
 	 * individual entries and then shooting them down
 	 *
 	 * The calc above is rough, doesn't account for unaligned parts,
@@ -408,7 +408,7 @@ static void create_tlb(struct vm_area_struct *vma, unsigned long vaddr, pte_t *p
 	 * -More importantly it makes this handler inconsistent with fast-path
 	 *  TLB Refill handler which always deals with "current"
 	 *
-	 * Lets see the use cases when current->mm != vma->mm and we land here
+	 * Let's see the use cases when current->mm != vma->mm and we land here
 	 *  1. execve->copy_strings()->__get_user_pages->handle_mm_fault
 	 *     Here VM wants to pre-install a TLB entry for user stack while
 	 *     current->mm still points to pre-execve mm (hence the condition).
......
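
A sketch of the heuristic in the first hunk above: when a flush range is deeper than the 32 entries the loop would touch, moving the mm to a fresh ASID is cheaper than per-entry shootdown. Page size and helper names are illustrative, not the kernel's:

```c
#define PAGE_SHIFT_SK	13	/* assumed 8K pages, for the example */

void flush_whole_asid_sketch(void);		/* stand-in helpers */
void flush_one_entry_sketch(unsigned long addr);

void flush_range_sketch(unsigned long start, unsigned long end)
{
	if (end - start > (32UL << PAGE_SHIFT_SK)) {
		flush_whole_asid_sketch();	/* new ASID: bulk invalidate */
		return;
	}
	for (unsigned long a = start; a < end; a += 1UL << PAGE_SHIFT_SK)
		flush_one_entry_sketch(a);	/* targeted shootdown */
}
```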
@@ -5,19 +5,19 @@
  * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
  *
  * Vineetg: April 2011 :
- *  -MMU v1: moved out legacy code into a seperate file
+ *  -MMU v1: moved out legacy code into a separate file
  *  -MMU v3: PD{0,1} bits layout changed: They don't overlap anymore,
  *      helps avoid a shift when preparing PD0 from PTE
  *
  * Vineetg: July 2009
- *  -For MMU V2, we need not do heuristics at the time of commiting a D-TLB
- *   entry, so that it doesn't knock out it's I-TLB entry
+ *  -For MMU V2, we need not do heuristics at the time of committing a D-TLB
+ *   entry, so that it doesn't knock out its I-TLB entry
  *  -Some more fine tuning:
  *      bmsk instead of add, asl.cc instead of branch, delay slot utilise etc
  *
  * Vineetg: July 2009
  *  -Practically rewrote the I/D TLB Miss handlers
- *   Now 40 and 135 instructions a peice as compared to 131 and 449 resp.
+ *   Now 40 and 135 instructions apiece as compared to 131 and 449 resp.
  *   Hence Leaner by 1.5 K
  *   Used Conditional arithmetic to replace excessive branching
  *   Also used short instructions wherever possible
......