Commit 76d3f4c2 authored by Linus Torvalds's avatar Linus Torvalds

Merge tag 'arc-v3.11-rc1-part1' of git://git.kernel.org/pub/scm/linux/kernel/git/vgupta/arc

Pull first batch of ARC changes from Vineet Gupta:
 "There's a second bunch to follow next week - which depends on commits
  on other trees (irq/net).  I'd have preferred the accompanying ARC
  change via respective trees, but it didn't work out somehow.

  Highlights of changes:

   - Continuation of ARC MM changes from 3.10 including

       zero page optimization
       Setting pagecache pages dirty by default
       Non executable stack by default
       Reducing dcache flushes for aliasing VIPT config

   - Long overdue rework of pt_regs machinery - removing the unused word
     gutters and adding ECR register to baseline (helps clean up a lot of
     low-level code)

   - Support for ARC gcc 4.8

   - Few other preventive fixes, cosmetics, usage of Kconfig helper..

  The diffstat is larger than normal primarily because of arcregs.h
  header split as well as beautification of macros in entry.h"

* tag 'arc-v3.11-rc1-part1' of git://git.kernel.org/pub/scm/linux/kernel/git/vgupta/arc: (32 commits)
  ARC: warn on improper stack unwind FDE entries
  arc: delete __cpuinit usage from all arc files
  ARC: [tlb-miss] Fix bug with CONFIG_ARC_DBG_TLB_MISS_COUNT
  ARC: [tlb-miss] Extraneous PTE bit testing/setting
  ARC: Adjustments for gcc 4.8
  ARC: Setup Vector Table Base in early boot
  ARC: Remove explicit passing around of ECR
  ARC: pt_regs update #5: Use real ECR for pt_regs->event vs. synth values
  ARC: stop using pt_regs->orig_r8
  ARC: pt_regs update #4: r25 saved/restored unconditionally
  ARC: K/U SP saved from one location in stack switching macro
  ARC: Entry Handler tweaks: Simplify branch for in-kernel preemption
  ARC: Entry Handler tweaks: Avoid hardcoded LIMMS for ECR values
  ARC: Increase readability of entry handlers
  ARC: pt_regs update #3: Remove unused gutter at start of callee_regs
  ARC: pt_regs update #2: Remove unused gutter at start of pt_regs
  ARC: pt_regs update #1: Align pt_regs end with end of kernel stack page
  ARC: pt_regs update #0: remove kernel stack canary
  ARC: [mm] Remove @write argument to do_page_fault()
  ARC: [mm] Make stack/heap Non-executable by default
  ...
parents c1101cbc baadb8fd
...@@ -184,6 +184,7 @@ config ARC_CACHE_PAGES ...@@ -184,6 +184,7 @@ config ARC_CACHE_PAGES
config ARC_CACHE_VIPT_ALIASING config ARC_CACHE_VIPT_ALIASING
bool "Support VIPT Aliasing D$" bool "Support VIPT Aliasing D$"
depends on ARC_HAS_DCACHE
default n default n
endif #ARC_CACHE endif #ARC_CACHE
...@@ -361,13 +362,6 @@ config ARC_MISALIGN_ACCESS ...@@ -361,13 +362,6 @@ config ARC_MISALIGN_ACCESS
Use ONLY-IF-ABS-NECESSARY as it will be very slow and also can hide Use ONLY-IF-ABS-NECESSARY as it will be very slow and also can hide
potential bugs in code potential bugs in code
config ARC_STACK_NONEXEC
bool "Make stack non-executable"
default n
help
To disable the execute permissions of stack/heap of processes
which are enabled by default.
config HZ config HZ
int "Timer Frequency" int "Timer Frequency"
default 100 default 100
......
...@@ -9,25 +9,27 @@ ...@@ -9,25 +9,27 @@
UTS_MACHINE := arc UTS_MACHINE := arc
ifeq ($(CROSS_COMPILE),) ifeq ($(CROSS_COMPILE),)
CROSS_COMPILE := arc-elf32- CROSS_COMPILE := arc-linux-uclibc-
endif endif
KBUILD_DEFCONFIG := fpga_defconfig KBUILD_DEFCONFIG := fpga_defconfig
cflags-y += -mA7 -fno-common -pipe -fno-builtin -D__linux__ cflags-y += -mA7 -fno-common -pipe -fno-builtin -D__linux__
LINUXINCLUDE += -include ${src}/arch/arc/include/asm/defines.h
ifdef CONFIG_ARC_CURR_IN_REG ifdef CONFIG_ARC_CURR_IN_REG
# For a global register defintion, make sure it gets passed to every file # For a global register defintion, make sure it gets passed to every file
# We had a customer reported bug where some code built in kernel was NOT using # We had a customer reported bug where some code built in kernel was NOT using
# any kernel headers, and missing the r25 global register # any kernel headers, and missing the r25 global register
# Can't do unconditionally (like above) because of recursive include issues # Can't do unconditionally because of recursive include issues
# due to <linux/thread_info.h> # due to <linux/thread_info.h>
LINUXINCLUDE += -include ${src}/arch/arc/include/asm/current.h LINUXINCLUDE += -include ${src}/arch/arc/include/asm/current.h
endif endif
atleast_gcc44 := $(call cc-ifversion, -gt, 0402, y) upto_gcc42 := $(call cc-ifversion, -le, 0402, y)
upto_gcc44 := $(call cc-ifversion, -le, 0404, y)
atleast_gcc44 := $(call cc-ifversion, -ge, 0404, y)
atleast_gcc48 := $(call cc-ifversion, -ge, 0408, y)
cflags-$(atleast_gcc44) += -fsection-anchors cflags-$(atleast_gcc44) += -fsection-anchors
cflags-$(CONFIG_ARC_HAS_LLSC) += -mlock cflags-$(CONFIG_ARC_HAS_LLSC) += -mlock
...@@ -35,6 +37,11 @@ cflags-$(CONFIG_ARC_HAS_SWAPE) += -mswape ...@@ -35,6 +37,11 @@ cflags-$(CONFIG_ARC_HAS_SWAPE) += -mswape
cflags-$(CONFIG_ARC_HAS_RTSC) += -mrtsc cflags-$(CONFIG_ARC_HAS_RTSC) += -mrtsc
cflags-$(CONFIG_ARC_DW2_UNWIND) += -fasynchronous-unwind-tables cflags-$(CONFIG_ARC_DW2_UNWIND) += -fasynchronous-unwind-tables
# By default gcc 4.8 generates dwarf4 which kernel unwinder can't grok
ifeq ($(atleast_gcc48),y)
cflags-$(CONFIG_ARC_DW2_UNWIND) += -gdwarf-2
endif
ifndef CONFIG_CC_OPTIMIZE_FOR_SIZE ifndef CONFIG_CC_OPTIMIZE_FOR_SIZE
# Generic build system uses -O2, we want -O3 # Generic build system uses -O2, we want -O3
cflags-y += -O3 cflags-y += -O3
...@@ -48,11 +55,10 @@ cflags-$(disable_small_data) += -mno-sdata -fcall-used-gp ...@@ -48,11 +55,10 @@ cflags-$(disable_small_data) += -mno-sdata -fcall-used-gp
cflags-$(CONFIG_CPU_BIG_ENDIAN) += -mbig-endian cflags-$(CONFIG_CPU_BIG_ENDIAN) += -mbig-endian
ldflags-$(CONFIG_CPU_BIG_ENDIAN) += -EB ldflags-$(CONFIG_CPU_BIG_ENDIAN) += -EB
# STAR 9000518362: # STAR 9000518362: (fixed with binutils shipping with gcc 4.8)
# arc-linux-uclibc-ld (buildroot) or arceb-elf32-ld (EZChip) don't accept # arc-linux-uclibc-ld (buildroot) or arceb-elf32-ld (EZChip) don't accept
# --build-id w/o "-marclinux". # --build-id w/o "-marclinux". Default arc-elf32-ld is OK
# Default arc-elf32-ld is OK ldflags-$(upto_gcc44) += -marclinux
ldflags-y += -marclinux
ARC_LIBGCC := -mA7 ARC_LIBGCC := -mA7
cflags-$(CONFIG_ARC_HAS_HW_MPY) += -multcost=16 cflags-$(CONFIG_ARC_HAS_HW_MPY) += -multcost=16
...@@ -66,8 +72,8 @@ ifndef CONFIG_ARC_HAS_HW_MPY ...@@ -66,8 +72,8 @@ ifndef CONFIG_ARC_HAS_HW_MPY
# With gcc 4.4.7, -mno-mpy is enough to make any other related adjustments, # With gcc 4.4.7, -mno-mpy is enough to make any other related adjustments,
# e.g. increased cost of MPY. With gcc 4.2.1 this had to be explicitly hinted # e.g. increased cost of MPY. With gcc 4.2.1 this had to be explicitly hinted
ARC_LIBGCC := -marc600 ifeq ($(upto_gcc42),y)
ifneq ($(atleast_gcc44),y) ARC_LIBGCC := -marc600
cflags-y += -multcost=30 cflags-y += -multcost=30
endif endif
endif endif
......
CONFIG_CROSS_COMPILE="arc-elf32-" CONFIG_CROSS_COMPILE="arc-linux-uclibc-"
# CONFIG_LOCALVERSION_AUTO is not set # CONFIG_LOCALVERSION_AUTO is not set
CONFIG_DEFAULT_HOSTNAME="ARCLinux" CONFIG_DEFAULT_HOSTNAME="ARCLinux"
# CONFIG_SWAP is not set # CONFIG_SWAP is not set
......
CONFIG_CROSS_COMPILE="arc-elf32-" CONFIG_CROSS_COMPILE="arc-linux-uclibc-"
# CONFIG_LOCALVERSION_AUTO is not set # CONFIG_LOCALVERSION_AUTO is not set
CONFIG_DEFAULT_HOSTNAME="ARCLinux" CONFIG_DEFAULT_HOSTNAME="ARCLinux"
# CONFIG_SWAP is not set # CONFIG_SWAP is not set
......
CONFIG_CROSS_COMPILE="arc-elf32-" CONFIG_CROSS_COMPILE="arc-linux-uclibc-"
# CONFIG_LOCALVERSION_AUTO is not set # CONFIG_LOCALVERSION_AUTO is not set
CONFIG_DEFAULT_HOSTNAME="tb10x" CONFIG_DEFAULT_HOSTNAME="tb10x"
CONFIG_SYSVIPC=y CONFIG_SYSVIPC=y
......
...@@ -20,7 +20,6 @@ ...@@ -20,7 +20,6 @@
#define ARC_REG_PERIBASE_BCR 0x69 #define ARC_REG_PERIBASE_BCR 0x69
#define ARC_REG_FP_BCR 0x6B /* Single-Precision FPU */ #define ARC_REG_FP_BCR 0x6B /* Single-Precision FPU */
#define ARC_REG_DPFP_BCR 0x6C /* Dbl Precision FPU */ #define ARC_REG_DPFP_BCR 0x6C /* Dbl Precision FPU */
#define ARC_REG_MMU_BCR 0x6f
#define ARC_REG_DCCM_BCR 0x74 /* DCCM Present + SZ */ #define ARC_REG_DCCM_BCR 0x74 /* DCCM Present + SZ */
#define ARC_REG_TIMERS_BCR 0x75 #define ARC_REG_TIMERS_BCR 0x75
#define ARC_REG_ICCM_BCR 0x78 #define ARC_REG_ICCM_BCR 0x78
...@@ -34,22 +33,12 @@ ...@@ -34,22 +33,12 @@
#define ARC_REG_D_UNCACH_BCR 0x6A #define ARC_REG_D_UNCACH_BCR 0x6A
/* status32 Bits Positions */ /* status32 Bits Positions */
#define STATUS_H_BIT 0 /* CPU Halted */
#define STATUS_E1_BIT 1 /* Int 1 enable */
#define STATUS_E2_BIT 2 /* Int 2 enable */
#define STATUS_A1_BIT 3 /* Int 1 active */
#define STATUS_A2_BIT 4 /* Int 2 active */
#define STATUS_AE_BIT 5 /* Exception active */ #define STATUS_AE_BIT 5 /* Exception active */
#define STATUS_DE_BIT 6 /* PC is in delay slot */ #define STATUS_DE_BIT 6 /* PC is in delay slot */
#define STATUS_U_BIT 7 /* User/Kernel mode */ #define STATUS_U_BIT 7 /* User/Kernel mode */
#define STATUS_L_BIT 12 /* Loop inhibit */ #define STATUS_L_BIT 12 /* Loop inhibit */
/* These masks correspond to the status word(STATUS_32) bits */ /* These masks correspond to the status word(STATUS_32) bits */
#define STATUS_H_MASK (1<<STATUS_H_BIT)
#define STATUS_E1_MASK (1<<STATUS_E1_BIT)
#define STATUS_E2_MASK (1<<STATUS_E2_BIT)
#define STATUS_A1_MASK (1<<STATUS_A1_BIT)
#define STATUS_A2_MASK (1<<STATUS_A2_BIT)
#define STATUS_AE_MASK (1<<STATUS_AE_BIT) #define STATUS_AE_MASK (1<<STATUS_AE_BIT)
#define STATUS_DE_MASK (1<<STATUS_DE_BIT) #define STATUS_DE_MASK (1<<STATUS_DE_BIT)
#define STATUS_U_MASK (1<<STATUS_U_BIT) #define STATUS_U_MASK (1<<STATUS_U_BIT)
...@@ -71,6 +60,7 @@ ...@@ -71,6 +60,7 @@
#define ECR_V_ITLB_MISS 0x21 #define ECR_V_ITLB_MISS 0x21
#define ECR_V_DTLB_MISS 0x22 #define ECR_V_DTLB_MISS 0x22
#define ECR_V_PROTV 0x23 #define ECR_V_PROTV 0x23
#define ECR_V_TRAP 0x25
/* Protection Violation Exception Cause Code Values */ /* Protection Violation Exception Cause Code Values */
#define ECR_C_PROTV_INST_FETCH 0x00 #define ECR_C_PROTV_INST_FETCH 0x00
...@@ -79,94 +69,23 @@ ...@@ -79,94 +69,23 @@
#define ECR_C_PROTV_XCHG 0x03 #define ECR_C_PROTV_XCHG 0x03
#define ECR_C_PROTV_MISALIG_DATA 0x04 #define ECR_C_PROTV_MISALIG_DATA 0x04
#define ECR_C_BIT_PROTV_MISALIG_DATA 10
/* Machine Check Cause Code Values */
#define ECR_C_MCHK_DUP_TLB 0x01
/* DTLB Miss Exception Cause Code Values */ /* DTLB Miss Exception Cause Code Values */
#define ECR_C_BIT_DTLB_LD_MISS 8 #define ECR_C_BIT_DTLB_LD_MISS 8
#define ECR_C_BIT_DTLB_ST_MISS 9 #define ECR_C_BIT_DTLB_ST_MISS 9
/* Dummy ECR values for Interrupts */
#define event_IRQ1 0x0031abcd
#define event_IRQ2 0x0032abcd
/* Auxiliary registers */ /* Auxiliary registers */
#define AUX_IDENTITY 4 #define AUX_IDENTITY 4
#define AUX_INTR_VEC_BASE 0x25 #define AUX_INTR_VEC_BASE 0x25
#define AUX_IRQ_LEV 0x200 /* IRQ Priority: L1 or L2 */
#define AUX_IRQ_HINT 0x201 /* For generating Soft Interrupts */
#define AUX_IRQ_LV12 0x43 /* interrupt level register */
#define AUX_IENABLE 0x40c
#define AUX_ITRIGGER 0x40d
#define AUX_IPULSE 0x415
/* Timer related Aux registers */
#define ARC_REG_TIMER0_LIMIT 0x23 /* timer 0 limit */
#define ARC_REG_TIMER0_CTRL 0x22 /* timer 0 control */
#define ARC_REG_TIMER0_CNT 0x21 /* timer 0 count */
#define ARC_REG_TIMER1_LIMIT 0x102 /* timer 1 limit */
#define ARC_REG_TIMER1_CTRL 0x101 /* timer 1 control */
#define ARC_REG_TIMER1_CNT 0x100 /* timer 1 count */
#define TIMER_CTRL_IE (1 << 0) /* Interupt when Count reachs limit */
#define TIMER_CTRL_NH (1 << 1) /* Count only when CPU NOT halted */
/* MMU Management regs */
#define ARC_REG_TLBPD0 0x405
#define ARC_REG_TLBPD1 0x406
#define ARC_REG_TLBINDEX 0x407
#define ARC_REG_TLBCOMMAND 0x408
#define ARC_REG_PID 0x409
#define ARC_REG_SCRATCH_DATA0 0x418
/* Bits in MMU PID register */
#define MMU_ENABLE (1 << 31) /* Enable MMU for process */
/* Error code if probe fails */
#define TLB_LKUP_ERR 0x80000000
/* TLB Commands */
#define TLBWrite 0x1
#define TLBRead 0x2
#define TLBGetIndex 0x3
#define TLBProbe 0x4
#if (CONFIG_ARC_MMU_VER >= 2)
#define TLBWriteNI 0x5 /* write JTLB without inv uTLBs */
#define TLBIVUTLB 0x6 /* explicitly inv uTLBs */
#else
#undef TLBWriteNI /* These cmds don't exist on older MMU */
#undef TLBIVUTLB
#endif
/* Instruction cache related Auxiliary registers */
#define ARC_REG_IC_BCR 0x77 /* Build Config reg */
#define ARC_REG_IC_IVIC 0x10
#define ARC_REG_IC_CTRL 0x11
#define ARC_REG_IC_IVIL 0x19
#if (CONFIG_ARC_MMU_VER > 2)
#define ARC_REG_IC_PTAG 0x1E
#endif
/* Bit val in IC_CTRL */
#define IC_CTRL_CACHE_DISABLE 0x1
/* Data cache related Auxiliary registers */
#define ARC_REG_DC_BCR 0x72
#define ARC_REG_DC_IVDC 0x47
#define ARC_REG_DC_CTRL 0x48
#define ARC_REG_DC_IVDL 0x4A
#define ARC_REG_DC_FLSH 0x4B
#define ARC_REG_DC_FLDL 0x4C
#if (CONFIG_ARC_MMU_VER > 2)
#define ARC_REG_DC_PTAG 0x5C
#endif
/* Bit val in DC_CTRL */
#define DC_CTRL_INV_MODE_FLUSH 0x40
#define DC_CTRL_FLUSH_STATUS 0x100
/* MMU Management regs */
#define ARC_REG_PID 0x409
#define ARC_REG_SCRATCH_DATA0 0x418
/* Bits in MMU PID register */
#define MMU_ENABLE (1 << 31) /* Enable MMU for process */
/* /*
* Floating Pt Registers * Floating Pt Registers
...@@ -293,24 +212,6 @@ struct bcr_identity { ...@@ -293,24 +212,6 @@ struct bcr_identity {
#endif #endif
}; };
struct bcr_mmu_1_2 {
#ifdef CONFIG_CPU_BIG_ENDIAN
unsigned int ver:8, ways:4, sets:4, u_itlb:8, u_dtlb:8;
#else
unsigned int u_dtlb:8, u_itlb:8, sets:4, ways:4, ver:8;
#endif
};
struct bcr_mmu_3 {
#ifdef CONFIG_CPU_BIG_ENDIAN
unsigned int ver:8, ways:4, sets:4, osm:1, reserv:3, pg_sz:4,
u_itlb:4, u_dtlb:4;
#else
unsigned int u_dtlb:4, u_itlb:4, pg_sz:4, reserv:3, osm:1, sets:4,
ways:4, ver:8;
#endif
};
#define EXTN_SWAP_VALID 0x1 #define EXTN_SWAP_VALID 0x1
#define EXTN_NORM_VALID 0x2 #define EXTN_NORM_VALID 0x2
#define EXTN_MINMAX_VALID 0x2 #define EXTN_MINMAX_VALID 0x2
...@@ -343,14 +244,6 @@ struct bcr_extn_xymem { ...@@ -343,14 +244,6 @@ struct bcr_extn_xymem {
#endif #endif
}; };
struct bcr_cache {
#ifdef CONFIG_CPU_BIG_ENDIAN
unsigned int pad:12, line_len:4, sz:4, config:4, ver:8;
#else
unsigned int ver:8, config:4, sz:4, line_len:4, pad:12;
#endif
};
struct bcr_perip { struct bcr_perip {
#ifdef CONFIG_CPU_BIG_ENDIAN #ifdef CONFIG_CPU_BIG_ENDIAN
unsigned int start:8, pad2:8, sz:8, pad:8; unsigned int start:8, pad2:8, sz:8, pad:8;
...@@ -403,7 +296,7 @@ struct cpuinfo_arc_mmu { ...@@ -403,7 +296,7 @@ struct cpuinfo_arc_mmu {
}; };
struct cpuinfo_arc_cache { struct cpuinfo_arc_cache {
unsigned int has_aliasing, sz, line_len, assoc, ver; unsigned int sz, line_len, assoc, ver;
}; };
struct cpuinfo_arc_ccm { struct cpuinfo_arc_ccm {
......
...@@ -18,9 +18,8 @@ struct task_struct; ...@@ -18,9 +18,8 @@ struct task_struct;
void show_regs(struct pt_regs *regs); void show_regs(struct pt_regs *regs);
void show_stacktrace(struct task_struct *tsk, struct pt_regs *regs); void show_stacktrace(struct task_struct *tsk, struct pt_regs *regs);
void show_kernel_fault_diag(const char *str, struct pt_regs *regs, void show_kernel_fault_diag(const char *str, struct pt_regs *regs,
unsigned long address, unsigned long cause_reg); unsigned long address);
void die(const char *str, struct pt_regs *regs, unsigned long address, void die(const char *str, struct pt_regs *regs, unsigned long address);
unsigned long cause_reg);
#define BUG() do { \ #define BUG() do { \
dump_stack(); \ dump_stack(); \
......
...@@ -18,21 +18,19 @@ ...@@ -18,21 +18,19 @@
#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) #define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
#define ARC_ICACHE_WAYS 2 /* For a rare case where customers have differently config I/D */
#define ARC_DCACHE_WAYS 4
/* Helpers */
#define ARC_ICACHE_LINE_LEN L1_CACHE_BYTES #define ARC_ICACHE_LINE_LEN L1_CACHE_BYTES
#define ARC_DCACHE_LINE_LEN L1_CACHE_BYTES #define ARC_DCACHE_LINE_LEN L1_CACHE_BYTES
#define ICACHE_LINE_MASK (~(ARC_ICACHE_LINE_LEN - 1)) #define ICACHE_LINE_MASK (~(ARC_ICACHE_LINE_LEN - 1))
#define DCACHE_LINE_MASK (~(ARC_DCACHE_LINE_LEN - 1)) #define DCACHE_LINE_MASK (~(ARC_DCACHE_LINE_LEN - 1))
#if ARC_ICACHE_LINE_LEN != ARC_DCACHE_LINE_LEN /*
#error "Need to fix some code as I/D cache lines not same" * ARC700 doesn't cache any access in top 256M.
#else * Ideal for wiring memory mapped peripherals as we don't need to do
#define is_not_cache_aligned(p) ((unsigned long)p & (~DCACHE_LINE_MASK)) * explicit uncached accesses (LD.di/ST.di) hence more portable drivers
#endif */
#define ARC_UNCACHED_ADDR_SPACE 0xc0000000
#ifndef __ASSEMBLY__ #ifndef __ASSEMBLY__
...@@ -57,16 +55,10 @@ ...@@ -57,16 +55,10 @@
#define ARCH_DMA_MINALIGN L1_CACHE_BYTES #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
/*
* ARC700 doesn't cache any access in top 256M.
* Ideal for wiring memory mapped peripherals as we don't need to do
* explicit uncached accesses (LD.di/ST.di) hence more portable drivers
*/
#define ARC_UNCACHED_ADDR_SPACE 0xc0000000
extern void arc_cache_init(void); extern void arc_cache_init(void);
extern char *arc_cache_mumbojumbo(int cpu_id, char *buf, int len); extern char *arc_cache_mumbojumbo(int cpu_id, char *buf, int len);
extern void __init read_decode_cache_bcr(void); extern void __init read_decode_cache_bcr(void);
#endif
#endif /* !__ASSEMBLY__ */
#endif /* _ASM_CACHE_H */ #endif /* _ASM_CACHE_H */
...@@ -80,17 +80,20 @@ void flush_anon_page(struct vm_area_struct *vma, ...@@ -80,17 +80,20 @@ void flush_anon_page(struct vm_area_struct *vma,
#endif /* CONFIG_ARC_CACHE_VIPT_ALIASING */ #endif /* CONFIG_ARC_CACHE_VIPT_ALIASING */
/*
* A new pagecache page has PG_arch_1 clear - thus dcache dirty by default
* This works around some PIO based drivers which don't call flush_dcache_page
* to record that they dirtied the dcache
*/
#define PG_dc_clean PG_arch_1
/* /*
* Simple wrapper over config option * Simple wrapper over config option
* Bootup code ensures that hardware matches kernel configuration * Bootup code ensures that hardware matches kernel configuration
*/ */
static inline int cache_is_vipt_aliasing(void) static inline int cache_is_vipt_aliasing(void)
{ {
#ifdef CONFIG_ARC_CACHE_VIPT_ALIASING return IS_ENABLED(CONFIG_ARC_CACHE_VIPT_ALIASING);
return 1;
#else
return 0;
#endif
} }
#define CACHE_COLOR(addr) (((unsigned long)(addr) >> (PAGE_SHIFT)) & 1) #define CACHE_COLOR(addr) (((unsigned long)(addr) >> (PAGE_SHIFT)) & 1)
......
/*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef __ARC_ASM_DEFINES_H__
#define __ARC_ASM_DEFINES_H__
#if defined(CONFIG_ARC_MMU_V1)
#define CONFIG_ARC_MMU_VER 1
#elif defined(CONFIG_ARC_MMU_V2)
#define CONFIG_ARC_MMU_VER 2
#elif defined(CONFIG_ARC_MMU_V3)
#define CONFIG_ARC_MMU_VER 3
#endif
#ifdef CONFIG_ARC_HAS_LLSC
#define __CONFIG_ARC_HAS_LLSC_VAL 1
#else
#define __CONFIG_ARC_HAS_LLSC_VAL 0
#endif
#ifdef CONFIG_ARC_HAS_SWAPE
#define __CONFIG_ARC_HAS_SWAPE_VAL 1
#else
#define __CONFIG_ARC_HAS_SWAPE_VAL 0
#endif
#ifdef CONFIG_ARC_HAS_RTSC
#define __CONFIG_ARC_HAS_RTSC_VAL 1
#else
#define __CONFIG_ARC_HAS_RTSC_VAL 0
#endif
#ifdef CONFIG_ARC_MMU_SASID
#define __CONFIG_ARC_MMU_SASID_VAL 1
#else
#define __CONFIG_ARC_MMU_SASID_VAL 0
#endif
#ifdef CONFIG_ARC_HAS_ICACHE
#define __CONFIG_ARC_HAS_ICACHE 1
#else
#define __CONFIG_ARC_HAS_ICACHE 0
#endif
#ifdef CONFIG_ARC_HAS_DCACHE
#define __CONFIG_ARC_HAS_DCACHE 1
#else
#define __CONFIG_ARC_HAS_DCACHE 0
#endif
#endif /* __ARC_ASM_DEFINES_H__ */
This diff is collapsed.
...@@ -21,6 +21,6 @@ ...@@ -21,6 +21,6 @@
extern void __init arc_init_IRQ(void); extern void __init arc_init_IRQ(void);
extern int __init get_hw_config_num_irq(void); extern int __init get_hw_config_num_irq(void);
void __cpuinit arc_local_timer_setup(unsigned int cpu); void arc_local_timer_setup(unsigned int cpu);
#endif #endif
...@@ -19,6 +19,26 @@ ...@@ -19,6 +19,26 @@
#include <asm/arcregs.h> #include <asm/arcregs.h>
/* status32 Reg bits related to Interrupt Handling */
#define STATUS_E1_BIT 1 /* Int 1 enable */
#define STATUS_E2_BIT 2 /* Int 2 enable */
#define STATUS_A1_BIT 3 /* Int 1 active */
#define STATUS_A2_BIT 4 /* Int 2 active */
#define STATUS_E1_MASK (1<<STATUS_E1_BIT)
#define STATUS_E2_MASK (1<<STATUS_E2_BIT)
#define STATUS_A1_MASK (1<<STATUS_A1_BIT)
#define STATUS_A2_MASK (1<<STATUS_A2_BIT)
/* Other Interrupt Handling related Aux regs */
#define AUX_IRQ_LEV 0x200 /* IRQ Priority: L1 or L2 */
#define AUX_IRQ_HINT 0x201 /* For generating Soft Interrupts */
#define AUX_IRQ_LV12 0x43 /* interrupt level register */
#define AUX_IENABLE 0x40c
#define AUX_ITRIGGER 0x40d
#define AUX_IPULSE 0x415
#ifndef __ASSEMBLY__ #ifndef __ASSEMBLY__
/****************************************************************** /******************************************************************
......
...@@ -31,7 +31,7 @@ static inline void arch_kgdb_breakpoint(void) ...@@ -31,7 +31,7 @@ static inline void arch_kgdb_breakpoint(void)
__asm__ __volatile__ ("trap_s 0x4\n"); __asm__ __volatile__ ("trap_s 0x4\n");
} }
extern void kgdb_trap(struct pt_regs *regs, int param); extern void kgdb_trap(struct pt_regs *regs);
enum arc700_linux_regnums { enum arc700_linux_regnums {
_R0 = 0, _R0 = 0,
...@@ -53,7 +53,7 @@ enum arc700_linux_regnums { ...@@ -53,7 +53,7 @@ enum arc700_linux_regnums {
}; };
#else #else
#define kgdb_trap(regs, param) #define kgdb_trap(regs)
#endif #endif
#endif /* __ARC_KGDB_H__ */ #endif /* __ARC_KGDB_H__ */
...@@ -50,11 +50,9 @@ struct kprobe_ctlblk { ...@@ -50,11 +50,9 @@ struct kprobe_ctlblk {
int kprobe_fault_handler(struct pt_regs *regs, unsigned long cause); int kprobe_fault_handler(struct pt_regs *regs, unsigned long cause);
void kretprobe_trampoline(void); void kretprobe_trampoline(void);
void trap_is_kprobe(unsigned long cause, unsigned long address, void trap_is_kprobe(unsigned long address, struct pt_regs *regs);
struct pt_regs *regs);
#else #else
static void trap_is_kprobe(unsigned long cause, unsigned long address, static void trap_is_kprobe(unsigned long address, struct pt_regs *regs)
struct pt_regs *regs)
{ {
} }
#endif #endif
......
...@@ -9,6 +9,40 @@ ...@@ -9,6 +9,40 @@
#ifndef _ASM_ARC_MMU_H #ifndef _ASM_ARC_MMU_H
#define _ASM_ARC_MMU_H #define _ASM_ARC_MMU_H
#if defined(CONFIG_ARC_MMU_V1)
#define CONFIG_ARC_MMU_VER 1
#elif defined(CONFIG_ARC_MMU_V2)
#define CONFIG_ARC_MMU_VER 2
#elif defined(CONFIG_ARC_MMU_V3)
#define CONFIG_ARC_MMU_VER 3
#endif
/* MMU Management regs */
#define ARC_REG_MMU_BCR 0x06f
#define ARC_REG_TLBPD0 0x405
#define ARC_REG_TLBPD1 0x406
#define ARC_REG_TLBINDEX 0x407
#define ARC_REG_TLBCOMMAND 0x408
#define ARC_REG_PID 0x409
#define ARC_REG_SCRATCH_DATA0 0x418
/* Bits in MMU PID register */
#define MMU_ENABLE (1 << 31) /* Enable MMU for process */
/* Error code if probe fails */
#define TLB_LKUP_ERR 0x80000000
/* TLB Commands */
#define TLBWrite 0x1
#define TLBRead 0x2
#define TLBGetIndex 0x3
#define TLBProbe 0x4
#if (CONFIG_ARC_MMU_VER >= 2)
#define TLBWriteNI 0x5 /* write JTLB without inv uTLBs */
#define TLBIVUTLB 0x6 /* explicitly inv uTLBs */
#endif
#ifndef __ASSEMBLY__ #ifndef __ASSEMBLY__
typedef struct { typedef struct {
...@@ -18,6 +52,16 @@ typedef struct { ...@@ -18,6 +52,16 @@ typedef struct {
#endif #endif
} mm_context_t; } mm_context_t;
#ifdef CONFIG_ARC_DBG_TLB_PARANOIA
void tlb_paranoid_check(unsigned int pid_sw, unsigned long address);
#else
#define tlb_paranoid_check(a, b)
#endif #endif
void arc_mmu_init(void);
extern char *arc_mmu_mumbojumbo(int cpu_id, char *buf, int len);
void __init read_decode_mmu_bcr(void);
#endif /* !__ASSEMBLY__ */
#endif #endif
...@@ -96,13 +96,8 @@ typedef unsigned long pgtable_t; ...@@ -96,13 +96,8 @@ typedef unsigned long pgtable_t;
#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT) #define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
/* Default Permissions for page, used in mmap.c */ /* Default Permissions for stack/heaps pages (Non Executable) */
#ifdef CONFIG_ARC_STACK_NONEXEC
#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE) #define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE)
#else
#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
#endif
#define WANT_PAGE_VIRTUAL 1 #define WANT_PAGE_VIRTUAL 1
......
...@@ -135,6 +135,12 @@ ...@@ -135,6 +135,12 @@
/* ioremap */ /* ioremap */
#define PAGE_KERNEL_NO_CACHE __pgprot(_K_PAGE_PERMS) #define PAGE_KERNEL_NO_CACHE __pgprot(_K_PAGE_PERMS)
/* Masks for actual TLB "PD"s */
#define PTE_BITS_IN_PD0 (_PAGE_GLOBAL | _PAGE_PRESENT)
#define PTE_BITS_IN_PD1 (PAGE_MASK | _PAGE_CACHEABLE | \
_PAGE_U_EXECUTE | _PAGE_U_WRITE | _PAGE_U_READ | \
_PAGE_K_EXECUTE | _PAGE_K_WRITE | _PAGE_K_READ)
/************************************************************************** /**************************************************************************
* Mapping of vm_flags (Generic VM) to PTE flags (arch specific) * Mapping of vm_flags (Generic VM) to PTE flags (arch specific)
* *
......
...@@ -19,6 +19,7 @@ ...@@ -19,6 +19,7 @@
#ifndef __ASSEMBLY__ #ifndef __ASSEMBLY__
#include <asm/arcregs.h> /* for STATUS_E1_MASK et all */ #include <asm/arcregs.h> /* for STATUS_E1_MASK et all */
#include <asm/ptrace.h>
/* Arch specific stuff which needs to be saved per task. /* Arch specific stuff which needs to be saved per task.
* However these items are not so important so as to earn a place in * However these items are not so important so as to earn a place in
...@@ -28,10 +29,6 @@ struct thread_struct { ...@@ -28,10 +29,6 @@ struct thread_struct {
unsigned long ksp; /* kernel mode stack pointer */ unsigned long ksp; /* kernel mode stack pointer */
unsigned long callee_reg; /* pointer to callee regs */ unsigned long callee_reg; /* pointer to callee regs */
unsigned long fault_address; /* dbls as brkpt holder as well */ unsigned long fault_address; /* dbls as brkpt holder as well */
unsigned long cause_code; /* Exception Cause Code (ECR) */
#ifdef CONFIG_ARC_CURR_IN_REG
unsigned long user_r25;
#endif
#ifdef CONFIG_ARC_FPU_SAVE_RESTORE #ifdef CONFIG_ARC_FPU_SAVE_RESTORE
struct arc_fpu fpu; struct arc_fpu fpu;
#endif #endif
...@@ -50,7 +47,7 @@ struct task_struct; ...@@ -50,7 +47,7 @@ struct task_struct;
unsigned long thread_saved_pc(struct task_struct *t); unsigned long thread_saved_pc(struct task_struct *t);
#define task_pt_regs(p) \ #define task_pt_regs(p) \
((struct pt_regs *)(THREAD_SIZE - 4 + (void *)task_stack_page(p)) - 1) ((struct pt_regs *)(THREAD_SIZE + (void *)task_stack_page(p)) - 1)
/* Free all resources held by a thread. */ /* Free all resources held by a thread. */
#define release_thread(thread) do { } while (0) #define release_thread(thread) do { } while (0)
...@@ -75,11 +72,15 @@ unsigned long thread_saved_pc(struct task_struct *t); ...@@ -75,11 +72,15 @@ unsigned long thread_saved_pc(struct task_struct *t);
/* /*
* Where abouts of Task's sp, fp, blink when it was last seen in kernel mode. * Where abouts of Task's sp, fp, blink when it was last seen in kernel mode.
* These can't be derived from pt_regs as that would give correp user-mode val * Look in process.c for details of kernel stack layout
*/ */
#define KSTK_ESP(tsk) (tsk->thread.ksp) #define KSTK_ESP(tsk) (tsk->thread.ksp)
#define KSTK_BLINK(tsk) (*((unsigned int *)((KSTK_ESP(tsk)) + (13+1+1)*4)))
#define KSTK_FP(tsk) (*((unsigned int *)((KSTK_ESP(tsk)) + (13+1)*4))) #define KSTK_REG(tsk, off) (*((unsigned int *)(KSTK_ESP(tsk) + \
sizeof(struct callee_regs) + off)))
#define KSTK_BLINK(tsk) KSTK_REG(tsk, 4)
#define KSTK_FP(tsk) KSTK_REG(tsk, 0)
/* /*
* Do necessary setup to start up a newly executed thread. * Do necessary setup to start up a newly executed thread.
......
...@@ -17,12 +17,6 @@ ...@@ -17,12 +17,6 @@
/* THE pt_regs: Defines how regs are saved during entry into kernel */ /* THE pt_regs: Defines how regs are saved during entry into kernel */
struct pt_regs { struct pt_regs {
/*
* 1 word gutter after reg-file has been saved
* Technically not needed, Since SP always points to a "full" location
* (vs. "empty"). But pt_regs is shared with tools....
*/
long res;
/* Real registers */ /* Real registers */
long bta; /* bta_l1, bta_l2, erbta */ long bta; /* bta_l1, bta_l2, erbta */
...@@ -50,22 +44,32 @@ struct pt_regs { ...@@ -50,22 +44,32 @@ struct pt_regs {
long sp; /* user/kernel sp depending on where we came from */ long sp; /* user/kernel sp depending on where we came from */
long orig_r0; long orig_r0;
/*to distinguish bet excp, syscall, irq */ /*
* To distinguish bet excp, syscall, irq
* For traps and exceptions, Exception Cause Register.
* ECR: <00> <VV> <CC> <PP>
* Last word used by Linux for extra state mgmt (syscall-restart)
* For interrupts, use artificial ECR values to note current prio-level
*/
union { union {
struct {
#ifdef CONFIG_CPU_BIG_ENDIAN #ifdef CONFIG_CPU_BIG_ENDIAN
/* so that assembly code is same for LE/BE */ unsigned long state:8, ecr_vec:8,
unsigned long orig_r8:16, event:16; ecr_cause:8, ecr_param:8;
#else #else
unsigned long event:16, orig_r8:16; unsigned long ecr_param:8, ecr_cause:8,
ecr_vec:8, state:8;
#endif #endif
long orig_r8_word; };
unsigned long event;
}; };
long user_r25;
}; };
/* Callee saved registers - need to be saved only when you are scheduled out */ /* Callee saved registers - need to be saved only when you are scheduled out */
struct callee_regs { struct callee_regs {
long res; /* Again this is not needed */
long r25; long r25;
long r24; long r24;
long r23; long r23;
...@@ -99,18 +103,20 @@ struct callee_regs { ...@@ -99,18 +103,20 @@ struct callee_regs {
/* return 1 if PC in delay slot */ /* return 1 if PC in delay slot */
#define delay_mode(regs) ((regs->status32 & STATUS_DE_MASK) == STATUS_DE_MASK) #define delay_mode(regs) ((regs->status32 & STATUS_DE_MASK) == STATUS_DE_MASK)
#define in_syscall(regs) (regs->event & orig_r8_IS_SCALL) #define in_syscall(regs) ((regs->ecr_vec == ECR_V_TRAP) && !regs->ecr_param)
#define in_brkpt_trap(regs) (regs->event & orig_r8_IS_BRKPT) #define in_brkpt_trap(regs) ((regs->ecr_vec == ECR_V_TRAP) && regs->ecr_param)
#define STATE_SCALL_RESTARTED 0x01
#define syscall_wont_restart(regs) (regs->event |= orig_r8_IS_SCALL_RESTARTED) #define syscall_wont_restart(reg) (reg->state |= STATE_SCALL_RESTARTED)
#define syscall_restartable(regs) !(regs->event & orig_r8_IS_SCALL_RESTARTED) #define syscall_restartable(reg) !(reg->state & STATE_SCALL_RESTARTED)
#define current_pt_regs() \ #define current_pt_regs() \
({ \ ({ \
/* open-coded current_thread_info() */ \ /* open-coded current_thread_info() */ \
register unsigned long sp asm ("sp"); \ register unsigned long sp asm ("sp"); \
unsigned long pg_start = (sp & ~(THREAD_SIZE - 1)); \ unsigned long pg_start = (sp & ~(THREAD_SIZE - 1)); \
(struct pt_regs *)(pg_start + THREAD_SIZE - 4) - 1; \ (struct pt_regs *)(pg_start + THREAD_SIZE) - 1; \
}) })
static inline long regs_return_value(struct pt_regs *regs) static inline long regs_return_value(struct pt_regs *regs)
...@@ -120,11 +126,4 @@ static inline long regs_return_value(struct pt_regs *regs) ...@@ -120,11 +126,4 @@ static inline long regs_return_value(struct pt_regs *regs)
#endif /* !__ASSEMBLY__ */ #endif /* !__ASSEMBLY__ */
#define orig_r8_IS_SCALL 0x0001
#define orig_r8_IS_SCALL_RESTARTED 0x0002
#define orig_r8_IS_BRKPT 0x0004
#define orig_r8_IS_EXCPN 0x0008
#define orig_r8_IS_IRQ1 0x0010
#define orig_r8_IS_IRQ2 0x0020
#endif /* __ASM_PTRACE_H */ #endif /* __ASM_PTRACE_H */
...@@ -18,7 +18,7 @@ static inline long ...@@ -18,7 +18,7 @@ static inline long
syscall_get_nr(struct task_struct *task, struct pt_regs *regs) syscall_get_nr(struct task_struct *task, struct pt_regs *regs)
{ {
if (user_mode(regs) && in_syscall(regs)) if (user_mode(regs) && in_syscall(regs))
return regs->orig_r8; return regs->r8;
else else
return -1; return -1;
} }
...@@ -26,8 +26,7 @@ syscall_get_nr(struct task_struct *task, struct pt_regs *regs) ...@@ -26,8 +26,7 @@ syscall_get_nr(struct task_struct *task, struct pt_regs *regs)
static inline void static inline void
syscall_rollback(struct task_struct *task, struct pt_regs *regs) syscall_rollback(struct task_struct *task, struct pt_regs *regs)
{ {
/* XXX: I can't fathom how pt_regs->r8 will be clobbered ? */ regs->r0 = regs->orig_r0;
regs->r8 = regs->orig_r8;
} }
static inline long static inline long
......
...@@ -9,9 +9,9 @@ ...@@ -9,9 +9,9 @@
#ifndef __ASM_TLB_MMU_V1_H__ #ifndef __ASM_TLB_MMU_V1_H__
#define __ASM_TLB_MMU_V1_H__ #define __ASM_TLB_MMU_V1_H__
#if defined(__ASSEMBLY__) && defined(CONFIG_ARC_MMU_VER == 1) #include <asm/mmu.h>
#include <asm/tlb.h> #if defined(__ASSEMBLY__) && (CONFIG_ARC_MMU_VER == 1)
.macro TLB_WRITE_HEURISTICS .macro TLB_WRITE_HEURISTICS
......
...@@ -9,18 +9,6 @@ ...@@ -9,18 +9,6 @@
#ifndef _ASM_ARC_TLB_H #ifndef _ASM_ARC_TLB_H
#define _ASM_ARC_TLB_H #define _ASM_ARC_TLB_H
#ifdef __KERNEL__
#include <asm/pgtable.h>
/* Masks for actual TLB "PD"s */
#define PTE_BITS_IN_PD0 (_PAGE_GLOBAL | _PAGE_PRESENT)
#define PTE_BITS_IN_PD1 (PAGE_MASK | _PAGE_CACHEABLE | \
_PAGE_U_EXECUTE | _PAGE_U_WRITE | _PAGE_U_READ | \
_PAGE_K_EXECUTE | _PAGE_K_WRITE | _PAGE_K_READ)
#ifndef __ASSEMBLY__
#define tlb_flush(tlb) \ #define tlb_flush(tlb) \
do { \ do { \
if (tlb->fullmm) \ if (tlb->fullmm) \
...@@ -56,18 +44,4 @@ do { \ ...@@ -56,18 +44,4 @@ do { \
#include <linux/pagemap.h> #include <linux/pagemap.h>
#include <asm-generic/tlb.h> #include <asm-generic/tlb.h>
#ifdef CONFIG_ARC_DBG_TLB_PARANOIA
void tlb_paranoid_check(unsigned int pid_sw, unsigned long address);
#else
#define tlb_paranoid_check(a, b)
#endif
void arc_mmu_init(void);
extern char *arc_mmu_mumbojumbo(int cpu_id, char *buf, int len);
void __init read_decode_mmu_bcr(void);
#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* _ASM_ARC_TLB_H */ #endif /* _ASM_ARC_TLB_H */
...@@ -16,11 +16,11 @@ ...@@ -16,11 +16,11 @@
#ifdef CONFIG_ARC_MISALIGN_ACCESS #ifdef CONFIG_ARC_MISALIGN_ACCESS
int misaligned_fixup(unsigned long address, struct pt_regs *regs, int misaligned_fixup(unsigned long address, struct pt_regs *regs,
unsigned long cause, struct callee_regs *cregs); struct callee_regs *cregs);
#else #else
static inline int static inline int
misaligned_fixup(unsigned long address, struct pt_regs *regs, misaligned_fixup(unsigned long address, struct pt_regs *regs,
unsigned long cause, struct callee_regs *cregs) struct callee_regs *cregs)
{ {
return 0; return 0;
} }
......
...@@ -20,28 +20,31 @@ ...@@ -20,28 +20,31 @@
* *
* This is to decouple pt_regs from user-space ABI, to be able to change it * This is to decouple pt_regs from user-space ABI, to be able to change it
* w/o affecting the ABI. * w/o affecting the ABI.
* Although the layout (initial padding) is similar to pt_regs to have some *
* optimizations when copying pt_regs to/from user_regs_struct. * The intermediate pad,pad2 are relics of initial layout based on pt_regs
* for optimizations when copying pt_regs to/from user_regs_struct.
* We no longer need them, but can't be changed as they are part of ABI now.
* *
* Also, sigcontext only care about the scratch regs as that is what we really * Also, sigcontext only care about the scratch regs as that is what we really
* save/restore for signal handling. * save/restore for signal handling. However gdb also uses the same struct
* hence callee regs need to be in there too.
*/ */
struct user_regs_struct { struct user_regs_struct {
long pad;
struct { struct {
long pad;
long bta, lp_start, lp_end, lp_count; long bta, lp_start, lp_end, lp_count;
long status32, ret, blink, fp, gp; long status32, ret, blink, fp, gp;
long r12, r11, r10, r9, r8, r7, r6, r5, r4, r3, r2, r1, r0; long r12, r11, r10, r9, r8, r7, r6, r5, r4, r3, r2, r1, r0;
long sp; long sp;
} scratch; } scratch;
long pad2;
struct { struct {
long pad;
long r25, r24, r23, r22, r21, r20; long r25, r24, r23, r22, r21, r20;
long r19, r18, r17, r16, r15, r14, r13; long r19, r18, r17, r16, r15, r14, r13;
} callee; } callee;
long efa; /* break pt addr, for break points in delay slots */ long efa; /* break pt addr, for break points in delay slots */
long stop_pc; /* give dbg stop_pc directly after checking orig_r8 */ long stop_pc; /* give dbg stop_pc after ensuring brkpt trap */
}; };
#endif /* !__ASSEMBLY__ */ #endif /* !__ASSEMBLY__ */
......
...@@ -24,9 +24,6 @@ int main(void) ...@@ -24,9 +24,6 @@ int main(void)
DEFINE(THREAD_KSP, offsetof(struct thread_struct, ksp)); DEFINE(THREAD_KSP, offsetof(struct thread_struct, ksp));
DEFINE(THREAD_CALLEE_REG, offsetof(struct thread_struct, callee_reg)); DEFINE(THREAD_CALLEE_REG, offsetof(struct thread_struct, callee_reg));
#ifdef CONFIG_ARC_CURR_IN_REG
DEFINE(THREAD_USER_R25, offsetof(struct thread_struct, user_r25));
#endif
DEFINE(THREAD_FAULT_ADDR, DEFINE(THREAD_FAULT_ADDR,
offsetof(struct thread_struct, fault_address)); offsetof(struct thread_struct, fault_address));
...@@ -49,7 +46,7 @@ int main(void) ...@@ -49,7 +46,7 @@ int main(void)
BLANK(); BLANK();
DEFINE(PT_status32, offsetof(struct pt_regs, status32)); DEFINE(PT_status32, offsetof(struct pt_regs, status32));
DEFINE(PT_orig_r8, offsetof(struct pt_regs, orig_r8_word)); DEFINE(PT_event, offsetof(struct pt_regs, event));
DEFINE(PT_sp, offsetof(struct pt_regs, sp)); DEFINE(PT_sp, offsetof(struct pt_regs, sp));
DEFINE(PT_r0, offsetof(struct pt_regs, r0)); DEFINE(PT_r0, offsetof(struct pt_regs, r0));
DEFINE(PT_r1, offsetof(struct pt_regs, r1)); DEFINE(PT_r1, offsetof(struct pt_regs, r1));
...@@ -60,5 +57,7 @@ int main(void) ...@@ -60,5 +57,7 @@ int main(void)
DEFINE(PT_r6, offsetof(struct pt_regs, r6)); DEFINE(PT_r6, offsetof(struct pt_regs, r6));
DEFINE(PT_r7, offsetof(struct pt_regs, r7)); DEFINE(PT_r7, offsetof(struct pt_regs, r7));
DEFINE(SZ_CALLEE_REGS, sizeof(struct callee_regs));
DEFINE(SZ_PT_REGS, sizeof(struct pt_regs));
return 0; return 0;
} }
...@@ -23,10 +23,6 @@ __switch_to(struct task_struct *prev_task, struct task_struct *next_task) ...@@ -23,10 +23,6 @@ __switch_to(struct task_struct *prev_task, struct task_struct *next_task)
unsigned int tmp; unsigned int tmp;
unsigned int prev = (unsigned int)prev_task; unsigned int prev = (unsigned int)prev_task;
unsigned int next = (unsigned int)next_task; unsigned int next = (unsigned int)next_task;
int num_words_to_skip = 1;
#ifdef CONFIG_ARC_CURR_IN_REG
num_words_to_skip++;
#endif
__asm__ __volatile__( __asm__ __volatile__(
/* FP/BLINK save generated by gcc (standard function prologue */ /* FP/BLINK save generated by gcc (standard function prologue */
...@@ -44,8 +40,9 @@ __switch_to(struct task_struct *prev_task, struct task_struct *next_task) ...@@ -44,8 +40,9 @@ __switch_to(struct task_struct *prev_task, struct task_struct *next_task)
"st.a r24, [sp, -4] \n\t" "st.a r24, [sp, -4] \n\t"
#ifndef CONFIG_ARC_CURR_IN_REG #ifndef CONFIG_ARC_CURR_IN_REG
"st.a r25, [sp, -4] \n\t" "st.a r25, [sp, -4] \n\t"
#else
"sub sp, sp, 4 \n\t" /* usual r25 placeholder */
#endif #endif
"sub sp, sp, %4 \n\t" /* create gutter at top */
/* set ksp of outgoing task in tsk->thread.ksp */ /* set ksp of outgoing task in tsk->thread.ksp */
"st.as sp, [%3, %1] \n\t" "st.as sp, [%3, %1] \n\t"
...@@ -76,10 +73,10 @@ __switch_to(struct task_struct *prev_task, struct task_struct *next_task) ...@@ -76,10 +73,10 @@ __switch_to(struct task_struct *prev_task, struct task_struct *next_task)
/* start loading it's CALLEE reg file */ /* start loading it's CALLEE reg file */
"add sp, sp, %4 \n\t" /* skip gutter at top */
#ifndef CONFIG_ARC_CURR_IN_REG #ifndef CONFIG_ARC_CURR_IN_REG
"ld.ab r25, [sp, 4] \n\t" "ld.ab r25, [sp, 4] \n\t"
#else
"add sp, sp, 4 \n\t"
#endif #endif
"ld.ab r24, [sp, 4] \n\t" "ld.ab r24, [sp, 4] \n\t"
"ld.ab r23, [sp, 4] \n\t" "ld.ab r23, [sp, 4] \n\t"
...@@ -100,8 +97,7 @@ __switch_to(struct task_struct *prev_task, struct task_struct *next_task) ...@@ -100,8 +97,7 @@ __switch_to(struct task_struct *prev_task, struct task_struct *next_task)
/* FP/BLINK restore generated by gcc (standard func epilogue */ /* FP/BLINK restore generated by gcc (standard func epilogue */
: "=r"(tmp) : "=r"(tmp)
: "n"((TASK_THREAD + THREAD_KSP) / 4), "r"(next), "r"(prev), : "n"((TASK_THREAD + THREAD_KSP) / 4), "r"(next), "r"(prev)
"n"(num_words_to_skip * 4)
: "blink" : "blink"
); );
......
...@@ -142,7 +142,7 @@ VECTOR reserved ; Reserved Exceptions ...@@ -142,7 +142,7 @@ VECTOR reserved ; Reserved Exceptions
.endr .endr
#include <linux/linkage.h> /* ARC_{EXTRY,EXIT} */ #include <linux/linkage.h> /* ARC_{EXTRY,EXIT} */
#include <asm/entry.h> /* SAVE_ALL_{INT1,INT2,TRAP...} */ #include <asm/entry.h> /* SAVE_ALL_{INT1,INT2,SYS...} */
#include <asm/errno.h> #include <asm/errno.h>
#include <asm/arcregs.h> #include <asm/arcregs.h>
#include <asm/irqflags.h> #include <asm/irqflags.h>
...@@ -274,10 +274,8 @@ ARC_ENTRY instr_service ...@@ -274,10 +274,8 @@ ARC_ENTRY instr_service
SWITCH_TO_KERNEL_STK SWITCH_TO_KERNEL_STK
SAVE_ALL_SYS SAVE_ALL_SYS
lr r0, [ecr] lr r0, [efa]
lr r1, [efa] mov r1, sp
mov r2, sp
FAKE_RET_FROM_EXCPN r9 FAKE_RET_FROM_EXCPN r9
...@@ -298,9 +296,8 @@ ARC_ENTRY mem_service ...@@ -298,9 +296,8 @@ ARC_ENTRY mem_service
SWITCH_TO_KERNEL_STK SWITCH_TO_KERNEL_STK
SAVE_ALL_SYS SAVE_ALL_SYS
lr r0, [ecr] lr r0, [efa]
lr r1, [efa] mov r1, sp
mov r2, sp
bl do_memory_error bl do_memory_error
b ret_from_exception b ret_from_exception
ARC_EXIT mem_service ARC_EXIT mem_service
...@@ -317,11 +314,14 @@ ARC_ENTRY EV_MachineCheck ...@@ -317,11 +314,14 @@ ARC_ENTRY EV_MachineCheck
SWITCH_TO_KERNEL_STK SWITCH_TO_KERNEL_STK
SAVE_ALL_SYS SAVE_ALL_SYS
lr r0, [ecr] lr r2, [ecr]
lr r1, [efa] lr r0, [efa]
mov r2, sp mov r1, sp
lsr r3, r2, 8
bmsk r3, r3, 7
brne r3, ECR_C_MCHK_DUP_TLB, 1f
brne r0, 0x200100, 1f
bl do_tlb_overlap_fault bl do_tlb_overlap_fault
b ret_from_exception b ret_from_exception
...@@ -355,8 +355,8 @@ ARC_ENTRY EV_TLBProtV ...@@ -355,8 +355,8 @@ ARC_ENTRY EV_TLBProtV
; ecr and efa were not saved in case an Intr sneaks in ; ecr and efa were not saved in case an Intr sneaks in
; after fake rtie ; after fake rtie
; ;
lr r3, [ecr] lr r2, [ecr]
lr r4, [efa] lr r1, [efa] ; Faulting Data address
; --------(4) Return from CPU Exception Mode --------- ; --------(4) Return from CPU Exception Mode ---------
; Fake a rtie, but rtie to next label ; Fake a rtie, but rtie to next label
...@@ -368,31 +368,25 @@ ARC_ENTRY EV_TLBProtV ...@@ -368,31 +368,25 @@ ARC_ENTRY EV_TLBProtV
;------ (5) Type of Protection Violation? ---------- ;------ (5) Type of Protection Violation? ----------
; ;
; ProtV Hardware Exception is triggered for Access Faults of 2 types ; ProtV Hardware Exception is triggered for Access Faults of 2 types
; -Access Violaton (WRITE to READ ONLY Page) - for linux COW ; -Access Violaton : 00_23_(00|01|02|03)_00
; -Unaligned Access (READ/WRITE on odd boundary) ; x r w r+w
; -Unaligned Access : 00_23_04_00
; ;
cmp r3, 0x230400 ; Misaligned data access ? bbit1 r2, ECR_C_BIT_PROTV_MISALIG_DATA, 4f
beq 4f
;========= (6a) Access Violation Processing ======== ;========= (6a) Access Violation Processing ========
cmp r3, 0x230100
mov r1, 0x0 ; if LD exception ? write = 0
mov.ne r1, 0x1 ; else write = 1
mov r2, r4 ; faulting address
mov r0, sp ; pt_regs mov r0, sp ; pt_regs
bl do_page_fault bl do_page_fault
b ret_from_exception b ret_from_exception
;========== (6b) Non aligned access ============ ;========== (6b) Non aligned access ============
4: 4:
mov r0, r3 ; cause code mov r0, r1
mov r1, r4 ; faulting address mov r1, sp ; pt_regs
mov r2, sp ; pt_regs
#ifdef CONFIG_ARC_MISALIGN_ACCESS #ifdef CONFIG_ARC_MISALIGN_ACCESS
SAVE_CALLEE_SAVED_USER SAVE_CALLEE_SAVED_USER
mov r3, sp ; callee_regs mov r2, sp ; callee_regs
bl do_misaligned_access bl do_misaligned_access
...@@ -419,9 +413,8 @@ ARC_ENTRY EV_PrivilegeV ...@@ -419,9 +413,8 @@ ARC_ENTRY EV_PrivilegeV
SWITCH_TO_KERNEL_STK SWITCH_TO_KERNEL_STK
SAVE_ALL_SYS SAVE_ALL_SYS
lr r0, [ecr] lr r0, [efa]
lr r1, [efa] mov r1, sp
mov r2, sp
FAKE_RET_FROM_EXCPN r9 FAKE_RET_FROM_EXCPN r9
...@@ -440,9 +433,8 @@ ARC_ENTRY EV_Extension ...@@ -440,9 +433,8 @@ ARC_ENTRY EV_Extension
SWITCH_TO_KERNEL_STK SWITCH_TO_KERNEL_STK
SAVE_ALL_SYS SAVE_ALL_SYS
lr r0, [ecr] lr r0, [efa]
lr r1, [efa] mov r1, sp
mov r2, sp
bl do_extension_fault bl do_extension_fault
b ret_from_exception b ret_from_exception
ARC_EXIT EV_Extension ARC_EXIT EV_Extension
...@@ -498,11 +490,8 @@ tracesys_exit: ...@@ -498,11 +490,8 @@ tracesys_exit:
trap_with_param: trap_with_param:
; stop_pc info by gdb needs this info ; stop_pc info by gdb needs this info
stw orig_r8_IS_BRKPT, [sp, PT_orig_r8] lr r0, [efa]
mov r1, sp
mov r0, r12
lr r1, [efa]
mov r2, sp
; Now that we have read EFA, its safe to do "fake" rtie ; Now that we have read EFA, its safe to do "fake" rtie
; and get out of CPU exception mode ; and get out of CPU exception mode
...@@ -544,11 +533,11 @@ ARC_ENTRY EV_Trap ...@@ -544,11 +533,11 @@ ARC_ENTRY EV_Trap
lr r9, [erstatus] lr r9, [erstatus]
SWITCH_TO_KERNEL_STK SWITCH_TO_KERNEL_STK
SAVE_ALL_TRAP SAVE_ALL_SYS
;------- (4) What caused the Trap -------------- ;------- (4) What caused the Trap --------------
lr r12, [ecr] lr r12, [ecr]
and.f 0, r12, ECR_PARAM_MASK bmsk.f 0, r12, 7
bnz trap_with_param bnz trap_with_param
; ======= (5a) Trap is due to System Call ======== ; ======= (5a) Trap is due to System Call ========
...@@ -589,11 +578,7 @@ ARC_ENTRY ret_from_exception ...@@ -589,11 +578,7 @@ ARC_ENTRY ret_from_exception
; Pre-{IRQ,Trap,Exception} K/U mode from pt_regs->status32 ; Pre-{IRQ,Trap,Exception} K/U mode from pt_regs->status32
ld r8, [sp, PT_status32] ; returning to User/Kernel Mode ld r8, [sp, PT_status32] ; returning to User/Kernel Mode
#ifdef CONFIG_PREEMPT
bbit0 r8, STATUS_U_BIT, resume_kernel_mode bbit0 r8, STATUS_U_BIT, resume_kernel_mode
#else
bbit0 r8, STATUS_U_BIT, restore_regs
#endif
; Before returning to User mode check-for-and-complete any pending work ; Before returning to User mode check-for-and-complete any pending work
; such as rescheduling/signal-delivery etc. ; such as rescheduling/signal-delivery etc.
...@@ -653,10 +638,10 @@ resume_user_mode_begin: ...@@ -653,10 +638,10 @@ resume_user_mode_begin:
b resume_user_mode_begin ; unconditionally back to U mode ret chks b resume_user_mode_begin ; unconditionally back to U mode ret chks
; for single exit point from this block ; for single exit point from this block
#ifdef CONFIG_PREEMPT
resume_kernel_mode: resume_kernel_mode:
#ifdef CONFIG_PREEMPT
; Can't preempt if preemption disabled ; Can't preempt if preemption disabled
GET_CURR_THR_INFO_FROM_SP r10 GET_CURR_THR_INFO_FROM_SP r10
ld r8, [r10, THREAD_INFO_PREEMPT_COUNT] ld r8, [r10, THREAD_INFO_PREEMPT_COUNT]
...@@ -687,17 +672,6 @@ restore_regs : ...@@ -687,17 +672,6 @@ restore_regs :
; XXX can this be optimised out ; XXX can this be optimised out
IRQ_DISABLE_SAVE r9, r10 ;@r10 has prisitine (pre-disable) copy IRQ_DISABLE_SAVE r9, r10 ;@r10 has prisitine (pre-disable) copy
#ifdef CONFIG_ARC_CURR_IN_REG
; Restore User R25
; Earlier this used to be only for returning to user mode
; However with 2 levels of IRQ this can also happen even if
; in kernel mode
ld r9, [sp, PT_sp]
brhs r9, VMALLOC_START, 8f
RESTORE_USER_R25
8:
#endif
; Restore REG File. In case multiple Events outstanding, ; Restore REG File. In case multiple Events outstanding,
; use the same priorty as rtie: EXCPN, L2 IRQ, L1 IRQ, None ; use the same priorty as rtie: EXCPN, L2 IRQ, L1 IRQ, None
; Note that we use realtime STATUS32 (not pt_regs->status32) to ; Note that we use realtime STATUS32 (not pt_regs->status32) to
...@@ -714,8 +688,17 @@ not_exception: ...@@ -714,8 +688,17 @@ not_exception:
#ifdef CONFIG_ARC_COMPACT_IRQ_LEVELS #ifdef CONFIG_ARC_COMPACT_IRQ_LEVELS
; Level 2 interrupt return Path - from hardware standpoint
bbit0 r10, STATUS_A2_BIT, not_level2_interrupt bbit0 r10, STATUS_A2_BIT, not_level2_interrupt
;------------------------------------------------------------------
; However the context returning might not have taken L2 intr itself
; e.g. Task'A' user-code -> L2 intr -> schedule -> 'B' user-code ret
; Special considerations needed for the context which took L2 intr
ld r9, [sp, PT_event] ; Ensure this is L2 intr context
brne r9, event_IRQ2, 149f
;------------------------------------------------------------------ ;------------------------------------------------------------------
; if L2 IRQ interrupted a L1 ISR, we'd disbaled preemption earlier ; if L2 IRQ interrupted a L1 ISR, we'd disbaled preemption earlier
; so that sched doesnt move to new task, causing L1 to be delayed ; so that sched doesnt move to new task, causing L1 to be delayed
...@@ -723,19 +706,15 @@ not_exception: ...@@ -723,19 +706,15 @@ not_exception:
; things to what they were, before returning from L2 context ; things to what they were, before returning from L2 context
;---------------------------------------------------------------- ;----------------------------------------------------------------
ldw r9, [sp, PT_orig_r8] ; get orig_r8 to make sure it is
brne r9, orig_r8_IS_IRQ2, 149f ; infact a L2 ISR ret path
ld r9, [sp, PT_status32] ; get statu32_l2 (saved in pt_regs) ld r9, [sp, PT_status32] ; get statu32_l2 (saved in pt_regs)
bbit0 r9, STATUS_A1_BIT, 149f ; L1 not active when L2 IRQ, so normal bbit0 r9, STATUS_A1_BIT, 149f ; L1 not active when L2 IRQ, so normal
; A1 is set in status32_l2
; decrement thread_info->preempt_count (re-enable preemption) ; decrement thread_info->preempt_count (re-enable preemption)
GET_CURR_THR_INFO_FROM_SP r10 GET_CURR_THR_INFO_FROM_SP r10
ld r9, [r10, THREAD_INFO_PREEMPT_COUNT] ld r9, [r10, THREAD_INFO_PREEMPT_COUNT]
; paranoid check, given A1 was active when A2 happened, preempt count ; paranoid check, given A1 was active when A2 happened, preempt count
; must not be 0 beccause we would have incremented it. ; must not be 0 because we would have incremented it.
; If this does happen we simply HALT as it means a BUG !!! ; If this does happen we simply HALT as it means a BUG !!!
cmp r9, 0 cmp r9, 0
bnz 2f bnz 2f
......
...@@ -27,6 +27,8 @@ stext: ...@@ -27,6 +27,8 @@ stext:
; Don't clobber r0-r4 yet. It might have bootloader provided info ; Don't clobber r0-r4 yet. It might have bootloader provided info
;------------------------------------------------------------------- ;-------------------------------------------------------------------
sr @_int_vec_base_lds, [AUX_INTR_VEC_BASE]
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
; Only Boot (Master) proceeds. Others wait in platform dependent way ; Only Boot (Master) proceeds. Others wait in platform dependent way
; IDENTITY Reg [ 3 2 1 0 ] ; IDENTITY Reg [ 3 2 1 0 ]
......
...@@ -28,25 +28,17 @@ ...@@ -28,25 +28,17 @@
* -Disable all IRQs (on CPU side) * -Disable all IRQs (on CPU side)
* -Optionally, setup the High priority Interrupts as Level 2 IRQs * -Optionally, setup the High priority Interrupts as Level 2 IRQs
*/ */
void __cpuinit arc_init_IRQ(void) void arc_init_IRQ(void)
{ {
int level_mask = 0; int level_mask = 0;
write_aux_reg(AUX_INTR_VEC_BASE, _int_vec_base_lds);
/* Disable all IRQs: enable them as devices request */ /* Disable all IRQs: enable them as devices request */
write_aux_reg(AUX_IENABLE, 0); write_aux_reg(AUX_IENABLE, 0);
/* setup any high priority Interrupts (Level2 in ARCompact jargon) */ /* setup any high priority Interrupts (Level2 in ARCompact jargon) */
#ifdef CONFIG_ARC_IRQ3_LV2 level_mask |= IS_ENABLED(CONFIG_ARC_IRQ3_LV2) << 3;
level_mask |= (1 << 3); level_mask |= IS_ENABLED(CONFIG_ARC_IRQ5_LV2) << 5;
#endif level_mask |= IS_ENABLED(CONFIG_ARC_IRQ6_LV2) << 6;
#ifdef CONFIG_ARC_IRQ5_LV2
level_mask |= (1 << 5);
#endif
#ifdef CONFIG_ARC_IRQ6_LV2
level_mask |= (1 << 6);
#endif
if (level_mask) { if (level_mask) {
pr_info("Level-2 interrupts bitset %x\n", level_mask); pr_info("Level-2 interrupts bitset %x\n", level_mask);
......
...@@ -169,7 +169,7 @@ int kgdb_arch_init(void) ...@@ -169,7 +169,7 @@ int kgdb_arch_init(void)
return 0; return 0;
} }
void kgdb_trap(struct pt_regs *regs, int param) void kgdb_trap(struct pt_regs *regs)
{ {
/* trap_s 3 is used for breakpoints that overwrite existing /* trap_s 3 is used for breakpoints that overwrite existing
* instructions, while trap_s 4 is used for compiled breakpoints. * instructions, while trap_s 4 is used for compiled breakpoints.
...@@ -181,7 +181,7 @@ void kgdb_trap(struct pt_regs *regs, int param) ...@@ -181,7 +181,7 @@ void kgdb_trap(struct pt_regs *regs, int param)
* with trap_s 4 (compiled) breakpoints, continuation needs to * with trap_s 4 (compiled) breakpoints, continuation needs to
* start after the breakpoint. * start after the breakpoint.
*/ */
if (param == 3) if (regs->ecr_param == 3)
instruction_pointer(regs) -= BREAK_INSTR_SIZE; instruction_pointer(regs) -= BREAK_INSTR_SIZE;
kgdb_handle_exception(1, SIGTRAP, 0, regs); kgdb_handle_exception(1, SIGTRAP, 0, regs);
......
...@@ -517,8 +517,7 @@ int __kprobes arch_trampoline_kprobe(struct kprobe *p) ...@@ -517,8 +517,7 @@ int __kprobes arch_trampoline_kprobe(struct kprobe *p)
return 0; return 0;
} }
void trap_is_kprobe(unsigned long cause, unsigned long address, void trap_is_kprobe(unsigned long address, struct pt_regs *regs)
struct pt_regs *regs)
{ {
notify_die(DIE_TRAP, "kprobe_trap", regs, address, cause, SIGTRAP); notify_die(DIE_TRAP, "kprobe_trap", regs, address, 0, SIGTRAP);
} }
...@@ -55,10 +55,8 @@ asmlinkage void ret_from_fork(void); ...@@ -55,10 +55,8 @@ asmlinkage void ret_from_fork(void);
* | ... | * | ... |
* | unused | * | unused |
* | | * | |
* ------------------ <==== top of Stack (thread.ksp)
* | UNUSED 1 word|
* ------------------ * ------------------
* | r25 | * | r25 | <==== top of Stack (thread.ksp)
* ~ ~ * ~ ~
* | --to-- | (CALLEE Regs of user mode) * | --to-- | (CALLEE Regs of user mode)
* | r13 | * | r13 |
...@@ -76,7 +74,10 @@ asmlinkage void ret_from_fork(void); ...@@ -76,7 +74,10 @@ asmlinkage void ret_from_fork(void);
* | --to-- | (scratch Regs of user mode) * | --to-- | (scratch Regs of user mode)
* | r0 | * | r0 |
* ------------------ * ------------------
* | UNUSED 1 word| * | SP |
* | orig_r0 |
* | event/ECR |
* | user_r25 |
* ------------------ <===== END of PAGE * ------------------ <===== END of PAGE
*/ */
int copy_thread(unsigned long clone_flags, int copy_thread(unsigned long clone_flags,
......
...@@ -40,7 +40,15 @@ static int genregs_get(struct task_struct *target, ...@@ -40,7 +40,15 @@ static int genregs_get(struct task_struct *target,
offsetof(struct user_regs_struct, LOC), \ offsetof(struct user_regs_struct, LOC), \
offsetof(struct user_regs_struct, LOC) + 4); offsetof(struct user_regs_struct, LOC) + 4);
#define REG_O_ZERO(LOC) \
if (!ret) \
ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf, \
offsetof(struct user_regs_struct, LOC), \
offsetof(struct user_regs_struct, LOC) + 4);
REG_O_ZERO(pad);
REG_O_CHUNK(scratch, callee, ptregs); REG_O_CHUNK(scratch, callee, ptregs);
REG_O_ZERO(pad2);
REG_O_CHUNK(callee, efa, cregs); REG_O_CHUNK(callee, efa, cregs);
REG_O_CHUNK(efa, stop_pc, &target->thread.fault_address); REG_O_CHUNK(efa, stop_pc, &target->thread.fault_address);
...@@ -88,8 +96,10 @@ static int genregs_set(struct task_struct *target, ...@@ -88,8 +96,10 @@ static int genregs_set(struct task_struct *target,
offsetof(struct user_regs_struct, LOC), \ offsetof(struct user_regs_struct, LOC), \
offsetof(struct user_regs_struct, LOC) + 4); offsetof(struct user_regs_struct, LOC) + 4);
/* TBD: disallow updates to STATUS32, orig_r8 etc*/ REG_IGNORE_ONE(pad);
REG_IN_CHUNK(scratch, callee, ptregs); /* pt_regs[bta..orig_r8] */ /* TBD: disallow updates to STATUS32 etc*/
REG_IN_CHUNK(scratch, pad2, ptregs); /* pt_regs[bta..sp] */
REG_IGNORE_ONE(pad2);
REG_IN_CHUNK(callee, efa, cregs); /* callee_regs[r25..r13] */ REG_IN_CHUNK(callee, efa, cregs); /* callee_regs[r25..r13] */
REG_IGNORE_ONE(efa); /* efa update invalid */ REG_IGNORE_ONE(efa); /* efa update invalid */
REG_IN_ONE(stop_pc, &ptregs->ret); /* stop_pc: PC update */ REG_IN_ONE(stop_pc, &ptregs->ret); /* stop_pc: PC update */
......
...@@ -31,14 +31,14 @@ ...@@ -31,14 +31,14 @@
int running_on_hw = 1; /* vs. on ISS */ int running_on_hw = 1; /* vs. on ISS */
char __initdata command_line[COMMAND_LINE_SIZE]; char __initdata command_line[COMMAND_LINE_SIZE];
struct machine_desc *machine_desc __cpuinitdata; struct machine_desc *machine_desc;
struct task_struct *_current_task[NR_CPUS]; /* For stack switching */ struct task_struct *_current_task[NR_CPUS]; /* For stack switching */
struct cpuinfo_arc cpuinfo_arc700[NR_CPUS]; struct cpuinfo_arc cpuinfo_arc700[NR_CPUS];
void __cpuinit read_arc_build_cfg_regs(void) void read_arc_build_cfg_regs(void)
{ {
struct bcr_perip uncached_space; struct bcr_perip uncached_space;
struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()]; struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()];
...@@ -182,7 +182,7 @@ char *arc_extn_mumbojumbo(int cpu_id, char *buf, int len) ...@@ -182,7 +182,7 @@ char *arc_extn_mumbojumbo(int cpu_id, char *buf, int len)
FIX_PTR(cpu); FIX_PTR(cpu);
#define IS_AVAIL1(var, str) ((var) ? str : "") #define IS_AVAIL1(var, str) ((var) ? str : "")
#define IS_AVAIL2(var, str) ((var == 0x2) ? str : "") #define IS_AVAIL2(var, str) ((var == 0x2) ? str : "")
#define IS_USED(var) ((var) ? "(in-use)" : "(not used)") #define IS_USED(cfg) (IS_ENABLED(cfg) ? "(in-use)" : "(not used)")
n += scnprintf(buf + n, len - n, n += scnprintf(buf + n, len - n,
"Extn [700-Base]\t: %s %s %s %s %s %s\n", "Extn [700-Base]\t: %s %s %s %s %s %s\n",
...@@ -202,9 +202,9 @@ char *arc_extn_mumbojumbo(int cpu_id, char *buf, int len) ...@@ -202,9 +202,9 @@ char *arc_extn_mumbojumbo(int cpu_id, char *buf, int len)
if (cpu->core.family == 0x34) { if (cpu->core.family == 0x34) {
n += scnprintf(buf + n, len - n, n += scnprintf(buf + n, len - n,
"Extn [700-4.10]\t: LLOCK/SCOND %s, SWAPE %s, RTSC %s\n", "Extn [700-4.10]\t: LLOCK/SCOND %s, SWAPE %s, RTSC %s\n",
IS_USED(__CONFIG_ARC_HAS_LLSC_VAL), IS_USED(CONFIG_ARC_HAS_LLSC),
IS_USED(__CONFIG_ARC_HAS_SWAPE_VAL), IS_USED(CONFIG_ARC_HAS_SWAPE),
IS_USED(__CONFIG_ARC_HAS_RTSC_VAL)); IS_USED(CONFIG_ARC_HAS_RTSC));
} }
n += scnprintf(buf + n, len - n, "Extn [CCM]\t: %s", n += scnprintf(buf + n, len - n, "Extn [CCM]\t: %s",
...@@ -237,7 +237,7 @@ char *arc_extn_mumbojumbo(int cpu_id, char *buf, int len) ...@@ -237,7 +237,7 @@ char *arc_extn_mumbojumbo(int cpu_id, char *buf, int len)
return buf; return buf;
} }
void __cpuinit arc_chk_ccms(void) void arc_chk_ccms(void)
{ {
#if defined(CONFIG_ARC_HAS_DCCM) || defined(CONFIG_ARC_HAS_ICCM) #if defined(CONFIG_ARC_HAS_DCCM) || defined(CONFIG_ARC_HAS_ICCM)
struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()]; struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()];
...@@ -272,7 +272,7 @@ void __cpuinit arc_chk_ccms(void) ...@@ -272,7 +272,7 @@ void __cpuinit arc_chk_ccms(void)
* hardware has dedicated regs which need to be saved/restored on ctx-sw * hardware has dedicated regs which need to be saved/restored on ctx-sw
* (Single Precision uses core regs), thus kernel is kind of oblivious to it * (Single Precision uses core regs), thus kernel is kind of oblivious to it
*/ */
void __cpuinit arc_chk_fpu(void) void arc_chk_fpu(void)
{ {
struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()]; struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()];
...@@ -293,7 +293,7 @@ void __cpuinit arc_chk_fpu(void) ...@@ -293,7 +293,7 @@ void __cpuinit arc_chk_fpu(void)
* such as only for boot CPU etc * such as only for boot CPU etc
*/ */
void __cpuinit setup_processor(void) void setup_processor(void)
{ {
char str[512]; char str[512];
int cpu_id = smp_processor_id(); int cpu_id = smp_processor_id();
......
...@@ -117,7 +117,7 @@ const char *arc_platform_smp_cpuinfo(void) ...@@ -117,7 +117,7 @@ const char *arc_platform_smp_cpuinfo(void)
* Called from asm stub in head.S * Called from asm stub in head.S
* "current"/R25 already setup by low level boot code * "current"/R25 already setup by low level boot code
*/ */
void __cpuinit start_kernel_secondary(void) void start_kernel_secondary(void)
{ {
struct mm_struct *mm = &init_mm; struct mm_struct *mm = &init_mm;
unsigned int cpu = smp_processor_id(); unsigned int cpu = smp_processor_id();
...@@ -154,7 +154,7 @@ void __cpuinit start_kernel_secondary(void) ...@@ -154,7 +154,7 @@ void __cpuinit start_kernel_secondary(void)
* *
* Essential requirements being where to run from (PC) and stack (SP) * Essential requirements being where to run from (PC) and stack (SP)
*/ */
int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *idle) int __cpu_up(unsigned int cpu, struct task_struct *idle)
{ {
unsigned long wait_till; unsigned long wait_till;
......
...@@ -79,7 +79,7 @@ static void seed_unwind_frame_info(struct task_struct *tsk, ...@@ -79,7 +79,7 @@ static void seed_unwind_frame_info(struct task_struct *tsk,
* assembly code * assembly code
*/ */
frame_info->regs.r27 = 0; frame_info->regs.r27 = 0;
frame_info->regs.r28 += 64; frame_info->regs.r28 += 60;
frame_info->call_frame = 0; frame_info->call_frame = 0;
} else { } else {
......
...@@ -44,13 +44,24 @@ ...@@ -44,13 +44,24 @@
#include <asm/clk.h> #include <asm/clk.h>
#include <asm/mach_desc.h> #include <asm/mach_desc.h>
/* Timer related Aux registers */
#define ARC_REG_TIMER0_LIMIT 0x23 /* timer 0 limit */
#define ARC_REG_TIMER0_CTRL 0x22 /* timer 0 control */
#define ARC_REG_TIMER0_CNT 0x21 /* timer 0 count */
#define ARC_REG_TIMER1_LIMIT 0x102 /* timer 1 limit */
#define ARC_REG_TIMER1_CTRL 0x101 /* timer 1 control */
#define ARC_REG_TIMER1_CNT 0x100 /* timer 1 count */
#define TIMER_CTRL_IE (1 << 0) /* Interupt when Count reachs limit */
#define TIMER_CTRL_NH (1 << 1) /* Count only when CPU NOT halted */
#define ARC_TIMER_MAX 0xFFFFFFFF #define ARC_TIMER_MAX 0xFFFFFFFF
/********** Clock Source Device *********/ /********** Clock Source Device *********/
#ifdef CONFIG_ARC_HAS_RTSC #ifdef CONFIG_ARC_HAS_RTSC
int __cpuinit arc_counter_setup(void) int arc_counter_setup(void)
{ {
/* RTSC insn taps into cpu clk, needs no setup */ /* RTSC insn taps into cpu clk, needs no setup */
...@@ -105,7 +116,7 @@ static bool is_usable_as_clocksource(void) ...@@ -105,7 +116,7 @@ static bool is_usable_as_clocksource(void)
/* /*
* set 32bit TIMER1 to keep counting monotonically and wraparound * set 32bit TIMER1 to keep counting monotonically and wraparound
*/ */
int __cpuinit arc_counter_setup(void) int arc_counter_setup(void)
{ {
write_aux_reg(ARC_REG_TIMER1_LIMIT, ARC_TIMER_MAX); write_aux_reg(ARC_REG_TIMER1_LIMIT, ARC_TIMER_MAX);
write_aux_reg(ARC_REG_TIMER1_CNT, 0); write_aux_reg(ARC_REG_TIMER1_CNT, 0);
...@@ -212,7 +223,7 @@ static struct irqaction arc_timer_irq = { ...@@ -212,7 +223,7 @@ static struct irqaction arc_timer_irq = {
* Setup the local event timer for @cpu * Setup the local event timer for @cpu
* N.B. weak so that some exotic ARC SoCs can completely override it * N.B. weak so that some exotic ARC SoCs can completely override it
*/ */
void __attribute__((weak)) __cpuinit arc_local_timer_setup(unsigned int cpu) void __attribute__((weak)) arc_local_timer_setup(unsigned int cpu)
{ {
struct clock_event_device *clk = &per_cpu(arc_clockevent_device, cpu); struct clock_event_device *clk = &per_cpu(arc_clockevent_device, cpu);
......
...@@ -28,10 +28,9 @@ void __init trap_init(void) ...@@ -28,10 +28,9 @@ void __init trap_init(void)
return; return;
} }
void die(const char *str, struct pt_regs *regs, unsigned long address, void die(const char *str, struct pt_regs *regs, unsigned long address)
unsigned long cause_reg)
{ {
show_kernel_fault_diag(str, regs, address, cause_reg); show_kernel_fault_diag(str, regs, address);
/* DEAD END */ /* DEAD END */
__asm__("flag 1"); __asm__("flag 1");
...@@ -42,14 +41,13 @@ void die(const char *str, struct pt_regs *regs, unsigned long address, ...@@ -42,14 +41,13 @@ void die(const char *str, struct pt_regs *regs, unsigned long address,
* -for user faults enqueues requested signal * -for user faults enqueues requested signal
* -for kernel, chk if due to copy_(to|from)_user, otherwise die() * -for kernel, chk if due to copy_(to|from)_user, otherwise die()
*/ */
static noinline int handle_exception(unsigned long cause, char *str, static noinline int
struct pt_regs *regs, siginfo_t *info) handle_exception(const char *str, struct pt_regs *regs, siginfo_t *info)
{ {
if (user_mode(regs)) { if (user_mode(regs)) {
struct task_struct *tsk = current; struct task_struct *tsk = current;
tsk->thread.fault_address = (__force unsigned int)info->si_addr; tsk->thread.fault_address = (__force unsigned int)info->si_addr;
tsk->thread.cause_code = cause;
force_sig_info(info->si_signo, info, tsk); force_sig_info(info->si_signo, info, tsk);
...@@ -58,14 +56,14 @@ static noinline int handle_exception(unsigned long cause, char *str, ...@@ -58,14 +56,14 @@ static noinline int handle_exception(unsigned long cause, char *str,
if (fixup_exception(regs)) if (fixup_exception(regs))
return 0; return 0;
die(str, regs, (unsigned long)info->si_addr, cause); die(str, regs, (unsigned long)info->si_addr);
} }
return 1; return 1;
} }
#define DO_ERROR_INFO(signr, str, name, sicode) \ #define DO_ERROR_INFO(signr, str, name, sicode) \
int name(unsigned long cause, unsigned long address, struct pt_regs *regs) \ int name(unsigned long address, struct pt_regs *regs) \
{ \ { \
siginfo_t info = { \ siginfo_t info = { \
.si_signo = signr, \ .si_signo = signr, \
...@@ -73,7 +71,7 @@ int name(unsigned long cause, unsigned long address, struct pt_regs *regs) \ ...@@ -73,7 +71,7 @@ int name(unsigned long cause, unsigned long address, struct pt_regs *regs) \
.si_code = sicode, \ .si_code = sicode, \
.si_addr = (void __user *)address, \ .si_addr = (void __user *)address, \
}; \ }; \
return handle_exception(cause, str, regs, &info);\ return handle_exception(str, regs, &info);\
} }
/* /*
...@@ -90,11 +88,11 @@ DO_ERROR_INFO(SIGBUS, "Misaligned Access", do_misaligned_error, BUS_ADRALN) ...@@ -90,11 +88,11 @@ DO_ERROR_INFO(SIGBUS, "Misaligned Access", do_misaligned_error, BUS_ADRALN)
/* /*
* Entry Point for Misaligned Data access Exception, for emulating in software * Entry Point for Misaligned Data access Exception, for emulating in software
*/ */
int do_misaligned_access(unsigned long cause, unsigned long address, int do_misaligned_access(unsigned long address, struct pt_regs *regs,
struct pt_regs *regs, struct callee_regs *cregs) struct callee_regs *cregs)
{ {
if (misaligned_fixup(address, regs, cause, cregs) != 0) if (misaligned_fixup(address, regs, cregs) != 0)
return do_misaligned_error(cause, address, regs); return do_misaligned_error(address, regs);
return 0; return 0;
} }
...@@ -104,10 +102,9 @@ int do_misaligned_access(unsigned long cause, unsigned long address, ...@@ -104,10 +102,9 @@ int do_misaligned_access(unsigned long cause, unsigned long address,
* Entry point for miscll errors such as Nested Exceptions * Entry point for miscll errors such as Nested Exceptions
* -Duplicate TLB entry is handled seperately though * -Duplicate TLB entry is handled seperately though
*/ */
void do_machine_check_fault(unsigned long cause, unsigned long address, void do_machine_check_fault(unsigned long address, struct pt_regs *regs)
struct pt_regs *regs)
{ {
die("Machine Check Exception", regs, address, cause); die("Machine Check Exception", regs, address);
} }
...@@ -120,23 +117,22 @@ void do_machine_check_fault(unsigned long cause, unsigned long address, ...@@ -120,23 +117,22 @@ void do_machine_check_fault(unsigned long cause, unsigned long address,
* -1 used for software breakpointing (gdb) * -1 used for software breakpointing (gdb)
* -2 used by kprobes * -2 used by kprobes
*/ */
void do_non_swi_trap(unsigned long cause, unsigned long address, void do_non_swi_trap(unsigned long address, struct pt_regs *regs)
struct pt_regs *regs)
{ {
unsigned int param = cause & 0xff; unsigned int param = regs->ecr_param;
switch (param) { switch (param) {
case 1: case 1:
trap_is_brkpt(cause, address, regs); trap_is_brkpt(address, regs);
break; break;
case 2: case 2:
trap_is_kprobe(param, address, regs); trap_is_kprobe(address, regs);
break; break;
case 3: case 3:
case 4: case 4:
kgdb_trap(regs, param); kgdb_trap(regs);
break; break;
default: default:
...@@ -149,14 +145,14 @@ void do_non_swi_trap(unsigned long cause, unsigned long address, ...@@ -149,14 +145,14 @@ void do_non_swi_trap(unsigned long cause, unsigned long address,
* -For a corner case, ARC kprobes implementation resorts to using * -For a corner case, ARC kprobes implementation resorts to using
* this exception, hence the check * this exception, hence the check
*/ */
void do_insterror_or_kprobe(unsigned long cause, void do_insterror_or_kprobe(unsigned long address, struct pt_regs *regs)
unsigned long address,
struct pt_regs *regs)
{ {
int rc;
/* Check if this exception is caused by kprobes */ /* Check if this exception is caused by kprobes */
if (notify_die(DIE_IERR, "kprobe_ierr", regs, address, rc = notify_die(DIE_IERR, "kprobe_ierr", regs, address, 0, SIGILL);
cause, SIGILL) == NOTIFY_STOP) if (rc == NOTIFY_STOP)
return; return;
insterror_is_error(cause, address, regs); insterror_is_error(address, regs);
} }
...@@ -117,23 +117,22 @@ static void show_faulting_vma(unsigned long address, char *buf) ...@@ -117,23 +117,22 @@ static void show_faulting_vma(unsigned long address, char *buf)
static void show_ecr_verbose(struct pt_regs *regs) static void show_ecr_verbose(struct pt_regs *regs)
{ {
unsigned int vec, cause_code, cause_reg; unsigned int vec, cause_code;
unsigned long address; unsigned long address;
cause_reg = current->thread.cause_code; pr_info("\n[ECR ]: 0x%08lx => ", regs->event);
pr_info("\n[ECR ]: 0x%08x => ", cause_reg);
/* For Data fault, this is data address not instruction addr */ /* For Data fault, this is data address not instruction addr */
address = current->thread.fault_address; address = current->thread.fault_address;
vec = cause_reg >> 16; vec = regs->ecr_vec;
cause_code = (cause_reg >> 8) & 0xFF; cause_code = regs->ecr_cause;
/* For DTLB Miss or ProtV, display the memory involved too */ /* For DTLB Miss or ProtV, display the memory involved too */
if (vec == ECR_V_DTLB_MISS) { if (vec == ECR_V_DTLB_MISS) {
pr_cont("Invalid %s 0x%08lx by insn @ 0x%08lx\n", pr_cont("Invalid %s @ 0x%08lx by insn @ 0x%08lx\n",
(cause_code == 0x01) ? "Read From" : (cause_code == 0x01) ? "Read" :
((cause_code == 0x02) ? "Write to" : "EX"), ((cause_code == 0x02) ? "Write" : "EX"),
address, regs->ret); address, regs->ret);
} else if (vec == ECR_V_ITLB_MISS) { } else if (vec == ECR_V_ITLB_MISS) {
pr_cont("Insn could not be fetched\n"); pr_cont("Insn could not be fetched\n");
...@@ -144,14 +143,12 @@ static void show_ecr_verbose(struct pt_regs *regs) ...@@ -144,14 +143,12 @@ static void show_ecr_verbose(struct pt_regs *regs)
} else if (vec == ECR_V_PROTV) { } else if (vec == ECR_V_PROTV) {
if (cause_code == ECR_C_PROTV_INST_FETCH) if (cause_code == ECR_C_PROTV_INST_FETCH)
pr_cont("Execute from Non-exec Page\n"); pr_cont("Execute from Non-exec Page\n");
else if (cause_code == ECR_C_PROTV_LOAD)
pr_cont("Read from Non-readable Page\n");
else if (cause_code == ECR_C_PROTV_STORE)
pr_cont("Write to Non-writable Page\n");
else if (cause_code == ECR_C_PROTV_XCHG)
pr_cont("Data exchange protection violation\n");
else if (cause_code == ECR_C_PROTV_MISALIG_DATA) else if (cause_code == ECR_C_PROTV_MISALIG_DATA)
pr_cont("Misaligned r/w from 0x%08lx\n", address); pr_cont("Misaligned r/w from 0x%08lx\n", address);
else
pr_cont("%s access not allowed on page\n",
(cause_code == 0x01) ? "Read" :
((cause_code == 0x02) ? "Write" : "EX"));
} else if (vec == ECR_V_INSN_ERR) { } else if (vec == ECR_V_INSN_ERR) {
pr_cont("Illegal Insn\n"); pr_cont("Illegal Insn\n");
} else { } else {
...@@ -176,8 +173,7 @@ void show_regs(struct pt_regs *regs) ...@@ -176,8 +173,7 @@ void show_regs(struct pt_regs *regs)
print_task_path_n_nm(tsk, buf); print_task_path_n_nm(tsk, buf);
show_regs_print_info(KERN_INFO); show_regs_print_info(KERN_INFO);
if (current->thread.cause_code) show_ecr_verbose(regs);
show_ecr_verbose(regs);
pr_info("[EFA ]: 0x%08lx\n[BLINK ]: %pS\n[ERET ]: %pS\n", pr_info("[EFA ]: 0x%08lx\n[BLINK ]: %pS\n[ERET ]: %pS\n",
current->thread.fault_address, current->thread.fault_address,
...@@ -213,10 +209,9 @@ void show_regs(struct pt_regs *regs) ...@@ -213,10 +209,9 @@ void show_regs(struct pt_regs *regs)
} }
void show_kernel_fault_diag(const char *str, struct pt_regs *regs, void show_kernel_fault_diag(const char *str, struct pt_regs *regs,
unsigned long address, unsigned long cause_reg) unsigned long address)
{ {
current->thread.fault_address = address; current->thread.fault_address = address;
current->thread.cause_code = cause_reg;
/* Caller and Callee regs */ /* Caller and Callee regs */
show_regs(regs); show_regs(regs);
......
...@@ -187,7 +187,7 @@ fault: state->fault = 1; ...@@ -187,7 +187,7 @@ fault: state->fault = 1;
* Returns 0 if successfully handled, 1 if some error happened * Returns 0 if successfully handled, 1 if some error happened
*/ */
int misaligned_fixup(unsigned long address, struct pt_regs *regs, int misaligned_fixup(unsigned long address, struct pt_regs *regs,
unsigned long cause, struct callee_regs *cregs) struct callee_regs *cregs)
{ {
struct disasm_state state; struct disasm_state state;
char buf[TASK_COMM_LEN]; char buf[TASK_COMM_LEN];
......
...@@ -289,6 +289,8 @@ static void __init setup_unwind_table(struct unwind_table *table, ...@@ -289,6 +289,8 @@ static void __init setup_unwind_table(struct unwind_table *table,
* instead of the initial loc addr * instead of the initial loc addr
* return; * return;
*/ */
WARN(1, "unwinder: FDE->initial_location NULL %p\n",
(const u8 *)(fde + 1) + *fde);
} }
++n; ++n;
} }
......
...@@ -125,6 +125,11 @@ SECTIONS ...@@ -125,6 +125,11 @@ SECTIONS
*(.debug_frame) *(.debug_frame)
__end_unwind = .; __end_unwind = .;
} }
/*
* gcc 4.8 generates this for -fasynchonous-unwind-tables,
* while we still use the .debug_frame based unwinder
*/
/DISCARD/ : { *(.eh_frame) }
#else #else
/DISCARD/ : { *(.debug_frame) } /DISCARD/ : { *(.debug_frame) }
#endif #endif
...@@ -142,15 +147,18 @@ SECTIONS ...@@ -142,15 +147,18 @@ SECTIONS
*(.arcextmap.*) *(.arcextmap.*)
} }
#ifndef CONFIG_DEBUG_INFO
/* open-coded because we need .debug_frame seperately for unwinding */ /* open-coded because we need .debug_frame seperately for unwinding */
.debug_aranges 0 : { *(.debug_aranges) } /DISCARD/ : { *(.debug_aranges) }
.debug_pubnames 0 : { *(.debug_pubnames) } /DISCARD/ : { *(.debug_pubnames) }
.debug_info 0 : { *(.debug_info) } /DISCARD/ : { *(.debug_info) }
.debug_abbrev 0 : { *(.debug_abbrev) } /DISCARD/ : { *(.debug_abbrev) }
.debug_line 0 : { *(.debug_line) } /DISCARD/ : { *(.debug_line) }
.debug_str 0 : { *(.debug_str) } /DISCARD/ : { *(.debug_str) }
.debug_loc 0 : { *(.debug_loc) } /DISCARD/ : { *(.debug_loc) }
.debug_macinfo 0 : { *(.debug_macinfo) } /DISCARD/ : { *(.debug_macinfo) }
/DISCARD/ : { *(.debug_ranges) }
#endif
#ifdef CONFIG_ARC_HAS_DCCM #ifdef CONFIG_ARC_HAS_DCCM
. = CONFIG_ARC_DCCM_BASE; . = CONFIG_ARC_DCCM_BASE;
......
...@@ -73,6 +73,33 @@ ...@@ -73,6 +73,33 @@
#include <asm/cachectl.h> #include <asm/cachectl.h>
#include <asm/setup.h> #include <asm/setup.h>
/* Instruction cache related Auxiliary registers */
#define ARC_REG_IC_BCR 0x77 /* Build Config reg */
#define ARC_REG_IC_IVIC 0x10
#define ARC_REG_IC_CTRL 0x11
#define ARC_REG_IC_IVIL 0x19
#if (CONFIG_ARC_MMU_VER > 2)
#define ARC_REG_IC_PTAG 0x1E
#endif
/* Bit val in IC_CTRL */
#define IC_CTRL_CACHE_DISABLE 0x1
/* Data cache related Auxiliary registers */
#define ARC_REG_DC_BCR 0x72 /* Build Config reg */
#define ARC_REG_DC_IVDC 0x47
#define ARC_REG_DC_CTRL 0x48
#define ARC_REG_DC_IVDL 0x4A
#define ARC_REG_DC_FLSH 0x4B
#define ARC_REG_DC_FLDL 0x4C
#if (CONFIG_ARC_MMU_VER > 2)
#define ARC_REG_DC_PTAG 0x5C
#endif
/* Bit val in DC_CTRL */
#define DC_CTRL_INV_MODE_FLUSH 0x40
#define DC_CTRL_FLUSH_STATUS 0x100
char *arc_cache_mumbojumbo(int cpu_id, char *buf, int len) char *arc_cache_mumbojumbo(int cpu_id, char *buf, int len)
{ {
int n = 0; int n = 0;
...@@ -89,8 +116,10 @@ char *arc_cache_mumbojumbo(int cpu_id, char *buf, int len) ...@@ -89,8 +116,10 @@ char *arc_cache_mumbojumbo(int cpu_id, char *buf, int len)
enb ? "" : "DISABLED (kernel-build)"); \ enb ? "" : "DISABLED (kernel-build)"); \
} }
PR_CACHE(&cpuinfo_arc700[c].icache, __CONFIG_ARC_HAS_ICACHE, "I-Cache"); PR_CACHE(&cpuinfo_arc700[c].icache, IS_ENABLED(CONFIG_ARC_HAS_ICACHE),
PR_CACHE(&cpuinfo_arc700[c].dcache, __CONFIG_ARC_HAS_DCACHE, "D-Cache"); "I-Cache");
PR_CACHE(&cpuinfo_arc700[c].dcache, IS_ENABLED(CONFIG_ARC_HAS_DCACHE),
"D-Cache");
return buf; return buf;
} }
...@@ -100,17 +129,23 @@ char *arc_cache_mumbojumbo(int cpu_id, char *buf, int len) ...@@ -100,17 +129,23 @@ char *arc_cache_mumbojumbo(int cpu_id, char *buf, int len)
* the cpuinfo structure for later use. * the cpuinfo structure for later use.
* No Validation done here, simply read/convert the BCRs * No Validation done here, simply read/convert the BCRs
*/ */
void __cpuinit read_decode_cache_bcr(void) void read_decode_cache_bcr(void)
{ {
struct bcr_cache ibcr, dbcr;
struct cpuinfo_arc_cache *p_ic, *p_dc; struct cpuinfo_arc_cache *p_ic, *p_dc;
unsigned int cpu = smp_processor_id(); unsigned int cpu = smp_processor_id();
struct bcr_cache {
#ifdef CONFIG_CPU_BIG_ENDIAN
unsigned int pad:12, line_len:4, sz:4, config:4, ver:8;
#else
unsigned int ver:8, config:4, sz:4, line_len:4, pad:12;
#endif
} ibcr, dbcr;
p_ic = &cpuinfo_arc700[cpu].icache; p_ic = &cpuinfo_arc700[cpu].icache;
READ_BCR(ARC_REG_IC_BCR, ibcr); READ_BCR(ARC_REG_IC_BCR, ibcr);
if (ibcr.config == 0x3) BUG_ON(ibcr.config != 3);
p_ic->assoc = 2; p_ic->assoc = 2; /* Fixed to 2w set assoc */
p_ic->line_len = 8 << ibcr.line_len; p_ic->line_len = 8 << ibcr.line_len;
p_ic->sz = 0x200 << ibcr.sz; p_ic->sz = 0x200 << ibcr.sz;
p_ic->ver = ibcr.ver; p_ic->ver = ibcr.ver;
...@@ -118,8 +153,8 @@ void __cpuinit read_decode_cache_bcr(void) ...@@ -118,8 +153,8 @@ void __cpuinit read_decode_cache_bcr(void)
p_dc = &cpuinfo_arc700[cpu].dcache; p_dc = &cpuinfo_arc700[cpu].dcache;
READ_BCR(ARC_REG_DC_BCR, dbcr); READ_BCR(ARC_REG_DC_BCR, dbcr);
if (dbcr.config == 0x2) BUG_ON(dbcr.config != 2);
p_dc->assoc = 4; p_dc->assoc = 4; /* Fixed to 4w set assoc */
p_dc->line_len = 16 << dbcr.line_len; p_dc->line_len = 16 << dbcr.line_len;
p_dc->sz = 0x200 << dbcr.sz; p_dc->sz = 0x200 << dbcr.sz;
p_dc->ver = dbcr.ver; p_dc->ver = dbcr.ver;
...@@ -132,14 +167,12 @@ void __cpuinit read_decode_cache_bcr(void) ...@@ -132,14 +167,12 @@ void __cpuinit read_decode_cache_bcr(void)
* 3. Enable the Caches, setup default flush mode for D-Cache * 3. Enable the Caches, setup default flush mode for D-Cache
* 3. Calculate the SHMLBA used by user space * 3. Calculate the SHMLBA used by user space
*/ */
void __cpuinit arc_cache_init(void) void arc_cache_init(void)
{ {
unsigned int temp;
unsigned int cpu = smp_processor_id(); unsigned int cpu = smp_processor_id();
struct cpuinfo_arc_cache *ic = &cpuinfo_arc700[cpu].icache; struct cpuinfo_arc_cache *ic = &cpuinfo_arc700[cpu].icache;
struct cpuinfo_arc_cache *dc = &cpuinfo_arc700[cpu].dcache; struct cpuinfo_arc_cache *dc = &cpuinfo_arc700[cpu].dcache;
int way_pg_ratio = way_pg_ratio; unsigned int dcache_does_alias, temp;
int dcache_does_alias;
char str[256]; char str[256];
printk(arc_cache_mumbojumbo(0, str, sizeof(str))); printk(arc_cache_mumbojumbo(0, str, sizeof(str)));
...@@ -149,20 +182,11 @@ void __cpuinit arc_cache_init(void) ...@@ -149,20 +182,11 @@ void __cpuinit arc_cache_init(void)
#ifdef CONFIG_ARC_HAS_ICACHE #ifdef CONFIG_ARC_HAS_ICACHE
/* 1. Confirm some of I-cache params which Linux assumes */ /* 1. Confirm some of I-cache params which Linux assumes */
if ((ic->assoc != ARC_ICACHE_WAYS) || if (ic->line_len != ARC_ICACHE_LINE_LEN)
(ic->line_len != ARC_ICACHE_LINE_LEN)) {
panic("Cache H/W doesn't match kernel Config"); panic("Cache H/W doesn't match kernel Config");
}
#if (CONFIG_ARC_MMU_VER > 2)
if (ic->ver != 3) {
if (running_on_hw)
panic("Cache ver doesn't match MMU ver\n");
/* For ISS - suggest the toggles to use */
pr_err("Use -prop=icache_version=3,-prop=dcache_version=3\n");
} if (ic->ver != CONFIG_ARC_MMU_VER)
#endif panic("Cache ver doesn't match MMU ver\n");
#endif #endif
/* Enable/disable I-Cache */ /* Enable/disable I-Cache */
...@@ -181,14 +205,12 @@ void __cpuinit arc_cache_init(void) ...@@ -181,14 +205,12 @@ void __cpuinit arc_cache_init(void)
return; return;
#ifdef CONFIG_ARC_HAS_DCACHE #ifdef CONFIG_ARC_HAS_DCACHE
if ((dc->assoc != ARC_DCACHE_WAYS) || if (dc->line_len != ARC_DCACHE_LINE_LEN)
(dc->line_len != ARC_DCACHE_LINE_LEN)) {
panic("Cache H/W doesn't match kernel Config"); panic("Cache H/W doesn't match kernel Config");
}
dcache_does_alias = (dc->sz / ARC_DCACHE_WAYS) > PAGE_SIZE;
/* check for D-Cache aliasing */ /* check for D-Cache aliasing */
dcache_does_alias = (dc->sz / dc->assoc) > PAGE_SIZE;
if (dcache_does_alias && !cache_is_vipt_aliasing()) if (dcache_does_alias && !cache_is_vipt_aliasing())
panic("Enable CONFIG_ARC_CACHE_VIPT_ALIASING\n"); panic("Enable CONFIG_ARC_CACHE_VIPT_ALIASING\n");
else if (!dcache_does_alias && cache_is_vipt_aliasing()) else if (!dcache_does_alias && cache_is_vipt_aliasing())
...@@ -239,11 +261,9 @@ static inline void wait_for_flush(void) ...@@ -239,11 +261,9 @@ static inline void wait_for_flush(void)
*/ */
static inline void __dc_entire_op(const int cacheop) static inline void __dc_entire_op(const int cacheop)
{ {
unsigned long flags, tmp = tmp; unsigned int tmp = tmp;
int aux; int aux;
local_irq_save(flags);
if (cacheop == OP_FLUSH_N_INV) { if (cacheop == OP_FLUSH_N_INV) {
/* Dcache provides 2 cmd: FLUSH or INV /* Dcache provides 2 cmd: FLUSH or INV
* INV inturn has sub-modes: DISCARD or FLUSH-BEFORE * INV inturn has sub-modes: DISCARD or FLUSH-BEFORE
...@@ -267,8 +287,6 @@ static inline void __dc_entire_op(const int cacheop) ...@@ -267,8 +287,6 @@ static inline void __dc_entire_op(const int cacheop)
/* Switch back the DISCARD ONLY Invalidate mode */ /* Switch back the DISCARD ONLY Invalidate mode */
if (cacheop == OP_FLUSH_N_INV) if (cacheop == OP_FLUSH_N_INV)
write_aux_reg(ARC_REG_DC_CTRL, tmp & ~DC_CTRL_INV_MODE_FLUSH); write_aux_reg(ARC_REG_DC_CTRL, tmp & ~DC_CTRL_INV_MODE_FLUSH);
local_irq_restore(flags);
} }
/* /*
...@@ -459,8 +477,15 @@ static void __ic_line_inv_vaddr(unsigned long paddr, unsigned long vaddr, ...@@ -459,8 +477,15 @@ static void __ic_line_inv_vaddr(unsigned long paddr, unsigned long vaddr,
local_irq_restore(flags); local_irq_restore(flags);
} }
static inline void __ic_entire_inv(void)
{
write_aux_reg(ARC_REG_IC_IVIC, 1);
read_aux_reg(ARC_REG_IC_CTRL); /* blocks */
}
#else #else
#define __ic_entire_inv()
#define __ic_line_inv_vaddr(pstart, vstart, sz) #define __ic_line_inv_vaddr(pstart, vstart, sz)
#endif /* CONFIG_ARC_HAS_ICACHE */ #endif /* CONFIG_ARC_HAS_ICACHE */
...@@ -487,7 +512,7 @@ void flush_dcache_page(struct page *page) ...@@ -487,7 +512,7 @@ void flush_dcache_page(struct page *page)
struct address_space *mapping; struct address_space *mapping;
if (!cache_is_vipt_aliasing()) { if (!cache_is_vipt_aliasing()) {
set_bit(PG_arch_1, &page->flags); clear_bit(PG_dc_clean, &page->flags);
return; return;
} }
...@@ -501,7 +526,7 @@ void flush_dcache_page(struct page *page) ...@@ -501,7 +526,7 @@ void flush_dcache_page(struct page *page)
* Make a note that K-mapping is dirty * Make a note that K-mapping is dirty
*/ */
if (!mapping_mapped(mapping)) { if (!mapping_mapped(mapping)) {
set_bit(PG_arch_1, &page->flags); clear_bit(PG_dc_clean, &page->flags);
} else if (page_mapped(page)) { } else if (page_mapped(page)) {
/* kernel reading from page with U-mapping */ /* kernel reading from page with U-mapping */
...@@ -629,26 +654,13 @@ void ___flush_dcache_page(unsigned long paddr, unsigned long vaddr) ...@@ -629,26 +654,13 @@ void ___flush_dcache_page(unsigned long paddr, unsigned long vaddr)
__dc_line_op(paddr, vaddr & PAGE_MASK, PAGE_SIZE, OP_FLUSH_N_INV); __dc_line_op(paddr, vaddr & PAGE_MASK, PAGE_SIZE, OP_FLUSH_N_INV);
} }
void flush_icache_all(void)
{
unsigned long flags;
local_irq_save(flags);
write_aux_reg(ARC_REG_IC_IVIC, 1);
/* lr will not complete till the icache inv operation is not over */
read_aux_reg(ARC_REG_IC_CTRL);
local_irq_restore(flags);
}
noinline void flush_cache_all(void) noinline void flush_cache_all(void)
{ {
unsigned long flags; unsigned long flags;
local_irq_save(flags); local_irq_save(flags);
flush_icache_all(); __ic_entire_inv();
__dc_entire_op(OP_FLUSH_N_INV); __dc_entire_op(OP_FLUSH_N_INV);
local_irq_restore(flags); local_irq_restore(flags);
...@@ -667,7 +679,12 @@ void flush_cache_page(struct vm_area_struct *vma, unsigned long u_vaddr, ...@@ -667,7 +679,12 @@ void flush_cache_page(struct vm_area_struct *vma, unsigned long u_vaddr,
{ {
unsigned int paddr = pfn << PAGE_SHIFT; unsigned int paddr = pfn << PAGE_SHIFT;
__sync_icache_dcache(paddr, u_vaddr, PAGE_SIZE); u_vaddr &= PAGE_MASK;
___flush_dcache_page(paddr, u_vaddr);
if (vma->vm_flags & VM_EXEC)
__inv_icache_page(paddr, u_vaddr);
} }
void flush_cache_range(struct vm_area_struct *vma, unsigned long start, void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
...@@ -717,7 +734,7 @@ void copy_user_highpage(struct page *to, struct page *from, ...@@ -717,7 +734,7 @@ void copy_user_highpage(struct page *to, struct page *from,
* non copied user pages (e.g. read faults which wire in pagecache page * non copied user pages (e.g. read faults which wire in pagecache page
* directly). * directly).
*/ */
set_bit(PG_arch_1, &to->flags); clear_bit(PG_dc_clean, &to->flags);
/* /*
* if SRC was already usermapped and non-congruent to kernel mapping * if SRC was already usermapped and non-congruent to kernel mapping
...@@ -725,15 +742,16 @@ void copy_user_highpage(struct page *to, struct page *from, ...@@ -725,15 +742,16 @@ void copy_user_highpage(struct page *to, struct page *from,
*/ */
if (clean_src_k_mappings) { if (clean_src_k_mappings) {
__flush_dcache_page(kfrom, kfrom); __flush_dcache_page(kfrom, kfrom);
set_bit(PG_dc_clean, &from->flags);
} else { } else {
set_bit(PG_arch_1, &from->flags); clear_bit(PG_dc_clean, &from->flags);
} }
} }
void clear_user_page(void *to, unsigned long u_vaddr, struct page *page) void clear_user_page(void *to, unsigned long u_vaddr, struct page *page)
{ {
clear_page(to); clear_page(to);
set_bit(PG_arch_1, &page->flags); clear_bit(PG_dc_clean, &page->flags);
} }
......
...@@ -15,6 +15,7 @@ ...@@ -15,6 +15,7 @@
#include <linux/uaccess.h> #include <linux/uaccess.h>
#include <linux/kdebug.h> #include <linux/kdebug.h>
#include <asm/pgalloc.h> #include <asm/pgalloc.h>
#include <asm/mmu.h>
static int handle_vmalloc_fault(struct mm_struct *mm, unsigned long address) static int handle_vmalloc_fault(struct mm_struct *mm, unsigned long address)
{ {
...@@ -51,14 +52,14 @@ static int handle_vmalloc_fault(struct mm_struct *mm, unsigned long address) ...@@ -51,14 +52,14 @@ static int handle_vmalloc_fault(struct mm_struct *mm, unsigned long address)
return 1; return 1;
} }
void do_page_fault(struct pt_regs *regs, int write, unsigned long address, void do_page_fault(struct pt_regs *regs, unsigned long address)
unsigned long cause_code)
{ {
struct vm_area_struct *vma = NULL; struct vm_area_struct *vma = NULL;
struct task_struct *tsk = current; struct task_struct *tsk = current;
struct mm_struct *mm = tsk->mm; struct mm_struct *mm = tsk->mm;
siginfo_t info; siginfo_t info;
int fault, ret; int fault, ret;
int write = regs->ecr_cause & ECR_C_PROTV_STORE; /* ST/EX */
unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE | unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
(write ? FAULT_FLAG_WRITE : 0); (write ? FAULT_FLAG_WRITE : 0);
...@@ -109,7 +110,8 @@ void do_page_fault(struct pt_regs *regs, int write, unsigned long address, ...@@ -109,7 +110,8 @@ void do_page_fault(struct pt_regs *regs, int write, unsigned long address,
/* Handle protection violation, execute on heap or stack */ /* Handle protection violation, execute on heap or stack */
if (cause_code == ((ECR_V_PROTV << 16) | ECR_C_PROTV_INST_FETCH)) if ((regs->ecr_vec == ECR_V_PROTV) &&
(regs->ecr_cause == ECR_C_PROTV_INST_FETCH))
goto bad_area; goto bad_area;
if (write) { if (write) {
...@@ -176,7 +178,6 @@ void do_page_fault(struct pt_regs *regs, int write, unsigned long address, ...@@ -176,7 +178,6 @@ void do_page_fault(struct pt_regs *regs, int write, unsigned long address,
/* User mode accesses just cause a SIGSEGV */ /* User mode accesses just cause a SIGSEGV */
if (user_mode(regs)) { if (user_mode(regs)) {
tsk->thread.fault_address = address; tsk->thread.fault_address = address;
tsk->thread.cause_code = cause_code;
info.si_signo = SIGSEGV; info.si_signo = SIGSEGV;
info.si_errno = 0; info.si_errno = 0;
/* info.si_code has been set above */ /* info.si_code has been set above */
...@@ -197,7 +198,7 @@ void do_page_fault(struct pt_regs *regs, int write, unsigned long address, ...@@ -197,7 +198,7 @@ void do_page_fault(struct pt_regs *regs, int write, unsigned long address,
if (fixup_exception(regs)) if (fixup_exception(regs))
return; return;
die("Oops", regs, address, cause_code); die("Oops", regs, address);
out_of_memory: out_of_memory:
if (is_global_init(tsk)) { if (is_global_init(tsk)) {
...@@ -218,7 +219,6 @@ void do_page_fault(struct pt_regs *regs, int write, unsigned long address, ...@@ -218,7 +219,6 @@ void do_page_fault(struct pt_regs *regs, int write, unsigned long address,
goto no_context; goto no_context;
tsk->thread.fault_address = address; tsk->thread.fault_address = address;
tsk->thread.cause_code = cause_code;
info.si_signo = SIGBUS; info.si_signo = SIGBUS;
info.si_errno = 0; info.si_errno = 0;
info.si_code = BUS_ADRERR; info.si_code = BUS_ADRERR;
......
...@@ -55,7 +55,7 @@ ...@@ -55,7 +55,7 @@
#include <asm/arcregs.h> #include <asm/arcregs.h>
#include <asm/setup.h> #include <asm/setup.h>
#include <asm/mmu_context.h> #include <asm/mmu_context.h>
#include <asm/tlb.h> #include <asm/mmu.h>
/* Need for ARC MMU v2 /* Need for ARC MMU v2
* *
...@@ -97,6 +97,7 @@ ...@@ -97,6 +97,7 @@
* J-TLB entry got evicted/replaced. * J-TLB entry got evicted/replaced.
*/ */
/* A copy of the ASID from the PID reg is kept in asid_cache */ /* A copy of the ASID from the PID reg is kept in asid_cache */
int asid_cache = FIRST_ASID; int asid_cache = FIRST_ASID;
...@@ -432,9 +433,14 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long vaddr_unaligned, ...@@ -432,9 +433,14 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long vaddr_unaligned,
{ {
unsigned long vaddr = vaddr_unaligned & PAGE_MASK; unsigned long vaddr = vaddr_unaligned & PAGE_MASK;
unsigned long paddr = pte_val(*ptep) & PAGE_MASK; unsigned long paddr = pte_val(*ptep) & PAGE_MASK;
struct page *page = pfn_to_page(pte_pfn(*ptep));
create_tlb(vma, vaddr, ptep); create_tlb(vma, vaddr, ptep);
if (page == ZERO_PAGE(0)) {
return;
}
/* /*
* Exec page : Independent of aliasing/page-color considerations, * Exec page : Independent of aliasing/page-color considerations,
* since icache doesn't snoop dcache on ARC, any dirty * since icache doesn't snoop dcache on ARC, any dirty
...@@ -446,9 +452,8 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long vaddr_unaligned, ...@@ -446,9 +452,8 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long vaddr_unaligned,
*/ */
if ((vma->vm_flags & VM_EXEC) || if ((vma->vm_flags & VM_EXEC) ||
addr_not_cache_congruent(paddr, vaddr)) { addr_not_cache_congruent(paddr, vaddr)) {
struct page *page = pfn_to_page(pte_pfn(*ptep));
int dirty = test_and_clear_bit(PG_arch_1, &page->flags); int dirty = !test_and_set_bit(PG_dc_clean, &page->flags);
if (dirty) { if (dirty) {
/* wback + inv dcache lines */ /* wback + inv dcache lines */
__flush_dcache_page(paddr, paddr); __flush_dcache_page(paddr, paddr);
...@@ -464,12 +469,27 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long vaddr_unaligned, ...@@ -464,12 +469,27 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long vaddr_unaligned,
* the cpuinfo structure for later use. * the cpuinfo structure for later use.
* No Validation is done here, simply read/convert the BCRs * No Validation is done here, simply read/convert the BCRs
*/ */
void __cpuinit read_decode_mmu_bcr(void) void read_decode_mmu_bcr(void)
{ {
unsigned int tmp;
struct bcr_mmu_1_2 *mmu2; /* encoded MMU2 attr */
struct bcr_mmu_3 *mmu3; /* encoded MMU3 attr */
struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu; struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;
unsigned int tmp;
struct bcr_mmu_1_2 {
#ifdef CONFIG_CPU_BIG_ENDIAN
unsigned int ver:8, ways:4, sets:4, u_itlb:8, u_dtlb:8;
#else
unsigned int u_dtlb:8, u_itlb:8, sets:4, ways:4, ver:8;
#endif
} *mmu2;
struct bcr_mmu_3 {
#ifdef CONFIG_CPU_BIG_ENDIAN
unsigned int ver:8, ways:4, sets:4, osm:1, reserv:3, pg_sz:4,
u_itlb:4, u_dtlb:4;
#else
unsigned int u_dtlb:4, u_itlb:4, pg_sz:4, reserv:3, osm:1, sets:4,
ways:4, ver:8;
#endif
} *mmu3;
tmp = read_aux_reg(ARC_REG_MMU_BCR); tmp = read_aux_reg(ARC_REG_MMU_BCR);
mmu->ver = (tmp >> 24); mmu->ver = (tmp >> 24);
...@@ -505,12 +525,12 @@ char *arc_mmu_mumbojumbo(int cpu_id, char *buf, int len) ...@@ -505,12 +525,12 @@ char *arc_mmu_mumbojumbo(int cpu_id, char *buf, int len)
"J-TLB %d (%dx%d), uDTLB %d, uITLB %d, %s\n", "J-TLB %d (%dx%d), uDTLB %d, uITLB %d, %s\n",
p_mmu->num_tlb, p_mmu->sets, p_mmu->ways, p_mmu->num_tlb, p_mmu->sets, p_mmu->ways,
p_mmu->u_dtlb, p_mmu->u_itlb, p_mmu->u_dtlb, p_mmu->u_itlb,
__CONFIG_ARC_MMU_SASID_VAL ? "SASID" : ""); IS_ENABLED(CONFIG_ARC_MMU_SASID) ? "SASID" : "");
return buf; return buf;
} }
void __cpuinit arc_mmu_init(void) void arc_mmu_init(void)
{ {
char str[256]; char str[256];
struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu; struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;
......
...@@ -39,7 +39,7 @@ ...@@ -39,7 +39,7 @@
#include <linux/linkage.h> #include <linux/linkage.h>
#include <asm/entry.h> #include <asm/entry.h>
#include <asm/tlb.h> #include <asm/mmu.h>
#include <asm/pgtable.h> #include <asm/pgtable.h>
#include <asm/arcregs.h> #include <asm/arcregs.h>
#include <asm/cache.h> #include <asm/cache.h>
...@@ -147,9 +147,9 @@ ex_saved_reg1: ...@@ -147,9 +147,9 @@ ex_saved_reg1:
#ifdef CONFIG_ARC_DBG_TLB_MISS_COUNT #ifdef CONFIG_ARC_DBG_TLB_MISS_COUNT
and.f 0, r0, _PAGE_PRESENT and.f 0, r0, _PAGE_PRESENT
bz 1f bz 1f
ld r2, [num_pte_not_present] ld r3, [num_pte_not_present]
add r2, r2, 1 add r3, r3, 1
st r2, [num_pte_not_present] st r3, [num_pte_not_present]
1: 1:
#endif #endif
...@@ -271,22 +271,22 @@ ARC_ENTRY EV_TLBMissI ...@@ -271,22 +271,22 @@ ARC_ENTRY EV_TLBMissI
#endif #endif
;---------------------------------------------------------------- ;----------------------------------------------------------------
; Get the PTE corresponding to V-addr accessed ; Get the PTE corresponding to V-addr accessed, r2 is setup with EFA
LOAD_FAULT_PTE LOAD_FAULT_PTE
;---------------------------------------------------------------- ;----------------------------------------------------------------
; VERIFY_PTE: Check if PTE permissions approp for executing code ; VERIFY_PTE: Check if PTE permissions approp for executing code
cmp_s r2, VMALLOC_START cmp_s r2, VMALLOC_START
mov.lo r2, (_PAGE_PRESENT | _PAGE_U_READ | _PAGE_U_EXECUTE) mov.lo r2, (_PAGE_PRESENT | _PAGE_U_EXECUTE)
mov.hs r2, (_PAGE_PRESENT | _PAGE_K_READ | _PAGE_K_EXECUTE) mov.hs r2, (_PAGE_PRESENT | _PAGE_K_EXECUTE)
and r3, r0, r2 ; Mask out NON Flag bits from PTE and r3, r0, r2 ; Mask out NON Flag bits from PTE
xor.f r3, r3, r2 ; check ( ( pte & flags_test ) == flags_test ) xor.f r3, r3, r2 ; check ( ( pte & flags_test ) == flags_test )
bnz do_slow_path_pf bnz do_slow_path_pf
; Let Linux VM know that the page was accessed ; Let Linux VM know that the page was accessed
or r0, r0, (_PAGE_PRESENT | _PAGE_ACCESSED) ; set Accessed Bit or r0, r0, _PAGE_ACCESSED ; set Accessed Bit
st_s r0, [r1] ; Write back PTE st_s r0, [r1] ; Write back PTE
CONV_PTE_TO_TLB CONV_PTE_TO_TLB
COMMIT_ENTRY_TO_MMU COMMIT_ENTRY_TO_MMU
...@@ -311,7 +311,7 @@ ARC_ENTRY EV_TLBMissD ...@@ -311,7 +311,7 @@ ARC_ENTRY EV_TLBMissD
;---------------------------------------------------------------- ;----------------------------------------------------------------
; Get the PTE corresponding to V-addr accessed ; Get the PTE corresponding to V-addr accessed
; If PTE exists, it will setup, r0 = PTE, r1 = Ptr to PTE ; If PTE exists, it will setup, r0 = PTE, r1 = Ptr to PTE, r2 = EFA
LOAD_FAULT_PTE LOAD_FAULT_PTE
;---------------------------------------------------------------- ;----------------------------------------------------------------
...@@ -345,7 +345,7 @@ ARC_ENTRY EV_TLBMissD ...@@ -345,7 +345,7 @@ ARC_ENTRY EV_TLBMissD
;---------------------------------------------------------------- ;----------------------------------------------------------------
; UPDATE_PTE: Let Linux VM know that page was accessed/dirty ; UPDATE_PTE: Let Linux VM know that page was accessed/dirty
lr r3, [ecr] lr r3, [ecr]
or r0, r0, (_PAGE_PRESENT | _PAGE_ACCESSED) ; Accessed bit always or r0, r0, _PAGE_ACCESSED ; Accessed bit always
btst_s r3, ECR_C_BIT_DTLB_ST_MISS ; See if it was a Write Access ? btst_s r3, ECR_C_BIT_DTLB_ST_MISS ; See if it was a Write Access ?
or.nz r0, r0, _PAGE_MODIFIED ; if Write, set Dirty bit as well or.nz r0, r0, _PAGE_MODIFIED ; if Write, set Dirty bit as well
st_s r0, [r1] ; Write back PTE st_s r0, [r1] ; Write back PTE
...@@ -381,18 +381,7 @@ do_slow_path_pf: ...@@ -381,18 +381,7 @@ do_slow_path_pf:
; ------- setup args for Linux Page fault Hanlder --------- ; ------- setup args for Linux Page fault Hanlder ---------
mov_s r0, sp mov_s r0, sp
lr r2, [efa] lr r1, [efa]
lr r3, [ecr]
; Both st and ex imply WRITE access of some sort, hence do_page_fault( )
; invoked with write=1 for DTLB-st/ex Miss and write=0 for ITLB miss or
; DTLB-ld Miss
; DTLB Miss Cause code is ld = 0x01 , st = 0x02, ex = 0x03
; Following code uses that fact that st/ex have one bit in common
btst_s r3, ECR_C_BIT_DTLB_ST_MISS
mov.z r1, 0
mov.nz r1, 1
; We don't want exceptions to be disabled while the fault is handled. ; We don't want exceptions to be disabled while the fault is handled.
; Now that we have saved the context we return from exception hence ; Now that we have saved the context we return from exception hence
......
...@@ -77,6 +77,7 @@ static void __init setup_bvci_lat_unit(void) ...@@ -77,6 +77,7 @@ static void __init setup_bvci_lat_unit(void)
/*----------------------- Platform Devices -----------------------------*/ /*----------------------- Platform Devices -----------------------------*/
#if IS_ENABLED(CONFIG_SERIAL_ARC)
static unsigned long arc_uart_info[] = { static unsigned long arc_uart_info[] = {
0, /* uart->is_emulated (runtime @running_on_hw) */ 0, /* uart->is_emulated (runtime @running_on_hw) */
0, /* uart->port.uartclk */ 0, /* uart->port.uartclk */
...@@ -115,7 +116,7 @@ static struct platform_device arc_uart0_dev = { ...@@ -115,7 +116,7 @@ static struct platform_device arc_uart0_dev = {
static struct platform_device *fpga_early_devs[] __initdata = { static struct platform_device *fpga_early_devs[] __initdata = {
&arc_uart0_dev, &arc_uart0_dev,
}; };
#endif #endif /* CONFIG_SERIAL_ARC_CONSOLE */
static void arc_fpga_serial_init(void) static void arc_fpga_serial_init(void)
{ {
...@@ -152,8 +153,13 @@ static void arc_fpga_serial_init(void) ...@@ -152,8 +153,13 @@ static void arc_fpga_serial_init(void)
* otherwise the early console never gets a chance to run. * otherwise the early console never gets a chance to run.
*/ */
add_preferred_console("ttyARC", 0, "115200"); add_preferred_console("ttyARC", 0, "115200");
#endif #endif /* CONFIG_SERIAL_ARC_CONSOLE */
}
#else /* !IS_ENABLED(CONFIG_SERIAL_ARC) */
static void arc_fpga_serial_init(void)
{
} }
#endif
static void __init plat_fpga_early_init(void) static void __init plat_fpga_early_init(void)
{ {
...@@ -169,7 +175,7 @@ static void __init plat_fpga_early_init(void) ...@@ -169,7 +175,7 @@ static void __init plat_fpga_early_init(void)
} }
static struct of_dev_auxdata plat_auxdata_lookup[] __initdata = { static struct of_dev_auxdata plat_auxdata_lookup[] __initdata = {
#if defined(CONFIG_SERIAL_ARC) || defined(CONFIG_SERIAL_ARC_MODULE) #if IS_ENABLED(CONFIG_SERIAL_ARC)
OF_DEV_AUXDATA("snps,arc-uart", UART0_BASE, "arc-uart", arc_uart_info), OF_DEV_AUXDATA("snps,arc-uart", UART0_BASE, "arc-uart", arc_uart_info),
#endif #endif
{} {}
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment