Commit bbc4fd12 authored by Linus Torvalds

Merge branch 'for-linus' of git://git.monstr.eu/linux-2.6-microblaze

* 'for-linus' of git://git.monstr.eu/linux-2.6-microblaze: (49 commits)
  microblaze: Add KGDB support
  microblaze: Support brki rX, 0x18 for user application debugging
  microblaze: Remove nop after MSRCLR/SET, MTS, MFS instructions
  microblaze: Simplify syscall routine
  microblaze: Move PT_MODE saving to delay slot
  microblaze: Fix _interrupt function
  microblaze: Fix _user_exception function
  microblaze: Put together addik instructions
  microblaze: Use delay slot in syscall macros
  microblaze: Save kernel mode in delay slot
  microblaze: Do not mix register saving and mode setting
  microblaze: Move SAVE_STATE upward
  microblaze: entry.S: Macro optimization
  microblaze: Optimize hw exception routine
  microblaze: Implement clear_ums macro and fix SAVE_STATE macro
  microblaze: Remove additional setup for kernel_mode
  microblaze: Optimize SAVE_STATE macro
  microblaze: Remove additional loading
  microblaze: Completely remove working with R11 register
  microblaze: Do not setup BIP in _debug_exception
  ...
parents 673b864f 2d5973cb
@@ -14,6 +14,7 @@ config MICROBLAZE
 	select USB_ARCH_HAS_EHCI
 	select ARCH_WANT_OPTIONAL_GPIOLIB
 	select HAVE_OPROFILE
+	select HAVE_ARCH_KGDB
 	select HAVE_DMA_ATTRS
 	select HAVE_DMA_API_DEBUG
 	select TRACING_SUPPORT
@@ -223,6 +224,36 @@ config TASK_SIZE
 	hex "Size of user task space" if TASK_SIZE_BOOL
 	default "0x80000000"
+choice
+	prompt "Page size"
+	default MICROBLAZE_4K_PAGES
+	depends on ADVANCED_OPTIONS && !MMU
+	help
+	  Select the kernel logical page size. Increasing the page size
+	  reduces software overhead at each page boundary, lets hardware
+	  prefetch mechanisms work more effectively, and allows larger DMA
+	  transfers, increasing IO efficiency and reducing overhead. However,
+	  memory utilization also increases: each cached file uses a multiple
+	  of the page size to hold its contents, and the space between the
+	  end of the file and the end of the last page is wasted.
+	  If unsure, choose 4K_PAGES.
+config MICROBLAZE_4K_PAGES
+	bool "4k page size"
+config MICROBLAZE_8K_PAGES
+	bool "8k page size"
+config MICROBLAZE_16K_PAGES
+	bool "16k page size"
+config MICROBLAZE_32K_PAGES
+	bool "32k page size"
+endchoice
 endmenu
 source "mm/Kconfig"
...
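As a rough illustration of the trade-off described in the page-size help text above (an estimate added here for clarity, not part of the original change): assuming file sizes are uniformly distributed within their final page, the expected slack at the end of each cached file is about half a page, so moving from the default 4K pages to 32K pages raises the average per-file page-cache overhead from roughly 2 KiB to roughly 16 KiB, while an I/O path touching 128 KiB of data then spans 4 pages instead of 32.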
@@ -10,6 +10,7 @@ source "lib/Kconfig.debug"
 config EARLY_PRINTK
 	bool "Early printk function for kernel"
+	depends on SERIAL_UARTLITE_CONSOLE
 	default n
 	help
 	  This option turns on/off early printk messages to console.
...
@@ -35,7 +35,8 @@ quiet_cmd_cp = CP $< $@$2
 cmd_cp = cat $< >$@$2 || (rm -f $@ && echo false)
 quiet_cmd_strip = STRIP $@
-cmd_strip = $(STRIP) -K _start -K _end -K __log_buf -K _fdt_start vmlinux -o $@
+cmd_strip = $(STRIP) -K microblaze_start -K _end -K __log_buf \
+				-K _fdt_start vmlinux -o $@
 quiet_cmd_uimage = UIMAGE $@.ub
 cmd_uimage = $(CONFIG_SHELL) $(MKIMAGE) -A microblaze -O linux -T kernel \
...
@@ -17,6 +17,7 @@
 /* Somebody depends on this; sigh... */
 #include <linux/mm.h>
+#include <linux/io.h>
 /* Look at Documentation/cachetlb.txt */
@@ -60,7 +61,6 @@ void microblaze_cache_init(void);
 #define invalidate_icache() mbc->iin();
 #define invalidate_icache_range(start, end) mbc->iinr(start, end);
 #define flush_icache_user_range(vma, pg, adr, len) flush_icache();
 #define flush_icache_page(vma, pg) do { } while (0)
@@ -72,9 +72,15 @@ void microblaze_cache_init(void);
 #define flush_dcache() mbc->dfl();
 #define flush_dcache_range(start, end) mbc->dflr(start, end);
-#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
-/* D-cache aliasing problem can't happen - cache is between MMU and ram */
-#define flush_dcache_page(page) do { } while (0)
+#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
+/* MS: We have to implement it because of rootfs-jffs2 issue on WB */
+#define flush_dcache_page(page) \
+do { \
+	unsigned long addr = (unsigned long) page_address(page); /* virtual */ \
+	addr = (u32)virt_to_phys((void *)addr); \
+	flush_dcache_range((unsigned) (addr), (unsigned) (addr) + PAGE_SIZE); \
+} while (0);
 #define flush_dcache_mmap_lock(mapping) do { } while (0)
 #define flush_dcache_mmap_unlock(mapping) do { } while (0)
@@ -97,8 +103,10 @@ void microblaze_cache_init(void);
 #define copy_to_user_page(vma, page, vaddr, dst, src, len) \
 do { \
+	u32 addr = virt_to_phys(dst); \
+	invalidate_icache_range((unsigned) (addr), (unsigned) (addr) + (len));\
 	memcpy((dst), (src), (len)); \
-	flush_icache_range((unsigned) (dst), (unsigned) (dst) + (len)); \
+	flush_dcache_range((unsigned) (addr), (unsigned) (addr) + (len));\
 } while (0)
 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
...
@@ -79,12 +79,6 @@ static inline int dma_supported(struct device *dev, u64 mask)
 	return ops->dma_supported(dev, mask);
 }
-#ifdef CONFIG_PCI
-/* We have our own implementation of pci_set_dma_mask() */
-#define HAVE_ARCH_PCI_SET_DMA_MASK
-#endif
 static inline int dma_set_mask(struct device *dev, u64 dma_mask)
 {
 	struct dma_map_ops *ops = get_dma_ops(dev);
...
@@ -77,7 +77,7 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
 #define ELF_DATA ELFDATA2MSB
 #endif
-#define ELF_EXEC_PAGESIZE 4096
+#define ELF_EXEC_PAGESIZE PAGE_SIZE
 #define ELF_CORE_COPY_REGS(_dest, _regs) \
...
@@ -14,6 +14,11 @@
 #define _ASM_MICROBLAZE_EXCEPTIONS_H
 #ifdef __KERNEL__
+#ifndef CONFIG_MMU
+#define EX_HANDLER_STACK_SIZ (4*19)
+#endif
 #ifndef __ASSEMBLY__
 /* Macros to enable and disable HW exceptions in the MSR */
@@ -64,22 +69,6 @@ asmlinkage void full_exception(struct pt_regs *regs, unsigned int type,
 void die(const char *str, struct pt_regs *fp, long err);
 void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr);
-#if defined(CONFIG_KGDB)
-void (*debugger)(struct pt_regs *regs);
-int (*debugger_bpt)(struct pt_regs *regs);
-int (*debugger_sstep)(struct pt_regs *regs);
-int (*debugger_iabr_match)(struct pt_regs *regs);
-int (*debugger_dabr_match)(struct pt_regs *regs);
-void (*debugger_fault_handler)(struct pt_regs *regs);
-#else
-#define debugger(regs) do { } while (0)
-#define debugger_bpt(regs) 0
-#define debugger_sstep(regs) 0
-#define debugger_iabr_match(regs) 0
-#define debugger_dabr_match(regs) 0
-#define debugger_fault_handler ((void (*)(struct pt_regs *))0)
-#endif
 #endif /*__ASSEMBLY__ */
 #endif /* __KERNEL__ */
 #endif /* _ASM_MICROBLAZE_EXCEPTIONS_H */
#ifdef __KERNEL__
#ifndef __MICROBLAZE_KGDB_H__
#define __MICROBLAZE_KGDB_H__
#ifndef __ASSEMBLY__
#define CACHE_FLUSH_IS_SAFE 1
#define BUFMAX 2048
/*
* 32 32-bit general purpose registers (r0-r31)
* 6 32-bit special registers (pc, msr, ear, esr, fsr, btr)
* 12 32-bit PVR
* 7 32-bit MMU Regs (redr, rpid, rzpr, rtlbx, rtlbsx, rtlblo, rtlbhi)
* ------
* 57 registers
*/
#define NUMREGBYTES (57 * 4)
#define BREAK_INSTR_SIZE 4
static inline void arch_kgdb_breakpoint(void)
{
__asm__ __volatile__("brki r16, 0x18;");
}
#endif /* __ASSEMBLY__ */
#endif /* __MICROBLAZE_KGDB_H__ */
#endif /* __KERNEL__ */
@@ -23,8 +23,16 @@
 #ifdef __KERNEL__
 /* PAGE_SHIFT determines the page size */
-#define PAGE_SHIFT (12)
-#define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
+#if defined(CONFIG_MICROBLAZE_32K_PAGES)
+#define PAGE_SHIFT 15
+#elif defined(CONFIG_MICROBLAZE_16K_PAGES)
+#define PAGE_SHIFT 14
+#elif defined(CONFIG_MICROBLAZE_8K_PAGES)
+#define PAGE_SHIFT 13
+#else
+#define PAGE_SHIFT 12
+#endif
+#define PAGE_SIZE (ASM_CONST(1) << PAGE_SHIFT)
 #define PAGE_MASK (~(PAGE_SIZE-1))
 #define LOAD_OFFSET ASM_CONST((CONFIG_KERNEL_START-CONFIG_KERNEL_BASE_ADDR))
...
@@ -16,7 +16,7 @@
 #define PVR_MSR_BIT 0x400
 struct pvr_s {
-	unsigned pvr[16];
+	unsigned pvr[12];
 };
 /* The following taken from Xilinx's standalone BSP pvr.h */
...
@@ -28,8 +28,6 @@ void disable_early_printk(void);
 void heartbeat(void);
 void setup_heartbeat(void);
-unsigned long long sched_clock(void);
 # ifdef CONFIG_MMU
 extern void mmu_reset(void);
 extern void early_console_reg_tlb_alloc(unsigned int addr);
...
@@ -45,7 +45,6 @@ extern struct task_struct *_switch_to(struct thread_info *prev,
 #define smp_rmb() rmb()
 #define smp_wmb() wmb()
-void show_trace(struct task_struct *task, unsigned long *stack);
 void __bad_xchg(volatile void *ptr, int size);
 static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
...
@@ -359,7 +359,7 @@ extern long __user_bad(void);
 	__copy_tofrom_user((__force void __user *)(to), \
 		(void __user *)(from), (n))
 #define __copy_from_user_inatomic(to, from, n) \
-	copy_from_user((to), (from), (n))
+	__copy_from_user((to), (from), (n))
 static inline long copy_from_user(void *to,
 		const void __user *from, unsigned long n)
@@ -373,7 +373,7 @@ static inline long copy_from_user(void *to,
 #define __copy_to_user(to, from, n) \
 	__copy_tofrom_user((void __user *)(to), \
 		(__force const void __user *)(from), (n))
-#define __copy_to_user_inatomic(to, from, n) copy_to_user((to), (from), (n))
+#define __copy_to_user_inatomic(to, from, n) __copy_to_user((to), (from), (n))
 static inline long copy_to_user(void __user *to,
 		const void *from, unsigned long n)
...
/*
* Backtrace support for Microblaze
*
* Copyright (C) 2010 Digital Design Corporation
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#ifndef __MICROBLAZE_UNWIND_H
#define __MICROBLAZE_UNWIND_H
struct stack_trace;
struct trap_handler_info {
unsigned long start_addr;
unsigned long end_addr;
const char *trap_name;
};
extern struct trap_handler_info microblaze_trap_handlers;
extern const char _hw_exception_handler;
extern const char ex_handler_unhandled;
void microblaze_unwind(struct task_struct *task, struct stack_trace *trace);
#endif /* __MICROBLAZE_UNWIND_H */
@@ -17,7 +17,7 @@ extra-y := head.o vmlinux.lds
 obj-y += dma.o exceptions.o \
 	hw_exception_handler.o init_task.o intc.o irq.o of_device.o \
 	of_platform.o process.o prom.o prom_parse.o ptrace.o \
-	setup.o signal.o sys_microblaze.o timer.o traps.o reset.o
+	reset.o setup.o signal.o sys_microblaze.o timer.o traps.o unwind.o
 obj-y += cpu/
@@ -28,5 +28,6 @@ obj-$(CONFIG_MODULES) += microblaze_ksyms.o module.o
 obj-$(CONFIG_MMU) += misc.o
 obj-$(CONFIG_STACKTRACE) += stacktrace.o
 obj-$(CONFIG_FUNCTION_TRACER) += ftrace.o mcount.o
+obj-$(CONFIG_KGDB) += kgdb.o
 obj-y += entry$(MMU).o
@@ -126,6 +126,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
 		cpuinfo.pvr_user1,
 		cpuinfo.pvr_user2);
+	count += seq_printf(m, "Page size:\t%lu\n", PAGE_SIZE);
 	return 0;
 }
...
@@ -588,3 +588,31 @@ sys_rt_sigsuspend_wrapper:
 #include "syscall_table.S"
 syscall_table_size=(.-sys_call_table)
type_SYSCALL:
.ascii "SYSCALL\0"
type_IRQ:
.ascii "IRQ\0"
type_IRQ_PREEMPT:
.ascii "IRQ (PREEMPTED)\0"
type_SYSCALL_PREEMPT:
.ascii " SYSCALL (PREEMPTED)\0"
/*
* Trap decoding for stack unwinder
* Tuples are (start addr, end addr, string)
* If return address lies on [start addr, end addr],
* unwinder displays 'string'
*/
.align 4
.global microblaze_trap_handlers
microblaze_trap_handlers:
/* Exact matches come first */
.word ret_to_user ; .word ret_to_user ; .word type_SYSCALL
.word ret_from_intr; .word ret_from_intr ; .word type_IRQ
/* Fuzzy matches go here */
.word ret_from_intr; .word no_intr_resched; .word type_IRQ_PREEMPT
.word work_pending ; .word no_work_pending; .word type_SYSCALL_PREEMPT
/* End of table */
.word 0 ; .word 0 ; .word 0
@@ -48,12 +48,17 @@ void die(const char *str, struct pt_regs *fp, long err)
 	do_exit(err);
 }
+/* for user application debugging */
+void sw_exception(struct pt_regs *regs)
+{
+	_exception(SIGTRAP, regs, TRAP_BRKPT, regs->r16);
+}
 void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr)
 {
 	siginfo_t info;
 	if (kernel_mode(regs)) {
-		debugger(regs);
 		die("Exception in kernel mode", regs, signr);
 	}
 	info.si_signo = signr;
@@ -143,7 +148,7 @@ asmlinkage void full_exception(struct pt_regs *regs, unsigned int type,
 #ifdef CONFIG_MMU
 	case MICROBLAZE_PRIVILEGED_EXCEPTION:
 		pr_debug(KERN_WARNING "Privileged exception\n");
-		/* "brk r0,r0" - used as debug breakpoint */
+		/* "brk r0,r0" - used as debug breakpoint - old toolchain */
 		if (get_user(code, (unsigned long *)regs->pc) == 0
 			&& code == 0x980c0000) {
 			_exception(SIGTRAP, regs, TRAP_BRKPT, addr);
...
@@ -43,10 +43,10 @@
 .global empty_zero_page
 .align 12
 empty_zero_page:
-	.space 4096
+	.space PAGE_SIZE
 .global swapper_pg_dir
 swapper_pg_dir:
-	.space 4096
+	.space PAGE_SIZE
 #endif /* CONFIG_MMU */
...
@@ -78,9 +78,6 @@
 #include <asm/asm-offsets.h>
 /* Helpful Macros */
-#ifndef CONFIG_MMU
-#define EX_HANDLER_STACK_SIZ (4*19)
-#endif
 #define NUM_TO_REG(num) r ## num
 #ifdef CONFIG_MMU
@@ -988,6 +985,7 @@ ex_unaligned_fixup:
 	.end _unaligned_data_exception
 #endif /* CONFIG_MMU */
+.global ex_handler_unhandled
 ex_handler_unhandled:
 /* FIXME add handle function for unhandled exception - dump register */
 	bri 0
...
@@ -37,6 +37,7 @@ void __irq_entry do_IRQ(struct pt_regs *regs)
 {
 	unsigned int irq;
 	struct pt_regs *old_regs = set_irq_regs(regs);
+	trace_hardirqs_off();
 	irq_enter();
 	irq = get_irq(regs);
@@ -53,6 +54,7 @@ void __irq_entry do_IRQ(struct pt_regs *regs)
 	irq_exit();
 	set_irq_regs(old_regs);
+	trace_hardirqs_on();
 }
 int show_interrupts(struct seq_file *p, void *v)
...
/*
* Microblaze KGDB support
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/kgdb.h>
#include <linux/kdebug.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <asm/cacheflush.h>
#include <asm/asm-offsets.h>
#include <asm/pvr.h>
#define GDB_REG 0
#define GDB_PC 32
#define GDB_MSR 33
#define GDB_EAR 34
#define GDB_ESR 35
#define GDB_FSR 36
#define GDB_BTR 37
#define GDB_PVR 38
#define GDB_REDR 50
#define GDB_RPID 51
#define GDB_RZPR 52
#define GDB_RTLBX 53
#define GDB_RTLBSX 54 /* mfs can't read it */
#define GDB_RTLBLO 55
#define GDB_RTLBHI 56
/* keep pvr separately because it is unchangeable */
struct pvr_s pvr;
void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *regs)
{
int i;
unsigned long *pt_regb = (unsigned long *)regs;
int temp;
/* registers r0 - r31, pc, msr, ear, esr, fsr + do not save pt_mode */
for (i = 0; i < (sizeof(struct pt_regs) / 4) - 1; i++)
gdb_regs[i] = pt_regb[i];
/* Branch target register can't be changed */
__asm__ __volatile__ ("mfs %0, rbtr;" : "=r"(temp) : );
gdb_regs[GDB_BTR] = temp;
/* pvr part - we have 11 pvr regs */
for (i = 0; i < sizeof(struct pvr_s)/4; i++)
gdb_regs[GDB_PVR + i] = pvr.pvr[i];
/* read special registers - can't be changed */
__asm__ __volatile__ ("mfs %0, redr;" : "=r"(temp) : );
gdb_regs[GDB_REDR] = temp;
__asm__ __volatile__ ("mfs %0, rpid;" : "=r"(temp) : );
gdb_regs[GDB_RPID] = temp;
__asm__ __volatile__ ("mfs %0, rzpr;" : "=r"(temp) : );
gdb_regs[GDB_RZPR] = temp;
__asm__ __volatile__ ("mfs %0, rtlbx;" : "=r"(temp) : );
gdb_regs[GDB_RTLBX] = temp;
__asm__ __volatile__ ("mfs %0, rtlblo;" : "=r"(temp) : );
gdb_regs[GDB_RTLBLO] = temp;
__asm__ __volatile__ ("mfs %0, rtlbhi;" : "=r"(temp) : );
gdb_regs[GDB_RTLBHI] = temp;
}
void gdb_regs_to_pt_regs(unsigned long *gdb_regs, struct pt_regs *regs)
{
int i;
unsigned long *pt_regb = (unsigned long *)regs;
/* pt_regs and gdb_regs have the same 37 values.
* The rest of gdb_regs are unused and can't be changed.
* r0 register value can't be changed either. */
for (i = 1; i < (sizeof(struct pt_regs) / 4) - 1; i++)
pt_regb[i] = gdb_regs[i];
}
void microblaze_kgdb_break(struct pt_regs *regs)
{
if (kgdb_handle_exception(1, SIGTRAP, 0, regs) != 0)
return;
/* Jump over the first arch_kgdb_breakpoint, which acts as a barrier
 * to get kgdb working. The same solution is used for powerpc */
if (*(u32 *) (regs->pc) == *(u32 *) (&arch_kgdb_ops.gdb_bpt_instr))
regs->pc += BREAK_INSTR_SIZE;
}
/* untested */
void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p)
{
int i;
unsigned long *pt_regb = (unsigned long *)(p->thread.regs);
/* registers r0 - r31, pc, msr, ear, esr, fsr + do not save pt_mode */
for (i = 0; i < (sizeof(struct pt_regs) / 4) - 1; i++)
gdb_regs[i] = pt_regb[i];
/* pvr part - we have 11 pvr regs */
for (i = 0; i < sizeof(struct pvr_s)/4; i++)
gdb_regs[GDB_PVR + i] = pvr.pvr[i];
}
void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long ip)
{
regs->pc = ip;
}
int kgdb_arch_handle_exception(int vector, int signo, int err_code,
char *remcom_in_buffer, char *remcom_out_buffer,
struct pt_regs *regs)
{
char *ptr;
unsigned long address;
int cpu = smp_processor_id();
switch (remcom_in_buffer[0]) {
case 'c':
/* handle the optional parameter */
ptr = &remcom_in_buffer[1];
if (kgdb_hex2long(&ptr, &address))
regs->pc = address;
return 0;
}
return -1; /* this means that we do not want to exit from the handler */
}
int kgdb_arch_init(void)
{
get_pvr(&pvr); /* Fill PVR structure */
return 0;
}
void kgdb_arch_exit(void)
{
/* Nothing to do */
}
/*
* Global data
*/
struct kgdb_arch arch_kgdb_ops = {
.gdb_bpt_instr = {0xba, 0x0c, 0x00, 0x18}, /* brki r16, 0x18 */
};
@@ -76,7 +76,7 @@ early_console_reg_tlb_alloc:
 	 * the UARTs nice and early. We use a 4k real==virtual mapping.
 	 */
 	ori r4, r0, MICROBLAZE_TLB_SIZE - 1
-	mts rtlbx, r4 /* TLB slot 2 */
+	mts rtlbx, r4 /* TLB slot 63 */
 	or r4,r5,r0
 	andi r4,r4,0xfffff000
...
@@ -76,8 +76,11 @@ __setup("hlt", hlt_setup);
 void default_idle(void)
 {
 	if (likely(hlt_counter)) {
-		while (!need_resched())
+		local_irq_disable();
+		stop_critical_timings();
 		cpu_relax();
+		start_critical_timings();
+		local_irq_enable();
 	} else {
 		clear_thread_flag(TIF_POLLING_NRFLAG);
 		smp_mb__after_clear_bit();
...
@@ -38,6 +38,8 @@
 #include <asm/processor.h>
 #include <linux/uaccess.h>
 #include <asm/asm-offsets.h>
+#include <asm/cacheflush.h>
+#include <asm/io.h>
 /* Returns the address where the register at REG_OFFS in P is stashed away. */
 static microblaze_reg_t *reg_save_addr(unsigned reg_offs,
@@ -101,8 +103,21 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
 		microblaze_reg_t *reg_addr = reg_save_addr(addr, child);
 		if (request == PTRACE_PEEKUSR)
 			val = *reg_addr;
-		else
+		else {
+#if 1
 			*reg_addr = data;
+#else
+			/* MS: potential problem on WB systems.
+			 * Be aware that reg_addr is a virtual address;
+			 * a virt_to_phys conversion is necessary.
+			 * This could be a sensible solution.
+			 */
+			u32 paddr = virt_to_phys((u32)reg_addr);
+			invalidate_icache_range(paddr, paddr + 4);
+			*reg_addr = data;
+			flush_dcache_range(paddr, paddr + 4);
+#endif
+		}
 	} else
 		rval = -EIO;
...
@@ -14,52 +14,18 @@
 #include <linux/thread_info.h>
 #include <linux/ptrace.h>
 #include <linux/module.h>
+#include <asm/unwind.h>
-/* FIXME initial support */
 void save_stack_trace(struct stack_trace *trace)
 {
-	unsigned long *sp;
-	unsigned long addr;
-	asm("addik %0, r1, 0" : "=r" (sp));
-	while (!kstack_end(sp)) {
-		addr = *sp++;
-		if (__kernel_text_address(addr)) {
-			if (trace->skip > 0)
-				trace->skip--;
-			else
-				trace->entries[trace->nr_entries++] = addr;
-			if (trace->nr_entries >= trace->max_entries)
-				break;
-		}
-	}
+	/* Exclude our helper functions from the trace*/
+	trace->skip += 2;
+	microblaze_unwind(NULL, trace);
 }
 EXPORT_SYMBOL_GPL(save_stack_trace);
 void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
 {
-	unsigned int *sp;
-	unsigned long addr;
-	struct thread_info *ti = task_thread_info(tsk);
-	if (tsk == current)
-		asm("addik %0, r1, 0" : "=r" (sp));
-	else
-		sp = (unsigned int *)ti->cpu_context.r1;
-	while (!kstack_end(sp)) {
-		addr = *sp++;
-		if (__kernel_text_address(addr)) {
-			if (trace->skip > 0)
-				trace->skip--;
-			else
-				trace->entries[trace->nr_entries++] = addr;
-			if (trace->nr_entries >= trace->max_entries)
-				break;
-		}
-	}
+	microblaze_unwind(tsk, trace);
 }
 EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
@@ -28,6 +28,7 @@
 #include <asm/prom.h>
 #include <asm/irq.h>
 #include <asm/system.h>
+#include <linux/cnt32_to_63.h>
 #ifdef CONFIG_SELFMOD_TIMER
 #include <asm/selfmod.h>
@@ -135,7 +136,7 @@ static void microblaze_timer_set_mode(enum clock_event_mode mode,
 static struct clock_event_device clockevent_microblaze_timer = {
 	.name = "microblaze_clockevent",
 	.features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC,
-	.shift = 24,
+	.shift = 8,
 	.rating = 300,
 	.set_next_event = microblaze_timer_set_next_event,
 	.set_mode = microblaze_timer_set_mode,
@@ -195,7 +196,7 @@ static cycle_t microblaze_cc_read(const struct cyclecounter *cc)
 static struct cyclecounter microblaze_cc = {
 	.read = microblaze_cc_read,
 	.mask = CLOCKSOURCE_MASK(32),
-	.shift = 24,
+	.shift = 8,
 };
 int __init init_microblaze_timecounter(void)
@@ -213,7 +214,7 @@ static struct clocksource clocksource_microblaze = {
 	.rating = 300,
 	.read = microblaze_read,
 	.mask = CLOCKSOURCE_MASK(32),
-	.shift = 24, /* I can shift it */
+	.shift = 8, /* I can shift it */
 	.flags = CLOCK_SOURCE_IS_CONTINUOUS,
 };
@@ -235,6 +236,12 @@ static int __init microblaze_clocksource_init(void)
 	return 0;
 }
+/*
+ * We have to protect accesses before timer initialization
+ * and return 0 for sched_clock function below.
+ */
+static int timer_initialized;
 void __init time_init(void)
 {
 	u32 irq, i = 0;
@@ -289,4 +296,15 @@ void __init time_init(void)
 #endif
 	microblaze_clocksource_init();
 	microblaze_clockevent_init();
+	timer_initialized = 1;
+}
+unsigned long long notrace sched_clock(void)
+{
+	if (timer_initialized) {
+		struct clocksource *cs = &clocksource_microblaze;
+		cycle_t cyc = cnt32_to_63(cs->read(NULL));
+		return clocksource_cyc2ns(cyc, cs->mult, cs->shift);
+	}
+	return 0;
 }
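A note on the sched_clock() implementation above (an explanatory aside, not part of the patch): cnt32_to_63() extends the 32-bit timer count into a monotonic 63-bit value, but it only works if it is called at least once per half wraparound period of the counter. As a rough worked example, assuming a 100 MHz timebase, 2^31 cycles correspond to about 21.5 seconds, so the regular scheduler tick comfortably satisfies that requirement.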
@@ -16,13 +16,14 @@
 #include <asm/exceptions.h>
 #include <asm/system.h>
+#include <asm/unwind.h>
 void trap_init(void)
 {
 	__enable_hw_exceptions();
 }
-static unsigned long kstack_depth_to_print = 24;
+static unsigned long kstack_depth_to_print;	/* 0 == entire stack */
 static int __init kstack_setup(char *s)
 {
@@ -30,31 +31,47 @@ static int __init kstack_setup(char *s)
 }
 __setup("kstack=", kstack_setup);
-void show_trace(struct task_struct *task, unsigned long *stack)
+void show_stack(struct task_struct *task, unsigned long *sp)
 {
-	unsigned long addr;
+	unsigned long words_to_show;
+	u32 fp = (u32) sp;
+	if (fp == 0) {
+		if (task) {
+			fp = ((struct thread_info *)
+				(task->stack))->cpu_context.r1;
+		} else {
+			/* Pick up caller of dump_stack() */
+			fp = (u32)&sp - 8;
+		}
+	}
-	if (!stack)
-		stack = (unsigned long *)&stack;
+	words_to_show = (THREAD_SIZE - (fp & (THREAD_SIZE - 1))) >> 2;
+	if (kstack_depth_to_print && (words_to_show > kstack_depth_to_print))
+		words_to_show = kstack_depth_to_print;
+	pr_info("Kernel Stack:\n");
-	printk(KERN_NOTICE "Call Trace: ");
-#ifdef CONFIG_KALLSYMS
-	printk(KERN_NOTICE "\n");
-#endif
-	while (!kstack_end(stack)) {
-		addr = *stack++;
 	/*
-	 * If the address is either in the text segment of the
-	 * kernel, or in the region which contains vmalloc'ed
-	 * memory, it *may* be the address of a calling
-	 * routine; if so, print it so that someone tracing
-	 * down the cause of the crash will be able to figure
-	 * out the call path that was taken.
+	 * Make the first line an 'odd' size if necessary to get
+	 * remaining lines to start at an address multiple of 0x10
 	 */
-		if (kernel_text_address(addr))
-			print_ip_sym(addr);
+	if (fp & 0xF) {
+		unsigned long line1_words = (0x10 - (fp & 0xF)) >> 2;
+		if (line1_words < words_to_show) {
+			print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS, 32,
+				4, (void *)fp, line1_words << 2, 0);
+			fp += line1_words << 2;
+			words_to_show -= line1_words;
+		}
 	}
-	printk(KERN_NOTICE "\n");
+	print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS, 32, 4, (void *)fp,
+		words_to_show << 2, 0);
+	printk(KERN_INFO "\n\n");
+	pr_info("Call Trace:\n");
+	microblaze_unwind(task, NULL);
+	pr_info("\n");
 	if (!task)
 		task = current;
@@ -62,34 +79,6 @@ void show_trace(struct task_struct *task, unsigned long *stack)
 	debug_show_held_locks(task);
 }
-void show_stack(struct task_struct *task, unsigned long *sp)
-{
-	unsigned long *stack;
-	int i;
-	if (sp == NULL) {
-		if (task)
-			sp = (unsigned long *) ((struct thread_info *)
-				(task->stack))->cpu_context.r1;
-		else
-			sp = (unsigned long *)&sp;
-	}
-	stack = sp;
-	printk(KERN_INFO "\nStack:\n ");
-	for (i = 0; i < kstack_depth_to_print; i++) {
-		if (kstack_end(sp))
-			break;
-		if (i && ((i % 8) == 0))
-			printk("\n ");
-		printk("%08lx ", *sp++);
-	}
-	printk("\n");
-	show_trace(task, stack);
-}
 void dump_stack(void)
 {
 	show_stack(NULL, NULL);
...
/*
* Backtrace support for Microblaze
*
* Copyright (C) 2010 Digital Design Corporation
*
* Based on arch/sh/kernel/cpu/sh5/unwind.c code which is:
* Copyright (C) 2004 Paul Mundt
* Copyright (C) 2004 Richard Curnow
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
/* #define DEBUG 1 */
#include <linux/kallsyms.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/stacktrace.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/io.h>
#include <asm/sections.h>
#include <asm/exceptions.h>
#include <asm/unwind.h>
struct stack_trace;
/*
* On Microblaze, finding the previous stack frame is a little tricky.
* At this writing (3/2010), Microblaze does not support CONFIG_FRAME_POINTERS,
* and even if it did, gcc (4.1.2) does not store the frame pointer at
* a consistent offset within each frame. To determine frame size, it is
* necessary to search for the assembly instruction that creates or reclaims
* the frame and extract the size from it.
*
* Microblaze stores the stack pointer in r1, and creates a frame via
*
* addik r1, r1, -FRAME_SIZE
*
* The frame is reclaimed via
*
* addik r1, r1, FRAME_SIZE
*
* Frame creation occurs at or near the top of a function.
* Depending on the compiler, reclaim may occur at the end, or before
* a mid-function return.
*
* A stack frame is usually not created in a leaf function.
*
*/
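/*
 * Worked example (added here for illustration; not in the original source):
 * the instruction word 0x3021FFC8 matches the "addik r1, r1, imm" pattern
 * (0x3021xxxx) that find_frame_creation() searches for.  Its low 16 bits
 * are 0xFFC8, which is -56 as an s16, so get_frame_size() returns
 * abs(-56) = 56: the function reserved (or reclaimed) a 56-byte stack frame.
 */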
/**
* get_frame_size - Extract the stack adjustment from an
* "addik r1, r1, adjust" instruction
* @instr : Microblaze instruction
*
* Return - Number of stack bytes the instruction reserves or reclaims
*/
inline long get_frame_size(unsigned long instr)
{
return abs((s16)(instr & 0xFFFF));
}
/**
* find_frame_creation - Search backward to find the instruction that creates
* the stack frame (hopefully, for the same function the
* initial PC is in).
* @pc : Program counter at which to begin the search
*
* Return - PC at which stack frame creation occurs
* NULL if this cannot be found, i.e. a leaf function
*/
static unsigned long *find_frame_creation(unsigned long *pc)
{
int i;
/* NOTE: Distance to search is arbitrary
* 250 works well for most things,
* 750 picks up things like tcp_recvmsg(),
* 1000 needed for fat_fill_super()
*/
for (i = 0; i < 1000; i++, pc--) {
unsigned long instr;
s16 frame_size;
if (!kernel_text_address((unsigned long) pc))
return NULL;
instr = *pc;
/* addik r1, r1, foo ? */
if ((instr & 0xFFFF0000) != 0x30210000)
continue; /* No */
frame_size = get_frame_size(instr);
if ((frame_size < 8) || (frame_size & 3)) {
pr_debug(" Invalid frame size %d at 0x%p\n",
frame_size, pc);
return NULL;
}
pr_debug(" Found frame creation at 0x%p, size %d\n", pc,
frame_size);
return pc;
}
return NULL;
}
/**
* lookup_prev_stack_frame - Find the stack frame of the previous function.
* @fp : Frame (stack) pointer for current function
* @pc : Program counter within current function
* @leaf_return : r15 value within current function. If the current function
* is a leaf, this is the caller's return address.
* @pprev_fp : On exit, set to frame (stack) pointer for previous function
* @pprev_pc : On exit, set to current function caller's return address
*
* Return - 0 on success, -EINVAL if the previous frame cannot be found
*/
static int lookup_prev_stack_frame(unsigned long fp, unsigned long pc,
unsigned long leaf_return,
unsigned long *pprev_fp,
unsigned long *pprev_pc)
{
unsigned long *prologue = NULL;
/* _switch_to is a special leaf function */
if (pc != (unsigned long) &_switch_to)
prologue = find_frame_creation((unsigned long *)pc);
if (prologue) {
long frame_size = get_frame_size(*prologue);
*pprev_fp = fp + frame_size;
*pprev_pc = *(unsigned long *)fp;
} else {
if (!leaf_return)
return -EINVAL;
*pprev_pc = leaf_return;
*pprev_fp = fp;
}
/* NOTE: don't check kernel_text_address here, to allow display
* of userland return address
*/
return (!*pprev_pc || (*pprev_pc & 3)) ? -EINVAL : 0;
}
static void microblaze_unwind_inner(struct task_struct *task,
unsigned long pc, unsigned long fp,
unsigned long leaf_return,
struct stack_trace *trace);
/**
* unwind_trap - Unwind through a system trap, that stored previous state
* on the stack.
*/
#ifdef CONFIG_MMU
static inline void unwind_trap(struct task_struct *task, unsigned long pc,
unsigned long fp, struct stack_trace *trace)
{
/* To be implemented */
}
#else
static inline void unwind_trap(struct task_struct *task, unsigned long pc,
unsigned long fp, struct stack_trace *trace)
{
const struct pt_regs *regs = (const struct pt_regs *) fp;
microblaze_unwind_inner(task, regs->pc, regs->r1, regs->r15, trace);
}
#endif
/**
* microblaze_unwind_inner - Unwind the stack from the specified point
* @task : Task whose stack we are to unwind (may be NULL)
* @pc : Program counter from which we start unwinding
* @fp : Frame (stack) pointer from which we start unwinding
* @leaf_return : Value of r15 at pc. If the function is a leaf, this is
* the caller's return address.
* @trace : Where to store stack backtrace (PC values).
* NULL == print backtrace to kernel log
*/
void microblaze_unwind_inner(struct task_struct *task,
unsigned long pc, unsigned long fp,
unsigned long leaf_return,
struct stack_trace *trace)
{
int ofs = 0;
pr_debug(" Unwinding with PC=%p, FP=%p\n", (void *)pc, (void *)fp);
if (!pc || !fp || (pc & 3) || (fp & 3)) {
pr_debug(" Invalid state for unwind, aborting\n");
return;
}
for (; pc != 0;) {
unsigned long next_fp, next_pc = 0;
unsigned long return_to = pc + 2 * sizeof(unsigned long);
const struct trap_handler_info *handler =
&microblaze_trap_handlers;
/* Is previous function the HW exception handler? */
if ((return_to >= (unsigned long)&_hw_exception_handler)
&&(return_to < (unsigned long)&ex_handler_unhandled)) {
/*
* HW exception handler doesn't save all registers,
* so we open-code a special case of unwind_trap()
*/
#ifndef CONFIG_MMU
const struct pt_regs *regs =
(const struct pt_regs *) fp;
#endif
pr_info("HW EXCEPTION\n");
#ifndef CONFIG_MMU
microblaze_unwind_inner(task, regs->r17 - 4,
fp + EX_HANDLER_STACK_SIZ,
regs->r15, trace);
#endif
return;
}
/* Is previous function a trap handler? */
for (; handler->start_addr; ++handler) {
if ((return_to >= handler->start_addr)
&& (return_to <= handler->end_addr)) {
if (!trace)
pr_info("%s\n", handler->trap_name);
unwind_trap(task, pc, fp, trace);
return;
}
}
pc -= ofs;
if (trace) {
#ifdef CONFIG_STACKTRACE
if (trace->skip > 0)
trace->skip--;
else
trace->entries[trace->nr_entries++] = pc;
if (trace->nr_entries >= trace->max_entries)
break;
#endif
} else {
/* Have we reached userland? */
if (unlikely(pc == task_pt_regs(task)->pc)) {
pr_info("[<%p>] PID %lu [%s]\n",
(void *) pc,
(unsigned long) task->pid,
task->comm);
break;
} else
print_ip_sym(pc);
}
/* Stop when we reach anything not part of the kernel */
if (!kernel_text_address(pc))
break;
if (lookup_prev_stack_frame(fp, pc, leaf_return, &next_fp,
&next_pc) == 0) {
ofs = sizeof(unsigned long);
pc = next_pc & ~3;
fp = next_fp;
leaf_return = 0;
} else {
pr_debug(" Failed to find previous stack frame\n");
break;
}
pr_debug(" Next PC=%p, next FP=%p\n",
(void *)next_pc, (void *)next_fp);
}
}
/**
* microblaze_unwind - Stack unwinder for Microblaze (external entry point)
* @task : Task whose stack we are to unwind (NULL == current)
* @trace : Where to store stack backtrace (PC values).
* NULL == print backtrace to kernel log
*/
void microblaze_unwind(struct task_struct *task, struct stack_trace *trace)
{
if (task) {
if (task == current) {
const struct pt_regs *regs = task_pt_regs(task);
microblaze_unwind_inner(task, regs->pc, regs->r1,
regs->r15, trace);
} else {
struct thread_info *thread_info =
(struct thread_info *)(task->stack);
const struct cpu_context *cpu_context =
&thread_info->cpu_context;
microblaze_unwind_inner(task,
(unsigned long) &_switch_to,
cpu_context->r1,
cpu_context->r15, trace);
}
} else {
unsigned long pc, fp;
__asm__ __volatile__ ("or %0, r1, r0" : "=r" (fp));
__asm__ __volatile__ (
"brlid %0, 0f;"
"nop;"
"0:"
: "=r" (pc)
);
/* Since we are not a leaf function, use leaf_return = 0 */
microblaze_unwind_inner(current, pc, fp, 0, trace);
}
}
@@ -10,7 +10,7 @@
 OUTPUT_FORMAT("elf32-microblaze", "elf32-microblaze", "elf32-microblaze")
 OUTPUT_ARCH(microblaze)
-ENTRY(_start)
+ENTRY(microblaze_start)
 #include <asm/page.h>
 #include <asm-generic/vmlinux.lds.h>
@@ -20,7 +20,7 @@ jiffies = jiffies_64 + 4;
 SECTIONS {
 	. = CONFIG_KERNEL_START;
-	_start = CONFIG_KERNEL_BASE_ADDR;
+	microblaze_start = CONFIG_KERNEL_BASE_ADDR;
 	.text : AT(ADDR(.text) - LOAD_OFFSET) {
 		_text = . ;
 		_stext = . ;
@@ -55,7 +55,7 @@ SECTIONS {
 	 */
 	.sdata2 : AT(ADDR(.sdata2) - LOAD_OFFSET) {
 		_ssrw = .;
-		. = ALIGN(4096); /* page aligned when MMU used - origin 0x8 */
+		. = ALIGN(PAGE_SIZE); /* page aligned when MMU used */
 		*(.sdata2)
 		. = ALIGN(8);
 		_essrw = .;
@@ -70,7 +70,7 @@ SECTIONS {
 	/* Reserve some low RAM for r0 based memory references */
 	. = ALIGN(0x4) ;
 	r0_ram = . ;
-	. = . + 4096; /* a page should be enough */
+	. = . + PAGE_SIZE; /* a page should be enough */
 	/* Under the microblaze ABI, .sdata and .sbss must be contiguous */
 	. = ALIGN(8);
@@ -120,7 +120,7 @@ SECTIONS {
 	__init_end_before_initramfs = .;
-	.init.ramfs ALIGN(4096) : AT(ADDR(.init.ramfs) - LOAD_OFFSET) {
+	.init.ramfs ALIGN(PAGE_SIZE) : AT(ADDR(.init.ramfs) - LOAD_OFFSET) {
 		__initramfs_start = .;
 		*(.init.ramfs)
 		__initramfs_end = .;
@@ -132,11 +132,11 @@ SECTIONS {
 	 * so that __init_end == __bss_start. This will make image.elf
 	 * consistent with the image.bin
 	 */
-	/* . = ALIGN(4096); */
+	/* . = ALIGN(PAGE_SIZE); */
 	}
 	__init_end = .;
-	.bss ALIGN (4096) : AT(ADDR(.bss) - LOAD_OFFSET) {
+	.bss ALIGN (PAGE_SIZE) : AT(ADDR(.bss) - LOAD_OFFSET) {
 		/* page aligned when MMU used */
 		__bss_start = . ;
 		*(.bss*)
@@ -145,7 +145,7 @@ SECTIONS {
 		__bss_stop = . ;
 		_ebss = . ;
 	}
-	. = ALIGN(4096);
+	. = ALIGN(PAGE_SIZE);
 	_end = .;
 	DISCARDS
...
@@ -37,10 +37,6 @@
 #include <linux/uaccess.h>
 #include <asm/exceptions.h>
-#if defined(CONFIG_KGDB)
-int debugger_kernel_faults = 1;
-#endif
 static unsigned long pte_misses; /* updated by do_page_fault() */
 static unsigned long pte_errors; /* updated by do_page_fault() */
@@ -81,10 +77,6 @@ void bad_page_fault(struct pt_regs *regs, unsigned long address, int sig)
 	}
 	/* kernel has accessed a bad area */
-#if defined(CONFIG_KGDB)
-	if (debugger_kernel_faults)
-		debugger(regs);
-#endif
 	die("kernel access of bad area", regs, sig);
 }
@@ -115,13 +107,6 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
 	if ((error_code & 0x13) == 0x13 || (error_code & 0x11) == 0x11)
 		is_write = 0;
-#if defined(CONFIG_KGDB)
-	if (debugger_fault_handler && regs->trap == 0x300) {
-		debugger_fault_handler(regs);
-		return;
-	}
-#endif /* CONFIG_KGDB */
 	if (unlikely(in_atomic() || !mm)) {
 		if (kernel_mode(regs))
 			goto bad_area_nosemaphore;
@@ -226,7 +211,6 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault.
 	 */
-survive:
 	fault = handle_mm_fault(mm, vma, address, is_write ? FAULT_FLAG_WRITE : 0);
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
...
@@ -134,13 +134,8 @@ void __init setup_memory(void)
 	 * for 4GB of memory, using 4kB pages), plus 1 page
 	 * (in case the address isn't page-aligned).
 	 */
-#ifndef CONFIG_MMU
-	map_size = init_bootmem_node(NODE_DATA(0), PFN_UP(TOPHYS((u32)klimit)),
-		min_low_pfn, max_low_pfn);
-#else
-	map_size = init_bootmem_node(&contig_page_data,
+	map_size = init_bootmem_node(NODE_DATA(0),
 		PFN_UP(TOPHYS((u32)klimit)), min_low_pfn, max_low_pfn);
-#endif
 	memblock_reserve(PFN_UP(TOPHYS((u32)klimit)) << PAGE_SHIFT, map_size);
 	/* free bootmem is whole main memory */
...
@@ -528,7 +528,7 @@ config LOCKDEP
 	bool
 	depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
 	select STACKTRACE
-	select FRAME_POINTER if !MIPS && !PPC && !ARM_UNWIND && !S390
+	select FRAME_POINTER if !MIPS && !PPC && !ARM_UNWIND && !S390 && !MICROBLAZE
 	select KALLSYMS
 	select KALLSYMS_ALL
@@ -958,13 +958,13 @@ config FAULT_INJECTION_STACKTRACE_FILTER
 	depends on FAULT_INJECTION_DEBUG_FS && STACKTRACE_SUPPORT
 	depends on !X86_64
 	select STACKTRACE
-	select FRAME_POINTER if !PPC && !S390
+	select FRAME_POINTER if !PPC && !S390 && !MICROBLAZE
 	help
 	  Provide stacktrace filter for fault-injection capabilities
 config LATENCYTOP
 	bool "Latency measuring infrastructure"
-	select FRAME_POINTER if !MIPS && !PPC && !S390
+	select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE
 	select KALLSYMS
 	select KALLSYMS_ALL
 	select STACKTRACE
...