Commit 989b0b93 authored by Linus Torvalds

Merge branch 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/czankel/xtensa-2.6

* 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/czankel/xtensa-2.6: (29 commits)
  [XTENSA] Allow debugger to modify the WINDOWBASE register.
  [XTENSA] Fix makefile to work with binutils-2.18.
  [XTENSA] Fix register corruption for certain processor configurations
  [XTENSA] Fix cache flush macro for D$/I$ aliasing/non-aliasing
  [XTENSA] Exclude thread-global registers from the xtregs structures.
  [XTENSA] Add support for the sa_restorer function
  [XTENSA] Add support for configurable registers and coprocessors
  [XTENSA] Clean up stat structs.
  [XTENSA] Use preprocessor to generate the linker script for the ELF boot image
  [XTENSA] Add missing RELOCATE_ENTRY for debug vector
  [XTENSA] Add volatile keyword to asm statements accessing counter registers
  [XTENSA] Remove unused code
  [XTENSA] Fix modules for non-exec processor configurations
  [XTENSA] Add missing cast in elf.h ELF_CORE_COPY_REGS()
  [XTENSA] Fix comments regarding the number of frames to save
  [XTENSA] Add missing a2 register restore in register spill routine
  [XTENSA] adjust boot linker script start addresses
  [XTENSA] Remove oldmask from sigcontext and fix register flush
  [XTENSA] Clean up elf-gregset.
  [XTENSA] Fix icache flush for cache aliasing
  ...
parents e03f1a84 42086cec
@@ -59,7 +59,7 @@ LIBGCC := $(shell $(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name)
 head-y   := arch/xtensa/kernel/head.o
 core-y   += arch/xtensa/kernel/ arch/xtensa/mm/
 ifneq ($(PLATFORM),)
-core-y   += arch/xtensa/platform-$(PLATFORM)/
+core-y   += arch/xtensa/platforms/$(PLATFORM)/
 endif
 libs-y   += arch/xtensa/lib/ $(LIBGCC)
...
@@ -14,25 +14,26 @@ OBJCOPY_ARGS := -O elf32-xtensa-le
 endif
 export OBJCOPY_ARGS
+export CPPFLAGS_boot.lds += -P -C

 boot-y := bootstrap.o
 OBJS := $(addprefix $(obj)/,$(boot-y))

-Image: vmlinux $(OBJS)
-	$(OBJCOPY) --strip-all -R .comment -R .xt.insn -O binary \
+Image: vmlinux $(OBJS) arch/$(ARCH)/boot/boot-elf/boot.lds
+	$(OBJCOPY) --strip-all -R .comment -R .note.gnu.build-id -O binary \
 		vmlinux vmlinux.tmp
 	$(OBJCOPY) $(OBJCOPY_ARGS) -R .comment \
 		--add-section image=vmlinux.tmp \
 		--set-section-flags image=contents,alloc,load,load,data \
 		$(OBJS) $@.tmp
 	$(LD) $(LDFLAGS) $(LDFLAGS_vmlinux) \
-		-T $(srctree)/arch/$(ARCH)/boot/boot-elf/boot.ld \
+		-T arch/$(ARCH)/boot/boot-elf/boot.lds \
 		-o arch/$(ARCH)/boot/$@.elf $@.tmp
 	rm -f $@.tmp vmlinux.tmp

 Image.initrd: vmlinux $(OBJS)
-	$(OBJCOPY) --strip-all -R .comment -R .xt.insn -O binary \
+	$(OBJCOPY) --strip-all -R .comment -R .note.gnu.build-id -O binary \
 		--add-section .initrd=arch/$(ARCH)/boot/ramdisk \
 		--set-section-flags .initrd=contents,alloc,load,load,data \
 		vmlinux vmlinux.tmp
...
+#include <asm/variant/core.h>
 OUTPUT_ARCH(xtensa)
+ENTRY(_ResetVector)

 SECTIONS
 {
@@ -61,7 +63,7 @@ SECTIONS
 	_end = .;
 	_param_start = .;

-	.ResetVector.text 0xfe000020 :
+	.ResetVector.text XCHAL_RESET_VECTOR_VADDR :
 	{
 		*(.ResetVector.text)
 	}
...
@@ -22,7 +22,7 @@ LIBS := arch/xtensa/boot/lib/lib.a arch/xtensa/lib/lib.a
 LIBGCC := $(shell $(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name)

 zImage: vmlinux $(OBJS) $(LIBS)
-	$(OBJCOPY) --strip-all -R .comment -R .xt.insn -O binary \
+	$(OBJCOPY) --strip-all -R .comment -R .note.gnu.build-id -O binary \
 		vmlinux vmlinux.tmp
 	gzip -vf9 vmlinux.tmp
 	$(OBJCOPY) $(OBJCOPY_ARGS) -R .comment \
...
@@ -2,7 +2,7 @@ OUTPUT_ARCH(xtensa)
 SECTIONS
 {
-	.start 0xD0200000 : { *(.start) }
+	.start 0xD1000000 : { *(.start) }

 	.text :
 	{
...
@@ -21,7 +21,6 @@
 #include <linux/mm.h>

 #include <asm/ptrace.h>
-#include <asm/processor.h>
 #include <asm/uaccess.h>

 #define DEFINE(sym, val) asm volatile("\n->" #sym " %0 " #val : : "i" (val))
@@ -64,6 +63,8 @@ int main(void)
 	DEFINE(PT_SIZE, sizeof(struct pt_regs));
 	DEFINE(PT_AREG_END, offsetof (struct pt_regs, areg[XCHAL_NUM_AREGS]));
 	DEFINE(PT_USER_SIZE, offsetof(struct pt_regs, areg[XCHAL_NUM_AREGS]));
+	DEFINE(PT_XTREGS_OPT, offsetof(struct pt_regs, xtregs_opt));
+	DEFINE(XTREGS_OPT_SIZE, sizeof(xtregs_opt_t));

 	/* struct task_struct */
 	DEFINE(TASK_PTRACE, offsetof (struct task_struct, ptrace));
@@ -77,7 +78,19 @@ int main(void)
 	/* struct thread_info (offset from start_struct) */
 	DEFINE(THREAD_RA, offsetof (struct task_struct, thread.ra));
 	DEFINE(THREAD_SP, offsetof (struct task_struct, thread.sp));
-	DEFINE(THREAD_CP_SAVE, offsetof (struct task_struct, thread.cp_save));
+	DEFINE(THREAD_CPENABLE, offsetof (struct thread_info, cpenable));
+#if XTENSA_HAVE_COPROCESSORS
+	DEFINE(THREAD_XTREGS_CP0, offsetof (struct thread_info, xtregs_cp));
+	DEFINE(THREAD_XTREGS_CP1, offsetof (struct thread_info, xtregs_cp));
+	DEFINE(THREAD_XTREGS_CP2, offsetof (struct thread_info, xtregs_cp));
+	DEFINE(THREAD_XTREGS_CP3, offsetof (struct thread_info, xtregs_cp));
+	DEFINE(THREAD_XTREGS_CP4, offsetof (struct thread_info, xtregs_cp));
+	DEFINE(THREAD_XTREGS_CP5, offsetof (struct thread_info, xtregs_cp));
+	DEFINE(THREAD_XTREGS_CP6, offsetof (struct thread_info, xtregs_cp));
+	DEFINE(THREAD_XTREGS_CP7, offsetof (struct thread_info, xtregs_cp));
+#endif
+	DEFINE(THREAD_XTREGS_USER, offsetof (struct thread_info, xtregs_user));
+	DEFINE(XTREGS_USER_SIZE, sizeof(xtregs_user_t));
 	DEFINE(THREAD_CURRENT_DS, offsetof (struct task_struct, thread.current_ds));

 	/* struct mm_struct */
...
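The DEFINE() macro visible in the hunk above is the standard asm-offsets trick: the C compiler evaluates offsetof()/sizeof() and plants the result as a marker line in its assembly output, which the build then rewrites into a header that assembly sources such as entry.S can include. A minimal standalone sketch of the same technique follows; the demo struct and symbol names are hypothetical, and the post-processing step is paraphrased rather than the kernel's exact Kbuild rule.

    /* asm-offsets sketch: compile with "gcc -S offsets.c" and grep the
     * resulting offsets.s for lines starting with "->".  The struct here
     * is a made-up stand-in, not the kernel's pt_regs. */
    #include <stddef.h>

    struct demo_regs {
            unsigned long pc;
            unsigned long ps;
            unsigned long areg[16];
    };

    /* Emits "->SYM <value> <expr>" into the generated assembly. */
    #define DEFINE(sym, val) \
            asm volatile("\n->" #sym " %0 " #val : : "i" (val))

    int main(void)
    {
            DEFINE(DEMO_PC, offsetof(struct demo_regs, pc));
            DEFINE(DEMO_PS, offsetof(struct demo_regs, ps));
            DEFINE(DEMO_SIZE, sizeof(struct demo_regs));
            return 0;
    }

A small sed pass over the generated .s file then turns each marker into a line like "#define DEMO_PC 0"; that is how constants such as PT_XTREGS_OPT and THREAD_CPENABLE added above become visible to the assembly code.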
@@ -8,193 +8,328 @@
  * License. See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * Copyright (C) 2003 - 2005 Tensilica Inc.
- *
- * Marc Gauthier <marc@tensilica.com> <marc@alumni.uwaterloo.ca>
+ * Copyright (C) 2003 - 2007 Tensilica Inc.
  */

-/*
- * This module contains a table that describes the layout of the various
- * custom registers and states associated with each coprocessor, as well
- * as those not associated with any coprocessor ("extra state").
- * This table is included with core dumps and is available via the ptrace
- * interface, allowing the layout of such register/state information to
- * be modified in the kernel without affecting the debugger.  Each
- * register or state is identified using a 32-bit "libdb target number"
- * assigned when the Xtensa processor is generated.
- */
-
 #include <linux/linkage.h>
+#include <asm/asm-offsets.h>
 #include <asm/processor.h>
+#include <asm/coprocessor.h>
+#include <asm/thread_info.h>
+#include <asm/uaccess.h>
+#include <asm/unistd.h>
+#include <asm/ptrace.h>
+#include <asm/current.h>
+#include <asm/pgtable.h>
+#include <asm/page.h>
+#include <asm/signal.h>
+#include <asm/tlbflush.h>

-#if XCHAL_HAVE_CP
-
-#define CP_LAST ((XCHAL_CP_MAX - 1) * COPROCESSOR_INFO_SIZE)
-
-ENTRY(release_coprocessors)
-
-	entry	a1, 16
-						# a2: task
-	movi	a3, 1 << XCHAL_CP_MAX		# a3: coprocessor-bit
-	movi	a4, coprocessor_info+CP_LAST	# a4: owner-table
-						# a5: tmp
-	movi	a6, 0				# a6: 0
-	rsil	a7, LOCKLEVEL			# a7: PS
-
-1:	/* Check if task is coprocessor owner of coprocessor[i]. */
-
-	l32i	a5, a4, COPROCESSOR_INFO_OWNER
-	srli	a3, a3, 1
-	beqz	a3, 1f
-	addi	a4, a4, -8
-	beq	a2, a5, 1b
-
-	/* Found an entry: Clear entry CPENABLE bit to disable CP. */
-
-	rsr	a5, CPENABLE
-	s32i	a6, a4, COPROCESSOR_INFO_OWNER
-	xor	a5, a3, a5
-	wsr	a5, CPENABLE
-	bnez	a3, 1b
-
-1:	wsr	a7, PS
-	rsync
-	retw
+/*
+ * Entry condition:
+ *
+ *   a0:	trashed, original value saved on stack (PT_AREG0)
+ *   a1:	a1
+ *   a2:	new stack pointer, original in DEPC
+ *   a3:	dispatch table
+ *   depc:	a2, original value saved on stack (PT_DEPC)
+ *   excsave_1:	a3
+ *
+ *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
+ *	     <  VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
+ */
+
+/* IO protection is currently unsupported. */
+
+ENTRY(fast_io_protect)
+	wsr	a0, EXCSAVE_1
+	movi	a0, unrecoverable_exception
+	callx0	a0
+
+#if XTENSA_HAVE_COPROCESSORS
+
+/*
+ * Macros for lazy context switch.
+ */
+
+#define SAVE_CP_REGS(x)						\
+	.align 4;						\
+	.Lsave_cp_regs_cp##x:					\
+	.if XTENSA_HAVE_COPROCESSOR(x);				\
+		xchal_cp##x##_store a2 a4 a5 a6 a7;		\
+	.endif;							\
+	jx	a0
+
+#define SAVE_CP_REGS_TAB(x)					\
+	.if XTENSA_HAVE_COPROCESSOR(x);				\
+		.long .Lsave_cp_regs_cp##x - .Lsave_cp_regs_jump_table;	\
+	.else;							\
+		.long 0;					\
+	.endif;							\
+	.long THREAD_XTREGS_CP##x
+
+#define LOAD_CP_REGS(x)						\
+	.align 4;						\
+	.Lload_cp_regs_cp##x:					\
+	.if XTENSA_HAVE_COPROCESSOR(x);				\
+		xchal_cp##x##_load a2 a4 a5 a6 a7;		\
+	.endif;							\
+	jx	a0
+
+#define LOAD_CP_REGS_TAB(x)					\
+	.if XTENSA_HAVE_COPROCESSOR(x);				\
+		.long .Lload_cp_regs_cp##x - .Lload_cp_regs_jump_table;	\
+	.else;							\
+		.long 0;					\
+	.endif;							\
+	.long THREAD_XTREGS_CP##x

-ENTRY(disable_coprocessor)
-	entry	sp, 16
-	rsil	a7, LOCKLEVEL
-	rsr	a3, CPENABLE
-	movi	a4, 1
-	ssl	a2
-	sll	a4, a4
-	and	a4, a3, a4
-	xor	a3, a3, a4
-	wsr	a3, CPENABLE
-	wsr	a7, PS
-	rsync
-	retw
-
-ENTRY(enable_coprocessor)
-	entry	sp, 16
-	rsil	a7, LOCKLEVEL
-	rsr	a3, CPENABLE
-	movi	a4, 1
-	ssl	a2
-	sll	a4, a4
-	or	a3, a3, a4
-	wsr	a3, CPENABLE
-	wsr	a7, PS
-	rsync
-	retw
+	SAVE_CP_REGS(0)
+	SAVE_CP_REGS(1)
+	SAVE_CP_REGS(2)
+	SAVE_CP_REGS(3)
+	SAVE_CP_REGS(4)
+	SAVE_CP_REGS(5)
+	SAVE_CP_REGS(6)
+	SAVE_CP_REGS(7)
+
+	LOAD_CP_REGS(0)
+	LOAD_CP_REGS(1)
+	LOAD_CP_REGS(2)
+	LOAD_CP_REGS(3)
+	LOAD_CP_REGS(4)
+	LOAD_CP_REGS(5)
+	LOAD_CP_REGS(6)
+	LOAD_CP_REGS(7)
+
+	.align 4
+.Lsave_cp_regs_jump_table:
+	SAVE_CP_REGS_TAB(0)
+	SAVE_CP_REGS_TAB(1)
+	SAVE_CP_REGS_TAB(2)
+	SAVE_CP_REGS_TAB(3)
+	SAVE_CP_REGS_TAB(4)
+	SAVE_CP_REGS_TAB(5)
+	SAVE_CP_REGS_TAB(6)
+	SAVE_CP_REGS_TAB(7)
+
+.Lload_cp_regs_jump_table:
+	LOAD_CP_REGS_TAB(0)
+	LOAD_CP_REGS_TAB(1)
+	LOAD_CP_REGS_TAB(2)
+	LOAD_CP_REGS_TAB(3)
+	LOAD_CP_REGS_TAB(4)
+	LOAD_CP_REGS_TAB(5)
+	LOAD_CP_REGS_TAB(6)
+	LOAD_CP_REGS_TAB(7)

-ENTRY(save_coprocessor_extra)
-	entry	sp, 16
-	xchal_extra_store_funcbody
-	retw
-
-ENTRY(restore_coprocessor_extra)
-	entry	sp, 16
-	xchal_extra_load_funcbody
-	retw
-
-ENTRY(save_coprocessor_registers)
-	entry	sp, 16
-	xchal_cpi_store_funcbody
-	retw
-
-ENTRY(restore_coprocessor_registers)
-	entry	sp, 16
-	xchal_cpi_load_funcbody
-	retw
+/*
+ * coprocessor_save(buffer, index)
+ *                    a2      a3
+ * coprocessor_load(buffer, index)
+ *                    a2      a3
+ *
+ * Save or load coprocessor registers for coprocessor 'index'.
+ * The register values are saved to or loaded from them 'buffer' address.
+ *
+ * Note that these functions don't update the coprocessor_owner information!
+ *
+ */
+
+ENTRY(coprocessor_save)
+	entry	a1, 32
+	s32i	a0, a1, 0
+	movi	a0, .Lsave_cp_regs_jump_table
+	addx8	a3, a3, a0
+	l32i	a3, a3, 0
+	beqz	a3, 1f
+	add	a0, a0, a3
+	callx0	a0
+1:	l32i	a0, a1, 0
+	retw
+
+ENTRY(coprocessor_load)
+	entry	a1, 32
+	s32i	a0, a1, 0
+	movi	a0, .Lload_cp_regs_jump_table
+	addx4	a3, a3, a0
+	l32i	a3, a3, 0
+	beqz	a3, 1f
+	add	a0, a0, a3
+	callx0	a0
+1:	l32i	a0, a1, 0
+	retw
+
+/*
+ * coprocessor_flush(struct task_info*, index)
+ *                             a2         a3
+ * coprocessor_restore(struct task_info*, index)
+ *                              a2         a3
+ *
+ * Save or load coprocessor registers for coprocessor 'index'.
+ * The register values are saved to or loaded from the coprocessor area
+ * inside the task_info structure.
+ *
+ * Note that these functions don't update the coprocessor_owner information!
+ *
+ */
+
+ENTRY(coprocessor_flush)
+	entry	a1, 32
+	s32i	a0, a1, 0
+	movi	a0, .Lsave_cp_regs_jump_table
+	addx8	a3, a3, a0
+	l32i	a4, a3, 4
+	l32i	a3, a3, 0
+	add	a2, a2, a4
+	beqz	a3, 1f
+	add	a0, a0, a3
+	callx0	a0
+1:	l32i	a0, a1, 0
+	retw
+
+ENTRY(coprocessor_restore)
+	entry	a1, 32
+	s32i	a0, a1, 0
+	movi	a0, .Lload_cp_regs_jump_table
+	addx4	a3, a3, a0
+	l32i	a4, a3, 4
+	l32i	a3, a3, 0
+	add	a2, a2, a4
+	beqz	a3, 1f
+	add	a0, a0, a3
+	callx0	a0
+1:	l32i	a0, a1, 0
+	retw

-/*
- * The Xtensa compile-time HAL (core.h) XCHAL_*_SA_CONTENTS_LIBDB macros
- * describe the contents of coprocessor & extra save areas in terms of
- * undefined CONTENTS_LIBDB_{SREG,UREG,REGF} macros.  We define these
- * latter macros here; they expand into a table of the format we want.
- * The general format is:
- *
- * CONTENTS_LIBDB_SREG(libdbnum, offset, size, align, rsv1, name, sregnum,
- *			bitmask, rsv2, rsv3)
- * CONTENTS_LIBDB_UREG(libdbnum, offset, size, align, rsv1, name, uregnum,
- *			bitmask, rsv2, rsv3)
- * CONTENTS_LIBDB_REGF(libdbnum, offset, size, align, rsv1, name, index,
- *			numentries, contentsize, regname_base,
- *			regfile_name, rsv2, rsv3)
- *
- * For this table, we only care about the <libdbnum>, <offset> and <size>
- * fields.
- */
-
-/* Map all XCHAL CONTENTS macros to the reg_entry asm macro defined below: */
-
-#define CONTENTS_LIBDB_SREG(libdbnum,offset,size,align,rsv1,name,sregnum, \
-			bitmask, rsv2, rsv3) \
-	reg_entry libdbnum, offset, size ;
-#define CONTENTS_LIBDB_UREG(libdbnum,offset,size,align,rsv1,name,uregnum, \
-			bitmask, rsv2, rsv3) \
-	reg_entry libdbnum, offset, size ;
-#define CONTENTS_LIBDB_REGF(libdbnum, offset, size, align, rsv1, name, index, \
-			numentries, contentsize, regname_base, \
-			regfile_name, rsv2, rsv3) \
-	reg_entry libdbnum, offset, size ;
-
-/* A single table entry: */
-
-	.macro	reg_entry	libdbnum, offset, size
-	.ifne	(__last_offset-(__last_group_offset+\offset))
-	/* padding entry */
-	.word	(0xFC000000+__last_offset-(__last_group_offset+\offset))
-	.endif
-	.word	\libdbnum				/* actual entry */
-	.set	__last_offset, __last_group_offset+\offset+\size
-	.endm	/* reg_entry */
-
-/* Table entry that marks the beginning of a group (coprocessor or "extra"): */
-
-	.macro	reg_group	cpnum, num_entries, align
-	.set	__last_group_offset, (__last_offset + \align- 1) & -\align
-	.ifne	\num_entries
-	.word	0xFD000000+(\cpnum<<16)+\num_entries
-	.endif
-	.endm	/* reg_group */
-
-/*
- * Register info tables.
- */
-
-	.section .rodata, "a"
-	.globl	_xtensa_reginfo_tables
-	.globl	_xtensa_reginfo_table_size
-	.align	4
-_xtensa_reginfo_table_size:
-	.word	_xtensa_reginfo_table_end - _xtensa_reginfo_tables
-
-_xtensa_reginfo_tables:
-	.set	__last_offset, 0
-	reg_group 0xFF, XCHAL_EXTRA_SA_CONTENTS_LIBDB_NUM, XCHAL_EXTRA_SA_ALIGN
-	XCHAL_EXTRA_SA_CONTENTS_LIBDB
-	reg_group 0, XCHAL_CP0_SA_CONTENTS_LIBDB_NUM, XCHAL_CP0_SA_ALIGN
-	XCHAL_CP0_SA_CONTENTS_LIBDB
-	reg_group 1, XCHAL_CP1_SA_CONTENTS_LIBDB_NUM, XCHAL_CP1_SA_ALIGN
-	XCHAL_CP1_SA_CONTENTS_LIBDB
-	reg_group 2, XCHAL_CP2_SA_CONTENTS_LIBDB_NUM, XCHAL_CP2_SA_ALIGN
-	XCHAL_CP2_SA_CONTENTS_LIBDB
-	reg_group 3, XCHAL_CP3_SA_CONTENTS_LIBDB_NUM, XCHAL_CP3_SA_ALIGN
-	XCHAL_CP3_SA_CONTENTS_LIBDB
-	reg_group 4, XCHAL_CP4_SA_CONTENTS_LIBDB_NUM, XCHAL_CP4_SA_ALIGN
-	XCHAL_CP4_SA_CONTENTS_LIBDB
-	reg_group 5, XCHAL_CP5_SA_CONTENTS_LIBDB_NUM, XCHAL_CP5_SA_ALIGN
-	XCHAL_CP5_SA_CONTENTS_LIBDB
-	reg_group 6, XCHAL_CP6_SA_CONTENTS_LIBDB_NUM, XCHAL_CP6_SA_ALIGN
-	XCHAL_CP6_SA_CONTENTS_LIBDB
-	reg_group 7, XCHAL_CP7_SA_CONTENTS_LIBDB_NUM, XCHAL_CP7_SA_ALIGN
-	XCHAL_CP7_SA_CONTENTS_LIBDB
-	.word	0xFC000000	/* invalid register number,marks end of table*/
-_xtensa_reginfo_table_end:
-#endif
+/*
+ * Entry condition:
+ *
+ *   a0:	trashed, original value saved on stack (PT_AREG0)
+ *   a1:	a1
+ *   a2:	new stack pointer, original in DEPC
+ *   a3:	dispatch table
+ *   depc:	a2, original value saved on stack (PT_DEPC)
+ *   excsave_1:	a3
+ *
+ *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
+ *	     <  VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
+ */
+
+ENTRY(fast_coprocessor_double)
+	wsr	a0, EXCSAVE_1
+	movi	a0, unrecoverable_exception
+	callx0	a0
+
+ENTRY(fast_coprocessor)
+
+	/* Save remaining registers a1-a3 and SAR */
+
+	xsr	a3, EXCSAVE_1
+	s32i	a3, a2, PT_AREG3
+	rsr	a3, SAR
+	s32i	a1, a2, PT_AREG1
+	s32i	a3, a2, PT_SAR
+	mov	a1, a2
+	rsr	a2, DEPC
+	s32i	a2, a1, PT_AREG2
+
+	/*
+	 * The hal macros require up to 4 temporary registers. We use a3..a6.
+	 */
+
+	s32i	a4, a1, PT_AREG4
+	s32i	a5, a1, PT_AREG5
+	s32i	a6, a1, PT_AREG6
+
+	/* Find coprocessor number. Subtract first CP EXCCAUSE from EXCCAUSE */
+
+	rsr	a3, EXCCAUSE
+	addi	a3, a3, -EXCCAUSE_COPROCESSOR0_DISABLED
+
+	/* Set corresponding CPENABLE bit -> (sar:cp-index, a3: 1<<cp-index)*/
+
+	ssl	a3			# SAR: 32 - coprocessor_number
+	movi	a2, 1
+	rsr	a0, CPENABLE
+	sll	a2, a2
+	or	a0, a0, a2
+	wsr	a0, CPENABLE
+	rsync
+
+	/* Retrieve previous owner. (a3 still holds CP number) */
+
+	movi	a0, coprocessor_owner	# list of owners
+	addx4	a0, a3, a0		# entry for CP
+	l32i	a4, a0, 0
+
+	beqz	a4, 1f			# skip 'save' if no previous owner
+
+	/* Disable coprocessor for previous owner. (a2 = 1 << CP number) */
+
+	l32i	a5, a4, THREAD_CPENABLE
+	xor	a5, a5, a2		# (1 << cp-id) still in a2
+	s32i	a5, a4, THREAD_CPENABLE
+
+	/*
+	 * Get context save area and 'call' save routine.
+	 * (a4 still holds previous owner (thread_info), a3 CP number)
+	 */
+
+	movi	a5, .Lsave_cp_regs_jump_table
+	movi	a0, 2f			# a0: 'return' address
+	addx8	a3, a3, a5		# a3: coprocessor number
+	l32i	a2, a3, 4		# a2: xtregs offset
+	l32i	a3, a3, 0		# a3: jump offset
+	add	a2, a2, a4
+	add	a4, a3, a5		# a4: address of save routine
+	jx	a4
+
+	/* Note that only a0 and a1 were preserved. */
+
+2:	rsr	a3, EXCCAUSE
+	addi	a3, a3, -EXCCAUSE_COPROCESSOR0_DISABLED
+	movi	a0, coprocessor_owner
+	addx4	a0, a3, a0
+
+	/* Set new 'owner' (a0 points to the CP owner, a3 contains the CP nr) */
+
+1:	GET_THREAD_INFO (a4, a1)
+	s32i	a4, a0, 0
+
+	/* Get context save area and 'call' load routine. */
+
+	movi	a5, .Lload_cp_regs_jump_table
+	movi	a0, 1f
+	addx8	a3, a3, a5
+	l32i	a2, a3, 4		# a2: xtregs offset
+	l32i	a3, a3, 0		# a3: jump offset
+	add	a2, a2, a4
+	add	a4, a3, a5
+	jx	a4
+
+	/* Restore all registers and return from exception handler. */
+
+1:	l32i	a6, a1, PT_AREG6
+	l32i	a5, a1, PT_AREG5
+	l32i	a4, a1, PT_AREG4
+
+	l32i	a0, a1, PT_SAR
+	l32i	a3, a1, PT_AREG3
+	l32i	a2, a1, PT_AREG2
+	wsr	a0, SAR
+	l32i	a0, a1, PT_AREG0
+	l32i	a1, a1, PT_AREG1
+
+	rfe
+
+	.data
+ENTRY(coprocessor_owner)
+	.fill XCHAL_CP_MAX, 4, 0
+
+#endif /* XTENSA_HAVE_COPROCESSORS */
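For readers who don't speak Xtensa assembly: fast_coprocessor above is the classic lazy context switch. Touching a disabled coprocessor traps; the handler enables the unit, spills the previous owner's registers into its thread_info save area (located through the jump table), clears the previous owner's CPENABLE shadow bit so its next access traps again, records the new owner in coprocessor_owner[], and reloads the new owner's saved state. A rough C model of that control flow follows; cp_save()/cp_load()/hw_cpenable_set() are hypothetical helpers standing in for the xchal store/load macros and the CPENABLE write, and the struct is a simplified stand-in for thread_info.

    /* C model of the lazy coprocessor switch; illustration only.  The
     * real path runs in assembly at exception level, so no extra
     * locking appears here. */
    #define XCHAL_CP_MAX 8                 /* per-config; 8 is the maximum */

    struct ti_model {
            unsigned long cpenable;        /* per-thread shadow of CPENABLE */
            /* per-coprocessor register save areas follow */
    };

    static struct ti_model *coprocessor_owner[XCHAL_CP_MAX];

    /* Hypothetical helpers standing in for xchal_cp<n>_store/_load
     * and the privileged CPENABLE update. */
    extern void cp_save(struct ti_model *ti, int cp);
    extern void cp_load(struct ti_model *ti, int cp);
    extern void hw_cpenable_set(int cp);

    void coprocessor_disabled_trap(struct ti_model *cur, int cp)
    {
            struct ti_model *prev = coprocessor_owner[cp];

            hw_cpenable_set(cp);           /* let the faulting task use the CP */

            if (prev) {
                    /* Spill prev's registers and revoke its enable bit so
                     * its next access traps and reloads. */
                    cp_save(prev, cp);
                    prev->cpenable &= ~(1UL << cp);
            }

            coprocessor_owner[cp] = cur;
            cp_load(cur, cp);              /* bring in the new owner's state */
    }

_switch_to (further down in entry.S) completes the scheme by xsr-swapping the incoming thread's cpenable shadow into the real CPENABLE register, so a task that still owns a unit keeps using it without trapping.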
@@ -25,10 +25,10 @@
 #include <asm/page.h>
 #include <asm/signal.h>
 #include <asm/tlbflush.h>
+#include <asm/variant/tie-asm.h>

 /* Unimplemented features. */

-#undef SIGNAL_HANDLING_IN_DOUBLE_EXCEPTION
 #undef KERNEL_STACK_OVERFLOW_CHECK
 #undef PREEMPTIBLE_KERNEL
 #undef ALLOCA_EXCEPTION_IN_IRAM
@@ -214,19 +214,7 @@ _user_exception:

 	/* We are back to the original stack pointer (a1) */

-2:
-#if XCHAL_EXTRA_SA_SIZE
-
-	/* For user exceptions, save the extra state into the user's TCB.
-	 * Note: We must assume that xchal_extra_store_funcbody destroys a2..a15
-	 */
-
-	GET_CURRENT(a2,a1)
-	addi	a2, a2, THREAD_CP_SAVE
-	xchal_extra_store_funcbody
-#endif
-
-	/* Now, jump to the common exception handler. */
+2:	/* Now, jump to the common exception handler. */

 	j	common_exception
@@ -382,6 +370,10 @@ common_exception:
 	s32i	a2, a1, PT_LBEG
 	s32i	a3, a1, PT_LEND

+	/* Save optional registers. */
+
+	save_xtregs_opt a1 a2 a4 a5 a6 a7 PT_XTREGS_OPT
+
 	/* Go to second-level dispatcher. Set up parameters to pass to the
 	 * exception handler and call the exception handler.
 	 */
@@ -403,74 +395,49 @@ common_exception_return:

 	/* Jump if we are returning from kernel exceptions. */

 1:	l32i	a3, a1, PT_PS
-	_bbsi.l	a3, PS_UM_BIT, 2f
-	j	kernel_exception_exit
+	_bbci.l	a3, PS_UM_BIT, 4f

 	/* Specific to a user exception exit:
 	 * We need to check some flags for signal handling and rescheduling,
 	 * and have to restore WB and WS, extra states, and all registers
 	 * in the register file that were in use in the user task.
-	 */
-
-2:	wsr	a3, PS		/* disable interrupts */
-
-	/* Check for signals (keep interrupts disabled while we read TI_FLAGS)
-	 * Note: PS.INTLEVEL = 0, PS.EXCM = 1
+	 * Note that we don't disable interrupts here.
 	 */

 	GET_THREAD_INFO(a2,a1)
 	l32i	a4, a2, TI_FLAGS

-	/* Enable interrupts again.
-	 * Note: When we get here, we certainly have handled any interrupts.
-	 * (Hint: There is only one user exception frame on stack)
-	 */
-
-	movi	a3, 1 << PS_WOE_BIT
-
 	_bbsi.l	a4, TIF_NEED_RESCHED, 3f
 	_bbci.l	a4, TIF_SIGPENDING, 4f

-#ifndef SIGNAL_HANDLING_IN_DOUBLE_EXCEPTION
 	l32i	a4, a1, PT_DEPC
 	bgeui	a4, VALID_DOUBLE_EXCEPTION_ADDRESS, 4f
-#endif

-	/* Reenable interrupts and call do_signal() */
+	/* Call do_signal() */

-	wsr	a3, PS
 	movi	a4, do_signal	# int do_signal(struct pt_regs*, sigset_t*)
 	mov	a6, a1
 	movi	a7, 0
 	callx4	a4
 	j	1b

-3:	/* Reenable interrupts and reschedule */
+3:	/* Reschedule */

-	wsr	a3, PS
 	movi	a4, schedule	# void schedule (void)
 	callx4	a4
 	j	1b

-	/* Restore the state of the task and return from the exception. */
-
-4:	/* a2 holds GET_CURRENT(a2,a1)  */
-
-#if XCHAL_EXTRA_SA_SIZE
-
-	/* For user exceptions, restore the extra state from the user's TCB. */
-
-	/* Note: a2 still contains GET_CURRENT(a2,a1) */
-	addi	a2, a2, THREAD_CP_SAVE
-	xchal_extra_load_funcbody
-
-	/* We must assume that xchal_extra_store_funcbody destroys
-	 * registers a2..a15. FIXME, this list can eventually be
-	 * reduced once real register requirements of the macro are
-	 * finalized. */
-
-#endif /* XCHAL_EXTRA_SA_SIZE */
+4:	/* Restore optional registers. */
+
+	load_xtregs_opt a1 a2 a4 a5 a6 a7 PT_XTREGS_OPT
+
+	wsr	a3, PS		/* disable interrupts */
+
+	_bbci.l	a3, PS_UM_BIT, kernel_exception_exit
+
+user_exception_exit:
+
+	/* Restore the state of the task and return from the exception. */

 	/* Switch to the user thread WINDOWBASE. Save SP temporarily in DEPC */
@@ -536,10 +503,6 @@ common_exception_return:

 kernel_exception_exit:

-	/* Disable interrupts (a3 holds PT_PS) */
-
-	wsr	a3, PS
-
 #ifdef PREEMPTIBLE_KERNEL

 #ifdef CONFIG_PREEMPT
@@ -618,6 +581,8 @@ kernel_exception_exit:

 common_exception_exit:

+	/* Restore address registers. */
+
 	_bbsi.l	a2, 1, 1f
 	l32i	a4, a1, PT_AREG4
 	l32i	a5, a1, PT_AREG5
@@ -1150,7 +1115,6 @@ CATCH
  *   excsave_1:	a3
  *
  * Note: We assume the stack pointer is EXC_TABLE_KSTK in the fixup handler.
- * Note: We don't need to save a2 in depc (return value)
  */

 ENTRY(fast_syscall_spill_registers)
@@ -1166,29 +1130,31 @@ ENTRY(fast_syscall_spill_registers)

 	rsr	a0, SAR
 	xsr	a3, EXCSAVE_1		# restore a3 and excsave_1
-	s32i	a0, a2, PT_AREG4	# store SAR to PT_AREG4
 	s32i	a3, a2, PT_AREG3
+	s32i	a4, a2, PT_AREG4
+	s32i	a0, a2, PT_AREG5	# store SAR to PT_AREG5

 	/* The spill routine might clobber a7, a11, and a15. */

-	s32i	a7, a2, PT_AREG5
-	s32i	a11, a2, PT_AREG6
-	s32i	a15, a2, PT_AREG7
+	s32i	a7, a2, PT_AREG7
+	s32i	a11, a2, PT_AREG11
+	s32i	a15, a2, PT_AREG15

-	call0	_spill_registers	# destroys a3, DEPC, and SAR
+	call0	_spill_registers	# destroys a3, a4, and SAR

 	/* Advance PC, restore registers and SAR, and return from exception. */

-	l32i	a3, a2, PT_AREG4
+	l32i	a3, a2, PT_AREG5
+	l32i	a4, a2, PT_AREG4
 	l32i	a0, a2, PT_AREG0
 	wsr	a3, SAR
 	l32i	a3, a2, PT_AREG3

 	/* Restore clobbered registers. */

-	l32i	a7, a2, PT_AREG5
-	l32i	a11, a2, PT_AREG6
-	l32i	a15, a2, PT_AREG7
+	l32i	a7, a2, PT_AREG7
+	l32i	a11, a2, PT_AREG11
+	l32i	a15, a2, PT_AREG15

 	movi	a2, 0
 	rfe
@@ -1247,16 +1213,6 @@ fast_syscall_spill_registers_fixup:
 	 * Note: This frame might be the same as above.
 	 */

-#ifdef SIGNAL_HANDLING_IN_DOUBLE_EXCEPTION
-	/* Restore registers we precautiously saved.
-	 * We have the value of the 'right' a3
-	 */
-
-	l32i	a7, a2, PT_AREG5
-	l32i	a11, a2, PT_AREG6
-	l32i	a15, a2, PT_AREG7
-#endif
-
 	/* Setup stack pointer. */

 	addi	a2, a2, -PT_USER_SIZE
@@ -1271,9 +1227,9 @@ fast_syscall_spill_registers_fixup:
 	movi	a3, exc_table
 	rsr	a0, EXCCAUSE
 	addx4	a0, a0, a3			# find entry in table
 	l32i	a0, a0, EXC_TABLE_FAST_USER	# load handler
 	jx	a0

 fast_syscall_spill_registers_fixup_return:
@@ -1290,14 +1246,6 @@ fast_syscall_spill_registers_fixup_return:
 	s32i	a2, a3, EXC_TABLE_PARAM
 	l32i	a2, a3, EXC_TABLE_KSTK

-#ifdef SIGNAL_HANDLING_IN_DOUBLE_EXCEPTION
-	/* Save registers again that might be clobbered. */
-
-	s32i	a7, a2, PT_AREG5
-	s32i	a11, a2, PT_AREG6
-	s32i	a15, a2, PT_AREG7
-#endif
-
 	/* Load WB at the time the exception occurred. */

 	rsr	a3, SAR			# WB is still in SAR
@@ -1319,7 +1267,7 @@ fast_syscall_spill_registers_fixup_return:
  * This is not a real function. The following conditions must be met:
  *
  *  - must be called with call0.
- *  - uses DEPC, a3 and SAR.
+ *  - uses a3, a4 and SAR.
  *  - the last 'valid' register of each frame are clobbered.
  *  - the caller must have registered a fixup handler
  *    (or be inside a critical section)
@@ -1331,41 +1279,39 @@ ENTRY(_spill_registers)

 	/*
 	 * Rotate ws so that the current windowbase is at bit 0.
 	 * Assume ws = xxxwww1yy (www1 current window frame).
-	 * Rotate ws right so that a2 = yyxxxwww1.
+	 * Rotate ws right so that a4 = yyxxxwww1.
 	 */

-	wsr	a2, DEPC		# preserve a2
-	rsr	a2, WINDOWBASE
-	rsr	a3, WINDOWSTART
-	ssr	a2			# holds WB
-	slli	a2, a3, WSBITS
-	or	a3, a3, a2		# a2 = xxxwww1yyxxxwww1yy
-	srl	a3, a3
+	rsr	a4, WINDOWBASE
+	rsr	a3, WINDOWSTART		# a3 = xxxwww1yy
+	ssr	a4			# holds WB
+	slli	a4, a3, WSBITS
+	or	a3, a3, a4		# a3 = xxxwww1yyxxxwww1yy
+	srl	a3, a3			# a3 = 00xxxwww1yyxxxwww1

 	/* We are done if there are no more than the current register frame. */

-	extui	a3, a3, 1, WSBITS-2	# a3 = 0yyxxxwww
-	movi	a2, (1 << (WSBITS-1))
+	extui	a3, a3, 1, WSBITS-1	# a3 = 0yyxxxwww
+	movi	a4, (1 << (WSBITS-1))
 	_beqz	a3, .Lnospill		# only one active frame? jump

 	/* We want 1 at the top, so that we return to the current windowbase */

-	or	a3, a3, a2		# 1yyxxxwww
+	or	a3, a3, a4		# 1yyxxxwww

 	/* Skip empty frames - get 'oldest' WINDOWSTART-bit. */

 	wsr	a3, WINDOWSTART		# save shifted windowstart
-	neg	a2, a3
-	and	a3, a2, a3		# first bit set from right: 000010000
+	neg	a4, a3
+	and	a3, a4, a3		# first bit set from right: 000010000

-	ffs_ws	a2, a3			# a2: shifts to skip empty frames
+	ffs_ws	a4, a3			# a4: shifts to skip empty frames
 	movi	a3, WSBITS
-	sub	a2, a3, a2		# WSBITS-a2:number of 0-bits from right
-	ssr	a2			# save in SAR for later.
+	sub	a4, a3, a4		# WSBITS-a4:number of 0-bits from right
+	ssr	a4			# save in SAR for later.

 	rsr	a3, WINDOWBASE
-	add	a3, a3, a2
-	rsr	a2, DEPC		# restore a2
+	add	a3, a3, a4
 	wsr	a3, WINDOWBASE
 	rsync
@@ -1394,6 +1340,9 @@ ENTRY(_spill_registers)
 	l32e	a4, a1, -16
 	j	.Lc12c

+.Lnospill:
+	ret
+
 .Lloop: _bbsi.l	a3, 1, .Lc4
 	_bbci.l	a3, 2, .Lc12
@@ -1419,9 +1368,7 @@ ENTRY(_spill_registers)
 	movi	a3, 1
 	sll	a3, a3
 	wsr	a3, WINDOWSTART
-
-.Lnospill:
-	jx	a0
+	ret

 .Lc4:	s32e	a4, a9, -16
 	s32e	a5, a9, -12
@@ -1830,154 +1777,6 @@ ENTRY(fast_store_prohibited)
 1:	j	_user_exception

-#if XCHAL_EXTRA_SA_SIZE
-
-#warning fast_coprocessor untested
-
-/*
- * Entry condition:
- *
- *   a0:	trashed, original value saved on stack (PT_AREG0)
- *   a1:	a1
- *   a2:	new stack pointer, original in DEPC
- *   a3:	dispatch table
- *   depc:	a2, original value saved on stack (PT_DEPC)
- *   excsave_1:	a3
- *
- *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
- *	     <  VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
- */
-
-ENTRY(fast_coprocessor_double)
-	wsr	a0, EXCSAVE_1
-	movi	a0, unrecoverable_exception
-	callx0	a0
-
-ENTRY(fast_coprocessor)
-
-	/* Fatal if we are in a double exception. */
-
-	l32i	a0, a2, PT_DEPC
-	_bgeui	a0, VALID_DOUBLE_EXCEPTION_ADDRESS, fast_coprocessor_double
-
-	/* Save some registers a1, a3, a4, SAR */
-
-	xsr	a3, EXCSAVE_1
-	s32i	a3, a2, PT_AREG3
-	rsr	a3, SAR
-	s32i	a4, a2, PT_AREG4
-	s32i	a1, a2, PT_AREG1
-	s32i	a5, a1, PT_AREG5
-	s32i	a3, a2, PT_SAR
-	mov	a1, a2
-
-	/* Currently, the HAL macros only guarantee saving a0 and a1.
-	 * These can and will be refined in the future, but for now,
-	 * just save the remaining registers of a2...a15.
-	 */
-
-	s32i	a6, a1, PT_AREG6
-	s32i	a7, a1, PT_AREG7
-	s32i	a8, a1, PT_AREG8
-	s32i	a9, a1, PT_AREG9
-	s32i	a10, a1, PT_AREG10
-	s32i	a11, a1, PT_AREG11
-	s32i	a12, a1, PT_AREG12
-	s32i	a13, a1, PT_AREG13
-	s32i	a14, a1, PT_AREG14
-	s32i	a15, a1, PT_AREG15
-
-	/* Find coprocessor number. Subtract first CP EXCCAUSE from EXCCAUSE */
-
-	rsr	a0, EXCCAUSE
-	addi	a3, a0, -XCHAL_EXCCAUSE_COPROCESSOR0_DISABLED
-
-	/* Set corresponding CPENABLE bit */
-
-	movi	a4, 1
-	ssl	a3			# SAR: 32 - coprocessor_number
-	rsr	a5, CPENABLE
-	sll	a4, a4
-	or	a4, a5, a4
-	wsr	a4, CPENABLE
-	rsync
-	movi	a5, coprocessor_info	# list of owner and offset into cp_save
-	addx8	a0, a4, a5		# entry for CP
-	bne	a4, a5, .Lload		# bit wasn't set before, cp not in use
-
-	/* Now compare the current task with the owner of the coprocessor.
-	 * If they are the same, there is no reason to save or restore any
-	 * coprocessor state. Having already enabled the coprocessor,
-	 * branch ahead to return.
-	 */
-
-	GET_CURRENT(a5,a1)
-	l32i	a4, a0, COPROCESSOR_INFO_OWNER	# a4: current owner for this CP
-	beq	a4, a5, .Ldone
-
-	/* Find location to dump current coprocessor state:
-	 *  task_struct->task_cp_save_offset + coprocessor_offset[coprocessor]
-	 *
-	 * Note: a0 pointer to the entry in the coprocessor owner table,
-	 *	 a3 coprocessor number,
-	 *	 a4 current owner of coprocessor.
-	 */
-
-	l32i	a5, a0, COPROCESSOR_INFO_OFFSET
-	addi	a2, a4, THREAD_CP_SAVE
-	add	a2, a2, a5
-
-	/* Store current coprocessor states. (a5 still has CP number) */
-
-	xchal_cpi_store_funcbody
-
-	/* The macro might have destroyed a3 (coprocessor number), but
-	 * SAR still has 32 - coprocessor_number!
-	 */
-
-	movi	a3, 32
-	rsr	a4, SAR
-	sub	a3, a3, a4
-
-.Lload:	/* A new task now owns the corpocessors. Save its TCB pointer into
-	 * the coprocessor owner table.
-	 *
-	 * Note: a0 pointer to the entry in the coprocessor owner table,
-	 *	 a3 coprocessor number.
-	 */
-
-	GET_CURRENT(a4,a1)
-	s32i	a4, a0, 0
-
-	/* Find location from where to restore the current coprocessor state.*/
-
-	l32i	a5, a0, COPROCESSOR_INFO_OFFSET
-	addi	a2, a4, THREAD_CP_SAVE
-	add	a2, a2, a4
-
-	xchal_cpi_load_funcbody
-
-	/* We must assume that the xchal_cpi_store_funcbody macro destroyed
-	 * registers a2..a15.
-	 */
-
-.Ldone:	l32i	a15, a1, PT_AREG15
-	l32i	a14, a1, PT_AREG14
-	l32i	a13, a1, PT_AREG13
-	l32i	a12, a1, PT_AREG12
-	l32i	a11, a1, PT_AREG11
-	l32i	a10, a1, PT_AREG10
-	l32i	a9, a1, PT_AREG9
-	l32i	a8, a1, PT_AREG8
-	l32i	a7, a1, PT_AREG7
-	l32i	a6, a1, PT_AREG6
-	l32i	a5, a1, PT_AREG5
-	l32i	a4, a1, PT_AREG4
-	l32i	a3, a1, PT_AREG3
-	l32i	a2, a1, PT_AREG2
-	l32i	a0, a1, PT_AREG0
-	l32i	a1, a1, PT_AREG1
-
-	rfe
-
-#endif /* XCHAL_EXTRA_SA_SIZE */
-
 /*
  * System Calls.
  *
@@ -2086,20 +1885,36 @@ ENTRY(_switch_to)

 	entry	a1, 16

-	mov	a4, a3			# preserve a3
+	mov	a12, a2			# preserve 'prev'	(a2)
+	mov	a13, a3			# and 'next'		(a3)

-	s32i	a0, a2, THREAD_RA	# save return address
-	s32i	a1, a2, THREAD_SP	# save stack pointer
+	l32i	a4, a2, TASK_THREAD_INFO
+	l32i	a5, a3, TASK_THREAD_INFO
+
+	save_xtregs_user a4 a6 a8 a9 a10 a11 THREAD_XTREGS_USER
+
+	s32i	a0, a12, THREAD_RA	# save return address
+	s32i	a1, a12, THREAD_SP	# save stack pointer

-	/* Disable ints while we manipulate the stack pointer; spill regs. */
+	/* Disable ints while we manipulate the stack pointer. */

-	movi	a5, (1 << PS_EXCM_BIT) | LOCKLEVEL
-	xsr	a5, PS
+	movi	a14, (1 << PS_EXCM_BIT) | LOCKLEVEL
+	xsr	a14, PS
 	rsr	a3, EXCSAVE_1
 	rsync
 	s32i	a3, a3, EXC_TABLE_FIXUP	/* enter critical section */

-	call0	_spill_registers
+	/* Switch CPENABLE */
+
+#if (XTENSA_HAVE_COPROCESSORS || XTENSA_HAVE_IO_PORTS)
+	l32i	a3, a5, THREAD_CPENABLE
+	xsr	a3, CPENABLE
+	s32i	a3, a4, THREAD_CPENABLE
+#endif
+
+	/* Flush register file. */
+
+	call0	_spill_registers	# destroys a3, a4, and SAR

 	/* Set kernel stack (and leave critical section)
 	 * Note: It's save to set it here. The stack will not be overwritten
@@ -2107,19 +1922,21 @@ ENTRY(_switch_to)
 	 * we return from kernel space.
 	 */

-	l32i	a0, a4, TASK_THREAD_INFO
 	rsr	a3, EXCSAVE_1		# exc_table
-	movi	a1, 0
-	addi	a0, a0, PT_REGS_OFFSET
-	s32i	a1, a3, EXC_TABLE_FIXUP
-	s32i	a0, a3, EXC_TABLE_KSTK
+	movi	a6, 0
+	addi	a7, a5, PT_REGS_OFFSET
+	s32i	a6, a3, EXC_TABLE_FIXUP
+	s32i	a7, a3, EXC_TABLE_KSTK

 	/* restore context of the task that 'next' addresses */

-	l32i	a0, a4, THREAD_RA	/* restore return address */
-	l32i	a1, a4, THREAD_SP	/* restore stack pointer */
+	l32i	a0, a13, THREAD_RA	# restore return address
+	l32i	a1, a13, THREAD_SP	# restore stack pointer
+
+	load_xtregs_user a5 a6 a8 a9 a10 a11 THREAD_XTREGS_USER

-	wsr	a5, PS
+	wsr	a14, PS
+	mov	a2, a12			# return 'prev'
 	rsync
 	retw
...
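The reworked _spill_registers keeps its scratch state in a3/a4 instead of parking a2 in DEPC, which is why its contract changes from "uses DEPC, a3 and SAR" to "uses a3, a4 and SAR". Its first step, rotating WINDOWSTART so the current WINDOWBASE lands at bit 0, is the same computation the C side performs when exporting registers. A small sketch follows; the WSBITS value is assumed for illustration.

    /* Rotate a WINDOWSTART bitmask right by WINDOWBASE, as the assembly
     * above does with ssr/slli/or/srl.  The expression matches the one
     * used in xtensa_elf_core_copy_regs() and ptrace_getregs() below.
     * Example: wb = 3, ws = 0x0009 -> 0x2001 (the window at base 3
     * moves to bit 0, the one at base 0 wraps to bit 13). */
    #define WSBITS 16   /* XCHAL_NUM_AREGS / 4; 16 assumed here */

    static unsigned long rotate_ws(unsigned long ws, unsigned long wb)
    {
            return ((ws >> wb) | (ws << (WSBITS - wb))) & ((1UL << WSBITS) - 1);
    }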
@@ -28,7 +28,7 @@ void *module_alloc(unsigned long size)
 {
 	if (size == 0)
 		return NULL;
-	return vmalloc(size);
+	return vmalloc_exec(size);
 }

 void module_free(struct module *mod, void *module_region)
...
@@ -52,6 +52,55 @@ void (*pm_power_off)(void) = NULL;
 EXPORT_SYMBOL(pm_power_off);

+#if XTENSA_HAVE_COPROCESSORS
+
+void coprocessor_release_all(struct thread_info *ti)
+{
+	unsigned long cpenable;
+	int i;
+
+	/* Make sure we don't switch tasks during this operation. */
+
+	preempt_disable();
+
+	/* Walk through all cp owners and release it for the requested one. */
+
+	cpenable = ti->cpenable;
+
+	for (i = 0; i < XCHAL_CP_MAX; i++) {
+		if (coprocessor_owner[i] == ti) {
+			coprocessor_owner[i] = 0;
+			cpenable &= ~(1 << i);
+		}
+	}
+
+	ti->cpenable = cpenable;
+	coprocessor_clear_cpenable();
+
+	preempt_enable();
+}
+
+void coprocessor_flush_all(struct thread_info *ti)
+{
+	unsigned long cpenable;
+	int i;
+
+	preempt_disable();
+
+	cpenable = ti->cpenable;
+
+	for (i = 0; i < XCHAL_CP_MAX; i++) {
+		if ((cpenable & 1) != 0 && coprocessor_owner[i] == ti)
+			coprocessor_flush(ti, i);
+		cpenable >>= 1;
+	}
+
+	preempt_enable();
+}
+
+#endif
+
 /*
  * Powermanagement idle function, if any is provided by the platform.
  */
@@ -71,15 +120,36 @@ void cpu_idle(void)
 }

 /*
- * Free current thread data structures etc..
+ * This is called when the thread calls exit().
  */
 void exit_thread(void)
 {
+#if XTENSA_HAVE_COPROCESSORS
+	coprocessor_release_all(current_thread_info());
+#endif
 }

+/*
+ * Flush thread state. This is called when a thread does an execve()
+ * Note that we flush coprocessor registers for the case execve fails.
+ */
 void flush_thread(void)
 {
+#if XTENSA_HAVE_COPROCESSORS
+	struct thread_info *ti = current_thread_info();
+	coprocessor_flush_all(ti);
+	coprocessor_release_all(ti);
+#endif
+}
+
+/*
+ * This is called before the thread is copied.
+ */
+void prepare_to_copy(struct task_struct *tsk)
+{
+#if XTENSA_HAVE_COPROCESSORS
+	coprocessor_flush_all(task_thread_info(tsk));
+#endif
 }

 /*
@@ -107,6 +177,7 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
 		struct task_struct * p, struct pt_regs * regs)
 {
 	struct pt_regs *childregs;
+	struct thread_info *ti;
 	unsigned long tos;
 	int user_mode = user_mode(regs);
@@ -128,13 +199,14 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
 	p->set_child_tid = p->clear_child_tid = NULL;

 	p->thread.ra = MAKE_RA_FOR_CALL((unsigned long)ret_from_fork, 0x1);
 	p->thread.sp = (unsigned long)childregs;

 	if (user_mode(regs)) {

 		int len = childregs->wmask & ~0xf;
 		childregs->areg[1] = usp;
 		memcpy(&childregs->areg[XCHAL_NUM_AREGS - len/4],
 		       &regs->areg[XCHAL_NUM_AREGS - len/4], len);

+		// FIXME: we need to set THREADPTR in thread_info...
 		if (clone_flags & CLONE_SETTLS)
 			childregs->areg[2] = childregs->areg[6];
@@ -142,6 +214,12 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
 		/* In kernel space, we start a new thread with a new stack. */
 		childregs->wmask = 1;
 	}
+
+#if (XTENSA_HAVE_COPROCESSORS || XTENSA_HAVE_IO_PORTS)
+	ti = task_thread_info(p);
+	ti->cpenable = 0;
+#endif
+
 	return 0;
 }
@@ -179,10 +257,6 @@ unsigned long get_wchan(struct task_struct *p)
 }

 /*
- * do_copy_regs() gathers information from 'struct pt_regs' and
- * 'current->thread.areg[]' to fill in the xtensa_gregset_t
- * structure.
- *
  * xtensa_gregset_t and 'struct pt_regs' are vastly different formats
  * of processor registers. Besides different ordering,
  * xtensa_gregset_t contains non-live register information that
@@ -191,18 +265,19 @@ unsigned long get_wchan(struct task_struct *p)
  *
  */

-void do_copy_regs (xtensa_gregset_t *elfregs, struct pt_regs *regs,
-		   struct task_struct *tsk)
+void xtensa_elf_core_copy_regs (xtensa_gregset_t *elfregs, struct pt_regs *regs)
 {
-	int i, n, wb_offset;
+	unsigned long wb, ws, wm;
+	int live, last;

-	elfregs->xchal_config_id0 = XCHAL_HW_CONFIGID0;
-	elfregs->xchal_config_id1 = XCHAL_HW_CONFIGID1;
+	wb = regs->windowbase;
+	ws = regs->windowstart;
+	wm = regs->wmask;
+	ws = ((ws >> wb) | (ws << (WSBITS - wb))) & ((1 << WSBITS) - 1);

-	__asm__ __volatile__ ("rsr %0, 176\n" : "=a" (i));
-	elfregs->cpux = i;
-	__asm__ __volatile__ ("rsr %0, 208\n" : "=a" (i));
-	elfregs->cpuy = i;
+	/* Don't leak any random bits. */
+
+	memset(elfregs, 0, sizeof (elfregs));

 	/* Note: PS.EXCM is not set while user task is running; its
 	 * being set in regs->ps is for exception handling convenience.
@@ -210,204 +285,22 @@ void do_copy_regs (xtensa_gregset_t *elfregs, struct pt_regs *regs,
 	elfregs->pc		= regs->pc;
 	elfregs->ps		= (regs->ps & ~(1 << PS_EXCM_BIT));
-	elfregs->exccause	= regs->exccause;
-	elfregs->excvaddr	= regs->excvaddr;
-	elfregs->windowbase	= regs->windowbase;
-	elfregs->windowstart	= regs->windowstart;
 	elfregs->lbeg		= regs->lbeg;
 	elfregs->lend		= regs->lend;
 	elfregs->lcount		= regs->lcount;
 	elfregs->sar		= regs->sar;
-	elfregs->syscall	= regs->syscall;
+	elfregs->windowstart	= ws;

-	/* Copy register file.
-	 * The layout looks like this:
-	 *
-	 * |  a0 ... a15  | Z ... Z |  arX ... arY  |
-	 *  current window  unused    saved frames
-	 */
-
-	memset (elfregs->ar, 0, sizeof(elfregs->ar));
-
-	wb_offset = regs->windowbase * 4;
-	n = (regs->wmask&1)? 4 : (regs->wmask&2)? 8 : (regs->wmask&4)? 12 : 16;
-
-	for (i = 0; i < n; i++)
-		elfregs->ar[(wb_offset + i) % XCHAL_NUM_AREGS] = regs->areg[i];
-
-	n = (regs->wmask >> 4) * 4;
-
-	for (i = XCHAL_NUM_AREGS - n; n > 0; i++, n--)
-		elfregs->ar[(wb_offset + i) % XCHAL_NUM_AREGS] = regs->areg[i];
+	live = (wm & 2) ? 4 : (wm & 4) ? 8 : (wm & 8) ? 12 : 16;
+	last = XCHAL_NUM_AREGS - (wm >> 4) * 4;
+	memcpy(elfregs->a, regs->areg, live * 4);
+	memcpy(elfregs->a + last, regs->areg + last, (wm >> 4) * 16);
 }

-void xtensa_elf_core_copy_regs (xtensa_gregset_t *elfregs, struct pt_regs *regs)
+int dump_fpu(void)
 {
-	do_copy_regs ((xtensa_gregset_t *)elfregs, regs, current);
-}
-
-/* The inverse of do_copy_regs().  No error or sanity checking. */
-
-void do_restore_regs (xtensa_gregset_t *elfregs, struct pt_regs *regs,
-		      struct task_struct *tsk)
-{
-	int i, n, wb_offset;
-
-	/* Note: PS.EXCM is not set while user task is running; it
-	 * needs to be set in regs->ps is for exception handling convenience.
-	 */
-
-	regs->pc		= elfregs->pc;
-	regs->ps		= (elfregs->ps | (1 << PS_EXCM_BIT));
-	regs->exccause		= elfregs->exccause;
-	regs->excvaddr		= elfregs->excvaddr;
-	regs->windowbase	= elfregs->windowbase;
-	regs->windowstart	= elfregs->windowstart;
-	regs->lbeg		= elfregs->lbeg;
-	regs->lend		= elfregs->lend;
-	regs->lcount		= elfregs->lcount;
-	regs->sar		= elfregs->sar;
-	regs->syscall		= elfregs->syscall;
-
-	/* Clear everything. */
-
-	memset (regs->areg, 0, sizeof(regs->areg));
-
-	/* Copy regs from live window frame. */
-
-	wb_offset = regs->windowbase * 4;
-	n = (regs->wmask&1)? 4 : (regs->wmask&2)? 8 : (regs->wmask&4)? 12 : 16;
-
-	for (i = 0; i < n; i++)
-		regs->areg[(wb_offset+i) % XCHAL_NUM_AREGS] = elfregs->ar[i];
-
-	n = (regs->wmask >> 4) * 4;
-
-	for (i = XCHAL_NUM_AREGS - n; n > 0; i++, n--)
-		regs->areg[(wb_offset+i) % XCHAL_NUM_AREGS] = elfregs->ar[i];
-}
-
-/*
- * do_save_fpregs() gathers information from 'struct pt_regs' and
- * 'current->thread' to fill in the elf_fpregset_t structure.
- *
- * Core files and ptrace use elf_fpregset_t.
- */
-
-void do_save_fpregs (elf_fpregset_t *fpregs, struct pt_regs *regs,
-		     struct task_struct *tsk)
-{
-#if XCHAL_HAVE_CP
-	extern unsigned char	_xtensa_reginfo_tables[];
-	extern unsigned		_xtensa_reginfo_table_size;
-	int i;
-	unsigned long flags;
-
-	/* Before dumping coprocessor state from memory,
-	 * ensure any live coprocessor contents for this
-	 * task are first saved to memory:
-	 */
-	local_irq_save(flags);
-
-	for (i = 0; i < XCHAL_CP_MAX; i++) {
-		if (tsk == coprocessor_info[i].owner) {
-			enable_coprocessor(i);
-			save_coprocessor_registers(
-			    tsk->thread.cp_save+coprocessor_info[i].offset,i);
-			disable_coprocessor(i);
-		}
-	}
-
-	local_irq_restore(flags);
-
-	/* Now dump coprocessor & extra state: */
-	memcpy((unsigned char*)fpregs,
-		_xtensa_reginfo_tables, _xtensa_reginfo_table_size);
-	memcpy((unsigned char*)fpregs + _xtensa_reginfo_table_size,
-		tsk->thread.cp_save, XTENSA_CP_EXTRA_SIZE);
-#endif
-}
-
-/*
- * The inverse of do_save_fpregs().
- * Copies coprocessor and extra state from fpregs into regs and tsk->thread.
- * Returns 0 on success, non-zero if layout doesn't match.
- */
-
-int do_restore_fpregs (elf_fpregset_t *fpregs, struct pt_regs *regs,
-		       struct task_struct *tsk)
-{
-#if XCHAL_HAVE_CP
-	extern unsigned char	_xtensa_reginfo_tables[];
-	extern unsigned		_xtensa_reginfo_table_size;
-	int i;
-	unsigned long flags;
-
-	/* Make sure save area layouts match.
-	 * FIXME: in the future we could allow restoring from
-	 * a different layout of the same registers, by comparing
-	 * fpregs' table with _xtensa_reginfo_tables and matching
-	 * entries and copying registers one at a time.
-	 * Not too sure yet whether that's very useful.
-	 */
-
-	if( memcmp((unsigned char*)fpregs,
-		_xtensa_reginfo_tables, _xtensa_reginfo_table_size) ) {
-		return -1;
-	}
-
-	/* Before restoring coprocessor state from memory,
-	 * ensure any live coprocessor contents for this
-	 * task are first invalidated.
-	 */
-
-	local_irq_save(flags);
-
-	for (i = 0; i < XCHAL_CP_MAX; i++) {
-		if (tsk == coprocessor_info[i].owner) {
-			enable_coprocessor(i);
-			save_coprocessor_registers(
-			    tsk->thread.cp_save+coprocessor_info[i].offset,i);
-			coprocessor_info[i].owner = 0;
-			disable_coprocessor(i);
-		}
-	}
-
-	local_irq_restore(flags);
-
-	/* Now restore coprocessor & extra state: */
-
-	memcpy(tsk->thread.cp_save,
-	       (unsigned char*)fpregs + _xtensa_reginfo_table_size,
-	       XTENSA_CP_EXTRA_SIZE);
-#endif
 	return 0;
 }

-/*
- * Fill in the CP structure for a core dump for a particular task.
- */
-
-int
-dump_task_fpu(struct pt_regs *regs, struct task_struct *task, elf_fpregset_t *r)
-{
-	return 0;	/* no coprocessors active on this processor */
-}
-
-/*
- * Fill in the CP structure for a core dump.
- * This includes any FPU coprocessor.
- * Here, we dump all coprocessors, and other ("extra") custom state.
- *
- * This function is called by elf_core_dump() in fs/binfmt_elf.c
- * (in which case 'regs' comes from calls to do_coredump, see signals.c).
- */
-
-int dump_fpu(struct pt_regs *regs, elf_fpregset_t *r)
-{
-	return dump_task_fpu(regs, current, r);
-}
-
 asmlinkage
 long xtensa_clone(unsigned long clone_flags, unsigned long newsp,
@@ -421,8 +314,8 @@ long xtensa_clone(unsigned long clone_flags, unsigned long newsp,
 }

 /*
- * * xtensa_execve() executes a new program.
- * */
+ * xtensa_execve() executes a new program.
+ */

 asmlinkage
 long xtensa_execve(char __user *name, char __user * __user *argv,
@@ -437,7 +330,6 @@ long xtensa_execve(char __user *name, char __user * __user *argv,
 	error = PTR_ERR(filename);
 	if (IS_ERR(filename))
 		goto out;
-	// FIXME: release coprocessor??
 	error = do_execve(filename, argv, envp, regs);
 	if (error == 0) {
 		task_lock(current);
...
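The new xtensa_elf_core_copy_regs() above replaces the old per-register loops with two memcpy()s driven by pt_regs->wmask: the low bits encode how many registers of the current window (4, 8, 12, or 16) are live at the bottom of areg[], and wmask >> 4 counts additional 4-register call frames saved at the top of the array. A small worked example of that decoding follows; the sample wmask value is made up, and the field semantics are paraphrased from the diff.

    /* Decode pt_regs->wmask the way the copy-out code above does.
     * Illustrative only; run it to see the arithmetic. */
    #include <stdio.h>

    #define XCHAL_NUM_AREGS 64

    int main(void)
    {
            unsigned long wm = 0x23;        /* hypothetical sample value */

            /* live registers of the current window at areg[0..live-1] */
            int live = (wm & 2) ? 4 : (wm & 4) ? 8 : (wm & 8) ? 12 : 16;

            /* first index of the (wm >> 4) frames stored at the top */
            int last = XCHAL_NUM_AREGS - (wm >> 4) * 4;

            /* prints: live=4 last=56 top_frames=2 */
            printf("live=%d last=%d top_frames=%lu\n", live, last, wm >> 4);
            return 0;
    }

The same live computation reappears in ptrace_getregs() in the ptrace.c hunk below, which copies the register file out through __put_user() one word at a time.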
@@ -4,7 +4,7 @@
  * License. See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * Copyright (C) 2001 - 2005  Tensilica Inc.
+ * Copyright (C) 2001 - 2007  Tensilica Inc.
  *
  * Joe Taylor	<joe@tensilica.com, joetylr@yahoo.com>
  * Chris Zankel	<chris@zankel.net>
@@ -28,14 +28,10 @@
 #include <asm/uaccess.h>
 #include <asm/ptrace.h>
 #include <asm/elf.h>
+#include <asm/coprocessor.h>

-#define TEST_KERNEL	// verify kernel operations FIXME: remove

 /*
- * Called by kernel/ptrace.c when detaching..
- *
- * Make sure single step bits etc are not set.
+ * Called by kernel/ptrace.c when detaching to disable single stepping.
  */

 void ptrace_disable(struct task_struct *child)
@@ -43,136 +39,237 @@ void ptrace_disable(struct task_struct *child)
 	/* Nothing to do.. */
 }

-long arch_ptrace(struct task_struct *child, long request, long addr, long data)
+int ptrace_getregs(struct task_struct *child, void __user *uregs)
 {
-	int ret = -EPERM;
+	struct pt_regs *regs = task_pt_regs(child);
+	xtensa_gregset_t __user *gregset = uregs;
+	unsigned long wm = regs->wmask;
+	unsigned long wb = regs->windowbase;
+	int live, i;
+
+	if (!access_ok(VERIFY_WRITE, uregs, sizeof(xtensa_gregset_t)))
+		return -EIO;
+
+	__put_user(regs->pc, &gregset->pc);
+	__put_user(regs->ps & ~(1 << PS_EXCM_BIT), &gregset->ps);
+	__put_user(regs->lbeg, &gregset->lbeg);
+	__put_user(regs->lend, &gregset->lend);
+	__put_user(regs->lcount, &gregset->lcount);
+	__put_user(regs->windowstart, &gregset->windowstart);
+	__put_user(regs->windowbase, &gregset->windowbase);
+
+	live = (wm & 2) ? 4 : (wm & 4) ? 8 : (wm & 8) ? 12 : 16;
+
+	for (i = 0; i < live; i++)
+		__put_user(regs->areg[i],gregset->a+((wb*4+i)%XCHAL_NUM_AREGS));
+	for (i = XCHAL_NUM_AREGS - (wm >> 4) * 4; i < XCHAL_NUM_AREGS; i++)
+		__put_user(regs->areg[i],gregset->a+((wb*4+i)%XCHAL_NUM_AREGS));
+
+	return 0;
+}
+
+int ptrace_setregs(struct task_struct *child, void __user *uregs)
+{
+	struct pt_regs *regs = task_pt_regs(child);
+	xtensa_gregset_t *gregset = uregs;
+	const unsigned long ps_mask = PS_CALLINC_MASK | PS_OWB_MASK;
+	unsigned long ps;
+	unsigned long wb;
+
+	if (!access_ok(VERIFY_WRITE, uregs, sizeof(xtensa_gregset_t)))
+		return -EIO;
+
+	__get_user(regs->pc, &gregset->pc);
+	__get_user(ps, &gregset->ps);
+	__get_user(regs->lbeg, &gregset->lbeg);
+	__get_user(regs->lend, &gregset->lend);
+	__get_user(regs->lcount, &gregset->lcount);
+	__get_user(regs->windowstart, &gregset->windowstart);
+	__get_user(wb, &gregset->windowbase);
+
+	regs->ps = (regs->ps & ~ps_mask) | (ps & ps_mask) | (1 << PS_EXCM_BIT);
+
+	if (wb >= XCHAL_NUM_AREGS / 4)
+		return -EFAULT;
+
+	regs->windowbase = wb;
+
+	if (wb != 0 && __copy_from_user(regs->areg + XCHAL_NUM_AREGS - wb * 4,
+					gregset->a, wb * 16))
+		return -EFAULT;
+
+	if (__copy_from_user(regs->areg, gregset->a + wb*4, (WSBITS-wb) * 16))
+		return -EFAULT;
+
+	return 0;
+}
+
+int ptrace_getxregs(struct task_struct *child, void __user *uregs)
+{
+	struct pt_regs *regs = task_pt_regs(child);
+	struct thread_info *ti = task_thread_info(child);
+	elf_xtregs_t __user *xtregs = uregs;
+	int ret = 0;
+
+	if (!access_ok(VERIFY_WRITE, uregs, sizeof(elf_xtregs_t)))
+		return -EIO;
+
+#if XTENSA_HAVE_COPROCESSORS
+	/* Flush all coprocessor registers to memory. */
+	coprocessor_flush_all(ti);
+	ret |= __copy_to_user(&xtregs->cp0, &ti->xtregs_cp,
+			      sizeof(xtregs_coprocessor_t));
+#endif
+	ret |= __copy_to_user(&xtregs->opt, &regs->xtregs_opt,
+			      sizeof(xtregs->opt));
+	ret |= __copy_to_user(&xtregs->user,&ti->xtregs_user,
+			      sizeof(xtregs->user));
+
+	return ret ? -EFAULT : 0;
+}
+
+int ptrace_setxregs(struct task_struct *child, void __user *uregs)
+{
+	struct thread_info *ti = task_thread_info(child);
+	struct pt_regs *regs = task_pt_regs(child);
+	elf_xtregs_t *xtregs = uregs;
+	int ret = 0;
+
+#if XTENSA_HAVE_COPROCESSORS
+	/* Flush all coprocessors before we overwrite them. */
+	coprocessor_flush_all(ti);
+	coprocessor_release_all(ti);
+
+	ret |= __copy_from_user(&ti->xtregs_cp, &xtregs->cp0,
+				sizeof(xtregs_coprocessor_t));
+#endif
+	ret |= __copy_from_user(&regs->xtregs_opt, &xtregs->opt,
+				sizeof(xtregs->opt));
+	ret |= __copy_from_user(&ti->xtregs_user, &xtregs->user,
+				sizeof(xtregs->user));
+
+	return ret ? -EFAULT : 0;
+}
+
+int ptrace_peekusr(struct task_struct *child, long regno, long __user *ret)
+{
+	struct pt_regs *regs;
+	unsigned long tmp;

-	switch (request) {
-	case PTRACE_PEEKTEXT: /* read word at location addr. */
-	case PTRACE_PEEKDATA:
-		ret = generic_ptrace_peekdata(child, addr, data);
-		goto out;
-
-	/* Read the word at location addr in the USER area.  */
-
-	case PTRACE_PEEKUSR:
-		{
-		struct pt_regs *regs;
-		unsigned long tmp;
-
 	regs = task_pt_regs(child);
 	tmp = 0;  /* Default return value. */

-	switch(addr) {
+	switch(regno) {
 	case REG_AR_BASE ... REG_AR_BASE + XCHAL_NUM_AREGS - 1:
-		{
-		int ar = addr - REG_AR_BASE - regs->windowbase * 4;
-		ar &= (XCHAL_NUM_AREGS - 1);
-		if (ar < 16 && ar + (regs->wmask >> 4) * 4 >= 0)
-			tmp = regs->areg[ar];
-		else
-			ret = -EIO;
+		tmp = regs->areg[regno - REG_AR_BASE];
 		break;
-		}

 	case REG_A_BASE ... REG_A_BASE + 15:
-		tmp = regs->areg[addr - REG_A_BASE];
+		tmp = regs->areg[regno - REG_A_BASE];
 		break;

 	case REG_PC:
 		tmp = regs->pc;
 		break;

 	case REG_PS:
 		/* Note: PS.EXCM is not set while user task is running;
 		 * its being set in regs is for exception handling
 		 * convenience. */
 		tmp = (regs->ps & ~(1 << PS_EXCM_BIT));
 		break;

 	case REG_WB:
-		tmp = regs->windowbase;
-		break;
+		break;		/* tmp = 0 */

 	case REG_WS:
-		tmp = regs->windowstart;
+		{
+		unsigned long wb = regs->windowbase;
+		unsigned long ws = regs->windowstart;
+		tmp = ((ws>>wb) | (ws<<(WSBITS-wb))) & ((1<<WSBITS)-1);
 		break;
+		}

 	case REG_LBEG:
 		tmp = regs->lbeg;
 		break;

 	case REG_LEND:
 		tmp = regs->lend;
 		break;

 	case REG_LCOUNT:
 		tmp = regs->lcount;
 		break;

 	case REG_SAR:
 		tmp = regs->sar;
 		break;

-	case REG_DEPC:
-		tmp = regs->depc;
-		break;
-
-	case REG_EXCCAUSE:
-		tmp = regs->exccause;
-		break;
-
-	case REG_EXCVADDR:
-		tmp = regs->excvaddr;
-		break;
-
 	case SYSCALL_NR:
 		tmp = regs->syscall;
 		break;

-	default:
-		tmp = 0;
-		ret = -EIO;
-		goto out;
-	}
-	ret = put_user(tmp, (unsigned long *) data);
-	goto out;
-	}
-
-	case PTRACE_POKETEXT: /* write the word at location addr. */
-	case PTRACE_POKEDATA:
-		ret = generic_ptrace_pokedata(child, addr, data);
-		goto out;
+	default:
+		return -EIO;
+	}
+	return put_user(tmp, ret);
+}

-	case PTRACE_POKEUSR:
-		{
+int ptrace_pokeusr(struct task_struct *child, long regno, long val)
+{
 	struct pt_regs *regs;
 	regs = task_pt_regs(child);

-	switch (addr) {
+	switch (regno) {
 	case REG_AR_BASE ... REG_AR_BASE + XCHAL_NUM_AREGS - 1:
-		{
-		int ar = addr - REG_AR_BASE - regs->windowbase * 4;
-		if (ar < 16 && ar + (regs->wmask >> 4) * 4 >= 0)
-			regs->areg[ar & (XCHAL_NUM_AREGS - 1)] = data;
-		else
-			ret = -EIO;
+		regs->areg[regno - REG_AR_BASE] = val;
 		break;
-		}

 	case REG_A_BASE ... REG_A_BASE + 15:
-		regs->areg[addr - REG_A_BASE] = data;
+		regs->areg[regno - REG_A_BASE] = val;
 		break;

 	case REG_PC:
-		regs->pc = data;
+		regs->pc = val;
 		break;

 	case SYSCALL_NR:
-		regs->syscall = data;
+		regs->syscall = val;
 		break;

-#ifdef TEST_KERNEL
-	case REG_WB:
-		regs->windowbase = data;
-		break;
-
-	case REG_WS:
-		regs->windowstart = data;
-		break;
-#endif
-
 	default:
-		/* The rest are not allowed. */
-		ret = -EIO;
-		break;
-	}
-	break;
-	}
+		return -EIO;
+	}
+	return 0;
+}
+
+long arch_ptrace(struct task_struct *child, long request, long addr, long data)
+{
+	int ret = -EPERM;
+
+	switch (request) {
+	case PTRACE_PEEKTEXT:	/* read word at location addr. */
+	case PTRACE_PEEKDATA:
+		ret = generic_ptrace_peekdata(child, addr, data);
+		break;
+
+	case PTRACE_PEEKUSR:	/* read register specified by addr. */
+		ret = ptrace_peekusr(child, addr, (void __user *) data);
+		break;
+
+	case PTRACE_POKETEXT:	/* write the word at location addr. */
+	case PTRACE_POKEDATA:
+		ret = generic_ptrace_pokedata(child, addr, data);
+		break;
+
+	case PTRACE_POKEUSR:	/* write register specified by addr. */
+		ret = ptrace_pokeusr(child, addr, data);
+		break;

 	/* continue and stop at next (return from) syscall */
+
 	case PTRACE_SYSCALL:
 	case PTRACE_CONT: /* restart after signal. */
 	{
...@@ -217,98 +314,26 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data) ...@@ -217,98 +314,26 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
break; break;
case PTRACE_GETREGS: case PTRACE_GETREGS:
{ ret = ptrace_getregs(child, (void __user *) data);
/* 'data' points to user memory in which to write.
* Mainly due to the non-live register values, we
* reformat the register values into something more
* standard. For convenience, we use the handy
* elf_gregset_t format. */
xtensa_gregset_t format;
struct pt_regs *regs = task_pt_regs(child);
do_copy_regs (&format, regs, child);
/* Now, copy to user space nice and easy... */
ret = 0;
if (copy_to_user((void *)data, &format, sizeof(elf_gregset_t)))
ret = -EFAULT;
break; break;
}
case PTRACE_SETREGS: case PTRACE_SETREGS:
{ ret = ptrace_setregs(child, (void __user *) data);
/* 'data' points to user memory that contains the new
* values in the elf_gregset_t format. */
xtensa_gregset_t format;
struct pt_regs *regs = task_pt_regs(child);
if (copy_from_user(&format,(void *)data,sizeof(elf_gregset_t))){
ret = -EFAULT;
break;
}
/* FIXME: Perhaps we want some sanity checks on
* these user-space values? See ARM version. Are
* debuggers a security concern? */
do_restore_regs (&format, regs, child);
ret = 0;
break;
}
case PTRACE_GETFPREGS:
{
/* 'data' points to user memory in which to write.
* For convenience, we use the handy
* elf_fpregset_t format. */
elf_fpregset_t fpregs;
struct pt_regs *regs = task_pt_regs(child);
do_save_fpregs (&fpregs, regs, child);
/* Now, copy to user space nice and easy... */
ret = 0;
if (copy_to_user((void *)data, &fpregs, sizeof(elf_fpregset_t)))
ret = -EFAULT;
break; break;
}
case PTRACE_SETFPREGS:
{
/* 'data' points to user memory that contains the new
* values in the elf_fpregset_t format.
*/
elf_fpregset_t fpregs;
struct pt_regs *regs = task_pt_regs(child);
ret = 0; case PTRACE_GETXTREGS:
if (copy_from_user(&fpregs, (void *)data, sizeof(elf_fpregset_t))) { ret = ptrace_getxregs(child, (void __user *) data);
ret = -EFAULT;
break;
}
if (do_restore_fpregs (&fpregs, regs, child))
ret = -EIO;
break; break;
}
case PTRACE_GETFPREGSIZE: case PTRACE_SETXTREGS:
/* 'data' points to 'unsigned long' set to the size ret = ptrace_setxregs(child, (void __user *) data);
* of elf_fpregset_t
*/
ret = put_user(sizeof(elf_fpregset_t), (unsigned long *) data);
break; break;
default: default:
ret = ptrace_request(child, request, addr, data); ret = ptrace_request(child, request, addr, data);
goto out; break;
} }
out:
return ret; return ret;
} }
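
[Annotation: with the flat register numbering introduced by this series (REG_PC becomes 0x0020 in the new asm/ptrace.h), reading the child's program counter is a plain PEEKUSR. A hedged sketch using the raw syscall, since on xtensa the kernel put_user()s the value through the 'data' pointer, whereas the libc ptrace() wrapper returns the peeked value itself for PEEK requests:]

    #include <sys/syscall.h>
    #include <sys/types.h>
    #include <unistd.h>

    #define REG_PC          0x0020  /* flat numbering from the new asm/ptrace.h */
    #define PTRACE_PEEKUSR  3

    /* 'addr' selects the register, 'data' receives its value. */
    static long read_child_pc(pid_t pid, unsigned long *pc)
    {
            return syscall(SYS_ptrace, PTRACE_PEEKUSR, pid, REG_PC, pc);
    }
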
......
...@@ -60,11 +60,6 @@ struct ide_ops *ide_ops; ...@@ -60,11 +60,6 @@ struct ide_ops *ide_ops;
extern struct rtc_ops no_rtc_ops; extern struct rtc_ops no_rtc_ops;
struct rtc_ops *rtc_ops; struct rtc_ops *rtc_ops;
#ifdef CONFIG_PC_KEYB
extern struct kbd_ops no_kbd_ops;
struct kbd_ops *kbd_ops;
#endif
#ifdef CONFIG_BLK_DEV_INITRD #ifdef CONFIG_BLK_DEV_INITRD
extern void *initrd_start; extern void *initrd_start;
extern void *initrd_end; extern void *initrd_end;
......
...@@ -35,13 +35,17 @@ asmlinkage int do_signal(struct pt_regs *regs, sigset_t *oldset); ...@@ -35,13 +35,17 @@ asmlinkage int do_signal(struct pt_regs *regs, sigset_t *oldset);
extern struct task_struct *coproc_owners[]; extern struct task_struct *coproc_owners[];
extern void release_all_cp (struct task_struct *);
struct rt_sigframe struct rt_sigframe
{ {
struct siginfo info; struct siginfo info;
struct ucontext uc; struct ucontext uc;
cp_state_t cpstate; struct {
xtregs_opt_t opt;
xtregs_user_t user;
#if XTENSA_HAVE_COPROCESSORS
xtregs_coprocessor_t cp;
#endif
} xtregs;
unsigned char retcode[6]; unsigned char retcode[6];
unsigned int window[4]; unsigned int window[4];
}; };
...@@ -49,8 +53,6 @@ struct rt_sigframe ...@@ -49,8 +53,6 @@ struct rt_sigframe
/* /*
* Flush register windows stored in pt_regs to stack. * Flush register windows stored in pt_regs to stack.
* Returns 1 for errors. * Returns 1 for errors.
*
* Note that windowbase, windowstart, and wmask are not updated!
*/ */
int int
...@@ -116,6 +118,9 @@ flush_window_regs_user(struct pt_regs *regs) ...@@ -116,6 +118,9 @@ flush_window_regs_user(struct pt_regs *regs)
base += inc; base += inc;
} }
regs->wmask = 1;
regs->windowstart = 1 << wb;
return 0; return 0;
errout: errout:
...@@ -131,9 +136,10 @@ flush_window_regs_user(struct pt_regs *regs) ...@@ -131,9 +136,10 @@ flush_window_regs_user(struct pt_regs *regs)
*/ */
static int static int
setup_sigcontext(struct sigcontext __user *sc, cp_state_t *cpstate, setup_sigcontext(struct rt_sigframe __user *frame, struct pt_regs *regs)
struct pt_regs *regs, unsigned long mask)
{ {
struct sigcontext __user *sc = &frame->uc.uc_mcontext;
struct thread_info *ti = current_thread_info();
int err = 0; int err = 0;
#define COPY(x) err |= __put_user(regs->x, &sc->sc_##x) #define COPY(x) err |= __put_user(regs->x, &sc->sc_##x)
...@@ -147,23 +153,32 @@ setup_sigcontext(struct sigcontext __user *sc, cp_state_t *cpstate, ...@@ -147,23 +153,32 @@ setup_sigcontext(struct sigcontext __user *sc, cp_state_t *cpstate,
err |= flush_window_regs_user(regs); err |= flush_window_regs_user(regs);
err |= __copy_to_user (sc->sc_a, regs->areg, 16 * 4); err |= __copy_to_user (sc->sc_a, regs->areg, 16 * 4);
err |= __put_user(0, &sc->sc_xtregs);
// err |= __copy_to_user (sc->sc_a, regs->areg, XCHAL_NUM_AREGS * 4) if (err)
return err;
#if XCHAL_HAVE_CP #if XTENSA_HAVE_COPROCESSORS
# error Coprocessors unsupported coprocessor_flush_all(ti);
err |= save_cpextra(cpstate); coprocessor_release_all(ti);
err |= __put_user(err ? NULL : cpstate, &sc->sc_cpstate); err |= __copy_to_user(&frame->xtregs.cp, &ti->xtregs_cp,
sizeof (frame->xtregs.cp));
#endif #endif
/* non-iBCS2 extensions.. */ err |= __copy_to_user(&frame->xtregs.opt, &regs->xtregs_opt,
err |= __put_user(mask, &sc->oldmask); sizeof (xtregs_opt_t));
err |= __copy_to_user(&frame->xtregs.user, &ti->xtregs_user,
sizeof (xtregs_user_t));
err |= __put_user(err ? NULL : &frame->xtregs, &sc->sc_xtregs);
return err; return err;
} }
static int static int
restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc) restore_sigcontext(struct pt_regs *regs, struct rt_sigframe __user *frame)
{ {
struct sigcontext __user *sc = &frame->uc.uc_mcontext;
struct thread_info *ti = current_thread_info();
unsigned int err = 0; unsigned int err = 0;
unsigned long ps; unsigned long ps;
...@@ -181,6 +196,8 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc) ...@@ -181,6 +196,8 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
regs->windowbase = 0; regs->windowbase = 0;
regs->windowstart = 1; regs->windowstart = 1;
regs->syscall = -1; /* disable syscall checks */
/* For PS, restore only PS.CALLINC. /* For PS, restore only PS.CALLINC.
* Assume that all other bits are either the same as for the signal * Assume that all other bits are either the same as for the signal
* handler, or the user mode value doesn't matter (e.g. PS.OWB). * handler, or the user mode value doesn't matter (e.g. PS.OWB).
...@@ -196,8 +213,9 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc) ...@@ -196,8 +213,9 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
err |= __copy_from_user(regs->areg, sc->sc_a, 16 * 4); err |= __copy_from_user(regs->areg, sc->sc_a, 16 * 4);
#if XCHAL_HAVE_CP if (err)
# error Coprocessors unsupported return err;
/* The signal handler may have used coprocessors in which /* The signal handler may have used coprocessors in which
* case they are still enabled. We disable them to force a * case they are still enabled. We disable them to force a
* reloading of the original task's CP state by the lazy * reloading of the original task's CP state by the lazy
...@@ -205,20 +223,20 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc) ...@@ -205,20 +223,20 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
* Also, we essentially discard any coprocessor state that the * Also, we essentially discard any coprocessor state that the
* signal handler created. */ * signal handler created. */
if (!err) { #if XTENSA_HAVE_COPROCESSORS
struct task_struct *tsk = current; coprocessor_release_all(ti);
release_all_cp(tsk); err |= __copy_from_user(&ti->xtregs_cp, &frame->xtregs.cp,
err |= __copy_from_user(tsk->thread.cpextra, sc->sc_cpstate, sizeof (frame->xtregs.cp));
XTENSA_CP_EXTRA_SIZE);
}
#endif #endif
err |= __copy_from_user(&ti->xtregs_user, &frame->xtregs.user,
sizeof (xtregs_user_t));
err |= __copy_from_user(&regs->xtregs_opt, &frame->xtregs.opt,
sizeof (xtregs_opt_t));
regs->syscall = -1; /* disable syscall checks */
return err; return err;
} }
/* /*
* Do a signal return; undo the signal stack. * Do a signal return; undo the signal stack.
*/ */
...@@ -247,7 +265,7 @@ asmlinkage long xtensa_rt_sigreturn(long a0, long a1, long a2, long a3, ...@@ -247,7 +265,7 @@ asmlinkage long xtensa_rt_sigreturn(long a0, long a1, long a2, long a3,
recalc_sigpending(); recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock); spin_unlock_irq(&current->sighand->siglock);
if (restore_sigcontext(regs, &frame->uc.uc_mcontext)) if (restore_sigcontext(regs, frame))
goto badframe; goto badframe;
ret = regs->areg[2]; ret = regs->areg[2];
...@@ -360,18 +378,22 @@ static void setup_frame(int sig, struct k_sigaction *ka, siginfo_t *info, ...@@ -360,18 +378,22 @@ static void setup_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
err |= __put_user(sas_ss_flags(regs->areg[1]), err |= __put_user(sas_ss_flags(regs->areg[1]),
&frame->uc.uc_stack.ss_flags); &frame->uc.uc_stack.ss_flags);
err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size); err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
err |= setup_sigcontext(&frame->uc.uc_mcontext, &frame->cpstate, err |= setup_sigcontext(frame, regs);
regs, set->sig[0]);
err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
/* Create sys_rt_sigreturn syscall in stack frame */ if (ka->sa.sa_flags & SA_RESTORER) {
ra = (unsigned long)ka->sa.sa_restorer;
} else {
err |= gen_return_code(frame->retcode); /* Create sys_rt_sigreturn syscall in stack frame */
if (err) { err |= gen_return_code(frame->retcode);
goto give_sigsegv;
if (err) {
goto give_sigsegv;
}
ra = (unsigned long) frame->retcode;
} }
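
[Annotation: with SA_RESTORER now honored, a C library can point the kernel at its own return trampoline instead of having the kernel synthesize one in the stack frame. A hedged user-space sketch, assuming direct use of the kernel sigaction ABI (the trampoline stub named here is hypothetical, and libc headers may not expose SA_RESTORER):]

    #include <signal.h>

    #ifndef SA_RESTORER
    #define SA_RESTORER 0x04000000  /* kernel ABI flag, hidden by libc */
    #endif

    extern void my_sigreturn_stub(void);  /* hypothetical trampoline that
                                           * invokes rt_sigreturn */

    static void install_handler(int sig, void (*fn)(int))
    {
            struct sigaction sa;

            sigemptyset(&sa.sa_mask);
            sa.sa_handler = fn;
            sa.sa_flags = SA_RESTORER;        /* take the new branch above */
            sa.sa_restorer = my_sigreturn_stub;
            sigaction(sig, &sa, NULL);
    }
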
/* /*
* Create signal handler execution context. * Create signal handler execution context.
...@@ -385,7 +407,6 @@ static void setup_frame(int sig, struct k_sigaction *ka, siginfo_t *info, ...@@ -385,7 +407,6 @@ static void setup_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
/* Set up a stack frame for a call4 /* Set up a stack frame for a call4
* Note: PS.CALLINC is set to one by start_thread * Note: PS.CALLINC is set to one by start_thread
*/ */
ra = (unsigned long) frame->retcode;
regs->areg[4] = (((unsigned long) ra) & 0x3fffffff) | 0x40000000; regs->areg[4] = (((unsigned long) ra) & 0x3fffffff) | 0x40000000;
regs->areg[6] = (unsigned long) signal; regs->areg[6] = (unsigned long) signal;
regs->areg[7] = (unsigned long) &frame->info; regs->areg[7] = (unsigned long) &frame->info;
......
...@@ -118,28 +118,28 @@ static dispatch_init_table_t __initdata dispatch_init_table[] = { ...@@ -118,28 +118,28 @@ static dispatch_init_table_t __initdata dispatch_init_table[] = {
{ EXCCAUSE_STORE_CACHE_ATTRIBUTE, 0, do_page_fault }, { EXCCAUSE_STORE_CACHE_ATTRIBUTE, 0, do_page_fault },
{ EXCCAUSE_LOAD_CACHE_ATTRIBUTE, 0, do_page_fault }, { EXCCAUSE_LOAD_CACHE_ATTRIBUTE, 0, do_page_fault },
/* XCCHAL_EXCCAUSE_FLOATING_POINT unhandled */ /* XCCHAL_EXCCAUSE_FLOATING_POINT unhandled */
#if (XCHAL_CP_MASK & 1) #if XTENSA_HAVE_COPROCESSOR(0)
COPROCESSOR(0), COPROCESSOR(0),
#endif #endif
#if (XCHAL_CP_MASK & 2) #if XTENSA_HAVE_COPROCESSOR(1)
COPROCESSOR(1), COPROCESSOR(1),
#endif #endif
#if (XCHAL_CP_MASK & 4) #if XTENSA_HAVE_COPROCESSOR(2)
COPROCESSOR(2), COPROCESSOR(2),
#endif #endif
#if (XCHAL_CP_MASK & 8) #if XTENSA_HAVE_COPROCESSOR(3)
COPROCESSOR(3), COPROCESSOR(3),
#endif #endif
#if (XCHAL_CP_MASK & 16) #if XTENSA_HAVE_COPROCESSOR(4)
COPROCESSOR(4), COPROCESSOR(4),
#endif #endif
#if (XCHAL_CP_MASK & 32) #if XTENSA_HAVE_COPROCESSOR(5)
COPROCESSOR(5), COPROCESSOR(5),
#endif #endif
#if (XCHAL_CP_MASK & 64) #if XTENSA_HAVE_COPROCESSOR(6)
COPROCESSOR(6), COPROCESSOR(6),
#endif #endif
#if (XCHAL_CP_MASK & 128) #if XTENSA_HAVE_COPROCESSOR(7)
COPROCESSOR(7), COPROCESSOR(7),
#endif #endif
{ EXCCAUSE_MAPPED_DEBUG, 0, do_debug }, { EXCCAUSE_MAPPED_DEBUG, 0, do_debug },
...@@ -349,9 +349,7 @@ void show_regs(struct pt_regs * regs) ...@@ -349,9 +349,7 @@ void show_regs(struct pt_regs * regs)
wmask = regs->wmask & ~1; wmask = regs->wmask & ~1;
for (i = 0; i < 32; i++) { for (i = 0; i < 16; i++) {
if (wmask & (1 << (i / 4)))
break;
if ((i % 8) == 0) if ((i % 8) == 0)
printk ("\n" KERN_INFO "a%02d: ", i); printk ("\n" KERN_INFO "a%02d: ", i);
printk("%08lx ", regs->areg[i]); printk("%08lx ", regs->areg[i]);
......
...@@ -136,7 +136,9 @@ SECTIONS ...@@ -136,7 +136,9 @@ SECTIONS
__init_begin = .; __init_begin = .;
.init.text : { .init.text : {
_sinittext = .; _sinittext = .;
*(.init.literal) INIT_TEXT *(.init.literal) *(.cpuinit.literal)
*(.devinit.literal) *(.meminit.literal)
INIT_TEXT
_einittext = .; _einittext = .;
} }
...@@ -161,6 +163,8 @@ SECTIONS ...@@ -161,6 +163,8 @@ SECTIONS
.DoubleExceptionVector.literal); .DoubleExceptionVector.literal);
RELOCATE_ENTRY(_DoubleExceptionVector_text, RELOCATE_ENTRY(_DoubleExceptionVector_text,
.DoubleExceptionVector.text); .DoubleExceptionVector.text);
RELOCATE_ENTRY(_DebugInterruptVector_text,
.DebugInterruptVector.text);
__boot_reloc_table_end = ABSOLUTE(.) ; __boot_reloc_table_end = ABSOLUTE(.) ;
} }
......
...@@ -25,7 +25,6 @@ ...@@ -25,7 +25,6 @@
#include <linux/swap.h> #include <linux/swap.h>
#include <linux/pagemap.h> #include <linux/pagemap.h>
#include <asm/pgtable.h>
#include <asm/bootparam.h> #include <asm/bootparam.h>
#include <asm/mmu_context.h> #include <asm/mmu_context.h>
#include <asm/tlb.h> #include <asm/tlb.h>
...@@ -181,9 +180,9 @@ update_mmu_cache(struct vm_area_struct * vma, unsigned long addr, pte_t pte) ...@@ -181,9 +180,9 @@ update_mmu_cache(struct vm_area_struct * vma, unsigned long addr, pte_t pte)
#else #else
if (!PageReserved(page) && !test_bit(PG_arch_1, &page->flags) if (!PageReserved(page) && !test_bit(PG_arch_1, &page->flags)
&& (vma->vm_flags & VM_EXEC) != 0) { && (vma->vm_flags & VM_EXEC) != 0) {
unsigned long vaddr = addr & PAGE_MASK; unsigned long paddr = (unsigned long) page_address(page);
__flush_dcache_page(vaddr); __flush_dcache_page(paddr);
__invalidate_icache_page(vaddr); __invalidate_icache_page(paddr);
set_bit(PG_arch_1, &page->flags); set_bit(PG_arch_1, &page->flags);
} }
#endif #endif
......
...@@ -309,7 +309,7 @@ void show_mem(void) ...@@ -309,7 +309,7 @@ void show_mem(void)
struct kmem_cache *pgtable_cache __read_mostly; struct kmem_cache *pgtable_cache __read_mostly;
static void pgd_ctor(void *addr, struct kmem_cache *cache, unsigned long flags) static void pgd_ctor(struct kmem_cache *cache, void* addr)
{ {
pte_t* ptep = (pte_t*)addr; pte_t* ptep = (pte_t*)addr;
int i; int i;
......
...@@ -295,7 +295,7 @@ ENTRY(__tlbtemp_mapping_itlb) ...@@ -295,7 +295,7 @@ ENTRY(__tlbtemp_mapping_itlb)
ENTRY(__invalidate_icache_page_alias) ENTRY(__invalidate_icache_page_alias)
entry sp, 16 entry sp, 16
addi a6, a3, (PAGE_KERNEL | _PAGE_HW_WRITE) addi a6, a3, (PAGE_KERNEL_EXEC | _PAGE_HW_WRITE)
mov a4, a2 mov a4, a2
witlb a6, a2 witlb a6, a2
isync isync
......
...@@ -43,6 +43,7 @@ static DEFINE_SPINLOCK(timer_lock); ...@@ -43,6 +43,7 @@ static DEFINE_SPINLOCK(timer_lock);
int errno; int errno;
static int __simc (int a, int b, int c, int d, int e, int f) __attribute__((__noinline__));
static int __simc (int a, int b, int c, int d, int e, int f) static int __simc (int a, int b, int c, int d, int e, int f)
{ {
int ret; int ret;
......
...@@ -31,7 +31,6 @@ ...@@ -31,7 +31,6 @@
#include <linux/bootmem.h> #include <linux/bootmem.h>
#include <linux/ethtool.h> #include <linux/ethtool.h>
#include <linux/rtnetlink.h> #include <linux/rtnetlink.h>
#include <linux/timer.h>
#include <linux/platform_device.h> #include <linux/platform_device.h>
#include <asm/platform/simcall.h> #include <asm/platform/simcall.h>
...@@ -108,6 +107,7 @@ struct iss_net_private { ...@@ -108,6 +107,7 @@ struct iss_net_private {
static int errno; static int errno;
static int __simc (int a, int b, int c, int d, int e, int f) __attribute__((__noinline__));
static int __simc (int a, int b, int c, int d, int e, int f) static int __simc (int a, int b, int c, int d, int e, int f)
{ {
int ret; int ret;
......
...@@ -70,6 +70,8 @@ extern void __flush_invalidate_dcache_page_alias(unsigned long, unsigned long); ...@@ -70,6 +70,8 @@ extern void __flush_invalidate_dcache_page_alias(unsigned long, unsigned long);
#endif #endif
#if (ICACHE_WAY_SIZE > PAGE_SIZE) #if (ICACHE_WAY_SIZE > PAGE_SIZE)
extern void __invalidate_icache_page_alias(unsigned long, unsigned long); extern void __invalidate_icache_page_alias(unsigned long, unsigned long);
#else
# define __invalidate_icache_page_alias(v,p) do { } while(0)
#endif #endif
/* /*
......
...@@ -5,81 +5,173 @@ ...@@ -5,81 +5,173 @@
* License. See the file "COPYING" in the main directory of this archive * License. See the file "COPYING" in the main directory of this archive
* for more details. * for more details.
* *
* Copyright (C) 2003 - 2005 Tensilica Inc. * Copyright (C) 2003 - 2007 Tensilica Inc.
*/ */
#ifndef _XTENSA_COPROCESSOR_H #ifndef _XTENSA_COPROCESSOR_H
#define _XTENSA_COPROCESSOR_H #define _XTENSA_COPROCESSOR_H
#include <asm/variant/core.h> #include <linux/stringify.h>
#include <asm/variant/tie.h> #include <asm/variant/tie.h>
#include <asm/types.h>
#ifdef __ASSEMBLY__
# include <asm/variant/tie-asm.h>
.macro xchal_sa_start a b
.set .Lxchal_pofs_, 0
.set .Lxchal_ofs_, 0
.endm
.macro xchal_sa_align ptr minofs maxofs ofsalign totalign
.set .Lxchal_ofs_, .Lxchal_ofs_ + .Lxchal_pofs_ + \totalign - 1
.set .Lxchal_ofs_, (.Lxchal_ofs_ & -\totalign) - .Lxchal_pofs_
.endm
#define _SELECT ( XTHAL_SAS_TIE | XTHAL_SAS_OPT \
| XTHAL_SAS_CC \
| XTHAL_SAS_CALR | XTHAL_SAS_CALE )
.macro save_xtregs_opt ptr clb at1 at2 at3 at4 offset
.if XTREGS_OPT_SIZE > 0
addi \clb, \ptr, \offset
xchal_ncp_store \clb \at1 \at2 \at3 \at4 select=_SELECT
.endif
.endm
.macro load_xtregs_opt ptr clb at1 at2 at3 at4 offset
.if XTREGS_OPT_SIZE > 0
addi \clb, \ptr, \offset
xchal_ncp_load \clb \at1 \at2 \at3 \at4 select=_SELECT
.endif
.endm
#undef _SELECT
#define _SELECT ( XTHAL_SAS_TIE | XTHAL_SAS_OPT \
| XTHAL_SAS_NOCC \
| XTHAL_SAS_CALR | XTHAL_SAS_CALE | XTHAL_SAS_GLOB )
.macro save_xtregs_user ptr clb at1 at2 at3 at4 offset
.if XTREGS_USER_SIZE > 0
addi \clb, \ptr, \offset
xchal_ncp_store \clb \at1 \at2 \at3 \at4 select=_SELECT
.endif
.endm
.macro load_xtregs_user ptr clb at1 at2 at3 at4 offset
.if XTREGS_USER_SIZE > 0
addi \clb, \ptr, \offset
xchal_ncp_load \clb \at1 \at2 \at3 \at4 select=_SELECT
.endif
.endm
#undef _SELECT
#endif /* __ASSEMBLY__ */
#if !XCHAL_HAVE_CP
#define XTENSA_CP_EXTRA_OFFSET 0
#define XTENSA_CP_EXTRA_ALIGN 1 /* must be a power of 2 */
#define XTENSA_CP_EXTRA_SIZE 0
#else
#define XTOFS(last_start,last_size,align) \
((last_start+last_size+align-1) & -align)
#define XTENSA_CP_EXTRA_OFFSET 0
#define XTENSA_CP_EXTRA_ALIGN XCHAL_EXTRA_SA_ALIGN
#define XTENSA_CPE_CP0_OFFSET \
XTOFS(XTENSA_CP_EXTRA_OFFSET, XCHAL_EXTRA_SA_SIZE, XCHAL_CP0_SA_ALIGN)
#define XTENSA_CPE_CP1_OFFSET \
XTOFS(XTENSA_CPE_CP0_OFFSET, XCHAL_CP0_SA_SIZE, XCHAL_CP1_SA_ALIGN)
#define XTENSA_CPE_CP2_OFFSET \
XTOFS(XTENSA_CPE_CP1_OFFSET, XCHAL_CP1_SA_SIZE, XCHAL_CP2_SA_ALIGN)
#define XTENSA_CPE_CP3_OFFSET \
XTOFS(XTENSA_CPE_CP2_OFFSET, XCHAL_CP2_SA_SIZE, XCHAL_CP3_SA_ALIGN)
#define XTENSA_CPE_CP4_OFFSET \
XTOFS(XTENSA_CPE_CP3_OFFSET, XCHAL_CP3_SA_SIZE, XCHAL_CP4_SA_ALIGN)
#define XTENSA_CPE_CP5_OFFSET \
XTOFS(XTENSA_CPE_CP4_OFFSET, XCHAL_CP4_SA_SIZE, XCHAL_CP5_SA_ALIGN)
#define XTENSA_CPE_CP6_OFFSET \
XTOFS(XTENSA_CPE_CP5_OFFSET, XCHAL_CP5_SA_SIZE, XCHAL_CP6_SA_ALIGN)
#define XTENSA_CPE_CP7_OFFSET \
XTOFS(XTENSA_CPE_CP6_OFFSET, XCHAL_CP6_SA_SIZE, XCHAL_CP7_SA_ALIGN)
#define XTENSA_CP_EXTRA_SIZE \
XTOFS(XTENSA_CPE_CP7_OFFSET, XCHAL_CP7_SA_SIZE, 16)
#if XCHAL_CP_NUM > 0
# ifndef __ASSEMBLY__
/*
 * Tasks that own contents of (last user) each coprocessor.
 * Entries are 0 for not-owned or non-existent coprocessors.
 * Note: The size of this structure is fixed to 8 bytes in entry.S
 */
/*
 * XTENSA_HAVE_COPROCESSOR(x) returns 1 if coprocessor x is configured.
 *
 * XTENSA_HAVE_IO_PORT(x) returns 1 if io-port x is configured.
 *
 */
typedef struct {
struct task_struct *owner; /* owner */
int offset; /* offset in cpextra space. */
} coprocessor_info_t;
# else
# define COPROCESSOR_INFO_OWNER 0
# define COPROCESSOR_INFO_OFFSET 4
# define COPROCESSOR_INFO_SIZE 8
# endif
#endif
#endif /* XCHAL_HAVE_CP */
#define XTENSA_HAVE_COPROCESSOR(x) \
((XCHAL_CP_MASK ^ XCHAL_CP_PORT_MASK) & (1 << (x)))
#define XTENSA_HAVE_COPROCESSORS \
(XCHAL_CP_MASK ^ XCHAL_CP_PORT_MASK)
#define XTENSA_HAVE_IO_PORT(x) \
(XCHAL_CP_PORT_MASK & (1 << (x)))
#define XTENSA_HAVE_IO_PORTS \
XCHAL_CP_PORT_MASK
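
[Annotation: the XOR works because XCHAL_CP_PORT_MASK is a subset of XCHAL_CP_MASK, so it strips the XLMI/port entries and leaves only true coprocessors. A self-contained illustration with made-up config values:]

    #include <assert.h>

    /* Made-up variant: CPs 0 and 7 exist (mask 0x81), but CP 7 is an
     * io-port (port mask 0x80), so only CP 0 is a true coprocessor. */
    #define XCHAL_CP_MASK      0x81
    #define XCHAL_CP_PORT_MASK 0x80
    #define XTENSA_HAVE_COPROCESSOR(x) \
            ((XCHAL_CP_MASK ^ XCHAL_CP_PORT_MASK) & (1 << (x)))
    #define XTENSA_HAVE_IO_PORT(x) (XCHAL_CP_PORT_MASK & (1 << (x)))

    int main(void)
    {
            assert(XTENSA_HAVE_COPROCESSOR(0));
            assert(!XTENSA_HAVE_COPROCESSOR(7));
            assert(XTENSA_HAVE_IO_PORT(7));
            return 0;
    }
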
#ifndef __ASSEMBLY__ #ifndef __ASSEMBLY__
# if XCHAL_CP_NUM > 0
struct task_struct;
extern void release_coprocessors (struct task_struct*);
extern void save_coprocessor_registers(void*, int);
# else
# define release_coprocessors(task)
# endif
typedef unsigned char cp_state_t[XTENSA_CP_EXTRA_SIZE]
__attribute__ ((aligned (XTENSA_CP_EXTRA_ALIGN)));
#endif /* !__ASSEMBLY__ */ #if XCHAL_HAVE_CP
#define RSR_CPENABLE(x) do { \
__asm__ __volatile__("rsr %0," __stringify(CPENABLE) : "=a" (x)); \
} while(0);
#define WSR_CPENABLE(x) do { \
__asm__ __volatile__("wsr %0," __stringify(CPENABLE) "; rsync" \
:: "a" (x)); \
} while(0);
#endif /* XCHAL_HAVE_CP */
/*
* Additional registers.
* We define three types of additional registers:
* ext: extra registers that are used by the compiler
* cpn: optional registers that can be used by a user application
* cpX: coprocessor registers that can only be used if the corresponding
* CPENABLE bit is set.
*/
#define XCHAL_SA_REG(list,cc,abi,type,y,name,z,align,size,...) \
__REG ## list (cc, abi, type, name, size, align)
#define __REG0(cc,abi,t,name,s,a) __REG0_ ## cc (abi,name)
#define __REG1(cc,abi,t,name,s,a) __REG1_ ## cc (name)
#define __REG2(cc,abi,type,...) __REG2_ ## type (__VA_ARGS__)
#define __REG0_0(abi,name)
#define __REG0_1(abi,name) __REG0_1 ## abi (name)
#define __REG0_10(name) __u32 name;
#define __REG0_11(name) __u32 name;
#define __REG0_12(name)
#define __REG1_0(name) __u32 name;
#define __REG1_1(name)
#define __REG2_0(n,s,a) __u32 name;
#define __REG2_1(n,s,a) unsigned char n[s] __attribute__ ((aligned(a)));
#define __REG2_2(n,s,a) unsigned char n[s] __attribute__ ((aligned(a)));
typedef struct { XCHAL_NCP_SA_LIST(0) } xtregs_opt_t
__attribute__ ((aligned (XCHAL_NCP_SA_ALIGN)));
typedef struct { XCHAL_NCP_SA_LIST(1) } xtregs_user_t
__attribute__ ((aligned (XCHAL_NCP_SA_ALIGN)));
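
[Annotation: the list macros above deserve a concrete trace. A hypothetical walk-through; the tie.h entry shown is illustrative, not from a real variant:]

    /*
     * Assume a variant tie.h entry for a compiler-used (cc=1),
     * non-coprocessor register "threadptr" with abi=0 and type=0:
     *
     *   XCHAL_SA_REG(0,1,0,0,y,threadptr,z,4,4,...)
     *    -> __REG0(1, 0, 0, threadptr, 4, 4)
     *    -> __REG0_1(0, threadptr)
     *    -> __REG0_10(threadptr)
     *    -> __u32 threadptr;
     *
     * i.e. xtregs_opt_t picks up one __u32 field per compiler-visible
     * optional register, while list 2 (__REG2_*) lays out opaque,
     * alignment-padded save areas for coprocessor state.
     */
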
#if XTENSA_HAVE_COPROCESSORS
typedef struct { XCHAL_CP0_SA_LIST(2) } xtregs_cp0_t
__attribute__ ((aligned (XCHAL_CP0_SA_ALIGN)));
typedef struct { XCHAL_CP1_SA_LIST(2) } xtregs_cp1_t
__attribute__ ((aligned (XCHAL_CP1_SA_ALIGN)));
typedef struct { XCHAL_CP2_SA_LIST(2) } xtregs_cp2_t
__attribute__ ((aligned (XCHAL_CP2_SA_ALIGN)));
typedef struct { XCHAL_CP3_SA_LIST(2) } xtregs_cp3_t
__attribute__ ((aligned (XCHAL_CP3_SA_ALIGN)));
typedef struct { XCHAL_CP4_SA_LIST(2) } xtregs_cp4_t
__attribute__ ((aligned (XCHAL_CP4_SA_ALIGN)));
typedef struct { XCHAL_CP5_SA_LIST(2) } xtregs_cp5_t
__attribute__ ((aligned (XCHAL_CP5_SA_ALIGN)));
typedef struct { XCHAL_CP6_SA_LIST(2) } xtregs_cp6_t
__attribute__ ((aligned (XCHAL_CP6_SA_ALIGN)));
typedef struct { XCHAL_CP7_SA_LIST(2) } xtregs_cp7_t
__attribute__ ((aligned (XCHAL_CP7_SA_ALIGN)));
extern struct thread_info* coprocessor_owner[XCHAL_CP_MAX];
extern void coprocessor_save(void*, int);
extern void coprocessor_load(void*, int);
extern void coprocessor_flush(struct thread_info*, int);
extern void coprocessor_restore(struct thread_info*, int);
extern void coprocessor_release_all(struct thread_info*);
extern void coprocessor_flush_all(struct thread_info*);
static inline void coprocessor_clear_cpenable(void)
{
unsigned long i = 0;
WSR_CPENABLE(i);
}
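
[Annotation: a small sketch of the complementary operation, mirroring the enable_coprocessor() helper this series removes from asm/system.h; the real lazy-switch path sets CPENABLE bits from the coprocessor-disabled exception handler:]

    /* Sketch only, assuming the RSR/WSR wrappers above are in scope. */
    static inline void coprocessor_enable(int cp)
    {
            unsigned long cpenable;

            RSR_CPENABLE(cpenable);
            cpenable |= 1ul << cp;
            WSR_CPENABLE(cpenable);
    }
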
#endif /* XTENSA_HAVE_COPROCESSORS */
#endif /* !__ASSEMBLY__ */
#endif /* _XTENSA_COPROCESSOR_H */ #endif /* _XTENSA_COPROCESSOR_H */
...@@ -72,115 +72,32 @@ ...@@ -72,115 +72,32 @@
/* ELF register definitions. This is needed for core dump support. */ /* ELF register definitions. This is needed for core dump support. */
/*
* elf_gregset_t contains the application-level state in the following order:
* Processor info: config_version, cpuxy
* Processor state: pc, ps, exccause, excvaddr, wb, ws,
* lbeg, lend, lcount, sar
* GP regs: ar0 - arXX
*/
typedef unsigned long elf_greg_t; typedef unsigned long elf_greg_t;
typedef struct { typedef struct {
elf_greg_t xchal_config_id0;
elf_greg_t xchal_config_id1;
elf_greg_t cpux;
elf_greg_t cpuy;
elf_greg_t pc; elf_greg_t pc;
elf_greg_t ps; elf_greg_t ps;
elf_greg_t exccause;
elf_greg_t excvaddr;
elf_greg_t windowbase;
elf_greg_t windowstart;
elf_greg_t lbeg; elf_greg_t lbeg;
elf_greg_t lend; elf_greg_t lend;
elf_greg_t lcount; elf_greg_t lcount;
elf_greg_t sar; elf_greg_t sar;
elf_greg_t syscall; elf_greg_t windowstart;
elf_greg_t ar[64]; elf_greg_t windowbase;
elf_greg_t reserved[8+48];
elf_greg_t a[64];
} xtensa_gregset_t; } xtensa_gregset_t;
#define ELF_NGREG (sizeof(xtensa_gregset_t) / sizeof(elf_greg_t)) #define ELF_NGREG (sizeof(xtensa_gregset_t) / sizeof(elf_greg_t))
typedef elf_greg_t elf_gregset_t[ELF_NGREG]; typedef elf_greg_t elf_gregset_t[ELF_NGREG];
/* #define ELF_NFPREG 18
* Compute the size of the coprocessor and extra state layout (register info)
* table (in bytes).
* This is actually the maximum size of the table, as opposed to the size,
* which is available from the _xtensa_reginfo_table_size global variable.
*
* (See also arch/xtensa/kernel/coprocessor.S)
*
*/
#ifndef XCHAL_EXTRA_SA_CONTENTS_LIBDB_NUM
# define XTENSA_CPE_LTABLE_SIZE 0
#else
# define XTENSA_CPE_SEGMENT(num) (num ? (1+num) : 0)
# define XTENSA_CPE_LTABLE_ENTRIES \
( XTENSA_CPE_SEGMENT(XCHAL_EXTRA_SA_CONTENTS_LIBDB_NUM) \
+ XTENSA_CPE_SEGMENT(XCHAL_CP0_SA_CONTENTS_LIBDB_NUM) \
+ XTENSA_CPE_SEGMENT(XCHAL_CP1_SA_CONTENTS_LIBDB_NUM) \
+ XTENSA_CPE_SEGMENT(XCHAL_CP2_SA_CONTENTS_LIBDB_NUM) \
+ XTENSA_CPE_SEGMENT(XCHAL_CP3_SA_CONTENTS_LIBDB_NUM) \
+ XTENSA_CPE_SEGMENT(XCHAL_CP4_SA_CONTENTS_LIBDB_NUM) \
+ XTENSA_CPE_SEGMENT(XCHAL_CP5_SA_CONTENTS_LIBDB_NUM) \
+ XTENSA_CPE_SEGMENT(XCHAL_CP6_SA_CONTENTS_LIBDB_NUM) \
+ XTENSA_CPE_SEGMENT(XCHAL_CP7_SA_CONTENTS_LIBDB_NUM) \
+ 1 /* final entry */ \
)
# define XTENSA_CPE_LTABLE_SIZE (XTENSA_CPE_LTABLE_ENTRIES * 8)
#endif
/*
* Instantiations of the elf_fpregset_t type contain, in most
* architectures, the floating point (FPU) register set.
* For Xtensa, this type is extended to contain all custom state,
* ie. coprocessor and "extra" (non-coprocessor) state (including,
* for example, TIE-defined states and register files; as well
* as other optional processor state).
* This includes FPU state if a floating-point coprocessor happens
* to have been configured within the Xtensa processor.
*
* TOTAL_FPREGS_SIZE is the required size (without rounding)
* of elf_fpregset_t. It provides space for the following:
*
* a) 32-bit mask of active coprocessors for this task (similar
* to CPENABLE in single-threaded Xtensa processor systems)
*
* b) table describing the layout of custom states (ie. of
* individual registers, etc) within the save areas
*
* c) save areas for each coprocessor and for non-coprocessor
* ("extra") state
*
* Note that save areas may require up to 16-byte alignment when
* accessed by save/restore sequences. We do not need to ensure
* such alignment in an elf_fpregset_t structure because custom
* state is not directly loaded/stored into it; rather, save area
* contents are copied to elf_fpregset_t from the active save areas
* (see 'struct task_struct' definition in processor.h for that)
* using memcpy(). But we do allow space for such alignment,
* to allow optimizations of layout and copying.
*/
#if 0
#define TOTAL_FPREGS_SIZE \
(4 + XTENSA_CPE_LTABLE_SIZE + XTENSA_CP_EXTRA_SIZE)
#define ELF_NFPREG \
((TOTAL_FPREGS_SIZE + sizeof(elf_fpreg_t) - 1) / sizeof(elf_fpreg_t))
#else
#define TOTAL_FPREGS_SIZE 0
#define ELF_NFPREG 0
#endif
typedef unsigned int elf_fpreg_t; typedef unsigned int elf_fpreg_t;
typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG]; typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
#define ELF_CORE_COPY_REGS(_eregs, _pregs) \ #define ELF_CORE_COPY_REGS(_eregs, _pregs) \
xtensa_elf_core_copy_regs (&_eregs, _pregs); xtensa_elf_core_copy_regs ((xtensa_gregset_t*)&(_eregs), _pregs);
extern void xtensa_elf_core_copy_regs (xtensa_gregset_t *, struct pt_regs *); extern void xtensa_elf_core_copy_regs (xtensa_gregset_t *, struct pt_regs *);
...@@ -257,6 +174,21 @@ extern void xtensa_elf_core_copy_regs (xtensa_gregset_t *, struct pt_regs *); ...@@ -257,6 +174,21 @@ extern void xtensa_elf_core_copy_regs (xtensa_gregset_t *, struct pt_regs *);
_r->areg[12]=0; _r->areg[13]=0; _r->areg[14]=0; _r->areg[15]=0; \ _r->areg[12]=0; _r->areg[13]=0; _r->areg[14]=0; _r->areg[15]=0; \
} while (0) } while (0)
typedef struct {
xtregs_opt_t opt;
xtregs_user_t user;
#if XTENSA_HAVE_COPROCESSORS
xtregs_cp0_t cp0;
xtregs_cp1_t cp1;
xtregs_cp2_t cp2;
xtregs_cp3_t cp3;
xtregs_cp4_t cp4;
xtregs_cp5_t cp5;
xtregs_cp6_t cp6;
xtregs_cp7_t cp7;
#endif
} elf_xtregs_t;
#define SET_PERSONALITY(ex, ibcs2) set_personality(PER_LINUX_32BIT) #define SET_PERSONALITY(ex, ibcs2) set_personality(PER_LINUX_32BIT)
struct task_struct; struct task_struct;
......
...@@ -15,9 +15,11 @@ ...@@ -15,9 +15,11 @@
struct mod_arch_specific struct mod_arch_specific
{ {
/* Module support is not completely implemented. */ /* No special elements, yet. */
}; };
#define MODULE_ARCH_VERMAGIC "xtensa-" __stringify(XCHAL_CORE_ID) " "
#define Elf_Shdr Elf32_Shdr #define Elf_Shdr Elf32_Shdr
#define Elf_Sym Elf32_Sym #define Elf_Sym Elf32_Sym
#define Elf_Ehdr Elf32_Ehdr #define Elf_Ehdr Elf32_Ehdr
......
...@@ -47,7 +47,7 @@ static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, ...@@ -47,7 +47,7 @@ static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
return kmem_cache_alloc(pgtable_cache, GFP_KERNEL|__GFP_REPEAT); return kmem_cache_alloc(pgtable_cache, GFP_KERNEL|__GFP_REPEAT);
} }
static inline pte_token_t pte_alloc_one(struct mm_struct *mm, static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
unsigned long addr) unsigned long addr)
{ {
struct page *page; struct page *page;
......
...@@ -66,11 +66,9 @@ ...@@ -66,11 +66,9 @@
*/ */
#define VMALLOC_START 0xC0000000 #define VMALLOC_START 0xC0000000
#define VMALLOC_END 0xC6FEFFFF #define VMALLOC_END 0xC7FEFFFF
#define TLBTEMP_BASE_1 0xC6FF0000 #define TLBTEMP_BASE_1 0xC7FF0000
#define TLBTEMP_BASE_2 0xC6FF8000 #define TLBTEMP_BASE_2 0xC7FF8000
#define MODULE_START 0xC7000000
#define MODULE_END 0xC7FFFFFF
/* /*
* Xtensa Linux config PTE layout (when present): * Xtensa Linux config PTE layout (when present):
......
...@@ -103,10 +103,6 @@ struct thread_struct { ...@@ -103,10 +103,6 @@ struct thread_struct {
unsigned long dbreaka[XCHAL_NUM_DBREAK]; unsigned long dbreaka[XCHAL_NUM_DBREAK];
unsigned long dbreakc[XCHAL_NUM_DBREAK]; unsigned long dbreakc[XCHAL_NUM_DBREAK];
/* Allocate storage for extra state and coprocessor state. */
unsigned char cp_save[XTENSA_CP_EXTRA_SIZE]
__attribute__ ((aligned(XTENSA_CP_EXTRA_ALIGN)));
/* Make structure 16 bytes aligned. */ /* Make structure 16 bytes aligned. */
int align[0] __attribute__ ((aligned(16))); int align[0] __attribute__ ((aligned(16)));
}; };
...@@ -162,21 +158,16 @@ struct thread_struct { ...@@ -162,21 +158,16 @@ struct thread_struct {
struct task_struct; struct task_struct;
struct mm_struct; struct mm_struct;
// FIXME: do we need release_thread for CP??
/* Free all resources held by a thread. */ /* Free all resources held by a thread. */
#define release_thread(thread) do { } while(0) #define release_thread(thread) do { } while(0)
// FIXME: do we need prepare_to_copy (lazy status) for CP??
/* Prepare to copy thread state - unlazy all lazy status */ /* Prepare to copy thread state - unlazy all lazy status */
#define prepare_to_copy(tsk) do { } while (0) extern void prepare_to_copy(struct task_struct*);
/* /* Create a kernel thread without removing it from tasklists */
* create a kernel thread without removing it from tasklists
*/
extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags); extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
/* Copy and release all segment info associated with a VM */ /* Copy and release all segment info associated with a VM */
#define copy_segments(p, mm) do { } while(0) #define copy_segments(p, mm) do { } while(0)
#define release_segments(mm) do { } while(0) #define release_segments(mm) do { } while(0)
#define forget_segments() do { } while (0) #define forget_segments() do { } while (0)
......
...@@ -53,33 +53,30 @@ ...@@ -53,33 +53,30 @@
/* Registers used by strace */ /* Registers used by strace */
#define REG_A_BASE 0xfc000000 #define REG_A_BASE 0x0000
#define REG_AR_BASE 0x04000000 #define REG_AR_BASE 0x0100
#define REG_PC 0x14000000 #define REG_PC 0x0020
#define REG_PS 0x080000e6 #define REG_PS 0x02e6
#define REG_WB 0x08000048 #define REG_WB 0x0248
#define REG_WS 0x08000049 #define REG_WS 0x0249
#define REG_LBEG 0x08000000 #define REG_LBEG 0x0200
#define REG_LEND 0x08000001 #define REG_LEND 0x0201
#define REG_LCOUNT 0x08000002 #define REG_LCOUNT 0x0202
#define REG_SAR 0x08000003 #define REG_SAR 0x0203
#define REG_DEPC 0x080000c0
#define REG_EXCCAUSE 0x080000e8 #define SYSCALL_NR 0x00ff
#define REG_EXCVADDR 0x080000ee
#define SYSCALL_NR 0x1
#define AR_REGNO_TO_A_REGNO(ar, wb) (ar - wb*4) & ~(XCHAL_NUM_AREGS - 1)
/* Other PTRACE_ values defined in <linux/ptrace.h> using values 0-9,16,17,24 */ /* Other PTRACE_ values defined in <linux/ptrace.h> using values 0-9,16,17,24 */
#define PTRACE_GETREGS 12 #define PTRACE_GETREGS 12
#define PTRACE_SETREGS 13 #define PTRACE_SETREGS 13
#define PTRACE_GETFPREGS 14 #define PTRACE_GETXTREGS 18
#define PTRACE_SETFPREGS 15 #define PTRACE_SETXTREGS 19
#define PTRACE_GETFPREGSIZE 18
#ifndef __ASSEMBLY__ #ifndef __ASSEMBLY__
#ifdef __KERNEL__
/* /*
* This struct defines the way the registers are stored on the * This struct defines the way the registers are stored on the
* kernel stack during a system call or other kernel entry. * kernel stack during a system call or other kernel entry.
...@@ -102,6 +99,9 @@ struct pt_regs { ...@@ -102,6 +99,9 @@ struct pt_regs {
unsigned long icountlevel; /* 60 */ unsigned long icountlevel; /* 60 */
int reserved[1]; /* 64 */ int reserved[1]; /* 64 */
/* Additional configurable registers that are used by the compiler. */
xtregs_opt_t xtregs_opt;
/* Make sure the areg field is 16 bytes aligned. */ /* Make sure the areg field is 16 bytes aligned. */
int align[0] __attribute__ ((aligned(16))); int align[0] __attribute__ ((aligned(16)));
...@@ -111,8 +111,6 @@ struct pt_regs { ...@@ -111,8 +111,6 @@ struct pt_regs {
unsigned long areg[16]; /* 128 (64) */ unsigned long areg[16]; /* 128 (64) */
}; };
#ifdef __KERNEL__
#include <asm/variant/core.h> #include <asm/variant/core.h>
# define task_pt_regs(tsk) ((struct pt_regs*) \ # define task_pt_regs(tsk) ((struct pt_regs*) \
......
...@@ -100,7 +100,14 @@ ...@@ -100,7 +100,14 @@
#define EXCCAUSE_DTLB_SIZE_RESTRICTION 27 #define EXCCAUSE_DTLB_SIZE_RESTRICTION 27
#define EXCCAUSE_LOAD_CACHE_ATTRIBUTE 28 #define EXCCAUSE_LOAD_CACHE_ATTRIBUTE 28
#define EXCCAUSE_STORE_CACHE_ATTRIBUTE 29 #define EXCCAUSE_STORE_CACHE_ATTRIBUTE 29
#define EXCCAUSE_FLOATING_POINT 40 #define EXCCAUSE_COPROCESSOR0_DISABLED 32
#define EXCCAUSE_COPROCESSOR1_DISABLED 33
#define EXCCAUSE_COPROCESSOR2_DISABLED 34
#define EXCCAUSE_COPROCESSOR3_DISABLED 35
#define EXCCAUSE_COPROCESSOR4_DISABLED 36
#define EXCCAUSE_COPROCESSOR5_DISABLED 37
#define EXCCAUSE_COPROCESSOR6_DISABLED 38
#define EXCCAUSE_COPROCESSOR7_DISABLED 39
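
[Annotation: the eight new causes are consecutive, so coprocessor n faults with cause 32 + n. A hypothetical convenience macro, not part of this patch:]

    #define EXCCAUSE_COPROCESSOR_DISABLED(n) \
            (EXCCAUSE_COPROCESSOR0_DISABLED + (n))
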
/* PS register fields. */ /* PS register fields. */
......
...@@ -13,9 +13,6 @@ ...@@ -13,9 +13,6 @@
struct sigcontext { struct sigcontext {
unsigned long oldmask;
/* CPU registers */
unsigned long sc_pc; unsigned long sc_pc;
unsigned long sc_ps; unsigned long sc_ps;
unsigned long sc_lbeg; unsigned long sc_lbeg;
...@@ -25,6 +22,7 @@ struct sigcontext { ...@@ -25,6 +22,7 @@ struct sigcontext {
unsigned long sc_acclo; unsigned long sc_acclo;
unsigned long sc_acchi; unsigned long sc_acchi;
unsigned long sc_a[16]; unsigned long sc_a[16];
void *sc_xtregs;
}; };
#endif /* _XTENSA_SIGCONTEXT_H */ #endif /* _XTENSA_SIGCONTEXT_H */
...@@ -5,25 +5,23 @@ ...@@ -5,25 +5,23 @@
* License. See the file "COPYING" in the main directory of this archive * License. See the file "COPYING" in the main directory of this archive
* for more details. * for more details.
* *
* Copyright (C) 2001 - 2005 Tensilica Inc. * Copyright (C) 2001 - 2007 Tensilica Inc.
*/ */
#ifndef _XTENSA_STAT_H #ifndef _XTENSA_STAT_H
#define _XTENSA_STAT_H #define _XTENSA_STAT_H
#include <linux/types.h>
#define STAT_HAVE_NSEC 1 #define STAT_HAVE_NSEC 1
struct stat { struct stat {
unsigned long st_dev; unsigned long st_dev;
ino_t st_ino; unsigned long st_ino;
mode_t st_mode; unsigned int st_mode;
nlink_t st_nlink; unsigned int st_nlink;
uid_t st_uid; unsigned int st_uid;
gid_t st_gid; unsigned int st_gid;
unsigned int st_rdev; unsigned long st_rdev;
off_t st_size; long st_size;
unsigned long st_blksize; unsigned long st_blksize;
unsigned long st_blocks; unsigned long st_blocks;
unsigned long st_atime; unsigned long st_atime;
...@@ -36,8 +34,6 @@ struct stat { ...@@ -36,8 +34,6 @@ struct stat {
unsigned long __unused5; unsigned long __unused5;
}; };
/* This matches struct stat64 in glibc-2.3 */
struct stat64 { struct stat64 {
unsigned long long st_dev; /* Device */ unsigned long long st_dev; /* Device */
unsigned long long st_ino; /* File serial number */ unsigned long long st_ino; /* File serial number */
...@@ -47,20 +43,14 @@ struct stat64 { ...@@ -47,20 +43,14 @@ struct stat64 {
unsigned int st_gid; /* Group ID of the file's group. */ unsigned int st_gid; /* Group ID of the file's group. */
unsigned long long st_rdev; /* Device number, if device. */ unsigned long long st_rdev; /* Device number, if device. */
long long st_size; /* Size of file, in bytes. */ long long st_size; /* Size of file, in bytes. */
long st_blksize; /* Optimal block size for I/O. */ unsigned long st_blksize; /* Optimal block size for I/O. */
unsigned long __unused2; unsigned long __unused2;
#ifdef __XTENSA_EB__ unsigned long long st_blocks; /* Number 512-byte blocks allocated. */
unsigned long __unused3; unsigned long st_atime; /* Time of last access. */
long st_blocks; /* Number 512-byte blocks allocated. */
#else
long st_blocks; /* Number 512-byte blocks allocated. */
unsigned long __unused3;
#endif
long st_atime; /* Time of last access. */
unsigned long st_atime_nsec; unsigned long st_atime_nsec;
long st_mtime; /* Time of last modification. */ unsigned long st_mtime; /* Time of last modification. */
unsigned long st_mtime_nsec; unsigned long st_mtime_nsec;
long st_ctime; /* Time of last status change. */ unsigned long st_ctime; /* Time of last status change. */
unsigned long st_ctime_nsec; unsigned long st_ctime_nsec;
unsigned long __unused4; unsigned long __unused4;
unsigned long __unused5; unsigned long __unused5;
......
...@@ -46,42 +46,6 @@ static inline int irqs_disabled(void) ...@@ -46,42 +46,6 @@ static inline int irqs_disabled(void)
return flags & 0xf; return flags & 0xf;
} }
#define RSR_CPENABLE(x) do { \
__asm__ __volatile__("rsr %0," __stringify(CPENABLE) : "=a" (x)); \
} while(0);
#define WSR_CPENABLE(x) do { \
__asm__ __volatile__("wsr %0," __stringify(CPENABLE)";rsync" \
:: "a" (x));} while(0);
#define clear_cpenable() __clear_cpenable()
static inline void __clear_cpenable(void)
{
#if XCHAL_HAVE_CP
unsigned long i = 0;
WSR_CPENABLE(i);
#endif
}
static inline void enable_coprocessor(int i)
{
#if XCHAL_HAVE_CP
int cp;
RSR_CPENABLE(cp);
cp |= 1 << i;
WSR_CPENABLE(cp);
#endif
}
static inline void disable_coprocessor(int i)
{
#if XCHAL_HAVE_CP
int cp;
RSR_CPENABLE(cp);
cp &= ~(1 << i);
WSR_CPENABLE(cp);
#endif
}
#define smp_read_barrier_depends() do { } while(0) #define smp_read_barrier_depends() do { } while(0)
#define read_barrier_depends() do { } while(0) #define read_barrier_depends() do { } while(0)
...@@ -111,7 +75,6 @@ extern void *_switch_to(void *last, void *next); ...@@ -111,7 +75,6 @@ extern void *_switch_to(void *last, void *next);
#define switch_to(prev,next,last) \ #define switch_to(prev,next,last) \
do { \ do { \
clear_cpenable(); \
(last) = _switch_to(prev, next); \ (last) = _switch_to(prev, next); \
} while(0) } while(0)
...@@ -244,7 +207,7 @@ static inline void spill_registers(void) ...@@ -244,7 +207,7 @@ static inline void spill_registers(void)
"wsr a13," __stringify(SAR) "\n\t" "wsr a13," __stringify(SAR) "\n\t"
"wsr a14," __stringify(PS) "\n\t" "wsr a14," __stringify(PS) "\n\t"
:: "a" (&a0), "a" (&ps) :: "a" (&a0), "a" (&ps)
: "a2", "a3", "a12", "a13", "a14", "a15", "memory"); : "a2", "a3", "a4", "a7", "a11", "a12", "a13", "a14", "a15", "memory");
} }
#define arch_align_stack(x) (x) #define arch_align_stack(x) (x)
......
...@@ -27,6 +27,21 @@ ...@@ -27,6 +27,21 @@
#ifndef __ASSEMBLY__ #ifndef __ASSEMBLY__
#if XTENSA_HAVE_COPROCESSORS
typedef struct xtregs_coprocessor {
xtregs_cp0_t cp0;
xtregs_cp1_t cp1;
xtregs_cp2_t cp2;
xtregs_cp3_t cp3;
xtregs_cp4_t cp4;
xtregs_cp5_t cp5;
xtregs_cp6_t cp6;
xtregs_cp7_t cp7;
} xtregs_coprocessor_t;
#endif
struct thread_info { struct thread_info {
struct task_struct *task; /* main task structure */ struct task_struct *task; /* main task structure */
struct exec_domain *exec_domain; /* execution domain */ struct exec_domain *exec_domain; /* execution domain */
...@@ -38,7 +53,13 @@ struct thread_info { ...@@ -38,7 +53,13 @@ struct thread_info {
mm_segment_t addr_limit; /* thread address space */ mm_segment_t addr_limit; /* thread address space */
struct restart_block restart_block; struct restart_block restart_block;
unsigned long cpenable;
/* Allocate storage for extra user states and coprocessor states. */
#if XTENSA_HAVE_COPROCESSORS
xtregs_coprocessor_t xtregs_cp;
#endif
xtregs_user_t xtregs_user;
}; };
#else /* !__ASSEMBLY__ */ #else /* !__ASSEMBLY__ */
......
...@@ -63,10 +63,10 @@ extern cycles_t cacheflush_time; ...@@ -63,10 +63,10 @@ extern cycles_t cacheflush_time;
* Register access. * Register access.
*/ */
#define WSR_CCOUNT(r) __asm__("wsr %0,"__stringify(CCOUNT) :: "a" (r)) #define WSR_CCOUNT(r) asm volatile ("wsr %0,"__stringify(CCOUNT) :: "a" (r))
#define RSR_CCOUNT(r) __asm__("rsr %0,"__stringify(CCOUNT) : "=a" (r)) #define RSR_CCOUNT(r) asm volatile ("rsr %0,"__stringify(CCOUNT) : "=a" (r))
#define WSR_CCOMPARE(x,r) __asm__("wsr %0,"__stringify(CCOMPARE)"+"__stringify(x) :: "a"(r)) #define WSR_CCOMPARE(x,r) asm volatile ("wsr %0,"__stringify(CCOMPARE)"+"__stringify(x) :: "a"(r))
#define RSR_CCOMPARE(x,r) __asm__("rsr %0,"__stringify(CCOMPARE)"+"__stringify(x) : "=a"(r)) #define RSR_CCOMPARE(x,r) asm volatile ("rsr %0,"__stringify(CCOMPARE)"+"__stringify(x) : "=a"(r))
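
[Annotation: adding volatile keeps the compiler from caching or reordering these register reads, which matters when the same macro is issued twice to measure a delta. A sketch assuming the macros above are in scope:]

    static inline unsigned long ccount_delta(void)
    {
            unsigned long t0, t1;

            RSR_CCOUNT(t0);   /* must issue a real rsr each time ... */
            RSR_CCOUNT(t1);   /* ... or t1 - t0 could fold to zero */
            return t1 - t0;
    }
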
static inline unsigned long get_ccount (void) static inline unsigned long get_ccount (void)
{ {
......
...@@ -26,6 +26,7 @@ ...@@ -26,6 +26,7 @@
#include <asm/current.h> #include <asm/current.h>
#include <asm/asm-offsets.h> #include <asm/asm-offsets.h>
#include <asm/processor.h> #include <asm/processor.h>
#include <asm/types.h>
/* /*
* These assembly macros mirror the C macros that follow below. They * These assembly macros mirror the C macros that follow below. They
...@@ -118,7 +119,7 @@ ...@@ -118,7 +119,7 @@
* <at> destroyed (actually, (TASK_SIZE + 1 - size)) * <at> destroyed (actually, (TASK_SIZE + 1 - size))
*/ */
.macro user_ok aa, as, at, error .macro user_ok aa, as, at, error
movi \at, (TASK_SIZE+1) movi \at, __XTENSA_UL_CONST(TASK_SIZE)
bgeu \as, \at, \error bgeu \as, \at, \error
sub \at, \at, \as sub \at, \at, \as
bgeu \aa, \at, \error bgeu \aa, \at, \error
...@@ -226,20 +227,21 @@ extern long __put_user_bad(void); ...@@ -226,20 +227,21 @@ extern long __put_user_bad(void);
__pu_err; \ __pu_err; \
}) })
#define __put_user_size(x,ptr,size,retval) \ #define __put_user_size(x,ptr,size,retval) \
do { \ do { \
retval = 0; \ int __cb; \
switch (size) { \ retval = 0; \
case 1: __put_user_asm(x,ptr,retval,1,"s8i"); break; \ switch (size) { \
case 2: __put_user_asm(x,ptr,retval,2,"s16i"); break; \ case 1: __put_user_asm(x,ptr,retval,1,"s8i",__cb); break; \
case 4: __put_user_asm(x,ptr,retval,4,"s32i"); break; \ case 2: __put_user_asm(x,ptr,retval,2,"s16i",__cb); break; \
case 8: { \ case 4: __put_user_asm(x,ptr,retval,4,"s32i",__cb); break; \
__typeof__(*ptr) __v64 = x; \ case 8: { \
retval = __copy_to_user(ptr,&__v64,8); \ __typeof__(*ptr) __v64 = x; \
break; \ retval = __copy_to_user(ptr,&__v64,8); \
} \ break; \
default: __put_user_bad(); \ } \
} \ default: __put_user_bad(); \
} \
} while (0) } while (0)
...@@ -267,14 +269,14 @@ do { \ ...@@ -267,14 +269,14 @@ do { \
#define __check_align_1 "" #define __check_align_1 ""
#define __check_align_2 \ #define __check_align_2 \
" _bbci.l %2, 0, 1f \n" \ " _bbci.l %3, 0, 1f \n" \
" movi %0, %3 \n" \ " movi %0, %4 \n" \
" _j 2f \n" " _j 2f \n"
#define __check_align_4 \ #define __check_align_4 \
" _bbsi.l %2, 0, 0f \n" \ " _bbsi.l %3, 0, 0f \n" \
" _bbci.l %2, 1, 1f \n" \ " _bbci.l %3, 1, 1f \n" \
"0: movi %0, %3 \n" \ "0: movi %0, %4 \n" \
" _j 2f \n" " _j 2f \n"
...@@ -286,24 +288,24 @@ do { \ ...@@ -286,24 +288,24 @@ do { \
* WARNING: If you modify this macro at all, verify that the * WARNING: If you modify this macro at all, verify that the
* __check_align_* macros still work. * __check_align_* macros still work.
*/ */
#define __put_user_asm(x, addr, err, align, insn) \ #define __put_user_asm(x, addr, err, align, insn, cb) \
__asm__ __volatile__( \ __asm__ __volatile__( \
__check_align_##align \ __check_align_##align \
"1: "insn" %1, %2, 0 \n" \ "1: "insn" %2, %3, 0 \n" \
"2: \n" \ "2: \n" \
" .section .fixup,\"ax\" \n" \ " .section .fixup,\"ax\" \n" \
" .align 4 \n" \ " .align 4 \n" \
"4: \n" \ "4: \n" \
" .long 2b \n" \ " .long 2b \n" \
"5: \n" \ "5: \n" \
" l32r %2, 4b \n" \ " l32r %1, 4b \n" \
" movi %0, %3 \n" \ " movi %0, %4 \n" \
" jx %2 \n" \ " jx %1 \n" \
" .previous \n" \ " .previous \n" \
" .section __ex_table,\"a\" \n" \ " .section __ex_table,\"a\" \n" \
" .long 1b, 5b \n" \ " .long 1b, 5b \n" \
" .previous" \ " .previous" \
:"=r" (err) \ :"=r" (err), "=r" (cb) \
:"r" ((int)(x)), "r" (addr), "i" (-EFAULT), "0" (err)) :"r" ((int)(x)), "r" (addr), "i" (-EFAULT), "0" (err))
#define __get_user_nocheck(x,ptr,size) \ #define __get_user_nocheck(x,ptr,size) \
...@@ -328,11 +330,12 @@ extern long __get_user_bad(void); ...@@ -328,11 +330,12 @@ extern long __get_user_bad(void);
#define __get_user_size(x,ptr,size,retval) \ #define __get_user_size(x,ptr,size,retval) \
do { \ do { \
int __cb; \
retval = 0; \ retval = 0; \
switch (size) { \ switch (size) { \
case 1: __get_user_asm(x,ptr,retval,1,"l8ui"); break; \ case 1: __get_user_asm(x,ptr,retval,1,"l8ui",__cb); break; \
case 2: __get_user_asm(x,ptr,retval,2,"l16ui"); break; \ case 2: __get_user_asm(x,ptr,retval,2,"l16ui",__cb); break; \
case 4: __get_user_asm(x,ptr,retval,4,"l32i"); break; \ case 4: __get_user_asm(x,ptr,retval,4,"l32i",__cb); break; \
case 8: retval = __copy_from_user(&x,ptr,8); break; \ case 8: retval = __copy_from_user(&x,ptr,8); break; \
default: (x) = __get_user_bad(); \ default: (x) = __get_user_bad(); \
} \ } \
...@@ -343,25 +346,25 @@ do { \ ...@@ -343,25 +346,25 @@ do { \
* WARNING: If you modify this macro at all, verify that the * WARNING: If you modify this macro at all, verify that the
* __check_align_* macros still work. * __check_align_* macros still work.
*/ */
#define __get_user_asm(x, addr, err, align, insn) \ #define __get_user_asm(x, addr, err, align, insn, cb) \
__asm__ __volatile__( \ __asm__ __volatile__( \
__check_align_##align \ __check_align_##align \
"1: "insn" %1, %2, 0 \n" \ "1: "insn" %2, %3, 0 \n" \
"2: \n" \ "2: \n" \
" .section .fixup,\"ax\" \n" \ " .section .fixup,\"ax\" \n" \
" .align 4 \n" \ " .align 4 \n" \
"4: \n" \ "4: \n" \
" .long 2b \n" \ " .long 2b \n" \
"5: \n" \ "5: \n" \
" l32r %2, 4b \n" \ " l32r %1, 4b \n" \
" movi %1, 0 \n" \ " movi %2, 0 \n" \
" movi %0, %3 \n" \ " movi %0, %4 \n" \
" jx %2 \n" \ " jx %1 \n" \
" .previous \n" \ " .previous \n" \
" .section __ex_table,\"a\" \n" \ " .section __ex_table,\"a\" \n" \
" .long 1b, 5b \n" \ " .long 1b, 5b \n" \
" .previous" \ " .previous" \
:"=r" (err), "=r" (x) \ :"=r" (err), "=r" (cb), "=r" (x) \
:"r" (addr), "i" (-EFAULT), "0" (err)) :"r" (addr), "i" (-EFAULT), "0" (err))
......
/*
* This header file contains assembly-language definitions (assembly
* macros, etc.) for this specific Xtensa processor's TIE extensions
* and options. It is customized to this Xtensa processor configuration.
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1999-2008 Tensilica Inc.
*/
#ifndef _XTENSA_CORE_TIE_ASM_H
#define _XTENSA_CORE_TIE_ASM_H
/* Selection parameter values for save-area save/restore macros: */
/* Option vs. TIE: */
#define XTHAL_SAS_TIE 0x0001 /* custom extension or coprocessor */
#define XTHAL_SAS_OPT 0x0002 /* optional (and not a coprocessor) */
/* Whether used automatically by compiler: */
#define XTHAL_SAS_NOCC 0x0004 /* not used by compiler w/o special opts/code */
#define XTHAL_SAS_CC 0x0008 /* used by compiler without special opts/code */
/* ABI handling across function calls: */
#define XTHAL_SAS_CALR 0x0010 /* caller-saved */
#define XTHAL_SAS_CALE 0x0020 /* callee-saved */
#define XTHAL_SAS_GLOB 0x0040 /* global across function calls (in thread) */
/* Misc */
#define XTHAL_SAS_ALL 0xFFFF /* include all default NCP contents */
/* Macro to save all non-coprocessor (extra) custom TIE and optional state
* (not including zero-overhead loop registers).
* Save area ptr (clobbered): ptr (1 byte aligned)
* Scratch regs (clobbered): at1..at4 (only first XCHAL_NCP_NUM_ATMPS needed)
*/
.macro xchal_ncp_store ptr at1 at2 at3 at4 continue=0 ofs=-1 select=XTHAL_SAS_ALL
xchal_sa_start \continue, \ofs
.ifeq (XTHAL_SAS_OPT | XTHAL_SAS_CC | XTHAL_SAS_GLOB) & ~\select
xchal_sa_align \ptr, 0, 1024-4, 4, 4
rur \at1, THREADPTR // threadptr option
s32i \at1, \ptr, .Lxchal_ofs_ + 0
.set .Lxchal_ofs_, .Lxchal_ofs_ + 4
.endif
.endm // xchal_ncp_store
/* Macro to save all non-coprocessor (extra) custom TIE and optional state
* (not including zero-overhead loop registers).
* Save area ptr (clobbered): ptr (1 byte aligned)
* Scratch regs (clobbered): at1..at4 (only first XCHAL_NCP_NUM_ATMPS needed)
*/
.macro xchal_ncp_load ptr at1 at2 at3 at4 continue=0 ofs=-1 select=XTHAL_SAS_ALL
xchal_sa_start \continue, \ofs
.ifeq (XTHAL_SAS_OPT | XTHAL_SAS_CC | XTHAL_SAS_GLOB) & ~\select
xchal_sa_align \ptr, 0, 1024-4, 4, 4
l32i \at1, \ptr, .Lxchal_ofs_ + 0
wur \at1, THREADPTR // threadptr option
.set .Lxchal_ofs_, .Lxchal_ofs_ + 4
.endif
.endm // xchal_ncp_load
#define XCHAL_NCP_NUM_ATMPS 1
#define XCHAL_SA_NUM_ATMPS 1
#endif /*_XTENSA_CORE_TIE_ASM_H*/
/* /*
* Xtensa processor core configuration information. * This header file describes this specific Xtensa processor's TIE extensions
* that extend basic Xtensa core functionality. It is customized to this
* Xtensa processor configuration.
* *
* This file is subject to the terms and conditions of the GNU General Public * This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive * License. See the file "COPYING" in the main directory of this archive
* for more details. * for more details.
* *
* Copyright (C) 1999-2006 Tensilica Inc. * Copyright (C) 1999-2007 Tensilica Inc.
*/ */
#ifndef XTENSA_TIE_H #ifndef _XTENSA_CORE_TIE_H
#define XTENSA_TIE_H #define _XTENSA_CORE_TIE_H
/*----------------------------------------------------------------------
COPROCESSORS and EXTRA STATE
----------------------------------------------------------------------*/
#define XCHAL_CP_NUM 0 /* number of coprocessors */ #define XCHAL_CP_NUM 0 /* number of coprocessors */
#define XCHAL_CP_MASK 0x00 #define XCHAL_CP_MAX 0 /* max CP ID + 1 (0 if none) */
#define XCHAL_CP_MASK 0x00 /* bitmask of all CPs by ID */
#define XCHAL_CP_PORT_MASK 0x00 /* bitmask of only port CPs */
/* Basic parameters of each coprocessor: */
#define XCHAL_CP7_NAME "XTIOP"
#define XCHAL_CP7_IDENT XTIOP
#define XCHAL_CP7_SA_SIZE 0 /* size of state save area */
#define XCHAL_CP7_SA_ALIGN 1 /* min alignment of save area */
#define XCHAL_CP_ID_XTIOP 7 /* coprocessor ID (0..7) */
/* Filler info for unassigned coprocessors, to simplify arrays etc: */
#define XCHAL_NCP_SA_SIZE 0
#define XCHAL_NCP_SA_ALIGN 1
#define XCHAL_CP0_SA_SIZE 0
#define XCHAL_CP0_SA_ALIGN 1
#define XCHAL_CP1_SA_SIZE 0
#define XCHAL_CP1_SA_ALIGN 1
#define XCHAL_CP2_SA_SIZE 0
#define XCHAL_CP2_SA_ALIGN 1
#define XCHAL_CP3_SA_SIZE 0
#define XCHAL_CP3_SA_ALIGN 1
#define XCHAL_CP4_SA_SIZE 0
#define XCHAL_CP4_SA_ALIGN 1
#define XCHAL_CP5_SA_SIZE 0
#define XCHAL_CP5_SA_ALIGN 1
#define XCHAL_CP6_SA_SIZE 0
#define XCHAL_CP6_SA_ALIGN 1
/* Save area for non-coprocessor optional and custom (TIE) state: */
#define XCHAL_NCP_SA_SIZE 0
#define XCHAL_NCP_SA_ALIGN 1
/* Total save area for optional and custom state (NCP + CPn): */
#define XCHAL_TOTAL_SA_SIZE 0 /* with 16-byte align padding */
#define XCHAL_TOTAL_SA_ALIGN 1 /* actual minimum alignment */
#define XCHAL_NCP_SA_NUM 0
#define XCHAL_NCP_SA_LIST(s)
#define XCHAL_CP0_SA_NUM 0
#define XCHAL_CP0_SA_LIST(s)
#define XCHAL_CP1_SA_NUM 0
#define XCHAL_CP1_SA_LIST(s)
#define XCHAL_CP2_SA_NUM 0
#define XCHAL_CP2_SA_LIST(s)
#define XCHAL_CP3_SA_NUM 0
#define XCHAL_CP3_SA_LIST(s)
#define XCHAL_CP4_SA_NUM 0
#define XCHAL_CP4_SA_LIST(s)
#define XCHAL_CP5_SA_NUM 0
#define XCHAL_CP5_SA_LIST(s)
#define XCHAL_CP6_SA_NUM 0
#define XCHAL_CP6_SA_LIST(s)
#define XCHAL_CP7_SA_NUM 0
#define XCHAL_CP7_SA_LIST(s)
/* Byte length of instruction from its first nibble (op0 field), per FLIX. */
#define XCHAL_OP0_FORMAT_LENGTHS 3,3,3,3,3,3,3,3,2,2,2,2,2,2,3,3
#endif /*XTENSA_CONFIG_TIE_H*/ #endif /*_XTENSA_CORE_TIE_H*/