An error occurred while fetching merge requests data.
Commit 234c8819 authored by Linus Torvalds's avatar Linus Torvalds

Import 2.3.13pre3

parent de7578b7
......@@ -1092,7 +1092,7 @@ sys_call_table:
.quad sys_munlockall
.quad sys_sysinfo
.quad sys_sysctl
.quad sys_idle /* 320 */
.quad sys_ni_syscall /* 320 */
.quad sys_oldumount
.quad sys_swapon
.quad sys_times
......
......@@ -74,9 +74,8 @@ sys_sethae(unsigned long hae, unsigned long a1, unsigned long a2,
return 0;
}
#ifdef __SMP__
int
cpu_idle(void *unused)
void
cpu_idle(void)
{
/* An endless idle loop with no priority at all. */
current->priority = 0;
......@@ -94,27 +93,6 @@ cpu_idle(void *unused)
}
}
}
#endif
asmlinkage int
sys_idle(void)
{
if (current->pid != 0)
return -EPERM;
/* An endless idle loop with no priority at all. */
current->priority = 0;
current->counter = -100;
init_idle();
while (1) {
/* FIXME -- EV6 and LCA45 know how to power down
the CPU. */
schedule();
check_pgt_cache();
}
}
void
generic_kill_arch (int mode, char *restart_cmd)
......
......@@ -194,7 +194,7 @@ extern void entDbg(void);
/* process.c */
extern void generic_kill_arch (int mode, char *reboot_cmd);
extern int cpu_idle(void *) __attribute__((noreturn));
extern void cpu_idle(void) __attribute__((noreturn));
/* ptrace.c */
extern int ptrace_set_bpt (struct task_struct *child);
......
......@@ -153,7 +153,7 @@ smp_callin(void)
cpuid, current));
/* Do nothing. */
cpu_idle(NULL);
cpu_idle();
}
......
......@@ -221,7 +221,7 @@ ruffian_init_arch(unsigned long *mem_start, unsigned long *mem_end)
/* FIXME: What do we do with ruffian_get_bank_size above? */
#if 1
pyxis_init_arch();
pyxis_init_arch(mem_start, mem_end);
#else
pyxis_enable_errors();
if (!pyxis_srm_window_setup()) {
......
......@@ -2,11 +2,11 @@ OUTPUT_FORMAT("elf64-alpha")
ENTRY(__start)
SECTIONS
{
. = 0xfffffc0000310000;
_text = .;
.text : { *(.text) }
.text2 : { *(.text2) }
_etext = .;
. = 0xfffffc0000310000;
_text = .;
.text : { *(.text) }
.text2 : { *(.text2) }
_etext = .;
/* Exception table */
. = ALIGN(16);
......@@ -26,6 +26,17 @@ SECTIONS
__init_begin = .;
.text.init : { *(.text.init) }
.data.init : { *(.data.init) }
. = ALIGN(16);
__setup_start = .;
.setup.init : { *(.setup.init) }
__setup_end = .;
. = ALIGN(8);
__initcall_start = .;
.initcall.init : { *(.initcall.init) }
__initcall_end = .;
. = ALIGN(2*8192); /* Align double page for init_task_union */
__init_end = .;
......
/* $Id: pcic.c,v 1.6 1999/06/03 15:02:18 davem Exp $
/* $Id: pcic.c,v 1.7 1999/07/23 01:56:07 davem Exp $
* pcic.c: Sparc/PCI controller support
*
* Copyright (C) 1998 V. Roganov and G. Raiko
......
/* $Id: process.c,v 1.137 1999/05/08 03:00:10 davem Exp $
/* $Id: process.c,v 1.138 1999/07/23 01:56:10 davem Exp $
* linux/arch/sparc/kernel/process.c
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
......
/* $Id: setup.c,v 1.107 1999/06/03 15:02:20 davem Exp $
/* $Id: setup.c,v 1.108 1999/07/30 09:35:03 davem Exp $
* linux/arch/sparc/kernel/setup.c
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
......
/* $Id: signal.c,v 1.92 1999/06/14 05:23:53 davem Exp $
/* $Id: signal.c,v 1.94 1999/07/30 09:35:04 davem Exp $
* linux/arch/sparc/kernel/signal.c
*
* Copyright (C) 1991, 1992 Linus Torvalds
......@@ -659,6 +659,9 @@ new_setup_rt_frame(struct k_sigaction *ka, struct pt_regs *regs,
err |= __copy_to_user(sf, (char *) regs->u_regs [UREG_FP],
sizeof (struct reg_window));
err |= __copy_to_user(&sf->info, info, sizeof(siginfo_t));
if (err)
goto sigsegv;
......
/* $Id: sparc-stub.c,v 1.24 1998/02/08 07:58:44 ecd Exp $
/* $Id: sparc-stub.c,v 1.25 1999/07/23 01:56:13 davem Exp $
* sparc-stub.c: KGDB support for the Linux kernel.
*
* Modifications to run under Linux
......
/* $Id: sparc_ksyms.c,v 1.77 1999/03/21 06:37:43 davem Exp $
/* $Id: sparc_ksyms.c,v 1.78 1999/07/23 01:56:15 davem Exp $
* arch/sparc/kernel/ksyms.c: Sparc specific ksyms support.
*
* Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
......
/* $Id: sunos_ioctl.c,v 1.31 1998/10/25 19:31:04 davem Exp $
/* $Id: sunos_ioctl.c,v 1.33 1999/07/28 12:59:03 anton Exp $
* sunos_ioctl.c: The Linux Operating system: SunOS ioctl compatibility.
*
* Copyright (C) 1995 Miguel de Icaza (miguel@nuclecu.unam.mx)
......@@ -21,6 +21,7 @@
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/file.h>
#include <asm/kbio.h>
#if 0
......
/* $Id: sys_sunos.c,v 1.101 1999/06/29 12:33:54 davem Exp $
/* $Id: sys_sunos.c,v 1.102 1999/07/23 01:56:19 davem Exp $
* sys_sunos.c: SunOS specific syscall compatibility support.
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
......
......@@ -30,9 +30,12 @@
#define _FP_WS_TYPE signed long
#define _FP_I_TYPE long
#define _FP_MUL_MEAT_S(R,X,Y) _FP_MUL_MEAT_1_wide(S,R,X,Y,umul_ppmm)
#define _FP_MUL_MEAT_D(R,X,Y) _FP_MUL_MEAT_2_wide(D,R,X,Y,umul_ppmm)
#define _FP_MUL_MEAT_Q(R,X,Y) _FP_MUL_MEAT_4_wide(Q,R,X,Y,umul_ppmm)
#define _FP_MUL_MEAT_S(R,X,Y) \
_FP_MUL_MEAT_1_wide(_FP_WFRACBITS_S,R,X,Y)
#define _FP_MUL_MEAT_D(R,X,Y) \
_FP_MUL_MEAT_2_wide(_FP_WFRACBITS_D,R,X,Y,umul_ppmm)
#define _FP_MUL_MEAT_Q(R,X,Y) \
_FP_MUL_MEAT_4_wide(_FP_WFRACBITS_Q,R,X,Y,umul_ppmm)
#define _FP_DIV_MEAT_S(R,X,Y) _FP_DIV_MEAT_1_udiv(S,R,X,Y)
#define _FP_DIV_MEAT_D(R,X,Y) _FP_DIV_MEAT_2_udiv(D,R,X,Y)
......@@ -54,7 +57,7 @@
* CPU instruction emulation this should prefer Y.
* (see SPAMv9 B.2.2 section).
*/
#define _FP_CHOOSENAN(fs, wc, R, X, Y) \
#define _FP_CHOOSENAN(fs, wc, R, X, Y, OP) \
do { \
if ((_FP_FRAC_HIGH_RAW_##fs(Y) & _FP_QNANBIT_##fs) \
&& !(_FP_FRAC_HIGH_RAW_##fs(X) & _FP_QNANBIT_##fs)) \
......
/* $Id: fault.c,v 1.103 1999/07/04 04:35:51 davem Exp $
/* $Id: fault.c,v 1.106 1999/07/30 09:35:07 davem Exp $
* fault.c: Page fault handlers for the Sparc.
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
......@@ -382,12 +382,13 @@ inline void force_user_fault(unsigned long address, int write)
if(expand_stack(vma, address))
goto bad_area;
good_area:
if(write)
if(write) {
if(!(vma->vm_flags & VM_WRITE))
goto bad_area;
else
} else {
if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
goto bad_area;
}
if (!handle_mm_fault(current, vma, address, write))
goto do_sigbus;
up(&mm->mmap_sem);
......
/* $Id: srmmu.c,v 1.187 1999/04/28 17:00:45 davem Exp $
/* $Id: srmmu.c,v 1.189 1999/07/30 09:35:08 davem Exp $
* srmmu.c: SRMMU specific routines for memory management.
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
......
/* $Id: sun4c.c,v 1.173 1999/01/17 02:20:37 davem Exp $
/* $Id: sun4c.c,v 1.175 1999/07/30 09:35:10 davem Exp $
* sun4c.c: Doing in software what should be done in hardware.
*
* Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
......
# $Id: Makefile,v 1.37 1999/06/04 13:29:10 jj Exp $
# $Id: Makefile,v 1.38 1999/08/02 12:06:06 jj Exp $
# sparc64/Makefile
#
# Makefile for the architecture dependent flags and dependencies on the
......@@ -15,7 +15,7 @@ SHELL =/bin/bash
CC := sparc64-linux-gcc -D__KERNEL__ -I$(TOPDIR)/include
CC_HAS_ARGS := $(shell if echo "$(CC)" | grep '\(__KERNEL__\| \)' > /dev/null; then echo y; else echo n; fi)
IS_EGCS := $(shell if $(CC) -c -m64 -mcmodel=medlow -o _tmp.o arch/sparc64/math-emu/fnegq.c >/dev/null 2>&1; then echo y; else echo n; fi; rm -f _tmp.o)
IS_EGCS := $(shell if $(CC) -c -m64 -mcmodel=medlow -o /dev/null /dev/null >/dev/null 2>&1; then echo y; else echo n; fi; )
NEW_GAS := $(shell if $(LD) --version 2>&1 | grep 'elf64_sparc' > /dev/null; then echo y; else echo n; fi)
ifneq ($(CC_HAS_ARGS),y)
......
# $Id: config.in,v 1.67 1999/05/01 09:17:37 davem Exp $
# $Id: config.in,v 1.71 1999/07/30 09:35:13 davem Exp $
# For a description of the syntax of this configuration file,
# see the Configure script.
#
......@@ -36,6 +36,7 @@ endmenu
# Global things across all Sun machines.
define_bool CONFIG_SBUS y
define_bool CONFIG_SBUSCHAR y
define_bool CONFIG_MOUSE y
define_bool CONFIG_SUN_MOUSE y
define_bool CONFIG_SERIAL y
define_bool CONFIG_SUN_SERIAL y
......
......@@ -49,6 +49,7 @@ CONFIG_FONT_SUN8x16=y
# CONFIG_FBCON_FONTS is not set
CONFIG_SBUS=y
CONFIG_SBUSCHAR=y
CONFIG_MOUSE=y
CONFIG_SUN_MOUSE=y
CONFIG_SERIAL=y
CONFIG_SUN_SERIAL=y
......@@ -93,12 +94,13 @@ CONFIG_BINFMT_ELF32=y
CONFIG_BINFMT_MISC=m
CONFIG_SOLARIS_EMUL=m
CONFIG_PARPORT=m
# CONFIG_PARPORT_PC is not set
CONFIG_PARPORT_PC=m
CONFIG_PARPORT_PC_FIFO=y
# CONFIG_PARPORT_AMIGA is not set
# CONFIG_PARPORT_MFC3 is not set
# CONFIG_PARPORT_ATARI is not set
# CONFIG_PARPORT_OTHER is not set
# CONFIG_PARPORT_1284 is not set
CONFIG_PARPORT_1284=y
CONFIG_PRINTER=m
CONFIG_ENVCTRL=m
......
# $Id: Makefile,v 1.43 1999/01/02 16:45:53 davem Exp $
# $Id: Makefile,v 1.44 1999/08/02 12:05:53 jj Exp $
# Makefile for the linux kernel.
#
# Note! Dependencies are done automagically by 'make dep', which also
......@@ -61,12 +61,16 @@ endif
check_asm: dummy
@echo "/* Automatically generated. Do not edit. */" > asm_offsets.h
@echo "#ifndef __ASM_OFFSETS_H__" >> asm_offsets.h
@echo "#define __ASM_OFFSETS_H__" >> asm_offsets.h
@echo "" >> asm_offsets.h
@echo "#include <linux/config.h>" >> asm_offsets.h
@echo "" >> asm_offsets.h
@echo "#ifndef CONFIG_SMP" >> asm_offsets.h
@echo "" >> asm_offsets.h
@echo -e "#define __ASM_OFFSETS_H__\n" >> asm_offsets.h
@echo -e "#include <linux/config.h>\n" >> asm_offsets.h
@echo '#if defined(__KERNEL__) && !defined(__ASSEMBLY__)' >> asm_offsets.h
@if $(CC) -c -m64 -mcmodel=medlow -o /dev/null /dev/null >/dev/null 2>&1; then \
echo '# if !((__GNUC__ > 2) || (__GNUC__ == 2 && __GNUC_MINOR__ >= 8))' >> asm_offsets.h; \
else \
echo '# if (__GNUC__ > 2) || (__GNUC__ == 2 && __GNUC_MINOR__ >= 8)' >> asm_offsets.h; \
fi
@echo -e "# error Please issue 'make check_asm' in linux top-level directory first\n# endif\n#endif\n" >> asm_offsets.h
@echo -e "#ifndef CONFIG_SMP\n" >> asm_offsets.h
@echo "#include <linux/config.h>" > tmp.c
@echo "#undef CONFIG_SMP" >> tmp.c
@echo "#include <linux/sched.h>" >> tmp.c
......@@ -92,11 +96,8 @@ check_asm: dummy
# </hack>
./check_asm >> asm_offsets.h
@rm -f check_asm check_asm.c
@echo "" >> asm_offsets.h
@echo "#else /* CONFIG_SMP */" >> asm_offsets.h
@echo "" >> asm_offsets.h
@echo "#ifndef SPIN_LOCK_DEBUG" >>asm_offsets.h
@echo "" >> asm_offsets.h
@echo -e "\n#else /* CONFIG_SMP */\n" >> asm_offsets.h
@echo -e "#ifndef SPIN_LOCK_DEBUG\n" >>asm_offsets.h
@echo "#include <linux/config.h>" > tmp.c
@echo "#undef CONFIG_SMP" >> tmp.c
@echo "#define CONFIG_SMP 1" >> tmp.c
......@@ -124,9 +125,7 @@ check_asm: dummy
# </hack>
./check_asm >> asm_offsets.h
@rm -f check_asm check_asm.c
@echo "" >> asm_offsets.h
@echo "#else /* SPIN_LOCK_DEBUG */" >> asm_offsets.h
@echo "" >> asm_offsets.h
@echo -e "\n#else /* SPIN_LOCK_DEBUG */\n" >> asm_offsets.h
@echo "#include <linux/sched.h>" > tmp.c
$(CC) -D__SMP__ -DSPIN_LOCK_DEBUG -E tmp.c -o tmp.i
@echo "/* Automatically generated. Do not edit. */" > check_asm.c
......@@ -151,10 +150,8 @@ check_asm: dummy
# </hack>
./check_asm >> asm_offsets.h
@rm -f check_asm check_asm.c
@echo "#endif /* SPIN_LOCK_DEBUG */" >> asm_offsets.h
@echo "" >> asm_offsets.h
@echo "#endif /* CONFIG_SMP */" >> asm_offsets.h
@echo "" >> asm_offsets.h
@echo -e "#endif /* SPIN_LOCK_DEBUG */\n" >> asm_offsets.h
@echo -e "#endif /* CONFIG_SMP */\n" >> asm_offsets.h
@echo "#endif /* __ASM_OFFSETS_H__ */" >> asm_offsets.h
@if test -r $(HPATH)/asm/asm_offsets.h; then \
if cmp -s asm_offsets.h $(HPATH)/asm/asm_offsets.h; then \
......
......@@ -339,6 +339,16 @@ static inline int do_load_aout32_binary(struct linux_binprm * bprm,
current->mm->start_stack =
(unsigned long) create_aout32_tables((char *)bprm->p, bprm);
if (!(current->thread.flags & SPARC_FLAG_32BIT)) {
unsigned long pgd_cache;
pgd_cache = ((unsigned long)current->mm->pgd[0])<<11UL;
__asm__ __volatile__("stxa\t%0, [%1] %2"
: /* no outputs */
: "r" (pgd_cache),
"r" (TSB_REG), "i" (ASI_DMMU));
current->thread.flags |= SPARC_FLAG_32BIT;
}
start_thread32(regs, ex.a_entry, current->mm->start_stack);
if (current->flags & PF_PTRACED)
send_sig(SIGTRAP, current, 0);
......
......@@ -142,7 +142,7 @@ struct elf_prpsinfo32
#ifdef CONFIG_BINFMT_ELF32_MODULE
#define CONFIG_BINFMT_ELF_MODULE CONFIG_BINFMT_ELF32_MODULE
#endif
#define ELF_FLAGS_INIT current->tss.flags |= SPARC_FLAG_32BIT
#define ELF_FLAGS_INIT current->thread.flags |= SPARC_FLAG_32BIT
MODULE_DESCRIPTION("Binary format loader for compatibility with 32bit SparcLinux binaries on the Ultra");
MODULE_AUTHOR("Eric Youngdale, David S. Miller, Jakub Jelinek");
......
......@@ -7,6 +7,7 @@
#include <linux/kernel.h>
#include <linux/tasks.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <asm/page.h>
#include <asm/oplib.h>
......@@ -28,6 +29,10 @@ device_scan(unsigned long mem_start))
int cpu_nds[64]; /* One node for each cpu */
int cpu_ctr = 0;
/* FIX ME FAST... -DaveM */
ioport_resource.end = 0xffffffffffffffffUL;
iomem_resource.end = 0xffffffffffffffffUL;
prom_getstring(prom_root_node, "device_type", node_str, sizeof(node_str));
prom_printf("Booting Linux...\n");
......
/* $Id: entry.S,v 1.103 1999/05/08 03:00:21 davem Exp $
/* $Id: entry.S,v 1.106 1999/08/02 08:39:34 davem Exp $
* arch/sparc64/kernel/entry.S: Sparc64 trap low-level entry points.
*
* Copyright (C) 1995,1997 David S. Miller (davem@caip.rutgers.edu)
......@@ -42,13 +42,13 @@ sparc64_vpte_patchme2:
/* This is trivial with the new code... */
.globl do_fpdis
do_fpdis:
ldub [%g6 + AOFF_task_tss + AOFF_thread_fpsaved], %g5 ! Load Group
ldub [%g6 + AOFF_task_thread + AOFF_thread_fpsaved], %g5 ! Load Group
sethi %hi(TSTATE_PEF), %g4 ! IEU0
wr %g0, FPRS_FEF, %fprs ! LSU Group+4bubbles
andcc %g5, FPRS_FEF, %g0 ! IEU1 Group
be,a,pt %icc, 1f ! CTI
clr %g7 ! IEU0
ldub [%g6 + AOFF_task_tss + AOFF_thread_gsr], %g7 ! Load Group
ldub [%g6 + AOFF_task_thread + AOFF_thread_gsr], %g7 ! Load Group
1: andcc %g5, FPRS_DL, %g0 ! IEU1
bne,pn %icc, 2f ! CTI
fzero %f0 ! FPA
......@@ -157,7 +157,7 @@ fpdis_exit:
flush %g6
fpdis_exit2:
wr %g7, 0, %gsr
ldx [%g6 + AOFF_task_tss + AOFF_thread_xfsr], %fsr
ldx [%g6 + AOFF_task_thread + AOFF_thread_xfsr], %fsr
rdpr %tstate, %g3
or %g3, %g4, %g3 ! anal...
wrpr %g3, %tstate
......@@ -167,13 +167,13 @@ fpdis_exit2:
.globl do_fptrap
.align 32
do_fptrap:
ldub [%g6 + AOFF_task_tss + AOFF_thread_fpsaved], %g3
stx %fsr, [%g6 + AOFF_task_tss + AOFF_thread_xfsr]
ldub [%g6 + AOFF_task_thread + AOFF_thread_fpsaved], %g3
stx %fsr, [%g6 + AOFF_task_thread + AOFF_thread_xfsr]
rd %fprs, %g1
or %g3, %g1, %g3
stb %g3, [%g6 + AOFF_task_tss + AOFF_thread_fpsaved]
stb %g3, [%g6 + AOFF_task_thread + AOFF_thread_fpsaved]
rd %gsr, %g3
stb %g3, [%g6 + AOFF_task_tss + AOFF_thread_gsr]
stb %g3, [%g6 + AOFF_task_thread + AOFF_thread_gsr]
mov SECONDARY_CONTEXT, %g3
add %g6, AOFF_task_fpregs, %g2
ldxa [%g3] ASI_DMMU, %g5
......@@ -633,41 +633,28 @@ execve_merge:
jmpl %g1, %g0
add %sp, STACK_BIAS + REGWIN_SZ, %o0
.globl sys_pipe, sys_execve, sys_sigpause, sys_nis_syscall
.globl sys_pipe, sys_sigpause, sys_nis_syscall
.globl sys_sigsuspend, sys_rt_sigsuspend, sys32_rt_sigsuspend
.globl sys_sigreturn, sys_rt_sigreturn
.globl sys_rt_sigreturn
.globl sys32_sigreturn, sys32_rt_sigreturn
.globl sys32_execve, sys_ptrace
.globl sys_sigaltstack, sys32_sigaltstack
.globl sys32_sigstack
.align 32
sys_pipe: sethi %hi(sparc_pipe), %g1
add %sp, STACK_BIAS + REGWIN_SZ, %o0
jmpl %g1 + %lo(sparc_pipe), %g0
nop
sys_nis_syscall:sethi %hi(c_sys_nis_syscall), %g1
add %sp, STACK_BIAS + REGWIN_SZ, %o0
jmpl %g1 + %lo(c_sys_nis_syscall), %g0
nop
sys_pipe: ba,pt %xcc, sparc_pipe
add %sp, STACK_BIAS + REGWIN_SZ, %o0
sys_nis_syscall:ba,pt %xcc, c_sys_nis_syscall
add %sp, STACK_BIAS + REGWIN_SZ, %o0
sys_memory_ordering:
sethi %hi(sparc_memory_ordering), %g1
add %sp, STACK_BIAS + REGWIN_SZ, %o1
jmpl %g1 + %lo(sparc_memory_ordering), %g0
nop
sys_sigaltstack:sethi %hi(do_sigaltstack), %g1
add %i6, STACK_BIAS, %o2
jmpl %g1 + %lo(do_sigaltstack), %g1
nop
sys32_sigstack: sethi %hi(do_sys32_sigstack), %g1
mov %i6, %o2
jmpl %g1 + %lo(do_sys32_sigstack), %g1
nop
ba,pt %xcc, sparc_memory_ordering
add %sp, STACK_BIAS + REGWIN_SZ, %o1
sys_sigaltstack:ba,pt %xcc, do_sigaltstack
add %i6, STACK_BIAS, %o2
sys32_sigstack: ba,pt %xcc, do_sys32_sigstack
mov %i6, %o2
sys32_sigaltstack:
sethi %hi(do_sys32_sigaltstack), %g1
mov %i6, %o2
jmpl %g1 + %lo(do_sys32_sigaltstack), %g1
nop
ba,pt %xcc, do_sys32_sigaltstack
mov %i6, %o2
.align 32
sys_sigsuspend: add %sp, STACK_BIAS + REGWIN_SZ, %o0
......@@ -689,10 +676,6 @@ sys_sigpause: add %sp, STACK_BIAS + REGWIN_SZ, %o1
call do_sigpause
add %o7, 1f-.-4, %o7
nop
sys_sigreturn: add %sp, STACK_BIAS + REGWIN_SZ, %o0
call do_sigreturn
add %o7, 1f-.-4, %o7
nop
sys32_sigreturn:
add %sp, STACK_BIAS + REGWIN_SZ, %o0
call do_sigreturn32
......@@ -761,38 +744,30 @@ sys_clone: flushw
ba,pt %xcc, do_fork
add %sp, STACK_BIAS + REGWIN_SZ, %o2
ret_from_syscall:
/* Clear SPARC_FLAG_NEWCHILD, switch_to leaves tss.flags in
/* Clear SPARC_FLAG_NEWCHILD, switch_to leaves thread.flags in
* %o7 for us. Check performance counter stuff too.
*/
#ifdef __SMP__
andn %o7, 0x100, %l0
andn %o7, SPARC_FLAG_NEWCHILD, %l0
mov %g5, %o0 /* 'prev' */
call schedule_tail
sth %l0, [%g6 + AOFF_task_tss + AOFF_thread_flags]
#else
andn %o7, 0x100, %l0
sth %l0, [%g6 + AOFF_task_tss + AOFF_thread_flags]
#endif
andcc %l0, 0x200, %g0
stb %l0, [%g6 + AOFF_task_thread + AOFF_thread_flags]
andcc %l0, SPARC_FLAG_PERFCTR, %g0
be,pt %icc, 1f
nop
ldx [%g6 + AOFF_task_tss + AOFF_thread_pcr_reg], %o7
ldx [%g6 + AOFF_task_thread + AOFF_thread_pcr_reg], %o7
wr %g0, %o7, %pcr
wr %g0, %g0, %pic
1: b,pt %xcc, ret_sys_call
ldx [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_I0], %o0
sparc_exit: rdpr %otherwin, %g1
rdpr %pstate, %g2
wrpr %g2, PSTATE_IE, %pstate
wrpr %g0, (PSTATE_RMO | PSTATE_PEF | PSTATE_PRIV), %pstate
rdpr %cansave, %g3
add %g3, %g1, %g3
wrpr %g3, 0x0, %cansave
wrpr %g0, 0x0, %otherwin
wrpr %g2, 0x0, %pstate
mov %o7, %l5
sth %g0, [%g6 + AOFF_task_tss + AOFF_thread_w_saved]
call sys_exit
mov %l5, %o7
wrpr %g0, (PSTATE_RMO | PSTATE_PEF | PSTATE_PRIV | PSTATE_IE), %pstate
ba,pt %xcc, sys_exit
stb %g0, [%g6 + AOFF_task_thread + AOFF_thread_w_saved]
linux_sparc_ni_syscall:
sethi %hi(sys_ni_syscall), %l7
......
/* $Id: etrap.S,v 1.41 1999/05/25 16:53:09 jj Exp $
/* $Id: etrap.S,v 1.42 1999/07/30 09:35:18 davem Exp $
* etrap.S: Preparing for entry into the kernel on Sparc V9.
*
* Copyright (C) 1996, 1997 David S. Miller (davem@caip.rutgers.edu)
......@@ -56,7 +56,7 @@ etrap_irq: rdpr %tstate, %g1 ! Single Group
wrpr %g0, 0, %canrestore ! Single Group+4bubbles
sll %g2, 3, %g2 ! IEU0 Group
mov 1, %l5 ! IEU1
stb %l5, [%l6 + AOFF_task_tss + AOFF_thread_fpdepth] ! Store
stb %l5, [%l6 + AOFF_task_thread + AOFF_thread_fpdepth] ! Store
wrpr %g3, 0, %otherwin ! Single Group+4bubbles
wrpr %g2, 0, %wstate ! Single Group+4bubbles
stxa %g0, [%l4] ASI_DMMU ! Store Group
......@@ -89,11 +89,11 @@ etrap_irq: rdpr %tstate, %g1 ! Single Group
jmpl %l2 + 0x4, %g0 ! CTI Group
mov %l6, %g6 ! IEU0
3: ldub [%l6 + AOFF_task_tss + AOFF_thread_fpdepth], %l5 ! Load Group
add %l6, AOFF_task_tss + AOFF_thread_fpsaved + 1, %l4 ! IEU0
3: ldub [%l6 + AOFF_task_thread + AOFF_thread_fpdepth], %l5 ! Load Group
add %l6, AOFF_task_thread + AOFF_thread_fpsaved + 1, %l4 ! IEU0
srl %l5, 1, %l3 ! IEU0 Group
add %l5, 2, %l5 ! IEU1
stb %l5, [%l6 + AOFF_task_tss + AOFF_thread_fpdepth] ! Store
stb %l5, [%l6 + AOFF_task_thread + AOFF_thread_fpdepth] ! Store
ba,pt %xcc, 2b ! CTI
stb %g0, [%l4 + %l3] ! Store Group
......
/* $Id: ioctl32.c,v 1.63 1999/06/09 04:56:14 davem Exp $
/* $Id: ioctl32.c,v 1.65 1999/07/30 09:35:19 davem Exp $
* ioctl32.c: Conversion between 32bit and 64bit native ioctls.
*
* Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
......@@ -38,6 +38,7 @@
#include <linux/ext2_fs.h>
#include <linux/videodev.h>
#include <linux/netdevice.h>
#include <linux/raw.h>
#include <scsi/scsi.h>
/* Ugly hack. */
......@@ -2366,6 +2367,10 @@ asmlinkage int sys32_ioctl(unsigned int fd, unsigned int cmd, unsigned long arg)
case AUTOFS_IOC_PROTOVER:
case AUTOFS_IOC_EXPIRE:
/* Raw devices */
case RAW_SETBIND:
case RAW_GETBIND:
error = sys_ioctl (fd, cmd, arg);
goto out;
......
This diff is collapsed.
/* $Id: psycho.c,v 1.86 1999/07/01 10:39:43 davem Exp $
/* $Id: psycho.c,v 1.87 1999/07/23 01:56:45 davem Exp $
* psycho.c: Ultra/AX U2P PCI controller support.
*
* Copyright (C) 1997 David S. Miller (davem@caipfs.rutgers.edu)
......
......@@ -52,7 +52,7 @@ static inline void pt_succ_return(struct pt_regs *regs, unsigned long value)
static inline void
pt_succ_return_linux(struct pt_regs *regs, unsigned long value, long *addr)
{
if (current->tss.flags & SPARC_FLAG_32BIT) {
if (current->thread.flags & SPARC_FLAG_32BIT) {
if(put_user(value, (unsigned int *)addr))
return pt_error_return(regs, EFAULT);
} else {
......@@ -114,7 +114,7 @@ asmlinkage void do_ptrace(struct pt_regs *regs)
unsigned long addr2 = regs->u_regs[UREG_I4];
struct task_struct *child;
if (current->tss.flags & SPARC_FLAG_32BIT) {
if (current->thread.flags & SPARC_FLAG_32BIT) {
addr &= 0xffffffffUL;
data &= 0xffffffffUL;
addr2 &= 0xffffffffUL;
......@@ -220,7 +220,7 @@ asmlinkage void do_ptrace(struct pt_regs *regs)
goto out;
}
if(!(child->tss.flags & SPARC_FLAG_32BIT) &&
if(!(child->thread.flags & SPARC_FLAG_32BIT) &&
((request == PTRACE_READDATA64) ||
(request == PTRACE_WRITEDATA64) ||
(request == PTRACE_READTEXT64) ||
......@@ -242,7 +242,7 @@ asmlinkage void do_ptrace(struct pt_regs *regs)
int res, copied;
res = -EIO;
if (current->tss.flags & SPARC_FLAG_32BIT) {
if (current->thread.flags & SPARC_FLAG_32BIT) {
copied = access_process_vm(child, addr,
&tmp32, sizeof(tmp32), 0);
tmp64 = (unsigned long) tmp32;
......@@ -267,7 +267,7 @@ asmlinkage void do_ptrace(struct pt_regs *regs)
unsigned int tmp32;
int copied, res = -EIO;
if (current->tss.flags & SPARC_FLAG_32BIT) {
if (current->thread.flags & SPARC_FLAG_32BIT) {
tmp32 = data;
copied = access_process_vm(child, addr,
&tmp32, sizeof(tmp32), 1);
......@@ -289,7 +289,7 @@ asmlinkage void do_ptrace(struct pt_regs *regs)
case PTRACE_GETREGS: {
struct pt_regs32 *pregs = (struct pt_regs32 *) addr;
struct pt_regs *cregs = child->tss.kregs;
struct pt_regs *cregs = child->thread.kregs;
int rval;
if (__put_user(tstate_to_psr(cregs->tstate), (&pregs->psr)) ||
......@@ -313,7 +313,7 @@ asmlinkage void do_ptrace(struct pt_regs *regs)
case PTRACE_GETREGS64: {
struct pt_regs *pregs = (struct pt_regs *) addr;
struct pt_regs *cregs = child->tss.kregs;
struct pt_regs *cregs = child->thread.kregs;
int rval;
if (__put_user(cregs->tstate, (&pregs->tstate)) ||
......@@ -337,7 +337,7 @@ asmlinkage void do_ptrace(struct pt_regs *regs)
case PTRACE_SETREGS: {
struct pt_regs32 *pregs = (struct pt_regs32 *) addr;
struct pt_regs *cregs = child->tss.kregs;
struct pt_regs *cregs = child->thread.kregs;
unsigned int psr, pc, npc, y;
int i;
......@@ -370,7 +370,7 @@ asmlinkage void do_ptrace(struct pt_regs *regs)
case PTRACE_SETREGS64: {
struct pt_regs *pregs = (struct pt_regs *) addr;
struct pt_regs *cregs = child->tss.kregs;
struct pt_regs *cregs = child->thread.kregs;
unsigned long tstate, tpc, tnpc, y;
int i;
......@@ -418,7 +418,7 @@ asmlinkage void do_ptrace(struct pt_regs *regs)
if (copy_to_user(&fps->regs[0], fpregs,
(32 * sizeof(unsigned int))) ||
__put_user(child->tss.xfsr[0], (&fps->fsr)) ||
__put_user(child->thread.xfsr[0], (&fps->fsr)) ||
__put_user(0, (&fps->fpqd)) ||
__put_user(0, (&fps->flags)) ||
__put_user(0, (&fps->extra)) ||
......@@ -439,7 +439,7 @@ asmlinkage void do_ptrace(struct pt_regs *regs)
if (copy_to_user(&fps->regs[0], fpregs,
(64 * sizeof(unsigned int))) ||
__put_user(child->tss.xfsr[0], (&fps->fsr))) {
__put_user(child->thread.xfsr[0], (&fps->fsr))) {
pt_error_return(regs, EFAULT);
goto out;
}
......@@ -468,11 +468,11 @@ asmlinkage void do_ptrace(struct pt_regs *regs)
pt_error_return(regs, EFAULT);
goto out;
}
child->tss.xfsr[0] &= 0xffffffff00000000UL;
child->tss.xfsr[0] |= fsr;
if (!(child->tss.fpsaved[0] & FPRS_FEF))
child->tss.gsr[0] = 0;
child->tss.fpsaved[0] |= (FPRS_FEF | FPRS_DL);
child->thread.xfsr[0] &= 0xffffffff00000000UL;
child->thread.xfsr[0] |= fsr;
if (!(child->thread.fpsaved[0] & FPRS_FEF))
child->thread.gsr[0] = 0;
child->thread.fpsaved[0] |= (FPRS_FEF | FPRS_DL);
pt_succ_return(regs, 0);
goto out;
}
......@@ -486,13 +486,13 @@ asmlinkage void do_ptrace(struct pt_regs *regs)
if (copy_from_user(fpregs, &fps->regs[0],
(64 * sizeof(unsigned int))) ||
__get_user(child->tss.xfsr[0], (&fps->fsr))) {
__get_user(child->thread.xfsr[0], (&fps->fsr))) {
pt_error_return(regs, EFAULT);
goto out;
}
if (!(child->tss.fpsaved[0] & FPRS_FEF))
child->tss.gsr[0] = 0;
child->tss.fpsaved[0] |= (FPRS_FEF | FPRS_DL | FPRS_DU);
if (!(child->thread.fpsaved[0] & FPRS_FEF))
child->thread.gsr[0] = 0;
child->thread.fpsaved[0] |= (FPRS_FEF | FPRS_DL | FPRS_DU);
pt_succ_return(regs, 0);
goto out;
}
......@@ -538,11 +538,11 @@ asmlinkage void do_ptrace(struct pt_regs *regs)
goto out;
}
#ifdef DEBUG_PTRACE
printk ("Original: %016lx %016lx\n", child->tss.kregs->tpc, child->tss.kregs->tnpc);
printk ("Original: %016lx %016lx\n", child->thread.kregs->tpc, child->thread.kregs->tnpc);
printk ("Continuing with %016lx %016lx\n", addr, addr+4);
#endif
child->tss.kregs->tpc = addr;
child->tss.kregs->tnpc = addr + 4;
child->thread.kregs->tpc = addr;
child->thread.kregs->tnpc = addr + 4;
}
if (request == PTRACE_SYSCALL)
......@@ -554,8 +554,8 @@ asmlinkage void do_ptrace(struct pt_regs *regs)
#ifdef DEBUG_PTRACE
printk("CONT: %s [%d]: set exit_code = %x %lx %lx\n", child->comm,
child->pid, child->exit_code,
child->tss.kregs->tpc,
child->tss.kregs->tnpc);
child->thread.kregs->tpc,
child->thread.kregs->tnpc);
#endif
wake_up_process(child);
......@@ -634,7 +634,7 @@ asmlinkage void syscall_trace(void)
return;
current->exit_code = SIGTRAP;
current->state = TASK_STOPPED;
current->tss.flags ^= MAGIC_CONSTANT;
current->thread.flags ^= MAGIC_CONSTANT;
notify_parent(current, SIGCHLD);
schedule();
/*
......
/* $Id: rtrap.S,v 1.46 1999/05/25 16:53:20 jj Exp $
/* $Id: rtrap.S,v 1.47 1999/07/30 09:35:23 davem Exp $
* rtrap.S: Preparing for return from trap on Sparc V9.
*
* Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
......@@ -11,6 +11,7 @@
#include <asm/spitfire.h>
#include <asm/head.h>
#include <asm/visasm.h>
#include <asm/processor.h>
#define PTREGS_OFF (STACK_BIAS + REGWIN_SZ)
......@@ -39,13 +40,13 @@ rtrap: sethi %hi(bh_active), %l2
be,pt %icc, to_user
andn %l7, PSTATE_IE, %l7
ldub [%g6 + AOFF_task_tss + AOFF_thread_fpdepth], %l5
ldub [%g6 + AOFF_task_thread + AOFF_thread_fpdepth], %l5
brz,pt %l5, rt_continue
srl %l5, 1, %o0
add %g6, AOFF_task_tss + AOFF_thread_fpsaved, %l6
add %g6, AOFF_task_thread + AOFF_thread_fpsaved, %l6
ldub [%l6 + %o0], %l2
sub %l5, 2, %l5
add %g6, AOFF_task_tss + AOFF_thread_gsr, %o1
add %g6, AOFF_task_thread + AOFF_thread_gsr, %o1
andcc %l2, (FPRS_FEF|FPRS_DU), %g0
be,pt %icc, 2f
and %l2, FPRS_DL, %l6
......@@ -55,7 +56,7 @@ rtrap: sethi %hi(bh_active), %l2
rd %fprs, %g5
wr %g5, FPRS_FEF, %fprs
ldub [%o1 + %o0], %g5
add %g6, AOFF_task_tss + AOFF_thread_xfsr, %o1
add %g6, AOFF_task_thread + AOFF_thread_xfsr, %o1
membar #StoreLoad | #LoadLoad
sll %o0, 8, %o2
add %g6, AOFF_task_fpregs, %o3
......@@ -71,9 +72,8 @@ rtrap: sethi %hi(bh_active), %l2
ldda [%o4 + %o2] ASI_BLK_P, %f48
1: membar #Sync
ldx [%o1 + %o5], %fsr
2: stb %l5, [%g6 + AOFF_task_tss + AOFF_thread_fpdepth]
rt_continue: lduh [%g6 + AOFF_task_tss + AOFF_thread_ctx], %l0
ldx [%sp + PTREGS_OFF + PT_V9_G1], %g1
2: stb %l5, [%g6 + AOFF_task_thread + AOFF_thread_fpdepth]
rt_continue: ldx [%sp + PTREGS_OFF + PT_V9_G1], %g1
ldx [%sp + PTREGS_OFF + PT_V9_G2], %g2
ldx [%sp + PTREGS_OFF + PT_V9_G3], %g3
mov %g6, %o5
......@@ -105,10 +105,11 @@ rt_continue: lduh [%g6 + AOFF_task_tss + AOFF_thread_ctx], %l0
wrpr %o2, %g0, %tnpc
brnz,pn %l3, kern_rtt
mov PRIMARY_CONTEXT, %l7
ldxa [%l7 + %l7] ASI_DMMU, %l0
stxa %l0, [%l7] ASI_DMMU
flush %o5
rdpr %wstate, %l1
rdpr %wstate, %l1
rdpr %otherwin, %l2
srl %l1, 3, %l1
wrpr %l2, %g0, %canrestore
......@@ -116,8 +117,8 @@ rt_continue: lduh [%g6 + AOFF_task_tss + AOFF_thread_ctx], %l0
wrpr %g0, %g0, %otherwin
restore
rdpr %canrestore, %g1
wrpr %g1, 0x0, %cleanwin
wrpr %g1, 0x0, %cleanwin
retry
kern_rtt: restore
retry
......@@ -125,8 +126,8 @@ to_user: ldx [%g6 + AOFF_task_need_resched], %l0
wrpr %l7, PSTATE_IE, %pstate
orcc %g0, %l0, %g0
be,a,pt %xcc, check_signal
lduw [%g6 + AOFF_task_sigpending], %l0
lduw [%g6 + AOFF_task_sigpending], %l0
call schedule
nop
lduw [%g6 + AOFF_task_sigpending], %l0
......@@ -146,7 +147,7 @@ check_signal: brz,a,pt %l0, check_user_wins
*/
check_user_wins:
wrpr %l7, 0x0, %pstate
lduh [%g6 + AOFF_task_tss + AOFF_thread_w_saved], %o2
ldub [%g6 + AOFF_task_thread + AOFF_thread_w_saved], %o2
brz,pt %o2, 1f
sethi %hi(TSTATE_PEF), %l6
......@@ -162,8 +163,8 @@ check_user_wins:
call rtrap_check
add %sp, STACK_BIAS + REGWIN_SZ, %o0
#endif
lduh [%g6 + AOFF_task_tss + AOFF_thread_flags], %l5
andcc %l5, 0x200, %g0
ldub [%g6 + AOFF_task_thread + AOFF_thread_flags], %l5
andcc %l5, SPARC_FLAG_PERFCTR, %g0
be,pt %xcc, 1f
nop
......@@ -172,7 +173,7 @@ check_user_wins:
call update_perfctrs
nop
wrpr %l7, 0x0, %pstate
lduh [%g6 + AOFF_task_tss + AOFF_thread_w_saved], %o2
ldub [%g6 + AOFF_task_thread + AOFF_thread_w_saved], %o2
brz,pt %o2, 1f
sethi %hi(TSTATE_PEF), %l6
wrpr %l7, PSTATE_IE, %pstate
......@@ -182,14 +183,14 @@ check_user_wins:
1:
andcc %l1, %l6, %g0
be,pt %xcc, rt_continue
stb %g0, [%g6 + AOFF_task_tss + AOFF_thread_fpdepth] ! This is neccessary for non-syscall rtraps only
stb %g0, [%g6 + AOFF_task_thread + AOFF_thread_fpdepth] ! This is neccessary for non-syscall rtraps only
rd %fprs, %l5
andcc %l5, FPRS_FEF, %g0
be,a,pn %icc, rt_continue
andn %l1, %l6, %l1
ba,pt %xcc, rt_continue+4
lduh [%g6 + AOFF_task_tss + AOFF_thread_ctx], %l0
ba,pt %xcc, rt_continue
nop
5: wr %g0, FPRS_FEF, %fprs
membar #StoreLoad | #LoadLoad
......@@ -201,6 +202,6 @@ check_user_wins:
1: membar #Sync
wr %g0, FPRS_DU, %fprs
ba,pt %xcc, rt_continue
stb %l5, [%g6 + AOFF_task_tss + AOFF_thread_fpdepth]
stb %l5, [%g6 + AOFF_task_thread + AOFF_thread_fpdepth]
#undef PTREGS_OFF
/* $Id: setup.c,v 1.44 1999/05/28 02:17:29 davem Exp $
/* $Id: setup.c,v 1.46 1999/08/02 08:39:36 davem Exp $
* linux/arch/sparc64/kernel/setup.c
*
* Copyright (C) 1995,1996 David S. Miller (davem@caip.rutgers.edu)
......@@ -135,17 +135,21 @@ int prom_callback(long *args)
* Find process owning ctx, lookup mapping.
*/
struct task_struct *p;
struct mm_struct *mm = NULL;
pgd_t *pgdp;
pmd_t *pmdp;
pte_t *ptep;
for_each_task(p)
if (p->tss.ctx == ctx)
for_each_task(p) {
mm = p->mm;
if (CTX_HWBITS(mm->context) == ctx)
break;
if (p->tss.ctx != ctx)
}
if (!mm ||
CTX_HWBITS(mm->context) != ctx)
goto done;
pgdp = pgd_offset(p->mm, va);
pgdp = pgd_offset(mm, va);
if (pgd_none(*pgdp))
goto done;
pmdp = pmd_offset(pgdp, va);
......@@ -534,8 +538,7 @@ __initfunc(void setup_arch(char **cmdline_p,
init_mm.mmap->vm_page_prot = PAGE_SHARED;
init_mm.mmap->vm_start = PAGE_OFFSET;
init_mm.mmap->vm_end = *memory_end_p;
init_mm.context = (unsigned long) NO_CONTEXT;
init_task.tss.kregs = &fake_swapper_regs;
init_task.thread.kregs = &fake_swapper_regs;
#ifdef CONFIG_IP_PNP
if (!ic_set_manually) {
......
This diff is collapsed.
This diff is collapsed.
......@@ -53,7 +53,7 @@ unsigned long cpu_present_map = 0;
int smp_num_cpus = 1;
int smp_threads_ready = 0;
__initfunc(void smp_setup(char *str, int *ints))
void __init smp_setup(char *str, int *ints)
{
/* XXX implement me XXX */
}
......@@ -151,13 +151,17 @@ __initfunc(void smp_callin(void))
/* Clear this or we will die instantly when we
* schedule back to this idler...
*/
current->tss.flags &= ~(SPARC_FLAG_NEWCHILD);
current->thread.flags &= ~(SPARC_FLAG_NEWCHILD);
/* Attach to the address space of init_task. */
atomic_inc(&init_mm.mm_count);
current->active_mm = &init_mm;
while(!smp_processors_ready)
membar("#LoadLoad");
}
extern int cpu_idle(void *unused);
extern int cpu_idle(void);
extern void init_IRQ(void);
void initialize_secondary(void)
......@@ -169,7 +173,7 @@ int start_secondary(void *unused)
trap_init();
init_IRQ();
smp_callin();
return cpu_idle(NULL);
return cpu_idle();
}
void cpu_panic(void)
......@@ -216,9 +220,17 @@ __initfunc(void smp_boot_cpus(void))
entry += phys_base - KERNBASE;
cookie += phys_base - KERNBASE;
kernel_thread(start_secondary, NULL, CLONE_PID);
p = task[++cpucount];
cpucount++;
p = init_task.prev_task;
init_tasks[cpucount] = p;
p->processor = i;
p->has_cpu = 1; /* we schedule the first task manually */
del_from_runqueue(p);
unhash_process(p);
callin_flag = 0;
for (no = 0; no < linux_num_cpus; no++)
if (linux_cpus[no].mid == i)
......@@ -384,6 +396,9 @@ void smp_flush_tlb_all(void)
* are flush_tlb_*() routines, and these run after flush_cache_*()
* which performs the flushw.
*
* XXX I diked out the fancy flush avoidance code for the
* XXX swapping cases for now until the new MM code stabilizes. -DaveM
*
* The SMP TLB coherency scheme we use works as follows:
*
* 1) mm->cpu_vm_mask is a bit mask of which cpus an address
......@@ -395,16 +410,16 @@ void smp_flush_tlb_all(void)
* cross calls.
*
* One invariant is that when a cpu switches to a process, and
* that processes tsk->mm->cpu_vm_mask does not have the current
* cpu's bit set, that tlb context is flushed locally.
* that processes tsk->active_mm->cpu_vm_mask does not have the
* current cpu's bit set, that tlb context is flushed locally.
*
* If the address space is non-shared (ie. mm->count == 1) we avoid
* cross calls when we want to flush the currently running process's
* tlb state. This is done by clearing all cpu bits except the current
* processor's in current->mm->cpu_vm_mask and performing the flush
* locally only. This will force any subsequent cpus which run this
* task to flush the context from the local tlb if the process migrates
* to another cpu (again).
* processor's in current->active_mm->cpu_vm_mask and performing the
* flush locally only. This will force any subsequent cpus which run
* this task to flush the context from the local tlb if the process
* migrates to another cpu (again).
*
* 3) For shared address spaces (threads) and swapping we bite the
* bullet for most cases and perform the cross call.
......@@ -422,13 +437,13 @@ void smp_flush_tlb_all(void)
*/
void smp_flush_tlb_mm(struct mm_struct *mm)
{
u32 ctx = mm->context & 0x3ff;
u32 ctx = CTX_HWBITS(mm->context);
if(mm == current->mm && atomic_read(&mm->count) == 1) {
if(mm->cpu_vm_mask != (1UL << smp_processor_id()))
mm->cpu_vm_mask = (1UL << smp_processor_id());
if (mm == current->active_mm &&
atomic_read(&mm->mm_users) == 1 &&
(mm->cpu_vm_mask == (1UL << smp_processor_id())))
goto local_flush_and_out;
}
smp_cross_call(&xcall_flush_tlb_mm, ctx, 0, 0);
local_flush_and_out:
......@@ -438,15 +453,15 @@ void smp_flush_tlb_mm(struct mm_struct *mm)
void smp_flush_tlb_range(struct mm_struct *mm, unsigned long start,
unsigned long end)
{
u32 ctx = mm->context & 0x3ff;
u32 ctx = CTX_HWBITS(mm->context);
start &= PAGE_MASK;
end &= PAGE_MASK;
if(mm == current->mm && atomic_read(&mm->count) == 1) {
if(mm->cpu_vm_mask != (1UL << smp_processor_id()))
mm->cpu_vm_mask = (1UL << smp_processor_id());
if(mm == current->active_mm &&
atomic_read(&mm->mm_users) == 1 &&
(mm->cpu_vm_mask == (1UL << smp_processor_id())))
goto local_flush_and_out;
}
smp_cross_call(&xcall_flush_tlb_range, ctx, start, end);
local_flush_and_out:
......@@ -455,30 +470,15 @@ void smp_flush_tlb_range(struct mm_struct *mm, unsigned long start,
void smp_flush_tlb_page(struct mm_struct *mm, unsigned long page)
{
u32 ctx = mm->context & 0x3ff;
u32 ctx = CTX_HWBITS(mm->context);
page &= PAGE_MASK;
if(mm == current->mm && atomic_read(&mm->count) == 1) {
if(mm->cpu_vm_mask != (1UL << smp_processor_id()))
mm->cpu_vm_mask = (1UL << smp_processor_id());
if(mm == current->active_mm &&
atomic_read(&mm->mm_users) == 1 &&
(mm->cpu_vm_mask == (1UL << smp_processor_id()))) {
goto local_flush_and_out;
} else {
/* Try to handle two special cases to avoid cross calls
* in common scenerios where we are swapping process
* pages out.
*/
if(((mm->context ^ tlb_context_cache) & CTX_VERSION_MASK) ||
(mm->cpu_vm_mask == 0)) {
/* A dead context cannot ever become "alive" until
* a task switch is done to it.
*/
return; /* It's dead, nothing to do. */
}
if(mm->cpu_vm_mask == (1UL << smp_processor_id())) {
__flush_tlb_page(ctx, page, SECONDARY_CONTEXT);
return; /* Only local flush is necessary. */
}
}
smp_cross_call(&xcall_flush_tlb_page, ctx, page, 0);
local_flush_and_out:
......
/* $Id: sparc64_ksyms.c,v 1.60 1999/07/03 22:11:12 davem Exp $
/* $Id: sparc64_ksyms.c,v 1.61 1999/07/23 01:56:48 davem Exp $
* arch/sparc64/kernel/sparc64_ksyms.c: Sparc64 specific ksyms support.
*
* Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
......
/* $Id: sys_sparc.c,v 1.27 1999/06/02 12:06:34 jj Exp $
/* $Id: sys_sparc.c,v 1.28 1999/07/30 09:35:27 davem Exp $
* linux/arch/sparc64/kernel/sys_sparc.c
*
* This file contains various random system calls that
......@@ -170,7 +170,7 @@ asmlinkage unsigned long sys_mmap(unsigned long addr, unsigned long len,
}
retval = -EINVAL;
if (current->tss.flags & SPARC_FLAG_32BIT) {
if (current->thread.flags & SPARC_FLAG_32BIT) {
if (len > 0xf0000000UL || addr > 0xf0000000UL - len)
goto out_putf;
} else {
......@@ -281,40 +281,40 @@ asmlinkage int sys_utrap_install(utrap_entry_t type, utrap_handler_t new_p,
return -EINVAL;
if (new_p == (utrap_handler_t)(long)UTH_NOCHANGE) {
if (old_p) {
if (!current->tss.utraps)
if (!current->thread.utraps)
put_user_ret(NULL, old_p, -EFAULT);
else
put_user_ret((utrap_handler_t)(current->tss.utraps[type]), old_p, -EFAULT);
put_user_ret((utrap_handler_t)(current->thread.utraps[type]), old_p, -EFAULT);
}
if (old_d)
put_user_ret(NULL, old_d, -EFAULT);
return 0;
}
lock_kernel();
if (!current->tss.utraps) {
current->tss.utraps = kmalloc((UT_TRAP_INSTRUCTION_31+1)*sizeof(long), GFP_KERNEL);
if (!current->tss.utraps) return -ENOMEM;
current->tss.utraps[0] = 1;
memset(current->tss.utraps+1, 0, UT_TRAP_INSTRUCTION_31*sizeof(long));
if (!current->thread.utraps) {
current->thread.utraps = kmalloc((UT_TRAP_INSTRUCTION_31+1)*sizeof(long), GFP_KERNEL);
if (!current->thread.utraps) return -ENOMEM;
current->thread.utraps[0] = 1;
memset(current->thread.utraps+1, 0, UT_TRAP_INSTRUCTION_31*sizeof(long));
} else {
if ((utrap_handler_t)current->tss.utraps[type] != new_p && current->tss.utraps[0] > 1) {
long *p = current->tss.utraps;
if ((utrap_handler_t)current->thread.utraps[type] != new_p && current->thread.utraps[0] > 1) {
long *p = current->thread.utraps;
current->tss.utraps = kmalloc((UT_TRAP_INSTRUCTION_31+1)*sizeof(long), GFP_KERNEL);
if (!current->tss.utraps) {
current->tss.utraps = p;
current->thread.utraps = kmalloc((UT_TRAP_INSTRUCTION_31+1)*sizeof(long), GFP_KERNEL);
if (!current->thread.utraps) {
current->thread.utraps = p;
return -ENOMEM;
}
p[0]--;
current->tss.utraps[0] = 1;
memcpy(current->tss.utraps+1, p+1, UT_TRAP_INSTRUCTION_31*sizeof(long));
current->thread.utraps[0] = 1;
memcpy(current->thread.utraps+1, p+1, UT_TRAP_INSTRUCTION_31*sizeof(long));
}
}
if (old_p)
put_user_ret((utrap_handler_t)(current->tss.utraps[type]), old_p, -EFAULT);
put_user_ret((utrap_handler_t)(current->thread.utraps[type]), old_p, -EFAULT);
if (old_d)
put_user_ret(NULL, old_d, -EFAULT);
current->tss.utraps[type] = (long)new_p;
current->thread.utraps[type] = (long)new_p;
unlock_kernel();
return 0;
}
......@@ -363,10 +363,10 @@ update_perfctrs(void)
unsigned long pic, tmp;
read_pic(pic);
tmp = (current->tss.kernel_cntd0 += (unsigned int)pic);
__put_user(tmp, current->tss.user_cntd0);
tmp = (current->tss.kernel_cntd1 += (pic >> 32));
__put_user(tmp, current->tss.user_cntd1);
tmp = (current->thread.kernel_cntd0 += (unsigned int)pic);
__put_user(tmp, current->thread.user_cntd0);
tmp = (current->thread.kernel_cntd1 += (pic >> 32));
__put_user(tmp, current->thread.user_cntd1);
reset_pic();
}
......@@ -377,24 +377,24 @@ sys_perfctr(int opcode, unsigned long arg0, unsigned long arg1, unsigned long ar
switch(opcode) {
case PERFCTR_ON:
current->tss.pcr_reg = arg2;
current->tss.user_cntd0 = (u64 *) arg0;
current->tss.user_cntd1 = (u64 *) arg1;
current->tss.kernel_cntd0 =
current->tss.kernel_cntd1 = 0;
current->thread.pcr_reg = arg2;
current->thread.user_cntd0 = (u64 *) arg0;
current->thread.user_cntd1 = (u64 *) arg1;
current->thread.kernel_cntd0 =
current->thread.kernel_cntd1 = 0;
write_pcr(arg2);
reset_pic();
current->tss.flags |= SPARC_FLAG_PERFCTR;
current->thread.flags |= SPARC_FLAG_PERFCTR;
break;
case PERFCTR_OFF:
err = -EINVAL;
if ((current->tss.flags & SPARC_FLAG_PERFCTR) != 0) {
current->tss.user_cntd0 =
current->tss.user_cntd1 = NULL;
current->tss.pcr_reg = 0;
if ((current->thread.flags & SPARC_FLAG_PERFCTR) != 0) {
current->thread.user_cntd0 =
current->thread.user_cntd1 = NULL;
current->thread.pcr_reg = 0;
write_pcr(0);
current->tss.flags &= ~(SPARC_FLAG_PERFCTR);
current->thread.flags &= ~(SPARC_FLAG_PERFCTR);
err = 0;
}
break;
......@@ -402,50 +402,50 @@ sys_perfctr(int opcode, unsigned long arg0, unsigned long arg1, unsigned long ar
case PERFCTR_READ: {
unsigned long pic, tmp;
if (!(current->tss.flags & SPARC_FLAG_PERFCTR)) {
if (!(current->thread.flags & SPARC_FLAG_PERFCTR)) {
err = -EINVAL;
break;
}
read_pic(pic);
tmp = (current->tss.kernel_cntd0 += (unsigned int)pic);
err |= __put_user(tmp, current->tss.user_cntd0);
tmp = (current->tss.kernel_cntd1 += (pic >> 32));
err |= __put_user(tmp, current->tss.user_cntd1);
tmp = (current->thread.kernel_cntd0 += (unsigned int)pic);
err |= __put_user(tmp, current->thread.user_cntd0);
tmp = (current->thread.kernel_cntd1 += (pic >> 32));
err |= __put_user(tmp, current->thread.user_cntd1);
reset_pic();
break;
}
case PERFCTR_CLRPIC:
if (!(current->tss.flags & SPARC_FLAG_PERFCTR)) {
if (!(current->thread.flags & SPARC_FLAG_PERFCTR)) {
err = -EINVAL;
break;
}
current->tss.kernel_cntd0 =
current->tss.kernel_cntd1 = 0;
current->thread.kernel_cntd0 =
current->thread.kernel_cntd1 = 0;
reset_pic();
break;
case PERFCTR_SETPCR: {
u64 *user_pcr = (u64 *)arg0;
if (!(current->tss.flags & SPARC_FLAG_PERFCTR)) {
if (!(current->thread.flags & SPARC_FLAG_PERFCTR)) {
err = -EINVAL;
break;
}
err |= __get_user(current->tss.pcr_reg, user_pcr);
write_pcr(current->tss.pcr_reg);
current->tss.kernel_cntd0 =
current->tss.kernel_cntd1 = 0;
err |= __get_user(current->thread.pcr_reg, user_pcr);
write_pcr(current->thread.pcr_reg);
current->thread.kernel_cntd0 =
current->thread.kernel_cntd1 = 0;
reset_pic();
break;
}
case PERFCTR_GETPCR: {
u64 *user_pcr = (u64 *)arg0;
if (!(current->tss.flags & SPARC_FLAG_PERFCTR)) {
if (!(current->thread.flags & SPARC_FLAG_PERFCTR)) {
err = -EINVAL;
break;
}
err |= __put_user(current->tss.pcr_reg, user_pcr);
err |= __put_user(current->thread.pcr_reg, user_pcr);
break;
}
......
/* $Id: sys_sparc32.c,v 1.112 1999/06/29 12:34:02 davem Exp $
/* $Id: sys_sparc32.c,v 1.117 1999/08/02 08:39:40 davem Exp $
* sys_sparc32.c: Conversion between 32bit and 64bit native syscalls.
*
* Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
......@@ -1663,85 +1663,6 @@ asmlinkage int sys32_rt_sigpending(sigset_t32 *set, __kernel_size_t32 sigsetsize
return ret;
}
siginfo_t32 *
siginfo64to32(siginfo_t32 *d, siginfo_t *s)
{
memset (&d, 0, sizeof(siginfo_t32));
d->si_signo = s->si_signo;
d->si_errno = s->si_errno;
d->si_code = s->si_code;
if (s->si_signo >= SIGRTMIN) {
d->si_pid = s->si_pid;
d->si_uid = s->si_uid;
/* XXX: Ouch, how to find this out??? */
d->si_int = s->si_int;
} else switch (s->si_signo) {
/* XXX: What about POSIX1.b timers */
case SIGCHLD:
d->si_pid = s->si_pid;
d->si_status = s->si_status;
d->si_utime = s->si_utime;
d->si_stime = s->si_stime;
break;
case SIGSEGV:
case SIGBUS:
case SIGFPE:
case SIGILL:
d->si_addr = (long)(s->si_addr);
/* XXX: Do we need to translate this from sparc64 to sparc32 traps? */
d->si_trapno = s->si_trapno;
break;
case SIGPOLL:
d->si_band = s->si_band;
d->si_fd = s->si_fd;
break;
default:
d->si_pid = s->si_pid;
d->si_uid = s->si_uid;
break;
}
return d;
}
siginfo_t *
siginfo32to64(siginfo_t *d, siginfo_t32 *s)
{
d->si_signo = s->si_signo;
d->si_errno = s->si_errno;
d->si_code = s->si_code;
if (s->si_signo >= SIGRTMIN) {
d->si_pid = s->si_pid;
d->si_uid = s->si_uid;
/* XXX: Ouch, how to find this out??? */
d->si_int = s->si_int;
} else switch (s->si_signo) {
/* XXX: What about POSIX1.b timers */
case SIGCHLD:
d->si_pid = s->si_pid;
d->si_status = s->si_status;
d->si_utime = s->si_utime;
d->si_stime = s->si_stime;
break;
case SIGSEGV:
case SIGBUS:
case SIGFPE:
case SIGILL:
d->si_addr = (void *)A(s->si_addr);
/* XXX: Do we need to translate this from sparc32 to sparc64 traps? */
d->si_trapno = s->si_trapno;
break;
case SIGPOLL:
d->si_band = s->si_band;
d->si_fd = s->si_fd;
break;
default:
d->si_pid = s->si_pid;
d->si_uid = s->si_uid;
break;
}
return d;
}
extern asmlinkage int
sys_rt_sigtimedwait(const sigset_t *uthese, siginfo_t *uinfo,
const struct timespec *uts, size_t sigsetsize);
......@@ -1753,10 +1674,9 @@ sys32_rt_sigtimedwait(sigset_t32 *uthese, siginfo_t32 *uinfo,
sigset_t s;
sigset_t32 s32;
struct timespec t;
int ret;
int ret, err, i;
mm_segment_t old_fs = get_fs();
siginfo_t info;
siginfo_t32 info32;
if (copy_from_user (&s32, uthese, sizeof(sigset_t32)))
return -EFAULT;
......@@ -1776,8 +1696,43 @@ sys32_rt_sigtimedwait(sigset_t32 *uthese, siginfo_t32 *uinfo,
ret = sys_rt_sigtimedwait(&s, &info, &t, sigsetsize);
set_fs (old_fs);
if (ret >= 0 && uinfo) {
if (copy_to_user (uinfo, siginfo64to32(&info32, &info), sizeof(siginfo_t32)))
return -EFAULT;
err = put_user (info.si_signo, &uinfo->si_signo);
err |= __put_user (info.si_errno, &uinfo->si_errno);
err |= __put_user (info.si_code, &uinfo->si_code);
if (info.si_code < 0)
err |= __copy_to_user (uinfo->_sifields._pad, info._sifields._pad, SI_PAD_SIZE);
else {
i = info.si_signo;
if (info.si_code == SI_USER)
i = SIGRTMIN;
switch (i) {
case SIGPOLL:
err |= __put_user (info.si_band, &uinfo->si_band);
err |= __put_user (info.si_fd, &uinfo->si_fd);
break;
case SIGCHLD:
err |= __put_user (info.si_pid, &uinfo->si_pid);
err |= __put_user (info.si_uid, &uinfo->si_uid);
err |= __put_user (info.si_status, &uinfo->si_status);
err |= __put_user (info.si_utime, &uinfo->si_utime);
err |= __put_user (info.si_stime, &uinfo->si_stime);
break;
case SIGSEGV:
case SIGILL:
case SIGFPE:
case SIGBUS:
case SIGEMT:
err |= __put_user ((long)info.si_addr, &uinfo->si_addr);
err |= __put_user (info.si_trapno, &uinfo->si_trapno);
break;
default:
err |= __put_user (info.si_pid, &uinfo->si_pid);
err |= __put_user (info.si_uid, &uinfo->si_uid);
break;
}
}
if (err)
ret = -EFAULT;
}
return ret;
}
......@@ -1789,14 +1744,12 @@ asmlinkage int
sys32_rt_sigqueueinfo(int pid, int sig, siginfo_t32 *uinfo)
{
siginfo_t info;
siginfo_t32 info32;
int ret;
mm_segment_t old_fs = get_fs();
if (copy_from_user (&info32, uinfo, sizeof(siginfo_t32)))
if (copy_from_user (&info, uinfo, 3*sizeof(int)) ||
copy_from_user (info._sifields._pad, uinfo->_sifields._pad, SI_PAD_SIZE))
return -EFAULT;
/* XXX: Is this correct? */
siginfo32to64(&info, &info32);
set_fs (KERNEL_DS);
ret = sys_rt_sigqueueinfo(pid, sig, &info);
set_fs (old_fs);
......@@ -2659,7 +2612,7 @@ asmlinkage int sys32_sigaction (int sig, struct old_sigaction32 *act, struct old
int ret;
if(sig < 0) {
current->tss.new_signal = 1;
current->thread.flags |= SPARC_FLAG_NEWSIGNALS;
sig = -sig;
}
......@@ -2703,7 +2656,7 @@ sys32_rt_sigaction(int sig, struct sigaction32 *act, struct sigaction32 *oact,
/* All tasks which use RT signals (effectively) use
* new style signals.
*/
current->tss.new_signal = 1;
current->thread.flags |= SPARC_FLAG_NEWSIGNALS;
if (act) {
new_ka.ka_restorer = restorer;
......@@ -2883,6 +2836,8 @@ asmlinkage int sparc32_execve(struct pt_regs *regs)
int error, base = 0;
char *filename;
/* User register window flush is done by entry.S */
/* Check for indirect call. */
if((u32)regs->u_regs[UREG_G1] == 0)
base = 1;
......@@ -2899,8 +2854,8 @@ asmlinkage int sparc32_execve(struct pt_regs *regs)
if(!error) {
fprs_write(0);
current->tss.xfsr[0] = 0;
current->tss.fpsaved[0] = 0;
current->thread.xfsr[0] = 0;
current->thread.fpsaved[0] = 0;
regs->tstate &= ~TSTATE_PEF;
}
out:
......
/* $Id: sys_sunos32.c,v 1.28 1999/06/29 12:34:04 davem Exp $
/* $Id: sys_sunos32.c,v 1.30 1999/07/30 09:35:31 davem Exp $
* sys_sunos32.c: SunOS binary compatability layer on sparc64.
*
* Copyright (C) 1995, 1996, 1997 David S. Miller (davem@caip.rutgers.edu)
......@@ -557,9 +557,9 @@ asmlinkage int sunos_nosys(void)
struct pt_regs *regs;
lock_kernel();
regs = current->tss.kregs;
current->tss.sig_address = regs->tpc;
current->tss.sig_desc = regs->u_regs[UREG_G1];
regs = current->thread.kregs;
current->thread.sig_address = regs->tpc;
current->thread.sig_desc = regs->u_regs[UREG_G1];
send_sig(SIGSYS, current, 1);
printk("Process makes ni_syscall number %d, register dump:\n",
(int) regs->u_regs[UREG_G1]);
......@@ -1159,7 +1159,7 @@ asmlinkage int sunos_msgsys(int op, u32 arg1, u32 arg2, u32 arg3, u32 arg4)
if(!kmbuf)
break;
sp = (struct sparc_stackf32 *)
(current->tss.kregs->u_regs[UREG_FP] & 0xffffffffUL);
(current->thread.kregs->u_regs[UREG_FP] & 0xffffffffUL);
if(get_user(arg5, &sp->xxargs[0])) {
rval = -EFAULT;
break;
......
/* $Id: systbls.S,v 1.54 1999/06/02 12:06:31 jj Exp $
/* $Id: systbls.S,v 1.56 1999/07/31 00:06:17 davem Exp $
* systbls.S: System call entry point tables for OS compatibility.
* The native Linux system call table lives here also.
*
......@@ -59,7 +59,7 @@ sys_call_table32:
.word sys_nis_syscall, sys_nis_syscall, sys_getppid, sys32_sigaction, sys_sgetmask
/*200*/ .word sys_ssetmask, sys_sigsuspend, sys32_newlstat, sys_uselib, old32_readdir
.word sys_nis_syscall, sys32_socketcall, sys_syslog, sys_nis_syscall, sys_nis_syscall
/*210*/ .word sys_idle, sys_nis_syscall, sys_waitpid, sys_swapoff, sys32_sysinfo
/*210*/ .word sys_nis_syscall, sys_nis_syscall, sys_waitpid, sys_swapoff, sys32_sysinfo
.word sys32_ipc, sys32_sigreturn, sys_clone, sys_nis_syscall, sys32_adjtimex
/*220*/ .word sys32_sigprocmask, sys32_create_module, sys32_delete_module, sys32_get_kernel_syms, sys_getpgid
.word sys32_bdflush, sys32_sysfs, sys_nis_syscall, sys_setfsuid, sys_setfsgid
......@@ -112,15 +112,15 @@ sys_call_table:
.word sys_quotactl, sys_nis_syscall, sys_mount, sys_ustat, sys_nis_syscall
/*170*/ .word sys_nis_syscall, sys_nis_syscall, sys_nis_syscall, sys_nis_syscall, sys_getdents
.word sys_setsid, sys_fchdir, sys_nis_syscall, sys_nis_syscall, sys_nis_syscall
/*180*/ .word sys_nis_syscall, sys_nis_syscall, sys_nis_syscall, sys_sigpending, sys_query_module
/*180*/ .word sys_nis_syscall, sys_nis_syscall, sys_nis_syscall, sys_nis_syscall, sys_query_module
.word sys_setpgid, sys_nis_syscall, sys_nis_syscall, sys_nis_syscall, sys_newuname
/*190*/ .word sys_init_module, sys_personality, sys_nis_syscall, sys_nis_syscall, sys_nis_syscall
.word sys_nis_syscall, sys_nis_syscall, sys_getppid, sys_nis_syscall, sys_sgetmask
/*200*/ .word sys_ssetmask, sys_sigsuspend, sys_newlstat, sys_uselib, sys_nis_syscall
/*200*/ .word sys_ssetmask, sys_nis_syscall, sys_newlstat, sys_uselib, sys_nis_syscall
.word sys_nis_syscall, sys_socketcall, sys_syslog, sys_nis_syscall, sys_nis_syscall
/*210*/ .word sys_idle, sys_nis_syscall, sys_waitpid, sys_swapoff, sys_sysinfo
.word sys_ipc, sys_sigreturn, sys_clone, sys_nis_syscall, sys_adjtimex
/*220*/ .word sys_sigprocmask, sys_create_module, sys_delete_module, sys_get_kernel_syms, sys_getpgid
/*210*/ .word sys_nis_syscall, sys_nis_syscall, sys_waitpid, sys_swapoff, sys_sysinfo
.word sys_ipc, sys_nis_syscall, sys_clone, sys_nis_syscall, sys_adjtimex
/*220*/ .word sys_nis_syscall, sys_create_module, sys_delete_module, sys_get_kernel_syms, sys_getpgid
.word sys_bdflush, sys_sysfs, sys_nis_syscall, sys_setfsuid, sys_setfsgid
/*230*/ .word sys_select, sys_time, sys_nis_syscall, sys_stime, sys_nis_syscall
.word sys_nis_syscall, sys_llseek, sys_mlock, sys_munlock, sys_mlockall
......
/* $Id: traps.c,v 1.60 1999/06/02 19:19:55 jj Exp $
/* $Id: traps.c,v 1.61 1999/07/30 09:35:32 davem Exp $
* arch/sparc64/kernel/traps.c
*
* Copyright (C) 1995,1997 David S. Miller (davem@caip.rutgers.edu)
......@@ -147,12 +147,12 @@ void syscall_trace_entry(unsigned long g1, struct pt_regs *regs)
if(i)
printk(",");
if(!sdp->arg_is_string[i]) {
if (current->tss.flags & SPARC_FLAG_32BIT)
if (current->thread.flags & SPARC_FLAG_32BIT)
printk("%08x", (unsigned int)regs->u_regs[UREG_I0 + i]);
else
printk("%016lx", regs->u_regs[UREG_I0 + i]);
} else {
if (current->tss.flags & SPARC_FLAG_32BIT)
if (current->thread.flags & SPARC_FLAG_32BIT)
strncpy_from_user(scall_strbuf,
(char *)(regs->u_regs[UREG_I0 + i] & 0xffffffff),
512);
......@@ -178,7 +178,7 @@ unsigned long syscall_trace_exit(unsigned long retval, struct pt_regs *regs)
}
#endif /* SYSCALL_TRACING */
#if 0
#if 1
void rtrap_check(struct pt_regs *regs)
{
register unsigned long pgd_phys asm("o1");
......@@ -219,7 +219,7 @@ void rtrap_check(struct pt_regs *regs)
if((pgd_phys != __pa(current->mm->pgd)) ||
((pgd_cache != 0) &&
(pgd_cache != pgd_val(current->mm->pgd[0]))) ||
(pgd_cache != pgd_val(current->mm->pgd[0])<<11UL)) ||
(g1_or_g3 != (0xfffffffe00000000UL | 0x0000000000000018UL)) ||
#define KERN_HIGHBITS ((_PAGE_VALID | _PAGE_SZ4MB) ^ 0xfffff80000000000)
#define KERN_LOWBITS (_PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_W)
......@@ -228,18 +228,17 @@ void rtrap_check(struct pt_regs *regs)
#undef KERN_LOWBITS
((ctx != (current->mm->context & 0x3ff)) ||
(ctx == 0) ||
(current->tss.ctx != ctx))) {
(CTX_HWBITS(current->mm->context) != ctx))) {
printk("SHIT[%s:%d]: "
"(PP[%016lx] CACH[%016lx] CTX[%x] g1g3[%016lx] g2[%016lx]) ",
"(PP[%016lx] CACH[%016lx] CTX[%lx] g1g3[%016lx] g2[%016lx]) ",
current->comm, current->pid,
pgd_phys, pgd_cache, ctx, g1_or_g3, g2);
printk("SHIT[%s:%d]: "
"[PP[%016lx] CACH[%016lx] CTX[%x:%x]] PC[%016lx:%016lx]\n",
"[PP[%016lx] CACH[%016lx] CTX[%lx]] PC[%016lx:%016lx]\n",
current->comm, current->pid,
__pa(current->mm->pgd),
pgd_val(current->mm->pgd[0]),
current->mm->context & 0x3ff,
current->tss.ctx,
regs->tpc, regs->tnpc);
show_regs(regs);
#if 1
......@@ -262,8 +261,8 @@ void bad_trap (struct pt_regs *regs, long lvl)
}
if (regs->tstate & TSTATE_PRIV)
die_if_kernel ("Kernel bad trap", regs);
current->tss.sig_desc = SUBSIG_BADTRAP(lvl - 0x100);
current->tss.sig_address = regs->tpc;
current->thread.sig_desc = SUBSIG_BADTRAP(lvl - 0x100);
current->thread.sig_address = regs->tpc;
force_sig(SIGILL, current);
unlock_kernel ();
}
......@@ -289,8 +288,8 @@ void instruction_access_exception (struct pt_regs *regs,
#endif
die_if_kernel("Iax", regs);
}
current->tss.sig_desc = SUBSIG_ILLINST;
current->tss.sig_address = regs->tpc;
current->thread.sig_desc = SUBSIG_ILLINST;
current->thread.sig_address = regs->tpc;
force_sig(SIGILL, current);
unlock_kernel();
}
......@@ -402,8 +401,8 @@ void do_fpe_common(struct pt_regs *regs)
regs->tpc = regs->tnpc;
regs->tnpc += 4;
} else {
current->tss.sig_address = regs->tpc;
current->tss.sig_desc = SUBSIG_FPERROR;
current->thread.sig_address = regs->tpc;
current->thread.sig_desc = SUBSIG_FPERROR;
send_sig(SIGFPE, current, 1);
}
}
......@@ -411,7 +410,7 @@ void do_fpe_common(struct pt_regs *regs)
void do_fpieee(struct pt_regs *regs)
{
#ifdef DEBUG_FPU
printk("fpieee %016lx\n", current->tss.xfsr[0]);
printk("fpieee %016lx\n", current->thread.xfsr[0]);
#endif
do_fpe_common(regs);
}
......@@ -423,7 +422,7 @@ void do_fpother(struct pt_regs *regs)
struct fpustate *f = FPUSTATE;
int ret = 0;
switch ((current->tss.xfsr[0] & 0x1c000)) {
switch ((current->thread.xfsr[0] & 0x1c000)) {
case (2 << 14): /* unfinished_FPop */
case (3 << 14): /* unimplemented_FPop */
ret = do_mathemu(regs, f);
......@@ -431,7 +430,7 @@ void do_fpother(struct pt_regs *regs)
}
if (ret) return;
#ifdef DEBUG_FPU
printk("fpother %016lx\n", current->tss.xfsr[0]);
printk("fpother %016lx\n", current->thread.xfsr[0]);
#endif
do_fpe_common(regs);
}
......@@ -440,8 +439,8 @@ void do_tof(struct pt_regs *regs)
{
if(regs->tstate & TSTATE_PRIV)
die_if_kernel("Penguin overflow trap from kernel mode", regs);
current->tss.sig_address = regs->tpc;
current->tss.sig_desc = SUBSIG_TAG; /* as good as any */
current->thread.sig_address = regs->tpc;
current->thread.sig_desc = SUBSIG_TAG; /* as good as any */
send_sig(SIGEMT, current, 1);
}
......@@ -540,7 +539,7 @@ void do_illegal_instruction(struct pt_regs *regs)
if(tstate & TSTATE_PRIV)
die_if_kernel("Kernel illegal instruction", regs);
if(current->tss.flags & SPARC_FLAG_32BIT)
if(current->thread.flags & SPARC_FLAG_32BIT)
pc = (u32)pc;
if (get_user(insn, (u32 *)pc) != -EFAULT) {
if ((insn & 0xc1ffc000) == 0x81700000) /* POPC */ {
......@@ -551,8 +550,8 @@ void do_illegal_instruction(struct pt_regs *regs)
return;
}
}
current->tss.sig_address = pc;
current->tss.sig_desc = SUBSIG_ILLINST;
current->thread.sig_address = pc;
current->thread.sig_desc = SUBSIG_ILLINST;
send_sig(SIGILL, current, 1);
}
......@@ -565,23 +564,23 @@ void mem_address_unaligned(struct pt_regs *regs, unsigned long sfar, unsigned lo
return kernel_unaligned_trap(regs, *((unsigned int *)regs->tpc), sfar, sfsr);
} else {
current->tss.sig_address = regs->tpc;
current->tss.sig_desc = SUBSIG_PRIVINST;
current->thread.sig_address = regs->tpc;
current->thread.sig_desc = SUBSIG_PRIVINST;
send_sig(SIGBUS, current, 1);
}
}
void do_privop(struct pt_regs *regs)
{
current->tss.sig_address = regs->tpc;
current->tss.sig_desc = SUBSIG_PRIVINST;
current->thread.sig_address = regs->tpc;
current->thread.sig_desc = SUBSIG_PRIVINST;
send_sig(SIGILL, current, 1);
}
void do_privact(struct pt_regs *regs)
{
current->tss.sig_address = regs->tpc;
current->tss.sig_desc = SUBSIG_PRIVINST;
current->thread.sig_address = regs->tpc;
current->thread.sig_desc = SUBSIG_PRIVINST;
send_sig(SIGILL, current, 1);
}
......@@ -590,8 +589,8 @@ void do_priv_instruction(struct pt_regs *regs, unsigned long pc, unsigned long n
{
if(tstate & TSTATE_PRIV)
die_if_kernel("Penguin instruction from Penguin mode??!?!", regs);
current->tss.sig_address = pc;
current->tss.sig_desc = SUBSIG_PRIVINST;
current->thread.sig_address = pc;
current->thread.sig_desc = SUBSIG_PRIVINST;
send_sig(SIGILL, current, 1);
}
......@@ -727,4 +726,11 @@ void cache_flush_trap(struct pt_regs *regs)
void trap_init(void)
{
/* Attach to the address space of init_task. */
atomic_inc(&init_mm.mm_count);
current->active_mm = &init_mm;
/* NOTE: Other cpus have this done as they are started
* up on SMP.
*/
}
/* $Id: unaligned.c,v 1.16 1999/05/25 16:53:15 jj Exp $
/* $Id: unaligned.c,v 1.18 1999/08/02 08:39:44 davem Exp $
* unaligned.c: Unaligned load/store trap handling with special
* cases for the kernel to do them more quickly.
*
......@@ -70,7 +70,7 @@ static inline int decode_access_size(unsigned int insn)
return 2;
else {
printk("Impossible unaligned trap. insn=%08x\n", insn);
die_if_kernel("Byte sized unaligned access?!?!", current->tss.kregs);
die_if_kernel("Byte sized unaligned access?!?!", current->thread.kregs);
}
}
......@@ -117,7 +117,7 @@ static unsigned long fetch_reg(unsigned int reg, struct pt_regs *regs)
struct reg_window *win;
win = (struct reg_window *)(regs->u_regs[UREG_FP] + STACK_BIAS);
value = win->locals[reg - 16];
} else if (current->tss.flags & SPARC_FLAG_32BIT) {
} else if (current->thread.flags & SPARC_FLAG_32BIT) {
struct reg_window32 *win32;
win32 = (struct reg_window32 *)((unsigned long)((u32)regs->u_regs[UREG_FP]));
get_user(value, &win32->locals[reg - 16]);
......@@ -137,7 +137,7 @@ static unsigned long *fetch_reg_addr(unsigned int reg, struct pt_regs *regs)
struct reg_window *win;
win = (struct reg_window *)(regs->u_regs[UREG_FP] + STACK_BIAS);
return &win->locals[reg - 16];
} else if (current->tss.flags & SPARC_FLAG_32BIT) {
} else if (current->thread.flags & SPARC_FLAG_32BIT) {
struct reg_window32 *win32;
win32 = (struct reg_window32 *)((unsigned long)((u32)regs->u_regs[UREG_FP]));
return (unsigned long *)&win32->locals[reg - 16];
......@@ -164,10 +164,10 @@ static inline unsigned long compute_effective_address(struct pt_regs *regs,
}
}
/* This is just to make gcc think panic does return... */
static void unaligned_panic(char *str)
/* This is just to make gcc think die_if_kernel does return... */
static void unaligned_panic(char *str, struct pt_regs *regs)
{
panic(str);
die_if_kernel(str, regs);
}
#define do_integer_load(dest_reg, size, saddr, is_signed, asi, errh) ({ \
......@@ -380,7 +380,7 @@ asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn, u
if(!ok_for_kernel(insn) || dir == both) {
printk("Unsupported unaligned load/store trap for kernel at <%016lx>.\n",
regs->tpc);
unaligned_panic("Wheee. Kernel does fpu/atomic unaligned load/store.");
unaligned_panic("Kernel does fpu/atomic unaligned load/store.", regs);
__asm__ __volatile__ ("\n"
"kernel_unaligned_trap_fault:\n\t"
......@@ -453,7 +453,7 @@ int handle_popc(u32 insn, struct pt_regs *regs)
if (rd)
regs->u_regs[rd] = ret;
} else {
if (current->tss.flags & SPARC_FLAG_32BIT) {
if (current->thread.flags & SPARC_FLAG_32BIT) {
struct reg_window32 *win32;
win32 = (struct reg_window32 *)((unsigned long)((u32)regs->u_regs[UREG_FP]));
put_user(ret, &win32->locals[rd - 16]);
......@@ -480,9 +480,9 @@ int handle_ldf_stq(u32 insn, struct pt_regs *regs)
int flag = (freg < 32) ? FPRS_DL : FPRS_DU;
save_and_clear_fpu();
current->tss.xfsr[0] &= ~0x1c000;
current->thread.xfsr[0] &= ~0x1c000;
if (freg & 3) {
current->tss.xfsr[0] |= (6 << 14) /* invalid_fp_register */;
current->thread.xfsr[0] |= (6 << 14) /* invalid_fp_register */;
do_fpother(regs);
return 0;
}
......@@ -490,7 +490,7 @@ int handle_ldf_stq(u32 insn, struct pt_regs *regs)
/* STQ */
u64 first = 0, second = 0;
if (current->tss.fpsaved[0] & flag) {
if (current->thread.fpsaved[0] & flag) {
first = *(u64 *)&f->regs[freg];
second = *(u64 *)&f->regs[freg+2];
}
......@@ -565,18 +565,18 @@ int handle_ldf_stq(u32 insn, struct pt_regs *regs)
break;
}
}
if (!(current->tss.fpsaved[0] & FPRS_FEF)) {
current->tss.fpsaved[0] = FPRS_FEF;
current->tss.gsr[0] = 0;
if (!(current->thread.fpsaved[0] & FPRS_FEF)) {
current->thread.fpsaved[0] = FPRS_FEF;
current->thread.gsr[0] = 0;
}
if (!(current->tss.fpsaved[0] & flag)) {
if (!(current->thread.fpsaved[0] & flag)) {
if (freg < 32)
memset(f->regs, 0, 32*sizeof(u32));
else
memset(f->regs+32, 0, 32*sizeof(u32));
}
memcpy(f->regs + freg, data, size * 4);
current->tss.fpsaved[0] |= flag;
current->thread.fpsaved[0] |= flag;
}
advance(regs);
return 1;
......@@ -609,7 +609,7 @@ void handle_lddfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr
if(tstate & TSTATE_PRIV)
die_if_kernel("lddfmna from kernel", regs);
if(current->tss.flags & SPARC_FLAG_32BIT)
if(current->thread.flags & SPARC_FLAG_32BIT)
pc = (u32)pc;
if (get_user(insn, (u32 *)pc) != -EFAULT) {
asi = sfsr >> 16;
......@@ -629,18 +629,18 @@ void handle_lddfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr
if (asi & 0x8) /* Little */
value = __swab64p(&value);
flag = (freg < 32) ? FPRS_DL : FPRS_DU;
if (!(current->tss.fpsaved[0] & FPRS_FEF)) {
current->tss.fpsaved[0] = FPRS_FEF;
current->tss.gsr[0] = 0;
if (!(current->thread.fpsaved[0] & FPRS_FEF)) {
current->thread.fpsaved[0] = FPRS_FEF;
current->thread.gsr[0] = 0;
}
if (!(current->tss.fpsaved[0] & flag)) {
if (!(current->thread.fpsaved[0] & flag)) {
if (freg < 32)
memset(f->regs, 0, 32*sizeof(u32));
else
memset(f->regs+32, 0, 32*sizeof(u32));
}
*(u64 *)(f->regs + freg) = value;
current->tss.fpsaved[0] |= flag;
current->thread.fpsaved[0] |= flag;
} else {
daex: data_access_exception(regs);
return;
......@@ -661,7 +661,7 @@ void handle_stdfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr
if(tstate & TSTATE_PRIV)
die_if_kernel("stdfmna from kernel", regs);
if(current->tss.flags & SPARC_FLAG_32BIT)
if(current->thread.flags & SPARC_FLAG_32BIT)
pc = (u32)pc;
if (get_user(insn, (u32 *)pc) != -EFAULT) {
freg = ((insn >> 25) & 0x1e) | ((insn >> 20) & 0x20);
......@@ -672,7 +672,7 @@ void handle_stdfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr
(asi < ASI_P))
goto daex;
save_and_clear_fpu();
if (current->tss.fpsaved[0] & flag)
if (current->thread.fpsaved[0] & flag)
value = *(u64 *)&f->regs[freg];
switch (asi) {
case ASI_P:
......
This diff is collapsed.
/* $Id: VIScopy.S,v 1.20 1999/05/25 16:52:57 jj Exp $
/* $Id: VIScopy.S,v 1.21 1999/07/30 09:35:35 davem Exp $
* VIScopy.S: High speed copy operations utilizing the UltraSparc
* Visual Instruction Set.
*
......@@ -29,19 +29,19 @@
#include <asm/asm_offsets.h>
#define FPU_CLEAN_RETL \
ldub [%g6 + AOFF_task_tss + AOFF_thread_current_ds], %o1; \
ldub [%g6 + AOFF_task_thread + AOFF_thread_current_ds], %o1; \
VISExit \
clr %o0; \
retl; \
wr %o1, %g0, %asi;
#define FPU_RETL \
ldub [%g6 + AOFF_task_tss + AOFF_thread_current_ds], %o1; \
ldub [%g6 + AOFF_task_thread + AOFF_thread_current_ds], %o1; \
VISExit \
clr %o0; \
retl; \
wr %o1, %g0, %asi;
#define NORMAL_RETL \
ldub [%g6 + AOFF_task_tss + AOFF_thread_current_ds], %o1; \
ldub [%g6 + AOFF_task_thread + AOFF_thread_current_ds], %o1; \
clr %o0; \
retl; \
wr %o1, %g0, %asi;
......@@ -1009,7 +1009,7 @@ VIScopyfixup_ret:
/* If this is copy_from_user(), zero out the rest of the
* kernel buffer.
*/
ldub [%g6 + AOFF_task_tss + AOFF_thread_current_ds], %o4
ldub [%g6 + AOFF_task_thread + AOFF_thread_current_ds], %o4
andcc asi_src, 0x1, %g0
be,pt %icc, 1f
VISExit
......
/* $Id: VIScsum.S,v 1.4 1999/05/25 16:53:00 jj Exp $
/* $Id: VIScsum.S,v 1.5 1999/07/30 09:35:36 davem Exp $
* VIScsum.S: High bandwidth IP checksumming utilizing the UltraSparc
* Visual Instruction Set.
*
......@@ -341,7 +341,7 @@ csum_partial:
DO_THE_TRICK(f44,f46,f48,f50,f52,f54,f56,f58,f60,f62,f0,f2,f4,f6,f8,f10,f12,f14)
END_THE_TRICK(f60,f62,f0,f2,f4,f6,f8,f10,f12,f14,f16,f18,f20,f22,f24,f26,f28,f30)
#ifdef __KERNEL__
ldub [%g6 + AOFF_task_tss + AOFF_thread_current_ds], %g7
ldub [%g6 + AOFF_task_thread + AOFF_thread_current_ds], %g7
#endif
and %o1, 0x3f, %o1 /* IEU0 Group */
#ifdef __KERNEL__
......
/* $Id: VISsave.S,v 1.3 1998/10/21 10:36:39 jj Exp $
/* $Id: VISsave.S,v 1.4 1999/07/30 09:35:37 davem Exp $
* VISsave.S: Code for saving FPU register state for
* VIS routines. One should not call this directly,
* but use macros provided in <asm/visasm.h>.
......@@ -19,35 +19,35 @@
.align 32
VISenter:
ldub [%g6 + AOFF_task_tss + AOFF_thread_fpdepth], %g1
ldub [%g6 + AOFF_task_thread + AOFF_thread_fpdepth], %g1
brnz,a,pn %g1, 1f
cmp %g1, 1
stb %g0, [%g6 + AOFF_task_tss + AOFF_thread_fpsaved]
stx %fsr, [%g6 + AOFF_task_tss + AOFF_thread_xfsr]
stb %g0, [%g6 + AOFF_task_thread + AOFF_thread_fpsaved]
stx %fsr, [%g6 + AOFF_task_thread + AOFF_thread_xfsr]
9: jmpl %g7 + %g0, %g0
nop
1: bne,pn %icc, 2f
srl %g1, 1, %g1
vis1: ldub [%g6 + AOFF_task_tss + AOFF_thread_fpsaved], %g3
stx %fsr, [%g6 + AOFF_task_tss + AOFF_thread_xfsr]
vis1: ldub [%g6 + AOFF_task_thread + AOFF_thread_fpsaved], %g3
stx %fsr, [%g6 + AOFF_task_thread + AOFF_thread_xfsr]
or %g3, %o5, %g3
stb %g3, [%g6 + AOFF_task_tss + AOFF_thread_fpsaved]
stb %g3, [%g6 + AOFF_task_thread + AOFF_thread_fpsaved]
rd %gsr, %g3
clr %g1
ba,pt %xcc, 3f
stb %g3, [%g6 + AOFF_task_tss + AOFF_thread_gsr]
stb %g3, [%g6 + AOFF_task_thread + AOFF_thread_gsr]
2: add %g6, %g1, %g3
cmp %o5, FPRS_DU
be,pn %icc, 6f
sll %g1, 3, %g1
stb %o5, [%g3 + AOFF_task_tss + AOFF_thread_fpsaved]
stb %o5, [%g3 + AOFF_task_thread + AOFF_thread_fpsaved]
rd %gsr, %g2
stb %g2, [%g3 + AOFF_task_tss + AOFF_thread_gsr]
stb %g2, [%g3 + AOFF_task_thread + AOFF_thread_gsr]
add %g6, %g1, %g2
stx %fsr, [%g2 + AOFF_task_tss + AOFF_thread_xfsr]
stx %fsr, [%g2 + AOFF_task_thread + AOFF_thread_xfsr]
sll %g1, 5, %g1
3: andcc %o5, FPRS_DL|FPRS_DU, %g0
be,pn %icc, 9b
......@@ -69,10 +69,10 @@ vis1: ldub [%g6 + AOFF_task_tss + AOFF_thread_fpsaved], %g3
jmpl %g7 + %g0, %g0
nop
6: ldub [%g3 + AOFF_task_tss + AOFF_thread_fpsaved], %o5
6: ldub [%g3 + AOFF_task_thread + AOFF_thread_fpsaved], %o5
or %o5, FPRS_DU, %o5
add %g6, AOFF_task_fpregs+0x80, %g2
stb %o5, [%g3 + AOFF_task_tss + AOFF_thread_fpsaved]
stb %o5, [%g3 + AOFF_task_thread + AOFF_thread_fpsaved]
sll %g1, 5, %g1
add %g6, AOFF_task_fpregs+0xc0, %g3
......@@ -87,11 +87,11 @@ vis1: ldub [%g6 + AOFF_task_tss + AOFF_thread_fpsaved], %g3
.align 32
VISenterhalf:
ldub [%g6 + AOFF_task_tss + AOFF_thread_fpdepth], %g1
ldub [%g6 + AOFF_task_thread + AOFF_thread_fpdepth], %g1
brnz,a,pn %g1, 1f
cmp %g1, 1
stb %g0, [%g6 + AOFF_task_tss + AOFF_thread_fpsaved]
stx %fsr, [%g6 + AOFF_task_tss + AOFF_thread_xfsr]
stb %g0, [%g6 + AOFF_task_thread + AOFF_thread_fpsaved]
stx %fsr, [%g6 + AOFF_task_thread + AOFF_thread_xfsr]
clr %o5
jmpl %g7 + %g0, %g0
wr %g0, FPRS_FEF, %fprs
......@@ -103,12 +103,12 @@ VISenterhalf:
2: addcc %g6, %g1, %g3
sll %g1, 3, %g1
andn %o5, FPRS_DU, %g2
stb %g2, [%g3 + AOFF_task_tss + AOFF_thread_fpsaved]
stb %g2, [%g3 + AOFF_task_thread + AOFF_thread_fpsaved]
rd %gsr, %g2
stb %g2, [%g3 + AOFF_task_tss + AOFF_thread_gsr]
stb %g2, [%g3 + AOFF_task_thread + AOFF_thread_gsr]
add %g6, %g1, %g2
stx %fsr, [%g2 + AOFF_task_tss + AOFF_thread_xfsr]
stx %fsr, [%g2 + AOFF_task_thread + AOFF_thread_xfsr]
sll %g1, 5, %g1
3: andcc %o5, FPRS_DL, %g0
be,pn %icc, 4f
......
/* $Id: blockops.S,v 1.17 1999/05/25 16:52:52 jj Exp $
/* $Id: blockops.S,v 1.18 1999/07/30 09:35:37 davem Exp $
* blockops.S: UltraSparc block zero optimized routines.
*
* Copyright (C) 1996,1998 David S. Miller (davem@caip.rutgers.edu)
......@@ -29,7 +29,7 @@
.type copy_page,@function
copy_page: /* %o0=dest, %o1=src */
VISEntry
ldx [%g6 + AOFF_task_mm], %o2
ldx [%g6 + AOFF_task_active_mm], %o2
sub %o0, %g4, %g1
sethi %uhi(_PAGE_VALID), %g3
sub %o1, %g4, %g2
......@@ -107,7 +107,7 @@ copy_page: /* %o0=dest, %o1=src */
.type clear_page,@function
clear_page: /* %o0=dest */
VISEntryHalf
ldx [%g6 + AOFF_task_mm], %o2
ldx [%g6 + AOFF_task_active_mm], %o2
sub %o0, %g4, %g1
sethi %uhi(_PAGE_VALID), %g3
sllx %g3, 32, %g3
......
......@@ -266,7 +266,7 @@ cpc_end:
.globl cpc_handler
cpc_handler:
ldx [%sp + 0x7ff + 128], %g1
ldub [%g6 + AOFF_task_tss + AOFF_thread_current_ds], %g3
ldub [%g6 + AOFF_task_thread + AOFF_thread_current_ds], %g3
sub %g0, EFAULT, %g2
brnz,a,pt %g1, 1f
st %g2, [%g1]
......
/* $Id: fcmpeq.c,v 1.5 1999/05/28 13:43:29 jj Exp $
/* $Id: fcmpeq.c,v 1.6 1999/07/30 09:35:40 davem Exp $
* arch/sparc64/math-emu/fcmpeq.c
*
* Copyright (C) 1997, 1999 Jakub Jelinek (jj@ultra.linux.cz)
......@@ -26,14 +26,14 @@ int FCMPEQ(void *rd, void *rs2, void *rs1)
if (!FP_INHIBIT_RESULTS) {
rd = (void *)(((long)rd)&~3);
if (ret == -1) ret = 2;
fsr = current->tss.xfsr[0];
fsr = current->thread.xfsr[0];
switch (fccno) {
case 0: fsr &= ~0xc00; fsr |= (ret << 10); break;
case 1: fsr &= ~0x300000000UL; fsr |= (ret << 32); break;
case 2: fsr &= ~0xc00000000UL; fsr |= (ret << 34); break;
case 3: fsr &= ~0x3000000000UL; fsr |= (ret << 36); break;
}
current->tss.xfsr[0] = fsr;
current->thread.xfsr[0] = fsr;
}
FP_HANDLE_EXCEPTIONS;
}
/* $Id: fcmpq.c,v 1.5 1999/05/28 13:43:33 jj Exp $
/* $Id: fcmpq.c,v 1.6 1999/07/30 09:35:40 davem Exp $
* arch/sparc64/math-emu/fcmpq.c
*
* Copyright (C) 1997, 1999 Jakub Jelinek (jj@ultra.linux.cz)
......@@ -26,14 +26,14 @@ int FCMPQ(void *rd, void *rs2, void *rs1)
if (!FP_INHIBIT_RESULTS) {
rd = (void *)(((long)rd)&~3);
if (ret == -1) ret = 2;
fsr = current->tss.xfsr[0];
fsr = current->thread.xfsr[0];
switch (fccno) {
case 0: fsr &= ~0xc00; fsr |= (ret << 10); break;
case 1: fsr &= ~0x300000000UL; fsr |= (ret << 32); break;
case 2: fsr &= ~0xc00000000UL; fsr |= (ret << 34); break;
case 3: fsr &= ~0x3000000000UL; fsr |= (ret << 36); break;
}
current->tss.xfsr[0] = fsr;
current->thread.xfsr[0] = fsr;
}
FP_HANDLE_EXCEPTIONS;
}
/* $Id: fsubd.c,v 1.4 1999/05/28 13:45:04 jj Exp $
/* $Id: fsubd.c,v 1.5 1999/08/02 14:08:04 jj Exp $
* arch/sparc64/math-emu/fsubd.c
*
* Copyright (C) 1997, 1999 Jakub Jelinek (jj@ultra.linux.cz)
......@@ -17,9 +17,7 @@ int FSUBD(void *rd, void *rs2, void *rs1)
FP_UNPACK_DP(A, rs1);
FP_UNPACK_DP(B, rs2);
if (B_c != FP_CLS_NAN)
B_s ^= 1;
FP_ADD_D(R, A, B);
FP_SUB_D(R, A, B);
FP_PACK_DP(rd, R);
FP_HANDLE_EXCEPTIONS;
}
/* $Id: fsubq.c,v 1.4 1999/05/28 13:45:09 jj Exp $
/* $Id: fsubq.c,v 1.5 1999/08/02 14:08:06 jj Exp $
* arch/sparc64/math-emu/fsubq.c
*
* Copyright (C) 1997, 1999 Jakub Jelinek (jj@ultra.linux.cz)
......@@ -17,9 +17,7 @@ int FSUBQ(void *rd, void *rs2, void *rs1)
FP_UNPACK_QP(A, rs1);
FP_UNPACK_QP(B, rs2);
if (B_c != FP_CLS_NAN)
B_s ^= 1;
FP_ADD_Q(R, A, B);
FP_SUB_Q(R, A, B);
FP_PACK_QP(rd, R);
FP_HANDLE_EXCEPTIONS;
}
/* $Id: fsubs.c,v 1.4 1999/05/28 13:45:12 jj Exp $
/* $Id: fsubs.c,v 1.5 1999/08/02 14:08:07 jj Exp $
* arch/sparc64/math-emu/fsubs.c
*
* Copyright (C) 1997, 1999 Jakub Jelinek (jj@ultra.linux.cz)
......@@ -17,9 +17,7 @@ int FSUBS(void *rd, void *rs2, void *rs1)
FP_UNPACK_SP(A, rs1);
FP_UNPACK_SP(B, rs2);
if (B_c != FP_CLS_NAN)
B_s ^= 1;
FP_ADD_S(R, A, B);
FP_SUB_S(R, A, B);
FP_PACK_SP(rd, R);
FP_HANDLE_EXCEPTIONS;
}
/* $Id: math.c,v 1.8 1999/05/28 13:43:11 jj Exp $
/* $Id: math.c,v 1.9 1999/07/30 09:35:41 davem Exp $
* arch/sparc64/math-emu/math.c
*
* Copyright (C) 1997,1999 Jakub Jelinek (jj@ultra.linux.cz)
......@@ -75,7 +75,7 @@ FLOATFUNC(FDTOI)
*/
static int record_exception(struct pt_regs *regs, int eflag)
{
u64 fsr = current->tss.xfsr[0];
u64 fsr = current->thread.xfsr[0];
int would_trap;
/* Determine if this exception would have generated a trap. */
......@@ -120,7 +120,7 @@ static int record_exception(struct pt_regs *regs, int eflag)
if(would_trap != 0)
fsr |= (1UL << 14);
current->tss.xfsr[0] = fsr;
current->thread.xfsr[0] = fsr;
/* If we will not trap, advance the program counter over
* the instruction being handled.
......@@ -148,7 +148,7 @@ int do_mathemu(struct pt_regs *regs, struct fpustate *f)
if(tstate & TSTATE_PRIV)
die_if_kernel("FPQuad from kernel", regs);
if(current->tss.flags & SPARC_FLAG_32BIT)
if(current->thread.flags & SPARC_FLAG_32BIT)
pc = (u32)pc;
if (get_user(insn, (u32 *)pc) != -EFAULT) {
if ((insn & 0xc1f80000) == 0x81a00000) /* FPOP1 */ {
......@@ -201,33 +201,33 @@ int do_mathemu(struct pt_regs *regs, struct fpustate *f)
if (type) {
void *rs1 = NULL, *rs2 = NULL, *rd = NULL;
freg = (current->tss.xfsr[0] >> 14) & 0xf;
freg = (current->thread.xfsr[0] >> 14) & 0xf;
if (freg != (type >> 8))
goto err;
current->tss.xfsr[0] &= ~0x1c000;
current->thread.xfsr[0] &= ~0x1c000;
freg = ((insn >> 14) & 0x1f);
switch (type & 0x3) {
case 3: if (freg & 2) {
current->tss.xfsr[0] |= (6 << 14) /* invalid_fp_register */;
current->thread.xfsr[0] |= (6 << 14) /* invalid_fp_register */;
goto err;
}
case 2: freg = ((freg & 1) << 5) | (freg & 0x1e);
case 1: rs1 = (void *)&f->regs[freg];
flags = (freg < 32) ? FPRS_DL : FPRS_DU;
if (!(current->tss.fpsaved[0] & flags))
if (!(current->thread.fpsaved[0] & flags))
rs1 = (void *)&zero;
break;
}
freg = (insn & 0x1f);
switch ((type >> 2) & 0x3) {
case 3: if (freg & 2) {
current->tss.xfsr[0] |= (6 << 14) /* invalid_fp_register */;
current->thread.xfsr[0] |= (6 << 14) /* invalid_fp_register */;
goto err;
}
case 2: freg = ((freg & 1) << 5) | (freg & 0x1e);
case 1: rs2 = (void *)&f->regs[freg];
flags = (freg < 32) ? FPRS_DL : FPRS_DU;
if (!(current->tss.fpsaved[0] & flags))
if (!(current->thread.fpsaved[0] & flags))
rs2 = (void *)&zero;
break;
}
......@@ -235,23 +235,23 @@ int do_mathemu(struct pt_regs *regs, struct fpustate *f)
switch ((type >> 4) & 0x3) {
case 0: rd = (void *)(long)(freg & 3); break;
case 3: if (freg & 2) {
current->tss.xfsr[0] |= (6 << 14) /* invalid_fp_register */;
current->thread.xfsr[0] |= (6 << 14) /* invalid_fp_register */;
goto err;
}
case 2: freg = ((freg & 1) << 5) | (freg & 0x1e);
case 1: rd = (void *)&f->regs[freg];
flags = (freg < 32) ? FPRS_DL : FPRS_DU;
if (!(current->tss.fpsaved[0] & FPRS_FEF)) {
current->tss.fpsaved[0] = FPRS_FEF;
current->tss.gsr[0] = 0;
if (!(current->thread.fpsaved[0] & FPRS_FEF)) {
current->thread.fpsaved[0] = FPRS_FEF;
current->thread.gsr[0] = 0;
}
if (!(current->tss.fpsaved[0] & flags)) {
if (!(current->thread.fpsaved[0] & flags)) {
if (freg < 32)
memset(f->regs, 0, 32*sizeof(u32));
else
memset(f->regs+32, 0, 32*sizeof(u32));
}
current->tss.fpsaved[0] |= flags;
current->thread.fpsaved[0] |= flags;
break;
}
flags = func(rd, rs2, rs1);
......@@ -259,7 +259,7 @@ int do_mathemu(struct pt_regs *regs, struct fpustate *f)
return record_exception(regs, flags);
/* Success and no exceptions detected. */
current->tss.xfsr[0] &= ~(FSR_CEXC_MASK);
current->thread.xfsr[0] &= ~(FSR_CEXC_MASK);
regs->tpc = regs->tnpc;
regs->tnpc += 4;
return 1;
......
......@@ -120,31 +120,31 @@
/* Basic. Assuming the host word size is >= 2*FRACBITS, we can do the
multiplication immediately. */
#define _FP_MUL_MEAT_1_imm(fs, R, X, Y) \
#define _FP_MUL_MEAT_1_imm(wfracbits, R, X, Y) \
do { \
R##_f = X##_f * Y##_f; \
/* Normalize since we know where the msb of the multiplicands \
were (bit B), we know that the msb of the product is \
at either 2B or 2B-1. */ \
_FP_FRAC_SRS_1(R, _FP_WFRACBITS_##fs-1, 2*_FP_WFRACBITS_##fs); \
_FP_FRAC_SRS_1(R, wfracbits-1, 2*wfracbits); \
} while (0)
/* Given a 1W * 1W => 2W primitive, do the extended multiplication. */
#define _FP_MUL_MEAT_1_wide(fs, R, X, Y, doit) \
#define _FP_MUL_MEAT_1_wide(wfracbits, R, X, Y, doit) \
do { \
_FP_W_TYPE _Z_f0, _Z_f1; \
doit(_Z_f1, _Z_f0, X##_f, Y##_f); \
/* Normalize since we know where the msb of the multiplicands \
were (bit B), we know that the msb of the of the product is \
at either 2B or 2B-1. */ \
_FP_FRAC_SRS_2(_Z, _FP_WFRACBITS_##fs-1, 2*_FP_WFRACBITS_##fs); \
_FP_FRAC_SRS_2(_Z, wfracbits-1, 2*wfracbits); \
R##_f = _Z_f0; \
} while (0)
/* Finally, a simple widening multiply algorithm. What fun! */
#define _FP_MUL_MEAT_1_hard(fs, R, X, Y) \
#define _FP_MUL_MEAT_1_hard(wfracbits, R, X, Y) \
do { \
_FP_W_TYPE _xh, _xl, _yh, _yl, _z_f0, _z_f1, _a_f0, _a_f1; \
\
......@@ -168,7 +168,7 @@
_FP_FRAC_ADD_2(_z, _z, _a); \
\
/* normalize */ \
_FP_FRAC_SRS_2(_z, _FP_WFRACBITS_##fs - 1, 2*_FP_WFRACBITS_##fs); \
_FP_FRAC_SRS_2(_z, wfracbits - 1, 2*wfracbits); \
R##_f = _z_f0; \
} while (0)
......
......@@ -234,7 +234,7 @@
/* Given a 1W * 1W => 2W primitive, do the extended multiplication. */
#define _FP_MUL_MEAT_2_wide(fs, R, X, Y, doit) \
#define _FP_MUL_MEAT_2_wide(wfracbits, R, X, Y, doit) \
do { \
_FP_FRAC_DECL_4(_z); _FP_FRAC_DECL_2(_b); _FP_FRAC_DECL_2(_c); \
\
......@@ -255,7 +255,7 @@
/* Normalize since we know where the msb of the multiplicands \
were (bit B), we know that the msb of the of the product is \
at either 2B or 2B-1. */ \
_FP_FRAC_SRS_4(_z, _FP_WFRACBITS_##fs-1, 2*_FP_WFRACBITS_##fs); \
_FP_FRAC_SRS_4(_z, wfracbits-1, 2*wfracbits); \
R##_f0 = _FP_FRAC_WORD_4(_z,0); \
R##_f1 = _FP_FRAC_WORD_4(_z,1); \
} while (0)
......@@ -264,7 +264,7 @@
Do only 3 multiplications instead of four. This one is for machines
where multiplication is much more expensive than subtraction. */
#define _FP_MUL_MEAT_2_wide_3mul(fs, R, X, Y, doit) \
#define _FP_MUL_MEAT_2_wide_3mul(wfracbits, R, X, Y, doit) \
do { \
_FP_FRAC_DECL_4(_z); _FP_FRAC_DECL_2(_b); _FP_FRAC_DECL_2(_c); \
_FP_W_TYPE _d; \
......@@ -299,12 +299,12 @@
/* Normalize since we know where the msb of the multiplicands \
were (bit B), we know that the msb of the of the product is \
at either 2B or 2B-1. */ \
_FP_FRAC_SRS_4(_z, _FP_WFRACBITS_##fs-1, 2*_FP_WFRACBITS_##fs); \
_FP_FRAC_SRS_4(_z, wfracbits-1, 2*wfracbits); \
R##_f0 = _FP_FRAC_WORD_4(_z,0); \
R##_f1 = _FP_FRAC_WORD_4(_z,1); \
} while (0)
#define _FP_MUL_MEAT_2_gmp(fs, R, X, Y) \
#define _FP_MUL_MEAT_2_gmp(wfracbits, R, X, Y) \
do { \
_FP_FRAC_DECL_4(_z); \
_FP_W_TYPE _x[2], _y[2]; \
......@@ -316,11 +316,106 @@
/* Normalize since we know where the msb of the multiplicands \
were (bit B), we know that the msb of the of the product is \
at either 2B or 2B-1. */ \
_FP_FRAC_SRS_4(_z, _FP_WFRACBITS##_fs-1, 2*_FP_WFRACBITS_##fs); \
_FP_FRAC_SRS_4(_z, wfracbits-1, 2*wfracbits); \
R##_f0 = _z_f[0]; \
R##_f1 = _z_f[1]; \
} while (0)
/* Do at most 120x120=240 bits multiplication using double floating
point multiplication. This is useful if floating point
multiplication has much bigger throughput than integer multiply.
It is supposed to work for _FP_W_TYPE_SIZE 64 and wfracbits
between 106 and 120 only.
Caller guarantees that X and Y have (1LL << (wfracbits - 1)) set.
SETFETZ is a macro which will disable all FPU exceptions and set rounding
towards zero, RESETFE should optionally reset it back. */
#define _FP_MUL_MEAT_2_120_240_double(wfracbits, R, X, Y, setfetz, resetfe) \
do { \
static const double _const[] = { \
/* 2^-24 */ 5.9604644775390625e-08, \
/* 2^-48 */ 3.5527136788005009e-15, \
/* 2^-72 */ 2.1175823681357508e-22, \
/* 2^-96 */ 1.2621774483536189e-29, \
/* 2^28 */ 2.68435456e+08, \
/* 2^4 */ 1.600000e+01, \
/* 2^-20 */ 9.5367431640625e-07, \
/* 2^-44 */ 5.6843418860808015e-14, \
/* 2^-68 */ 3.3881317890172014e-21, \
/* 2^-92 */ 2.0194839173657902e-28, \
/* 2^-116 */ 1.2037062152420224e-35}; \
double _a240, _b240, _c240, _d240, _e240, _f240, \
_g240, _h240, _i240, _j240, _k240; \
union { double d; UDItype i; } _l240, _m240, _n240, _o240, \
_p240, _q240, _r240, _s240; \
UDItype _t240, _u240, _v240, _w240, _x240, _y240 = 0; \
\
if (wfracbits < 106 || wfracbits > 120) \
abort(); \
\
setfetz; \
\
_e240 = (double)(long)(X##_f0 & 0xffffff); \
_j240 = (double)(long)(Y##_f0 & 0xffffff); \
_d240 = (double)(long)((X##_f0 >> 24) & 0xffffff); \
_i240 = (double)(long)((Y##_f0 >> 24) & 0xffffff); \
_c240 = (double)(long)(((X##_f1 << 16) & 0xffffff) | (X##_f0 >> 48)); \
_h240 = (double)(long)(((Y##_f1 << 16) & 0xffffff) | (Y##_f0 >> 48)); \
_b240 = (double)(long)((X##_f1 >> 8) & 0xffffff); \
_g240 = (double)(long)((Y##_f1 >> 8) & 0xffffff); \
_a240 = (double)(long)(X##_f1 >> 32); \
_f240 = (double)(long)(Y##_f1 >> 32); \
_e240 *= _const[3]; \
_j240 *= _const[3]; \
_d240 *= _const[2]; \
_i240 *= _const[2]; \
_c240 *= _const[1]; \
_h240 *= _const[1]; \
_b240 *= _const[0]; \
_g240 *= _const[0]; \
_s240.d = _e240*_j240;\
_r240.d = _d240*_j240 + _e240*_i240;\
_q240.d = _c240*_j240 + _d240*_i240 + _e240*_h240;\
_p240.d = _b240*_j240 + _c240*_i240 + _d240*_h240 + _e240*_g240;\
_o240.d = _a240*_j240 + _b240*_i240 + _c240*_h240 + _d240*_g240 + _e240*_f240;\
_n240.d = _a240*_i240 + _b240*_h240 + _c240*_g240 + _d240*_f240; \
_m240.d = _a240*_h240 + _b240*_g240 + _c240*_f240; \
_l240.d = _a240*_g240 + _b240*_f240; \
_k240 = _a240*_f240; \
_r240.d += _s240.d; \
_q240.d += _r240.d; \
_p240.d += _q240.d; \
_o240.d += _p240.d; \
_n240.d += _o240.d; \
_m240.d += _n240.d; \
_l240.d += _m240.d; \
_k240 += _l240.d; \
_s240.d -= ((_const[10]+_s240.d)-_const[10]); \
_r240.d -= ((_const[9]+_r240.d)-_const[9]); \
_q240.d -= ((_const[8]+_q240.d)-_const[8]); \
_p240.d -= ((_const[7]+_p240.d)-_const[7]); \
_o240.d += _const[7]; \
_n240.d += _const[6]; \
_m240.d += _const[5]; \
_l240.d += _const[4]; \
if (_s240.d != 0.0) _y240 = 1; \
if (_r240.d != 0.0) _y240 = 1; \
if (_q240.d != 0.0) _y240 = 1; \
if (_p240.d != 0.0) _y240 = 1; \
_t240 = (DItype)_k240; \
_u240 = _l240.i; \
_v240 = _m240.i; \
_w240 = _n240.i; \
_x240 = _o240.i; \
R##_f1 = (_t240 << (128 - (wfracbits - 1))) \
| ((_u240 & 0xffffff) >> ((wfracbits - 1) - 104)); \
R##_f0 = ((_u240 & 0xffffff) << (168 - (wfracbits - 1))) \
| ((_v240 & 0xffffff) << (144 - (wfracbits - 1))) \
| ((_w240 & 0xffffff) << (120 - (wfracbits - 1))) \
| ((_x240 & 0xffffff) >> ((wfracbits - 1) - 96)) \
| _y240; \
resetfe; \
} while (0)
/*
* Division algorithms:
......
......@@ -232,7 +232,7 @@
/* Given a 1W * 1W => 2W primitive, do the extended multiplication. */
#define _FP_MUL_MEAT_4_wide(fs, R, X, Y, doit) \
#define _FP_MUL_MEAT_4_wide(wfracbits, R, X, Y, doit) \
do { \
_FP_FRAC_DECL_8(_z); _FP_FRAC_DECL_2(_b); _FP_FRAC_DECL_2(_c); \
_FP_FRAC_DECL_2(_d); _FP_FRAC_DECL_2(_e); _FP_FRAC_DECL_2(_f); \
......@@ -311,12 +311,12 @@
/* Normalize since we know where the msb of the multiplicands \
were (bit B), we know that the msb of the of the product is \
at either 2B or 2B-1. */ \
_FP_FRAC_SRS_8(_z, _FP_WFRACBITS_##fs-1, 2*_FP_WFRACBITS_##fs); \
_FP_FRAC_SRS_8(_z, wfracbits-1, 2*wfracbits); \
__FP_FRAC_SET_4(R, _FP_FRAC_WORD_8(_z,3), _FP_FRAC_WORD_8(_z,2), \
_FP_FRAC_WORD_8(_z,1), _FP_FRAC_WORD_8(_z,0)); \
} while (0)
#define _FP_MUL_MEAT_4_gmp(fs, R, X, Y) \
#define _FP_MUL_MEAT_4_gmp(wfracbits, R, X, Y) \
do { \
_FP_FRAC_DECL_8(_z); \
\
......@@ -325,7 +325,7 @@
/* Normalize since we know where the msb of the multiplicands \
were (bit B), we know that the msb of the of the product is \
at either 2B or 2B-1. */ \
_FP_FRAC_SRS_8(_z, _FP_WFRACBITS_##fs-1, 2*_FP_WFRACBITS_##fs); \
_FP_FRAC_SRS_8(_z, wfracbits-1, 2*wfracbits); \
__FP_FRAC_SET_4(R, _FP_FRAC_WORD_8(_z,3), _FP_FRAC_WORD_8(_z,2), \
_FP_FRAC_WORD_8(_z,1), _FP_FRAC_WORD_8(_z,0)); \
} while (0)
......
......@@ -207,7 +207,7 @@ do { \
* Main addition routine. The input values should be cooked.
*/
#define _FP_ADD(fs, wc, R, X, Y) \
#define _FP_ADD_INTERNAL(fs, wc, R, X, Y, OP) \
do { \
switch (_FP_CLS_COMBINE(X##_c, Y##_c)) \
{ \
......@@ -284,7 +284,7 @@ do { \
} \
\
case _FP_CLS_COMBINE(FP_CLS_NAN,FP_CLS_NAN): \
_FP_CHOOSENAN(fs, wc, R, X, Y); \
_FP_CHOOSENAN(fs, wc, R, X, Y, OP); \
break; \
\
case _FP_CLS_COMBINE(FP_CLS_NORMAL,FP_CLS_ZERO): \
......@@ -345,6 +345,13 @@ do { \
} \
} while (0)
#define _FP_ADD(fs, wc, R, X, Y) _FP_ADD_INTERNAL(fs, wc, R, X, Y, '+')
#define _FP_SUB(fs, wc, R, X, Y) \
do { \
if (Y##_c != FP_CLS_NAN) Y##_s ^= 1; \
_FP_ADD_INTERNAL(fs, wc, R, X, Y, '-'); \
} while (0)
/*
* Main negation routine. FIXME -- when we care about setting exception
......@@ -382,7 +389,7 @@ do { \
break; \
\
case _FP_CLS_COMBINE(FP_CLS_NAN,FP_CLS_NAN): \
_FP_CHOOSENAN(fs, wc, R, X, Y); \
_FP_CHOOSENAN(fs, wc, R, X, Y, '*'); \
break; \
\
case _FP_CLS_COMBINE(FP_CLS_NAN,FP_CLS_NORMAL): \
......@@ -440,7 +447,7 @@ do { \
break; \
\
case _FP_CLS_COMBINE(FP_CLS_NAN,FP_CLS_NAN): \
_FP_CHOOSENAN(fs, wc, R, X, Y); \
_FP_CHOOSENAN(fs, wc, R, X, Y, '/'); \
break; \
\
case _FP_CLS_COMBINE(FP_CLS_NAN,FP_CLS_NORMAL): \
......
......@@ -108,13 +108,9 @@ union _FP_UNION_Q
_FP_PACK_RAW_4_P(Q,val,X); \
} while (0)
#define FP_ISSIGNAN_Q(X) _FP_ISSIGNAN(Q,4,X)
#define FP_NEG_Q(R,X) _FP_NEG(Q,4,R,X)
#define FP_ADD_Q(R,X,Y) _FP_ADD(Q,4,R,X,Y)
/* single.h and double.h define FP_SUB_t this way too. However, _FP_SUB is
* never defined in op-common.h! Fortunately nobody seems to use the FP_SUB_t
* macros: I suggest a combination of FP_NEG and FP_ADD :-> -- PMM 02/1998
*/
#define FP_ISSIGNAN_Q(X) _FP_ISSIGNAN(Q,4,X)
#define FP_NEG_Q(R,X) _FP_NEG(Q,4,R,X)
#define FP_ADD_Q(R,X,Y) _FP_ADD(Q,4,R,X,Y)
#define FP_SUB_Q(R,X,Y) _FP_SUB(Q,4,R,X,Y)
#define FP_MUL_Q(R,X,Y) _FP_MUL(Q,4,R,X,Y)
#define FP_DIV_Q(R,X,Y) _FP_DIV(Q,4,R,X,Y)
......
......@@ -29,9 +29,12 @@
#define _FP_WS_TYPE signed long
#define _FP_I_TYPE long
#define _FP_MUL_MEAT_S(R,X,Y) _FP_MUL_MEAT_1_imm(S,R,X,Y)
#define _FP_MUL_MEAT_D(R,X,Y) _FP_MUL_MEAT_1_wide(D,R,X,Y,umul_ppmm)
#define _FP_MUL_MEAT_Q(R,X,Y) _FP_MUL_MEAT_2_wide_3mul(Q,R,X,Y,umul_ppmm)
#define _FP_MUL_MEAT_S(R,X,Y) \
_FP_MUL_MEAT_1_imm(_FP_WFRACBITS_S,R,X,Y)
#define _FP_MUL_MEAT_D(R,X,Y) \
_FP_MUL_MEAT_1_wide(_FP_WFRACBITS_D,R,X,Y,umul_ppmm)
#define _FP_MUL_MEAT_Q(R,X,Y) \
_FP_MUL_MEAT_2_wide_3mul(_FP_WFRACBITS_Q,R,X,Y,umul_ppmm)
#define _FP_DIV_MEAT_S(R,X,Y) _FP_DIV_MEAT_1_imm(S,R,X,Y,_FP_DIV_HELP_imm)
#define _FP_DIV_MEAT_D(R,X,Y) _FP_DIV_MEAT_1_udiv(D,R,X,Y)
......@@ -53,7 +56,7 @@
* CPU instruction emulation this should prefer Y.
* (see SPAMv9 B.2.2 section).
*/
#define _FP_CHOOSENAN(fs, wc, R, X, Y) \
#define _FP_CHOOSENAN(fs, wc, R, X, Y, OP) \
do { \
if ((_FP_FRAC_HIGH_RAW_##fs(Y) & _FP_QNANBIT_##fs) \
&& !(_FP_FRAC_HIGH_RAW_##fs(X) & _FP_QNANBIT_##fs)) \
......@@ -71,7 +74,7 @@
/* Obtain the current rounding mode. */
#ifndef FP_ROUNDMODE
#define FP_ROUNDMODE ((current->tss.xfsr[0] >> 30) & 0x3)
#define FP_ROUNDMODE ((current->thread.xfsr[0] >> 30) & 0x3)
#endif
/* Exception flags. */
......@@ -83,6 +86,6 @@
#define FP_HANDLE_EXCEPTIONS return _fex
#define FP_INHIBIT_RESULTS ((current->tss.xfsr[0] >> 23) & _fex)
#define FP_INHIBIT_RESULTS ((current->thread.xfsr[0] >> 23) & _fex)
#endif
/* $Id: asyncd.c,v 1.8 1999/07/04 04:35:55 davem Exp $
/* $Id: asyncd.c,v 1.9 1999/07/30 09:35:43 davem Exp $
* The asyncd kernel daemon. This handles paging on behalf of
* processes that receive page faults due to remote (async) memory
* accesses.
......@@ -91,7 +91,8 @@ static void add_to_async_queue(int taskid,
void async_fault(unsigned long address, int write, int taskid,
void (*callback)(int,unsigned long,int,int))
{
struct task_struct *tsk = task[taskid];
#warning Need some fixing here... -DaveM
struct task_struct *tsk = current /* XXX task[taskid] */;
struct mm_struct *mm = tsk->mm;
stats.faults++;
......@@ -111,7 +112,8 @@ static int fault_in_page(int taskid,
{
static unsigned last_address;
static int last_task, loop_counter;
struct task_struct *tsk = task[taskid];
#warning Need some fixing here... -DaveM
struct task_struct *tsk = current /* XXX task[taskid] */;
pgd_t *pgd;
pmd_t *pmd;
pte_t *pte;
......@@ -178,8 +180,8 @@ static int fault_in_page(int taskid,
bad_area:
stats.failure++;
tsk->tss.sig_address = address;
tsk->tss.sig_desc = SUBSIG_NOMAPPING;
tsk->thread.sig_address = address;
tsk->thread.sig_desc = SUBSIG_NOMAPPING;
send_sig(SIGSEGV, tsk, 1);
return 1;
}
......
/* $Id: fault.c,v 1.36 1999/07/04 04:35:56 davem Exp $
/* $Id: fault.c,v 1.38 1999/08/02 08:39:50 davem Exp $
* arch/sparc64/mm/fault.c: Page fault handlers for the 64-bit Sparc.
*
* Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
......@@ -84,10 +84,11 @@ void unhandled_fault(unsigned long address, struct task_struct *tsk,
printk(KERN_ALERT "Unable to handle kernel paging request "
"at virtual address %016lx\n", (unsigned long)address);
}
printk(KERN_ALERT "tsk->mm->context = %016lx\n",
(unsigned long) tsk->mm->context);
printk(KERN_ALERT "tsk->mm->pgd = %016lx\n",
(unsigned long) tsk->mm->pgd);
printk(KERN_ALERT "tsk->{mm,active_mm}->context = %016lx\n",
(tsk->mm ? tsk->mm->context : tsk->active_mm->context));
printk(KERN_ALERT "tsk->{mm,active_mm}->pgd = %016lx\n",
(tsk->mm ? (unsigned long) tsk->mm->pgd :
(unsigned long) tsk->active_mm->pgd));
die_if_kernel("Oops", regs);
}
......@@ -159,11 +160,40 @@ asmlinkage void do_sparc64_fault(struct pt_regs *regs, unsigned long address, in
down(&mm->mmap_sem);
#ifdef DEBUG_LOCKUPS
if (regs->tpc == lastpc && address == lastaddr && write == lastwrite) {
if (regs->tpc == lastpc &&
address == lastaddr &&
write == lastwrite) {
lockcnt++;
if (lockcnt == 100000) {
printk("do_sparc64_fault: possible fault loop for %016lx %s\n", address, write ? "write" : "read");
unsigned char tmp;
register unsigned long tmp1 asm("o5");
register unsigned long tmp2 asm("o4");
printk("do_sparc64_fault[%s:%d]: possible fault loop for %016lx %s\n",
current->comm, current->pid,
address, write ? "write" : "read");
printk("do_sparc64_fault: CHECK[papgd[%016lx],pcac[%016lx]]\n",
__pa(mm->pgd), pgd_val(mm->pgd[0])<<11UL);
__asm__ __volatile__(
"wrpr %%g0, 0x494, %%pstate\n\t"
"mov %3, %%g4\n\t"
"mov %%g7, %0\n\t"
"ldxa [%%g4] %2, %1\n\t"
"wrpr %%g0, 0x096, %%pstate"
: "=r" (tmp1), "=r" (tmp2)
: "i" (ASI_DMMU), "i" (TSB_REG));
printk("do_sparc64_fault: IS[papgd[%016lx],pcac[%016lx]]\n",
tmp1, tmp2);
printk("do_sparc64_fault: CHECK[ctx(%016lx)] IS[ctx(%016lx)]\n",
mm->context, spitfire_get_secondary_context());
__asm__ __volatile__("rd %%asi, %0"
: "=r" (tmp));
printk("do_sparc64_fault: CHECK[seg(%02x)] IS[seg(%02x)]\n",
current->thread.current_ds.seg, tmp);
show_regs(regs);
__sti();
while(1)
barrier();
}
} else {
lastpc = regs->tpc;
......@@ -282,8 +312,8 @@ asmlinkage void do_sparc64_fault(struct pt_regs *regs, unsigned long address, in
return;
}
} else {
current->tss.sig_address = address;
current->tss.sig_desc = SUBSIG_NOMAPPING;
current->thread.sig_address = address;
current->thread.sig_desc = SUBSIG_NOMAPPING;
force_sig(SIGSEGV, current);
return;
}
......@@ -293,8 +323,8 @@ asmlinkage void do_sparc64_fault(struct pt_regs *regs, unsigned long address, in
do_sigbus:
up(&mm->mmap_sem);
current->tss.sig_address = address;
current->tss.sig_desc = SUBSIG_MISCERROR;
current->thread.sig_address = address;
current->thread.sig_desc = SUBSIG_MISCERROR;
force_sig(SIGBUS, current);
if (regs->tstate & TSTATE_PRIV)
goto do_kernel_fault;
......
/* $Id: generic.c,v 1.8 1999/03/12 06:51:50 davem Exp $
/* $Id: generic.c,v 1.9 1999/07/23 22:32:01 davem Exp $
* generic.c: Generic Sparc mm routines that are not dependent upon
* MMU type but are Sparc specific.
*
......@@ -95,7 +95,8 @@ static inline void io_remap_pte_range(pte_t * pte, unsigned long address, unsign
space);
curend = address + 0x10000;
offset += 0x10000;
}
} else
offset += PAGE_SIZE;
} else
offset += PAGE_SIZE;
......
/* $Id: init.c,v 1.130 1999/06/29 12:34:06 davem Exp $
/* $Id: init.c,v 1.131 1999/07/30 09:35:45 davem Exp $
* arch/sparc64/mm/init.c
*
* Copyright (C) 1996-1999 David S. Miller (davem@caip.rutgers.edu)
......@@ -42,7 +42,10 @@ unsigned long *sparc64_valid_addr_bitmap;
unsigned long phys_base;
/* get_new_mmu_context() uses "cache + 1". */
spinlock_t ctx_alloc_lock = SPIN_LOCK_UNLOCKED;
unsigned long tlb_context_cache = CTX_FIRST_VERSION - 1;
#define CTX_BMAP_SLOTS (1UL << (CTX_VERSION_SHIFT - 6))
unsigned long mmu_context_bmap[CTX_BMAP_SLOTS];
/* References to section boundaries */
extern char __init_begin, __init_end, etext, __bss_start;
......@@ -386,7 +389,7 @@ void mmu_map_dma_area(unsigned long addr, int len, __u32 *dvma_addr,
dvma_pages_current_offset;
/* Map the CPU's view. */
pgdp = pgd_offset(init_task.mm, addr);
pgdp = pgd_offset(&init_mm, addr);
pmdp = pmd_alloc_kernel(pgdp, addr);
ptep = pte_alloc_kernel(pmdp, addr);
pte = mk_pte(the_page, PAGE_KERNEL);
......@@ -677,7 +680,7 @@ static inline void inherit_prom_mappings(void)
for (vaddr = trans[i].virt;
vaddr < trans[i].virt + trans[i].size;
vaddr += PAGE_SIZE) {
pgdp = pgd_offset(init_task.mm, vaddr);
pgdp = pgd_offset(&init_mm, vaddr);
if (pgd_none(*pgdp)) {
pmdp = sparc_init_alloc(&mempool,
PMD_TABLE_SIZE);
......@@ -739,7 +742,7 @@ void prom_world(int enter)
int i;
if (!enter)
set_fs(current->tss.current_ds);
set_fs(current->thread.current_ds);
if (!prom_ditlb_set)
return;
......@@ -957,9 +960,6 @@ void __flush_tlb_all(void)
: : "r" (pstate));
}
#define CTX_BMAP_SLOTS (1UL << (CTX_VERSION_SHIFT - 6))
unsigned long mmu_context_bmap[CTX_BMAP_SLOTS];
/* Caller does TLB context flushing on local CPU if necessary.
*
* We must be careful about boundary cases so that we never
......@@ -969,14 +969,16 @@ unsigned long mmu_context_bmap[CTX_BMAP_SLOTS];
*/
void get_new_mmu_context(struct mm_struct *mm)
{
unsigned long ctx = (tlb_context_cache + 1) & ~(CTX_VERSION_MASK);
unsigned long new_ctx;
unsigned long ctx, new_ctx;
spin_lock(&ctx_alloc_lock);
ctx = CTX_HWBITS(tlb_context_cache + 1);
if (ctx == 0)
ctx = 1;
if ((mm->context != NO_CONTEXT) &&
!((mm->context ^ tlb_context_cache) & CTX_VERSION_MASK))
clear_bit(mm->context & ~(CTX_VERSION_MASK), mmu_context_bmap);
if (CTX_VALID(mm->context)) {
unsigned long nr = CTX_HWBITS(mm->context);
mmu_context_bmap[nr>>6] &= ~(1UL << (nr & 63));
}
new_ctx = find_next_zero_bit(mmu_context_bmap, 1UL << CTX_VERSION_SHIFT, ctx);
if (new_ctx >= (1UL << CTX_VERSION_SHIFT)) {
new_ctx = find_next_zero_bit(mmu_context_bmap, ctx, 1);
......@@ -1003,12 +1005,13 @@ void get_new_mmu_context(struct mm_struct *mm)
goto out;
}
}
set_bit(new_ctx, mmu_context_bmap);
mmu_context_bmap[new_ctx>>6] |= (1UL << (new_ctx & 63));
new_ctx |= (tlb_context_cache & CTX_VERSION_MASK);
out:
tlb_context_cache = new_ctx;
spin_unlock(&ctx_alloc_lock);
mm->context = new_ctx;
mm->cpu_vm_mask = 0;
}
#ifndef __SMP__
......@@ -1049,7 +1052,7 @@ allocate_ptable_skeleton(unsigned long start, unsigned long end))
pte_t *ptep;
while (start < end) {
pgdp = pgd_offset(init_task.mm, start);
pgdp = pgd_offset(&init_mm, start);
if (pgd_none(*pgdp)) {
pmdp = sparc_init_alloc(&mempool, PAGE_SIZE);
memset(pmdp, 0, PAGE_SIZE);
......@@ -1073,7 +1076,7 @@ allocate_ptable_skeleton(unsigned long start, unsigned long end))
void sparc_ultra_mapioaddr(unsigned long physaddr, unsigned long virt_addr,
int bus, int rdonly)
{
pgd_t *pgdp = pgd_offset(init_task.mm, virt_addr);
pgd_t *pgdp = pgd_offset(&init_mm, virt_addr);
pmd_t *pmdp = pmd_offset(pgdp, virt_addr);
pte_t *ptep = pte_offset(pmdp, virt_addr);
pte_t pte;
......@@ -1095,7 +1098,7 @@ void sparc_ultra_unmapioaddr(unsigned long virt_addr)
pmd_t *pmdp;
pte_t *ptep;
pgdp = pgd_offset(init_task.mm, virt_addr);
pgdp = pgd_offset(&init_mm, virt_addr);
pmdp = pmd_offset(pgdp, virt_addr);
ptep = pte_offset(pmdp, virt_addr);
......
/* $Id: ultra.S,v 1.32 1999/03/28 08:39:34 davem Exp $
/* $Id: ultra.S,v 1.33 1999/08/02 08:39:49 davem Exp $
* ultra.S: Don't expand these all over the place...
*
* Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
......@@ -136,36 +136,37 @@ __flush_tlb_range_pbp_slow:
flush_icache_page: /* %o0 = phys_page */
sethi %hi(1 << 13), %o2 ! IC_set bit
mov 1, %g1
srlx %o0, 5, %o0 ! phys-addr comparitor
srlx %o0, 5, %o0
clr %o1 ! IC_addr
sllx %g1, 36, %g1
sub %g1, 1, %g2
andn %g2, 0xff, %g2 ! IC_tag mask
nop
or %o0, %g1, %o0 ! VALID+phys-addr comparitor
sllx %g2, 1, %g2
andn %g2, 0xfe, %g2 ! IC_tag mask
1: ldda [%o1] ASI_IC_TAG, %o4
and %o5, %g2, %o5
cmp %o5, %o0
be,pn %xcc, iflush1
nop
add %o1, 0x20, %g3
2: ldda [%o1 + %o2] ASI_IC_TAG, %o4
and %o5, %g2, %o5
cmp %o5, %o0
cmp %o5, %o0
be,pn %xcc, iflush2
nop
3: add %o1, 0x20, %o1
cmp %o1, %o2
3: cmp %g3, %o2
bne,pt %xcc, 1b
nop
mov %g3, %o1
retl
nop
iflush1:stxa %g0, [%o1] ASI_IC_TAG
ba,pt %xcc, 2b
flush %g6
flush %g6
ba,a,pt %xcc, 2b
iflush2:stxa %g0, [%o1 + %o2] ASI_IC_TAG
ba,pt %xcc, 3b
flush %g6
flush %g6
ba,a,pt %xcc, 3b
#ifdef __SMP__
/* These are all called by the slaves of a cross call, at
......
/* $Id: p1275.c,v 1.15 1998/10/13 14:03:47 davem Exp $
/* $Id: p1275.c,v 1.16 1999/08/02 12:05:57 jj Exp $
* p1275.c: Sun IEEE 1275 PROM low level interface routines
*
* Copyright (C) 1996,1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
......@@ -252,8 +252,8 @@ void prom_cif_callback(void)
* the counter is needed. -DaveM
*/
static int prom_entry_depth = 0;
#ifdef __SMP__
static spinlock_t prom_entry_lock = SPIN_LOCK_UNLOCKED;
#ifdef __SMP__
extern void smp_capture(void);
extern void smp_release(void);
#endif
......
/* $Id: ioctl.c,v 1.11 1999/05/27 00:36:25 davem Exp $
/* $Id: ioctl.c,v 1.12 1999/07/23 01:57:03 davem Exp $
* ioctl.c: Solaris ioctl emulation.
*
* Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
......@@ -367,7 +367,6 @@ static inline int solaris_sockmod(unsigned int fd, unsigned int cmd, u32 arg)
static inline int solaris_timod(unsigned int fd, unsigned int cmd, u32 arg,
int len, int *len_p)
{
struct inode *ino;
int ret;
switch (cmd & 0xff) {
......@@ -459,7 +458,6 @@ static inline int solaris_S(struct file *filp, unsigned int fd, unsigned int cmd
mm_segment_t old_fs;
struct strioctl si;
struct inode *ino;
struct file *filp;
struct sol_socket_struct *sock;
struct module_info *mi;
......
/* $Id: socksys.c,v 1.8 1998/08/26 10:28:28 davem Exp $
/* $Id: socksys.c,v 1.9 1999/07/23 01:57:07 davem Exp $
* socksys.c: /dev/inet/ stuff for Solaris emulation.
*
* Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
......
/* $Id: timod.c,v 1.2 1999/05/12 11:11:55 davem Exp $
/* $Id: timod.c,v 1.3 1999/08/02 12:06:01 jj Exp $
* timod.c: timod emulation.
*
* Copyright (C) 1998 Patrik Rak (prak3264@ss1000.ms.mff.cuni.cz)
......@@ -33,9 +33,7 @@ extern asmlinkage int sys32_ioctl(unsigned int fd, unsigned int cmd,
u32 arg);
asmlinkage int solaris_ioctl(unsigned int fd, unsigned int cmd, u32 arg);
#ifdef __SMP__
spinlock_t timod_pagelock = SPIN_LOCK_UNLOCKED;
#endif
static char * page = NULL ;
#ifndef DEBUG_SOLARIS_KMALLOC
......@@ -866,7 +864,7 @@ asmlinkage int solaris_getmsg(unsigned int fd, u32 arg1, u32 arg2, u32 arg3)
SOLD("entry");
lock_kernel();
if(fd >= current->files->max_fds) goto out;
if(fd >= NR_OPEN) goto out;
filp = current->files->fd[fd];
if(!filp) goto out;
......@@ -933,7 +931,7 @@ asmlinkage int solaris_putmsg(unsigned int fd, u32 arg1, u32 arg2, u32 arg3)
SOLD("entry");
lock_kernel();
if(fd >= current->files->max_fds) goto out;
if(fd >= NR_OPEN) goto out;
filp = current->files->fd[fd];
if(!filp) goto out;
......
......@@ -39,6 +39,13 @@ SECTIONS
__init_begin = .;
.text.init : { *(.text.init) }
.data.init : { *(.data.init) }
. = ALIGN(16);
__setup_start = .;
.setup_init : { *(.setup.init) }
__setup_end = .;
__initcall_start = .;
.initcall.init : { *(.initcall.init) }
__initcall_end = .;
. = ALIGN(8192);
__init_end = .;
__bss_start = .;
......
/* $Id: cmd646.c,v 1.14 1999/07/03 08:56:09 davem Exp $
/* $Id: cmd646.c,v 1.15 1999/07/23 01:48:37 davem Exp $
* cmd646.c: Enable interrupts at initialization time on Ultra/PCI machines.
* Note, this driver is not used at all on other systems because
* there the "BIOS" has done all of the following already.
......
......@@ -159,12 +159,16 @@ static int __init ramdisk_size(char *str)
return 1;
}
static int __init ramdisk_size2(char *str)
{
return ramdisk_size(str);
}
__setup("ramdisk_start=", ramdisk_start_setup);
__setup("load_ramdisk=", load_ramdisk);
__setup("prompt_ramdisk=", prompt_ramdisk);
__setup("ramdisk=", ramdisk_size);
__setup("ramdisk_size=", ramdisk_size);
__setup("ramdisk_size=", ramdisk_size2);
#endif
......
/* $Id: sunlance.c,v 1.85 1999/03/21 05:22:05 davem Exp $
/* $Id: sunlance.c,v 1.86 1999/07/23 01:52:58 davem Exp $
* lance.c: Linux/Sparc/Lance driver
*
* Written 1995, 1996 by Miguel de Icaza
......
......@@ -1421,7 +1421,7 @@ static int eb4231_recintr(struct sparcaudio_driver *drv)
status += 2;
}
sparcaudio_input_done(drv, 1);
sparcaudio_input_done(drv, status);
return 1;
}
......@@ -1503,7 +1503,7 @@ static void cs4231_start_output(struct sparcaudio_driver *drv, __u8 * buffer,
static void eb4231_stop_output(struct sparcaudio_driver *drv)
{
struct cs4231_chip *cs4231_chip = (struct cs4231_chip *)drv->private;
int dcsr;
unsigned int dcsr;
dprintk(("eb4231_stop_output: dcsr 0x%x dacr 0x%x dbcr %d\n",
readl(&cs4231_chip->eb2p->dcsr),
......@@ -1635,6 +1635,68 @@ static void cs4231_stop_input(struct sparcaudio_driver *drv)
cs4231_pollinput(drv);
}
#ifdef EB4231_SUPPORT
static void eb4231_start_input(struct sparcaudio_driver *drv, __u8 * buffer,
unsigned long count)
{
struct cs4231_chip *cs4231_chip = (struct cs4231_chip *)drv->private;
unsigned int dcsr;
cs4231_chip->input_ptr = buffer;
cs4231_chip->input_size = count;
if (cs4231_chip->perchip_info.record.active ||
(cs4231_chip->perchip_info.record.pause))
return;
cs4231_ready(drv);
cs4231_chip->perchip_info.record.active = 1;
cs4231_chip->recording_count = 0;
dcsr = readl(&cs4231_chip->eb2c->dcsr);
if (!(dcsr & EBUS_DCSR_EN_DMA)) {
writel(EBUS_DCSR_RESET, &(cs4231_chip->eb2c->dcsr));
writel(EBUS_DCSR_BURST_SZ_16, &(cs4231_chip->eb2c->dcsr));
eb4231_recintr(drv);
writel(EBUS_DCSR_BURST_SZ_16 |
(EBUS_DCSR_EN_DMA | EBUS_DCSR_INT_EN | EBUS_DCSR_EN_CNT | EBUS_DCSR_EN_NEXT),
&(cs4231_chip->eb2c->dcsr));
cs4231_enable_rec(drv);
cs4231_ready(drv);
} else
eb4231_recintr(drv);
}
static void eb4231_stop_input(struct sparcaudio_driver *drv)
{
struct cs4231_chip *cs4231_chip = (struct cs4231_chip *)drv->private;
unsigned int dcsr;
cs4231_chip->perchip_info.record.active = 0;
cs4231_chip->input_ptr = NULL;
cs4231_chip->input_size = 0;
if (cs4231_chip->input_dma_handle) {
cs4231_chip->input_dma_handle = 0;
cs4231_chip->input_dma_size = 0;
}
if (cs4231_chip->input_next_dma_handle) {
cs4231_chip->input_next_dma_handle = 0;
cs4231_chip->input_next_dma_size = 0;
}
dcsr = readl(&(cs4231_chip->eb2c->dcsr));
if (dcsr & EBUS_DCSR_EN_DMA)
writel(dcsr & ~EBUS_DCSR_EN_DMA, &(cs4231_chip->eb2c->dcsr));
cs4231_disable_rec(drv);
}
#endif
static int cs4231_set_output_pause(struct sparcaudio_driver *drv, int value)
{
struct cs4231_chip *cs4231_chip = (struct cs4231_chip *)drv->private;
......@@ -1763,13 +1825,25 @@ void eb4231_cinterrupt(int irq, void *dev_id, struct pt_regs *regs)
struct cs4231_chip *cs4231_chip = (struct cs4231_chip *)drv->private;
int dummy;
/* Read status. */
dummy = readl(&cs4231_chip->eb2c->dcsr);
/* Clear the interrupt. */
dummy = readl(&(cs4231_chip->eb2c->dcsr));
writel(dummy, &(cs4231_chip->eb2c->dcsr));
if ((dummy & EBUS_DCSR_TC) != 0
/*&& (dummy & EBUS_DCSR_A_LOADED) != 0*/) {
cs4231_chip->perchip_info.record.samples +=
cs4231_length_to_samplecount(&(cs4231_chip->perchip_info.record),
cs4231_chip->reclen);
eb4231_recintr(drv);
}
cs4231_chip->perchip_info.record.samples +=
cs4231_length_to_samplecount(&(cs4231_chip->perchip_info.record),
cs4231_chip->reclen);
eb4231_recintr(drv);
if ((dummy & EBUS_DCSR_A_LOADED) == 0) {
cs4231_chip->perchip_info.record.active = 0;
eb4231_recintr(drv);
#if 1
eb4231_getsamplecount(drv, cs4231_chip->reclen, 1);
#endif
}
}
/* ebus audio play interrupt handler. */
......@@ -1937,8 +2011,8 @@ static struct sparcaudio_operations eb4231_ops = {
cs4231_ioctl,
eb4231_start_output,
eb4231_stop_output,
cs4231_start_input,
cs4231_stop_input,
eb4231_start_input,
eb4231_stop_input,
cs4231_audio_getdev,
cs4231_set_output_volume,
cs4231_get_output_volume,
......
......@@ -79,11 +79,6 @@ extern void scrollfront(int);
struct l1a_kbd_state l1a_state = { 0, 0 };
/* Dummy function for now, we need it to link. -DaveM */
void kbd_reset_setup(char *str, int *ints)
{
}
#ifndef CONFIG_PCI
DECLARE_WAIT_QUEUE_HEAD(keypress_wait);
#endif
......@@ -1305,7 +1300,7 @@ kbd_read (struct file *f, char *buffer, size_t count, loff_t *ppos)
p = buffer;
for (; p < end && kbd_head != kbd_tail;){
#ifdef CONFIG_SPARC32_COMPAT
if (current->tss.flags & SPARC_FLAG_32BIT) {
if (current->thread.flags & SPARC_FLAG_32BIT) {
copy_to_user_ret((Firm_event *)p, &kbd_queue [kbd_tail],
sizeof(Firm_event)-sizeof(struct timeval), -EFAULT);
p += sizeof(Firm_event)-sizeof(struct timeval);
......
......@@ -386,7 +386,7 @@ sun_mouse_read(struct file *file, char *buffer,
while (p < end && !queue_empty ()){
#ifdef CONFIG_SPARC32_COMPAT
if (current->tss.flags & SPARC_FLAG_32BIT) {
if (current->thread.flags & SPARC_FLAG_32BIT) {
Firm_event *q = get_from_queue();
copy_to_user_ret((Firm_event *)p, q,
......
/* $Id: zs.c,v 1.42 1999/05/12 11:15:26 davem Exp $
/* $Id: zs.c,v 1.43 1999/07/17 06:03:58 zaitcev Exp $
* zs.c: Zilog serial port driver for the Sparc.
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
......@@ -1844,7 +1844,7 @@ int zs_open(struct tty_struct *tty, struct file * filp)
static void show_serial_version(void)
{
char *revision = "$Revision: 1.42 $";
char *revision = "$Revision: 1.43 $";
char *version, *p;
version = strchr(revision, ' ');
......@@ -2012,9 +2012,8 @@ get_zs(int chip))
/* Can use the prom for other machine types */
zsnode = prom_getchild(prom_root_node);
if (sparc_cpu_model == sun4d) {
int node;
int no = 0;
tmpnode = zsnode;
zsnode = 0;
bbnode = 0;
......
/* $Id: sbus.c,v 1.77 1999/05/29 06:25:57 davem Exp $
/* $Id: sbus.c,v 1.78 1999/07/23 02:00:27 davem Exp $
* sbus.c: SBus support routines.
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
......
......@@ -603,7 +603,7 @@ static unsigned long get_wchan(struct task_struct *p)
#ifdef __sparc_v9__
bias = STACK_BIAS;
#endif
fp = p->tss.ksp + bias;
fp = p->thread.ksp + bias;
do {
/* Bogus frame pointer? */
if (fp < (task_base + sizeof(struct task_struct)) ||
......@@ -648,11 +648,11 @@ static unsigned long get_wchan(struct task_struct *p)
#define KSTK_EIP(tsk) ((tsk)->tss.regs->nip)
#define KSTK_ESP(tsk) ((tsk)->tss.regs->gpr[1])
#elif defined (__sparc_v9__)
# define KSTK_EIP(tsk) ((tsk)->tss.kregs->tpc)
# define KSTK_ESP(tsk) ((tsk)->tss.kregs->u_regs[UREG_FP])
# define KSTK_EIP(tsk) ((tsk)->thread.kregs->tpc)
# define KSTK_ESP(tsk) ((tsk)->thread.kregs->u_regs[UREG_FP])
#elif defined(__sparc__)
# define KSTK_EIP(tsk) ((tsk)->tss.kregs->pc)
# define KSTK_ESP(tsk) ((tsk)->tss.kregs->u_regs[UREG_FP])
# define KSTK_EIP(tsk) ((tsk)->thread.kregs->pc)
# define KSTK_ESP(tsk) ((tsk)->thread.kregs->u_regs[UREG_FP])
#endif
/* Gcc optimizes away "strlen(x)" for constant x */
......
......@@ -2,6 +2,7 @@
#define _ALPHA_INIT_H
#ifndef MODULE
#define __init __attribute__ ((__section__ (".text.init")))
#define __initdata __attribute__ ((__section__ (".data.init")))
#define __initfunc(__arginit) \
......@@ -12,8 +13,37 @@
#define __INIT .section .text.init,"ax"
#define __FINIT .previous
#define __INITDATA .section .data.init,"a"
#endif
#define __cacheline_aligned __attribute__((__aligned__(32)))
#endif
/*
* Used for initialization calls.
*/
typedef int (*initcall_t)(void);
extern initcall_t __initcall_start, __initcall_end;
#define __initcall(fn) \
static __attribute__ ((unused, __section__ (".initcall.init"))) \
initcall_t __initcall_##fn = fn
/*
* Used for kernel command line parameter setup.
*/
struct kernel_param {
const char *str;
int (*setup_func)(char *);
};
extern struct kernel_param __setup_start, __setup_end;
#define __setup(str, fn) \
static __attribute__ ((__section__ (".data.init"))) \
char __setup_str_##fn[] = str; \
static __attribute__ ((unused, __section__ (".setup.init"))) \
struct kernel_param __setup_##fn = { __setup_str_##fn, fn }
#endif /* MODULE */
#endif /* _ALPHA_INIT_H */
......@@ -88,6 +88,7 @@ struct el_common_EV5_uncorrectable_mcheck {
extern void halt(void) __attribute__((noreturn));
#define prepare_to_switch() do { } while(0)
#define switch_to(prev,next,last) \
do { \
unsigned long pcbb; \
......
......@@ -257,7 +257,7 @@
#define __NR_munlockall 317
#define __NR_sysinfo 318
#define __NR__sysctl 319
#define __NR_idle 320
/* 320 was sys_idle. */
#define __NR_oldumount 321
#define __NR_swapon 322
#define __NR_times 323
......
......@@ -141,6 +141,8 @@ extern asmlinkage void __backtrace(void);
#define wmb() mb()
#define nop() __asm__ __volatile__("mov\tr0,r0\t@ nop\n\t");
#define prepare_to_switch() do { } while(0)
/*
* switch_to(prev, next) should switch from task `prev' to `next'
* `prev' will never be the same as `next'.
......
......@@ -9,6 +9,7 @@
struct task_struct; /* one of the stranger aspects of C forward declarations.. */
extern void FASTCALL(__switch_to(struct task_struct *prev, struct task_struct *next));
#define prepare_to_switch() do { } while(0)
#define switch_to(prev,next,last) do { \
asm volatile("pushl %%esi\n\t" \
"pushl %%edi\n\t" \
......
......@@ -16,6 +16,8 @@ extern inline void wrusp(unsigned long usp) {
__asm__ __volatile__("move %0,%/usp" : : "a" (usp));
}
#define prepare_to_switch() do { } while(0)
/*
* switch_to(n) should switch tasks to task ptr, first checking that
* ptr isn't the current task, in which case it does nothing. This
......
......@@ -143,6 +143,7 @@ __asm__ __volatile__( \
extern asmlinkage void *(*resume)(void *last, void *next);
#endif /* !defined (_LANGUAGE_ASSEMBLY) */
#define prepare_to_switch() do { } while(0)
#define switch_to(prev,next,last) \
do { \
(last) = resume(prev, next); \
......
......@@ -80,6 +80,7 @@ struct device_node;
extern void note_scsi_host(struct device_node *, void *);
struct task_struct;
#define prepare_to_switch() do { } while(0)
#define switch_to(prev,next,last) _switch_to((prev),(next),&(last))
extern void _switch_to(struct task_struct *, struct task_struct *,
struct task_struct **);
......
/* $Id: resource.h,v 1.7 1998/11/19 20:01:44 davem Exp $
/* $Id: resource.h,v 1.8 1999/07/30 09:37:56 davem Exp $
* resource.h: Resource definitions.
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
......
/* $Id: siginfo.h,v 1.4 1999/04/28 19:45:20 davem Exp $
/* $Id: siginfo.h,v 1.5 1999/07/29 12:56:57 jj Exp $
* siginfo.c:
*/
......@@ -26,7 +26,7 @@ typedef struct siginfo {
/* kill() */
struct {
pid_t _pid; /* sender's pid */
uid_t _uid; /* sender's uid */
unsigned int _uid; /* sender's uid */
} _kill;
/* POSIX.1b timers */
......@@ -38,20 +38,20 @@ typedef struct siginfo {
/* POSIX.1b signals */
struct {
pid_t _pid; /* sender's pid */
uid_t _uid; /* sender's uid */
unsigned int _uid; /* sender's uid */
sigval_t _sigval;
} _rt;
/* SIGCHLD */
struct {
pid_t _pid; /* which child */
uid_t _uid; /* sender's uid */
unsigned int _uid; /* sender's uid */
int _status; /* exit code */
clock_t _utime;
clock_t _stime;
} _sigchld;
/* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
/* SIGILL, SIGFPE, SIGSEGV, SIGBUS, SIGEMT */
struct {
void *_addr; /* faulting insn/memory ref. */
int _trapno; /* TRAP # which caused the signal */
......@@ -85,6 +85,7 @@ typedef struct siginfo {
* si_code values
* Digital reserves positive values for kernel-generated signals.
*/
#define SI_NOINFO 32767 /* no information in siginfo_t */
#define SI_USER 0 /* sent by kill, sigsend, raise */
#define SI_KERNEL 0x80 /* sent by the kernel from somewhere */
#define SI_QUEUE -1 /* sent by sigqueue */
......@@ -166,6 +167,12 @@ typedef struct siginfo {
#define POLL_HUP 6 /* device disconnected */
#define NSIGPOLL 6
/*
* SIGEMT si_codes
*/
#define EMT_TAGOVF 1 /* tag overflow */
#define NSIGEMT 1
/*
* sigevent definitions
*
......
/* $Id: a.out.h,v 1.4 1997/05/04 07:21:19 davem Exp $ */
/* $Id: a.out.h,v 1.5 1999/07/30 09:31:09 davem Exp $ */
#ifndef __SPARC64_A_OUT_H__
#define __SPARC64_A_OUT_H__
......@@ -95,7 +95,7 @@ struct relocation_info /* used when header.a_machtype == M_SPARC */
#ifdef __KERNEL__
#define STACK_TOP (current->tss.flags & SPARC_FLAG_32BIT ? 0xf0000000 : TASK_SIZE)
#define STACK_TOP (current->thread.flags & SPARC_FLAG_32BIT ? 0xf0000000 : TASK_SIZE)
#endif
......
This diff is collapsed.
/* $Id: checksum.h,v 1.12 1999/05/25 16:53:36 jj Exp $ */
/* $Id: checksum.h,v 1.13 1999/07/30 09:31:13 davem Exp $ */
#ifndef __SPARC64_CHECKSUM_H
#define __SPARC64_CHECKSUM_H
......@@ -50,7 +50,7 @@ csum_partial_copy_nocheck (const char *src, char *dst, int len,
unsigned int sum)
{
int ret;
unsigned char cur_ds = current->tss.current_ds.seg;
unsigned char cur_ds = current->thread.current_ds.seg;
__asm__ __volatile__ ("wr %%g0, %0, %%asi" : : "i" (ASI_P));
ret = csum_partial_copy_sparc64(src, dst, len, sum);
__asm__ __volatile__ ("wr %%g0, %0, %%asi" : : "r" (cur_ds));
......
/* $Id: elf.h,v 1.19 1999/06/11 13:26:04 jj Exp $ */
/* $Id: elf.h,v 1.20 1999/07/30 09:31:14 davem Exp $ */
#ifndef __ASM_SPARC64_ELF_H
#define __ASM_SPARC64_ELF_H
......@@ -67,17 +67,30 @@ typedef struct {
#define ELF_PLATFORM (NULL)
#ifdef __KERNEL__
#define SET_PERSONALITY(ex, ibcs2) \
do { \
if ((ex).e_ident[EI_CLASS] == ELFCLASS32) \
current->tss.flags |= SPARC_FLAG_32BIT; \
else \
current->tss.flags &= ~SPARC_FLAG_32BIT; \
\
if (ibcs2) \
current->personality = PER_SVR4; \
else if (current->personality != PER_LINUX32) \
current->personality = PER_LINUX; \
#define SET_PERSONALITY(ex, ibcs2) \
do { unsigned char flags = current->thread.flags; \
if ((ex).e_ident[EI_CLASS] == ELFCLASS32) \
flags |= SPARC_FLAG_32BIT; \
else \
flags &= ~SPARC_FLAG_32BIT; \
if (flags != current->thread.flags) { \
unsigned long pgd_cache = 0UL; \
if (flags & SPARC_FLAG_32BIT) \
pgd_cache = \
pgd_val(current->mm->pgd[0])<<11UL; \
__asm__ __volatile__( \
"stxa\t%0, [%1] %2" \
: /* no outputs */ \
: "r" (pgd_cache), \
"r" (TSB_REG), \
"i" (ASI_DMMU)); \
current->thread.flags = flags; \
} \
\
if (ibcs2) \
current->personality = PER_SVR4; \
else if (current->personality != PER_LINUX32) \
current->personality = PER_LINUX; \
} while (0)
#endif
......
#ifndef _SPARC_INIT_H
#define _SPARC_INIT_H
typedef int (*initcall_t)(void);
extern initcall_t __initcall_start, __initcall_end;
struct kernel_param {
const char *str;
int (*setup_func)(char *);
};
extern struct kernel_param __setup_start, __setup_end;
/* Used for initialization calls.. */
#define __initcall(fn) \
static __attribute__ ((unused,__section__ (".initcall.init"))) initcall_t __initcall_##fn = fn
/* Used for kernel command line parameter setup */
#define __setup(str, fn) \
static __attribute__ ((unused,__section__ (".setup.init"))) struct kernel_param __setup_##fn = { str, fn }
#define __init __attribute__ ((__section__ (".text.init")))
#define __initdata __attribute__ ((__section__ (".data.init")))
#define __initfunc(__arginit) \
......
/* $Id: mmu_context.h,v 1.36 1999/05/25 16:53:34 jj Exp $ */
/* $Id: mmu_context.h,v 1.39 1999/08/02 08:39:57 davem Exp $ */
#ifndef __SPARC64_MMU_CONTEXT_H
#define __SPARC64_MMU_CONTEXT_H
......@@ -8,122 +8,140 @@
#include <asm/spitfire.h>
#include <asm/spinlock.h>
#define NO_CONTEXT 0
#ifndef __ASSEMBLY__
extern spinlock_t ctx_alloc_lock;
extern unsigned long tlb_context_cache;
extern unsigned long mmu_context_bmap[];
#define CTX_VERSION_SHIFT (PAGE_SHIFT - 3)
#define CTX_VERSION_MASK ((~0UL) << CTX_VERSION_SHIFT)
#define CTX_FIRST_VERSION ((1UL << CTX_VERSION_SHIFT) + 1UL)
#define CTX_VALID(__ctx) \
(!(((__ctx) ^ tlb_context_cache) & CTX_VERSION_MASK))
#define CTX_HWBITS(__ctx) ((__ctx) & ~CTX_VERSION_MASK)
extern void get_new_mmu_context(struct mm_struct *mm);
/* Initialize/destroy the context related info for a new mm_struct
* instance.
/* Initialize a new mmu context. This is invoked when a new
* address space instance (unique or shared) is instantiated.
* A fresh mm_struct is cleared out to zeros, so this need not
* do anything on Sparc64 since the only thing we care about
* is that mm->context is an invalid context (ie. zero).
*/
#define init_new_context(__mm) ((__mm)->context = NO_CONTEXT)
/* Kernel threads like rpciod and nfsd drop their mm, and then use
* init_mm, when this happens we must make sure the secondary context is
* updated as well. Otherwise we have disasters relating to
* set_fs/get_fs usage later on.
*
* Also we can only clear the mmu_context_bmap bit when this is
* the final reference to the address space.
#define init_new_context(__tsk, __mm) do { } while(0)
/* Destroy a dead context. This occurs when mmput drops the
* mm_users count to zero, the mmaps have been released, and
* all the page tables have been flushed. Our job is to destroy
* any remaining processor-specific state, and in the sparc64
* case this just means freeing up the mmu context ID held by
* this task if valid.
*/
#define destroy_context(__mm) do { \
if ((__mm)->context != NO_CONTEXT && \
atomic_read(&(__mm)->count) == 1) { \
if (!(((__mm)->context ^ tlb_context_cache) & CTX_VERSION_MASK))\
clear_bit((__mm)->context & ~(CTX_VERSION_MASK), \
mmu_context_bmap); \
(__mm)->context = NO_CONTEXT; \
if(current->mm == (__mm)) { \
current->tss.ctx = 0; \
spitfire_set_secondary_context(0); \
__asm__ __volatile__("flush %g6"); \
} \
} \
} while (0)
/* The caller must flush the current set of user windows
* to the stack (if necessary) before we get here.
#define destroy_context(__mm) \
do { spin_lock(&ctx_alloc_lock); \
if (CTX_VALID((__mm)->context)) { \
unsigned long nr = CTX_HWBITS((__mm)->context); \
mmu_context_bmap[nr>>6] &= ~(1UL << (nr & 63)); \
} \
spin_unlock(&ctx_alloc_lock); \
} while(0)
/* Reload the two core values used by TLB miss handler
* processing on sparc64. They are:
* 1) The physical address of mm->pgd, when full page
* table walks are necessary, this is where the
* search begins.
* 2) A "PGD cache". For 32-bit tasks only pgd[0] is
* ever used since that maps the entire low 4GB
* completely. To speed up TLB miss processing we
* make this value available to the handlers. This
* decreases the amount of memory traffic incurred.
*/
extern __inline__ void __get_mmu_context(struct task_struct *tsk)
#define reload_tlbmiss_state(__tsk, __mm) \
do { \
register unsigned long paddr asm("o5"); \
register unsigned long pgd_cache asm("o4"); \
paddr = __pa((__mm)->pgd); \
pgd_cache = 0UL; \
if ((__tsk)->thread.flags & SPARC_FLAG_32BIT) \
pgd_cache = pgd_val((__mm)->pgd[0]) << 11UL; \
__asm__ __volatile__("wrpr %%g0, 0x494, %%pstate\n\t" \
"mov %3, %%g4\n\t" \
"mov %0, %%g7\n\t" \
"stxa %1, [%%g4] %2\n\t" \
"wrpr %%g0, 0x096, %%pstate" \
: /* no outputs */ \
: "r" (paddr), "r" (pgd_cache),\
"i" (ASI_DMMU), "i" (TSB_REG)); \
} while(0)
/* Set MMU context in the actual hardware. */
#define load_secondary_context(__mm) \
__asm__ __volatile__("stxa %0, [%1] %2\n\t" \
"flush %%g6" \
: /* No outputs */ \
: "r" (CTX_HWBITS((__mm)->context)), \
"r" (0x10), "i" (0x58))
/* Clean out potential stale TLB entries due to previous
* users of this TLB context. We flush TLB contexts
* lazily on sparc64.
*/
#define clean_secondary_context() \
__asm__ __volatile__("stxa %%g0, [%0] %1\n\t" \
"stxa %%g0, [%0] %2\n\t" \
"flush %%g6" \
: /* No outputs */ \
: "r" (0x50), "i" (0x5f), "i" (0x57))
/* Switch the current MM context. */
static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, struct task_struct *tsk, int cpu)
{
register unsigned long paddr asm("o5");
register unsigned long pgd_cache asm("o4");
struct mm_struct *mm = tsk->mm;
unsigned long asi;
if(!(tsk->tss.flags & SPARC_FLAG_KTHREAD) &&
!(tsk->flags & PF_EXITING)) {
unsigned long ctx = tlb_context_cache;
if((mm->context ^ ctx) & CTX_VERSION_MASK)
long dirty;
spin_lock(&mm->page_table_lock);
if (CTX_VALID(mm->context))
dirty = 0;
else
dirty = 1;
if (dirty || (old_mm != mm)) {
unsigned long vm_mask;
if (dirty)
get_new_mmu_context(mm);
tsk->tss.ctx = mm->context & 0x3ff;
spitfire_set_secondary_context(mm->context & 0x3ff);
__asm__ __volatile__("flush %g6");
if(!(mm->cpu_vm_mask & (1UL<<smp_processor_id()))) {
spitfire_flush_dtlb_secondary_context();
spitfire_flush_itlb_secondary_context();
__asm__ __volatile__("flush %g6");
vm_mask = (1UL << cpu);
if (!(mm->cpu_vm_mask & vm_mask)) {
mm->cpu_vm_mask |= vm_mask;
dirty = 1;
}
asi = tsk->tss.current_ds.seg;
} else {
tsk->tss.ctx = 0;
spitfire_set_secondary_context(0);
__asm__ __volatile__("flush %g6");
asi = ASI_P;
load_secondary_context(mm);
if (dirty != 0)
clean_secondary_context();
reload_tlbmiss_state(tsk, mm);
}
/* Sigh, damned include loops... just poke seg directly. */
__asm__ __volatile__ ("wr %%g0, %0, %%asi" : : "r" (asi));
paddr = __pa(mm->pgd);
if((tsk->tss.flags & (SPARC_FLAG_32BIT|SPARC_FLAG_KTHREAD)) ==
(SPARC_FLAG_32BIT))
pgd_cache = ((unsigned long) mm->pgd[0]) << 11UL;
else
pgd_cache = 0;
__asm__ __volatile__("
rdpr %%pstate, %%o2
andn %%o2, %2, %%o3
wrpr %%o3, %5, %%pstate
mov %4, %%g4
mov %0, %%g7
stxa %1, [%%g4] %3
wrpr %%o2, 0x0, %%pstate
" : /* no outputs */
: "r" (paddr), "r" (pgd_cache), "i" (PSTATE_IE),
"i" (ASI_DMMU), "i" (TSB_REG), "i" (PSTATE_MG)
: "o2", "o3");
spin_unlock(&mm->page_table_lock);
}
/* Now we define this as a do nothing macro, because the only
* generic user right now is the scheduler, and we handle all
* the atomicity issues by having switch_to() call the above
* function itself.
*/
#define get_mmu_context(x) do { } while(0)
/*
* After we have set current->mm to a new value, this activates
* the context for the new mm so we see the new mappings. Currently,
* this is always called for 'current', if that changes put appropriate
* checks here.
*
* We set the cpu_vm_mask first to zero to enforce a tlb flush for
* the new context above, then we set it to the current cpu so the
* smp tlb flush routines do not get confused.
*/
#define activate_context(__tsk) \
do { flushw_user(); \
(__tsk)->mm->cpu_vm_mask = 0; \
__get_mmu_context(__tsk); \
(__tsk)->mm->cpu_vm_mask = (1UL<<smp_processor_id()); \
} while(0)
/* Activate a new MM instance for the current task. */
static inline void activate_mm(struct mm_struct *active_mm, struct mm_struct *mm)
{
unsigned long vm_mask;
spin_lock(&mm->page_table_lock);
if (!CTX_VALID(mm->context))
get_new_mmu_context(mm);
vm_mask = (1UL << smp_processor_id());
if (!(mm->cpu_vm_mask & vm_mask))
mm->cpu_vm_mask |= vm_mask;
spin_unlock(&mm->page_table_lock);
load_secondary_context(mm);
clean_secondary_context();
reload_tlbmiss_state(current, mm);
}
#endif /* !(__ASSEMBLY__) */
......
/* $Id: page.h,v 1.25 1999/06/23 03:53:15 davem Exp $ */
/* $Id: page.h,v 1.27 1999/07/31 00:07:25 davem Exp $ */
#ifndef _SPARC64_PAGE_H
#define _SPARC64_PAGE_H
......@@ -18,9 +18,8 @@
#ifndef __ASSEMBLY__
#define BUG() do { printk("kernel BUG at %s:%d!\n", __FILE__, __LINE__); *(int *)0=0; } while (0)
#define PAGE_BUG(page) do { \
BUG(); } while (0)
#define BUG() __builtin_trap()
#define PAGE_BUG(page) BUG()
extern void clear_page(unsigned long page);
extern void copy_page(unsigned long to, unsigned long from);
......@@ -85,7 +84,7 @@ typedef unsigned long iopgprot_t;
#endif /* (STRICT_MM_TYPECHECKS) */
#define TASK_UNMAPPED_BASE ((current->tss.flags & SPARC_FLAG_32BIT) ? \
#define TASK_UNMAPPED_BASE ((current->thread.flags & SPARC_FLAG_32BIT) ? \
(0x0000000070000000UL) : (PAGE_OFFSET))
#endif /* !(__ASSEMBLY__) */
......
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment