Commit 8d6339b8 authored by Tony Luck

Pull rmia32 into release branch

parents 60b341b7 32974ad4
@@ -499,23 +499,6 @@ config ARCH_PROC_KCORE_TEXT
	def_bool y
	depends on PROC_KCORE
config IA32_SUPPORT
	bool "Support for Linux/x86 binaries"
	help
	  IA-64 processors can execute IA-32 (X86) instructions. By
	  saying Y here, the kernel will include IA-32 system call
	  emulation support which makes it possible to transparently
	  run IA-32 Linux binaries on an IA-64 Linux system.
	  If in doubt, say Y.

config COMPAT
	bool
	depends on IA32_SUPPORT
	default y

config COMPAT_FOR_U64_ALIGNMENT
	def_bool COMPAT
config IA64_MCA_RECOVERY
	tristate "MCA recovery from errors other than TLB."
...
@@ -46,7 +46,6 @@ head-y := arch/ia64/kernel/head.o arch/ia64/kernel/init_task.o
libs-y				+= arch/ia64/lib/
core-y				+= arch/ia64/kernel/ arch/ia64/mm/
core-$(CONFIG_IA32_SUPPORT)	+= arch/ia64/ia32/
core-$(CONFIG_IA64_DIG)		+= arch/ia64/dig/
core-$(CONFIG_IA64_DIG_VTD)	+= arch/ia64/dig/
core-$(CONFIG_IA64_GENERIC)	+= arch/ia64/dig/
...
@@ -131,8 +131,6 @@ CONFIG_ARCH_DISCONTIGMEM_ENABLE=y
CONFIG_ARCH_FLATMEM_ENABLE=y
CONFIG_ARCH_SPARSEMEM_ENABLE=y
# CONFIG_VIRTUAL_MEM_MAP is not set
CONFIG_IA32_SUPPORT=y
CONFIG_COMPAT=y
# CONFIG_IA64_MCA_RECOVERY is not set
CONFIG_PERFMON=y
CONFIG_IA64_PALINFO=y
...
@@ -205,8 +205,6 @@ CONFIG_VIRTUAL_MEM_MAP=y
CONFIG_HOLES_IN_ZONE=y
CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID=y
CONFIG_HAVE_ARCH_NODEDATA_EXTENSION=y
CONFIG_IA32_SUPPORT=y
CONFIG_COMPAT=y
CONFIG_COMPAT_FOR_U64_ALIGNMENT=y
CONFIG_IA64_MCA_RECOVERY=y
CONFIG_PERFMON=y
...
@@ -139,8 +139,6 @@ CONFIG_ARCH_SPARSEMEM_ENABLE=y
CONFIG_ARCH_DISCONTIGMEM_DEFAULT=y
CONFIG_NUMA=y
CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID=y
CONFIG_IA32_SUPPORT=y
CONFIG_COMPAT=y
CONFIG_IA64_MCA_RECOVERY=y
CONFIG_PERFMON=y
CONFIG_IA64_PALINFO=y
...
@@ -130,8 +130,6 @@ CONFIG_ARCH_DISCONTIGMEM_ENABLE=y
CONFIG_ARCH_FLATMEM_ENABLE=y
CONFIG_ARCH_SPARSEMEM_ENABLE=y
# CONFIG_VIRTUAL_MEM_MAP is not set
CONFIG_IA32_SUPPORT=y
CONFIG_COMPAT=y
# CONFIG_IA64_MCA_RECOVERY is not set
# CONFIG_PERFMON is not set
CONFIG_IA64_PALINFO=m
...
@@ -154,7 +154,6 @@ CONFIG_ARCH_SPARSEMEM_ENABLE=y
CONFIG_ARCH_POPULATES_NODE_MAP=y
CONFIG_VIRTUAL_MEM_MAP=y
CONFIG_HOLES_IN_ZONE=y
# CONFIG_IA32_SUPPORT is not set
CONFIG_IA64_MCA_RECOVERY=y
CONFIG_PERFMON=y
CONFIG_IA64_PALINFO=y
...
@@ -200,8 +200,6 @@ CONFIG_ARCH_SPARSEMEM_ENABLE=y
CONFIG_ARCH_POPULATES_NODE_MAP=y
CONFIG_VIRTUAL_MEM_MAP=y
CONFIG_HOLES_IN_ZONE=y
# CONFIG_IA32_SUPPORT is not set
# CONFIG_COMPAT_FOR_U64_ALIGNMENT is not set
CONFIG_IA64_MCA_RECOVERY=y
CONFIG_PERFMON=y
CONFIG_IA64_PALINFO=y
...
@@ -150,8 +150,6 @@ CONFIG_ARCH_DISCONTIGMEM_DEFAULT=y
CONFIG_ARCH_POPULATES_NODE_MAP=y
CONFIG_VIRTUAL_MEM_MAP=y
CONFIG_HOLES_IN_ZONE=y
CONFIG_IA32_SUPPORT=y
CONFIG_COMPAT=y
CONFIG_IA64_MCA_RECOVERY=y
CONFIG_PERFMON=y
CONFIG_IA64_PALINFO=y
...
#
# Makefile for the ia32 kernel emulation subsystem.
#
obj-y := ia32_entry.o sys_ia32.o ia32_signal.o \
ia32_support.o ia32_traps.o binfmt_elf32.o ia32_ldt.o
obj-$(CONFIG_AUDIT) += audit.o
# Don't let GCC use f16-f31 so that save_ia32_fpstate_live() and
# restore_ia32_fpstate_live() can be sure the live registers contain user-level state.
CFLAGS_ia32_signal.o += -mfixed-range=f16-f31
#include "../../x86/include/asm/unistd_32.h"
unsigned ia32_dir_class[] = {
#include <asm-generic/audit_dir_write.h>
~0U
};
unsigned ia32_chattr_class[] = {
#include <asm-generic/audit_change_attr.h>
~0U
};
unsigned ia32_write_class[] = {
#include <asm-generic/audit_write.h>
~0U
};
unsigned ia32_read_class[] = {
#include <asm-generic/audit_read.h>
~0U
};
unsigned ia32_signal_class[] = {
#include <asm-generic/audit_signal.h>
~0U
};
int ia32_classify_syscall(unsigned syscall)
{
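	/*
	 * The return value is the audit syscall class: 2 = open, 3 = openat,
	 * 4 = socketcall, 5 = execve; everything else is reported as a plain
	 * 32-bit (compat) syscall.
	 */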
switch(syscall) {
case __NR_open:
return 2;
case __NR_openat:
return 3;
case __NR_socketcall:
return 4;
case __NR_execve:
return 5;
default:
return 1;
}
}
/*
* IA-32 ELF support.
*
* Copyright (C) 1999 Arun Sharma <arun.sharma@intel.com>
* Copyright (C) 2001 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
*
* 06/16/00 A. Mallick initialize csd/ssd/tssd/cflg for ia32_load_state
* 04/13/01 D. Mosberger dropped saving tssd in ar.k1---it's not needed
* 09/14/01 D. Mosberger fixed memory management for gdt/tss page
*/
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/security.h>
#include <asm/param.h>
#include <asm/signal.h>
#include "ia32priv.h"
#include "elfcore32.h"
/* Override some function names */
#undef start_thread
#define start_thread ia32_start_thread
#define elf_format elf32_format
#define init_elf_binfmt init_elf32_binfmt
#define exit_elf_binfmt exit_elf32_binfmt
#undef CLOCKS_PER_SEC
#define CLOCKS_PER_SEC IA32_CLOCKS_PER_SEC
extern void ia64_elf32_init (struct pt_regs *regs);
static void elf32_set_personality (void);
static unsigned long __attribute ((unused))
randomize_stack_top(unsigned long stack_top);
#define setup_arg_pages(bprm,tos,exec) ia32_setup_arg_pages(bprm,exec)
#define elf_map elf32_map
#undef SET_PERSONALITY
#define SET_PERSONALITY(ex) elf32_set_personality()
#define elf_read_implies_exec(ex, have_pt_gnu_stack) (!(have_pt_gnu_stack))
/* Ugly but avoids duplication */
#include "../../../fs/binfmt_elf.c"
extern struct page *ia32_shared_page[];
extern unsigned long *ia32_gdt;
extern struct page *ia32_gate_page;
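/*
 * Page-fault handlers for the special IA-32 vmas: they hand back the
 * per-CPU shared page (holding the GDT/TSS) and the sigreturn gate page,
 * respectively, whenever the corresponding mapping is touched.
 */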
int
ia32_install_shared_page (struct vm_area_struct *vma, struct vm_fault *vmf)
{
vmf->page = ia32_shared_page[smp_processor_id()];
get_page(vmf->page);
return 0;
}
int
ia32_install_gate_page (struct vm_area_struct *vma, struct vm_fault *vmf)
{
vmf->page = ia32_gate_page;
get_page(vmf->page);
return 0;
}
static const struct vm_operations_struct ia32_shared_page_vm_ops = {
.fault = ia32_install_shared_page
};
static const struct vm_operations_struct ia32_gate_page_vm_ops = {
.fault = ia32_install_gate_page
};
void
ia64_elf32_init (struct pt_regs *regs)
{
struct vm_area_struct *vma;
/*
* Map GDT below 4GB, where the processor can find it. We need to map
* it with privilege level 3 because the IVE uses non-privileged accesses to these
* tables. IA-32 segmentation is used to protect against IA-32 accesses to them.
*/
vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
if (vma) {
vma->vm_mm = current->mm;
vma->vm_start = IA32_GDT_OFFSET;
vma->vm_end = vma->vm_start + PAGE_SIZE;
vma->vm_page_prot = PAGE_SHARED;
vma->vm_flags = VM_READ|VM_MAYREAD|VM_RESERVED;
vma->vm_ops = &ia32_shared_page_vm_ops;
down_write(&current->mm->mmap_sem);
{
if (insert_vm_struct(current->mm, vma)) {
kmem_cache_free(vm_area_cachep, vma);
up_write(&current->mm->mmap_sem);
BUG();
}
}
up_write(&current->mm->mmap_sem);
}
	/*
	 * When the user stack is not executable, pushing the sigreturn code onto
	 * the stack raises a segmentation fault on the return to the kernel.  The
	 * sigreturn code therefore lives in a dedicated gate page, which pretcode
	 * points at when setup_frame_ia32() builds the signal frame.
	 */
vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
if (vma) {
vma->vm_mm = current->mm;
vma->vm_start = IA32_GATE_OFFSET;
vma->vm_end = vma->vm_start + PAGE_SIZE;
vma->vm_page_prot = PAGE_COPY_EXEC;
vma->vm_flags = VM_READ | VM_MAYREAD | VM_EXEC
| VM_MAYEXEC | VM_RESERVED;
vma->vm_ops = &ia32_gate_page_vm_ops;
down_write(&current->mm->mmap_sem);
{
if (insert_vm_struct(current->mm, vma)) {
kmem_cache_free(vm_area_cachep, vma);
up_write(&current->mm->mmap_sem);
BUG();
}
}
up_write(&current->mm->mmap_sem);
}
/*
* Install LDT as anonymous memory. This gives us all-zero segment descriptors
* until a task modifies them via modify_ldt().
*/
vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
if (vma) {
vma->vm_mm = current->mm;
vma->vm_start = IA32_LDT_OFFSET;
vma->vm_end = vma->vm_start + PAGE_ALIGN(IA32_LDT_ENTRIES*IA32_LDT_ENTRY_SIZE);
vma->vm_page_prot = PAGE_SHARED;
vma->vm_flags = VM_READ|VM_WRITE|VM_MAYREAD|VM_MAYWRITE;
down_write(&current->mm->mmap_sem);
{
if (insert_vm_struct(current->mm, vma)) {
kmem_cache_free(vm_area_cachep, vma);
up_write(&current->mm->mmap_sem);
BUG();
}
}
up_write(&current->mm->mmap_sem);
}
ia64_psr(regs)->ac = 0; /* turn off alignment checking */
regs->loadrs = 0;
/*
* According to the ABI %edx points to an `atexit' handler. Since we don't have
* one we'll set it to 0 and initialize all the other registers just to make
* things more deterministic, ala the i386 implementation.
*/
regs->r8 = 0; /* %eax */
regs->r11 = 0; /* %ebx */
regs->r9 = 0; /* %ecx */
regs->r10 = 0; /* %edx */
regs->r13 = 0; /* %ebp */
regs->r14 = 0; /* %esi */
regs->r15 = 0; /* %edi */
current->thread.eflag = IA32_EFLAG;
current->thread.fsr = IA32_FSR_DEFAULT;
current->thread.fcr = IA32_FCR_DEFAULT;
current->thread.fir = 0;
current->thread.fdr = 0;
/*
* Setup GDTD. Note: GDTD is the descrambled version of the pseudo-descriptor
* format defined by Figure 3-11 "Pseudo-Descriptor Format" in the IA-32
* architecture manual. Also note that the only fields that are not ignored are
`base', `limit', `G', `P' (must be 1) and `S' (must be 0).
*/
regs->r31 = IA32_SEG_UNSCRAMBLE(IA32_SEG_DESCRIPTOR(IA32_GDT_OFFSET, IA32_PAGE_SIZE - 1,
0, 0, 0, 1, 0, 0, 0));
/* Setup the segment selectors */
regs->r16 = (__USER_DS << 16) | __USER_DS; /* ES == DS, GS, FS are zero */
regs->r17 = (__USER_DS << 16) | __USER_CS; /* SS, CS; ia32_load_state() sets TSS and LDT */
ia32_load_segment_descriptors(current);
ia32_load_state(current);
}
/*
* Undo the override of setup_arg_pages() without this ia32_setup_arg_pages()
* will suffer infinite self recursion.
*/
#undef setup_arg_pages
int
ia32_setup_arg_pages (struct linux_binprm *bprm, int executable_stack)
{
int ret;
ret = setup_arg_pages(bprm, IA32_STACK_TOP, executable_stack);
if (!ret) {
/*
* Can't do it in ia64_elf32_init(). Needs to be done before
* calls to elf32_map()
*/
current->thread.ppl = ia32_init_pp_list();
}
return ret;
}
static void
elf32_set_personality (void)
{
set_personality(PER_LINUX32);
current->thread.map_base = IA32_PAGE_OFFSET/3;
}
static unsigned long
elf32_map(struct file *filep, unsigned long addr, struct elf_phdr *eppnt,
int prot, int type, unsigned long unused)
{
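	/*
	 * IA-32 ELF segments need not be 4KB-aligned, so round the mapping
	 * down to the containing IA-32 page and bias the file offset to match.
	 */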
unsigned long pgoff = (eppnt->p_vaddr) & ~IA32_PAGE_MASK;
return ia32_do_mmap(filep, (addr & IA32_PAGE_MASK), eppnt->p_filesz + pgoff, prot, type,
eppnt->p_offset - pgoff);
}
#define cpu_uses_ia32el() (local_cpu_data->family > 0x1f)
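/*
 * Later processor families dropped hardware IA-32 execution; on those CPUs
 * x86 binaries must run under the software IA-32 Execution Layer instead,
 * so the in-kernel elf32 loader unregisters itself.
 */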
static int __init check_elf32_binfmt(void)
{
if (cpu_uses_ia32el()) {
printk("Please use IA-32 EL for executing IA-32 binaries\n");
unregister_binfmt(&elf_format);
}
return 0;
}
module_init(check_elf32_binfmt)
/*
* IA-32 ELF core dump support.
*
* Copyright (C) 2003 Arun Sharma <arun.sharma@intel.com>
*
* Derived from the x86_64 version
*/
#ifndef _ELFCORE32_H_
#define _ELFCORE32_H_
#include <asm/intrinsics.h>
#include <asm/uaccess.h>
/* Override elfcore.h */
#define _LINUX_ELFCORE_H 1
typedef unsigned int elf_greg_t;
#define ELF_NGREG (sizeof (struct user_regs_struct32) / sizeof(elf_greg_t))
typedef elf_greg_t elf_gregset_t[ELF_NGREG];
typedef struct ia32_user_i387_struct elf_fpregset_t;
typedef struct ia32_user_fxsr_struct elf_fpxregset_t;
struct elf_siginfo
{
int si_signo; /* signal number */
int si_code; /* extra code */
int si_errno; /* errno */
};
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
/*
 * Hacks are needed here since compat_timeval (= pair of s32) and the
 * ia64-native timeval (= pair of s64) are not type-compatible; without
 * them, arch/ia64/ia32/../../../fs/binfmt_elf.c gets compiler warnings on
 * its use of cputime_to_timeval(), which is usually an alias of
 * jiffies_to_timeval().
 */
#define cputime_to_timeval(a,b) \
do { (b)->tv_usec = 0; (b)->tv_sec = (a)/NSEC_PER_SEC; } while(0)
#else
#define jiffies_to_timeval(a,b) \
do { (b)->tv_usec = 0; (b)->tv_sec = (a)/HZ; } while(0)
#endif
struct elf_prstatus
{
struct elf_siginfo pr_info; /* Info associated with signal */
short pr_cursig; /* Current signal */
unsigned int pr_sigpend; /* Set of pending signals */
unsigned int pr_sighold; /* Set of held signals */
pid_t pr_pid;
pid_t pr_ppid;
pid_t pr_pgrp;
pid_t pr_sid;
struct compat_timeval pr_utime; /* User time */
struct compat_timeval pr_stime; /* System time */
struct compat_timeval pr_cutime; /* Cumulative user time */
struct compat_timeval pr_cstime; /* Cumulative system time */
elf_gregset_t pr_reg; /* GP registers */
int pr_fpvalid; /* True if math co-processor being used. */
};
#define ELF_PRARGSZ (80) /* Number of chars for args */
struct elf_prpsinfo
{
char pr_state; /* numeric process state */
char pr_sname; /* char for pr_state */
char pr_zomb; /* zombie */
char pr_nice; /* nice val */
unsigned int pr_flag; /* flags */
__u16 pr_uid;
__u16 pr_gid;
pid_t pr_pid, pr_ppid, pr_pgrp, pr_sid;
/* Lots missing */
char pr_fname[16]; /* filename of executable */
char pr_psargs[ELF_PRARGSZ]; /* initial part of arg list */
};
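/*
 * Fill an i386-layout elf_gregset_t from the IA-64 pt_regs.  The slot order
 * follows the i386 core-dump convention (ebx, ecx, edx, esi, edi, ebp, eax,
 * ds, es, fs, gs, orig_eax, eip, cs, eflags, esp, ss); the x86 general
 * registers live in IA-64 GRs per the mapping set up by ia64_elf32_init(),
 * and the segment selectors are packed into r16/r17.
 */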
#define ELF_CORE_COPY_REGS(pr_reg, regs) \
pr_reg[0] = regs->r11; \
pr_reg[1] = regs->r9; \
pr_reg[2] = regs->r10; \
pr_reg[3] = regs->r14; \
pr_reg[4] = regs->r15; \
pr_reg[5] = regs->r13; \
pr_reg[6] = regs->r8; \
pr_reg[7] = regs->r16 & 0xffff; \
pr_reg[8] = (regs->r16 >> 16) & 0xffff; \
pr_reg[9] = (regs->r16 >> 32) & 0xffff; \
pr_reg[10] = (regs->r16 >> 48) & 0xffff; \
pr_reg[11] = regs->r1; \
pr_reg[12] = regs->cr_iip; \
pr_reg[13] = regs->r17 & 0xffff; \
pr_reg[14] = ia64_getreg(_IA64_REG_AR_EFLAG); \
pr_reg[15] = regs->r12; \
pr_reg[16] = (regs->r17 >> 16) & 0xffff;
static inline void elf_core_copy_regs(elf_gregset_t *elfregs,
struct pt_regs *regs)
{
ELF_CORE_COPY_REGS((*elfregs), regs)
}
static inline int elf_core_copy_task_regs(struct task_struct *t,
elf_gregset_t* elfregs)
{
ELF_CORE_COPY_REGS((*elfregs), task_pt_regs(t));
return 1;
}
static inline int
elf_core_copy_task_fpregs(struct task_struct *tsk, struct pt_regs *regs, elf_fpregset_t *fpu)
{
struct ia32_user_i387_struct *fpstate = (void*)fpu;
mm_segment_t old_fs;
if (!tsk_used_math(tsk))
return 0;
old_fs = get_fs();
set_fs(KERNEL_DS);
save_ia32_fpstate(tsk, (struct ia32_user_i387_struct __user *) fpstate);
set_fs(old_fs);
return 1;
}
#define ELF_CORE_COPY_XFPREGS 1
#define ELF_CORE_XFPREG_TYPE NT_PRXFPREG
static inline int
elf_core_copy_task_xfpregs(struct task_struct *tsk, elf_fpxregset_t *xfpu)
{
struct ia32_user_fxsr_struct *fpxstate = (void*) xfpu;
mm_segment_t old_fs;
if (!tsk_used_math(tsk))
return 0;
old_fs = get_fs();
set_fs(KERNEL_DS);
save_ia32_fpxstate(tsk, (struct ia32_user_fxsr_struct __user *) fpxstate);
set_fs(old_fs);
return 1;
}
#endif /* _ELFCORE32_H_ */
#include <asm/asmmacro.h>
#include <asm/ia32.h>
#include <asm/asm-offsets.h>
#include <asm/signal.h>
#include <asm/thread_info.h>
#include "../kernel/minstate.h"
/*
* execve() is special because in case of success, we need to
* setup a null register window frame (in case an IA-32 process
* is exec'ing an IA-64 program).
*/
ENTRY(ia32_execve)
.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(3)
alloc loc1=ar.pfs,3,2,4,0
mov loc0=rp
.body
zxt4 out0=in0 // filename
;; // stop bit between alloc and call
zxt4 out1=in1 // argv
zxt4 out2=in2 // envp
add out3=16,sp // regs
br.call.sptk.few rp=sys32_execve
1: cmp.ge p6,p0=r8,r0
mov ar.pfs=loc1 // restore ar.pfs
;;
(p6) mov ar.pfs=r0 // clear ar.pfs in case of success
sxt4 r8=r8 // return 64-bit result
mov rp=loc0
br.ret.sptk.few rp
END(ia32_execve)
ENTRY(ia32_clone)
.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(5)
alloc r16=ar.pfs,5,2,6,0
DO_SAVE_SWITCH_STACK
mov loc0=rp
mov loc1=r16 // save ar.pfs across do_fork
.body
zxt4 out1=in1 // newsp
mov out3=16 // stacksize (compensates for 16-byte scratch area)
adds out2=IA64_SWITCH_STACK_SIZE+16,sp // out2 = &regs
mov out0=in0 // out0 = clone_flags
zxt4 out4=in2 // out4 = parent_tidptr
zxt4 out5=in4 // out5 = child_tidptr
br.call.sptk.many rp=do_fork
.ret0: .restore sp
adds sp=IA64_SWITCH_STACK_SIZE,sp // pop the switch stack
mov ar.pfs=loc1
mov rp=loc0
br.ret.sptk.many rp
END(ia32_clone)
GLOBAL_ENTRY(ia32_ret_from_clone)
PT_REGS_UNWIND_INFO(0)
{ /*
* Some versions of gas generate bad unwind info if the first instruction of a
* procedure doesn't go into the first slot of a bundle. This is a workaround.
*/
nop.m 0
nop.i 0
/*
* We need to call schedule_tail() to complete the scheduling process.
* Called by ia64_switch_to after do_fork()->copy_thread(). r8 contains the
* address of the previously executing task.
*/
br.call.sptk.many rp=ia64_invoke_schedule_tail
}
.ret1:
adds r2=TI_FLAGS+IA64_TASK_SIZE,r13
;;
ld4 r2=[r2]
;;
mov r8=0
and r2=_TIF_SYSCALL_TRACEAUDIT,r2
;;
cmp.ne p6,p0=r2,r0
(p6) br.cond.spnt .ia32_strace_check_retval
;; // prevent RAW on r8
END(ia32_ret_from_clone)
// fall through
GLOBAL_ENTRY(ia32_ret_from_syscall)
PT_REGS_UNWIND_INFO(0)
cmp.ge p6,p7=r8,r0 // syscall executed successfully?
adds r2=IA64_PT_REGS_R8_OFFSET+16,sp // r2 = &pt_regs.r8
;;
alloc r3=ar.pfs,0,0,0,0 // drop the syscall argument frame
st8 [r2]=r8 // store return value in slot for r8
br.cond.sptk.many ia64_leave_kernel
END(ia32_ret_from_syscall)
//
// Invoke a system call, but do some tracing before and after the call.
// We MUST preserve the current register frame throughout this routine
// because some system calls (such as ia64_execve) directly
// manipulate ar.pfs.
//
// Input:
// r8 = syscall number
// b6 = syscall entry point
//
GLOBAL_ENTRY(ia32_trace_syscall)
PT_REGS_UNWIND_INFO(0)
mov r3=-38
adds r2=IA64_PT_REGS_R8_OFFSET+16,sp
;;
st8 [r2]=r3 // initialize return code to -ENOSYS
br.call.sptk.few rp=syscall_trace_enter // give parent a chance to catch syscall args
cmp.lt p6,p0=r8,r0 // check tracehook
adds r2=IA64_PT_REGS_R8_OFFSET+16,sp // r2 = &pt_regs.r8
;;
(p6) st8.spill [r2]=r8 // store return value in slot for r8
(p6) br.spnt.few .ret4
.ret2: // Need to reload arguments (they may be changed by the tracing process)
adds r2=IA64_PT_REGS_R1_OFFSET+16,sp // r2 = &pt_regs.r1
adds r3=IA64_PT_REGS_R13_OFFSET+16,sp // r3 = &pt_regs.r13
mov r15=IA32_NR_syscalls
;;
ld4 r8=[r2],IA64_PT_REGS_R9_OFFSET-IA64_PT_REGS_R1_OFFSET
movl r16=ia32_syscall_table
;;
ld4 r33=[r2],8 // r9 == ecx
ld4 r37=[r3],16 // r13 == ebp
cmp.ltu.unc p6,p7=r8,r15
;;
ld4 r34=[r2],8 // r10 == edx
ld4 r36=[r3],8 // r15 == edi
(p6) shladd r16=r8,3,r16 // force ni_syscall if not valid syscall number
;;
ld8 r16=[r16]
;;
ld4 r32=[r2],8 // r11 == ebx
mov b6=r16
ld4 r35=[r3],8 // r14 == esi
br.call.sptk.few rp=b6 // do the syscall
.ia32_strace_check_retval:
cmp.lt p6,p0=r8,r0 // syscall failed?
adds r2=IA64_PT_REGS_R8_OFFSET+16,sp // r2 = &pt_regs.r8
;;
st8.spill [r2]=r8 // store return value in slot for r8
br.call.sptk.few rp=syscall_trace_leave // give parent a chance to catch return value
.ret4: alloc r2=ar.pfs,0,0,0,0 // drop the syscall argument frame
br.cond.sptk.many ia64_leave_kernel
END(ia32_trace_syscall)
GLOBAL_ENTRY(sys32_vfork)
alloc r16=ar.pfs,2,2,4,0;;
mov out0=IA64_CLONE_VFORK|IA64_CLONE_VM|SIGCHLD // out0 = clone_flags
br.cond.sptk.few .fork1 // do the work
END(sys32_vfork)
GLOBAL_ENTRY(sys32_fork)
.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(2)
alloc r16=ar.pfs,2,2,4,0
mov out0=SIGCHLD // out0 = clone_flags
;;
.fork1:
mov loc0=rp
mov loc1=r16 // save ar.pfs across do_fork
DO_SAVE_SWITCH_STACK
.body
mov out1=0
mov out3=0
adds out2=IA64_SWITCH_STACK_SIZE+16,sp // out2 = &regs
br.call.sptk.few rp=do_fork
.ret5: .restore sp
adds sp=IA64_SWITCH_STACK_SIZE,sp // pop the switch stack
mov ar.pfs=loc1
mov rp=loc0
br.ret.sptk.many rp
END(sys32_fork)
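//
// The IA-32 system call table: one 8-byte entry per i386 syscall number,
// with sys_ni_syscall standing in for calls that are not emulated.
//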
.rodata
.align 8
.globl ia32_syscall_table
ia32_syscall_table:
data8 sys_ni_syscall /* 0 - old "setup()" system call */
data8 sys_exit
data8 sys32_fork
data8 sys_read
data8 sys_write
data8 compat_sys_open /* 5 */
data8 sys_close
data8 sys32_waitpid
data8 sys_creat
data8 sys_link
data8 sys_unlink /* 10 */
data8 ia32_execve
data8 sys_chdir
data8 compat_sys_time
data8 sys_mknod
data8 sys_chmod /* 15 */
data8 sys_lchown /* 16-bit version */
data8 sys_ni_syscall /* old break syscall holder */
data8 sys_ni_syscall
data8 sys32_lseek
data8 sys_getpid /* 20 */
data8 compat_sys_mount
data8 sys_oldumount
data8 sys_setuid /* 16-bit version */
data8 sys_getuid /* 16-bit version */
data8 compat_sys_stime /* 25 */
data8 compat_sys_ptrace
data8 sys32_alarm
data8 sys_ni_syscall
data8 sys_pause
data8 compat_sys_utime /* 30 */
data8 sys_ni_syscall /* old stty syscall holder */
data8 sys_ni_syscall /* old gtty syscall holder */
data8 sys_access
data8 sys_nice
data8 sys_ni_syscall /* 35 */ /* old ftime syscall holder */
data8 sys_sync
data8 sys_kill
data8 sys_rename
data8 sys_mkdir
data8 sys_rmdir /* 40 */
data8 sys_dup
data8 sys_ia64_pipe
data8 compat_sys_times
data8 sys_ni_syscall /* old prof syscall holder */
data8 sys32_brk /* 45 */
data8 sys_setgid /* 16-bit version */
data8 sys_getgid /* 16-bit version */
data8 sys32_signal
data8 sys_geteuid /* 16-bit version */
data8 sys_getegid /* 16-bit version */ /* 50 */
data8 sys_acct
data8 sys_umount /* recycled never used phys() */
data8 sys_ni_syscall /* old lock syscall holder */
data8 compat_sys_ioctl
data8 compat_sys_fcntl /* 55 */
data8 sys_ni_syscall /* old mpx syscall holder */
data8 sys_setpgid
data8 sys_ni_syscall /* old ulimit syscall holder */
data8 sys_ni_syscall
data8 sys_umask /* 60 */
data8 sys_chroot
data8 compat_sys_ustat
data8 sys_dup2
data8 sys_getppid
data8 sys_getpgrp /* 65 */
data8 sys_setsid
data8 sys32_sigaction
data8 sys_ni_syscall
data8 sys_ni_syscall
data8 sys_setreuid /* 16-bit version */ /* 70 */
data8 sys_setregid /* 16-bit version */
data8 sys32_sigsuspend
data8 compat_sys_sigpending
data8 sys_sethostname
data8 compat_sys_setrlimit /* 75 */
data8 compat_sys_old_getrlimit
data8 compat_sys_getrusage
data8 compat_sys_gettimeofday
data8 compat_sys_settimeofday
data8 sys32_getgroups16 /* 80 */
data8 sys32_setgroups16
data8 sys32_old_select
data8 sys_symlink
data8 sys_ni_syscall
data8 sys_readlink /* 85 */
data8 sys_uselib
data8 sys_swapon
data8 sys_reboot
data8 compat_sys_old_readdir
data8 sys32_mmap /* 90 */
data8 sys32_munmap
data8 sys_truncate
data8 sys_ftruncate
data8 sys_fchmod
data8 sys_fchown /* 16-bit version */ /* 95 */
data8 sys_getpriority
data8 sys_setpriority
data8 sys_ni_syscall /* old profil syscall holder */
data8 compat_sys_statfs
data8 compat_sys_fstatfs /* 100 */
data8 sys_ni_syscall /* ioperm */
data8 compat_sys_socketcall
data8 sys_syslog
data8 compat_sys_setitimer
data8 compat_sys_getitimer /* 105 */
data8 compat_sys_newstat
data8 compat_sys_newlstat
data8 compat_sys_newfstat
data8 sys_ni_syscall
data8 sys_ni_syscall /* iopl */ /* 110 */
data8 sys_vhangup
data8 sys_ni_syscall /* used to be sys_idle */
data8 sys_ni_syscall
data8 compat_sys_wait4
data8 sys_swapoff /* 115 */
data8 compat_sys_sysinfo
data8 sys32_ipc
data8 sys_fsync
data8 sys32_sigreturn
data8 ia32_clone /* 120 */
data8 sys_setdomainname
data8 sys32_newuname
data8 sys32_modify_ldt
data8 compat_sys_adjtimex
data8 sys32_mprotect /* 125 */
data8 compat_sys_sigprocmask
data8 sys_ni_syscall /* create_module */
data8 sys_ni_syscall /* init_module */
data8 sys_ni_syscall /* delete_module */
data8 sys_ni_syscall /* get_kernel_syms */ /* 130 */
data8 sys32_quotactl
data8 sys_getpgid
data8 sys_fchdir
data8 sys_ni_syscall /* sys_bdflush */
data8 sys_sysfs /* 135 */
data8 sys32_personality
data8 sys_ni_syscall /* for afs_syscall */
data8 sys_setfsuid /* 16-bit version */
data8 sys_setfsgid /* 16-bit version */
data8 sys_llseek /* 140 */
data8 compat_sys_getdents
data8 compat_sys_select
data8 sys_flock
data8 sys32_msync
data8 compat_sys_readv /* 145 */
data8 compat_sys_writev
data8 sys_getsid
data8 sys_fdatasync
data8 compat_sys_sysctl
data8 sys_mlock /* 150 */
data8 sys_munlock
data8 sys_mlockall
data8 sys_munlockall
data8 sys_sched_setparam
data8 sys_sched_getparam /* 155 */
data8 sys_sched_setscheduler
data8 sys_sched_getscheduler
data8 sys_sched_yield
data8 sys_sched_get_priority_max
data8 sys_sched_get_priority_min /* 160 */
data8 sys32_sched_rr_get_interval
data8 compat_sys_nanosleep
data8 sys32_mremap
data8 sys_setresuid /* 16-bit version */
data8 sys32_getresuid16 /* 16-bit version */ /* 165 */
data8 sys_ni_syscall /* vm86 */
data8 sys_ni_syscall /* sys_query_module */
data8 sys_poll
data8 sys_ni_syscall /* nfsservctl */
data8 sys_setresgid /* 170 */
data8 sys32_getresgid16
data8 sys_prctl
data8 sys32_rt_sigreturn
data8 sys32_rt_sigaction
data8 sys32_rt_sigprocmask /* 175 */
data8 sys_rt_sigpending
data8 compat_sys_rt_sigtimedwait
data8 sys32_rt_sigqueueinfo
data8 compat_sys_rt_sigsuspend
data8 sys32_pread /* 180 */
data8 sys32_pwrite
data8 sys_chown /* 16-bit version */
data8 sys_getcwd
data8 sys_capget
data8 sys_capset /* 185 */
data8 sys32_sigaltstack
data8 sys32_sendfile
data8 sys_ni_syscall /* streams1 */
data8 sys_ni_syscall /* streams2 */
data8 sys32_vfork /* 190 */
data8 compat_sys_getrlimit
data8 sys32_mmap2
data8 sys32_truncate64
data8 sys32_ftruncate64
data8 sys32_stat64 /* 195 */
data8 sys32_lstat64
data8 sys32_fstat64
data8 sys_lchown
data8 sys_getuid
data8 sys_getgid /* 200 */
data8 sys_geteuid
data8 sys_getegid
data8 sys_setreuid
data8 sys_setregid
data8 sys_getgroups /* 205 */
data8 sys_setgroups
data8 sys_fchown
data8 sys_setresuid
data8 sys_getresuid
data8 sys_setresgid /* 210 */
data8 sys_getresgid
data8 sys_chown
data8 sys_setuid
data8 sys_setgid
data8 sys_setfsuid /* 215 */
data8 sys_setfsgid
data8 sys_pivot_root
data8 sys_mincore
data8 sys_madvise
data8 compat_sys_getdents64 /* 220 */
data8 compat_sys_fcntl64
data8 sys_ni_syscall /* reserved for TUX */
data8 sys_ni_syscall /* reserved for Security */
data8 sys_gettid
data8 sys_readahead /* 225 */
data8 sys_setxattr
data8 sys_lsetxattr
data8 sys_fsetxattr
data8 sys_getxattr
data8 sys_lgetxattr /* 230 */
data8 sys_fgetxattr
data8 sys_listxattr
data8 sys_llistxattr
data8 sys_flistxattr
data8 sys_removexattr /* 235 */
data8 sys_lremovexattr
data8 sys_fremovexattr
data8 sys_tkill
data8 sys_sendfile64
data8 compat_sys_futex /* 240 */
data8 compat_sys_sched_setaffinity
data8 compat_sys_sched_getaffinity
data8 sys32_set_thread_area
data8 sys32_get_thread_area
data8 compat_sys_io_setup /* 245 */
data8 sys_io_destroy
data8 compat_sys_io_getevents
data8 compat_sys_io_submit
data8 sys_io_cancel
data8 sys_fadvise64 /* 250 */
data8 sys_ni_syscall
data8 sys_exit_group
data8 sys_lookup_dcookie
data8 sys_epoll_create
data8 sys32_epoll_ctl /* 255 */
data8 sys32_epoll_wait
data8 sys_remap_file_pages
data8 sys_set_tid_address
data8 compat_sys_timer_create
data8 compat_sys_timer_settime /* 260 */
data8 compat_sys_timer_gettime
data8 sys_timer_getoverrun
data8 sys_timer_delete
data8 compat_sys_clock_settime
data8 compat_sys_clock_gettime /* 265 */
data8 compat_sys_clock_getres
data8 compat_sys_clock_nanosleep
data8 compat_sys_statfs64
data8 compat_sys_fstatfs64
data8 sys_tgkill /* 270 */
data8 compat_sys_utimes
data8 sys32_fadvise64_64
data8 sys_ni_syscall
data8 sys_ni_syscall
data8 sys_ni_syscall /* 275 */
data8 sys_ni_syscall
data8 compat_sys_mq_open
data8 sys_mq_unlink
data8 compat_sys_mq_timedsend
data8 compat_sys_mq_timedreceive /* 280 */
data8 compat_sys_mq_notify
data8 compat_sys_mq_getsetattr
data8 sys_ni_syscall /* reserved for kexec */
data8 compat_sys_waitid
// guard against failures to increase IA32_NR_syscalls
.org ia32_syscall_table + 8*IA32_NR_syscalls
/*
* Copyright (C) 2001, 2004 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
*
* Adapted from arch/i386/kernel/ldt.c
*/
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/vmalloc.h>
#include <asm/uaccess.h>
#include "ia32priv.h"
/*
* read_ldt() is not really atomic - this is not a problem since synchronization of reads
* and writes done to the LDT has to be assured by user-space anyway. Writes are atomic,
* to protect the security checks done on new descriptors.
*/
static int
read_ldt (void __user *ptr, unsigned long bytecount)
{
unsigned long bytes_left, n;
char __user *src, *dst;
char buf[256]; /* temporary buffer (don't overflow kernel stack!) */
if (bytecount > IA32_LDT_ENTRIES*IA32_LDT_ENTRY_SIZE)
bytecount = IA32_LDT_ENTRIES*IA32_LDT_ENTRY_SIZE;
bytes_left = bytecount;
src = (void __user *) IA32_LDT_OFFSET;
dst = ptr;
while (bytes_left) {
n = sizeof(buf);
if (n > bytes_left)
n = bytes_left;
/*
* We know we're reading valid memory, but we still must guard against
* running out of memory.
*/
if (__copy_from_user(buf, src, n))
return -EFAULT;
if (copy_to_user(dst, buf, n))
return -EFAULT;
src += n;
dst += n;
bytes_left -= n;
}
return bytecount;
}
static int
read_default_ldt (void __user * ptr, unsigned long bytecount)
{
unsigned long size;
int err;
/* XXX fix me: should return equivalent of default_ldt[0] */
err = 0;
size = 8;
if (size > bytecount)
size = bytecount;
err = size;
if (clear_user(ptr, size))
err = -EFAULT;
return err;
}
static int
write_ldt (void __user * ptr, unsigned long bytecount, int oldmode)
{
struct ia32_user_desc ldt_info;
__u64 entry;
int ret;
if (bytecount != sizeof(ldt_info))
return -EINVAL;
if (copy_from_user(&ldt_info, ptr, sizeof(ldt_info)))
return -EFAULT;
if (ldt_info.entry_number >= IA32_LDT_ENTRIES)
return -EINVAL;
if (ldt_info.contents == 3) {
if (oldmode)
return -EINVAL;
if (ldt_info.seg_not_present == 0)
return -EINVAL;
}
if (ldt_info.base_addr == 0 && ldt_info.limit == 0
&& (oldmode || (ldt_info.contents == 0 && ldt_info.read_exec_only == 1
&& ldt_info.seg_32bit == 0 && ldt_info.limit_in_pages == 0
&& ldt_info.seg_not_present == 1 && ldt_info.useable == 0)))
/* allow LDTs to be cleared by the user */
entry = 0;
else
/* we must set the "Accessed" bit as IVE doesn't emulate it */
entry = IA32_SEG_DESCRIPTOR(ldt_info.base_addr, ldt_info.limit,
(((ldt_info.read_exec_only ^ 1) << 1)
| (ldt_info.contents << 2)) | 1,
1, 3, ldt_info.seg_not_present ^ 1,
(oldmode ? 0 : ldt_info.useable),
ldt_info.seg_32bit,
ldt_info.limit_in_pages);
/*
* Install the new entry. We know we're accessing valid (mapped) user-level
* memory, but we still need to guard against out-of-memory, hence we must use
* put_user().
*/
ret = __put_user(entry, (__u64 __user *) IA32_LDT_OFFSET + ldt_info.entry_number);
ia32_load_segment_descriptors(current);
return ret;
}
asmlinkage int
sys32_modify_ldt (int func, unsigned int ptr, unsigned int bytecount)
{
int ret = -ENOSYS;
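	/* The function codes mirror i386 modify_ldt(): 0 reads the LDT, 2 reads
	   the default LDT, 1 writes an entry in the legacy format and 0x11 in
	   the current one. */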
switch (func) {
case 0:
ret = read_ldt(compat_ptr(ptr), bytecount);
break;
case 1:
ret = write_ldt(compat_ptr(ptr), bytecount, 1);
break;
case 2:
ret = read_default_ldt(compat_ptr(ptr), bytecount);
break;
case 0x11:
ret = write_ldt(compat_ptr(ptr), bytecount, 0);
break;
}
return ret;
}
/*
* IA32 Architecture-specific signal handling support.
*
* Copyright (C) 1999, 2001-2002, 2005 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
* Copyright (C) 1999 Arun Sharma <arun.sharma@intel.com>
* Copyright (C) 2000 VA Linux Co
* Copyright (C) 2000 Don Dugger <n0ano@valinux.com>
*
* Derived from i386 and Alpha versions.
*/
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/personality.h>
#include <linux/ptrace.h>
#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/syscalls.h>
#include <linux/unistd.h>
#include <linux/wait.h>
#include <linux/compat.h>
#include <asm/intrinsics.h>
#include <asm/uaccess.h>
#include <asm/rse.h>
#include <asm/sigcontext.h>
#include "ia32priv.h"
#include "../kernel/sigframe.h"
#define A(__x) ((unsigned long)(__x))
#define DEBUG_SIG 0
#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
#define __IA32_NR_sigreturn 119
#define __IA32_NR_rt_sigreturn 173
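/* i386 syscall numbers baked into the sigreturn trampolines built below */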
struct sigframe_ia32
{
int pretcode;
int sig;
struct sigcontext_ia32 sc;
struct _fpstate_ia32 fpstate;
unsigned int extramask[_COMPAT_NSIG_WORDS-1];
char retcode[8];
};
struct rt_sigframe_ia32
{
int pretcode;
int sig;
int pinfo;
int puc;
compat_siginfo_t info;
struct ucontext_ia32 uc;
struct _fpstate_ia32 fpstate;
char retcode[8];
};
int
copy_siginfo_from_user32 (siginfo_t *to, compat_siginfo_t __user *from)
{
unsigned long tmp;
int err;
if (!access_ok(VERIFY_READ, from, sizeof(compat_siginfo_t)))
return -EFAULT;
err = __get_user(to->si_signo, &from->si_signo);
err |= __get_user(to->si_errno, &from->si_errno);
err |= __get_user(to->si_code, &from->si_code);
if (to->si_code < 0)
err |= __copy_from_user(&to->_sifields._pad, &from->_sifields._pad, SI_PAD_SIZE);
else {
switch (to->si_code >> 16) {
case __SI_CHLD >> 16:
err |= __get_user(to->si_utime, &from->si_utime);
err |= __get_user(to->si_stime, &from->si_stime);
err |= __get_user(to->si_status, &from->si_status);
default:
err |= __get_user(to->si_pid, &from->si_pid);
err |= __get_user(to->si_uid, &from->si_uid);
break;
case __SI_FAULT >> 16:
err |= __get_user(tmp, &from->si_addr);
to->si_addr = (void __user *) tmp;
break;
case __SI_POLL >> 16:
err |= __get_user(to->si_band, &from->si_band);
err |= __get_user(to->si_fd, &from->si_fd);
break;
case __SI_RT >> 16: /* This is not generated by the kernel as of now. */
case __SI_MESGQ >> 16:
err |= __get_user(to->si_pid, &from->si_pid);
err |= __get_user(to->si_uid, &from->si_uid);
err |= __get_user(to->si_int, &from->si_int);
break;
}
}
return err;
}
int
copy_siginfo_to_user32 (compat_siginfo_t __user *to, siginfo_t *from)
{
unsigned int addr;
int err;
if (!access_ok(VERIFY_WRITE, to, sizeof(compat_siginfo_t)))
return -EFAULT;
/* If you change siginfo_t structure, please be sure
this code is fixed accordingly.
It should never copy any pad contained in the structure
to avoid security leaks, but must copy the generic
3 ints plus the relevant union member.
This routine must convert siginfo from 64bit to 32bit as well
at the same time. */
err = __put_user(from->si_signo, &to->si_signo);
err |= __put_user(from->si_errno, &to->si_errno);
err |= __put_user((short)from->si_code, &to->si_code);
if (from->si_code < 0)
err |= __copy_to_user(&to->_sifields._pad, &from->_sifields._pad, SI_PAD_SIZE);
else {
switch (from->si_code >> 16) {
case __SI_CHLD >> 16:
err |= __put_user(from->si_utime, &to->si_utime);
err |= __put_user(from->si_stime, &to->si_stime);
err |= __put_user(from->si_status, &to->si_status);
default:
err |= __put_user(from->si_pid, &to->si_pid);
err |= __put_user(from->si_uid, &to->si_uid);
break;
case __SI_FAULT >> 16:
/* avoid type-checking warnings by copying _pad[0] in lieu of si_addr... */
err |= __put_user(from->_sifields._pad[0], &to->si_addr);
break;
case __SI_POLL >> 16:
err |= __put_user(from->si_band, &to->si_band);
err |= __put_user(from->si_fd, &to->si_fd);
break;
case __SI_TIMER >> 16:
err |= __put_user(from->si_tid, &to->si_tid);
err |= __put_user(from->si_overrun, &to->si_overrun);
addr = (unsigned long) from->si_ptr;
err |= __put_user(addr, &to->si_ptr);
break;
case __SI_RT >> 16: /* Not generated by the kernel as of now. */
case __SI_MESGQ >> 16:
err |= __put_user(from->si_uid, &to->si_uid);
err |= __put_user(from->si_pid, &to->si_pid);
addr = (unsigned long) from->si_ptr;
err |= __put_user(addr, &to->si_ptr);
break;
}
}
return err;
}
/*
* SAVE and RESTORE of ia32 fpstate info, from ia64 current state
* Used in exception handler to pass the fpstate to the user, and restore
* the fpstate while returning from the exception handler.
*
* fpstate info and their mapping to IA64 regs:
* fpstate REG(BITS) Attribute Comments
* cw ar.fcr(0:12) with bits 7 and 6 not used
* sw ar.fsr(0:15)
* tag ar.fsr(16:31) with odd numbered bits not used
* (read returns 0, writes ignored)
* ipoff ar.fir(0:31)
* cssel ar.fir(32:47)
* dataoff ar.fdr(0:31)
* datasel ar.fdr(32:47)
*
* _st[(0+TOS)%8] f8
* _st[(1+TOS)%8] f9
* _st[(2+TOS)%8] f10
* _st[(3+TOS)%8] f11 (f8..f11 from ptregs)
* : : : (f12..f15 from live reg)
* : : :
* _st[(7+TOS)%8] f15 TOS=sw.top(bits11:13)
*
* status Same as sw RO
* magic 0 as X86_FXSR_MAGIC in ia32
* mxcsr Bits(7:15)=ar.fcr(39:47)
* Bits(0:5) =ar.fsr(32:37) with bit 6 reserved
* _xmm[0..7] f16..f31 (live registers)
* with _xmm[0]
* Bit(64:127)=f17(0:63)
* Bit(0:63)=f16(0:63)
* All other fields unused...
*/
static int
save_ia32_fpstate_live (struct _fpstate_ia32 __user *save)
{
struct task_struct *tsk = current;
struct pt_regs *ptp;
struct _fpreg_ia32 *fpregp;
char buf[32];
unsigned long fsr, fcr, fir, fdr;
unsigned long new_fsr;
unsigned long num128[2];
unsigned long mxcsr=0;
int fp_tos, fr8_st_map;
if (!access_ok(VERIFY_WRITE, save, sizeof(*save)))
return -EFAULT;
/* Read in fsr, fcr, fir, fdr and copy onto fpstate */
fsr = ia64_getreg(_IA64_REG_AR_FSR);
fcr = ia64_getreg(_IA64_REG_AR_FCR);
fir = ia64_getreg(_IA64_REG_AR_FIR);
fdr = ia64_getreg(_IA64_REG_AR_FDR);
	/*
	 * We need to clear the exception state before calling the signal handler.
	 * Clear bit 15 and bits 0-7 in the fp status word, similar to what the
	 * fnclex instruction does.
	 */
new_fsr = fsr & ~0x80ff;
ia64_setreg(_IA64_REG_AR_FSR, new_fsr);
__put_user(fcr & 0xffff, &save->cw);
__put_user(fsr & 0xffff, &save->sw);
__put_user((fsr>>16) & 0xffff, &save->tag);
__put_user(fir, &save->ipoff);
__put_user((fir>>32) & 0xffff, &save->cssel);
__put_user(fdr, &save->dataoff);
__put_user((fdr>>32) & 0xffff, &save->datasel);
__put_user(fsr & 0xffff, &save->status);
mxcsr = ((fcr>>32) & 0xff80) | ((fsr>>32) & 0x3f);
__put_user(mxcsr & 0xffff, &save->mxcsr);
__put_user( 0, &save->magic); //#define X86_FXSR_MAGIC 0x0000
/*
* save f8..f11 from pt_regs
* save f12..f15 from live register set
*/
/*
* Find the location where f8 has to go in fp reg stack. This depends on
* TOP(11:13) field of sw. Other f reg continue sequentially from where f8 maps
* to.
*/
fp_tos = (fsr>>11)&0x7;
fr8_st_map = (8-fp_tos)&0x7;
ptp = task_pt_regs(tsk);
fpregp = (struct _fpreg_ia32 *)(((unsigned long)buf + 15) & ~15);
ia64f2ia32f(fpregp, &ptp->f8);
copy_to_user(&save->_st[(0+fr8_st_map)&0x7], fpregp, sizeof(struct _fpreg_ia32));
ia64f2ia32f(fpregp, &ptp->f9);
copy_to_user(&save->_st[(1+fr8_st_map)&0x7], fpregp, sizeof(struct _fpreg_ia32));
ia64f2ia32f(fpregp, &ptp->f10);
copy_to_user(&save->_st[(2+fr8_st_map)&0x7], fpregp, sizeof(struct _fpreg_ia32));
ia64f2ia32f(fpregp, &ptp->f11);
copy_to_user(&save->_st[(3+fr8_st_map)&0x7], fpregp, sizeof(struct _fpreg_ia32));
ia64_stfe(fpregp, 12);
copy_to_user(&save->_st[(4+fr8_st_map)&0x7], fpregp, sizeof(struct _fpreg_ia32));
ia64_stfe(fpregp, 13);
copy_to_user(&save->_st[(5+fr8_st_map)&0x7], fpregp, sizeof(struct _fpreg_ia32));
ia64_stfe(fpregp, 14);
copy_to_user(&save->_st[(6+fr8_st_map)&0x7], fpregp, sizeof(struct _fpreg_ia32));
ia64_stfe(fpregp, 15);
copy_to_user(&save->_st[(7+fr8_st_map)&0x7], fpregp, sizeof(struct _fpreg_ia32));
ia64_stf8(&num128[0], 16);
ia64_stf8(&num128[1], 17);
copy_to_user(&save->_xmm[0], num128, sizeof(struct _xmmreg_ia32));
ia64_stf8(&num128[0], 18);
ia64_stf8(&num128[1], 19);
copy_to_user(&save->_xmm[1], num128, sizeof(struct _xmmreg_ia32));
ia64_stf8(&num128[0], 20);
ia64_stf8(&num128[1], 21);
copy_to_user(&save->_xmm[2], num128, sizeof(struct _xmmreg_ia32));
ia64_stf8(&num128[0], 22);
ia64_stf8(&num128[1], 23);
copy_to_user(&save->_xmm[3], num128, sizeof(struct _xmmreg_ia32));
ia64_stf8(&num128[0], 24);
ia64_stf8(&num128[1], 25);
copy_to_user(&save->_xmm[4], num128, sizeof(struct _xmmreg_ia32));
ia64_stf8(&num128[0], 26);
ia64_stf8(&num128[1], 27);
copy_to_user(&save->_xmm[5], num128, sizeof(struct _xmmreg_ia32));
ia64_stf8(&num128[0], 28);
ia64_stf8(&num128[1], 29);
copy_to_user(&save->_xmm[6], num128, sizeof(struct _xmmreg_ia32));
ia64_stf8(&num128[0], 30);
ia64_stf8(&num128[1], 31);
copy_to_user(&save->_xmm[7], num128, sizeof(struct _xmmreg_ia32));
return 0;
}
static int
restore_ia32_fpstate_live (struct _fpstate_ia32 __user *save)
{
struct task_struct *tsk = current;
struct pt_regs *ptp;
unsigned int lo, hi;
unsigned long num128[2];
unsigned long num64, mxcsr;
struct _fpreg_ia32 *fpregp;
char buf[32];
unsigned long fsr, fcr, fir, fdr;
int fp_tos, fr8_st_map;
if (!access_ok(VERIFY_READ, save, sizeof(*save)))
return(-EFAULT);
	/*
	 * Updating fsr, fcr, fir, fdr is just a bit more complicated than the
	 * save path:
	 * - we must not write any value other than the specific fpstate info
	 * - the untouched parts of fsr, fdr, fir, fcr must remain unchanged
	 * So we do a read, change the specific fields, and write back.
	 */
fsr = ia64_getreg(_IA64_REG_AR_FSR);
fcr = ia64_getreg(_IA64_REG_AR_FCR);
fir = ia64_getreg(_IA64_REG_AR_FIR);
fdr = ia64_getreg(_IA64_REG_AR_FDR);
__get_user(mxcsr, (unsigned int __user *)&save->mxcsr);
/* setting bits 0..5 8..12 with cw and 39..47 from mxcsr */
__get_user(lo, (unsigned int __user *)&save->cw);
num64 = mxcsr & 0xff10;
num64 = (num64 << 32) | (lo & 0x1f3f);
fcr = (fcr & (~0xff1000001f3fUL)) | num64;
/* setting bits 0..31 with sw and tag and 32..37 from mxcsr */
__get_user(lo, (unsigned int __user *)&save->sw);
/* set bits 15,7 (fsw.b, fsw.es) to reflect the current error status */
if ( !(lo & 0x7f) )
lo &= (~0x8080);
__get_user(hi, (unsigned int __user *)&save->tag);
num64 = mxcsr & 0x3f;
num64 = (num64 << 16) | (hi & 0xffff);
num64 = (num64 << 16) | (lo & 0xffff);
fsr = (fsr & (~0x3fffffffffUL)) | num64;
/* setting bits 0..47 with cssel and ipoff */
__get_user(lo, (unsigned int __user *)&save->ipoff);
__get_user(hi, (unsigned int __user *)&save->cssel);
num64 = hi & 0xffff;
num64 = (num64 << 32) | lo;
fir = (fir & (~0xffffffffffffUL)) | num64;
/* setting bits 0..47 with datasel and dataoff */
__get_user(lo, (unsigned int __user *)&save->dataoff);
__get_user(hi, (unsigned int __user *)&save->datasel);
num64 = hi & 0xffff;
num64 = (num64 << 32) | lo;
fdr = (fdr & (~0xffffffffffffUL)) | num64;
ia64_setreg(_IA64_REG_AR_FSR, fsr);
ia64_setreg(_IA64_REG_AR_FCR, fcr);
ia64_setreg(_IA64_REG_AR_FIR, fir);
ia64_setreg(_IA64_REG_AR_FDR, fdr);
/*
* restore f8..f11 onto pt_regs
* restore f12..f15 onto live registers
*/
/*
* Find the location where f8 has to go in fp reg stack. This depends on
* TOP(11:13) field of sw. Other f reg continue sequentially from where f8 maps
* to.
*/
fp_tos = (fsr>>11)&0x7;
fr8_st_map = (8-fp_tos)&0x7;
fpregp = (struct _fpreg_ia32 *)(((unsigned long)buf + 15) & ~15);
ptp = task_pt_regs(tsk);
copy_from_user(fpregp, &save->_st[(0+fr8_st_map)&0x7], sizeof(struct _fpreg_ia32));
ia32f2ia64f(&ptp->f8, fpregp);
copy_from_user(fpregp, &save->_st[(1+fr8_st_map)&0x7], sizeof(struct _fpreg_ia32));
ia32f2ia64f(&ptp->f9, fpregp);
copy_from_user(fpregp, &save->_st[(2+fr8_st_map)&0x7], sizeof(struct _fpreg_ia32));
ia32f2ia64f(&ptp->f10, fpregp);
copy_from_user(fpregp, &save->_st[(3+fr8_st_map)&0x7], sizeof(struct _fpreg_ia32));
ia32f2ia64f(&ptp->f11, fpregp);
copy_from_user(fpregp, &save->_st[(4+fr8_st_map)&0x7], sizeof(struct _fpreg_ia32));
ia64_ldfe(12, fpregp);
copy_from_user(fpregp, &save->_st[(5+fr8_st_map)&0x7], sizeof(struct _fpreg_ia32));
ia64_ldfe(13, fpregp);
copy_from_user(fpregp, &save->_st[(6+fr8_st_map)&0x7], sizeof(struct _fpreg_ia32));
ia64_ldfe(14, fpregp);
copy_from_user(fpregp, &save->_st[(7+fr8_st_map)&0x7], sizeof(struct _fpreg_ia32));
ia64_ldfe(15, fpregp);
copy_from_user(num128, &save->_xmm[0], sizeof(struct _xmmreg_ia32));
ia64_ldf8(16, &num128[0]);
ia64_ldf8(17, &num128[1]);
copy_from_user(num128, &save->_xmm[1], sizeof(struct _xmmreg_ia32));
ia64_ldf8(18, &num128[0]);
ia64_ldf8(19, &num128[1]);
copy_from_user(num128, &save->_xmm[2], sizeof(struct _xmmreg_ia32));
ia64_ldf8(20, &num128[0]);
ia64_ldf8(21, &num128[1]);
copy_from_user(num128, &save->_xmm[3], sizeof(struct _xmmreg_ia32));
ia64_ldf8(22, &num128[0]);
ia64_ldf8(23, &num128[1]);
copy_from_user(num128, &save->_xmm[4], sizeof(struct _xmmreg_ia32));
ia64_ldf8(24, &num128[0]);
ia64_ldf8(25, &num128[1]);
copy_from_user(num128, &save->_xmm[5], sizeof(struct _xmmreg_ia32));
ia64_ldf8(26, &num128[0]);
ia64_ldf8(27, &num128[1]);
copy_from_user(num128, &save->_xmm[6], sizeof(struct _xmmreg_ia32));
ia64_ldf8(28, &num128[0]);
ia64_ldf8(29, &num128[1]);
copy_from_user(num128, &save->_xmm[7], sizeof(struct _xmmreg_ia32));
ia64_ldf8(30, &num128[0]);
ia64_ldf8(31, &num128[1]);
return 0;
}
static inline void
sigact_set_handler (struct k_sigaction *sa, unsigned int handler, unsigned int restorer)
{
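	/*
	 * Both the IA-32 handler and restorer addresses are 32-bit, so they
	 * are packed into the single 64-bit sa_handler slot (restorer in the
	 * upper half); IA32_SA_HANDLER()/IA32_SA_RESTORER() unpack them again.
	 */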
if (handler + 1 <= 2)
/* SIG_DFL, SIG_IGN, or SIG_ERR: must sign-extend to 64-bits */
sa->sa.sa_handler = (__sighandler_t) A((int) handler);
else
sa->sa.sa_handler = (__sighandler_t) (((unsigned long) restorer << 32) | handler);
}
asmlinkage long
sys32_sigsuspend (int history0, int history1, old_sigset_t mask)
{
mask &= _BLOCKABLE;
spin_lock_irq(&current->sighand->siglock);
current->saved_sigmask = current->blocked;
siginitset(&current->blocked, mask);
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
current->state = TASK_INTERRUPTIBLE;
schedule();
set_restore_sigmask();
return -ERESTARTNOHAND;
}
asmlinkage long
sys32_signal (int sig, unsigned int handler)
{
struct k_sigaction new_sa, old_sa;
int ret;
sigact_set_handler(&new_sa, handler, 0);
new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
sigemptyset(&new_sa.sa.sa_mask);
ret = do_sigaction(sig, &new_sa, &old_sa);
return ret ? ret : IA32_SA_HANDLER(&old_sa);
}
asmlinkage long
sys32_rt_sigaction (int sig, struct sigaction32 __user *act,
struct sigaction32 __user *oact, unsigned int sigsetsize)
{
struct k_sigaction new_ka, old_ka;
unsigned int handler, restorer;
int ret;
/* XXX: Don't preclude handling different sized sigset_t's. */
if (sigsetsize != sizeof(compat_sigset_t))
return -EINVAL;
if (act) {
ret = get_user(handler, &act->sa_handler);
ret |= get_user(new_ka.sa.sa_flags, &act->sa_flags);
ret |= get_user(restorer, &act->sa_restorer);
ret |= copy_from_user(&new_ka.sa.sa_mask, &act->sa_mask, sizeof(compat_sigset_t));
if (ret)
return -EFAULT;
sigact_set_handler(&new_ka, handler, restorer);
}
ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
if (!ret && oact) {
ret = put_user(IA32_SA_HANDLER(&old_ka), &oact->sa_handler);
ret |= put_user(old_ka.sa.sa_flags, &oact->sa_flags);
ret |= put_user(IA32_SA_RESTORER(&old_ka), &oact->sa_restorer);
ret |= copy_to_user(&oact->sa_mask, &old_ka.sa.sa_mask, sizeof(compat_sigset_t));
}
return ret;
}
asmlinkage long
sys32_rt_sigprocmask (int how, compat_sigset_t __user *set, compat_sigset_t __user *oset,
unsigned int sigsetsize)
{
mm_segment_t old_fs = get_fs();
sigset_t s;
long ret;
if (sigsetsize > sizeof(s))
return -EINVAL;
if (set) {
memset(&s, 0, sizeof(s));
if (copy_from_user(&s.sig, set, sigsetsize))
return -EFAULT;
}
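	/*
	 * Widen the 32-bit sigset into a native sigset_t and let the regular
	 * sys_rt_sigprocmask() do the work; set_fs(KERNEL_DS) makes the
	 * kernel-stack buffer pass its user-pointer checks.
	 */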
set_fs(KERNEL_DS);
ret = sys_rt_sigprocmask(how,
set ? (sigset_t __user *) &s : NULL,
oset ? (sigset_t __user *) &s : NULL, sizeof(s));
set_fs(old_fs);
if (ret)
return ret;
if (oset) {
if (copy_to_user(oset, &s.sig, sigsetsize))
return -EFAULT;
}
return 0;
}
asmlinkage long
sys32_rt_sigqueueinfo (int pid, int sig, compat_siginfo_t __user *uinfo)
{
mm_segment_t old_fs = get_fs();
siginfo_t info;
int ret;
if (copy_siginfo_from_user32(&info, uinfo))
return -EFAULT;
set_fs(KERNEL_DS);
ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __user *) &info);
set_fs(old_fs);
return ret;
}
asmlinkage long
sys32_sigaction (int sig, struct old_sigaction32 __user *act, struct old_sigaction32 __user *oact)
{
struct k_sigaction new_ka, old_ka;
unsigned int handler, restorer;
int ret;
if (act) {
compat_old_sigset_t mask;
ret = get_user(handler, &act->sa_handler);
ret |= get_user(new_ka.sa.sa_flags, &act->sa_flags);
ret |= get_user(restorer, &act->sa_restorer);
ret |= get_user(mask, &act->sa_mask);
if (ret)
return ret;
sigact_set_handler(&new_ka, handler, restorer);
siginitset(&new_ka.sa.sa_mask, mask);
}
ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
if (!ret && oact) {
ret = put_user(IA32_SA_HANDLER(&old_ka), &oact->sa_handler);
ret |= put_user(old_ka.sa.sa_flags, &oact->sa_flags);
ret |= put_user(IA32_SA_RESTORER(&old_ka), &oact->sa_restorer);
ret |= put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
}
return ret;
}
static int
setup_sigcontext_ia32 (struct sigcontext_ia32 __user *sc, struct _fpstate_ia32 __user *fpstate,
struct pt_regs *regs, unsigned long mask)
{
int err = 0;
unsigned long flag;
if (!access_ok(VERIFY_WRITE, sc, sizeof(*sc)))
return -EFAULT;
err |= __put_user((regs->r16 >> 32) & 0xffff, (unsigned int __user *)&sc->fs);
err |= __put_user((regs->r16 >> 48) & 0xffff, (unsigned int __user *)&sc->gs);
err |= __put_user((regs->r16 >> 16) & 0xffff, (unsigned int __user *)&sc->es);
err |= __put_user(regs->r16 & 0xffff, (unsigned int __user *)&sc->ds);
err |= __put_user(regs->r15, &sc->edi);
err |= __put_user(regs->r14, &sc->esi);
err |= __put_user(regs->r13, &sc->ebp);
err |= __put_user(regs->r12, &sc->esp);
err |= __put_user(regs->r11, &sc->ebx);
err |= __put_user(regs->r10, &sc->edx);
err |= __put_user(regs->r9, &sc->ecx);
err |= __put_user(regs->r8, &sc->eax);
#if 0
err |= __put_user(current->tss.trap_no, &sc->trapno);
err |= __put_user(current->tss.error_code, &sc->err);
#endif
err |= __put_user(regs->cr_iip, &sc->eip);
err |= __put_user(regs->r17 & 0xffff, (unsigned int __user *)&sc->cs);
/*
* `eflags' is in an ar register for this context
*/
flag = ia64_getreg(_IA64_REG_AR_EFLAG);
err |= __put_user((unsigned int)flag, &sc->eflags);
err |= __put_user(regs->r12, &sc->esp_at_signal);
err |= __put_user((regs->r17 >> 16) & 0xffff, (unsigned int __user *)&sc->ss);
if ( save_ia32_fpstate_live(fpstate) < 0 )
err = -EFAULT;
else
err |= __put_user((u32)(u64)fpstate, &sc->fpstate);
#if 0
tmp = save_i387(fpstate);
if (tmp < 0)
err = 1;
else
err |= __put_user(tmp ? fpstate : NULL, &sc->fpstate);
/* non-iBCS2 extensions.. */
#endif
err |= __put_user(mask, &sc->oldmask);
#if 0
err |= __put_user(current->tss.cr2, &sc->cr2);
#endif
return err;
}
static int
restore_sigcontext_ia32 (struct pt_regs *regs, struct sigcontext_ia32 __user *sc, int *peax)
{
unsigned int err = 0;
/* Always make any pending restarted system calls return -EINTR */
current_thread_info()->restart_block.fn = do_no_restart_syscall;
if (!access_ok(VERIFY_READ, sc, sizeof(*sc)))
return(-EFAULT);
#define COPY(ia64x, ia32x) err |= __get_user(regs->ia64x, &sc->ia32x)
#define copyseg_gs(tmp) (regs->r16 |= (unsigned long) (tmp) << 48)
#define copyseg_fs(tmp) (regs->r16 |= (unsigned long) (tmp) << 32)
#define copyseg_cs(tmp) (regs->r17 |= tmp)
#define copyseg_ss(tmp) (regs->r17 |= (unsigned long) (tmp) << 16)
#define copyseg_es(tmp) (regs->r16 |= (unsigned long) (tmp) << 16)
#define copyseg_ds(tmp) (regs->r16 |= tmp)
#define COPY_SEG(seg) \
{ \
unsigned short tmp; \
err |= __get_user(tmp, &sc->seg); \
copyseg_##seg(tmp); \
}
#define COPY_SEG_STRICT(seg) \
{ \
unsigned short tmp; \
err |= __get_user(tmp, &sc->seg); \
copyseg_##seg(tmp|3); \
}
/* To make COPY_SEGs easier, we zero r16, r17 */
regs->r16 = 0;
regs->r17 = 0;
COPY_SEG(gs);
COPY_SEG(fs);
COPY_SEG(es);
COPY_SEG(ds);
COPY(r15, edi);
COPY(r14, esi);
COPY(r13, ebp);
COPY(r12, esp);
COPY(r11, ebx);
COPY(r10, edx);
COPY(r9, ecx);
COPY(cr_iip, eip);
COPY_SEG_STRICT(cs);
COPY_SEG_STRICT(ss);
ia32_load_segment_descriptors(current);
{
unsigned int tmpflags;
unsigned long flag;
/*
* IA32 `eflags' is not part of `pt_regs', it's in an ar register which
* is part of the thread context. Fortunately, we are executing in the
* IA32 process's context.
*/
err |= __get_user(tmpflags, &sc->eflags);
flag = ia64_getreg(_IA64_REG_AR_EFLAG);
flag &= ~0x40DD5;
flag |= (tmpflags & 0x40DD5);
ia64_setreg(_IA64_REG_AR_EFLAG, flag);
regs->r1 = -1; /* disable syscall checks, r1 is orig_eax */
}
{
struct _fpstate_ia32 __user *buf = NULL;
u32 fpstate_ptr;
err |= get_user(fpstate_ptr, &(sc->fpstate));
buf = compat_ptr(fpstate_ptr);
if (buf) {
err |= restore_ia32_fpstate_live(buf);
}
}
#if 0
{
struct _fpstate * buf;
err |= __get_user(buf, &sc->fpstate);
if (buf) {
if (!access_ok(VERIFY_READ, buf, sizeof(*buf)))
goto badframe;
err |= restore_i387(buf);
}
}
#endif
err |= __get_user(*peax, &sc->eax);
return err;
#if 0
badframe:
return 1;
#endif
}
/*
* Determine which stack to use..
*/
static inline void __user *
get_sigframe (struct k_sigaction *ka, struct pt_regs * regs, size_t frame_size)
{
unsigned long esp;
	/* Default to using the normal stack (truncate off the sign-extension of bit 31): */
esp = (unsigned int) regs->r12;
/* This is the X/Open sanctioned signal stack switching. */
if (ka->sa.sa_flags & SA_ONSTACK) {
int onstack = sas_ss_flags(esp);
if (onstack == 0)
esp = current->sas_ss_sp + current->sas_ss_size;
else if (onstack == SS_ONSTACK) {
/*
* If we are on the alternate signal stack and would
* overflow it, don't. Return an always-bogus address
* instead so we will die with SIGSEGV.
*/
if (!likely(on_sig_stack(esp - frame_size)))
return (void __user *) -1L;
}
}
/* Legacy stack switching not supported */
esp -= frame_size;
/* Align the stack pointer according to the i386 ABI,
* i.e. so that on function entry ((sp + 4) & 15) == 0. */
esp = ((esp + 4) & -16ul) - 4;
return (void __user *) esp;
}
static int
setup_frame_ia32 (int sig, struct k_sigaction *ka, sigset_t *set, struct pt_regs * regs)
{
struct exec_domain *ed = current_thread_info()->exec_domain;
struct sigframe_ia32 __user *frame;
int err = 0;
frame = get_sigframe(ka, regs, sizeof(*frame));
if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
goto give_sigsegv;
err |= __put_user((ed && ed->signal_invmap && sig < 32
? (int)(ed->signal_invmap[sig]) : sig), &frame->sig);
err |= setup_sigcontext_ia32(&frame->sc, &frame->fpstate, regs, set->sig[0]);
if (_COMPAT_NSIG_WORDS > 1)
err |= __copy_to_user(frame->extramask, (char *) &set->sig + 4,
sizeof(frame->extramask));
/* Set up to return from userspace. If provided, use a stub
already in userspace. */
if (ka->sa.sa_flags & SA_RESTORER) {
unsigned int restorer = IA32_SA_RESTORER(ka);
err |= __put_user(restorer, &frame->pretcode);
} else {
/* Pointing to restorer in ia32 gate page */
err |= __put_user(IA32_GATE_OFFSET, &frame->pretcode);
}
/* This is popl %eax ; movl $,%eax ; int $0x80
* and there for historical reasons only.
* See arch/i386/kernel/signal.c
*/
err |= __put_user(0xb858, (short __user *)(frame->retcode+0));
err |= __put_user(__IA32_NR_sigreturn, (int __user *)(frame->retcode+2));
err |= __put_user(0x80cd, (short __user *)(frame->retcode+6));
if (err)
goto give_sigsegv;
/* Set up registers for signal handler */
regs->r12 = (unsigned long) frame;
regs->cr_iip = IA32_SA_HANDLER(ka);
set_fs(USER_DS);
#if 0
regs->eflags &= ~TF_MASK;
#endif
#if 0
printk("SIG deliver (%s:%d): sig=%d sp=%p pc=%lx ra=%x\n",
current->comm, current->pid, sig, (void *) frame, regs->cr_iip, frame->pretcode);
#endif
return 1;
give_sigsegv:
force_sigsegv(sig, current);
return 0;
}
static int
setup_rt_frame_ia32 (int sig, struct k_sigaction *ka, siginfo_t *info,
sigset_t *set, struct pt_regs * regs)
{
struct exec_domain *ed = current_thread_info()->exec_domain;
compat_uptr_t pinfo, puc;
struct rt_sigframe_ia32 __user *frame;
int err = 0;
frame = get_sigframe(ka, regs, sizeof(*frame));
if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
goto give_sigsegv;
err |= __put_user((ed && ed->signal_invmap
&& sig < 32 ? ed->signal_invmap[sig] : sig), &frame->sig);
pinfo = (long __user) &frame->info;
puc = (long __user) &frame->uc;
err |= __put_user(pinfo, &frame->pinfo);
err |= __put_user(puc, &frame->puc);
err |= copy_siginfo_to_user32(&frame->info, info);
/* Create the ucontext. */
err |= __put_user(0, &frame->uc.uc_flags);
err |= __put_user(0, &frame->uc.uc_link);
err |= __put_user(current->sas_ss_sp, &frame->uc.uc_stack.ss_sp);
err |= __put_user(sas_ss_flags(regs->r12), &frame->uc.uc_stack.ss_flags);
err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
err |= setup_sigcontext_ia32(&frame->uc.uc_mcontext, &frame->fpstate, regs, set->sig[0]);
err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
if (err)
goto give_sigsegv;
/* Set up to return from userspace. If provided, use a stub
already in userspace. */
if (ka->sa.sa_flags & SA_RESTORER) {
unsigned int restorer = IA32_SA_RESTORER(ka);
err |= __put_user(restorer, &frame->pretcode);
} else {
/* Pointing to rt_restorer in ia32 gate page */
err |= __put_user(IA32_GATE_OFFSET + 8, &frame->pretcode);
}
/* This is movl $,%eax ; int $0x80
* and is there for historical reasons only.
* See arch/i386/kernel/signal.c
*/
err |= __put_user(0xb8, (char __user *)(frame->retcode+0));
err |= __put_user(__IA32_NR_rt_sigreturn, (int __user *)(frame->retcode+1));
err |= __put_user(0x80cd, (short __user *)(frame->retcode+5));
if (err)
goto give_sigsegv;
/* Set up registers for signal handler */
regs->r12 = (unsigned long) frame;
regs->cr_iip = IA32_SA_HANDLER(ka);
set_fs(USER_DS);
#if 0
regs->eflags &= ~TF_MASK;
#endif
#if 0
printk("SIG deliver (%s:%d): sp=%p pc=%lx ra=%x\n",
current->comm, current->pid, (void *) frame, regs->cr_iip, frame->pretcode);
#endif
return 1;
give_sigsegv:
force_sigsegv(sig, current);
return 0;
}
int
ia32_setup_frame1 (int sig, struct k_sigaction *ka, siginfo_t *info,
sigset_t *set, struct pt_regs *regs)
{
/* Set up the stack frame */
if (ka->sa.sa_flags & SA_SIGINFO)
return setup_rt_frame_ia32(sig, ka, info, set, regs);
else
return setup_frame_ia32(sig, ka, set, regs);
}
asmlinkage long
sys32_sigreturn (int arg0, int arg1, int arg2, int arg3, int arg4, int arg5,
int arg6, int arg7, struct pt_regs regs)
{
unsigned long esp = (unsigned int) regs.r12;
struct sigframe_ia32 __user *frame = (struct sigframe_ia32 __user *)(esp - 8);
sigset_t set;
int eax;
if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
goto badframe;
if (__get_user(set.sig[0], &frame->sc.oldmask)
|| (_COMPAT_NSIG_WORDS > 1 && __copy_from_user((char *) &set.sig + 4, &frame->extramask,
sizeof(frame->extramask))))
goto badframe;
sigdelsetmask(&set, ~_BLOCKABLE);
spin_lock_irq(&current->sighand->siglock);
current->blocked = set;
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
if (restore_sigcontext_ia32(&regs, &frame->sc, &eax))
goto badframe;
return eax;
badframe:
force_sig(SIGSEGV, current);
return 0;
}
asmlinkage long
sys32_rt_sigreturn (int arg0, int arg1, int arg2, int arg3, int arg4,
int arg5, int arg6, int arg7, struct pt_regs regs)
{
unsigned long esp = (unsigned int) regs.r12;
struct rt_sigframe_ia32 __user *frame = (struct rt_sigframe_ia32 __user *)(esp - 4);
sigset_t set;
int eax;
if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
goto badframe;
if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
goto badframe;
sigdelsetmask(&set, ~_BLOCKABLE);
spin_lock_irq(&current->sighand->siglock);
current->blocked = set;
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
if (restore_sigcontext_ia32(&regs, &frame->uc.uc_mcontext, &eax))
goto badframe;
/* It is more difficult to avoid calling this function than to
call it and ignore errors. */
do_sigaltstack((stack_t __user *) &frame->uc.uc_stack, NULL, esp);
return eax;
badframe:
force_sig(SIGSEGV, current);
return 0;
}
/*
* IA32 helper functions
*
* Copyright (C) 1999 Arun Sharma <arun.sharma@intel.com>
* Copyright (C) 2000 Asit K. Mallick <asit.k.mallick@intel.com>
* Copyright (C) 2001-2002 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
*
* 06/16/00 A. Mallick added csd/ssd/tssd for ia32 thread context
* 02/19/01 D. Mosberger dropped tssd; it's not needed
* 09/14/01 D. Mosberger fixed memory management for gdt/tss page
* 09/29/01 D. Mosberger added ia32_load_segment_descriptors()
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <asm/intrinsics.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/processor.h>
#include <asm/uaccess.h>
#include "ia32priv.h"
extern int die_if_kernel (char *str, struct pt_regs *regs, long err);
struct page *ia32_shared_page[NR_CPUS];
unsigned long *ia32_boot_gdt;
unsigned long *cpu_gdt_table[NR_CPUS];
struct page *ia32_gate_page;
static unsigned long
load_desc (u16 selector)
{
unsigned long *table, limit, index;
if (!selector)
return 0;
if (selector & IA32_SEGSEL_TI) {
table = (unsigned long *) IA32_LDT_OFFSET;
limit = IA32_LDT_ENTRIES;
} else {
table = cpu_gdt_table[smp_processor_id()];
limit = IA32_PAGE_SIZE / sizeof(ia32_boot_gdt[0]);
}
index = selector >> IA32_SEGSEL_INDEX_SHIFT;
if (index >= limit)
return 0;
return IA32_SEG_UNSCRAMBLE(table[index]);
}
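/*
* The 16-bit IA-32 selectors arrive packed four per 64-bit register:
* r16 holds DS (bits 0-15), ES (16-31), FS (32-47) and GS (48-63), while
* r17 holds CS (0-15), SS (16-31), LDT (32-47) and TSS (48-63); the top
* two r17 fields are installed by ia32_load_state() below. load_desc()
* unscrambles each selector's descriptor into r24/r27/r28/r29 and
* ar.csd/ar.ssd.
*/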
void
ia32_load_segment_descriptors (struct task_struct *task)
{
struct pt_regs *regs = task_pt_regs(task);
/* Setup the segment descriptors */
regs->r24 = load_desc(regs->r16 >> 16); /* ESD */
regs->r27 = load_desc(regs->r16 >> 0); /* DSD */
regs->r28 = load_desc(regs->r16 >> 32); /* FSD */
regs->r29 = load_desc(regs->r16 >> 48); /* GSD */
regs->ar_csd = load_desc(regs->r17 >> 0); /* CSD */
regs->ar_ssd = load_desc(regs->r17 >> 16); /* SSD */
}
int
ia32_clone_tls (struct task_struct *child, struct pt_regs *childregs)
{
struct desc_struct *desc;
struct ia32_user_desc info;
int idx;
if (copy_from_user(&info, (void __user *)(childregs->r14 & 0xffffffff), sizeof(info)))
return -EFAULT;
if (LDT_empty(&info))
return -EINVAL;
idx = info.entry_number;
if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
return -EINVAL;
desc = child->thread.tls_array + idx - GDT_ENTRY_TLS_MIN;
desc->a = LDT_entry_a(&info);
desc->b = LDT_entry_b(&info);
/* XXX: can this be done in a cleaner way? */
load_TLS(&child->thread, smp_processor_id());
ia32_load_segment_descriptors(child);
load_TLS(&current->thread, smp_processor_id());
return 0;
}
void
ia32_save_state (struct task_struct *t)
{
t->thread.eflag = ia64_getreg(_IA64_REG_AR_EFLAG);
t->thread.fsr = ia64_getreg(_IA64_REG_AR_FSR);
t->thread.fcr = ia64_getreg(_IA64_REG_AR_FCR);
t->thread.fir = ia64_getreg(_IA64_REG_AR_FIR);
t->thread.fdr = ia64_getreg(_IA64_REG_AR_FDR);
ia64_set_kr(IA64_KR_IO_BASE, t->thread.old_iob);
ia64_set_kr(IA64_KR_TSSD, t->thread.old_k1);
}
void
ia32_load_state (struct task_struct *t)
{
unsigned long eflag, fsr, fcr, fir, fdr, tssd;
struct pt_regs *regs = task_pt_regs(t);
eflag = t->thread.eflag;
fsr = t->thread.fsr;
fcr = t->thread.fcr;
fir = t->thread.fir;
fdr = t->thread.fdr;
tssd = load_desc(_TSS); /* TSSD */
ia64_setreg(_IA64_REG_AR_EFLAG, eflag);
ia64_setreg(_IA64_REG_AR_FSR, fsr);
ia64_setreg(_IA64_REG_AR_FCR, fcr);
ia64_setreg(_IA64_REG_AR_FIR, fir);
ia64_setreg(_IA64_REG_AR_FDR, fdr);
current->thread.old_iob = ia64_get_kr(IA64_KR_IO_BASE);
current->thread.old_k1 = ia64_get_kr(IA64_KR_TSSD);
ia64_set_kr(IA64_KR_IO_BASE, IA32_IOBASE);
ia64_set_kr(IA64_KR_TSSD, tssd);
regs->r17 = (_TSS << 48) | (_LDT << 32) | (__u32) regs->r17;
regs->r30 = load_desc(_LDT); /* LDTD */
load_TLS(&t->thread, smp_processor_id());
}
/*
* Setup this CPU's IA32 GDT as a copy of the boot CPU's
*/
void
ia32_gdt_init (void)
{
int cpu = smp_processor_id();
ia32_shared_page[cpu] = alloc_page(GFP_KERNEL);
if (!ia32_shared_page[cpu])
panic("failed to allocate ia32_shared_page[%d]\n", cpu);
cpu_gdt_table[cpu] = page_address(ia32_shared_page[cpu]);
/* Copy from the boot cpu's GDT */
memcpy(cpu_gdt_table[cpu], ia32_boot_gdt, PAGE_SIZE);
}
/*
* Setup the boot CPU's IA32 GDT, including the user CS/DS, TSS and LDT descriptors
*/
static void
ia32_boot_gdt_init (void)
{
unsigned long ldt_size;
ia32_shared_page[0] = alloc_page(GFP_KERNEL);
if (!ia32_shared_page[0])
panic("failed to allocate ia32_shared_page[0]\n");
ia32_boot_gdt = page_address(ia32_shared_page[0]);
cpu_gdt_table[0] = ia32_boot_gdt;
/* CS descriptor in IA-32 (scrambled) format */
ia32_boot_gdt[__USER_CS >> 3]
= IA32_SEG_DESCRIPTOR(0, (IA32_GATE_END-1) >> IA32_PAGE_SHIFT,
0xb, 1, 3, 1, 1, 1, 1);
/* DS descriptor in IA-32 (scrambled) format */
ia32_boot_gdt[__USER_DS >> 3]
= IA32_SEG_DESCRIPTOR(0, (IA32_GATE_END-1) >> IA32_PAGE_SHIFT,
0x3, 1, 3, 1, 1, 1, 1);
ldt_size = PAGE_ALIGN(IA32_LDT_ENTRIES*IA32_LDT_ENTRY_SIZE);
ia32_boot_gdt[TSS_ENTRY] = IA32_SEG_DESCRIPTOR(IA32_TSS_OFFSET, 235,
0xb, 0, 3, 1, 1, 1, 0);
ia32_boot_gdt[LDT_ENTRY] = IA32_SEG_DESCRIPTOR(IA32_LDT_OFFSET, ldt_size - 1,
0x2, 0, 3, 1, 1, 1, 0);
}
static void
ia32_gate_page_init(void)
{
unsigned long *sr;
ia32_gate_page = alloc_page(GFP_KERNEL);
sr = page_address(ia32_gate_page);
/* This is popl %eax ; movl $,%eax ; int $0x80 */
*sr++ = 0xb858 | (__IA32_NR_sigreturn << 16) | (0x80cdUL << 48);
/* This is movl $,%eax ; int $0x80 */
*sr = 0xb8 | (__IA32_NR_rt_sigreturn << 8) | (0x80cdUL << 40);
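/*
* These are the same little-endian byte sequences that setup_frame_ia32()
* and setup_rt_frame_ia32() copy into frame->retcode: the sigreturn stub
* occupies bytes 0-7 of the gate page and the rt_sigreturn stub starts at
* byte 8, matching the default pretcode values IA32_GATE_OFFSET and
* IA32_GATE_OFFSET + 8 used there.
*/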
}
void
ia32_mem_init(void)
{
ia32_boot_gdt_init();
ia32_gate_page_init();
}
/*
* Handle bad IA32 interrupt via syscall
*/
void
ia32_bad_interrupt (unsigned long int_num, struct pt_regs *regs)
{
siginfo_t siginfo;
if (die_if_kernel("Bad IA-32 interrupt", regs, int_num))
return;
siginfo.si_signo = SIGTRAP;
siginfo.si_errno = int_num; /* XXX is it OK to abuse si_errno like this? */
siginfo.si_flags = 0;
siginfo.si_isr = 0;
siginfo.si_addr = NULL;
siginfo.si_imm = 0;
siginfo.si_code = TRAP_BRKPT;
force_sig_info(SIGTRAP, &siginfo, current);
}
void
ia32_cpu_init (void)
{
/* initialize global ia32 state - CR0 and CR4 */
ia64_setreg(_IA64_REG_AR_CFLAG, (((ulong) IA32_CR4 << 32) | IA32_CR0));
}
static int __init
ia32_init (void)
{
#if PAGE_SHIFT > IA32_PAGE_SHIFT
{
extern struct kmem_cache *ia64_partial_page_cachep;
ia64_partial_page_cachep = kmem_cache_create("ia64_partial_page_cache",
sizeof(struct ia64_partial_page),
0, SLAB_PANIC, NULL);
}
#endif
return 0;
}
__initcall(ia32_init);
/*
* IA-32 exception handlers
*
* Copyright (C) 2000 Asit K. Mallick <asit.k.mallick@intel.com>
* Copyright (C) 2001-2002 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
*
* 06/16/00 A. Mallick added siginfo for most cases (close to IA32)
* 09/29/00 D. Mosberger added ia32_intercept()
*/
#include <linux/kernel.h>
#include <linux/sched.h>
#include "ia32priv.h"
#include <asm/intrinsics.h>
#include <asm/ptrace.h>
int
ia32_intercept (struct pt_regs *regs, unsigned long isr)
{
switch ((isr >> 16) & 0xff) {
case 0: /* Instruction intercept fault */
case 4: /* Locked Data reference fault */
case 1: /* Gate intercept trap */
return -1;
case 2: /* System flag trap */
if (((isr >> 14) & 0x3) >= 2) {
/* MOV SS, POP SS instructions */
ia64_psr(regs)->id = 1;
return 0;
} else
return -1;
}
return -1;
}
int
ia32_exception (struct pt_regs *regs, unsigned long isr)
{
struct siginfo siginfo;
/* initialize these fields to avoid leaking kernel bits to user space: */
siginfo.si_errno = 0;
siginfo.si_flags = 0;
siginfo.si_isr = 0;
siginfo.si_imm = 0;
switch ((isr >> 16) & 0xff) {
case 1:
case 2:
siginfo.si_signo = SIGTRAP;
if (isr == 0)
siginfo.si_code = TRAP_TRACE;
else if (isr & 0x4)
siginfo.si_code = TRAP_BRANCH;
else
siginfo.si_code = TRAP_BRKPT;
break;
case 3:
siginfo.si_signo = SIGTRAP;
siginfo.si_code = TRAP_BRKPT;
break;
case 0: /* Divide fault */
siginfo.si_signo = SIGFPE;
siginfo.si_code = FPE_INTDIV;
break;
case 4: /* Overflow */
case 5: /* Bounds fault */
siginfo.si_signo = SIGFPE;
siginfo.si_code = 0;
break;
case 6: /* Invalid Op-code */
siginfo.si_signo = SIGILL;
siginfo.si_code = ILL_ILLOPN;
break;
case 7: /* FP DNA */
case 8: /* Double Fault */
case 9: /* Invalid TSS */
case 11: /* Segment not present */
case 12: /* Stack fault */
case 13: /* General Protection Fault */
siginfo.si_signo = SIGSEGV;
siginfo.si_code = 0;
break;
case 16: /* Pending FP error */
{
unsigned long fsr, fcr;
fsr = ia64_getreg(_IA64_REG_AR_FSR);
fcr = ia64_getreg(_IA64_REG_AR_FCR);
siginfo.si_signo = SIGFPE;
/*
* (~fcr & fsr) masks out the exceptions whose traps are disabled in
* the control register. 0x3f covers the exception bits in these
* registers, 0x200 is the C1 bit you need in case of a stack fault,
* 0x040 is the stack fault bit. We should only be taking one
* exception at a time, so if this combination doesn't produce any
* single exception, then we have a bad program that isn't
* synchronizing its FPU usage and it will suffer the consequences,
* since we won't be able to fully reproduce the context of the
* exception.
*/
siginfo.si_isr = isr;
siginfo.si_flags = __ISR_VALID;
switch(((~fcr) & (fsr & 0x3f)) | (fsr & 0x240)) {
case 0x000:
default:
siginfo.si_code = 0;
break;
case 0x001: /* Invalid Op */
case 0x040: /* Stack Fault */
case 0x240: /* Stack Fault | Direction */
siginfo.si_code = FPE_FLTINV;
break;
case 0x002: /* Denormalize */
case 0x010: /* Underflow */
siginfo.si_code = FPE_FLTUND;
break;
case 0x004: /* Zero Divide */
siginfo.si_code = FPE_FLTDIV;
break;
case 0x008: /* Overflow */
siginfo.si_code = FPE_FLTOVF;
break;
case 0x020: /* Precision */
siginfo.si_code = FPE_FLTRES;
break;
}
break;
}
case 17: /* Alignment check */
siginfo.si_signo = SIGSEGV;
siginfo.si_code = BUS_ADRALN;
break;
case 19: /* SSE Numeric error */
siginfo.si_signo = SIGFPE;
siginfo.si_code = 0;
break;
default:
return -1;
}
force_sig_info(siginfo.si_signo, &siginfo, current);
return 0;
}
#ifndef _ASM_IA64_IA32_PRIV_H
#define _ASM_IA64_IA32_PRIV_H
#include <asm/ia32.h>
#ifdef CONFIG_IA32_SUPPORT
#include <linux/binfmts.h>
#include <linux/compat.h>
#include <linux/rbtree.h>
#include <asm/processor.h>
/*
* 32 bit structures for IA32 support.
*/
#define IA32_PAGE_SIZE (1UL << IA32_PAGE_SHIFT)
#define IA32_PAGE_MASK (~(IA32_PAGE_SIZE - 1))
#define IA32_PAGE_ALIGN(addr) (((addr) + IA32_PAGE_SIZE - 1) & IA32_PAGE_MASK)
#define IA32_CLOCKS_PER_SEC 100 /* Cast in stone for IA32 Linux */
/*
* partially mapped pages provide precise accounting of which 4k sub pages
* are mapped and which ones are not, thereby improving IA-32 compatibility.
*/
struct ia64_partial_page {
struct ia64_partial_page *next; /* linked list, sorted by address */
struct rb_node pp_rb;
/* 64K is the largest "normal" page supported by the ia64 ABI, so a
* 64-bit bitmap of 4K sub-pages (4K*64) suffices. */
unsigned long bitmap;
unsigned int base;
};
struct ia64_partial_page_list {
struct ia64_partial_page *pp_head; /* list head, points to the lowest
* addressed partial page */
struct rb_root ppl_rb;
struct ia64_partial_page *pp_hint; /* pp_hint->next is the last
* accessed partial page */
atomic_t pp_count; /* reference count */
};
#if PAGE_SHIFT > IA32_PAGE_SHIFT
struct ia64_partial_page_list* ia32_init_pp_list (void);
#else
# define ia32_init_pp_list() 0
#endif
/* sigcontext.h */
/*
* As documented in the iBCS2 standard..
*
* The first part of "struct _fpstate" is just the
* normal i387 hardware setup, the extra "status"
* word is used to save the coprocessor status word
* before entering the handler.
*/
struct _fpreg_ia32 {
unsigned short significand[4];
unsigned short exponent;
};
struct _fpxreg_ia32 {
unsigned short significand[4];
unsigned short exponent;
unsigned short padding[3];
};
struct _xmmreg_ia32 {
unsigned int element[4];
};
struct _fpstate_ia32 {
unsigned int cw,
sw,
tag,
ipoff,
cssel,
dataoff,
datasel;
struct _fpreg_ia32 _st[8];
unsigned short status;
unsigned short magic; /* 0xffff = regular FPU data only */
/* FXSR FPU environment */
unsigned int _fxsr_env[6]; /* FXSR FPU env is ignored */
unsigned int mxcsr;
unsigned int reserved;
struct _fpxreg_ia32 _fxsr_st[8]; /* FXSR FPU reg data is ignored */
struct _xmmreg_ia32 _xmm[8];
unsigned int padding[56];
};
struct sigcontext_ia32 {
unsigned short gs, __gsh;
unsigned short fs, __fsh;
unsigned short es, __esh;
unsigned short ds, __dsh;
unsigned int edi;
unsigned int esi;
unsigned int ebp;
unsigned int esp;
unsigned int ebx;
unsigned int edx;
unsigned int ecx;
unsigned int eax;
unsigned int trapno;
unsigned int err;
unsigned int eip;
unsigned short cs, __csh;
unsigned int eflags;
unsigned int esp_at_signal;
unsigned short ss, __ssh;
unsigned int fpstate; /* really (struct _fpstate_ia32 *) */
unsigned int oldmask;
unsigned int cr2;
};
/* user.h */
/*
* IA32 (Pentium III/4) FXSR, SSE support
*
* Provide support for the GDB 5.0+ PTRACE_{GET|SET}FPXREGS requests for
* interacting with the FXSR-format floating point environment. Floating
* point data can be accessed in the regular format in the usual manner,
* and both the standard and SIMD floating point data can be accessed via
* the new ptrace requests. In either case, changes to the FPU environment
* will be reflected in the task's state as expected.
*/
struct ia32_user_i387_struct {
int cwd;
int swd;
int twd;
int fip;
int fcs;
int foo;
int fos;
/* 8*10 bytes for each FP-reg = 80 bytes */
struct _fpreg_ia32 st_space[8];
};
struct ia32_user_fxsr_struct {
unsigned short cwd;
unsigned short swd;
unsigned short twd;
unsigned short fop;
int fip;
int fcs;
int foo;
int fos;
int mxcsr;
int reserved;
int st_space[32]; /* 8*16 bytes for each FP-reg = 128 bytes */
int xmm_space[32]; /* 8*16 bytes for each XMM-reg = 128 bytes */
int padding[56];
};
/* signal.h */
#define IA32_SET_SA_HANDLER(ka,handler,restorer) \
((ka)->sa.sa_handler = (__sighandler_t) \
(((unsigned long)(restorer) << 32) \
| ((handler) & 0xffffffff)))
#define IA32_SA_HANDLER(ka) ((unsigned long) (ka)->sa.sa_handler & 0xffffffff)
#define IA32_SA_RESTORER(ka) ((unsigned long) (ka)->sa.sa_handler >> 32)
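/*
* An IA-32 handler and its optional restorer are both 32-bit values, so
* the pair is packed into the single 64-bit sa_handler slot: handler in
* the low 32 bits, restorer in the high 32 bits, as the three macros
* above encode and decode.
*/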
#define __IA32_NR_sigreturn 119
#define __IA32_NR_rt_sigreturn 173
struct sigaction32 {
unsigned int sa_handler; /* Really a pointer, but need to deal with 32 bits */
unsigned int sa_flags;
unsigned int sa_restorer; /* Another 32 bit pointer */
compat_sigset_t sa_mask; /* A 32 bit mask */
};
struct old_sigaction32 {
unsigned int sa_handler; /* Really a pointer, but need to deal
with 32 bits */
compat_old_sigset_t sa_mask; /* A 32 bit mask */
unsigned int sa_flags;
unsigned int sa_restorer; /* Another 32 bit pointer */
};
typedef struct sigaltstack_ia32 {
unsigned int ss_sp;
int ss_flags;
unsigned int ss_size;
} stack_ia32_t;
struct ucontext_ia32 {
unsigned int uc_flags;
unsigned int uc_link;
stack_ia32_t uc_stack;
struct sigcontext_ia32 uc_mcontext;
sigset_t uc_sigmask; /* mask last for extensibility */
};
struct stat64 {
unsigned long long st_dev;
unsigned char __pad0[4];
unsigned int __st_ino;
unsigned int st_mode;
unsigned int st_nlink;
unsigned int st_uid;
unsigned int st_gid;
unsigned long long st_rdev;
unsigned char __pad3[4];
unsigned int st_size_lo;
unsigned int st_size_hi;
unsigned int st_blksize;
unsigned int st_blocks; /* Number of 512-byte blocks allocated. */
unsigned int __pad4; /* future possible st_blocks high bits */
unsigned int st_atime;
unsigned int st_atime_nsec;
unsigned int st_mtime;
unsigned int st_mtime_nsec;
unsigned int st_ctime;
unsigned int st_ctime_nsec;
unsigned int st_ino_lo;
unsigned int st_ino_hi;
};
typedef struct compat_siginfo {
int si_signo;
int si_errno;
int si_code;
union {
int _pad[((128/sizeof(int)) - 3)];
/* kill() */
struct {
unsigned int _pid; /* sender's pid */
unsigned int _uid; /* sender's uid */
} _kill;
/* POSIX.1b timers */
struct {
compat_timer_t _tid; /* timer id */
int _overrun; /* overrun count */
char _pad[sizeof(unsigned int) - sizeof(int)];
compat_sigval_t _sigval; /* same as below */
int _sys_private; /* not to be passed to user */
} _timer;
/* POSIX.1b signals */
struct {
unsigned int _pid; /* sender's pid */
unsigned int _uid; /* sender's uid */
compat_sigval_t _sigval;
} _rt;
/* SIGCHLD */
struct {
unsigned int _pid; /* which child */
unsigned int _uid; /* sender's uid */
int _status; /* exit code */
compat_clock_t _utime;
compat_clock_t _stime;
} _sigchld;
/* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
struct {
unsigned int _addr; /* faulting insn/memory ref. */
} _sigfault;
/* SIGPOLL */
struct {
int _band; /* POLL_IN, POLL_OUT, POLL_MSG */
int _fd;
} _sigpoll;
} _sifields;
} compat_siginfo_t;
/*
* IA-32 ELF specific definitions for IA-64.
*/
#define _ASM_IA64_ELF_H /* Don't include elf.h */
#include <linux/sched.h>
/*
* This is used to ensure we don't load something for the wrong architecture.
*/
#define elf_check_arch(x) ((x)->e_machine == EM_386)
/*
* These are used to set parameters in the core dumps.
*/
#define ELF_CLASS ELFCLASS32
#define ELF_DATA ELFDATA2LSB
#define ELF_ARCH EM_386
#define IA32_STACK_TOP IA32_PAGE_OFFSET
#define IA32_GATE_OFFSET IA32_PAGE_OFFSET
#define IA32_GATE_END (IA32_PAGE_OFFSET + PAGE_SIZE)
/*
* The system segments (GDT, TSS, LDT) have to be mapped below 4GB so the IA-32 engine can
* access them.
*/
#define IA32_GDT_OFFSET (IA32_PAGE_OFFSET + PAGE_SIZE)
#define IA32_TSS_OFFSET (IA32_PAGE_OFFSET + 2*PAGE_SIZE)
#define IA32_LDT_OFFSET (IA32_PAGE_OFFSET + 3*PAGE_SIZE)
#define ELF_EXEC_PAGESIZE IA32_PAGE_SIZE
/*
* This is the location that an ET_DYN program is loaded if exec'ed.
* Typical use of this is to invoke "./ld.so someprog" to test out a
* new version of the loader. We need to make sure that it is out of
* the way of the program that it will "exec", and that there is
* sufficient room for the brk.
*/
#define ELF_ET_DYN_BASE (IA32_PAGE_OFFSET/3 + 0x1000000)
void ia64_elf32_init(struct pt_regs *regs);
#define ELF_PLAT_INIT(_r, load_addr) ia64_elf32_init(_r)
/* This macro yields a bitmask that programs can use to figure out
what instruction set this CPU supports. */
#define ELF_HWCAP 0
/* This macro yields a string that ld.so will use to load
implementation specific libraries for optimization. Not terribly
relevant until we have real hardware to play with... */
#define ELF_PLATFORM NULL
#ifdef __KERNEL__
# define SET_PERSONALITY(EX) \
(current->personality = PER_LINUX)
#endif
#define IA32_EFLAG 0x200
/*
* IA-32 segmentation definitions for IA-64.
*/
#define __USER_CS 0x23
#define __USER_DS 0x2B
/*
* The per-cpu GDT has 32 entries: see <asm-i386/segment.h>
*/
#define GDT_ENTRIES 32
#define GDT_SIZE (GDT_ENTRIES * 8)
#define TSS_ENTRY 14
#define LDT_ENTRY (TSS_ENTRY + 1)
#define IA32_SEGSEL_RPL (0x3 << 0)
#define IA32_SEGSEL_TI (0x1 << 2)
#define IA32_SEGSEL_INDEX_SHIFT 3
#define _TSS ((unsigned long) TSS_ENTRY << IA32_SEGSEL_INDEX_SHIFT)
#define _LDT ((unsigned long) LDT_ENTRY << IA32_SEGSEL_INDEX_SHIFT)
#define IA32_SEG_BASE 16
#define IA32_SEG_TYPE 40
#define IA32_SEG_SYS 44
#define IA32_SEG_DPL 45
#define IA32_SEG_P 47
#define IA32_SEG_HIGH_LIMIT 48
#define IA32_SEG_AVL 52
#define IA32_SEG_DB 54
#define IA32_SEG_G 55
#define IA32_SEG_HIGH_BASE 56
#define IA32_SEG_DESCRIPTOR(base, limit, segtype, nonsysseg, dpl, segpresent, avl, segdb, gran) \
(((limit) & 0xffff) \
| (((unsigned long) (base) & 0xffffff) << IA32_SEG_BASE) \
| ((unsigned long) (segtype) << IA32_SEG_TYPE) \
| ((unsigned long) (nonsysseg) << IA32_SEG_SYS) \
| ((unsigned long) (dpl) << IA32_SEG_DPL) \
| ((unsigned long) (segpresent) << IA32_SEG_P) \
| ((((unsigned long) (limit) >> 16) & 0xf) << IA32_SEG_HIGH_LIMIT) \
| ((unsigned long) (avl) << IA32_SEG_AVL) \
| ((unsigned long) (segdb) << IA32_SEG_DB) \
| ((unsigned long) (gran) << IA32_SEG_G) \
| ((((unsigned long) (base) >> 24) & 0xff) << IA32_SEG_HIGH_BASE))
#define SEG_LIM 32
#define SEG_TYPE 52
#define SEG_SYS 56
#define SEG_DPL 57
#define SEG_P 59
#define SEG_AVL 60
#define SEG_DB 62
#define SEG_G 63
/* Unscramble an IA-32 segment descriptor into the IA-64 format. */
#define IA32_SEG_UNSCRAMBLE(sd) \
( (((sd) >> IA32_SEG_BASE) & 0xffffff) | ((((sd) >> IA32_SEG_HIGH_BASE) & 0xff) << 24) \
| ((((sd) & 0xffff) | ((((sd) >> IA32_SEG_HIGH_LIMIT) & 0xf) << 16)) << SEG_LIM) \
| ((((sd) >> IA32_SEG_TYPE) & 0xf) << SEG_TYPE) \
| ((((sd) >> IA32_SEG_SYS) & 0x1) << SEG_SYS) \
| ((((sd) >> IA32_SEG_DPL) & 0x3) << SEG_DPL) \
| ((((sd) >> IA32_SEG_P) & 0x1) << SEG_P) \
| ((((sd) >> IA32_SEG_AVL) & 0x1) << SEG_AVL) \
| ((((sd) >> IA32_SEG_DB) & 0x1) << SEG_DB) \
| ((((sd) >> IA32_SEG_G) & 0x1) << SEG_G))
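/*
* "Scrambled" here is the literal IA-32 hardware descriptor layout, with
* base and limit split across non-contiguous bit fields as built by
* IA32_SEG_DESCRIPTOR() above; "unscrambled" is the flat IA-64 layout
* (base in bits 0-31, limit in bits 32-51, attributes at the SEG_*
* positions) that load_desc() places in ar.csd/ar.ssd and the descriptor
* registers.
*/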
#define IA32_IOBASE 0x2000000000000000UL /* Virtual address for I/O space */
#define IA32_CR0 0x80000001 /* Enable PG and PE bits */
#define IA32_CR4 0x600 /* MMXEX and FXSR on */
/*
* IA32 floating point control registers starting values
*/
#define IA32_FSR_DEFAULT 0x55550000 /* set all tag bits */
#define IA32_FCR_DEFAULT 0x17800000037fUL /* extended precision, all masks */
#define IA32_PTRACE_GETREGS 12
#define IA32_PTRACE_SETREGS 13
#define IA32_PTRACE_GETFPREGS 14
#define IA32_PTRACE_SETFPREGS 15
#define IA32_PTRACE_GETFPXREGS 18
#define IA32_PTRACE_SETFPXREGS 19
#define ia32_start_thread(regs,new_ip,new_sp) do { \
set_fs(USER_DS); \
ia64_psr(regs)->cpl = 3; /* set user mode */ \
ia64_psr(regs)->ri = 0; /* clear return slot number */ \
ia64_psr(regs)->is = 1; /* IA-32 instruction set */ \
regs->cr_iip = new_ip; \
regs->ar_rsc = 0xc; /* enforced lazy mode, priv. level 3 */ \
regs->ar_rnat = 0; \
regs->loadrs = 0; \
regs->r12 = new_sp; \
} while (0)
/*
* Local Descriptor Table (LDT) related declarations.
*/
#define IA32_LDT_ENTRIES 8192 /* Maximum number of LDT entries supported. */
#define IA32_LDT_ENTRY_SIZE 8 /* The size of each LDT entry. */
#define LDT_entry_a(info) \
((((info)->base_addr & 0x0000ffff) << 16) | ((info)->limit & 0x0ffff))
#define LDT_entry_b(info) \
(((info)->base_addr & 0xff000000) | \
(((info)->base_addr & 0x00ff0000) >> 16) | \
((info)->limit & 0xf0000) | \
(((info)->read_exec_only ^ 1) << 9) | \
((info)->contents << 10) | \
(((info)->seg_not_present ^ 1) << 15) | \
((info)->seg_32bit << 22) | \
((info)->limit_in_pages << 23) | \
((info)->useable << 20) | \
0x7100)
#define LDT_empty(info) ( \
(info)->base_addr == 0 && \
(info)->limit == 0 && \
(info)->contents == 0 && \
(info)->read_exec_only == 1 && \
(info)->seg_32bit == 0 && \
(info)->limit_in_pages == 0 && \
(info)->seg_not_present == 1 && \
(info)->useable == 0 )
static inline void
load_TLS (struct thread_struct *t, unsigned int cpu)
{
extern unsigned long *cpu_gdt_table[NR_CPUS];
memcpy(cpu_gdt_table[cpu] + GDT_ENTRY_TLS_MIN + 0, &t->tls_array[0], sizeof(long));
memcpy(cpu_gdt_table[cpu] + GDT_ENTRY_TLS_MIN + 1, &t->tls_array[1], sizeof(long));
memcpy(cpu_gdt_table[cpu] + GDT_ENTRY_TLS_MIN + 2, &t->tls_array[2], sizeof(long));
}
struct ia32_user_desc {
unsigned int entry_number;
unsigned int base_addr;
unsigned int limit;
unsigned int seg_32bit:1;
unsigned int contents:2;
unsigned int read_exec_only:1;
unsigned int limit_in_pages:1;
unsigned int seg_not_present:1;
unsigned int useable:1;
};
struct linux_binprm;
extern void ia32_init_addr_space (struct pt_regs *regs);
extern int ia32_setup_arg_pages (struct linux_binprm *bprm, int exec_stack);
extern unsigned long ia32_do_mmap (struct file *, unsigned long, unsigned long, int, int, loff_t);
extern void ia32_load_segment_descriptors (struct task_struct *task);
#define ia32f2ia64f(dst,src) \
do { \
ia64_ldfe(6,src); \
ia64_stop(); \
ia64_stf_spill(dst, 6); \
} while(0)
#define ia64f2ia32f(dst,src) \
do { \
ia64_ldf_fill(6, src); \
ia64_stop(); \
ia64_stfe(dst, 6); \
} while(0)
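/*
* The two macros above convert between the 10-byte IA-32 extended-real
* memory format and the 16-byte IA-64 register spill format, bouncing
* the value through scratch register f6: ldfe/stfe move the 80-bit
* form, ldf.fill/stf.spill the spilled form.
*/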
struct user_regs_struct32 {
__u32 ebx, ecx, edx, esi, edi, ebp, eax;
unsigned short ds, __ds, es, __es;
unsigned short fs, __fs, gs, __gs;
__u32 orig_eax, eip;
unsigned short cs, __cs;
__u32 eflags, esp;
unsigned short ss, __ss;
};
/* Prototypes for use in elfcore32.h */
extern int save_ia32_fpstate (struct task_struct *, struct ia32_user_i387_struct __user *);
extern int save_ia32_fpxstate (struct task_struct *, struct ia32_user_fxsr_struct __user *);
#endif /* !CONFIG_IA32_SUPPORT */
#endif /* _ASM_IA64_IA32_PRIV_H */
/*
* sys_ia32.c: Conversion between 32bit and 64bit native syscalls. Derived from sys_sparc32.c.
*
* Copyright (C) 2000 VA Linux Co
* Copyright (C) 2000 Don Dugger <n0ano@valinux.com>
* Copyright (C) 1999 Arun Sharma <arun.sharma@intel.com>
* Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
* Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
* Copyright (C) 2000-2003, 2005 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
* Copyright (C) 2004 Gordon Jin <gordon.jin@intel.com>
*
* These routines maintain argument size conversion between 32bit and 64bit
* environment.
*/
#include <linux/kernel.h>
#include <linux/syscalls.h>
#include <linux/sysctl.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/signal.h>
#include <linux/resource.h>
#include <linux/times.h>
#include <linux/utsname.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/mm.h>
#include <linux/shm.h>
#include <linux/slab.h>
#include <linux/uio.h>
#include <linux/socket.h>
#include <linux/quota.h>
#include <linux/poll.h>
#include <linux/eventpoll.h>
#include <linux/personality.h>
#include <linux/ptrace.h>
#include <linux/regset.h>
#include <linux/stat.h>
#include <linux/ipc.h>
#include <linux/capability.h>
#include <linux/compat.h>
#include <linux/vfs.h>
#include <linux/mman.h>
#include <linux/mutex.h>
#include <asm/intrinsics.h>
#include <asm/types.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include "ia32priv.h"
#include <net/scm.h>
#include <net/sock.h>
#define DEBUG 0
#if DEBUG
# define DBG(fmt...) printk(KERN_DEBUG fmt)
#else
# define DBG(fmt...)
#endif
#define ROUND_UP(x,a) ((__typeof__(x))(((unsigned long)(x) + ((a) - 1)) & ~((a) - 1)))
#define OFFSET4K(a) ((a) & 0xfff)
#define PAGE_START(addr) ((addr) & PAGE_MASK)
#define MINSIGSTKSZ_IA32 2048
#define high2lowuid(uid) ((uid) > 65535 ? 65534 : (uid))
#define high2lowgid(gid) ((gid) > 65535 ? 65534 : (gid))
/*
* Anything that modifies or inspects ia32 user virtual memory must hold this mutex
* while doing so.
*/
/* XXX make per-mm: */
static DEFINE_MUTEX(ia32_mmap_mutex);
asmlinkage long
sys32_execve (char __user *name, compat_uptr_t __user *argv, compat_uptr_t __user *envp,
struct pt_regs *regs)
{
long error;
char *filename;
unsigned long old_map_base, old_task_size, tssd;
filename = getname(name);
error = PTR_ERR(filename);
if (IS_ERR(filename))
return error;
old_map_base = current->thread.map_base;
old_task_size = current->thread.task_size;
tssd = ia64_get_kr(IA64_KR_TSSD);
/* we may be exec'ing a 64-bit process: reset map base, task-size, and io-base: */
current->thread.map_base = DEFAULT_MAP_BASE;
current->thread.task_size = DEFAULT_TASK_SIZE;
ia64_set_kr(IA64_KR_IO_BASE, current->thread.old_iob);
ia64_set_kr(IA64_KR_TSSD, current->thread.old_k1);
error = compat_do_execve(filename, argv, envp, regs);
putname(filename);
if (error < 0) {
/* oops, execve failed, switch back to old values... */
ia64_set_kr(IA64_KR_IO_BASE, IA32_IOBASE);
ia64_set_kr(IA64_KR_TSSD, tssd);
current->thread.map_base = old_map_base;
current->thread.task_size = old_task_size;
}
return error;
}
#if PAGE_SHIFT > IA32_PAGE_SHIFT
static int
get_page_prot (struct vm_area_struct *vma, unsigned long addr)
{
int prot = 0;
if (!vma || vma->vm_start > addr)
return 0;
if (vma->vm_flags & VM_READ)
prot |= PROT_READ;
if (vma->vm_flags & VM_WRITE)
prot |= PROT_WRITE;
if (vma->vm_flags & VM_EXEC)
prot |= PROT_EXEC;
return prot;
}
/*
* Map a subpage by creating an anonymous page that contains the union of the old page and
* the subpage.
*/
static unsigned long
mmap_subpage (struct file *file, unsigned long start, unsigned long end, int prot, int flags,
loff_t off)
{
void *page = NULL;
struct inode *inode;
unsigned long ret = 0;
struct vm_area_struct *vma = find_vma(current->mm, start);
int old_prot = get_page_prot(vma, start);
DBG("mmap_subpage(file=%p,start=0x%lx,end=0x%lx,prot=%x,flags=%x,off=0x%llx)\n",
file, start, end, prot, flags, off);
/* Optimize the case where the old mmap and the new mmap are both anonymous */
if ((old_prot & PROT_WRITE) && (flags & MAP_ANONYMOUS) && !vma->vm_file) {
if (clear_user((void __user *) start, end - start)) {
ret = -EFAULT;
goto out;
}
goto skip_mmap;
}
page = (void *) get_zeroed_page(GFP_KERNEL);
if (!page)
return -ENOMEM;
if (old_prot)
copy_from_user(page, (void __user *) PAGE_START(start), PAGE_SIZE);
down_write(&current->mm->mmap_sem);
{
ret = do_mmap(NULL, PAGE_START(start), PAGE_SIZE, prot | PROT_WRITE,
flags | MAP_FIXED | MAP_ANONYMOUS, 0);
}
up_write(&current->mm->mmap_sem);
if (IS_ERR((void *) ret))
goto out;
if (old_prot) {
/* copy back the old page contents. */
if (offset_in_page(start))
copy_to_user((void __user *) PAGE_START(start), page,
offset_in_page(start));
if (offset_in_page(end))
copy_to_user((void __user *) end, page + offset_in_page(end),
PAGE_SIZE - offset_in_page(end));
}
if (!(flags & MAP_ANONYMOUS)) {
/* read the file contents */
inode = file->f_path.dentry->d_inode;
if (!inode->i_fop || !file->f_op->read
|| ((*file->f_op->read)(file, (char __user *) start, end - start, &off) < 0))
{
ret = -EINVAL;
goto out;
}
}
skip_mmap:
if (!(prot & PROT_WRITE))
ret = sys_mprotect(PAGE_START(start), PAGE_SIZE, prot | old_prot);
out:
if (page)
free_page((unsigned long) page);
return ret;
}
/* SLAB cache for ia64_partial_page structures */
struct kmem_cache *ia64_partial_page_cachep;
/*
* Initialize an ia64_partial_page_list.
* Returns NULL if the kmalloc fails.
*/
struct ia64_partial_page_list*
ia32_init_pp_list(void)
{
struct ia64_partial_page_list *p;
if ((p = kmalloc(sizeof(*p), GFP_KERNEL)) == NULL)
return p;
p->pp_head = NULL;
p->ppl_rb = RB_ROOT;
p->pp_hint = NULL;
atomic_set(&p->pp_count, 1);
return p;
}
/*
* Search for the partial page with @start in partial page list @ppl.
* If it finds the partial page, return it.
* Otherwise return NULL and set @pprev, @rb_link and @rb_parent for
* use by a later __ia32_insert_pp().
*/
static struct ia64_partial_page *
__ia32_find_pp(struct ia64_partial_page_list *ppl, unsigned int start,
struct ia64_partial_page **pprev, struct rb_node ***rb_link,
struct rb_node **rb_parent)
{
struct ia64_partial_page *pp;
struct rb_node **__rb_link, *__rb_parent, *rb_prev;
pp = ppl->pp_hint;
if (pp && pp->base == start)
return pp;
__rb_link = &ppl->ppl_rb.rb_node;
rb_prev = __rb_parent = NULL;
while (*__rb_link) {
__rb_parent = *__rb_link;
pp = rb_entry(__rb_parent, struct ia64_partial_page, pp_rb);
if (pp->base == start) {
ppl->pp_hint = pp;
return pp;
} else if (pp->base < start) {
rb_prev = __rb_parent;
__rb_link = &__rb_parent->rb_right;
} else {
__rb_link = &__rb_parent->rb_left;
}
}
*rb_link = __rb_link;
*rb_parent = __rb_parent;
*pprev = NULL;
if (rb_prev)
*pprev = rb_entry(rb_prev, struct ia64_partial_page, pp_rb);
return NULL;
}
/*
* insert @pp into @ppl.
*/
static void
__ia32_insert_pp(struct ia64_partial_page_list *ppl,
struct ia64_partial_page *pp, struct ia64_partial_page *prev,
struct rb_node **rb_link, struct rb_node *rb_parent)
{
/* link list */
if (prev) {
pp->next = prev->next;
prev->next = pp;
} else {
ppl->pp_head = pp;
if (rb_parent)
pp->next = rb_entry(rb_parent,
struct ia64_partial_page, pp_rb);
else
pp->next = NULL;
}
/* link rb */
rb_link_node(&pp->pp_rb, rb_parent, rb_link);
rb_insert_color(&pp->pp_rb, &ppl->ppl_rb);
ppl->pp_hint = pp;
}
/*
* delete @pp from partial page list @ppl.
*/
static void
__ia32_delete_pp(struct ia64_partial_page_list *ppl,
struct ia64_partial_page *pp, struct ia64_partial_page *prev)
{
if (prev) {
prev->next = pp->next;
if (ppl->pp_hint == pp)
ppl->pp_hint = prev;
} else {
ppl->pp_head = pp->next;
if (ppl->pp_hint == pp)
ppl->pp_hint = pp->next;
}
rb_erase(&pp->pp_rb, &ppl->ppl_rb);
kmem_cache_free(ia64_partial_page_cachep, pp);
}
static struct ia64_partial_page *
__pp_prev(struct ia64_partial_page *pp)
{
struct rb_node *prev = rb_prev(&pp->pp_rb);
if (prev)
return rb_entry(prev, struct ia64_partial_page, pp_rb);
else
return NULL;
}
/*
* Delete partial pages with address between @start and @end.
* @start and @end are page aligned.
*/
static void
__ia32_delete_pp_range(unsigned int start, unsigned int end)
{
struct ia64_partial_page *pp, *prev;
struct rb_node **rb_link, *rb_parent;
if (start >= end)
return;
pp = __ia32_find_pp(current->thread.ppl, start, &prev,
&rb_link, &rb_parent);
if (pp)
prev = __pp_prev(pp);
else {
if (prev)
pp = prev->next;
else
pp = current->thread.ppl->pp_head;
}
while (pp && pp->base < end) {
struct ia64_partial_page *tmp = pp->next;
__ia32_delete_pp(current->thread.ppl, pp, prev);
pp = tmp;
}
}
/*
* Set the range between @start and @end in bitmap.
* @start and @end should be IA32 page aligned and in the same IA64 page.
*/
static int
__ia32_set_pp(unsigned int start, unsigned int end, int flags)
{
struct ia64_partial_page *pp, *prev;
struct rb_node ** rb_link, *rb_parent;
unsigned int pstart, start_bit, end_bit, i;
pstart = PAGE_START(start);
start_bit = (start % PAGE_SIZE) / IA32_PAGE_SIZE;
end_bit = (end % PAGE_SIZE) / IA32_PAGE_SIZE;
if (end_bit == 0)
end_bit = PAGE_SIZE / IA32_PAGE_SIZE;
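/*
* Worked example, assuming 16KB IA-64 pages (PAGE_SIZE/IA32_PAGE_SIZE
* == 4): start = pstart + 0x1000 and end = pstart + 0x3000 yield
* start_bit = 1 and end_bit = 3, so bits 1 and 2 (the 2nd and 3rd 4KB
* sub-pages) get marked; an @end on the IA-64 page boundary wraps to
* end_bit == 0 and was fixed up to 4 above.
*/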
pp = __ia32_find_pp(current->thread.ppl, pstart, &prev,
&rb_link, &rb_parent);
if (pp) {
for (i = start_bit; i < end_bit; i++)
set_bit(i, &pp->bitmap);
/*
* Check: if this partial page has been set to a full page,
* then delete it.
*/
if (find_first_zero_bit(&pp->bitmap, sizeof(pp->bitmap)*8) >=
PAGE_SIZE/IA32_PAGE_SIZE) {
__ia32_delete_pp(current->thread.ppl, pp, __pp_prev(pp));
}
return 0;
}
/*
* MAP_FIXED may lead to overlapping mmap.
* In this case, the requested mmap area may already be mmapped as a
* full page. So check the vma before adding a new partial page.
*/
if (flags & MAP_FIXED) {
struct vm_area_struct *vma = find_vma(current->mm, pstart);
if (vma && vma->vm_start <= pstart)
return 0;
}
/* allocate a new ia64_partial_page */
pp = kmem_cache_alloc(ia64_partial_page_cachep, GFP_KERNEL);
if (!pp)
return -ENOMEM;
pp->base = pstart;
pp->bitmap = 0;
for (i=start_bit; i<end_bit; i++)
set_bit(i, &(pp->bitmap));
pp->next = NULL;
__ia32_insert_pp(current->thread.ppl, pp, prev, rb_link, rb_parent);
return 0;
}
/*
* @start and @end should be IA32 page aligned, but don't need to be in the
* same IA64 page. Split @start and @end to make sure they're in the same IA64
* page, then call __ia32_set_pp().
*/
static void
ia32_set_pp(unsigned int start, unsigned int end, int flags)
{
down_write(&current->mm->mmap_sem);
if (flags & MAP_FIXED) {
/*
* MAP_FIXED may lead to overlapping mmap. When this happens,
* any complete IA64 pages in that range have their old partial
* pages deleted.
*/
__ia32_delete_pp_range(PAGE_ALIGN(start), PAGE_START(end));
}
if (end < PAGE_ALIGN(start)) {
__ia32_set_pp(start, end, flags);
} else {
if (offset_in_page(start))
__ia32_set_pp(start, PAGE_ALIGN(start), flags);
if (offset_in_page(end))
__ia32_set_pp(PAGE_START(end), end, flags);
}
up_write(&current->mm->mmap_sem);
}
/*
* Unset the range between @start and @end in bitmap.
* @start and @end should be IA32 page aligned and in the same IA64 page.
* After doing so, if the bitmap becomes 0, free the partial page and
* return 1; otherwise return 0.
* If the partial page is not found in the list:
* if the vma exists, record the full page as a partial page and
* return 0; else return -ENOMEM.
*/
static int
__ia32_unset_pp(unsigned int start, unsigned int end)
{
struct ia64_partial_page *pp, *prev;
struct rb_node ** rb_link, *rb_parent;
unsigned int pstart, start_bit, end_bit, i;
struct vm_area_struct *vma;
pstart = PAGE_START(start);
start_bit = (start % PAGE_SIZE) / IA32_PAGE_SIZE;
end_bit = (end % PAGE_SIZE) / IA32_PAGE_SIZE;
if (end_bit == 0)
end_bit = PAGE_SIZE / IA32_PAGE_SIZE;
pp = __ia32_find_pp(current->thread.ppl, pstart, &prev,
&rb_link, &rb_parent);
if (pp) {
for (i = start_bit; i < end_bit; i++)
clear_bit(i, &pp->bitmap);
if (pp->bitmap == 0) {
__ia32_delete_pp(current->thread.ppl, pp, __pp_prev(pp));
return 1;
}
return 0;
}
vma = find_vma(current->mm, pstart);
if (!vma || vma->vm_start > pstart) {
return -ENOMEM;
}
/* allocate a new ia64_partial_page */
pp = kmem_cache_alloc(ia64_partial_page_cachep, GFP_KERNEL);
if (!pp)
return -ENOMEM;
pp->base = pstart;
pp->bitmap = 0;
for (i = 0; i < start_bit; i++)
set_bit(i, &(pp->bitmap));
for (i = end_bit; i < PAGE_SIZE / IA32_PAGE_SIZE; i++)
set_bit(i, &(pp->bitmap));
pp->next = NULL;
__ia32_insert_pp(current->thread.ppl, pp, prev, rb_link, rb_parent);
return 0;
}
/*
* Delete the partial pages between PAGE_ALIGN(start) and PAGE_START(end)
* by calling __ia32_delete_pp_range(), then unset any remaining partial
* pages by calling __ia32_unset_pp().
* For the return value, see __ia32_unset_pp().
*/
static int
ia32_unset_pp(unsigned int *startp, unsigned int *endp)
{
unsigned int start = *startp, end = *endp;
int ret = 0;
down_write(&current->mm->mmap_sem);
__ia32_delete_pp_range(PAGE_ALIGN(start), PAGE_START(end));
if (end < PAGE_ALIGN(start)) {
ret = __ia32_unset_pp(start, end);
if (ret == 1) {
*startp = PAGE_START(start);
*endp = PAGE_ALIGN(end);
}
if (ret == 0) {
/* to shortcut sys_munmap() in sys32_munmap() */
*startp = PAGE_START(start);
*endp = PAGE_START(end);
}
} else {
if (offset_in_page(start)) {
ret = __ia32_unset_pp(start, PAGE_ALIGN(start));
if (ret == 1)
*startp = PAGE_START(start);
if (ret == 0)
*startp = PAGE_ALIGN(start);
if (ret < 0)
goto out;
}
if (offset_in_page(end)) {
ret = __ia32_unset_pp(PAGE_START(end), end);
if (ret == 1)
*endp = PAGE_ALIGN(end);
if (ret == 0)
*endp = PAGE_START(end);
}
}
out:
up_write(&current->mm->mmap_sem);
return ret;
}
/*
* Compare the range between @start and @end with bitmap in partial page.
* @start and @end should be IA32 page aligned and in the same IA64 page.
*/
static int
__ia32_compare_pp(unsigned int start, unsigned int end)
{
struct ia64_partial_page *pp, *prev;
struct rb_node ** rb_link, *rb_parent;
unsigned int pstart, start_bit, end_bit, size;
unsigned int first_bit, next_zero_bit; /* the first range in bitmap */
pstart = PAGE_START(start);
pp = __ia32_find_pp(current->thread.ppl, pstart, &prev,
&rb_link, &rb_parent);
if (!pp)
return 1;
start_bit = (start % PAGE_SIZE) / IA32_PAGE_SIZE;
end_bit = (end % PAGE_SIZE) / IA32_PAGE_SIZE;
size = sizeof(pp->bitmap) * 8;
first_bit = find_first_bit(&pp->bitmap, size);
next_zero_bit = find_next_zero_bit(&pp->bitmap, size, first_bit);
if ((start_bit < first_bit) || (end_bit > next_zero_bit)) {
/* exceeds the first range in bitmap */
return -ENOMEM;
} else if ((start_bit == first_bit) && (end_bit == next_zero_bit)) {
first_bit = find_next_bit(&pp->bitmap, size, next_zero_bit);
if ((next_zero_bit < first_bit) && (first_bit < size))
return 1; /* has next range */
else
return 0; /* no next range */
} else
return 1;
}
/*
* @start and @end should be IA32 page aligned, but don't need to be in the
* same IA64 page. Split @start and @end to make sure they're in the same IA64
* page, then call __ia32_compare_pp().
*
* Take this as an example: the range covers the 1st and 2nd 4K pages.
* Return 0 if they fit bitmap exactly, i.e. bitmap = 00000011;
* Return 1 if the range doesn't cover whole bitmap, e.g. bitmap = 00001111;
* Return -ENOMEM if the range exceeds the bitmap, e.g. bitmap = 00000001 or
* bitmap = 00000101.
*/
static int
ia32_compare_pp(unsigned int *startp, unsigned int *endp)
{
unsigned int start = *startp, end = *endp;
int retval = 0;
down_write(&current->mm->mmap_sem);
if (end < PAGE_ALIGN(start)) {
retval = __ia32_compare_pp(start, end);
if (retval == 0) {
*startp = PAGE_START(start);
*endp = PAGE_ALIGN(end);
}
} else {
if (offset_in_page(start)) {
retval = __ia32_compare_pp(start,
PAGE_ALIGN(start));
if (retval == 0)
*startp = PAGE_START(start);
if (retval < 0)
goto out;
}
if (offset_in_page(end)) {
retval = __ia32_compare_pp(PAGE_START(end), end);
if (retval == 0)
*endp = PAGE_ALIGN(end);
}
}
out:
up_write(&current->mm->mmap_sem);
return retval;
}
static void
__ia32_drop_pp_list(struct ia64_partial_page_list *ppl)
{
struct ia64_partial_page *pp = ppl->pp_head;
while (pp) {
struct ia64_partial_page *next = pp->next;
kmem_cache_free(ia64_partial_page_cachep, pp);
pp = next;
}
kfree(ppl);
}
void
ia32_drop_ia64_partial_page_list(struct task_struct *task)
{
struct ia64_partial_page_list* ppl = task->thread.ppl;
if (ppl && atomic_dec_and_test(&ppl->pp_count))
__ia32_drop_pp_list(ppl);
}
/*
* Copy current->thread.ppl to ppl (already initialized).
*/
static int
__ia32_copy_pp_list(struct ia64_partial_page_list *ppl)
{
struct ia64_partial_page *pp, *tmp, *prev;
struct rb_node **rb_link, *rb_parent;
ppl->pp_head = NULL;
ppl->pp_hint = NULL;
ppl->ppl_rb = RB_ROOT;
rb_link = &ppl->ppl_rb.rb_node;
rb_parent = NULL;
prev = NULL;
for (pp = current->thread.ppl->pp_head; pp; pp = pp->next) {
tmp = kmem_cache_alloc(ia64_partial_page_cachep, GFP_KERNEL);
if (!tmp)
return -ENOMEM;
*tmp = *pp;
__ia32_insert_pp(ppl, tmp, prev, rb_link, rb_parent);
prev = tmp;
rb_link = &tmp->pp_rb.rb_right;
rb_parent = &tmp->pp_rb;
}
return 0;
}
int
ia32_copy_ia64_partial_page_list(struct task_struct *p,
unsigned long clone_flags)
{
int retval = 0;
if (clone_flags & CLONE_VM) {
atomic_inc(&current->thread.ppl->pp_count);
p->thread.ppl = current->thread.ppl;
} else {
p->thread.ppl = ia32_init_pp_list();
if (!p->thread.ppl)
return -ENOMEM;
down_write(&current->mm->mmap_sem);
{
retval = __ia32_copy_pp_list(p->thread.ppl);
}
up_write(&current->mm->mmap_sem);
}
return retval;
}
static unsigned long
emulate_mmap (struct file *file, unsigned long start, unsigned long len, int prot, int flags,
loff_t off)
{
unsigned long tmp, end, pend, pstart, ret, is_congruent, fudge = 0;
struct inode *inode;
loff_t poff;
end = start + len;
pstart = PAGE_START(start);
pend = PAGE_ALIGN(end);
if (flags & MAP_FIXED) {
ia32_set_pp((unsigned int)start, (unsigned int)end, flags);
if (start > pstart) {
if (flags & MAP_SHARED)
printk(KERN_INFO
"%s(%d): emulate_mmap() can't share head (addr=0x%lx)\n",
current->comm, task_pid_nr(current), start);
ret = mmap_subpage(file, start, min(PAGE_ALIGN(start), end), prot, flags,
off);
if (IS_ERR((void *) ret))
return ret;
pstart += PAGE_SIZE;
if (pstart >= pend)
goto out; /* done */
}
if (end < pend) {
if (flags & MAP_SHARED)
printk(KERN_INFO
"%s(%d): emulate_mmap() can't share tail (end=0x%lx)\n",
current->comm, task_pid_nr(current), end);
ret = mmap_subpage(file, max(start, PAGE_START(end)), end, prot, flags,
(off + len) - offset_in_page(end));
if (IS_ERR((void *) ret))
return ret;
pend -= PAGE_SIZE;
if (pstart >= pend)
goto out; /* done */
}
} else {
/*
* If a start address was specified, use it if the entire rounded out area
* is available.
*/
if (start && !pstart)
fudge = 1; /* handle case of mapping to range (0,PAGE_SIZE) */
tmp = arch_get_unmapped_area(file, pstart - fudge, pend - pstart, 0, flags);
if (tmp != pstart) {
pstart = tmp;
start = pstart + offset_in_page(off); /* make start congruent with off */
end = start + len;
pend = PAGE_ALIGN(end);
}
}
poff = off + (pstart - start); /* note: (pstart - start) may be negative */
is_congruent = (flags & MAP_ANONYMOUS) || (offset_in_page(poff) == 0);
if ((flags & MAP_SHARED) && !is_congruent)
printk(KERN_INFO "%s(%d): emulate_mmap() can't share contents of incongruent mmap "
"(addr=0x%lx,off=0x%llx)\n", current->comm, task_pid_nr(current), start, off);
DBG("mmap_body: mapping [0x%lx-0x%lx) %s with poff 0x%llx\n", pstart, pend,
is_congruent ? "congruent" : "not congruent", poff);
down_write(&current->mm->mmap_sem);
{
if (!(flags & MAP_ANONYMOUS) && is_congruent)
ret = do_mmap(file, pstart, pend - pstart, prot, flags | MAP_FIXED, poff);
else
ret = do_mmap(NULL, pstart, pend - pstart,
prot | ((flags & MAP_ANONYMOUS) ? 0 : PROT_WRITE),
flags | MAP_FIXED | MAP_ANONYMOUS, 0);
}
up_write(&current->mm->mmap_sem);
if (IS_ERR((void *) ret))
return ret;
if (!is_congruent) {
/* read the file contents */
inode = file->f_path.dentry->d_inode;
if (!inode->i_fop || !file->f_op->read
|| ((*file->f_op->read)(file, (char __user *) pstart, pend - pstart, &poff)
< 0))
{
sys_munmap(pstart, pend - pstart);
return -EINVAL;
}
if (!(prot & PROT_WRITE) && sys_mprotect(pstart, pend - pstart, prot) < 0)
return -EINVAL;
}
if (!(flags & MAP_FIXED))
ia32_set_pp((unsigned int)start, (unsigned int)end, flags);
out:
return start;
}
#endif /* PAGE_SHIFT > IA32_PAGE_SHIFT */
static inline unsigned int
get_prot32 (unsigned int prot)
{
if (prot & PROT_WRITE)
/* on x86, PROT_WRITE implies PROT_READ, which implies PROT_EXEC */
prot |= PROT_READ | PROT_WRITE | PROT_EXEC;
else if (prot & (PROT_READ | PROT_EXEC))
/* on x86, there is no distinction between PROT_READ and PROT_EXEC */
prot |= (PROT_READ | PROT_EXEC);
return prot;
}
unsigned long
ia32_do_mmap (struct file *file, unsigned long addr, unsigned long len, int prot, int flags,
loff_t offset)
{
DBG("ia32_do_mmap(file=%p,addr=0x%lx,len=0x%lx,prot=%x,flags=%x,offset=0x%llx)\n",
file, addr, len, prot, flags, offset);
if (file && (!file->f_op || !file->f_op->mmap))
return -ENODEV;
len = IA32_PAGE_ALIGN(len);
if (len == 0)
return addr;
if (len > IA32_PAGE_OFFSET || addr > IA32_PAGE_OFFSET - len)
{
if (flags & MAP_FIXED)
return -ENOMEM;
else
return -EINVAL;
}
if (OFFSET4K(offset))
return -EINVAL;
prot = get_prot32(prot);
if (flags & MAP_HUGETLB)
return -ENOMEM;
#if PAGE_SHIFT > IA32_PAGE_SHIFT
mutex_lock(&ia32_mmap_mutex);
{
addr = emulate_mmap(file, addr, len, prot, flags, offset);
}
mutex_unlock(&ia32_mmap_mutex);
#else
down_write(&current->mm->mmap_sem);
{
addr = do_mmap(file, addr, len, prot, flags, offset);
}
up_write(&current->mm->mmap_sem);
#endif
DBG("ia32_do_mmap: returning 0x%lx\n", addr);
return addr;
}
/*
* Linux/i386 didn't use to be able to handle more than 4 system call parameters, so these
* system calls used a memory block for parameter passing.
*/
struct mmap_arg_struct {
unsigned int addr;
unsigned int len;
unsigned int prot;
unsigned int flags;
unsigned int fd;
unsigned int offset;
};
asmlinkage long
sys32_mmap (struct mmap_arg_struct __user *arg)
{
struct mmap_arg_struct a;
struct file *file = NULL;
unsigned long addr;
int flags;
if (copy_from_user(&a, arg, sizeof(a)))
return -EFAULT;
if (OFFSET4K(a.offset))
return -EINVAL;
flags = a.flags;
flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
if (!(flags & MAP_ANONYMOUS)) {
file = fget(a.fd);
if (!file)
return -EBADF;
}
addr = ia32_do_mmap(file, a.addr, a.len, a.prot, flags, a.offset);
if (file)
fput(file);
return addr;
}
asmlinkage long
sys32_mmap2 (unsigned int addr, unsigned int len, unsigned int prot, unsigned int flags,
unsigned int fd, unsigned int pgoff)
{
struct file *file = NULL;
unsigned long retval;
flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
if (!(flags & MAP_ANONYMOUS)) {
file = fget(fd);
if (!file)
return -EBADF;
}
retval = ia32_do_mmap(file, addr, len, prot, flags,
(unsigned long) pgoff << IA32_PAGE_SHIFT);
if (file)
fput(file);
return retval;
}
asmlinkage long
sys32_munmap (unsigned int start, unsigned int len)
{
unsigned int end = start + len;
long ret;
#if PAGE_SHIFT <= IA32_PAGE_SHIFT
ret = sys_munmap(start, end - start);
#else
if (OFFSET4K(start))
return -EINVAL;
end = IA32_PAGE_ALIGN(end);
if (start >= end)
return -EINVAL;
ret = ia32_unset_pp(&start, &end);
if (ret < 0)
return ret;
if (start >= end)
return 0;
mutex_lock(&ia32_mmap_mutex);
ret = sys_munmap(start, end - start);
mutex_unlock(&ia32_mmap_mutex);
#endif
return ret;
}
#if PAGE_SHIFT > IA32_PAGE_SHIFT
/*
* When mprotect()ing a partial page, we set the permission to the union of the old
* settings and the new settings. In other words, it's only possible to make access to a
* partial page less restrictive.
*/
static long
mprotect_subpage (unsigned long address, int new_prot)
{
int old_prot;
struct vm_area_struct *vma;
if (new_prot == PROT_NONE)
return 0; /* optimize case where nothing changes... */
vma = find_vma(current->mm, address);
old_prot = get_page_prot(vma, address);
return sys_mprotect(address, PAGE_SIZE, new_prot | old_prot);
}
#endif /* PAGE_SHIFT > IA32_PAGE_SHIFT */
asmlinkage long
sys32_mprotect (unsigned int start, unsigned int len, int prot)
{
unsigned int end = start + len;
#if PAGE_SHIFT > IA32_PAGE_SHIFT
long retval = 0;
#endif
prot = get_prot32(prot);
#if PAGE_SHIFT <= IA32_PAGE_SHIFT
return sys_mprotect(start, end - start, prot);
#else
if (OFFSET4K(start))
return -EINVAL;
end = IA32_PAGE_ALIGN(end);
if (end < start)
return -EINVAL;
retval = ia32_compare_pp(&start, &end);
if (retval < 0)
return retval;
mutex_lock(&ia32_mmap_mutex);
{
if (offset_in_page(start)) {
/* start address is 4KB aligned but not page aligned. */
retval = mprotect_subpage(PAGE_START(start), prot);
if (retval < 0)
goto out;
start = PAGE_ALIGN(start);
if (start >= end)
goto out; /* retval is already zero... */
}
if (offset_in_page(end)) {
/* end address is 4KB aligned but not page aligned. */
retval = mprotect_subpage(PAGE_START(end), prot);
if (retval < 0)
goto out;
end = PAGE_START(end);
}
retval = sys_mprotect(start, end - start, prot);
}
out:
mutex_unlock(&ia32_mmap_mutex);
return retval;
#endif
}
asmlinkage long
sys32_mremap (unsigned int addr, unsigned int old_len, unsigned int new_len,
unsigned int flags, unsigned int new_addr)
{
long ret;
#if PAGE_SHIFT <= IA32_PAGE_SHIFT
ret = sys_mremap(addr, old_len, new_len, flags, new_addr);
#else
unsigned int old_end, new_end;
if (OFFSET4K(addr))
return -EINVAL;
old_len = IA32_PAGE_ALIGN(old_len);
new_len = IA32_PAGE_ALIGN(new_len);
old_end = addr + old_len;
new_end = addr + new_len;
if (!new_len)
return -EINVAL;
if ((flags & MREMAP_FIXED) && (OFFSET4K(new_addr)))
return -EINVAL;
if (old_len >= new_len) {
ret = sys32_munmap(addr + new_len, old_len - new_len);
if (ret && old_len != new_len)
return ret;
ret = addr;
if (!(flags & MREMAP_FIXED) || (new_addr == addr))
return ret;
old_len = new_len;
}
addr = PAGE_START(addr);
old_len = PAGE_ALIGN(old_end) - addr;
new_len = PAGE_ALIGN(new_end) - addr;
mutex_lock(&ia32_mmap_mutex);
ret = sys_mremap(addr, old_len, new_len, flags, new_addr);
mutex_unlock(&ia32_mmap_mutex);
if ((ret >= 0) && (old_len < new_len)) {
/* mremap expanded successfully */
ia32_set_pp(old_end, new_end, flags);
}
#endif
return ret;
}
asmlinkage unsigned long
sys32_alarm (unsigned int seconds)
{
return alarm_setitimer(seconds);
}
struct sel_arg_struct {
unsigned int n;
unsigned int inp;
unsigned int outp;
unsigned int exp;
unsigned int tvp;
};
asmlinkage long
sys32_old_select (struct sel_arg_struct __user *arg)
{
struct sel_arg_struct a;
if (copy_from_user(&a, arg, sizeof(a)))
return -EFAULT;
return compat_sys_select(a.n, compat_ptr(a.inp), compat_ptr(a.outp),
compat_ptr(a.exp), compat_ptr(a.tvp));
}
#define SEMOP 1
#define SEMGET 2
#define SEMCTL 3
#define SEMTIMEDOP 4
#define MSGSND 11
#define MSGRCV 12
#define MSGGET 13
#define MSGCTL 14
#define SHMAT 21
#define SHMDT 22
#define SHMGET 23
#define SHMCTL 24
asmlinkage long
sys32_ipc(u32 call, int first, int second, int third, u32 ptr, u32 fifth)
{
int version;
version = call >> 16; /* hack for backward compatibility */
call &= 0xffff;
switch (call) {
case SEMTIMEDOP:
if (fifth)
return compat_sys_semtimedop(first, compat_ptr(ptr),
second, compat_ptr(fifth));
/* else fall through for normal semop() */
case SEMOP:
/* struct sembuf is the same on 32 and 64bit :)) */
return sys_semtimedop(first, compat_ptr(ptr), second,
NULL);
case SEMGET:
return sys_semget(first, second, third);
case SEMCTL:
return compat_sys_semctl(first, second, third, compat_ptr(ptr));
case MSGSND:
return compat_sys_msgsnd(first, second, third, compat_ptr(ptr));
case MSGRCV:
return compat_sys_msgrcv(first, second, fifth, third, version, compat_ptr(ptr));
case MSGGET:
return sys_msgget((key_t) first, second);
case MSGCTL:
return compat_sys_msgctl(first, second, compat_ptr(ptr));
case SHMAT:
return compat_sys_shmat(first, second, third, version, compat_ptr(ptr));
case SHMDT:
return sys_shmdt(compat_ptr(ptr));
case SHMGET:
return sys_shmget(first, (unsigned)second, third);
case SHMCTL:
return compat_sys_shmctl(first, second, compat_ptr(ptr));
default:
return -ENOSYS;
}
return -EINVAL;
}
asmlinkage long
compat_sys_wait4 (compat_pid_t pid, compat_uint_t * stat_addr, int options,
struct compat_rusage *ru);
asmlinkage long
sys32_waitpid (int pid, unsigned int *stat_addr, int options)
{
return compat_sys_wait4(pid, stat_addr, options, NULL);
}
/*
* The order in which registers are stored in the ptrace regs structure
*/
#define PT_EBX 0
#define PT_ECX 1
#define PT_EDX 2
#define PT_ESI 3
#define PT_EDI 4
#define PT_EBP 5
#define PT_EAX 6
#define PT_DS 7
#define PT_ES 8
#define PT_FS 9
#define PT_GS 10
#define PT_ORIG_EAX 11
#define PT_EIP 12
#define PT_CS 13
#define PT_EFL 14
#define PT_UESP 15
#define PT_SS 16
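/*
* The emulation layer keeps the i386 register file in fixed IA-64
* registers, as getreg()/putreg() below encode: eax=r8, ebx=r11, ecx=r9,
* edx=r10, esi=r14, edi=r15, ebp=r13, esp=r12, eip=cr.iip, and orig_eax
* lives in r1 (see dispatch_to_ia32_handler()).
*/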
static unsigned int
getreg (struct task_struct *child, int regno)
{
struct pt_regs *child_regs;
child_regs = task_pt_regs(child);
switch (regno / sizeof(int)) {
case PT_EBX: return child_regs->r11;
case PT_ECX: return child_regs->r9;
case PT_EDX: return child_regs->r10;
case PT_ESI: return child_regs->r14;
case PT_EDI: return child_regs->r15;
case PT_EBP: return child_regs->r13;
case PT_EAX: return child_regs->r8;
case PT_ORIG_EAX: return child_regs->r1; /* see dispatch_to_ia32_handler() */
case PT_EIP: return child_regs->cr_iip;
case PT_UESP: return child_regs->r12;
case PT_EFL: return child->thread.eflag;
case PT_DS: case PT_ES: case PT_FS: case PT_GS: case PT_SS:
return __USER_DS;
case PT_CS: return __USER_CS;
default:
printk(KERN_ERR "ia32.getreg(): unknown register %d\n", regno);
break;
}
return 0;
}
static void
putreg (struct task_struct *child, int regno, unsigned int value)
{
struct pt_regs *child_regs;
child_regs = task_pt_regs(child);
switch (regno / sizeof(int)) {
case PT_EBX: child_regs->r11 = value; break;
case PT_ECX: child_regs->r9 = value; break;
case PT_EDX: child_regs->r10 = value; break;
case PT_ESI: child_regs->r14 = value; break;
case PT_EDI: child_regs->r15 = value; break;
case PT_EBP: child_regs->r13 = value; break;
case PT_EAX: child_regs->r8 = value; break;
case PT_ORIG_EAX: child_regs->r1 = value; break;
case PT_EIP: child_regs->cr_iip = value; break;
case PT_UESP: child_regs->r12 = value; break;
case PT_EFL: child->thread.eflag = value; break;
case PT_DS: case PT_ES: case PT_FS: case PT_GS: case PT_SS:
if (value != __USER_DS)
printk(KERN_ERR
"ia32.putreg: attempt to set invalid segment register %d = %x\n",
regno, value);
break;
case PT_CS:
if (value != __USER_CS)
printk(KERN_ERR
"ia32.putreg: attempt to set invalid segment register %d = %x\n",
regno, value);
break;
default:
printk(KERN_ERR "ia32.putreg: unknown register %d\n", regno);
break;
}
}
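/*
* IA-32 stack registers st(0)-st(7) are numbered relative to the top
* of stack (TOS, bits 11-13 of the FP status word).  After adjusting
* for TOS, the first four physical registers live in pt_regs (f8-f11)
* and the remaining four in the switch stack (f12-f15).
*/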
static void
put_fpreg (int regno, struct _fpreg_ia32 __user *reg, struct pt_regs *ptp,
struct switch_stack *swp, int tos)
{
struct _fpreg_ia32 *f;
char buf[32];
f = (struct _fpreg_ia32 *)(((unsigned long)buf + 15) & ~15);
if ((regno += tos) >= 8)
regno -= 8;
switch (regno) {
case 0:
ia64f2ia32f(f, &ptp->f8);
break;
case 1:
ia64f2ia32f(f, &ptp->f9);
break;
case 2:
ia64f2ia32f(f, &ptp->f10);
break;
case 3:
ia64f2ia32f(f, &ptp->f11);
break;
case 4:
case 5:
case 6:
case 7:
ia64f2ia32f(f, &swp->f12 + (regno - 4));
break;
}
copy_to_user(reg, f, sizeof(*reg));
}
static void
get_fpreg (int regno, struct _fpreg_ia32 __user *reg, struct pt_regs *ptp,
struct switch_stack *swp, int tos)
{
if ((regno += tos) >= 8)
regno -= 8;
switch (regno) {
case 0:
copy_from_user(&ptp->f8, reg, sizeof(*reg));
break;
case 1:
copy_from_user(&ptp->f9, reg, sizeof(*reg));
break;
case 2:
copy_from_user(&ptp->f10, reg, sizeof(*reg));
break;
case 3:
copy_from_user(&ptp->f11, reg, sizeof(*reg));
break;
case 4:
case 5:
case 6:
case 7:
copy_from_user(&swp->f12 + (regno - 4), reg, sizeof(*reg));
break;
}
return;
}
int
save_ia32_fpstate (struct task_struct *tsk, struct ia32_user_i387_struct __user *save)
{
struct switch_stack *swp;
struct pt_regs *ptp;
int i, tos;
if (!access_ok(VERIFY_WRITE, save, sizeof(*save)))
return -EFAULT;
__put_user(tsk->thread.fcr & 0xffff, &save->cwd);
__put_user(tsk->thread.fsr & 0xffff, &save->swd);
__put_user((tsk->thread.fsr>>16) & 0xffff, &save->twd);
__put_user(tsk->thread.fir, &save->fip);
__put_user((tsk->thread.fir>>32) & 0xffff, &save->fcs);
__put_user(tsk->thread.fdr, &save->foo);
__put_user((tsk->thread.fdr>>32) & 0xffff, &save->fos);
/*
* Stack frames start with 16 bytes of temp space
*/
swp = (struct switch_stack *)(tsk->thread.ksp + 16);
ptp = task_pt_regs(tsk);
tos = (tsk->thread.fsr >> 11) & 7;
for (i = 0; i < 8; i++)
put_fpreg(i, &save->st_space[i], ptp, swp, tos);
return 0;
}
static int
restore_ia32_fpstate (struct task_struct *tsk, struct ia32_user_i387_struct __user *save)
{
struct switch_stack *swp;
struct pt_regs *ptp;
int i, tos;
unsigned int fsrlo, fsrhi, num32;
if (!access_ok(VERIFY_READ, save, sizeof(*save)))
return -EFAULT;
__get_user(num32, (unsigned int __user *)&save->cwd);
tsk->thread.fcr = (tsk->thread.fcr & (~0x1f3f)) | (num32 & 0x1f3f);
__get_user(fsrlo, (unsigned int __user *)&save->swd);
__get_user(fsrhi, (unsigned int __user *)&save->twd);
num32 = (fsrhi << 16) | fsrlo;
tsk->thread.fsr = (tsk->thread.fsr & (~0xffffffff)) | num32;
__get_user(num32, (unsigned int __user *)&save->fip);
tsk->thread.fir = (tsk->thread.fir & (~0xffffffff)) | num32;
__get_user(num32, (unsigned int __user *)&save->foo);
tsk->thread.fdr = (tsk->thread.fdr & (~0xffffffff)) | num32;
/*
* Stack frames start with 16 bytes of temp space
*/
swp = (struct switch_stack *)(tsk->thread.ksp + 16);
ptp = task_pt_regs(tsk);
tos = (tsk->thread.fsr >> 11) & 7;
for (i = 0; i < 8; i++)
get_fpreg(i, &save->st_space[i], ptp, swp, tos);
return 0;
}
int
save_ia32_fpxstate (struct task_struct *tsk, struct ia32_user_fxsr_struct __user *save)
{
struct switch_stack *swp;
struct pt_regs *ptp;
int i, tos;
unsigned long mxcsr=0;
unsigned long num128[2];
if (!access_ok(VERIFY_WRITE, save, sizeof(*save)))
return -EFAULT;
__put_user(tsk->thread.fcr & 0xffff, &save->cwd);
__put_user(tsk->thread.fsr & 0xffff, &save->swd);
__put_user((tsk->thread.fsr>>16) & 0xffff, &save->twd);
__put_user(tsk->thread.fir, &save->fip);
__put_user((tsk->thread.fir>>32) & 0xffff, &save->fcs);
__put_user(tsk->thread.fdr, &save->foo);
__put_user((tsk->thread.fdr>>32) & 0xffff, &save->fos);
/*
* Stack frames start with 16 bytes of temp space
*/
swp = (struct switch_stack *)(tsk->thread.ksp + 16);
ptp = task_pt_regs(tsk);
tos = (tsk->thread.fsr >> 11) & 7;
for (i = 0; i < 8; i++)
put_fpreg(i, (struct _fpreg_ia32 __user *)&save->st_space[4*i], ptp, swp, tos);
mxcsr = ((tsk->thread.fcr>>32) & 0xff80) | ((tsk->thread.fsr>>32) & 0x3f);
__put_user(mxcsr & 0xffff, &save->mxcsr);
for (i = 0; i < 8; i++) {
memcpy(&(num128[0]), &(swp->f16) + i*2, sizeof(unsigned long));
memcpy(&(num128[1]), &(swp->f17) + i*2, sizeof(unsigned long));
copy_to_user(&save->xmm_space[0] + 4*i, num128, sizeof(struct _xmmreg_ia32));
}
return 0;
}
static int
restore_ia32_fpxstate (struct task_struct *tsk, struct ia32_user_fxsr_struct __user *save)
{
struct switch_stack *swp;
struct pt_regs *ptp;
int i, tos;
unsigned int fsrlo, fsrhi, num32;
int mxcsr;
unsigned long num64;
unsigned long num128[2];
if (!access_ok(VERIFY_READ, save, sizeof(*save)))
return -EFAULT;
__get_user(num32, (unsigned int __user *)&save->cwd);
tsk->thread.fcr = (tsk->thread.fcr & (~0x1f3f)) | (num32 & 0x1f3f);
__get_user(fsrlo, (unsigned int __user *)&save->swd);
__get_user(fsrhi, (unsigned int __user *)&save->twd);
num32 = (fsrhi << 16) | fsrlo;
tsk->thread.fsr = (tsk->thread.fsr & (~0xffffffff)) | num32;
__get_user(num32, (unsigned int __user *)&save->fip);
tsk->thread.fir = (tsk->thread.fir & (~0xffffffff)) | num32;
__get_user(num32, (unsigned int __user *)&save->foo);
tsk->thread.fdr = (tsk->thread.fdr & (~0xffffffff)) | num32;
/*
* Stack frames start with 16 bytes of temp space
*/
swp = (struct switch_stack *)(tsk->thread.ksp + 16);
ptp = task_pt_regs(tsk);
tos = (tsk->thread.fsr >> 11) & 7;
for (i = 0; i < 8; i++)
get_fpreg(i, (struct _fpreg_ia32 __user *)&save->st_space[4*i], ptp, swp, tos);
__get_user(mxcsr, (unsigned int __user *)&save->mxcsr);
num64 = mxcsr & 0xff10;
tsk->thread.fcr = (tsk->thread.fcr & (~0xff1000000000UL)) | (num64<<32);
num64 = mxcsr & 0x3f;
tsk->thread.fsr = (tsk->thread.fsr & (~0x3f00000000UL)) | (num64<<32);
for (i = 0; i < 8; i++) {
copy_from_user(num128, &save->xmm_space[0] + 4*i, sizeof(struct _xmmreg_ia32));
memcpy(&(swp->f16) + i*2, &(num128[0]), sizeof(unsigned long));
memcpy(&(swp->f17) + i*2, &(num128[1]), sizeof(unsigned long));
}
return 0;
}
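/*
* ptrace entry point for 32-bit debuggers.  PEEKUSR/POKEUSR address
* the seventeen 4-byte slots laid out by the PT_* indices above; the
* GETREGS/SETREGS and FP requests bulk-copy the same register image
* via the save/restore helpers above.
*/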
long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
compat_ulong_t caddr, compat_ulong_t cdata)
{
unsigned long addr = caddr;
unsigned long data = cdata;
unsigned int tmp;
long i, ret;
switch (request) {
case PTRACE_PEEKUSR: /* read word at addr in USER area */
ret = -EIO;
if ((addr & 3) || addr >= 17*sizeof(int))
break;
tmp = getreg(child, addr);
if (!put_user(tmp, (unsigned int __user *) compat_ptr(data)))
ret = 0;
break;
case PTRACE_POKEUSR: /* write word at addr in USER area */
ret = -EIO;
if ((addr & 3) || addr >= 17*sizeof(int))
break;
putreg(child, addr, data);
ret = 0;
break;
case IA32_PTRACE_GETREGS:
if (!access_ok(VERIFY_WRITE, compat_ptr(data), 17*sizeof(int))) {
ret = -EIO;
break;
}
for (i = 0; i < (int) (17*sizeof(int)); i += sizeof(int)) {
put_user(getreg(child, i), (unsigned int __user *) compat_ptr(data));
data += sizeof(int);
}
ret = 0;
break;
case IA32_PTRACE_SETREGS:
if (!access_ok(VERIFY_READ, compat_ptr(data), 17*sizeof(int))) {
ret = -EIO;
break;
}
for (i = 0; i < (int) (17*sizeof(int)); i += sizeof(int)) {
get_user(tmp, (unsigned int __user *) compat_ptr(data));
putreg(child, i, tmp);
data += sizeof(int);
}
ret = 0;
break;
case IA32_PTRACE_GETFPREGS:
ret = save_ia32_fpstate(child, (struct ia32_user_i387_struct __user *)
compat_ptr(data));
break;
case IA32_PTRACE_GETFPXREGS:
ret = save_ia32_fpxstate(child, (struct ia32_user_fxsr_struct __user *)
compat_ptr(data));
break;
case IA32_PTRACE_SETFPREGS:
ret = restore_ia32_fpstate(child, (struct ia32_user_i387_struct __user *)
compat_ptr(data));
break;
case IA32_PTRACE_SETFPXREGS:
ret = restore_ia32_fpxstate(child, (struct ia32_user_fxsr_struct __user *)
compat_ptr(data));
break;
default:
return compat_ptrace_request(child, request, caddr, cdata);
}
return ret;
}
typedef struct {
unsigned int ss_sp;
unsigned int ss_flags;
unsigned int ss_size;
} ia32_stack_t;
asmlinkage long
sys32_sigaltstack (ia32_stack_t __user *uss32, ia32_stack_t __user *uoss32,
long arg2, long arg3, long arg4, long arg5, long arg6,
long arg7, struct pt_regs pt)
{
stack_t uss, uoss;
ia32_stack_t buf32;
int ret;
mm_segment_t old_fs = get_fs();
if (uss32) {
if (copy_from_user(&buf32, uss32, sizeof(ia32_stack_t)))
return -EFAULT;
uss.ss_sp = (void __user *) (long) buf32.ss_sp;
uss.ss_flags = buf32.ss_flags;
/* MINSIGSTKSZ is different for ia32 vs ia64. We lie here to pass the
check and set it to the user requested value later */
if ((buf32.ss_flags != SS_DISABLE) && (buf32.ss_size < MINSIGSTKSZ_IA32)) {
ret = -ENOMEM;
goto out;
}
uss.ss_size = MINSIGSTKSZ;
}
set_fs(KERNEL_DS);
ret = do_sigaltstack(uss32 ? (stack_t __user *) &uss : NULL,
(stack_t __user *) &uoss, pt.r12);
if (uss32)
current->sas_ss_size = buf32.ss_size;
set_fs(old_fs);
out:
if (ret < 0)
return ret;
if (uoss32) {
buf32.ss_sp = (unsigned int) (unsigned long) uoss.ss_sp;
buf32.ss_flags = uoss.ss_flags;
buf32.ss_size = uoss.ss_size;
if (copy_to_user(uoss32, &buf32, sizeof(ia32_stack_t)))
return -EFAULT;
}
return ret;
}
asmlinkage int
sys32_msync (unsigned int start, unsigned int len, int flags)
{
unsigned int addr;
if (OFFSET4K(start))
return -EINVAL;
addr = PAGE_START(start);
return sys_msync(addr, len + (start - addr), flags);
}
asmlinkage long
sys32_newuname (struct new_utsname __user *name)
{
int ret = sys_newuname(name);
if (!ret)
if (copy_to_user(name->machine, "i686\0\0\0", 8))
ret = -EFAULT;
return ret;
}
asmlinkage long
sys32_getresuid16 (u16 __user *ruid, u16 __user *euid, u16 __user *suid)
{
uid_t a, b, c;
int ret;
mm_segment_t old_fs = get_fs();
set_fs(KERNEL_DS);
ret = sys_getresuid((uid_t __user *) &a, (uid_t __user *) &b, (uid_t __user *) &c);
set_fs(old_fs);
if (put_user(a, ruid) || put_user(b, euid) || put_user(c, suid))
return -EFAULT;
return ret;
}
asmlinkage long
sys32_getresgid16 (u16 __user *rgid, u16 __user *egid, u16 __user *sgid)
{
gid_t a, b, c;
int ret;
mm_segment_t old_fs = get_fs();
set_fs(KERNEL_DS);
ret = sys_getresgid((gid_t __user *) &a, (gid_t __user *) &b, (gid_t __user *) &c);
set_fs(old_fs);
if (ret)
return ret;
if (put_user(a, rgid) || put_user(b, egid) || put_user(c, sgid))
return -EFAULT;
return ret;
}
asmlinkage long
sys32_lseek (unsigned int fd, int offset, unsigned int whence)
{
/* Sign-extension of "offset" is important here... */
return sys_lseek(fd, offset, whence);
}
static int
groups16_to_user(short __user *grouplist, struct group_info *group_info)
{
int i;
short group;
for (i = 0; i < group_info->ngroups; i++) {
group = (short)GROUP_AT(group_info, i);
if (put_user(group, grouplist+i))
return -EFAULT;
}
return 0;
}
static int
groups16_from_user(struct group_info *group_info, short __user *grouplist)
{
int i;
short group;
for (i = 0; i < group_info->ngroups; i++) {
if (get_user(group, grouplist+i))
return -EFAULT;
GROUP_AT(group_info, i) = (gid_t)group;
}
return 0;
}
asmlinkage long
sys32_getgroups16 (int gidsetsize, short __user *grouplist)
{
const struct cred *cred = current_cred();
int i;
if (gidsetsize < 0)
return -EINVAL;
i = cred->group_info->ngroups;
if (gidsetsize) {
if (i > gidsetsize) {
i = -EINVAL;
goto out;
}
if (groups16_to_user(grouplist, cred->group_info)) {
i = -EFAULT;
goto out;
}
}
out:
return i;
}
asmlinkage long
sys32_setgroups16 (int gidsetsize, short __user *grouplist)
{
struct group_info *group_info;
int retval;
if (!capable(CAP_SETGID))
return -EPERM;
if ((unsigned)gidsetsize > NGROUPS_MAX)
return -EINVAL;
group_info = groups_alloc(gidsetsize);
if (!group_info)
return -ENOMEM;
retval = groups16_from_user(group_info, grouplist);
if (retval) {
put_group_info(group_info);
return retval;
}
retval = set_current_groups(group_info);
put_group_info(group_info);
return retval;
}
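/*
* 64-bit file sizes and offsets arrive from ia32 user space as two
* 32-bit arguments and are reassembled as ((u64) hi << 32) | lo
* before calling the native syscall; sys32_pread/sys32_pwrite and
* sys32_fadvise64_64 below use the same pattern.
*/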
asmlinkage long
sys32_truncate64 (unsigned int path, unsigned int len_lo, unsigned int len_hi)
{
return sys_truncate(compat_ptr(path), ((unsigned long) len_hi << 32) | len_lo);
}
asmlinkage long
sys32_ftruncate64 (int fd, unsigned int len_lo, unsigned int len_hi)
{
return sys_ftruncate(fd, ((unsigned long) len_hi << 32) | len_lo);
}
static int
putstat64 (struct stat64 __user *ubuf, struct kstat *kbuf)
{
int err;
u64 hdev;
if (clear_user(ubuf, sizeof(*ubuf)))
return -EFAULT;
hdev = huge_encode_dev(kbuf->dev);
err = __put_user(hdev, (u32 __user*)&ubuf->st_dev);
err |= __put_user(hdev >> 32, ((u32 __user*)&ubuf->st_dev) + 1);
err |= __put_user(kbuf->ino, &ubuf->__st_ino);
err |= __put_user(kbuf->ino, &ubuf->st_ino_lo);
err |= __put_user(kbuf->ino >> 32, &ubuf->st_ino_hi);
err |= __put_user(kbuf->mode, &ubuf->st_mode);
err |= __put_user(kbuf->nlink, &ubuf->st_nlink);
err |= __put_user(kbuf->uid, &ubuf->st_uid);
err |= __put_user(kbuf->gid, &ubuf->st_gid);
hdev = huge_encode_dev(kbuf->rdev);
err |= __put_user(hdev, (u32 __user*)&ubuf->st_rdev);
err |= __put_user(hdev >> 32, ((u32 __user*)&ubuf->st_rdev) + 1);
err |= __put_user(kbuf->size, &ubuf->st_size_lo);
err |= __put_user((kbuf->size >> 32), &ubuf->st_size_hi);
err |= __put_user(kbuf->atime.tv_sec, &ubuf->st_atime);
err |= __put_user(kbuf->atime.tv_nsec, &ubuf->st_atime_nsec);
err |= __put_user(kbuf->mtime.tv_sec, &ubuf->st_mtime);
err |= __put_user(kbuf->mtime.tv_nsec, &ubuf->st_mtime_nsec);
err |= __put_user(kbuf->ctime.tv_sec, &ubuf->st_ctime);
err |= __put_user(kbuf->ctime.tv_nsec, &ubuf->st_ctime_nsec);
err |= __put_user(kbuf->blksize, &ubuf->st_blksize);
err |= __put_user(kbuf->blocks, &ubuf->st_blocks);
return err;
}
asmlinkage long
sys32_stat64 (char __user *filename, struct stat64 __user *statbuf)
{
struct kstat s;
long ret = vfs_stat(filename, &s);
if (!ret)
ret = putstat64(statbuf, &s);
return ret;
}
asmlinkage long
sys32_lstat64 (char __user *filename, struct stat64 __user *statbuf)
{
struct kstat s;
long ret = vfs_lstat(filename, &s);
if (!ret)
ret = putstat64(statbuf, &s);
return ret;
}
asmlinkage long
sys32_fstat64 (unsigned int fd, struct stat64 __user *statbuf)
{
struct kstat s;
long ret = vfs_fstat(fd, &s);
if (!ret)
ret = putstat64(statbuf, &s);
return ret;
}
asmlinkage long
sys32_sched_rr_get_interval (pid_t pid, struct compat_timespec __user *interval)
{
mm_segment_t old_fs = get_fs();
struct timespec t;
long ret;
set_fs(KERNEL_DS);
ret = sys_sched_rr_get_interval(pid, (struct timespec __user *) &t);
set_fs(old_fs);
if (put_compat_timespec(&t, interval))
return -EFAULT;
return ret;
}
asmlinkage long
sys32_pread (unsigned int fd, void __user *buf, unsigned int count, u32 pos_lo, u32 pos_hi)
{
return sys_pread64(fd, buf, count, ((unsigned long) pos_hi << 32) | pos_lo);
}
asmlinkage long
sys32_pwrite (unsigned int fd, void __user *buf, unsigned int count, u32 pos_lo, u32 pos_hi)
{
return sys_pwrite64(fd, buf, count, ((unsigned long) pos_hi << 32) | pos_lo);
}
asmlinkage long
sys32_sendfile (int out_fd, int in_fd, int __user *offset, unsigned int count)
{
mm_segment_t old_fs = get_fs();
long ret;
off_t of;
if (offset && get_user(of, offset))
return -EFAULT;
set_fs(KERNEL_DS);
ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __user *) &of : NULL, count);
set_fs(old_fs);
if (offset && put_user(of, offset))
return -EFAULT;
return ret;
}
asmlinkage long
sys32_personality (unsigned int personality)
{
long ret;
if (current->personality == PER_LINUX32 && personality == PER_LINUX)
personality = PER_LINUX32;
ret = sys_personality(personality);
if (ret == PER_LINUX32)
ret = PER_LINUX;
return ret;
}
asmlinkage unsigned long
sys32_brk (unsigned int brk)
{
unsigned long ret, obrk;
struct mm_struct *mm = current->mm;
obrk = mm->brk;
ret = sys_brk(brk);
if (ret < obrk)
clear_user(compat_ptr(ret), PAGE_ALIGN(ret) - ret);
return ret;
}
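/*
* Note: when the 4KB ia32 break shrinks within a larger ia64 page,
* the tail of that page stays mapped; the clear_user() above appears
* intended to zero the vacated range so stale heap contents are not
* left readable past the new break.
*/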
/* Structure for ia32 emulation on ia64 */
struct epoll_event32
{
u32 events;
u32 data[2];
};
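/*
* The 64-bit 'data' member of struct epoll_event is only 32-bit
* aligned in the ia32 layout, so it is carried as data[0] (low half)
* and data[1] (high half) and spliced back together around the native
* call.
*/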
asmlinkage long
sys32_epoll_ctl(int epfd, int op, int fd, struct epoll_event32 __user *event)
{
mm_segment_t old_fs = get_fs();
struct epoll_event event64;
int error;
u32 data_halfword;
if (!access_ok(VERIFY_READ, event, sizeof(struct epoll_event32)))
return -EFAULT;
__get_user(event64.events, &event->events);
__get_user(data_halfword, &event->data[0]);
event64.data = data_halfword;
__get_user(data_halfword, &event->data[1]);
event64.data |= (u64)data_halfword << 32;
set_fs(KERNEL_DS);
error = sys_epoll_ctl(epfd, op, fd, (struct epoll_event __user *) &event64);
set_fs(old_fs);
return error;
}
asmlinkage long
sys32_epoll_wait(int epfd, struct epoll_event32 __user * events, int maxevents,
int timeout)
{
struct epoll_event *events64 = NULL;
mm_segment_t old_fs = get_fs();
int numevents, size;
int evt_idx;
int do_free_pages = 0;
if (maxevents <= 0) {
return -EINVAL;
}
/* Verify that the area passed by the user is writeable */
if (!access_ok(VERIFY_WRITE, events, maxevents * sizeof(struct epoll_event32)))
return -EFAULT;
/*
* Allocate space for the intermediate copy. If the space needed
* is large enough to cause kmalloc to fail, then try again with
* __get_free_pages.
*/
size = maxevents * sizeof(struct epoll_event);
events64 = kmalloc(size, GFP_KERNEL);
if (events64 == NULL) {
events64 = (struct epoll_event *)
__get_free_pages(GFP_KERNEL, get_order(size));
if (events64 == NULL)
return -ENOMEM;
do_free_pages = 1;
}
/* Do the system call */
set_fs(KERNEL_DS); /* copy_to/from_user should work on kernel mem*/
numevents = sys_epoll_wait(epfd, (struct epoll_event __user *) events64,
maxevents, timeout);
set_fs(old_fs);
/* Don't modify userspace memory if we're returning an error */
if (numevents > 0) {
/* Translate the 64-bit structures back into the 32-bit
structures */
for (evt_idx = 0; evt_idx < numevents; evt_idx++) {
__put_user(events64[evt_idx].events,
&events[evt_idx].events);
__put_user((u32)events64[evt_idx].data,
&events[evt_idx].data[0]);
__put_user((u32)(events64[evt_idx].data >> 32),
&events[evt_idx].data[1]);
}
}
if (do_free_pages)
free_pages((unsigned long) events64, get_order(size));
else
kfree(events64);
return numevents;
}
/*
* Get a yet unused TLS descriptor index.
*/
static int
get_free_idx (void)
{
struct thread_struct *t = &current->thread;
int idx;
for (idx = 0; idx < GDT_ENTRY_TLS_ENTRIES; idx++)
if (desc_empty(t->tls_array + idx))
return idx + GDT_ENTRY_TLS_MIN;
return -ESRCH;
}
static void set_tls_desc(struct task_struct *p, int idx,
const struct ia32_user_desc *info, int n)
{
struct thread_struct *t = &p->thread;
struct desc_struct *desc = &t->tls_array[idx - GDT_ENTRY_TLS_MIN];
int cpu;
/*
* We must not get preempted while modifying the TLS.
*/
cpu = get_cpu();
while (n-- > 0) {
if (LDT_empty(info)) {
desc->a = 0;
desc->b = 0;
} else {
desc->a = LDT_entry_a(info);
desc->b = LDT_entry_b(info);
}
++info;
++desc;
}
if (t == &current->thread)
load_TLS(t, cpu);
put_cpu();
}
/*
* Set a given TLS descriptor:
*/
asmlinkage int
sys32_set_thread_area (struct ia32_user_desc __user *u_info)
{
struct ia32_user_desc info;
int idx;
if (copy_from_user(&info, u_info, sizeof(info)))
return -EFAULT;
idx = info.entry_number;
/*
* index -1 means the kernel should try to find and allocate an empty descriptor:
*/
if (idx == -1) {
idx = get_free_idx();
if (idx < 0)
return idx;
if (put_user(idx, &u_info->entry_number))
return -EFAULT;
}
if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
return -EINVAL;
set_tls_desc(current, idx, &info, 1);
return 0;
}
/*
* Get the current Thread-Local Storage area:
*/
#define GET_BASE(desc) ( \
(((desc)->a >> 16) & 0x0000ffff) | \
(((desc)->b << 16) & 0x00ff0000) | \
( (desc)->b & 0xff000000) )
#define GET_LIMIT(desc) ( \
((desc)->a & 0x0ffff) | \
((desc)->b & 0xf0000) )
#define GET_32BIT(desc) (((desc)->b >> 22) & 1)
#define GET_CONTENTS(desc) (((desc)->b >> 10) & 3)
#define GET_WRITABLE(desc) (((desc)->b >> 9) & 1)
#define GET_LIMIT_PAGES(desc) (((desc)->b >> 23) & 1)
#define GET_PRESENT(desc) (((desc)->b >> 15) & 1)
#define GET_USEABLE(desc) (((desc)->b >> 20) & 1)
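/*
* These macros extract the base, limit and attribute bits of an i386
* segment descriptor from its two 32-bit words ('a' is the low word
* of the 8-byte GDT entry, 'b' the high word).
*/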
static void fill_user_desc(struct ia32_user_desc *info, int idx,
const struct desc_struct *desc)
{
info->entry_number = idx;
info->base_addr = GET_BASE(desc);
info->limit = GET_LIMIT(desc);
info->seg_32bit = GET_32BIT(desc);
info->contents = GET_CONTENTS(desc);
info->read_exec_only = !GET_WRITABLE(desc);
info->limit_in_pages = GET_LIMIT_PAGES(desc);
info->seg_not_present = !GET_PRESENT(desc);
info->useable = GET_USEABLE(desc);
}
asmlinkage int
sys32_get_thread_area (struct ia32_user_desc __user *u_info)
{
struct ia32_user_desc info;
struct desc_struct *desc;
int idx;
if (get_user(idx, &u_info->entry_number))
return -EFAULT;
if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
return -EINVAL;
desc = current->thread.tls_array + idx - GDT_ENTRY_TLS_MIN;
fill_user_desc(&info, idx, desc);
if (copy_to_user(u_info, &info, sizeof(info)))
return -EFAULT;
return 0;
}
struct regset_get {
void *kbuf;
void __user *ubuf;
};
struct regset_set {
const void *kbuf;
const void __user *ubuf;
};
struct regset_getset {
struct task_struct *target;
const struct user_regset *regset;
union {
struct regset_get get;
struct regset_set set;
} u;
unsigned int pos;
unsigned int count;
int ret;
};
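/*
* The FP regset accessors below must unwind the target task to find
* its switch_stack; struct regset_getset packages the get/set
* arguments so do_regset_call() can pass them through the unwinder
* callback.
*/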
static void getfpreg(struct task_struct *task, int regno, int *val)
{
switch (regno / sizeof(int)) {
case 0:
*val = task->thread.fcr & 0xffff;
break;
case 1:
*val = task->thread.fsr & 0xffff;
break;
case 2:
*val = (task->thread.fsr>>16) & 0xffff;
break;
case 3:
*val = task->thread.fir;
break;
case 4:
*val = (task->thread.fir>>32) & 0xffff;
break;
case 5:
*val = task->thread.fdr;
break;
case 6:
*val = (task->thread.fdr >> 32) & 0xffff;
break;
}
}
static void setfpreg(struct task_struct *task, int regno, int val)
{
switch (regno / sizeof(int)) {
case 0:
task->thread.fcr = (task->thread.fcr & (~0x1f3f))
| (val & 0x1f3f);
break;
case 1:
task->thread.fsr = (task->thread.fsr & (~0xffff)) | val;
break;
case 2:
task->thread.fsr = (task->thread.fsr & (~0xffff0000))
| (val << 16);
break;
case 3:
task->thread.fir = (task->thread.fir & (~0xffffffff)) | val;
break;
case 5:
task->thread.fdr = (task->thread.fdr & (~0xffffffff)) | val;
break;
}
}
static void access_fpreg_ia32(int regno, void *reg,
struct pt_regs *pt, struct switch_stack *sw,
int tos, int write)
{
void *f;
if ((regno += tos) >= 8)
regno -= 8;
if (regno < 4)
f = &pt->f8 + regno;
else if (regno <= 7)
f = &sw->f12 + (regno - 4);
else {
printk(KERN_ERR "regno must be less than 8\n");
return;
}
if (write)
memcpy(f, reg, sizeof(struct _fpreg_ia32));
else
memcpy(reg, f, sizeof(struct _fpreg_ia32));
}
static void do_fpregs_get(struct unw_frame_info *info, void *arg)
{
struct regset_getset *dst = arg;
struct task_struct *task = dst->target;
struct pt_regs *pt;
int start, end, tos;
char buf[80];
if (dst->count == 0 || unw_unwind_to_user(info) < 0)
return;
if (dst->pos < 7 * sizeof(int)) {
end = min((dst->pos + dst->count),
(unsigned int)(7 * sizeof(int)));
for (start = dst->pos; start < end; start += sizeof(int))
getfpreg(task, start, (int *)(buf + start));
dst->ret = user_regset_copyout(&dst->pos, &dst->count,
&dst->u.get.kbuf, &dst->u.get.ubuf, buf,
0, 7 * sizeof(int));
if (dst->ret || dst->count == 0)
return;
}
if (dst->pos < sizeof(struct ia32_user_i387_struct)) {
pt = task_pt_regs(task);
tos = (task->thread.fsr >> 11) & 7;
end = min(dst->pos + dst->count,
(unsigned int)(sizeof(struct ia32_user_i387_struct)));
start = (dst->pos - 7 * sizeof(int)) /
sizeof(struct _fpreg_ia32);
end = (end - 7 * sizeof(int)) / sizeof(struct _fpreg_ia32);
for (; start < end; start++)
access_fpreg_ia32(start,
(struct _fpreg_ia32 *)buf + start,
pt, info->sw, tos, 0);
dst->ret = user_regset_copyout(&dst->pos, &dst->count,
&dst->u.get.kbuf, &dst->u.get.ubuf,
buf, 7 * sizeof(int),
sizeof(struct ia32_user_i387_struct));
if (dst->ret || dst->count == 0)
return;
}
}
static void do_fpregs_set(struct unw_frame_info *info, void *arg)
{
struct regset_getset *dst = arg;
struct task_struct *task = dst->target;
struct pt_regs *pt;
char buf[80];
int end, start, tos;
if (dst->count == 0 || unw_unwind_to_user(info) < 0)
return;
if (dst->pos < 7 * sizeof(int)) {
start = dst->pos;
dst->ret = user_regset_copyin(&dst->pos, &dst->count,
&dst->u.set.kbuf, &dst->u.set.ubuf, buf,
0, 7 * sizeof(int));
if (dst->ret)
return;
for (; start < dst->pos; start += sizeof(int))
setfpreg(task, start, *((int *)(buf + start)));
if (dst->count == 0)
return;
}
if (dst->pos < sizeof(struct ia32_user_i387_struct)) {
start = (dst->pos - 7 * sizeof(int)) /
sizeof(struct _fpreg_ia32);
dst->ret = user_regset_copyin(&dst->pos, &dst->count,
&dst->u.set.kbuf, &dst->u.set.ubuf,
buf, 7 * sizeof(int),
sizeof(struct ia32_user_i387_struct));
if (dst->ret)
return;
pt = task_pt_regs(task);
tos = (task->thread.fsr >> 11) & 7;
end = (dst->pos - 7 * sizeof(int)) / sizeof(struct _fpreg_ia32);
for (; start < end; start++)
access_fpreg_ia32(start,
(struct _fpreg_ia32 *)buf + start,
pt, info->sw, tos, 1);
if (dst->count == 0)
return;
}
}
#define OFFSET(member) ((int)(offsetof(struct ia32_user_fxsr_struct, member)))
static void getfpxreg(struct task_struct *task, int start, int end, char *buf)
{
int min_val;
min_val = min(end, OFFSET(fop));
while (start < min_val) {
if (start == OFFSET(cwd))
*((short *)buf) = task->thread.fcr & 0xffff;
else if (start == OFFSET(swd))
*((short *)buf) = task->thread.fsr & 0xffff;
else if (start == OFFSET(twd))
*((short *)buf) = (task->thread.fsr>>16) & 0xffff;
buf += 2;
start += 2;
}
/* skip fop element */
if (start == OFFSET(fop)) {
start += 2;
buf += 2;
}
while (start < end) {
if (start == OFFSET(fip))
*((int *)buf) = task->thread.fir;
else if (start == OFFSET(fcs))
*((int *)buf) = (task->thread.fir>>32) & 0xffff;
else if (start == OFFSET(foo))
*((int *)buf) = task->thread.fdr;
else if (start == OFFSET(fos))
*((int *)buf) = (task->thread.fdr>>32) & 0xffff;
else if (start == OFFSET(mxcsr))
*((int *)buf) = ((task->thread.fcr>>32) & 0xff80)
| ((task->thread.fsr>>32) & 0x3f);
buf += 4;
start += 4;
}
}
static void setfpxreg(struct task_struct *task, int start, int end, char *buf)
{
int min_val, num32;
short num;
unsigned long num64;
min_val = min(end, OFFSET(fop));
while (start < min_val) {
num = *((short *)buf);
if (start == OFFSET(cwd)) {
task->thread.fcr = (task->thread.fcr & (~0x1f3f))
| (num & 0x1f3f);
} else if (start == OFFSET(swd)) {
task->thread.fsr = (task->thread.fsr & (~0xffff)) | num;
} else if (start == OFFSET(twd)) {
task->thread.fsr = (task->thread.fsr & (~0xffff0000))
| (((int)num) << 16);
}
buf += 2;
start += 2;
}
/* skip fop element */
if (start == OFFSET(fop)) {
start += 2;
buf += 2;
}
while (start < end) {
num32 = *((int *)buf);
if (start == OFFSET(fip))
task->thread.fir = (task->thread.fir & (~0xffffffff))
| num32;
else if (start == OFFSET(foo))
task->thread.fdr = (task->thread.fdr & (~0xffffffff))
| num32;
else if (start == OFFSET(mxcsr)) {
num64 = num32 & 0xff10;
task->thread.fcr = (task->thread.fcr &
(~0xff1000000000UL)) | (num64<<32);
num64 = num32 & 0x3f;
task->thread.fsr = (task->thread.fsr &
(~0x3f00000000UL)) | (num64<<32);
}
buf += 4;
start += 4;
}
}
static void do_fpxregs_get(struct unw_frame_info *info, void *arg)
{
struct regset_getset *dst = arg;
struct task_struct *task = dst->target;
struct pt_regs *pt;
char buf[128];
int start, end, tos;
if (dst->count == 0 || unw_unwind_to_user(info) < 0)
return;
if (dst->pos < OFFSET(st_space[0])) {
end = min(dst->pos + dst->count, (unsigned int)32);
getfpxreg(task, dst->pos, end, buf);
dst->ret = user_regset_copyout(&dst->pos, &dst->count,
&dst->u.get.kbuf, &dst->u.get.ubuf, buf,
0, OFFSET(st_space[0]));
if (dst->ret || dst->count == 0)
return;
}
if (dst->pos < OFFSET(xmm_space[0])) {
pt = task_pt_regs(task);
tos = (task->thread.fsr >> 11) & 7;
end = min(dst->pos + dst->count,
(unsigned int)OFFSET(xmm_space[0]));
start = (dst->pos - OFFSET(st_space[0])) / 16;
end = (end - OFFSET(st_space[0])) / 16;
for (; start < end; start++)
access_fpreg_ia32(start, buf + 16 * start, pt,
info->sw, tos, 0);
dst->ret = user_regset_copyout(&dst->pos, &dst->count,
&dst->u.get.kbuf, &dst->u.get.ubuf,
buf, OFFSET(st_space[0]), OFFSET(xmm_space[0]));
if (dst->ret || dst->count == 0)
return;
}
if (dst->pos < OFFSET(padding[0]))
dst->ret = user_regset_copyout(&dst->pos, &dst->count,
&dst->u.get.kbuf, &dst->u.get.ubuf,
&info->sw->f16, OFFSET(xmm_space[0]),
OFFSET(padding[0]));
}
static void do_fpxregs_set(struct unw_frame_info *info, void *arg)
{
struct regset_getset *dst = arg;
struct task_struct *task = dst->target;
char buf[128];
int start, end;
if (dst->count == 0 || unw_unwind_to_user(info) < 0)
return;
if (dst->pos < OFFSET(st_space[0])) {
start = dst->pos;
dst->ret = user_regset_copyin(&dst->pos, &dst->count,
&dst->u.set.kbuf, &dst->u.set.ubuf,
buf, 0, OFFSET(st_space[0]));
if (dst->ret)
return;
setfpxreg(task, start, dst->pos, buf);
if (dst->count == 0)
return;
}
if (dst->pos < OFFSET(xmm_space[0])) {
struct pt_regs *pt;
int tos;
pt = task_pt_regs(task);
tos = (task->thread.fsr >> 11) & 7;
start = (dst->pos - OFFSET(st_space[0])) / 16;
dst->ret = user_regset_copyin(&dst->pos, &dst->count,
&dst->u.set.kbuf, &dst->u.set.ubuf,
buf, OFFSET(st_space[0]), OFFSET(xmm_space[0]));
if (dst->ret)
return;
end = (dst->pos - OFFSET(st_space[0])) / 16;
for (; start < end; start++)
access_fpreg_ia32(start, buf + 16 * start, pt, info->sw,
tos, 1);
if (dst->count == 0)
return;
}
if (dst->pos < OFFSET(padding[0]))
dst->ret = user_regset_copyin(&dst->pos, &dst->count,
&dst->u.set.kbuf, &dst->u.set.ubuf,
&info->sw->f16, OFFSET(xmm_space[0]),
OFFSET(padding[0]));
}
#undef OFFSET
static int do_regset_call(void (*call)(struct unw_frame_info *, void *),
struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
struct regset_getset info = { .target = target, .regset = regset,
.pos = pos, .count = count,
.u.set = { .kbuf = kbuf, .ubuf = ubuf },
.ret = 0 };
if (target == current)
unw_init_running(call, &info);
else {
struct unw_frame_info ufi;
memset(&ufi, 0, sizeof(ufi));
unw_init_from_blocked_task(&ufi, target);
(*call)(&ufi, &info);
}
return info.ret;
}
static int ia32_fpregs_get(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
void *kbuf, void __user *ubuf)
{
return do_regset_call(do_fpregs_get, target, regset, pos, count,
kbuf, ubuf);
}
static int ia32_fpregs_set(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
return do_regset_call(do_fpregs_set, target, regset, pos, count,
kbuf, ubuf);
}
static int ia32_fpxregs_get(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
void *kbuf, void __user *ubuf)
{
return do_regset_call(do_fpxregs_get, target, regset, pos, count,
kbuf, ubuf);
}
static int ia32_fpxregs_set(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
return do_regset_call(do_fpxregs_set, target, regset, pos, count,
kbuf, ubuf);
}
static int ia32_genregs_get(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
void *kbuf, void __user *ubuf)
{
if (kbuf) {
u32 *kp = kbuf;
while (count > 0) {
*kp++ = getreg(target, pos);
pos += 4;
count -= 4;
}
} else {
u32 __user *up = ubuf;
while (count > 0) {
if (__put_user(getreg(target, pos), up++))
return -EFAULT;
pos += 4;
count -= 4;
}
}
return 0;
}
static int ia32_genregs_set(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
int ret = 0;
if (kbuf) {
const u32 *kp = kbuf;
while (!ret && count > 0) {
putreg(target, pos, *kp++);
pos += 4;
count -= 4;
}
} else {
const u32 __user *up = ubuf;
u32 val;
while (!ret && count > 0) {
ret = __get_user(val, up++);
if (!ret)
putreg(target, pos, val);
pos += 4;
count -= 4;
}
}
return ret;
}
static int ia32_tls_active(struct task_struct *target,
const struct user_regset *regset)
{
struct thread_struct *t = &target->thread;
int n = GDT_ENTRY_TLS_ENTRIES;
while (n > 0 && desc_empty(&t->tls_array[n - 1]))
--n;
return n;
}
static int ia32_tls_get(struct task_struct *target,
const struct user_regset *regset, unsigned int pos,
unsigned int count, void *kbuf, void __user *ubuf)
{
const struct desc_struct *tls;
if (pos > GDT_ENTRY_TLS_ENTRIES * sizeof(struct ia32_user_desc) ||
(pos % sizeof(struct ia32_user_desc)) != 0 ||
(count % sizeof(struct ia32_user_desc)) != 0)
return -EINVAL;
pos /= sizeof(struct ia32_user_desc);
count /= sizeof(struct ia32_user_desc);
tls = &target->thread.tls_array[pos];
if (kbuf) {
struct ia32_user_desc *info = kbuf;
while (count-- > 0)
fill_user_desc(info++, GDT_ENTRY_TLS_MIN + pos++,
tls++);
} else {
struct ia32_user_desc __user *u_info = ubuf;
while (count-- > 0) {
struct ia32_user_desc info;
fill_user_desc(&info, GDT_ENTRY_TLS_MIN + pos++, tls++);
if (__copy_to_user(u_info++, &info, sizeof(info)))
return -EFAULT;
}
}
return 0;
}
static int ia32_tls_set(struct task_struct *target,
const struct user_regset *regset, unsigned int pos,
unsigned int count, const void *kbuf, const void __user *ubuf)
{
struct ia32_user_desc infobuf[GDT_ENTRY_TLS_ENTRIES];
const struct ia32_user_desc *info;
if (pos > GDT_ENTRY_TLS_ENTRIES * sizeof(struct ia32_user_desc) ||
(pos % sizeof(struct ia32_user_desc)) != 0 ||
(count % sizeof(struct ia32_user_desc)) != 0)
return -EINVAL;
if (kbuf)
info = kbuf;
else if (__copy_from_user(infobuf, ubuf, count))
return -EFAULT;
else
info = infobuf;
set_tls_desc(target,
GDT_ENTRY_TLS_MIN + (pos / sizeof(struct ia32_user_desc)),
info, count / sizeof(struct ia32_user_desc));
return 0;
}
/*
* This should match arch/i386/kernel/ptrace.c:native_regsets.
* XXX ioperm? vm86?
*/
static const struct user_regset ia32_regsets[] = {
{
.core_note_type = NT_PRSTATUS,
.n = sizeof(struct user_regs_struct32)/4,
.size = 4, .align = 4,
.get = ia32_genregs_get, .set = ia32_genregs_set
},
{
.core_note_type = NT_PRFPREG,
.n = sizeof(struct ia32_user_i387_struct) / 4,
.size = 4, .align = 4,
.get = ia32_fpregs_get, .set = ia32_fpregs_set
},
{
.core_note_type = NT_PRXFPREG,
.n = sizeof(struct ia32_user_fxsr_struct) / 4,
.size = 4, .align = 4,
.get = ia32_fpxregs_get, .set = ia32_fpxregs_set
},
{
.core_note_type = NT_386_TLS,
.n = GDT_ENTRY_TLS_ENTRIES,
.bias = GDT_ENTRY_TLS_MIN,
.size = sizeof(struct ia32_user_desc),
.align = sizeof(struct ia32_user_desc),
.active = ia32_tls_active,
.get = ia32_tls_get, .set = ia32_tls_set,
},
};
const struct user_regset_view user_ia32_view = {
.name = "i386", .e_machine = EM_386,
.regsets = ia32_regsets, .n = ARRAY_SIZE(ia32_regsets)
};
long sys32_fadvise64_64(int fd, __u32 offset_low, __u32 offset_high,
__u32 len_low, __u32 len_high, int advice)
{
return sys_fadvise64_64(fd,
(((u64)offset_high)<<32) | offset_low,
(((u64)len_high)<<32) | len_low,
advice);
}
#ifdef NOTYET /* UNTESTED FOR IA64 FROM HERE DOWN */
asmlinkage long sys32_setreuid(compat_uid_t ruid, compat_uid_t euid)
{
uid_t sruid, seuid;
sruid = (ruid == (compat_uid_t)-1) ? ((uid_t)-1) : ((uid_t)ruid);
seuid = (euid == (compat_uid_t)-1) ? ((uid_t)-1) : ((uid_t)euid);
return sys_setreuid(sruid, seuid);
}
asmlinkage long
sys32_setresuid(compat_uid_t ruid, compat_uid_t euid,
compat_uid_t suid)
{
uid_t sruid, seuid, ssuid;
sruid = (ruid == (compat_uid_t)-1) ? ((uid_t)-1) : ((uid_t)ruid);
seuid = (euid == (compat_uid_t)-1) ? ((uid_t)-1) : ((uid_t)euid);
ssuid = (suid == (compat_uid_t)-1) ? ((uid_t)-1) : ((uid_t)suid);
return sys_setresuid(sruid, seuid, ssuid);
}
asmlinkage long
sys32_setregid(compat_gid_t rgid, compat_gid_t egid)
{
gid_t srgid, segid;
srgid = (rgid == (compat_gid_t)-1) ? ((gid_t)-1) : ((gid_t)rgid);
segid = (egid == (compat_gid_t)-1) ? ((gid_t)-1) : ((gid_t)egid);
return sys_setregid(srgid, segid);
}
asmlinkage long
sys32_setresgid(compat_gid_t rgid, compat_gid_t egid,
compat_gid_t sgid)
{
gid_t srgid, segid, ssgid;
srgid = (rgid == (compat_gid_t)-1) ? ((gid_t)-1) : ((gid_t)rgid);
segid = (egid == (compat_gid_t)-1) ? ((gid_t)-1) : ((gid_t)egid);
ssgid = (sgid == (compat_gid_t)-1) ? ((gid_t)-1) : ((gid_t)sgid);
return sys_setresgid(srgid, segid, ssgid);
}
#endif /* NOTYET */
#ifndef _ASM_IA64_IA32_H
#define _ASM_IA64_IA32_H
#include <asm/ptrace.h>
#include <asm/signal.h>
#define IA32_NR_syscalls 285 /* length of syscall table */
#define IA32_PAGE_SHIFT 12 /* 4KB pages */
#ifndef __ASSEMBLY__
# ifdef CONFIG_IA32_SUPPORT
#define IA32_PAGE_OFFSET 0xc0000000
extern void ia32_cpu_init (void);
extern void ia32_mem_init (void);
extern void ia32_gdt_init (void);
extern int ia32_exception (struct pt_regs *regs, unsigned long isr);
extern int ia32_intercept (struct pt_regs *regs, unsigned long isr);
extern int ia32_clone_tls (struct task_struct *child, struct pt_regs *childregs);
# endif /* CONFIG_IA32_SUPPORT */
/* Declare this unconditionally, so we don't get warnings for unreachable code. */
extern int ia32_setup_frame1 (int sig, struct k_sigaction *ka, siginfo_t *info,
sigset_t *set, struct pt_regs *regs);
#if PAGE_SHIFT > IA32_PAGE_SHIFT
extern int ia32_copy_ia64_partial_page_list(struct task_struct *,
unsigned long);
extern void ia32_drop_ia64_partial_page_list(struct task_struct *);
#else
# define ia32_copy_ia64_partial_page_list(a1, a2) 0
# define ia32_drop_ia64_partial_page_list(a1) do { ; } while (0)
#endif
#endif /* !__ASSEMBLY__ */
#endif /* _ASM_IA64_IA32_H */
...@@ -270,23 +270,6 @@ typedef struct {
(int __user *) (addr)); \
})
#ifdef CONFIG_IA32_SUPPORT
struct desc_struct {
unsigned int a, b;
};
#define desc_empty(desc) (!((desc)->a | (desc)->b))
#define desc_equal(desc1, desc2) (((desc1)->a == (desc2)->a) && ((desc1)->b == (desc2)->b))
#define GDT_ENTRY_TLS_ENTRIES 3
#define GDT_ENTRY_TLS_MIN 6
#define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
#define TLS_SIZE (GDT_ENTRY_TLS_ENTRIES * 8)
struct ia64_partial_page_list;
#endif
struct thread_struct {
__u32 flags; /* various thread flags (see IA64_THREAD_*) */
/* writing on_ustack is performance-critical, so it's worth spending 8 bits on it... */
...@@ -298,29 +281,6 @@ struct thread_struct {
__u64 rbs_bot; /* the base address for the RBS */
int last_fph_cpu; /* CPU that may hold the contents of f32-f127 */
#ifdef CONFIG_IA32_SUPPORT
__u64 eflag; /* IA32 EFLAGS reg */
__u64 fsr; /* IA32 floating pt status reg */
__u64 fcr; /* IA32 floating pt control reg */
__u64 fir; /* IA32 fp except. instr. reg */
__u64 fdr; /* IA32 fp except. data reg */
__u64 old_k1; /* old value of ar.k1 */
__u64 old_iob; /* old IOBase value */
struct ia64_partial_page_list *ppl; /* partial page list for 4K page size issue */
/* cached TLS descriptors. */
struct desc_struct tls_array[GDT_ENTRY_TLS_ENTRIES];
# define INIT_THREAD_IA32 .eflag = 0, \
.fsr = 0, \
.fcr = 0x17800000037fULL, \
.fir = 0, \
.fdr = 0, \
.old_k1 = 0, \
.old_iob = 0, \
.ppl = NULL,
#else
# define INIT_THREAD_IA32
#endif /* CONFIG_IA32_SUPPORT */
#ifdef CONFIG_PERFMON
void *pfm_context; /* pointer to detailed PMU context */
unsigned long pfm_needs_checking; /* when >0, pending perfmon work on kernel exit */
...@@ -342,7 +302,6 @@ struct thread_struct {
.rbs_bot = STACK_TOP - DEFAULT_USER_STACK_SIZE, \
.task_size = DEFAULT_TASK_SIZE, \
.last_fph_cpu = -1, \
INIT_THREAD_IA32 \
INIT_THREAD_PM \
.dbr = {0, }, \
.ibr = {0, }, \
...@@ -485,11 +444,6 @@ extern void __ia64_load_fpu (struct ia64_fpreg *fph);
extern void ia64_save_debug_regs (unsigned long *save_area);
extern void ia64_load_debug_regs (unsigned long *save_area);
#ifdef CONFIG_IA32_SUPPORT
extern void ia32_save_state (struct task_struct *task);
extern void ia32_load_state (struct task_struct *task);
#endif
#define ia64_fph_enable() do { ia64_rsm(IA64_PSR_DFH); ia64_srlz_d(); } while (0)
#define ia64_fph_disable() do { ia64_ssm(IA64_PSR_DFH); ia64_srlz_d(); } while (0)
...
...@@ -22,33 +22,18 @@ static inline long syscall_get_nr(struct task_struct *task,
if ((long)regs->cr_ifs < 0) /* Not a syscall */
return -1;
#ifdef CONFIG_IA32_SUPPORT
if (IS_IA32_PROCESS(regs))
return regs->r1;
#endif
return regs->r15;
}
static inline void syscall_rollback(struct task_struct *task,
struct pt_regs *regs)
{
#ifdef CONFIG_IA32_SUPPORT
if (IS_IA32_PROCESS(regs))
regs->r8 = regs->r1;
#endif
/* do nothing */
}
static inline long syscall_get_error(struct task_struct *task,
struct pt_regs *regs)
{
#ifdef CONFIG_IA32_SUPPORT
if (IS_IA32_PROCESS(regs))
return regs->r8;
#endif
return regs->r10 == -1 ? regs->r8:0;
}
...@@ -62,13 +47,6 @@ static inline void syscall_set_return_value(struct task_struct *task,
struct pt_regs *regs,
int error, long val)
{
#ifdef CONFIG_IA32_SUPPORT
if (IS_IA32_PROCESS(regs)) {
regs->r8 = (long) error ? error : val;
return;
}
#endif
if (error) {
/* error < 0, but ia64 uses > 0 return value */
regs->r8 = -error;
...@@ -89,37 +67,6 @@ static inline void syscall_get_arguments(struct task_struct *task,
{
BUG_ON(i + n > 6);
#ifdef CONFIG_IA32_SUPPORT
if (IS_IA32_PROCESS(regs)) {
switch (i + n) {
case 6:
if (!n--) break;
*args++ = regs->r13;
case 5:
if (!n--) break;
*args++ = regs->r15;
case 4:
if (!n--) break;
*args++ = regs->r14;
case 3:
if (!n--) break;
*args++ = regs->r10;
case 2:
if (!n--) break;
*args++ = regs->r9;
case 1:
if (!n--) break;
*args++ = regs->r11;
case 0:
if (!n--) break;
default:
BUG();
break;
}
return;
}
#endif
ia64_syscall_get_set_arguments(task, regs, i, n, args, 0);
}
...@@ -130,34 +77,6 @@ static inline void syscall_set_arguments(struct task_struct *task,
{
BUG_ON(i + n > 6);
#ifdef CONFIG_IA32_SUPPORT
if (IS_IA32_PROCESS(regs)) {
switch (i + n) {
case 6:
if (!n--) break;
regs->r13 = *args++;
case 5:
if (!n--) break;
regs->r15 = *args++;
case 4:
if (!n--) break;
regs->r14 = *args++;
case 3:
if (!n--) break;
regs->r10 = *args++;
case 2:
if (!n--) break;
regs->r9 = *args++;
case 1:
if (!n--) break;
regs->r11 = *args++;
case 0:
if (!n--) break;
}
return;
}
#endif
ia64_syscall_get_set_arguments(task, regs, i, n, args, 1);
}
#endif /* _ASM_SYSCALL_H */
...@@ -191,15 +191,6 @@ do { \
#ifdef __KERNEL__
#ifdef CONFIG_IA32_SUPPORT
# define IS_IA32_PROCESS(regs) (ia64_psr(regs)->is != 0)
#else
# define IS_IA32_PROCESS(regs) 0
struct task_struct;
static inline void ia32_save_state(struct task_struct *t __attribute__((unused))){}
static inline void ia32_load_state(struct task_struct *t __attribute__((unused))){}
#endif
/*
* Context switch from one thread to another. If the two threads have
* different address spaces, schedule() has already taken care of
...@@ -233,7 +224,7 @@ extern void ia64_account_on_switch (struct task_struct *prev, struct task_struct
#define IA64_HAS_EXTRA_STATE(t) \
((t)->thread.flags & (IA64_THREAD_DBG_VALID|IA64_THREAD_PM_VALID) \
|| IS_IA32_PROCESS(task_pt_regs(t)) || PERFMON_IS_SYSWIDE())
|| PERFMON_IS_SYSWIDE())
#define __switch_to(prev,next,last) do { \
IA64_ACCOUNT_ON_SWITCH(prev, next); \
...
...@@ -335,20 +335,6 @@
#define __ARCH_WANT_SYS_RT_SIGACTION
#define __ARCH_WANT_SYS_RT_SIGSUSPEND
#ifdef CONFIG_IA32_SUPPORT
# define __ARCH_WANT_SYS_FADVISE64
# define __ARCH_WANT_SYS_GETPGRP
# define __ARCH_WANT_SYS_LLSEEK
# define __ARCH_WANT_SYS_NICE
# define __ARCH_WANT_SYS_OLD_GETRLIMIT
# define __ARCH_WANT_SYS_OLDUMOUNT
# define __ARCH_WANT_SYS_PAUSE
# define __ARCH_WANT_SYS_SIGPENDING
# define __ARCH_WANT_SYS_SIGPROCMASK
# define __ARCH_WANT_COMPAT_SYS_RT_SIGSUSPEND
# define __ARCH_WANT_COMPAT_SYS_TIME
#endif
#if !defined(__ASSEMBLY__) && !defined(ASSEMBLER)
#include <linux/types.h>
...
...@@ -30,20 +30,11 @@ static unsigned signal_class[] = {
int audit_classify_arch(int arch)
{
#ifdef CONFIG_IA32_SUPPORT
if (arch == AUDIT_ARCH_I386)
return 1;
#endif
return 0;
}
int audit_classify_syscall(int abi, unsigned syscall)
{
#ifdef CONFIG_IA32_SUPPORT
extern int ia32_classify_syscall(unsigned);
if (abi == AUDIT_ARCH_I386)
return ia32_classify_syscall(syscall);
#endif
switch(syscall) {
case __NR_open:
return 2;
...@@ -58,18 +49,6 @@ int audit_classify_syscall(int abi, unsigned syscall)
static int __init audit_classes_init(void)
{
#ifdef CONFIG_IA32_SUPPORT
extern __u32 ia32_dir_class[];
extern __u32 ia32_write_class[];
extern __u32 ia32_read_class[];
extern __u32 ia32_chattr_class[];
extern __u32 ia32_signal_class[];
audit_register_class(AUDIT_CLASS_WRITE_32, ia32_write_class);
audit_register_class(AUDIT_CLASS_READ_32, ia32_read_class);
audit_register_class(AUDIT_CLASS_DIR_WRITE_32, ia32_dir_class);
audit_register_class(AUDIT_CLASS_CHATTR_32, ia32_chattr_class);
audit_register_class(AUDIT_CLASS_SIGNAL_32, ia32_signal_class);
#endif
audit_register_class(AUDIT_CLASS_WRITE, write_class);
audit_register_class(AUDIT_CLASS_READ, read_class);
audit_register_class(AUDIT_CLASS_DIR_WRITE, dir_class);
...
...@@ -71,15 +71,6 @@ ENTRY(ia64_execve)
add out3=16,sp // regs
br.call.sptk.many rp=sys_execve
.ret0:
#ifdef CONFIG_IA32_SUPPORT
/*
* Check if we're returning to ia32 mode. If so, we need to restore ia32 registers
* from pt_regs.
*/
adds r16=PT(CR_IPSR)+16,sp
;;
ld8 r16=[r16]
#endif
cmp4.ge p6,p7=r8,r0
mov ar.pfs=loc1 // restore ar.pfs
sxt4 r8=r8 // return 64-bit result
...@@ -108,12 +99,6 @@ ENTRY(ia64_execve)
ldf.fill f23=[sp]; ldf.fill f24=[sp]; mov f25=f0
ldf.fill f26=[sp]; ldf.fill f27=[sp]; mov f28=f0
ldf.fill f29=[sp]; ldf.fill f30=[sp]; mov f31=f0
#ifdef CONFIG_IA32_SUPPORT
tbit.nz p6,p0=r16, IA64_PSR_IS_BIT
movl loc0=ia64_ret_from_ia32_execve
;;
(p6) mov rp=loc0
#endif
br.ret.sptk.many rp
END(ia64_execve)
...@@ -848,30 +833,6 @@ __paravirt_work_processed_syscall:
br.cond.sptk.many rbs_switch // B
END(__paravirt_leave_syscall)
#ifdef __IA64_ASM_PARAVIRTUALIZED_NATIVE
#ifdef CONFIG_IA32_SUPPORT
GLOBAL_ENTRY(ia64_ret_from_ia32_execve)
PT_REGS_UNWIND_INFO(0)
adds r2=PT(R8)+16,sp // r2 = &pt_regs.r8
adds r3=PT(R10)+16,sp // r3 = &pt_regs.r10
;;
.mem.offset 0,0
st8.spill [r2]=r8 // store return value in slot for r8 and set unat bit
.mem.offset 8,0
st8.spill [r3]=r0 // clear error indication in slot for r10 and set unat bit
#ifdef CONFIG_PARAVIRT
;;
// don't fall through, ia64_leave_kernel may be #define'd
br.cond.sptk.few ia64_leave_kernel
;;
#endif /* CONFIG_PARAVIRT */
END(ia64_ret_from_ia32_execve)
#ifndef CONFIG_PARAVIRT
// fall through
#endif
#endif /* CONFIG_IA32_SUPPORT */
#endif /* __IA64_ASM_PARAVIRTUALIZED_NATIVE */
GLOBAL_ENTRY(__paravirt_leave_kernel)
PT_REGS_UNWIND_INFO(0)
/*
...
...@@ -49,7 +49,6 @@
#include <asm/asmmacro.h>
#include <asm/break.h>
#include <asm/ia32.h>
#include <asm/kregs.h>
#include <asm/asm-offsets.h>
#include <asm/pgtable.h>
...@@ -1386,28 +1385,6 @@ END(ia32_exception)
// 0x6a00 Entry 46 (size 16 bundles) IA-32 Intercept (30,31,59,70,71)
ENTRY(ia32_intercept)
DBG_FAULT(46)
#ifdef CONFIG_IA32_SUPPORT
mov r31=pr
MOV_FROM_ISR(r16)
;;
extr.u r17=r16,16,8 // get ISR.code
mov r18=ar.eflag
MOV_FROM_IIM(r19) // old eflag value
;;
cmp.ne p6,p0=2,r17
(p6) br.cond.spnt 1f // not a system flag fault
xor r16=r18,r19
;;
extr.u r17=r16,18,1 // get the eflags.ac bit
;;
cmp.eq p6,p0=0,r17
(p6) br.cond.spnt 1f // eflags.ac bit didn't change
;;
mov pr=r31,-1 // restore predicate registers
RFI
1:
#endif // CONFIG_IA32_SUPPORT
FAULT(46)
END(ia32_intercept)
...@@ -1416,12 +1393,7 @@ END(ia32_intercept)
// 0x6b00 Entry 47 (size 16 bundles) IA-32 Interrupt (74)
ENTRY(ia32_interrupt)
DBG_FAULT(47)
#ifdef CONFIG_IA32_SUPPORT
mov r31=pr
br.sptk.many dispatch_to_ia32_handler
#else
FAULT(47)
#endif
END(ia32_interrupt)
.org ia64_ivt+0x6c00
...@@ -1715,89 +1687,3 @@ ENTRY(dispatch_illegal_op_fault)
(p6) br.call.dpnt.many b6=b6 // call returns to ia64_leave_kernel
br.sptk.many ia64_leave_kernel
END(dispatch_illegal_op_fault)
#ifdef CONFIG_IA32_SUPPORT
/*
* There is no particular reason for this code to be here, other than that
* there happens to be space here that would go unused otherwise. If this
* fault ever gets "unreserved", simply move the following code to a more
* suitable spot...
*/
// IA32 interrupt entry point
ENTRY(dispatch_to_ia32_handler)
SAVE_MIN
;;
MOV_FROM_ISR(r14)
SSM_PSR_IC_AND_DEFAULT_BITS_AND_SRLZ_I(r3, r24)
// guarantee that interruption collection is on
;;
SSM_PSR_I(p15, p15, r3)
adds r3=8,r2 // Base pointer for SAVE_REST
;;
SAVE_REST
;;
mov r15=0x80
shr r14=r14,16 // Get interrupt number
;;
cmp.ne p6,p0=r14,r15
(p6) br.call.dpnt.many b6=non_ia32_syscall
adds r14=IA64_PT_REGS_R8_OFFSET + 16,sp // 16 byte hole per SW conventions
adds r15=IA64_PT_REGS_R1_OFFSET + 16,sp
;;
cmp.eq pSys,pNonSys=r0,r0 // set pSys=1, pNonSys=0
ld8 r8=[r14] // get r8
;;
st8 [r15]=r8 // save original EAX in r1 (IA32 procs don't use the GP)
;;
alloc r15=ar.pfs,0,0,6,0 // must be first in an insn group
;;
ld4 r8=[r14],8 // r8 == eax (syscall number)
mov r15=IA32_NR_syscalls
;;
cmp.ltu.unc p6,p7=r8,r15
ld4 out1=[r14],8 // r9 == ecx
;;
ld4 out2=[r14],8 // r10 == edx
;;
ld4 out0=[r14] // r11 == ebx
adds r14=(IA64_PT_REGS_R13_OFFSET) + 16,sp
;;
ld4 out5=[r14],PT(R14)-PT(R13) // r13 == ebp
;;
ld4 out3=[r14],PT(R15)-PT(R14) // r14 == esi
adds r2=TI_FLAGS+IA64_TASK_SIZE,r13
;;
ld4 out4=[r14] // r15 == edi
movl r16=ia32_syscall_table
;;
(p6) shladd r16=r8,3,r16 // force ni_syscall if not valid syscall number
ld4 r2=[r2] // r2 = current_thread_info()->flags
;;
ld8 r16=[r16]
and r2=_TIF_SYSCALL_TRACEAUDIT,r2 // mask trace or audit
;;
mov b6=r16
movl r15=ia32_ret_from_syscall
cmp.eq p8,p0=r2,r0
;;
mov rp=r15
(p8) br.call.sptk.many b6=b6
br.cond.sptk ia32_trace_syscall
non_ia32_syscall:
alloc r15=ar.pfs,0,0,2,0
mov out0=r14 // interrupt #
add out1=16,sp // pointer to pt_regs
;; // avoid WAW on CFM
br.call.sptk.many rp=ia32_bad_interrupt
.ret1: movl r15=ia64_leave_kernel
;;
mov rp=r15
br.ret.sptk.many rp
END(dispatch_to_ia32_handler)
#endif /* CONFIG_IA32_SUPPORT */
...@@ -33,7 +33,6 @@
#include <asm/cpu.h>
#include <asm/delay.h>
#include <asm/elf.h>
#include <asm/ia32.h>
#include <asm/irq.h>
#include <asm/kexec.h>
#include <asm/pgalloc.h>
...@@ -358,11 +357,6 @@ ia64_save_extra (struct task_struct *task)
if (info & PFM_CPUINFO_SYST_WIDE)
pfm_syst_wide_update_task(task, info, 0);
#endif
#ifdef CONFIG_IA32_SUPPORT
if (IS_IA32_PROCESS(task_pt_regs(task)))
ia32_save_state(task);
#endif
}
void
...@@ -383,11 +377,6 @@ ia64_load_extra (struct task_struct *task)
if (info & PFM_CPUINFO_SYST_WIDE)
pfm_syst_wide_update_task(task, info, 1);
#endif
#ifdef CONFIG_IA32_SUPPORT
if (IS_IA32_PROCESS(task_pt_regs(task)))
ia32_load_state(task);
#endif
}
/*
...@@ -426,7 +415,7 @@ copy_thread(unsigned long clone_flags,
unsigned long user_stack_base, unsigned long user_stack_size,
struct task_struct *p, struct pt_regs *regs)
{
extern char ia64_ret_from_clone, ia32_ret_from_clone;
extern char ia64_ret_from_clone;
struct switch_stack *child_stack, *stack;
unsigned long rbs, child_rbs, rbs_size;
struct pt_regs *child_ptregs;
...@@ -457,7 +446,7 @@ copy_thread(unsigned long clone_flags,
memcpy((void *) child_rbs, (void *) rbs, rbs_size);
if (likely(user_mode(child_ptregs))) {
if ((clone_flags & CLONE_SETTLS) && !IS_IA32_PROCESS(regs)) if (clone_flags & CLONE_SETTLS)
child_ptregs->r13 = regs->r16; /* see sys_clone2() in entry.S */ child_ptregs->r13 = regs->r16; /* see sys_clone2() in entry.S */
if (user_stack_base) { if (user_stack_base) {
child_ptregs->r12 = user_stack_base + user_stack_size - 16; child_ptregs->r12 = user_stack_base + user_stack_size - 16;
...@@ -477,10 +466,7 @@ copy_thread(unsigned long clone_flags, ...@@ -477,10 +466,7 @@ copy_thread(unsigned long clone_flags,
child_ptregs->r13 = (unsigned long) p; /* set `current' pointer */ child_ptregs->r13 = (unsigned long) p; /* set `current' pointer */
} }
child_stack->ar_bspstore = child_rbs + rbs_size; child_stack->ar_bspstore = child_rbs + rbs_size;
if (IS_IA32_PROCESS(regs)) child_stack->b0 = (unsigned long) &ia64_ret_from_clone;
child_stack->b0 = (unsigned long) &ia32_ret_from_clone;
else
child_stack->b0 = (unsigned long) &ia64_ret_from_clone;
/* copy parts of thread_struct: */ /* copy parts of thread_struct: */
p->thread.ksp = (unsigned long) child_stack - 16; p->thread.ksp = (unsigned long) child_stack - 16;
...@@ -515,22 +501,6 @@ copy_thread(unsigned long clone_flags, ...@@ -515,22 +501,6 @@ copy_thread(unsigned long clone_flags,
p->thread.flags = ((current->thread.flags & ~THREAD_FLAGS_TO_CLEAR) p->thread.flags = ((current->thread.flags & ~THREAD_FLAGS_TO_CLEAR)
| THREAD_FLAGS_TO_SET); | THREAD_FLAGS_TO_SET);
ia64_drop_fpu(p); /* don't pick up stale state from a CPU's fph */ ia64_drop_fpu(p); /* don't pick up stale state from a CPU's fph */
#ifdef CONFIG_IA32_SUPPORT
/*
* If we're cloning an IA32 task then save the IA32 extra
* state from the current task to the new task
*/
if (IS_IA32_PROCESS(task_pt_regs(current))) {
ia32_save_state(p);
if (clone_flags & CLONE_SETTLS)
retval = ia32_clone_tls(p, child_ptregs);
/* Copy partially mapped page list */
if (!retval)
retval = ia32_copy_ia64_partial_page_list(p,
clone_flags);
}
#endif
#ifdef CONFIG_PERFMON #ifdef CONFIG_PERFMON
if (current->thread.pfm_context) if (current->thread.pfm_context)
...@@ -704,15 +674,6 @@ EXPORT_SYMBOL(kernel_thread); ...@@ -704,15 +674,6 @@ EXPORT_SYMBOL(kernel_thread);
int int
kernel_thread_helper (int (*fn)(void *), void *arg) kernel_thread_helper (int (*fn)(void *), void *arg)
{ {
#ifdef CONFIG_IA32_SUPPORT
if (IS_IA32_PROCESS(task_pt_regs(current))) {
/* A kernel thread is always a 64-bit process. */
current->thread.map_base = DEFAULT_MAP_BASE;
current->thread.task_size = DEFAULT_TASK_SIZE;
ia64_set_kr(IA64_KR_IO_BASE, current->thread.old_iob);
ia64_set_kr(IA64_KR_TSSD, current->thread.old_k1);
}
#endif
return (*fn)(arg); return (*fn)(arg);
} }
...@@ -725,14 +686,6 @@ flush_thread (void) ...@@ -725,14 +686,6 @@ flush_thread (void)
/* drop floating-point and debug-register state if it exists: */ /* drop floating-point and debug-register state if it exists: */
current->thread.flags &= ~(IA64_THREAD_FPH_VALID | IA64_THREAD_DBG_VALID); current->thread.flags &= ~(IA64_THREAD_FPH_VALID | IA64_THREAD_DBG_VALID);
ia64_drop_fpu(current); ia64_drop_fpu(current);
#ifdef CONFIG_IA32_SUPPORT
if (IS_IA32_PROCESS(task_pt_regs(current))) {
ia32_drop_ia64_partial_page_list(current);
current->thread.task_size = IA32_PAGE_OFFSET;
set_fs(USER_DS);
memset(current->thread.tls_array, 0, sizeof(current->thread.tls_array));
}
#endif
} }
/* /*
...@@ -753,8 +706,6 @@ exit_thread (void) ...@@ -753,8 +706,6 @@ exit_thread (void)
if (current->thread.flags & IA64_THREAD_DBG_VALID) if (current->thread.flags & IA64_THREAD_DBG_VALID)
pfm_release_debug_registers(current); pfm_release_debug_registers(current);
#endif #endif
if (IS_IA32_PROCESS(task_pt_regs(current)))
ia32_drop_ia64_partial_page_list(current);
} }
unsigned long unsigned long
......
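Every call site deleted above keyed off the same predicate from the now-removed <asm/ia32.h>. For reference, it tested the psr.is ("instruction set") bit in the task's saved processor status word, roughly:

    /* Paraphrased from the era's <asm/ia32.h>: a task counted as IA-32
     * when its saved psr said the CPU was executing x86 instructions. */
    #define IS_IA32_PROCESS(regs)   (ia64_psr(regs)->is != 0)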
@@ -1250,13 +1250,8 @@ syscall_trace_enter (long arg0, long arg1, long arg2, long arg3,
    long syscall;
    int arch;
    if (IS_IA32_PROCESS(&regs)) {
        syscall = regs.r1;
        arch = AUDIT_ARCH_I386;
    } else {
        syscall = regs.r15;
        arch = AUDIT_ARCH_IA64;
    }
    syscall = regs.r15;
    arch = AUDIT_ARCH_IA64;
    audit_syscall_entry(arch, syscall, arg0, arg1, arg2, arg3);
}
@@ -2172,11 +2167,6 @@ static const struct user_regset_view user_ia64_view = {
const struct user_regset_view *task_user_regset_view(struct task_struct *tsk)
{
#ifdef CONFIG_IA32_SUPPORT
    extern const struct user_regset_view user_ia32_view;
    if (IS_IA32_PROCESS(task_pt_regs(tsk)))
        return &user_ia32_view;
#endif
    return &user_ia64_view;
}
...
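With the AUDIT_ARCH_I386 branch gone from syscall_trace_enter() (apparently arch/ia64/kernel/ptrace.c), the audit ABI tag is constant. A minimal sketch of the surviving shape — the helper name is invented, and it assumes the audit_syscall_entry() signature of this kernel generation:

    /* All remaining tasks are native, so audit always reports the ia64
     * ABI and reads the syscall number from r15. */
    static void audit_enter_sketch(struct pt_regs *regs, long a0, long a1,
                                   long a2, long a3)
    {
        if (unlikely(current->audit_context))
            audit_syscall_entry(AUDIT_ARCH_IA64, regs->r15,
                                a0, a1, a2, a3);
    }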
@@ -46,7 +46,6 @@
#include <linux/kexec.h>
#include <linux/crash_dump.h>
#include <asm/ia32.h>
#include <asm/machvec.h>
#include <asm/mca.h>
#include <asm/meminit.h>
@@ -1016,10 +1015,6 @@ cpu_init (void)
    ia64_mmu_init(ia64_imva(cpu_data));
    ia64_mca_cpu_init(ia64_imva(cpu_data));
#ifdef CONFIG_IA32_SUPPORT
    ia32_cpu_init();
#endif
    /* Clear ITC to eliminate sched_clock() overflows in human time. */
    ia64_set_itc(0);
...
@@ -21,7 +21,6 @@
#include <linux/unistd.h>
#include <linux/wait.h>
#include <asm/ia32.h>
#include <asm/intrinsics.h>
#include <asm/uaccess.h>
#include <asm/rse.h>
@@ -425,14 +424,8 @@ static long
handle_signal (unsigned long sig, struct k_sigaction *ka, siginfo_t *info, sigset_t *oldset,
           struct sigscratch *scr)
{
    if (IS_IA32_PROCESS(&scr->pt)) {
        /* send signal to IA-32 process */
        if (!ia32_setup_frame1(sig, ka, info, oldset, &scr->pt))
            return 0;
    } else
        /* send signal to IA-64 process */
        if (!setup_frame(sig, ka, info, oldset, scr))
            return 0;
    if (!setup_frame(sig, ka, info, oldset, scr))
        return 0;
    spin_lock_irq(&current->sighand->siglock);
    sigorsets(&current->blocked, &current->blocked, &ka->sa.sa_mask);
@@ -462,7 +455,6 @@ ia64_do_signal (struct sigscratch *scr, long in_syscall)
    siginfo_t info;
    long restart = in_syscall;
    long errno = scr->pt.r8;
#   define ERR_CODE(c)  (IS_IA32_PROCESS(&scr->pt) ? -(c) : (c))
    /*
     * In the ia64_leave_kernel code path, we want the common case to go fast, which
@@ -490,14 +482,7 @@ ia64_do_signal (struct sigscratch *scr, long in_syscall)
     * inferior call), thus it's important to check for restarting _after_
     * get_signal_to_deliver().
     */
    if (IS_IA32_PROCESS(&scr->pt)) {
        if (in_syscall) {
            if (errno >= 0)
                restart = 0;
            else
                errno = -errno;
        }
    } else if ((long) scr->pt.r10 != -1)
    if ((long) scr->pt.r10 != -1)
        /*
         * A system call has to be restarted only if one of the error codes
         * ERESTARTNOHAND, ERESTARTSYS, or ERESTARTNOINTR is returned. If r10
@@ -513,22 +498,18 @@ ia64_do_signal (struct sigscratch *scr, long in_syscall)
            switch (errno) {
                  case ERESTART_RESTARTBLOCK:
                  case ERESTARTNOHAND:
                scr->pt.r8 = ERR_CODE(EINTR);
                scr->pt.r8 = EINTR;
                /* note: scr->pt.r10 is already -1 */
                break;
                  case ERESTARTSYS:
                if ((ka.sa.sa_flags & SA_RESTART) == 0) {
                    scr->pt.r8 = ERR_CODE(EINTR);
                    scr->pt.r8 = EINTR;
                    /* note: scr->pt.r10 is already -1 */
                    break;
                }
                  case ERESTARTNOINTR:
                if (IS_IA32_PROCESS(&scr->pt)) {
                    scr->pt.r8 = scr->pt.r1;
                    scr->pt.cr_iip -= 2;
                } else
                    ia64_decrement_ip(&scr->pt);
                ia64_decrement_ip(&scr->pt);
                restart = 0; /* don't restart twice if handle_signal() fails... */
            }
        }
@@ -555,21 +536,14 @@ ia64_do_signal (struct sigscratch *scr, long in_syscall)
        if (errno == ERESTARTNOHAND || errno == ERESTARTSYS || errno == ERESTARTNOINTR
            || errno == ERESTART_RESTARTBLOCK)
        {
            if (IS_IA32_PROCESS(&scr->pt)) {
                scr->pt.r8 = scr->pt.r1;
                scr->pt.cr_iip -= 2;
                if (errno == ERESTART_RESTARTBLOCK)
                    scr->pt.r8 = 0; /* x86 version of __NR_restart_syscall */
            } else {
                /*
                 * Note: the syscall number is in r15 which is saved in
                 * pt_regs so all we need to do here is adjust ip so that
                 * the "break" instruction gets re-executed.
                 */
                ia64_decrement_ip(&scr->pt);
                if (errno == ERESTART_RESTARTBLOCK)
                    scr->pt.r15 = __NR_restart_syscall;
            }
            /*
             * Note: the syscall number is in r15 which is saved in
             * pt_regs so all we need to do here is adjust ip so that
             * the "break" instruction gets re-executed.
             */
            ia64_decrement_ip(&scr->pt);
            if (errno == ERESTART_RESTARTBLOCK)
                scr->pt.r15 = __NR_restart_syscall;
        }
    }
...
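The surviving restart path (apparently arch/ia64/kernel/signal.c) leans entirely on ia64_decrement_ip(). For context, that helper has to respect IA-64 bundling: three instruction slots per 16-byte bundle, and slot 2 of an MLX bundle cannot be rfi'd to. A lightly simplified paraphrase of the helper from the era's ptrace.c — treat it as a sketch, not verbatim source:

    /* Step ip back one slot so the syscall's "break" re-executes. */
    void decrement_ip_sketch(struct pt_regs *regs)
    {
        unsigned long w0, ri = ia64_psr(regs)->ri - 1;

        if (ia64_psr(regs)->ri == 0) {
            regs->cr_iip -= 16;     /* back into the previous bundle */
            ri = 2;
            get_user(w0, (char __user *) regs->cr_iip + 0);
            if (((w0 >> 1) & 0xf) == IA64_MLX_TEMPLATE)
                ri = 1; /* slot 2 of an MLX bundle is not rfi-able */
        }
        ia64_psr(regs)->ri = ri;
    }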
@@ -44,7 +44,6 @@
#include <asm/cache.h>
#include <asm/current.h>
#include <asm/delay.h>
#include <asm/ia32.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/machvec.h>
@@ -443,10 +442,6 @@ smp_callin (void)
    calibrate_delay();
    local_cpu_data->loops_per_jiffy = loops_per_jiffy;
#ifdef CONFIG_IA32_SUPPORT
    ia32_gdt_init();
#endif
    /*
     * Allow the master to continue.
     */
...
@@ -19,7 +19,6 @@
#include <linux/kdebug.h>
#include <asm/fpswa.h>
#include <asm/ia32.h>
#include <asm/intrinsics.h>
#include <asm/processor.h>
#include <asm/uaccess.h>
@@ -626,10 +625,6 @@ ia64_fault (unsigned long vector, unsigned long isr, unsigned long ifa,
        break;
          case 45:
#ifdef CONFIG_IA32_SUPPORT
        if (ia32_exception(&regs, isr) == 0)
            return;
#endif
        printk(KERN_ERR "Unexpected IA-32 exception (Trap 45)\n");
        printk(KERN_ERR "  iip - 0x%lx, ifa - 0x%lx, isr - 0x%lx\n",
               iip, ifa, isr);
@@ -637,10 +632,6 @@ ia64_fault (unsigned long vector, unsigned long isr, unsigned long ifa,
        break;
          case 46:
#ifdef CONFIG_IA32_SUPPORT
        if (ia32_intercept(&regs, isr) == 0)
            return;
#endif
        printk(KERN_ERR "Unexpected IA-32 intercept trap (Trap 46)\n");
        printk(KERN_ERR "  iip - 0x%lx, ifa - 0x%lx, isr - 0x%lx, iim - 0x%lx\n",
               iip, ifa, isr, iim);
...
@@ -22,7 +22,6 @@
#include <linux/kexec.h>
#include <asm/dma.h>
#include <asm/ia32.h>
#include <asm/io.h>
#include <asm/machvec.h>
#include <asm/numa.h>
@@ -668,10 +667,6 @@ mem_init (void)
        fsyscall_table[i] = sys_call_table[i] | 1;
    }
    setup_gate();
#ifdef CONFIG_IA32_SUPPORT
    ia32_mem_init();
#endif
}
#ifdef CONFIG_MEMORY_HOTPLUG
...
@@ -58,11 +58,6 @@ __HCALL2(xen_ptcga, HYPERPRIVOP_PTC_GA)
__HCALL2(xen_set_rr, HYPERPRIVOP_SET_RR)
__HCALL2(xen_set_kr, HYPERPRIVOP_SET_KR)
#ifdef CONFIG_IA32_SUPPORT
__HCALL0(xen_get_eflag, HYPERPRIVOP_GET_EFLAG)
__HCALL1(xen_set_eflag, HYPERPRIVOP_SET_EFLAG) // refer SDM vol1 3.1.8
#endif /* CONFIG_IA32_SUPPORT */
GLOBAL_ENTRY(xen_set_rr0_to_rr4)
    mov r8=r32
    mov r9=r33
...
@@ -301,11 +301,6 @@ static void xen_setreg(int regnum, unsigned long val)
    case _IA64_REG_AR_KR0 ... _IA64_REG_AR_KR7:
        xen_set_kr(regnum - _IA64_REG_AR_KR0, val);
        break;
#ifdef CONFIG_IA32_SUPPORT
    case _IA64_REG_AR_EFLAG:
        xen_set_eflag(val);
        break;
#endif
    case _IA64_REG_AR_ITC:
        xen_set_itc(val);
        break;
@@ -332,11 +327,6 @@ static unsigned long xen_getreg(int regnum)
    case _IA64_REG_PSR:
        res = xen_get_psr();
        break;
#ifdef CONFIG_IA32_SUPPORT
    case _IA64_REG_AR_EFLAG:
        res = xen_get_eflag();
        break;
#endif
    case _IA64_REG_AR_ITC:
        res = xen_get_itc();
        break;
@@ -710,9 +700,6 @@ extern unsigned long xen_getreg(int regnum);
__DEFINE_FUNC(getreg,
          __DEFINE_GET_REG(PSR, PSR)
#ifdef CONFIG_IA32_SUPPORT
          __DEFINE_GET_REG(AR_EFLAG, EFLAG)
#endif
          /* get_itc */
          "mov r2 = " __stringify(_IA64_REG_AR_ITC) "\n"
@@ -789,9 +776,6 @@ __DEFINE_FUNC(setreg,
          ";;\n"
          "(p6) br.cond.spnt xen_set_itc\n"
#ifdef CONFIG_IA32_SUPPORT
          __DEFINE_SET_REG(AR_EFLAG, SET_EFLAG)
#endif
          __DEFINE_SET_REG(CR_TPR, SET_TPR)
          __DEFINE_SET_REG(CR_EOI, EOI)
...
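The dropped EFLAG cases existed because the x86 EFLAGS register is architecturally mapped onto the IA-64 application register ar.eflag (ar24), a privileged resource a paravirtualized guest had to reach via hyperprivops. What remains is plain native dispatch; a simplified, hypothetical sketch of the surviving xen_setreg() shape (helper name invented, default path assumed):

    /* Kernel registers and the ITC still go through hyperprivops;
     * everything else falls back to the native instruction. */
    static void xen_setreg_sketch(int regnum, unsigned long val)
    {
        switch (regnum) {
        case _IA64_REG_AR_KR0 ... _IA64_REG_AR_KR7:
            xen_set_kr(regnum - _IA64_REG_AR_KR0, val);
            break;
        case _IA64_REG_AR_ITC:
            xen_set_itc(val);
            break;
        default:
            ia64_native_setreg(regnum, val);
            break;
        }
    }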
@@ -21,8 +21,6 @@
   you why the ifdefs are needed?  Think about it again. -AK */
#ifdef CONFIG_X86_64
# define INPUT_COMPAT_TEST is_compat_task()
#elif defined(CONFIG_IA64)
# define INPUT_COMPAT_TEST IS_IA32_PROCESS(task_pt_regs(current))
#elif defined(CONFIG_S390)
# define INPUT_COMPAT_TEST test_thread_flag(TIF_31BIT)
#elif defined(CONFIG_MIPS)
...
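The drivers/input hunk drops ia64 from the per-arch compat test, since an ia64 kernel can no longer host 32-bit x86 readers. For context, the macro decides which event record layout a reading task gets — roughly as in drivers/input/input-compat.h of the period (paraphrased, not part of this commit):

    /* A compat reader receives the 32-bit input_event layout,
     * a native reader the 64-bit one. */
    static inline size_t input_event_size(void)
    {
        return INPUT_COMPAT_TEST ? sizeof(struct input_event_compat)
                                 : sizeof(struct input_event);
    }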