Commit 5f65098e authored by Stephen Lord

Merge kernel.bkbits.net:/home/repos/linux-2.5

into kernel.bkbits.net:/home/lord/xfs-2.6
parents 59e0f831 164cf89e
@@ -380,7 +380,7 @@ void enable_irq(unsigned int irq)
 	spin_lock_irqsave(&desc->lock, flags);
 	switch (desc->depth) {
 	case 1: {
-		unsigned int status = desc->status & ~(IRQ_DISABLED | IRQ_INPROGRESS);
+		unsigned int status = desc->status & ~IRQ_DISABLED;
 		desc->status = status;
 		if ((status & (IRQ_PENDING | IRQ_REPLAY)) == IRQ_PENDING) {
 			desc->status = status | IRQ_REPLAY;
...
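The enable_irq() change above narrows the mask so that re-enabling an IRQ no longer clears IRQ_INPROGRESS behind a running handler. A minimal userspace model of the replay decision; the flag values are invented for illustration and are not the kernel's actual bit assignments:

```c
#include <stdio.h>

/* Illustrative flag values only, not the kernel's real bit layout. */
#define IRQ_INPROGRESS 0x01	/* a handler is currently running */
#define IRQ_DISABLED   0x02	/* the IRQ line is disabled */
#define IRQ_PENDING    0x04	/* an edge arrived while disabled */
#define IRQ_REPLAY     0x08	/* the pending edge was already resent */

int main(void)
{
	unsigned int desc_status = IRQ_DISABLED | IRQ_INPROGRESS | IRQ_PENDING;

	/* New behaviour: clear only IRQ_DISABLED; IRQ_INPROGRESS survives. */
	unsigned int status = desc_status & ~IRQ_DISABLED;

	/* Replay a lost edge only if it is pending and not yet replayed. */
	if ((status & (IRQ_PENDING | IRQ_REPLAY)) == IRQ_PENDING)
		printf("would set IRQ_REPLAY and resend the IRQ\n");

	printf("IRQ_INPROGRESS still set: %s\n",
	       (status & IRQ_INPROGRESS) ? "yes" : "no");
	return 0;
}
```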
@@ -62,14 +62,13 @@ extern unsigned long *ia32_gdt;
 struct page *
 ia32_install_shared_page (struct vm_area_struct *vma, unsigned long address, int no_share)
 {
-	struct page *pg = ia32_shared_page[(address - vma->vm_start)/PAGE_SIZE];
+	struct page *pg = ia32_shared_page[smp_processor_id()];
 	get_page(pg);
 	return pg;
 }
 
 static struct vm_operations_struct ia32_shared_page_vm_ops = {
-	.nopage =ia32_install_shared_page
+	.nopage = ia32_install_shared_page
 };
 
 void
@@ -78,7 +77,7 @@ ia64_elf32_init (struct pt_regs *regs)
 	struct vm_area_struct *vma;
 
 	/*
-	 * Map GDT and TSS below 4GB, where the processor can find them.  We need to map
+	 * Map GDT below 4GB, where the processor can find it.  We need to map
 	 * it with privilege level 3 because the IVE uses non-privileged accesses to these
 	 * tables.  IA-32 segmentation is used to protect against IA-32 accesses to them.
 	 */
@@ -86,7 +85,7 @@ ia64_elf32_init (struct pt_regs *regs)
 	if (vma) {
 		vma->vm_mm = current->mm;
 		vma->vm_start = IA32_GDT_OFFSET;
-		vma->vm_end = vma->vm_start + max(PAGE_SIZE, 2*IA32_PAGE_SIZE);
+		vma->vm_end = vma->vm_start + PAGE_SIZE;
 		vma->vm_page_prot = PAGE_SHARED;
 		vma->vm_flags = VM_READ|VM_MAYREAD;
 		vma->vm_ops = &ia32_shared_page_vm_ops;
...
@@ -32,7 +32,7 @@ END(ia32_execve)
 
 ENTRY(ia32_clone)
 	.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(2)
-	alloc r16=ar.pfs,2,2,4,0
+	alloc r16=ar.pfs,5,2,6,0
 	DO_SAVE_SWITCH_STACK
 	mov loc0=rp
 	mov loc1=r16				// save ar.pfs across do_fork
@@ -41,6 +41,8 @@ ENTRY(ia32_clone)
 	mov out3=16				// stacksize (compensates for 16-byte scratch area)
 	adds out2=IA64_SWITCH_STACK_SIZE+16,sp	// out2 = &regs
 	dep out0=0,in0,CLONE_IDLETASK_BIT,1	// out0 = clone_flags & ~CLONE_IDLETASK
+	zxt4 out4=in2				// out4 = parent_tidptr
+	zxt4 out5=in4				// out5 = child_tidptr
 	br.call.sptk.many rp=do_fork
 .ret0:	.restore sp
 	adds sp=IA64_SWITCH_STACK_SIZE,sp	// pop the switch stack
@@ -437,13 +439,13 @@ ia32_syscall_table:
 	data8 sys_ni_syscall	  /* 235 */
 	data8 sys_ni_syscall
 	data8 sys_ni_syscall
-	data8 sys_ni_syscall
+	data8 sys_tkill
 	data8 sys_ni_syscall
 	data8 compat_sys_futex	  /* 240 */
 	data8 compat_sys_sched_setaffinity
 	data8 compat_sys_sched_getaffinity
-	data8 sys_ni_syscall
-	data8 sys_ni_syscall
+	data8 sys32_set_thread_area
+	data8 sys32_get_thread_area
 	data8 sys_ni_syscall	  /* 245 */
 	data8 sys_ni_syscall
 	data8 sys_ni_syscall
@@ -451,13 +453,13 @@ ia32_syscall_table:
 	data8 sys_ni_syscall
 	data8 sys_ni_syscall	  /* 250 */
 	data8 sys_ni_syscall
-	data8 sys_ni_syscall
+	data8 sys_exit_group
 	data8 sys_ni_syscall
 	data8 sys_epoll_create
 	data8 sys32_epoll_ctl	  /* 255 */
 	data8 sys32_epoll_wait
-	data8 sys_ni_syscall
-	data8 sys_ni_syscall
+	data8 sys_remap_file_pages
+	data8 sys_set_tid_address
 	data8 sys_ni_syscall
 	data8 sys_ni_syscall	  /* 260 */
 	data8 sys_ni_syscall
...
@@ -82,7 +82,7 @@ read_default_ldt (void * ptr, unsigned long bytecount)
 static int
 write_ldt (void * ptr, unsigned long bytecount, int oldmode)
 {
-	struct ia32_modify_ldt_ldt_s ldt_info;
+	struct ia32_user_desc ldt_info;
 	__u64 entry;
 	int ret;
...
@@ -877,8 +877,6 @@ setup_frame_ia32 (int sig, struct k_sigaction *ka, sigset_t *set, struct pt_regs
 	regs->cr_iip = IA32_SA_HANDLER(ka);
 
 	set_fs(USER_DS);
-	regs->r16 = (__USER_DS << 16) | (__USER_DS); /* ES == DS, GS, FS are zero */
-	regs->r17 = (__USER_DS << 16) | __USER_CS;
 
 #if 0
 	regs->eflags &= ~TF_MASK;
@@ -950,9 +948,6 @@ setup_rt_frame_ia32 (int sig, struct k_sigaction *ka, siginfo_t *info,
 	set_fs(USER_DS);
-	regs->r16 = (__USER_DS << 16) | (__USER_DS); /* ES == DS, GS, FS are zero */
-	regs->r17 = (__USER_DS << 16) | __USER_CS;
 
 #if 0
 	regs->eflags &= ~TF_MASK;
 #endif
...
@@ -23,14 +23,16 @@
 #include <asm/pgtable.h>
 #include <asm/system.h>
 #include <asm/processor.h>
+#include <asm/uaccess.h>
 
 #include "ia32priv.h"
 
 extern void die_if_kernel (char *str, struct pt_regs *regs, long err);
 
 struct exec_domain ia32_exec_domain;
-struct page *ia32_shared_page[(2*IA32_PAGE_SIZE + PAGE_SIZE - 1)/PAGE_SIZE];
-unsigned long *ia32_gdt;
+struct page *ia32_shared_page[NR_CPUS];
+unsigned long *ia32_boot_gdt;
+unsigned long *cpu_gdt_table[NR_CPUS];
 
 static unsigned long
 load_desc (u16 selector)
@@ -43,8 +45,8 @@ load_desc (u16 selector)
 		table = (unsigned long *) IA32_LDT_OFFSET;
 		limit = IA32_LDT_ENTRIES;
 	} else {
-		table = ia32_gdt;
-		limit = IA32_PAGE_SIZE / sizeof(ia32_gdt[0]);
+		table = cpu_gdt_table[smp_processor_id()];
+		limit = IA32_PAGE_SIZE / sizeof(ia32_boot_gdt[0]);
 	}
 	index = selector >> IA32_SEGSEL_INDEX_SHIFT;
 	if (index >= limit)
@@ -66,6 +68,34 @@ ia32_load_segment_descriptors (struct task_struct *task)
 	regs->ar_ssd = load_desc(regs->r17 >> 16);	/* SSD */
 }
 
+int
+ia32_clone_tls (struct task_struct *child, struct pt_regs *childregs)
+{
+	struct desc_struct *desc;
+	struct ia32_user_desc info;
+	int idx;
+
+	if (copy_from_user(&info, (void *)(childregs->r14 & 0xffffffff), sizeof(info)))
+		return -EFAULT;
+	if (LDT_empty(&info))
+		return -EINVAL;
+
+	idx = info.entry_number;
+	if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
+		return -EINVAL;
+
+	desc = child->thread.tls_array + idx - GDT_ENTRY_TLS_MIN;
+	desc->a = LDT_entry_a(&info);
+	desc->b = LDT_entry_b(&info);
+
+	/* XXX: can this be done in a cleaner way? */
+	load_TLS(&child->thread, smp_processor_id());
+	ia32_load_segment_descriptors(child);
+	load_TLS(&current->thread, smp_processor_id());
+
+	return 0;
+}
+
 void
 ia32_save_state (struct task_struct *t)
 {
@@ -83,14 +113,13 @@ ia32_load_state (struct task_struct *t)
 {
 	unsigned long eflag, fsr, fcr, fir, fdr, tssd;
 	struct pt_regs *regs = ia64_task_regs(t);
-	int nr = get_cpu();	/* LDT and TSS depend on CPU number: */
 
 	eflag = t->thread.eflag;
 	fsr = t->thread.fsr;
 	fcr = t->thread.fcr;
 	fir = t->thread.fir;
 	fdr = t->thread.fdr;
-	tssd = load_desc(_TSS(nr));			/* TSSD */
+	tssd = load_desc(_TSS);				/* TSSD */
 
 	ia64_setreg(_IA64_REG_AR_EFLAG, eflag);
 	ia64_setreg(_IA64_REG_AR_FSR, fsr);
@@ -102,8 +131,10 @@ ia32_load_state (struct task_struct *t)
 	ia64_set_kr(IA64_KR_IO_BASE, IA32_IOBASE);
 	ia64_set_kr(IA64_KR_TSSD, tssd);
 
-	regs->r17 = (_TSS(nr) << 48) | (_LDT(nr) << 32) | (__u32) regs->r17;
-	regs->r30 = load_desc(_LDT(nr));		/* LDTD */
+	regs->r17 = (_TSS << 48) | (_LDT << 32) | (__u32) regs->r17;
+	regs->r30 = load_desc(_LDT);			/* LDTD */
+	load_TLS(&t->thread, smp_processor_id());
 	put_cpu();
 }
@@ -113,37 +144,43 @@ ia32_load_state (struct task_struct *t)
 void
 ia32_gdt_init (void)
 {
-	unsigned long *tss;
+	int cpu = smp_processor_id();
+
+	ia32_shared_page[cpu] = alloc_page(GFP_KERNEL);
+	cpu_gdt_table[cpu] = page_address(ia32_shared_page[cpu]);
+
+	/* Copy from the boot cpu's GDT */
+	memcpy(cpu_gdt_table[cpu], ia32_boot_gdt, PAGE_SIZE);
+}
+
+/*
+ * Setup IA32 GDT and TSS
+ */
+void
+ia32_boot_gdt_init (void)
+{
 	unsigned long ldt_size;
-	int nr;
 
 	ia32_shared_page[0] = alloc_page(GFP_KERNEL);
-	ia32_gdt = page_address(ia32_shared_page[0]);
-	tss = ia32_gdt + IA32_PAGE_SIZE/sizeof(ia32_gdt[0]);
-	if (IA32_PAGE_SIZE == PAGE_SIZE) {
-		ia32_shared_page[1] = alloc_page(GFP_KERNEL);
-		tss = page_address(ia32_shared_page[1]);
-	}
+	ia32_boot_gdt = page_address(ia32_shared_page[0]);
+	cpu_gdt_table[0] = ia32_boot_gdt;
 
 	/* CS descriptor in IA-32 (scrambled) format */
-	ia32_gdt[__USER_CS >> 3] = IA32_SEG_DESCRIPTOR(0, (IA32_PAGE_OFFSET-1) >> IA32_PAGE_SHIFT,
-						       0xb, 1, 3, 1, 1, 1, 1);
+	ia32_boot_gdt[__USER_CS >> 3]
+		= IA32_SEG_DESCRIPTOR(0, (IA32_PAGE_OFFSET-1) >> IA32_PAGE_SHIFT,
+				      0xb, 1, 3, 1, 1, 1, 1);
 
 	/* DS descriptor in IA-32 (scrambled) format */
-	ia32_gdt[__USER_DS >> 3] = IA32_SEG_DESCRIPTOR(0, (IA32_PAGE_OFFSET-1) >> IA32_PAGE_SHIFT,
-						       0x3, 1, 3, 1, 1, 1, 1);
+	ia32_boot_gdt[__USER_DS >> 3]
		= IA32_SEG_DESCRIPTOR(0, (IA32_PAGE_OFFSET-1) >> IA32_PAGE_SHIFT,
+				      0x3, 1, 3, 1, 1, 1, 1);
 
+	/* We never change the TSS and LDT descriptors, so we can share them across all CPUs. */
 	ldt_size = PAGE_ALIGN(IA32_LDT_ENTRIES*IA32_LDT_ENTRY_SIZE);
-	for (nr = 0; nr < NR_CPUS; ++nr) {
-		ia32_gdt[_TSS(nr) >> IA32_SEGSEL_INDEX_SHIFT]
-			= IA32_SEG_DESCRIPTOR(IA32_TSS_OFFSET, 235,
-					      0xb, 0, 3, 1, 1, 1, 0);
-		ia32_gdt[_LDT(nr) >> IA32_SEGSEL_INDEX_SHIFT]
-			= IA32_SEG_DESCRIPTOR(IA32_LDT_OFFSET, ldt_size - 1,
-					      0x2, 0, 3, 1, 1, 1, 0);
-	}
+	ia32_boot_gdt[TSS_ENTRY] = IA32_SEG_DESCRIPTOR(IA32_TSS_OFFSET, 235,
						       0xb, 0, 3, 1, 1, 1, 0);
+	ia32_boot_gdt[LDT_ENTRY] = IA32_SEG_DESCRIPTOR(IA32_LDT_OFFSET, ldt_size - 1,
+						       0x2, 0, 3, 1, 1, 1, 0);
 }
 
 /*
...
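load_desc() now walks the current CPU's private GDT copy (cpu_gdt_table[smp_processor_id()]) instead of the single shared ia32_gdt. A standalone sketch of how an IA-32 selector decomposes into the index/table/RPL fields that load_desc() uses; the field layout and the selector values (__USER_CS, __USER_DS, TSS_ENTRY) are taken from this diff:

```c
#include <stdio.h>

/* Field layout of an IA-32 segment selector, as in ia32priv.h in this diff. */
#define IA32_SEGSEL_RPL          (0x3 << 0)
#define IA32_SEGSEL_TI           (0x1 << 2)
#define IA32_SEGSEL_INDEX_SHIFT  3

static void decode(unsigned short sel)
{
	printf("selector 0x%04x: index=%u table=%s rpl=%u\n",
	       sel,
	       sel >> IA32_SEGSEL_INDEX_SHIFT,
	       (sel & IA32_SEGSEL_TI) ? "LDT" : "GDT",
	       sel & IA32_SEGSEL_RPL);
}

int main(void)
{
	decode(0x23);				/* __USER_CS: GDT index 4, RPL 3 */
	decode(0x2b);				/* __USER_DS: GDT index 5, RPL 3 */
	decode(14 << IA32_SEGSEL_INDEX_SHIFT);	/* _TSS: GDT index 14 (TSS_ENTRY) */
	return 0;
}
```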
@@ -10,6 +10,8 @@
 #include <linux/binfmts.h>
 #include <linux/compat.h>
 
+#include <asm/processor.h>
+
 /*
  * 32 bit structures for IA32 support.
  */
@@ -327,15 +329,23 @@ void ia64_elf32_init(struct pt_regs *regs);
 #define __USER_CS	0x23
 #define __USER_DS	0x2B
 
-#define FIRST_TSS_ENTRY 6
-#define FIRST_LDT_ENTRY (FIRST_TSS_ENTRY+1)
-#define _TSS(n) ((((unsigned long) n)<<4)+(FIRST_TSS_ENTRY<<3))
-#define _LDT(n) ((((unsigned long) n)<<4)+(FIRST_LDT_ENTRY<<3))
+/*
+ * The per-cpu GDT has 32 entries: see <asm-i386/segment.h>
+ */
+#define GDT_ENTRIES 32
+
+#define GDT_SIZE	(GDT_ENTRIES * 8)
+
+#define TSS_ENTRY 14
+#define LDT_ENTRY	(TSS_ENTRY + 1)
 
 #define IA32_SEGSEL_RPL		(0x3 << 0)
 #define IA32_SEGSEL_TI		(0x1 << 2)
 #define IA32_SEGSEL_INDEX_SHIFT	3
 
+#define _TSS			((unsigned long) TSS_ENTRY << IA32_SEGSEL_INDEX_SHIFT)
+#define _LDT			((unsigned long) LDT_ENTRY << IA32_SEGSEL_INDEX_SHIFT)
+
 #define IA32_SEG_BASE		16
 #define IA32_SEG_TYPE		40
 #define IA32_SEG_SYS		44
@@ -419,7 +429,42 @@ void ia64_elf32_init(struct pt_regs *regs);
 #define IA32_LDT_ENTRIES	8192		/* Maximum number of LDT entries supported. */
 #define IA32_LDT_ENTRY_SIZE	8		/* The size of each LDT entry. */
 
-struct ia32_modify_ldt_ldt_s {
+#define LDT_entry_a(info) \
+	((((info)->base_addr & 0x0000ffff) << 16) | ((info)->limit & 0x0ffff))
+
+#define LDT_entry_b(info)				\
+	(((info)->base_addr & 0xff000000) |		\
+	(((info)->base_addr & 0x00ff0000) >> 16) |	\
+	((info)->limit & 0xf0000) |			\
+	(((info)->read_exec_only ^ 1) << 9) |		\
+	((info)->contents << 10) |			\
+	(((info)->seg_not_present ^ 1) << 15) |		\
+	((info)->seg_32bit << 22) |			\
+	((info)->limit_in_pages << 23) |		\
+	((info)->useable << 20) |			\
+	0x7100)
+
+#define LDT_empty(info) (			\
+	(info)->base_addr == 0		&&	\
+	(info)->limit == 0		&&	\
+	(info)->contents == 0		&&	\
+	(info)->read_exec_only == 1	&&	\
+	(info)->seg_32bit == 0		&&	\
+	(info)->limit_in_pages == 0	&&	\
+	(info)->seg_not_present == 1	&&	\
+	(info)->useable == 0 )
+
+static inline void
+load_TLS (struct thread_struct *t, unsigned int cpu)
+{
+	extern unsigned long *cpu_gdt_table[NR_CPUS];
+
+	memcpy(cpu_gdt_table[cpu] + GDT_ENTRY_TLS_MIN + 0, &t->tls_array[0], sizeof(long));
+	memcpy(cpu_gdt_table[cpu] + GDT_ENTRY_TLS_MIN + 1, &t->tls_array[1], sizeof(long));
+	memcpy(cpu_gdt_table[cpu] + GDT_ENTRY_TLS_MIN + 2, &t->tls_array[2], sizeof(long));
+}
+
+struct ia32_user_desc {
 	unsigned int entry_number;
 	unsigned int base_addr;
 	unsigned int limit;
...
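LDT_entry_a()/LDT_entry_b() scatter the fields of an ia32_user_desc into the two 32-bit words of an IA-32 descriptor. A self-contained sketch that reproduces the packing from the header above for a flat 4GB segment; the struct mirrors ia32_user_desc and the example values are arbitrary:

```c
#include <stdio.h>

/* Mirrors struct ia32_user_desc and the LDT_entry_a/b macros above. */
struct user_desc {
	unsigned int entry_number;
	unsigned int base_addr;
	unsigned int limit;
	unsigned int seg_32bit:1;
	unsigned int contents:2;
	unsigned int read_exec_only:1;
	unsigned int limit_in_pages:1;
	unsigned int seg_not_present:1;
	unsigned int useable:1;
};

#define LDT_entry_a(info) \
	((((info)->base_addr & 0x0000ffff) << 16) | ((info)->limit & 0x0ffff))

#define LDT_entry_b(info)				\
	(((info)->base_addr & 0xff000000) |		\
	(((info)->base_addr & 0x00ff0000) >> 16) |	\
	((info)->limit & 0xf0000) |			\
	(((info)->read_exec_only ^ 1) << 9) |		\
	((info)->contents << 10) |			\
	(((info)->seg_not_present ^ 1) << 15) |		\
	((info)->seg_32bit << 22) |			\
	((info)->limit_in_pages << 23) |		\
	((info)->useable << 20) |			\
	0x7100)

int main(void)
{
	/* Example: a flat 4GB data segment at base 0, page-granular limit. */
	struct user_desc d = {
		.entry_number   = 6,		/* GDT_ENTRY_TLS_MIN */
		.base_addr      = 0,
		.limit          = 0xfffff,
		.seg_32bit      = 1,
		.limit_in_pages = 1,
	};

	printf("desc.a = 0x%08x\n", (unsigned int) LDT_entry_a(&d));
	printf("desc.b = 0x%08x\n", (unsigned int) LDT_entry_b(&d));
	return 0;
}
```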
@@ -2817,6 +2817,114 @@ sys32_epoll_wait(int epfd, struct epoll_event32 *events, int maxevents,
 	return numevents;
 }
 
+/*
+ * Get a yet unused TLS descriptor index.
+ */
+static int
+get_free_idx (void)
+{
+	struct thread_struct *t = &current->thread;
+	int idx;
+
+	for (idx = 0; idx < GDT_ENTRY_TLS_ENTRIES; idx++)
+		if (desc_empty(t->tls_array + idx))
+			return idx + GDT_ENTRY_TLS_MIN;
+	return -ESRCH;
+}
+
+/*
+ * Set a given TLS descriptor:
+ */
+asmlinkage int
+sys32_set_thread_area (struct ia32_user_desc *u_info)
+{
+	struct thread_struct *t = &current->thread;
+	struct ia32_user_desc info;
+	struct desc_struct *desc;
+	int cpu, idx;
+
+	if (copy_from_user(&info, u_info, sizeof(info)))
+		return -EFAULT;
+	idx = info.entry_number;
+
+	/*
+	 * index -1 means the kernel should try to find and allocate an empty descriptor:
+	 */
+	if (idx == -1) {
+		idx = get_free_idx();
+		if (idx < 0)
+			return idx;
+		if (put_user(idx, &u_info->entry_number))
+			return -EFAULT;
+	}
+
+	if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
+		return -EINVAL;
+
+	desc = t->tls_array + idx - GDT_ENTRY_TLS_MIN;
+
+	cpu = smp_processor_id();
+
+	if (LDT_empty(&info)) {
+		desc->a = 0;
+		desc->b = 0;
+	} else {
+		desc->a = LDT_entry_a(&info);
+		desc->b = LDT_entry_b(&info);
+	}
+	load_TLS(t, cpu);
+	return 0;
+}
+
+/*
+ * Get the current Thread-Local Storage area:
+ */
+
+#define GET_BASE(desc) (			\
+	(((desc)->a >> 16) & 0x0000ffff) |	\
+	(((desc)->b << 16) & 0x00ff0000) |	\
+	( (desc)->b        & 0xff000000) )
+
+#define GET_LIMIT(desc) (			\
+	((desc)->a & 0x0ffff) |			\
+	((desc)->b & 0xf0000) )
+
+#define GET_32BIT(desc)		(((desc)->b >> 22) & 1)
+#define GET_CONTENTS(desc)	(((desc)->b >> 10) & 3)
+#define GET_WRITABLE(desc)	(((desc)->b >>  9) & 1)
+#define GET_LIMIT_PAGES(desc)	(((desc)->b >> 23) & 1)
+#define GET_PRESENT(desc)	(((desc)->b >> 15) & 1)
+#define GET_USEABLE(desc)	(((desc)->b >> 20) & 1)
+
+asmlinkage int
+sys32_get_thread_area (struct ia32_user_desc *u_info)
+{
+	struct ia32_user_desc info;
+	struct desc_struct *desc;
+	int idx;
+
+	if (get_user(idx, &u_info->entry_number))
+		return -EFAULT;
+	if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
+		return -EINVAL;
+
+	desc = current->thread.tls_array + idx - GDT_ENTRY_TLS_MIN;
+
+	info.entry_number = idx;
+	info.base_addr = GET_BASE(desc);
+	info.limit = GET_LIMIT(desc);
+	info.seg_32bit = GET_32BIT(desc);
+	info.contents = GET_CONTENTS(desc);
+	info.read_exec_only = !GET_WRITABLE(desc);
+	info.limit_in_pages = GET_LIMIT_PAGES(desc);
+	info.seg_not_present = !GET_PRESENT(desc);
+	info.useable = GET_USEABLE(desc);
+
+	if (copy_to_user(u_info, &info, sizeof(info)))
+		return -EFAULT;
+	return 0;
+}
+
 #ifdef NOTYET	/* UNTESTED FOR IA64 FROM HERE DOWN */
 
 struct ncp_mount_data32 {
...
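sys32_set_thread_area() and sys32_get_thread_area() mirror the i386 system calls wired into slots 243 and 244 of ia32_syscall_table above. A hedged sketch of how a 32-bit program would exercise the pair; it assumes an i386-ABI toolchain (gcc -m32), the glibc syscall() wrapper, and struct user_desc from <asm/ldt.h>:

```c
/* Build as a 32-bit binary (gcc -m32); a sketch, not a test suite. */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <asm/ldt.h>		/* struct user_desc */

int main(void)
{
	struct user_desc set, get;
	static int tls_data = 42;

	memset(&set, 0, sizeof(set));
	set.entry_number   = -1;	/* -1: kernel picks a free TLS slot */
	set.base_addr      = (unsigned long) &tls_data;
	set.limit          = 0xfffff;
	set.seg_32bit      = 1;
	set.limit_in_pages = 1;
	set.useable        = 1;

	if (syscall(SYS_set_thread_area, &set) != 0) {
		perror("set_thread_area");
		return 1;
	}
	printf("kernel assigned GDT entry %u\n", set.entry_number);

	/* Read the descriptor back through get_thread_area. */
	memset(&get, 0, sizeof(get));
	get.entry_number = set.entry_number;
	if (syscall(SYS_get_thread_area, &get) != 0) {
		perror("get_thread_area");
		return 1;
	}
	printf("base read back: %#x (expected %#x)\n",
	       get.base_addr, set.base_addr);
	return 0;
}
```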
@@ -685,8 +685,7 @@ efi_mem_type (unsigned long phys_addr)
 	for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
 		md = p;
 
-		if ((md->phys_addr <= phys_addr) && (phys_addr <=
-		    (md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT) - 1)))
+		if (phys_addr - md->phys_addr < (md->num_pages << EFI_PAGE_SHIFT))
 			return md->type;
 	}
 	return 0;
@@ -706,8 +705,7 @@ efi_mem_attributes (unsigned long phys_addr)
 	for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
 		md = p;
 
-		if ((md->phys_addr <= phys_addr) && (phys_addr <=
-		    (md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT) - 1)))
+		if (phys_addr - md->phys_addr < (md->num_pages << EFI_PAGE_SHIFT))
 			return md->attribute;
 	}
 	return 0;
...
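Both EFI helpers collapse the two-sided range test into a single unsigned comparison: when phys_addr is below md->phys_addr, the subtraction wraps to a huge value, so the `<` also rejects addresses before the region. A standalone demonstration of the idiom:

```c
#include <stdio.h>

/* One unsigned compare replaces (base <= addr && addr <= base + size - 1):
 * if addr < base, addr - base wraps around and is >= size. */
static int in_range(unsigned long addr, unsigned long base, unsigned long size)
{
	return addr - base < size;
}

int main(void)
{
	unsigned long base = 0x1000, size = 0x2000;	/* [0x1000, 0x3000) */

	printf("%d\n", in_range(0x0fff, base, size));	/* 0: below base, wraps */
	printf("%d\n", in_range(0x1000, base, size));	/* 1: first byte */
	printf("%d\n", in_range(0x2fff, base, size));	/* 1: last byte */
	printf("%d\n", in_range(0x3000, base, size));	/* 0: one past the end */
	return 0;
}
```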
@@ -25,6 +25,7 @@
 #include <asm/delay.h>
 #include <asm/elf.h>
+#include <asm/ia32.h>
 #include <asm/pgalloc.h>
 #include <asm/processor.h>
 #include <asm/sal.h>
@@ -324,7 +325,7 @@ copy_thread (int nr, unsigned long clone_flags,
 	memcpy((void *) child_rbs, (void *) rbs, rbs_size);
 
 	if (user_mode(child_ptregs)) {
-		if (clone_flags & CLONE_SETTLS)
+		if ((clone_flags & CLONE_SETTLS) && !IS_IA32_PROCESS(regs))
 			child_ptregs->r13 = regs->r16;	/* see sys_clone2() in entry.S */
 		if (user_stack_base) {
 			child_ptregs->r12 = user_stack_base + user_stack_size - 16;
@@ -383,8 +384,11 @@ copy_thread (int nr, unsigned long clone_flags,
 	 * If we're cloning an IA32 task then save the IA32 extra
 	 * state from the current task to the new task
 	 */
-	if (IS_IA32_PROCESS(ia64_task_regs(current)))
+	if (IS_IA32_PROCESS(ia64_task_regs(current))) {
 		ia32_save_state(p);
+		if (clone_flags & CLONE_SETTLS)
+			retval = ia32_clone_tls(p, child_ptregs);
+	}
 #endif
 
 #ifdef CONFIG_PERFMON
...
@@ -35,6 +35,7 @@
 #include <asm/cache.h>
 #include <asm/current.h>
 #include <asm/delay.h>
+#include <asm/ia32.h>
 #include <asm/io.h>
 #include <asm/irq.h>
 #include <asm/machvec.h>
@@ -312,6 +313,9 @@ smp_callin (void)
 	local_irq_enable();
 	calibrate_delay();
 	local_cpu_data->loops_per_jiffy = loops_per_jiffy;
+#ifdef CONFIG_IA32_SUPPORT
+	ia32_gdt_init();
+#endif
 
 	if (!(sal_platform_features & IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT)) {
 		/*
...
@@ -186,7 +186,7 @@ static int __init find_pernode_space(unsigned long start, unsigned long len,
 	 */
 	for (cpu = 0; cpu < NR_CPUS; cpu++) {
 		if (node == node_cpuid[cpu].nid) {
-			memcpy(cpu_data, __phys_per_cpu_start,
+			memcpy(__va(cpu_data), __phys_per_cpu_start,
 			       __per_cpu_end-__per_cpu_start);
 			__per_cpu_offset[cpu] =
 				(char*)__va(cpu_data) -
...
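The discontig fix matters because cpu_data here is a physical address from the bootmem allocator, and memcpy() must write through the kernel's virtual mapping. A toy model of the __va()/__pa() linear-map translation; the PAGE_OFFSET constant is invented for illustration and is not ia64's real layout:

```c
#include <stdio.h>

/* Toy model: on many kernels the linear map is simply phys + PAGE_OFFSET.
 * The constant below is illustrative only. */
#define PAGE_OFFSET 0xffff800000000000UL

#define __va(x) ((void *) ((unsigned long)(x) + PAGE_OFFSET))
#define __pa(x) ((unsigned long)(x) - PAGE_OFFSET)

int main(void)
{
	unsigned long cpu_data = 0x4000000;	/* physical address from bootmem */

	/* memcpy(cpu_data, ...) would dereference 0x4000000 directly;
	 * memcpy(__va(cpu_data), ...) goes through the kernel's linear map. */
	printf("phys %#lx -> virt %p\n", cpu_data, __va(cpu_data));
	printf("round trip: %#lx\n", __pa(__va(cpu_data)));
	return 0;
}
```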
@@ -558,6 +558,6 @@ mem_init (void)
 	setup_gate();	/* setup gate pages before we free up boot memory... */
 
 #ifdef CONFIG_IA32_SUPPORT
-	ia32_gdt_init();
+	ia32_boot_gdt_init();
 #endif
 }
@@ -9,9 +9,11 @@
 #ifdef CONFIG_IA32_SUPPORT
 
 extern void ia32_cpu_init (void);
+extern void ia32_boot_gdt_init (void);
 extern void ia32_gdt_init (void);
 extern int ia32_exception (struct pt_regs *regs, unsigned long isr);
 extern int ia32_intercept (struct pt_regs *regs, unsigned long isr);
+extern int ia32_clone_tls (struct task_struct *child, struct pt_regs *childregs);
 
 #endif /* !CONFIG_IA32_SUPPORT */
...
@@ -4,7 +4,7 @@
 #ifdef CONFIG_IA64_DIG
 /* Max 8 Nodes */
 #define NODES_SHIFT	3
-#elif defined(CONFIG_IA64_SGI_SN2)
+#elif defined(CONFIG_IA64_SGI_SN2) || defined(CONFIG_IA64_GENERIC)
 /* Max 128 Nodes */
 #define NODES_SHIFT	7
 #endif
...
@@ -230,6 +230,22 @@ typedef struct {
 					     (int *) (addr));	\
 })
 
+#ifdef CONFIG_IA32_SUPPORT
+struct desc_struct {
+	unsigned int a, b;
+};
+
+#define desc_empty(desc)		(!((desc)->a + (desc)->b))
+#define desc_equal(desc1, desc2)	(((desc1)->a == (desc2)->a) && ((desc1)->b == (desc2)->b))
+
+#define GDT_ENTRY_TLS_ENTRIES	3
+#define GDT_ENTRY_TLS_MIN	6
+#define GDT_ENTRY_TLS_MAX	(GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
+
+#define TLS_SIZE		(GDT_ENTRY_TLS_ENTRIES * 8)
+#endif
+
 struct thread_struct {
 	__u32 flags;	/* various thread flags (see IA64_THREAD_*) */
 	/* writing on_ustack is performance-critical, so it's worth spending 8 bits on it... */
@@ -249,6 +265,9 @@ struct thread_struct {
 	__u64 fdr;			/* IA32 fp except. data reg */
 	__u64 old_k1;			/* old value of ar.k1 */
 	__u64 old_iob;			/* old IOBase value */
+	/* cached TLS descriptors. */
+	struct desc_struct tls_array[GDT_ENTRY_TLS_ENTRIES];
+
 # define INIT_THREAD_IA32	.eflag = 0,			\
 				.fsr = 0,			\
 				.fcr = 0x17800000037fULL,	\
...