Commit 042b54a1 authored by Martin Schwidefsky, committed by Linus Torvalds

[PATCH] s390/s390x unification (4/7)

Merge s390x and s390 into one architecture.
parent b50819c2
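Note on the Elf_* names in the merged module.c hunks below: the unified code replaces the Elf32_* types and ELF32_R_* accessors with width-neutral aliases selected by CONFIG_ARCH_S390X. Their definitions are not part of the hunks expanded here; what follows is only a sketch of the assumed aliasing (the header it lives in and the exact spellings are assumptions, not taken from this commit):

/* assumed width-neutral ELF aliases, switched on CONFIG_ARCH_S390X */
#ifndef CONFIG_ARCH_S390X
#define Elf_Addr	Elf32_Addr
#define Elf_Ehdr	Elf32_Ehdr
#define Elf_Shdr	Elf32_Shdr
#define Elf_Sym		Elf32_Sym
#define Elf_Rela	Elf32_Rela
#define ELF_R_SYM	ELF32_R_SYM
#define ELF_R_TYPE	ELF32_R_TYPE
#else /* CONFIG_ARCH_S390X */
#define Elf_Addr	Elf64_Addr
#define Elf_Ehdr	Elf64_Ehdr
#define Elf_Shdr	Elf64_Shdr
#define Elf_Sym		Elf64_Sym
#define Elf_Rela	Elf64_Rela
#define ELF_R_SYM	ELF64_R_SYM
#define ELF_R_TYPE	ELF64_R_TYPE
#endif /* CONFIG_ARCH_S390X */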
...@@ -37,8 +37,11 @@ ...@@ -37,8 +37,11 @@
#define DEBUGP(fmt , ...) #define DEBUGP(fmt , ...)
#endif #endif
#define GOT_ENTRY_SIZE 4 #ifndef CONFIG_ARCH_S390X
#define PLT_ENTRY_SIZE 12 #define PLT_ENTRY_SIZE 12
#else /* CONFIG_ARCH_S390X */
#define PLT_ENTRY_SIZE 20
#endif /* CONFIG_ARCH_S390X */
void *module_alloc(unsigned long size) void *module_alloc(unsigned long size)
{ {
...@@ -56,30 +59,34 @@ void module_free(struct module *mod, void *module_region) ...@@ -56,30 +59,34 @@ void module_free(struct module *mod, void *module_region)
} }
static inline void static inline void
check_rela(Elf32_Rela *rela, struct module *me) check_rela(Elf_Rela *rela, struct module *me)
{ {
struct mod_arch_syminfo *info; struct mod_arch_syminfo *info;
info = me->arch.syminfo + ELF32_R_SYM (rela->r_info); info = me->arch.syminfo + ELF_R_SYM (rela->r_info);
switch (ELF32_R_TYPE (rela->r_info)) { switch (ELF_R_TYPE (rela->r_info)) {
case R_390_GOT12: /* 12 bit GOT offset. */ case R_390_GOT12: /* 12 bit GOT offset. */
case R_390_GOT16: /* 16 bit GOT offset. */ case R_390_GOT16: /* 16 bit GOT offset. */
case R_390_GOT32: /* 32 bit GOT offset. */ case R_390_GOT32: /* 32 bit GOT offset. */
case R_390_GOT64: /* 64 bit GOT offset. */
case R_390_GOTENT: /* 32 bit PC rel. to GOT entry shifted by 1. */ case R_390_GOTENT: /* 32 bit PC rel. to GOT entry shifted by 1. */
case R_390_GOTPLT12: /* 12 bit offset to jump slot. */ case R_390_GOTPLT12: /* 12 bit offset to jump slot. */
case R_390_GOTPLT16: /* 16 bit offset to jump slot. */ case R_390_GOTPLT16: /* 16 bit offset to jump slot. */
case R_390_GOTPLT32: /* 32 bit offset to jump slot. */ case R_390_GOTPLT32: /* 32 bit offset to jump slot. */
case R_390_GOTPLT64: /* 64 bit offset to jump slot. */
case R_390_GOTPLTENT: /* 32 bit rel. offset to jump slot >> 1. */ case R_390_GOTPLTENT: /* 32 bit rel. offset to jump slot >> 1. */
if (info->got_offset == -1UL) { if (info->got_offset == -1UL) {
info->got_offset = me->arch.got_size; info->got_offset = me->arch.got_size;
me->arch.got_size += GOT_ENTRY_SIZE; me->arch.got_size += sizeof(void*);
} }
break; break;
case R_390_PLT16DBL: /* 16 bit PC rel. PLT shifted by 1. */ case R_390_PLT16DBL: /* 16 bit PC rel. PLT shifted by 1. */
case R_390_PLT32DBL: /* 32 bit PC rel. PLT shifted by 1. */ case R_390_PLT32DBL: /* 32 bit PC rel. PLT shifted by 1. */
case R_390_PLT32: /* 32 bit PC relative PLT address. */ case R_390_PLT32: /* 32 bit PC relative PLT address. */
case R_390_PLT64: /* 64 bit PC relative PLT address. */
case R_390_PLTOFF16: /* 16 bit offset from GOT to PLT. */ case R_390_PLTOFF16: /* 16 bit offset from GOT to PLT. */
case R_390_PLTOFF32: /* 32 bit offset from GOT to PLT. */ case R_390_PLTOFF32: /* 32 bit offset from GOT to PLT. */
case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
if (info->plt_offset == -1UL) { if (info->plt_offset == -1UL) {
info->plt_offset = me->arch.plt_size; info->plt_offset = me->arch.plt_size;
me->arch.plt_size += PLT_ENTRY_SIZE; me->arch.plt_size += PLT_ENTRY_SIZE;
...@@ -100,12 +107,12 @@ check_rela(Elf32_Rela *rela, struct module *me) ...@@ -100,12 +107,12 @@ check_rela(Elf32_Rela *rela, struct module *me)
* got and plt but we can increase the core module size. * got and plt but we can increase the core module size.
*/ */
int int
module_frob_arch_sections(Elf32_Ehdr *hdr, Elf32_Shdr *sechdrs, module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
char *secstrings, struct module *me) char *secstrings, struct module *me)
{ {
Elf32_Shdr *symtab; Elf_Shdr *symtab;
Elf32_Sym *symbols; Elf_Sym *symbols;
Elf32_Rela *rela; Elf_Rela *rela;
char *strings; char *strings;
int nrela, i, j; int nrela, i, j;
...@@ -123,7 +130,7 @@ module_frob_arch_sections(Elf32_Ehdr *hdr, Elf32_Shdr *sechdrs, ...@@ -123,7 +130,7 @@ module_frob_arch_sections(Elf32_Ehdr *hdr, Elf32_Shdr *sechdrs,
} }
/* Allocate one syminfo structure per symbol. */ /* Allocate one syminfo structure per symbol. */
me->arch.nsyms = symtab->sh_size / sizeof(Elf32_Sym); me->arch.nsyms = symtab->sh_size / sizeof(Elf_Sym);
me->arch.syminfo = kmalloc(me->arch.nsyms * me->arch.syminfo = kmalloc(me->arch.nsyms *
sizeof(struct mod_arch_syminfo), sizeof(struct mod_arch_syminfo),
GFP_KERNEL); GFP_KERNEL);
...@@ -148,7 +155,7 @@ module_frob_arch_sections(Elf32_Ehdr *hdr, Elf32_Shdr *sechdrs, ...@@ -148,7 +155,7 @@ module_frob_arch_sections(Elf32_Ehdr *hdr, Elf32_Shdr *sechdrs,
for (i = 0; i < hdr->e_shnum; i++) { for (i = 0; i < hdr->e_shnum; i++) {
if (sechdrs[i].sh_type != SHT_RELA) if (sechdrs[i].sh_type != SHT_RELA)
continue; continue;
nrela = sechdrs[i].sh_size / sizeof(Elf32_Rela); nrela = sechdrs[i].sh_size / sizeof(Elf_Rela);
rela = (void *) hdr + sechdrs[i].sh_offset; rela = (void *) hdr + sechdrs[i].sh_offset;
for (j = 0; j < nrela; j++) for (j = 0; j < nrela; j++)
check_rela(rela + j, me); check_rela(rela + j, me);
...@@ -174,19 +181,19 @@ apply_relocate(Elf_Shdr *sechdrs, const char *strtab, unsigned int symindex, ...@@ -174,19 +181,19 @@ apply_relocate(Elf_Shdr *sechdrs, const char *strtab, unsigned int symindex,
} }
static inline int static inline int
apply_rela(Elf32_Rela *rela, Elf32_Addr base, Elf32_Sym *symtab, apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
struct module *me) struct module *me)
{ {
struct mod_arch_syminfo *info; struct mod_arch_syminfo *info;
Elf32_Addr loc, val; Elf_Addr loc, val;
int r_type, r_sym; int r_type, r_sym;
/* This is where to make the change */ /* This is where to make the change */
loc = base + rela->r_offset; loc = base + rela->r_offset;
/* This is the symbol it is referring to. Note that all /* This is the symbol it is referring to. Note that all
undefined symbols have been resolved. */ undefined symbols have been resolved. */
r_sym = ELF32_R_SYM(rela->r_info); r_sym = ELF_R_SYM(rela->r_info);
r_type = ELF32_R_TYPE(rela->r_info); r_type = ELF_R_TYPE(rela->r_info);
info = me->arch.syminfo + r_sym; info = me->arch.syminfo + r_sym;
val = symtab[r_sym].st_value; val = symtab[r_sym].st_value;
...@@ -195,6 +202,7 @@ apply_rela(Elf32_Rela *rela, Elf32_Addr base, Elf32_Sym *symtab, ...@@ -195,6 +202,7 @@ apply_rela(Elf32_Rela *rela, Elf32_Addr base, Elf32_Sym *symtab,
case R_390_12: /* Direct 12 bit. */ case R_390_12: /* Direct 12 bit. */
case R_390_16: /* Direct 16 bit. */ case R_390_16: /* Direct 16 bit. */
case R_390_32: /* Direct 32 bit. */ case R_390_32: /* Direct 32 bit. */
case R_390_64: /* Direct 64 bit. */
val += rela->r_addend; val += rela->r_addend;
if (r_type == R_390_8) if (r_type == R_390_8)
*(unsigned char *) loc = val; *(unsigned char *) loc = val;
...@@ -205,11 +213,14 @@ apply_rela(Elf32_Rela *rela, Elf32_Addr base, Elf32_Sym *symtab, ...@@ -205,11 +213,14 @@ apply_rela(Elf32_Rela *rela, Elf32_Addr base, Elf32_Sym *symtab,
*(unsigned short *) loc = val; *(unsigned short *) loc = val;
else if (r_type == R_390_32) else if (r_type == R_390_32)
*(unsigned int *) loc = val; *(unsigned int *) loc = val;
else if (r_type == R_390_64)
*(unsigned long *) loc = val;
break; break;
case R_390_PC16: /* PC relative 16 bit. */ case R_390_PC16: /* PC relative 16 bit. */
case R_390_PC16DBL: /* PC relative 16 bit shifted by 1. */ case R_390_PC16DBL: /* PC relative 16 bit shifted by 1. */
case R_390_PC32DBL: /* PC relative 32 bit shifted by 1. */ case R_390_PC32DBL: /* PC relative 32 bit shifted by 1. */
case R_390_PC32: /* PC relative 32 bit. */ case R_390_PC32: /* PC relative 32 bit. */
case R_390_PC64: /* PC relative 64 bit. */
val += rela->r_addend - loc; val += rela->r_addend - loc;
if (r_type == R_390_PC16) if (r_type == R_390_PC16)
*(unsigned short *) loc = val; *(unsigned short *) loc = val;
...@@ -219,17 +230,22 @@ apply_rela(Elf32_Rela *rela, Elf32_Addr base, Elf32_Sym *symtab, ...@@ -219,17 +230,22 @@ apply_rela(Elf32_Rela *rela, Elf32_Addr base, Elf32_Sym *symtab,
*(unsigned int *) loc = val >> 1; *(unsigned int *) loc = val >> 1;
else if (r_type == R_390_PC32) else if (r_type == R_390_PC32)
*(unsigned int *) loc = val; *(unsigned int *) loc = val;
else if (r_type == R_390_PC64)
*(unsigned long *) loc = val;
break; break;
case R_390_GOT12: /* 12 bit GOT offset. */ case R_390_GOT12: /* 12 bit GOT offset. */
case R_390_GOT16: /* 16 bit GOT offset. */ case R_390_GOT16: /* 16 bit GOT offset. */
case R_390_GOT32: /* 32 bit GOT offset. */ case R_390_GOT32: /* 32 bit GOT offset. */
case R_390_GOT64: /* 64 bit GOT offset. */
case R_390_GOTENT: /* 32 bit PC rel. to GOT entry shifted by 1. */ case R_390_GOTENT: /* 32 bit PC rel. to GOT entry shifted by 1. */
case R_390_GOTPLT12: /* 12 bit offset to jump slot. */ case R_390_GOTPLT12: /* 12 bit offset to jump slot. */
case R_390_GOTPLT16: /* 16 bit offset to jump slot. */ case R_390_GOTPLT16: /* 16 bit offset to jump slot. */
case R_390_GOTPLT32: /* 32 bit offset to jump slot. */ case R_390_GOTPLT32: /* 32 bit offset to jump slot. */
case R_390_GOTPLT64: /* 64 bit offset to jump slot. */
case R_390_GOTPLTENT: /* 32 bit rel. offset to jump slot >> 1. */ case R_390_GOTPLTENT: /* 32 bit rel. offset to jump slot >> 1. */
if (info->got_initialized == 0) { if (info->got_initialized == 0) {
Elf32_Addr *gotent; Elf_Addr *gotent;
gotent = me->module_core + me->arch.got_offset + gotent = me->module_core + me->arch.got_offset +
info->got_offset; info->got_offset;
*gotent = val; *gotent = val;
...@@ -249,27 +265,42 @@ apply_rela(Elf32_Rela *rela, Elf32_Addr base, Elf32_Sym *symtab, ...@@ -249,27 +265,42 @@ apply_rela(Elf32_Rela *rela, Elf32_Addr base, Elf32_Sym *symtab,
else if (r_type == R_390_GOTENT || else if (r_type == R_390_GOTENT ||
r_type == R_390_GOTPLTENT) r_type == R_390_GOTPLTENT)
*(unsigned int *) loc = val >> 1; *(unsigned int *) loc = val >> 1;
else if (r_type == R_390_GOT64 ||
r_type == R_390_GOTPLT64)
*(unsigned long *) loc = val;
break; break;
case R_390_PLT16DBL: /* 16 bit PC rel. PLT shifted by 1. */ case R_390_PLT16DBL: /* 16 bit PC rel. PLT shifted by 1. */
case R_390_PLT32DBL: /* 32 bit PC rel. PLT shifted by 1. */ case R_390_PLT32DBL: /* 32 bit PC rel. PLT shifted by 1. */
case R_390_PLT32: /* 32 bit PC relative PLT address. */ case R_390_PLT32: /* 32 bit PC relative PLT address. */
case R_390_PLT64: /* 64 bit PC relative PLT address. */
case R_390_PLTOFF16: /* 16 bit offset from GOT to PLT. */ case R_390_PLTOFF16: /* 16 bit offset from GOT to PLT. */
case R_390_PLTOFF32: /* 32 bit offset from GOT to PLT. */ case R_390_PLTOFF32: /* 32 bit offset from GOT to PLT. */
case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
if (info->plt_initialized == 0) { if (info->plt_initialized == 0) {
unsigned int *ip; unsigned int *ip;
ip = me->module_core + me->arch.plt_offset + ip = me->module_core + me->arch.plt_offset +
info->plt_offset; info->plt_offset;
#ifndef CONFIG_ARCH_S390X
ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */ ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
ip[1] = 0x100607f1; ip[1] = 0x100607f1;
ip[2] = val; ip[2] = val;
#else /* CONFIG_ARCH_S390X */
ip[0] = 0x0d10e310; /* basr 1,0; lg 1,10(1); br 1 */
ip[1] = 0x100a0004;
ip[2] = 0x07f10000;
ip[3] = (unsigned int) (val >> 32);
ip[4] = (unsigned int) val;
#endif /* CONFIG_ARCH_S390X */
info->plt_initialized = 1; info->plt_initialized = 1;
} }
if (r_type == R_390_PLTOFF16 || if (r_type == R_390_PLTOFF16 ||
r_type == R_390_PLTOFF32) r_type == R_390_PLTOFF32
|| r_type == R_390_PLTOFF64
)
val = me->arch.plt_offset - me->arch.got_offset + val = me->arch.plt_offset - me->arch.got_offset +
info->plt_offset + rela->r_addend; info->plt_offset + rela->r_addend;
else else
val = (Elf32_Addr) me->module_core + val = (Elf_Addr) me->module_core +
me->arch.plt_offset + info->plt_offset + me->arch.plt_offset + info->plt_offset +
rela->r_addend - loc; rela->r_addend - loc;
if (r_type == R_390_PLT16DBL) if (r_type == R_390_PLT16DBL)
...@@ -281,19 +312,25 @@ apply_rela(Elf32_Rela *rela, Elf32_Addr base, Elf32_Sym *symtab, ...@@ -281,19 +312,25 @@ apply_rela(Elf32_Rela *rela, Elf32_Addr base, Elf32_Sym *symtab,
else if (r_type == R_390_PLT32 || else if (r_type == R_390_PLT32 ||
r_type == R_390_PLTOFF32) r_type == R_390_PLTOFF32)
*(unsigned int *) loc = val; *(unsigned int *) loc = val;
else if (r_type == R_390_PLT64 ||
r_type == R_390_PLTOFF64)
*(unsigned long *) loc = val;
break; break;
case R_390_GOTOFF16: /* 16 bit offset to GOT. */ case R_390_GOTOFF16: /* 16 bit offset to GOT. */
case R_390_GOTOFF32: /* 32 bit offset to GOT. */ case R_390_GOTOFF32: /* 32 bit offset to GOT. */
case R_390_GOTOFF64: /* 64 bit offset to GOT. */
val = val + rela->r_addend - val = val + rela->r_addend -
((Elf32_Addr) me->module_core + me->arch.got_offset); ((Elf_Addr) me->module_core + me->arch.got_offset);
if (r_type == R_390_GOTOFF16) if (r_type == R_390_GOTOFF16)
*(unsigned short *) loc = val; *(unsigned short *) loc = val;
else if (r_type == R_390_GOTOFF32) else if (r_type == R_390_GOTOFF32)
*(unsigned int *) loc = val; *(unsigned int *) loc = val;
else if (r_type == R_390_GOTOFF64)
*(unsigned long *) loc = val;
break; break;
case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */ case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */ case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
val = (Elf32_Addr) me->module_core + me->arch.got_offset + val = (Elf_Addr) me->module_core + me->arch.got_offset +
rela->r_addend - loc; rela->r_addend - loc;
if (r_type == R_390_GOTPC) if (r_type == R_390_GOTPC)
*(unsigned int *) loc = val; *(unsigned int *) loc = val;
...@@ -316,22 +353,23 @@ apply_rela(Elf32_Rela *rela, Elf32_Addr base, Elf32_Sym *symtab, ...@@ -316,22 +353,23 @@ apply_rela(Elf32_Rela *rela, Elf32_Addr base, Elf32_Sym *symtab,
} }
int int
apply_relocate_add(Elf32_Shdr *sechdrs, const char *strtab, apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
unsigned int symindex, unsigned int relsec, unsigned int symindex, unsigned int relsec,
struct module *me) struct module *me)
{ {
Elf32_Addr base; Elf_Addr base;
Elf32_Sym *symtab; Elf_Sym *symtab;
Elf32_Rela *rela; Elf_Rela *rela;
unsigned long i, n; unsigned long i, n;
int rc; int rc;
DEBUGP("Applying relocate section %u to %u\n", DEBUGP("Applying relocate section %u to %u\n",
relsec, sechdrs[relsec].sh_info); relsec, sechdrs[relsec].sh_info);
base = sechdrs[sechdrs[relsec].sh_info].sh_addr; base = sechdrs[sechdrs[relsec].sh_info].sh_addr;
symtab = (Elf32_Sym *) sechdrs[symindex].sh_addr; symtab = (Elf_Sym *) sechdrs[symindex].sh_addr;
rela = (Elf32_Rela *) sechdrs[relsec].sh_addr; rela = (Elf_Rela *) sechdrs[relsec].sh_addr;
n = sechdrs[relsec].sh_size / sizeof(Elf32_Rela); n = sechdrs[relsec].sh_size / sizeof(Elf_Rela);
for (i = 0; i < n; i++, rela++) { for (i = 0; i < n; i++, rela++) {
rc = apply_rela(rela, base, symtab, me); rc = apply_rela(rela, base, symtab, me);
if (rc) if (rc)
......
...@@ -56,7 +56,11 @@ unsigned long thread_saved_pc(struct task_struct *tsk) ...@@ -56,7 +56,11 @@ unsigned long thread_saved_pc(struct task_struct *tsk)
unsigned long bc; unsigned long bc;
bc = *((unsigned long *) tsk->thread.ksp); bc = *((unsigned long *) tsk->thread.ksp);
#ifndef CONFIG_ARCH_S390X
return *((unsigned long *) (bc+56)); return *((unsigned long *) (bc+56));
#else
return *((unsigned long *) (bc+112));
#endif
} }
/* /*
...@@ -79,6 +83,7 @@ void default_idle(void) ...@@ -79,6 +83,7 @@ void default_idle(void)
*/ */
wait_psw.mask = PSW_KERNEL_BITS | PSW_MASK_MCHECK | PSW_MASK_WAIT | wait_psw.mask = PSW_KERNEL_BITS | PSW_MASK_MCHECK | PSW_MASK_WAIT |
PSW_MASK_IO | PSW_MASK_EXT; PSW_MASK_IO | PSW_MASK_EXT;
#ifndef CONFIG_ARCH_S390X
asm volatile ( asm volatile (
" basr %0,0\n" " basr %0,0\n"
"0: la %0,1f-0b(%0)\n" "0: la %0,1f-0b(%0)\n"
...@@ -92,6 +97,18 @@ void default_idle(void) ...@@ -92,6 +97,18 @@ void default_idle(void)
" lpsw 0(%1)\n" " lpsw 0(%1)\n"
"2:" "2:"
: "=&a" (reg) : "a" (&wait_psw) : "memory", "cc" ); : "=&a" (reg) : "a" (&wait_psw) : "memory", "cc" );
#else /* CONFIG_ARCH_S390X */
asm volatile (
" larl %0,0f\n"
" stg %0,8(%1)\n"
" lpswe 0(%1)\n"
"0: larl %0,1f\n"
" stg %0,8(%1)\n"
" ni 1(%1),0xf9\n"
" lpswe 0(%1)\n"
"1:"
: "=&a" (reg) : "a" (&wait_psw) : "memory", "cc" );
#endif /* CONFIG_ARCH_S390X */
} }
int cpu_idle(void) int cpu_idle(void)
...@@ -109,9 +126,9 @@ void show_regs(struct pt_regs *regs) ...@@ -109,9 +126,9 @@ void show_regs(struct pt_regs *regs)
struct task_struct *tsk = current; struct task_struct *tsk = current;
printk("CPU: %d %s\n", tsk->thread_info->cpu, print_tainted()); printk("CPU: %d %s\n", tsk->thread_info->cpu, print_tainted());
printk("Process %s (pid: %d, task: %08lx, ksp: %08x)\n", printk("Process %s (pid: %d, task: %p, ksp: %p)\n",
current->comm, current->pid, (unsigned long) tsk, current->comm, current->pid, (void *) tsk,
tsk->thread.ksp); (void *) tsk->thread.ksp);
show_registers(regs); show_registers(regs);
/* Show stack backtrace if pt_regs is from kernel mode */ /* Show stack backtrace if pt_regs is from kernel mode */
...@@ -120,6 +137,9 @@ void show_regs(struct pt_regs *regs) ...@@ -120,6 +137,9 @@ void show_regs(struct pt_regs *regs)
} }
extern void kernel_thread_starter(void); extern void kernel_thread_starter(void);
#ifndef CONFIG_ARCH_S390X
__asm__(".align 4\n" __asm__(".align 4\n"
"kernel_thread_starter:\n" "kernel_thread_starter:\n"
" l 15,0(8)\n" " l 15,0(8)\n"
...@@ -130,6 +150,20 @@ __asm__(".align 4\n" ...@@ -130,6 +150,20 @@ __asm__(".align 4\n"
" sr 2,2\n" " sr 2,2\n"
" br 11\n"); " br 11\n");
#else /* CONFIG_ARCH_S390X */
__asm__(".align 4\n"
"kernel_thread_starter:\n"
" lg 15,0(8)\n"
" sgr 15,7\n"
" stosm 48(15),3\n"
" lgr 2,10\n"
" basr 14,9\n"
" sgr 2,2\n"
" br 11\n");
#endif /* CONFIG_ARCH_S390X */
int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags) int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
{ {
struct task_struct *p; struct task_struct *p;
...@@ -137,7 +171,7 @@ int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags) ...@@ -137,7 +171,7 @@ int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
memset(&regs, 0, sizeof(regs)); memset(&regs, 0, sizeof(regs));
regs.psw.mask = PSW_KERNEL_BITS; regs.psw.mask = PSW_KERNEL_BITS;
regs.psw.addr = (__u32) kernel_thread_starter | PSW_ADDR_AMODE31; regs.psw.addr = (unsigned long) kernel_thread_starter | PSW_ADDR_AMODE;
regs.gprs[7] = STACK_FRAME_OVERHEAD; regs.gprs[7] = STACK_FRAME_OVERHEAD;
regs.gprs[8] = __LC_KERNEL_STACK; regs.gprs[8] = __LC_KERNEL_STACK;
regs.gprs[9] = (unsigned long) fn; regs.gprs[9] = (unsigned long) fn;
...@@ -180,8 +214,8 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long new_stackp, ...@@ -180,8 +214,8 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long new_stackp,
unsigned long glue2; unsigned long glue2;
unsigned long scratch[2]; unsigned long scratch[2];
unsigned long gprs[10]; /* gprs 6 -15 */ unsigned long gprs[10]; /* gprs 6 -15 */
unsigned long fprs[4]; /* fpr 4 and 6 */ unsigned int fprs[4]; /* fpr 4 and 6 */
unsigned long empty[4]; unsigned int empty[4];
struct pt_regs childregs; struct pt_regs childregs;
} *frame; } *frame;
...@@ -198,7 +232,8 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long new_stackp, ...@@ -198,7 +232,8 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long new_stackp,
frame->gprs[8] = (unsigned long) ret_from_fork; frame->gprs[8] = (unsigned long) ret_from_fork;
/* fake return stack for resume(), don't go back to schedule */ /* fake return stack for resume(), don't go back to schedule */
frame->gprs[9] = (unsigned long) frame; frame->gprs[9] = (unsigned long) frame;
#ifndef CONFIG_ARCH_S390X
/* /*
* save fprs to current->thread.fp_regs to merge them with * save fprs to current->thread.fp_regs to merge them with
* the emulated registers and then copy the result to the child. * the emulated registers and then copy the result to the child.
...@@ -207,14 +242,31 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long new_stackp, ...@@ -207,14 +242,31 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long new_stackp,
memcpy(&p->thread.fp_regs, &current->thread.fp_regs, memcpy(&p->thread.fp_regs, &current->thread.fp_regs,
sizeof(s390_fp_regs)); sizeof(s390_fp_regs));
p->thread.user_seg = __pa((unsigned long) p->mm->pgd) | _SEGMENT_TABLE; p->thread.user_seg = __pa((unsigned long) p->mm->pgd) | _SEGMENT_TABLE;
/* start process with ar4 pointing to the correct address space */ /* Set a new TLS ? */
if (clone_flags & CLONE_SETTLS)
frame->childregs.acrs[0] = regs->gprs[6];
#else /* CONFIG_ARCH_S390X */
/* Save the fpu registers to new thread structure. */
save_fp_regs(&p->thread.fp_regs);
p->thread.user_seg = __pa((unsigned long) p->mm->pgd) | _REGION_TABLE;
/* Set a new TLS ? */
if (clone_flags & CLONE_SETTLS) {
if (test_thread_flag(TIF_31BIT)) {
frame->childregs.acrs[0] =
(unsigned int) regs->gprs[6];
} else {
frame->childregs.acrs[0] =
(unsigned int)(regs->gprs[6] >> 32);
frame->childregs.acrs[1] =
(unsigned int) regs->gprs[6];
}
}
#endif /* CONFIG_ARCH_S390X */
/* start new process with ar4 pointing to the correct address space */
p->thread.ar4 = get_fs().ar4; p->thread.ar4 = get_fs().ar4;
/* Don't copy debug registers */ /* Don't copy debug registers */
memset(&p->thread.per_info,0,sizeof(p->thread.per_info)); memset(&p->thread.per_info,0,sizeof(p->thread.per_info));
/* Set a new TLS ? */
if (clone_flags & CLONE_SETTLS)
frame->childregs.acrs[0] = regs->gprs[6];
return 0; return 0;
} }
...@@ -292,12 +344,16 @@ asmlinkage int sys_execve(struct pt_regs regs) ...@@ -292,12 +344,16 @@ asmlinkage int sys_execve(struct pt_regs regs)
*/ */
int dump_fpu (struct pt_regs * regs, s390_fp_regs *fpregs) int dump_fpu (struct pt_regs * regs, s390_fp_regs *fpregs)
{ {
#ifndef CONFIG_ARCH_S390X
/* /*
* save fprs to current->thread.fp_regs to merge them with * save fprs to current->thread.fp_regs to merge them with
* the emulated registers and then copy the result to the dump. * the emulated registers and then copy the result to the dump.
*/ */
save_fp_regs(&current->thread.fp_regs); save_fp_regs(&current->thread.fp_regs);
memcpy(fpregs, &current->thread.fp_regs, sizeof(s390_fp_regs)); memcpy(fpregs, &current->thread.fp_regs, sizeof(s390_fp_regs));
#else /* CONFIG_ARCH_S390X */
save_fp_regs(fpregs);
#endif /* CONFIG_ARCH_S390X */
return 1; return 1;
} }
...@@ -339,16 +395,22 @@ unsigned long get_wchan(struct task_struct *p) ...@@ -339,16 +395,22 @@ unsigned long get_wchan(struct task_struct *p)
return 0; return 0;
stack_page = (unsigned long) p->thread_info; stack_page = (unsigned long) p->thread_info;
r15 = p->thread.ksp; r15 = p->thread.ksp;
if (!stack_page || r15 < stack_page || r15 >= 8188+stack_page) if (!stack_page || r15 < stack_page ||
return 0; r15 >= THREAD_SIZE - sizeof(unsigned long) + stack_page)
bc = (*(unsigned long *) r15) & 0x7fffffff; return 0;
bc = (*(unsigned long *) r15) & PSW_ADDR_INSN;
do { do {
if (bc < stack_page || bc >= 8188+stack_page) if (bc < stack_page ||
return 0; bc >= THREAD_SIZE - sizeof(unsigned long) + stack_page)
r14 = (*(unsigned long *) (bc+56)) & 0x7fffffff; return 0;
#ifndef CONFIG_ARCH_S390X
r14 = (*(unsigned long *) (bc+56)) & PSW_ADDR_INSN;
#else
r14 = *(unsigned long *) (bc+112);
#endif
if (r14 < first_sched || r14 >= last_sched) if (r14 < first_sched || r14 >= last_sched)
return r14; return r14;
bc = (*(unsigned long *) bc) & 0x7fffffff; bc = (*(unsigned long *) bc) & PSW_ADDR_INSN;
} while (count++ < 16); } while (count++ < 16);
return 0; return 0;
} }
......
/*
* arch/s390/kernel/reipl.S
*
* S390 version
* Copyright (C) 2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
* Author(s): Holger Smolinski (Holger.Smolinski@de.ibm.com)
Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
*/
#include <asm/lowcore.h>
.globl do_reipl
do_reipl: basr %r13,0
.Lpg0: lpswe .Lnewpsw-.Lpg0(%r13)
.Lpg1: lctlg %c6,%c6,.Lall-.Lpg0(%r13)
stctg %c0,%c0,.Lctlsave-.Lpg0(%r13)
ni .Lctlsave+4-.Lpg0(%r13),0xef
lctlg %c0,%c0,.Lctlsave-.Lpg0(%r13)
lgr %r1,%r2
mvc __LC_PGM_NEW_PSW(16),.Lpcnew-.Lpg0(%r13)
stsch .Lschib-.Lpg0(%r13)
oi .Lschib+5-.Lpg0(%r13),0x84
.Lecs: xi .Lschib+27-.Lpg0(%r13),0x01
msch .Lschib-.Lpg0(%r13)
lghi %r0,5
.Lssch: ssch .Liplorb-.Lpg0(%r13)
jz .L001
brct %r0,.Lssch
bas %r14,.Ldisab-.Lpg0(%r13)
.L001: mvc __LC_IO_NEW_PSW(16),.Lionew-.Lpg0(%r13)
.Ltpi: lpswe .Lwaitpsw-.Lpg0(%r13)
.Lcont: c %r1,__LC_SUBCHANNEL_ID
jnz .Ltpi
clc __LC_IO_INT_PARM(4),.Liplorb-.Lpg0(%r13)
jnz .Ltpi
tsch .Liplirb-.Lpg0(%r13)
tm .Liplirb+9-.Lpg0(%r13),0xbf
jz .L002
bas %r14,.Ldisab-.Lpg0(%r13)
.L002: tm .Liplirb+8-.Lpg0(%r13),0xf3
jz .L003
bas %r14,.Ldisab-.Lpg0(%r13)
.L003: spx .Lnull-.Lpg0(%r13)
st %r1,__LC_SUBCHANNEL_ID
lhi %r1,0 # mode 0 = esa
slr %r0,%r0 # set cpuid to zero
sigp %r1,%r0,0x12 # switch to esa mode
lpsw 0
.Ldisab: sll %r14,1
srl %r14,1 # need to kill hi bit to avoid specification exceptions.
st %r14,.Ldispsw+12-.Lpg0(%r13)
lpswe .Ldispsw-.Lpg0(%r13)
.align 8
.Lall: .quad 0x00000000ff000000
.Lctlsave: .quad 0x0000000000000000
.Lnull: .long 0x0000000000000000
.align 16
/*
* These addresses have to be 31 bit otherwise
* the sigp will throw a specification exception
* when switching to ESA mode as bit 31 would be set
* in the ESA psw.
* Bit 31 of the addresses has to be 0 for the
* 31bit lpswe instruction, a fact they appear to have
* omitted from the PoP.
*/
.Lnewpsw: .quad 0x0000000080000000
.quad .Lpg1
.Lpcnew: .quad 0x0000000080000000
.quad .Lecs
.Lionew: .quad 0x0000000080000000
.quad .Lcont
.Lwaitpsw: .quad 0x0202000080000000
.quad .Ltpi
.Ldispsw: .quad 0x0002000080000000
.quad 0x0000000000000000
.Liplccws: .long 0x02000000,0x60000018
.long 0x08000008,0x20000001
.Liplorb: .long 0x0049504c,0x0040ff80
.long 0x00000000+.Liplccws
.Lschib: .long 0x00000000,0x00000000
.long 0x00000000,0x00000000
.long 0x00000000,0x00000000
.long 0x00000000,0x00000000
.long 0x00000000,0x00000000
.long 0x00000000,0x00000000
.Liplirb: .long 0x00000000,0x00000000
.long 0x00000000,0x00000000
.long 0x00000000,0x00000000
.long 0x00000000,0x00000000
.long 0x00000000,0x00000000
.long 0x00000000,0x00000000
.long 0x00000000,0x00000000
.long 0x00000000,0x00000000
...@@ -4,11 +4,14 @@ ...@@ -4,11 +4,14 @@
* S390 version * S390 version
*/ */
#include <linux/config.h> #include <linux/config.h>
#include <linux/highuid.h>
#include <linux/module.h> #include <linux/module.h>
#include <linux/mm.h>
#include <linux/smp.h> #include <linux/smp.h>
#include <linux/interrupt.h> #include <linux/interrupt.h>
#include <asm/checksum.h> #include <asm/checksum.h>
#include <asm/delay.h> #include <asm/delay.h>
#include <asm/pgalloc.h>
#include <asm/setup.h> #include <asm/setup.h>
#if CONFIG_IP_MULTICAST #if CONFIG_IP_MULTICAST
#include <net/arp.h> #include <net/arp.h>
...@@ -49,6 +52,28 @@ EXPORT_SYMBOL_NOVERS(strrchr); ...@@ -49,6 +52,28 @@ EXPORT_SYMBOL_NOVERS(strrchr);
EXPORT_SYMBOL_NOVERS(strstr); EXPORT_SYMBOL_NOVERS(strstr);
EXPORT_SYMBOL_NOVERS(strpbrk); EXPORT_SYMBOL_NOVERS(strpbrk);
/*
* binfmt_elf loader
*/
extern int dump_fpu (struct pt_regs * regs, s390_fp_regs *fpregs);
EXPORT_SYMBOL(dump_fpu);
EXPORT_SYMBOL(overflowuid);
EXPORT_SYMBOL(overflowgid);
#ifdef CONFIG_S390_SUPPORT
/*
* Dynamically add/remove 31 bit ioctl conversion functions.
*/
extern int register_ioctl32_conversion(unsigned int cmd,
int (*handler)(unsigned int,
unsigned int,
unsigned long,
struct file *));
int unregister_ioctl32_conversion(unsigned int cmd);
EXPORT_SYMBOL(register_ioctl32_conversion);
EXPORT_SYMBOL(unregister_ioctl32_conversion);
#endif
/* /*
* misc. * misc.
*/ */
......
...@@ -324,15 +324,22 @@ void __init setup_arch(char **cmdline_p) ...@@ -324,15 +324,22 @@ void __init setup_arch(char **cmdline_p)
/* /*
* print what head.S has found out about the machine * print what head.S has found out about the machine
*/ */
#ifndef CONFIG_ARCH_S390X
printk((MACHINE_IS_VM) ? printk((MACHINE_IS_VM) ?
"We are running under VM (31 bit mode)\n" : "We are running under VM (31 bit mode)\n" :
"We are running native (31 bit mode)\n"); "We are running native (31 bit mode)\n");
printk((MACHINE_HAS_IEEE) ? printk((MACHINE_HAS_IEEE) ?
"This machine has an IEEE fpu\n" : "This machine has an IEEE fpu\n" :
"This machine has no IEEE fpu\n"); "This machine has no IEEE fpu\n");
#else /* CONFIG_ARCH_S390X */
printk((MACHINE_IS_VM) ?
"We are running under VM (64 bit mode)\n" :
"We are running native (64 bit mode)\n");
#endif /* CONFIG_ARCH_S390X */
ROOT_DEV = Root_RAM0; ROOT_DEV = Root_RAM0;
memory_start = (unsigned long) &_end; /* fixit if use $CODELO etc*/ memory_start = (unsigned long) &_end; /* fixit if use $CODELO etc*/
#ifndef CONFIG_ARCH_S390X
memory_end = memory_size & ~0x400000UL; /* align memory end to 4MB */ memory_end = memory_size & ~0x400000UL; /* align memory end to 4MB */
/* /*
* We need some free virtual space to be able to do vmalloc. * We need some free virtual space to be able to do vmalloc.
...@@ -341,6 +348,9 @@ void __init setup_arch(char **cmdline_p) ...@@ -341,6 +348,9 @@ void __init setup_arch(char **cmdline_p)
*/ */
if (memory_end > 1920*1024*1024) if (memory_end > 1920*1024*1024)
memory_end = 1920*1024*1024; memory_end = 1920*1024*1024;
#else /* CONFIG_ARCH_S390X */
memory_end = memory_size & ~0x200000UL; /* detected in head.s */
#endif /* CONFIG_ARCH_S390X */
init_mm.start_code = PAGE_OFFSET; init_mm.start_code = PAGE_OFFSET;
init_mm.end_code = (unsigned long) &_etext; init_mm.end_code = (unsigned long) &_etext;
init_mm.end_data = (unsigned long) &_edata; init_mm.end_data = (unsigned long) &_edata;
...@@ -461,26 +471,46 @@ void __init setup_arch(char **cmdline_p) ...@@ -461,26 +471,46 @@ void __init setup_arch(char **cmdline_p)
/* /*
* Setup lowcore for boot cpu * Setup lowcore for boot cpu
*/ */
#ifndef CONFIG_ARCH_S390X
lc = (struct _lowcore *) __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, 0); lc = (struct _lowcore *) __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, 0);
memset(lc, 0, PAGE_SIZE); memset(lc, 0, PAGE_SIZE);
#else /* CONFIG_ARCH_S390X */
lc = (struct _lowcore *) __alloc_bootmem(2*PAGE_SIZE, 2*PAGE_SIZE, 0);
memset(lc, 0, 2*PAGE_SIZE);
#endif /* CONFIG_ARCH_S390X */
lc->restart_psw.mask = PSW_BASE_BITS; lc->restart_psw.mask = PSW_BASE_BITS;
lc->restart_psw.addr = PSW_ADDR_AMODE31 + (__u32) restart_int_handler; lc->restart_psw.addr =
PSW_ADDR_AMODE + (unsigned long) restart_int_handler;
lc->external_new_psw.mask = PSW_KERNEL_BITS; lc->external_new_psw.mask = PSW_KERNEL_BITS;
lc->external_new_psw.addr = PSW_ADDR_AMODE31 + (__u32) ext_int_handler; lc->external_new_psw.addr =
PSW_ADDR_AMODE + (unsigned long) ext_int_handler;
lc->svc_new_psw.mask = PSW_KERNEL_BITS; lc->svc_new_psw.mask = PSW_KERNEL_BITS;
lc->svc_new_psw.addr = PSW_ADDR_AMODE31 + (__u32) system_call; lc->svc_new_psw.addr = PSW_ADDR_AMODE + (unsigned long) system_call;
lc->program_new_psw.mask = PSW_KERNEL_BITS; lc->program_new_psw.mask = PSW_KERNEL_BITS;
lc->program_new_psw.addr = PSW_ADDR_AMODE31 + (__u32)pgm_check_handler; lc->program_new_psw.addr =
lc->mcck_new_psw.mask = PSW_KERNEL_BITS; PSW_ADDR_AMODE + (unsigned long)pgm_check_handler;
lc->mcck_new_psw.addr = PSW_ADDR_AMODE31 + (__u32) mcck_int_handler; lc->mcck_new_psw.mask = PSW_KERNEL_BITS;
lc->mcck_new_psw.addr =
PSW_ADDR_AMODE + (unsigned long) mcck_int_handler;
lc->io_new_psw.mask = PSW_KERNEL_BITS; lc->io_new_psw.mask = PSW_KERNEL_BITS;
lc->io_new_psw.addr = PSW_ADDR_AMODE31 + (__u32) io_int_handler; lc->io_new_psw.addr = PSW_ADDR_AMODE + (unsigned long) io_int_handler;
lc->ipl_device = S390_lowcore.ipl_device; lc->ipl_device = S390_lowcore.ipl_device;
lc->jiffy_timer = -1LL;
#ifndef CONFIG_ARCH_S390X
lc->kernel_stack = ((__u32) &init_thread_union) + 8192; lc->kernel_stack = ((__u32) &init_thread_union) + 8192;
lc->async_stack = (__u32) lc->async_stack = (__u32)
__alloc_bootmem(2*PAGE_SIZE, 2*PAGE_SIZE, 0) + 8192; __alloc_bootmem(2*PAGE_SIZE, 2*PAGE_SIZE, 0) + 8192;
lc->jiffy_timer = -1LL;
set_prefix((__u32) lc); set_prefix((__u32) lc);
#else /* CONFIG_ARCH_S390X */
lc->kernel_stack = ((__u64) &init_thread_union) + 16384;
lc->async_stack = (__u64)
__alloc_bootmem(4*PAGE_SIZE, 4*PAGE_SIZE, 0) + 16384;
if (MACHINE_HAS_DIAG44)
lc->diag44_opcode = 0x83000044;
else
lc->diag44_opcode = 0x07000700;
set_prefix((__u32)(__u64) lc);
#endif /* CONFIG_ARCH_S390X */
cpu_init(); cpu_init();
__cpu_logical_map[0] = S390_lowcore.cpu_data.cpu_addr; __cpu_logical_map[0] = S390_lowcore.cpu_data.cpu_addr;
......
...@@ -146,7 +146,7 @@ sys_sigaltstack(const stack_t *uss, stack_t *uoss, struct pt_regs *regs) ...@@ -146,7 +146,7 @@ sys_sigaltstack(const stack_t *uss, stack_t *uoss, struct pt_regs *regs)
/* Returns non-zero on fault. */ /* Returns non-zero on fault. */
static int save_sigregs(struct pt_regs *regs,_sigregs *sregs) static int save_sigregs(struct pt_regs *regs, _sigregs *sregs)
{ {
int err; int err;
...@@ -158,18 +158,18 @@ static int save_sigregs(struct pt_regs *regs,_sigregs *sregs) ...@@ -158,18 +158,18 @@ static int save_sigregs(struct pt_regs *regs,_sigregs *sregs)
* to merge them with the emulated registers. * to merge them with the emulated registers.
*/ */
save_fp_regs(&current->thread.fp_regs); save_fp_regs(&current->thread.fp_regs);
return __copy_to_user(&sregs->fpregs, &current->thread.fp_regs, return __copy_to_user(&sregs->fpregs, &current->thread.fp_regs,
sizeof(s390_fp_regs)); sizeof(s390_fp_regs));
} }
/* Returns positive number on error */ /* Returns positive number on error */
static int restore_sigregs(struct pt_regs *regs,_sigregs *sregs) static int restore_sigregs(struct pt_regs *regs, _sigregs *sregs)
{ {
int err; int err;
err = __copy_from_user(regs, &sregs->regs, sizeof(_s390_regs_common)); err = __copy_from_user(regs, &sregs->regs, sizeof(_s390_regs_common));
regs->psw.mask = PSW_USER_BITS | (regs->psw.mask & PSW_MASK_CC); regs->psw.mask = PSW_USER_BITS | (regs->psw.mask & PSW_MASK_CC);
regs->psw.addr |= PSW_ADDR_AMODE31; regs->psw.addr |= PSW_ADDR_AMODE;
if (err) if (err)
return err; return err;
...@@ -299,9 +299,11 @@ static void setup_frame(int sig, struct k_sigaction *ka, ...@@ -299,9 +299,11 @@ static void setup_frame(int sig, struct k_sigaction *ka,
/* Set up to return from userspace. If provided, use a stub /* Set up to return from userspace. If provided, use a stub
already in userspace. */ already in userspace. */
if (ka->sa.sa_flags & SA_RESTORER) { if (ka->sa.sa_flags & SA_RESTORER) {
regs->gprs[14] = (__u32) ka->sa.sa_restorer | PSW_ADDR_AMODE31; regs->gprs[14] = (unsigned long)
ka->sa.sa_restorer | PSW_ADDR_AMODE;
} else { } else {
regs->gprs[14] = (__u32) frame->retcode | PSW_ADDR_AMODE31; regs->gprs[14] = (unsigned long)
frame->retcode | PSW_ADDR_AMODE;
if (__put_user(S390_SYSCALL_OPCODE | __NR_sigreturn, if (__put_user(S390_SYSCALL_OPCODE | __NR_sigreturn,
(u16 *)(frame->retcode))) (u16 *)(frame->retcode)))
goto give_sigsegv; goto give_sigsegv;
...@@ -312,12 +314,12 @@ static void setup_frame(int sig, struct k_sigaction *ka, ...@@ -312,12 +314,12 @@ static void setup_frame(int sig, struct k_sigaction *ka,
goto give_sigsegv; goto give_sigsegv;
/* Set up registers for signal handler */ /* Set up registers for signal handler */
regs->gprs[15] = (__u32) frame; regs->gprs[15] = (unsigned long) frame;
regs->psw.addr = (__u32) ka->sa.sa_handler | PSW_ADDR_AMODE31; regs->psw.addr = (unsigned long) ka->sa.sa_handler | PSW_ADDR_AMODE;
regs->psw.mask = PSW_USER_BITS; regs->psw.mask = PSW_USER_BITS;
regs->gprs[2] = map_signal(sig); regs->gprs[2] = map_signal(sig);
regs->gprs[3] = (__u32) &frame->sc; regs->gprs[3] = (unsigned long) &frame->sc;
/* We forgot to include these in the sigcontext. /* We forgot to include these in the sigcontext.
To avoid breaking binary compatibility, they are passed as args. */ To avoid breaking binary compatibility, they are passed as args. */
...@@ -357,9 +359,11 @@ static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, ...@@ -357,9 +359,11 @@ static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
/* Set up to return from userspace. If provided, use a stub /* Set up to return from userspace. If provided, use a stub
already in userspace. */ already in userspace. */
if (ka->sa.sa_flags & SA_RESTORER) { if (ka->sa.sa_flags & SA_RESTORER) {
regs->gprs[14] = (__u32) ka->sa.sa_restorer | PSW_ADDR_AMODE31; regs->gprs[14] = (unsigned long)
ka->sa.sa_restorer | PSW_ADDR_AMODE;
} else { } else {
regs->gprs[14] = (__u32) frame->retcode | PSW_ADDR_AMODE31; regs->gprs[14] = (unsigned long)
frame->retcode | PSW_ADDR_AMODE;
err |= __put_user(S390_SYSCALL_OPCODE | __NR_rt_sigreturn, err |= __put_user(S390_SYSCALL_OPCODE | __NR_rt_sigreturn,
(u16 *)(frame->retcode)); (u16 *)(frame->retcode));
} }
...@@ -369,13 +373,13 @@ static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, ...@@ -369,13 +373,13 @@ static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
goto give_sigsegv; goto give_sigsegv;
/* Set up registers for signal handler */ /* Set up registers for signal handler */
regs->gprs[15] = (__u32) frame; regs->gprs[15] = (unsigned long) frame;
regs->psw.addr = (__u32) ka->sa.sa_handler | PSW_ADDR_AMODE31; regs->psw.addr = (unsigned long) ka->sa.sa_handler | PSW_ADDR_AMODE;
regs->psw.mask = PSW_USER_BITS; regs->psw.mask = PSW_USER_BITS;
regs->gprs[2] = map_signal(sig); regs->gprs[2] = map_signal(sig);
regs->gprs[3] = (__u32) &frame->info; regs->gprs[3] = (unsigned long) &frame->info;
regs->gprs[4] = (__u32) &frame->uc; regs->gprs[4] = (unsigned long) &frame->uc;
return; return;
give_sigsegv: give_sigsegv:
...@@ -461,6 +465,13 @@ int do_signal(struct pt_regs *regs, sigset_t *oldset) ...@@ -461,6 +465,13 @@ int do_signal(struct pt_regs *regs, sigset_t *oldset)
if (!oldset) if (!oldset)
oldset = &current->blocked; oldset = &current->blocked;
#ifdef CONFIG_S390_SUPPORT
if (test_thread_flag(TIF_31BIT)) {
extern asmlinkage int do_signal32(struct pt_regs *regs,
sigset_t *oldset);
return do_signal32(regs, oldset);
}
#endif
signr = get_signal_to_deliver(&info, regs, NULL); signr = get_signal_to_deliver(&info, regs, NULL);
if (signr > 0) { if (signr > 0) {
......
...@@ -171,7 +171,7 @@ static inline void do_store_status(void) ...@@ -171,7 +171,7 @@ static inline void do_store_status(void)
/* store status of all processors in their lowcores (real 0) */ /* store status of all processors in their lowcores (real 0) */
for (i = 0; i < NR_CPUS; i++) { for (i = 0; i < NR_CPUS; i++) {
if (!cpu_online(i) || smp_processor_id() == i) if (!cpu_online(i) || smp_processor_id() == i)
continue; continue;
low_core_addr = (unsigned long) lowcore_ptr[i]; low_core_addr = (unsigned long) lowcore_ptr[i];
do { do {
...@@ -310,13 +310,11 @@ static sigp_ccode smp_ext_bitcall(int cpu, ec_bit_sig sig) ...@@ -310,13 +310,11 @@ static sigp_ccode smp_ext_bitcall(int cpu, ec_bit_sig sig)
*/ */
static void smp_ext_bitcall_others(ec_bit_sig sig) static void smp_ext_bitcall_others(ec_bit_sig sig)
{ {
struct _lowcore *lowcore;
int i; int i;
for (i = 0; i < NR_CPUS; i++) { for (i = 0; i < NR_CPUS; i++) {
if (!cpu_online(i) || smp_processor_id() == i) if (!cpu_online(i) || smp_processor_id() == i)
continue; continue;
lowcore = lowcore_ptr[i];
/* /*
* Set signaling bit in lowcore of target cpu and kick it * Set signaling bit in lowcore of target cpu and kick it
*/ */
...@@ -326,6 +324,7 @@ static void smp_ext_bitcall_others(ec_bit_sig sig) ...@@ -326,6 +324,7 @@ static void smp_ext_bitcall_others(ec_bit_sig sig)
} }
} }
#ifndef CONFIG_ARCH_S390X
/* /*
* this function sends a 'purge tlb' signal to another CPU. * this function sends a 'purge tlb' signal to another CPU.
*/ */
...@@ -338,6 +337,7 @@ void smp_ptlb_all(void) ...@@ -338,6 +337,7 @@ void smp_ptlb_all(void)
{ {
on_each_cpu(smp_ptlb_callback, NULL, 0, 1); on_each_cpu(smp_ptlb_callback, NULL, 0, 1);
} }
#endif /* ! CONFIG_ARCH_S390X */
/* /*
* this function sends a 'reschedule' IPI to another CPU. * this function sends a 'reschedule' IPI to another CPU.
...@@ -356,8 +356,8 @@ typedef struct ...@@ -356,8 +356,8 @@ typedef struct
{ {
__u16 start_ctl; __u16 start_ctl;
__u16 end_ctl; __u16 end_ctl;
__u32 orvals[16]; unsigned long orvals[16];
__u32 andvals[16]; unsigned long andvals[16];
} ec_creg_mask_parms; } ec_creg_mask_parms;
/* /*
...@@ -365,25 +365,14 @@ typedef struct ...@@ -365,25 +365,14 @@ typedef struct
*/ */
void smp_ctl_bit_callback(void *info) { void smp_ctl_bit_callback(void *info) {
ec_creg_mask_parms *pp; ec_creg_mask_parms *pp;
u32 cregs[16]; unsigned long cregs[16];
int i; int i;
pp = (ec_creg_mask_parms *) info; pp = (ec_creg_mask_parms *) info;
asm volatile (" bras 1,0f\n" __ctl_store(cregs[pp->start_ctl], pp->start_ctl, pp->end_ctl);
" stctl 0,0,0(%0)\n"
"0: ex %1,0(1)\n"
: : "a" (cregs+pp->start_ctl),
"a" ((pp->start_ctl<<4) + pp->end_ctl)
: "memory", "1" );
for (i = pp->start_ctl; i <= pp->end_ctl; i++) for (i = pp->start_ctl; i <= pp->end_ctl; i++)
cregs[i] = (cregs[i] & pp->andvals[i]) | pp->orvals[i]; cregs[i] = (cregs[i] & pp->andvals[i]) | pp->orvals[i];
asm volatile (" bras 1,0f\n" __ctl_load(cregs[pp->start_ctl], pp->start_ctl, pp->end_ctl);
" lctl 0,0,0(%0)\n"
"0: ex %1,0(1)\n"
: : "a" (cregs+pp->start_ctl),
"a" ((pp->start_ctl<<4) + pp->end_ctl)
: "memory", "1" );
return;
} }
/* /*
...@@ -395,7 +384,7 @@ void smp_ctl_set_bit(int cr, int bit) { ...@@ -395,7 +384,7 @@ void smp_ctl_set_bit(int cr, int bit) {
parms.start_ctl = cr; parms.start_ctl = cr;
parms.end_ctl = cr; parms.end_ctl = cr;
parms.orvals[cr] = 1 << bit; parms.orvals[cr] = 1 << bit;
parms.andvals[cr] = 0xFFFFFFFF; parms.andvals[cr] = -1L;
preempt_disable(); preempt_disable();
smp_call_function(smp_ctl_bit_callback, &parms, 0, 1); smp_call_function(smp_ctl_bit_callback, &parms, 0, 1);
__ctl_set_bit(cr, bit); __ctl_set_bit(cr, bit);
...@@ -410,8 +399,8 @@ void smp_ctl_clear_bit(int cr, int bit) { ...@@ -410,8 +399,8 @@ void smp_ctl_clear_bit(int cr, int bit) {
parms.start_ctl = cr; parms.start_ctl = cr;
parms.end_ctl = cr; parms.end_ctl = cr;
parms.orvals[cr] = 0x00000000; parms.orvals[cr] = 0;
parms.andvals[cr] = ~(1 << bit); parms.andvals[cr] = ~(1L << bit);
preempt_disable(); preempt_disable();
smp_call_function(smp_ctl_bit_callback, &parms, 0, 1); smp_call_function(smp_ctl_bit_callback, &parms, 0, 1);
__ctl_clear_bit(cr, bit); __ctl_clear_bit(cr, bit);
...@@ -493,7 +482,7 @@ int __cpu_up(unsigned int cpu) ...@@ -493,7 +482,7 @@ int __cpu_up(unsigned int cpu)
* Set prefix page for new cpu * Set prefix page for new cpu
*/ */
ccode = signal_processor_p((u32)(lowcore_ptr[cpu]), ccode = signal_processor_p((unsigned long)(lowcore_ptr[cpu]),
cpu, sigp_set_prefix); cpu, sigp_set_prefix);
if (ccode){ if (ccode){
printk("sigp_set_prefix failed for cpu %d " printk("sigp_set_prefix failed for cpu %d "
...@@ -502,7 +491,6 @@ int __cpu_up(unsigned int cpu) ...@@ -502,7 +491,6 @@ int __cpu_up(unsigned int cpu)
return -EIO; return -EIO;
} }
/* We can't use kernel_thread since we must _avoid_ to reschedule /* We can't use kernel_thread since we must _avoid_ to reschedule
the child. */ the child. */
idle = fork_by_hand(); idle = fork_by_hand();
...@@ -521,15 +509,13 @@ int __cpu_up(unsigned int cpu) ...@@ -521,15 +509,13 @@ int __cpu_up(unsigned int cpu)
cpu_lowcore = lowcore_ptr[cpu]; cpu_lowcore = lowcore_ptr[cpu];
cpu_lowcore->save_area[15] = idle->thread.ksp; cpu_lowcore->save_area[15] = idle->thread.ksp;
cpu_lowcore->kernel_stack = (__u32) idle->thread_info + (2*PAGE_SIZE); cpu_lowcore->kernel_stack = (unsigned long)
__asm__ __volatile__("la 1,%0\n\t" idle->thread_info + (THREAD_SIZE);
"stctl 0,15,0(1)\n\t" __ctl_store(cpu_lowcore->cregs_save_area[0], 0, 15);
"la 1,%1\n\t" __asm__ __volatile__("la 1,%0\n\t"
"stam 0,15,0(1)" "stam 0,15,0(1)"
: "=m" (cpu_lowcore->cregs_save_area[0]), : "=m" (cpu_lowcore->access_regs_save_area[0])
"=m" (cpu_lowcore->access_regs_save_area[0]) : : "1", "memory");
: : "1", "memory");
eieio(); eieio();
signal_processor(cpu,sigp_restart); signal_processor(cpu,sigp_restart);
...@@ -551,7 +537,6 @@ void __init smp_prepare_cpus(unsigned int max_cpus) ...@@ -551,7 +537,6 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
panic("Couldn't request external interrupt 0x1202"); panic("Couldn't request external interrupt 0x1202");
smp_check_cpus(max_cpus); smp_check_cpus(max_cpus);
memset(lowcore_ptr,0,sizeof(lowcore_ptr)); memset(lowcore_ptr,0,sizeof(lowcore_ptr));
/* /*
* Initialize prefix pages and stacks for all possible cpus * Initialize prefix pages and stacks for all possible cpus
*/ */
...@@ -561,15 +546,16 @@ void __init smp_prepare_cpus(unsigned int max_cpus) ...@@ -561,15 +546,16 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
if (!cpu_possible(i)) if (!cpu_possible(i))
continue; continue;
lowcore_ptr[i] = (struct _lowcore *) lowcore_ptr[i] = (struct _lowcore *)
__get_free_page(GFP_KERNEL|GFP_DMA); __get_free_pages(GFP_KERNEL|GFP_DMA,
async_stack = __get_free_pages(GFP_KERNEL,1); sizeof(void*) == 8 ? 1 : 0);
if (lowcore_ptr[i] == NULL || async_stack == 0UL) async_stack = __get_free_pages(GFP_KERNEL,ASYNC_ORDER);
if (lowcore_ptr[i] == NULL || async_stack == 0ULL)
panic("smp_boot_cpus failed to allocate memory\n"); panic("smp_boot_cpus failed to allocate memory\n");
memcpy(lowcore_ptr[i], &S390_lowcore, sizeof(struct _lowcore)); memcpy(lowcore_ptr[i], &S390_lowcore, sizeof(struct _lowcore));
lowcore_ptr[i]->async_stack = async_stack + (2 * PAGE_SIZE); lowcore_ptr[i]->async_stack = async_stack + (ASYNC_SIZE);
} }
set_prefix((u32) lowcore_ptr[smp_processor_id()]); set_prefix((u32)(unsigned long) lowcore_ptr[smp_processor_id()]);
} }
void __devinit smp_prepare_boot_cpu(void) void __devinit smp_prepare_boot_cpu(void)
......
...@@ -24,15 +24,24 @@ ...@@ -24,15 +24,24 @@
#include <linux/mman.h> #include <linux/mman.h>
#include <linux/file.h> #include <linux/file.h>
#include <linux/utsname.h> #include <linux/utsname.h>
#ifdef CONFIG_ARCH_S390X
#include <linux/personality.h>
#endif /* CONFIG_ARCH_S390X */
#include <asm/uaccess.h> #include <asm/uaccess.h>
#include <asm/ipc.h> #include <asm/ipc.h>
#ifndef CONFIG_ARCH_S390X
#define __SYS_RETTYPE int
#else
#define __SYS_RETTYPE long
#endif /* CONFIG_ARCH_S390X */
/* /*
* sys_pipe() is the normal C calling standard for creating * sys_pipe() is the normal C calling standard for creating
* a pipe. It's not the way Unix traditionally does this, though. * a pipe. It's not the way Unix traditionally does this, though.
*/ */
asmlinkage int sys_pipe(unsigned long * fildes) asmlinkage __SYS_RETTYPE sys_pipe(unsigned long * fildes)
{ {
int fd[2]; int fd[2];
int error; int error;
...@@ -51,7 +60,7 @@ static inline long do_mmap2( ...@@ -51,7 +60,7 @@ static inline long do_mmap2(
unsigned long prot, unsigned long flags, unsigned long prot, unsigned long flags,
unsigned long fd, unsigned long pgoff) unsigned long fd, unsigned long pgoff)
{ {
int error = -EBADF; __SYS_RETTYPE error = -EBADF;
struct file * file = NULL; struct file * file = NULL;
flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
...@@ -99,10 +108,10 @@ asmlinkage long sys_mmap2(struct mmap_arg_struct *arg) ...@@ -99,10 +108,10 @@ asmlinkage long sys_mmap2(struct mmap_arg_struct *arg)
return error; return error;
} }
asmlinkage int old_mmap(struct mmap_arg_struct *arg) asmlinkage __SYS_RETTYPE old_mmap(struct mmap_arg_struct *arg)
{ {
struct mmap_arg_struct a; struct mmap_arg_struct a;
int error = -EFAULT; __SYS_RETTYPE error = -EFAULT;
if (copy_from_user(&a, arg, sizeof(a))) if (copy_from_user(&a, arg, sizeof(a)))
goto out; goto out;
...@@ -118,6 +127,7 @@ asmlinkage int old_mmap(struct mmap_arg_struct *arg) ...@@ -118,6 +127,7 @@ asmlinkage int old_mmap(struct mmap_arg_struct *arg)
extern asmlinkage int sys_select(int, fd_set *, fd_set *, fd_set *, struct timeval *); extern asmlinkage int sys_select(int, fd_set *, fd_set *, fd_set *, struct timeval *);
#ifndef CONFIG_ARCH_S390X
struct sel_arg_struct { struct sel_arg_struct {
unsigned long n; unsigned long n;
fd_set *inp, *outp, *exp; fd_set *inp, *outp, *exp;
...@@ -132,22 +142,61 @@ asmlinkage int old_select(struct sel_arg_struct *arg) ...@@ -132,22 +142,61 @@ asmlinkage int old_select(struct sel_arg_struct *arg)
return -EFAULT; return -EFAULT;
/* sys_select() does the appropriate kernel locking */ /* sys_select() does the appropriate kernel locking */
return sys_select(a.n, a.inp, a.outp, a.exp, a.tvp); return sys_select(a.n, a.inp, a.outp, a.exp, a.tvp);
}
#else /* CONFIG_ARCH_S390X */
unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
unsigned long len, unsigned long pgoff,
unsigned long flags)
{
struct vm_area_struct *vma;
unsigned long end;
if (test_thread_flag(TIF_31BIT)) {
if (!addr)
addr = 0x40000000;
end = 0x80000000;
} else {
if (!addr)
addr = TASK_SIZE / 2;
end = TASK_SIZE;
}
if (len > end)
return -ENOMEM;
addr = PAGE_ALIGN(addr);
for (vma = find_vma(current->mm, addr); ; vma = vma->vm_next) {
/* At this point: (!vma || addr < vma->vm_end). */
if (end - len < addr)
return -ENOMEM;
if (!vma || addr + len <= vma->vm_start)
return addr;
addr = vma->vm_end;
}
} }
#endif /* CONFIG_ARCH_S390X */
/* /*
* sys_ipc() is the de-multiplexer for the SysV IPC calls.. * sys_ipc() is the de-multiplexer for the SysV IPC calls..
* *
* This is really horribly ugly. * This is really horribly ugly.
*/ */
asmlinkage int sys_ipc (uint call, int first, int second, asmlinkage __SYS_RETTYPE sys_ipc (uint call, int first, int second,
int third, void *ptr) unsigned long third, void *ptr,
unsigned long fifth)
{ {
struct ipc_kludge tmp; struct ipc_kludge tmp;
int ret; int ret;
switch (call) { switch (call) {
case SEMOP: case SEMOP:
return sys_semop (first, (struct sembuf *)ptr, second); return sys_semtimedop (first, (struct sembuf *) ptr, second,
NULL);
case SEMTIMEDOP:
return sys_semtimedop(first, (struct sembuf *) ptr, second,
(const struct timespec *) fifth);
case SEMGET: case SEMGET:
return sys_semget (first, second, third); return sys_semget (first, second, third);
case SEMCTL: { case SEMCTL: {
...@@ -191,7 +240,7 @@ asmlinkage int sys_ipc (uint call, int first, int second, ...@@ -191,7 +240,7 @@ asmlinkage int sys_ipc (uint call, int first, int second,
return sys_shmctl (first, second, return sys_shmctl (first, second,
(struct shmid_ds *) ptr); (struct shmid_ds *) ptr);
default: default:
return -EINVAL; return -ENOSYS;
} }
...@@ -212,6 +261,7 @@ asmlinkage int sys_uname(struct old_utsname * name) ...@@ -212,6 +261,7 @@ asmlinkage int sys_uname(struct old_utsname * name)
return err?-EFAULT:0; return err?-EFAULT:0;
} }
#ifndef CONFIG_ARCH_S390X
asmlinkage int sys_olduname(struct oldold_utsname * name) asmlinkage int sys_olduname(struct oldold_utsname * name)
{ {
int error; int error;
...@@ -243,6 +293,36 @@ asmlinkage int sys_olduname(struct oldold_utsname * name) ...@@ -243,6 +293,36 @@ asmlinkage int sys_olduname(struct oldold_utsname * name)
asmlinkage int sys_ioperm(unsigned long from, unsigned long num, int on) asmlinkage int sys_ioperm(unsigned long from, unsigned long num, int on)
{ {
return -ENOSYS; return -ENOSYS;
} }
#else /* CONFIG_ARCH_S390X */
extern asmlinkage int sys_newuname(struct new_utsname * name);
asmlinkage int s390x_newuname(struct new_utsname * name)
{
int ret = sys_newuname(name);
if (current->personality == PER_LINUX32 && !ret) {
ret = copy_to_user(name->machine, "s390\0\0\0\0", 8);
if (ret) ret = -EFAULT;
}
return ret;
}
extern asmlinkage long sys_personality(unsigned long);
asmlinkage int s390x_personality(unsigned long personality)
{
int ret;
if (current->personality == PER_LINUX32 && personality == PER_LINUX)
personality = PER_LINUX32;
ret = sys_personality(personality);
if (ret == PER_LINUX32)
ret = PER_LINUX;
return ret;
}
#endif /* CONFIG_ARCH_S390X */