Commit e5ef21d1 authored by Arnd Bergmann

ia64: remove CONFIG_SET_FS support

ia64 only uses set_fs() in one file to handle unaligned access for
both user space and kernel instructions. Rewrite this to explicitly
pass around a flag about which one it is and drop the feature from
the architecture.
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
parent 75d4d295
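The change replaces the old "widen the address limit" idiom with an explicit flag that selects the right accessor. A condensed, hypothetical sketch of the before/after pattern, using identifiers from the diff below (not a complete function):

	/* before: lift addr_limit so copy_from_user() accepts kernel addresses */
	mm_segment_t old_fs = get_fs();
	set_fs(KERNEL_DS);
	if (copy_from_user(&val, (void __user *)ifa, len))
		ret = -1;
	set_fs(old_fs);

	/* after: choose the accessor from an explicit kernel_mode flag */
	if (kernel_mode)
		ret = copy_from_kernel_nofault(&val, (void *)ifa, len);
	else
		ret = copy_from_user(&val, (void __user *)ifa, len);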
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -61,7 +61,6 @@ config IA64
 	select NEED_SG_DMA_LENGTH
 	select NUMA if !FLATMEM
 	select PCI_MSI_ARCH_FALLBACKS if PCI_MSI
-	select SET_FS
 	select ZONE_DMA32
 	default y
 	help
--- a/arch/ia64/include/asm/processor.h
+++ b/arch/ia64/include/asm/processor.h
@@ -243,10 +243,6 @@ DECLARE_PER_CPU(struct cpuinfo_ia64, ia64_cpu_info);
 
 extern void print_cpu_info (struct cpuinfo_ia64 *);
 
-typedef struct {
-	unsigned long seg;
-} mm_segment_t;
-
 #define SET_UNALIGN_CTL(task,value)	\
 ({					\
 	(task)->thread.flags = (((task)->thread.flags & ~IA64_THREAD_UAC_MASK)	\
--- a/arch/ia64/include/asm/thread_info.h
+++ b/arch/ia64/include/asm/thread_info.h
@@ -27,7 +27,6 @@ struct thread_info {
 	__u32 cpu;			/* current CPU */
 	__u32 last_cpu;			/* Last CPU thread ran on */
 	__u32 status;			/* Thread synchronous flags */
-	mm_segment_t addr_limit;	/* user-level address space limit */
 	int preempt_count;		/* 0=premptable, <0=BUG; will also serve as bh-counter */
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
 	__u64 utime;
@@ -48,7 +47,6 @@ struct thread_info {
 	.task		= &tsk,			\
 	.flags		= 0,			\
 	.cpu		= 0,			\
-	.addr_limit	= KERNEL_DS,		\
 	.preempt_count	= INIT_PREEMPT_COUNT,	\
 }
--- a/arch/ia64/include/asm/uaccess.h
+++ b/arch/ia64/include/asm/uaccess.h
@@ -42,26 +42,17 @@
 #include <asm/extable.h>
 
 /*
- * For historical reasons, the following macros are grossly misnamed:
- */
-#define KERNEL_DS	((mm_segment_t) { ~0UL })		/* cf. access_ok() */
-#define USER_DS		((mm_segment_t) { TASK_SIZE-1 })	/* cf. access_ok() */
-
-#define get_fs()	(current_thread_info()->addr_limit)
-#define set_fs(x)	(current_thread_info()->addr_limit = (x))
-
-/*
- * When accessing user memory, we need to make sure the entire area really is in
- * user-level space. In order to do this efficiently, we make sure that the page at
- * address TASK_SIZE is never valid. We also need to make sure that the address doesn't
+ * When accessing user memory, we need to make sure the entire area really is
+ * in user-level space. We also need to make sure that the address doesn't
  * point inside the virtually mapped linear page table.
  */
 static inline int __access_ok(const void __user *p, unsigned long size)
 {
+	unsigned long limit = TASK_SIZE;
 	unsigned long addr = (unsigned long)p;
-	unsigned long seg = get_fs().seg;
 
-	return likely(addr <= seg) &&
-	 (seg == KERNEL_DS.seg || likely(REGION_OFFSET(addr) < RGN_MAP_LIMIT));
+	return likely((size <= limit) && (addr <= (limit - size)) &&
+		      likely(REGION_OFFSET(addr) < RGN_MAP_LIMIT));
 }
 #define __access_ok	__access_ok
 #include <asm-generic/access_ok.h>
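The rewritten __access_ok() also uses the standard overflow-safe form of a range check: "addr + size <= limit" can wrap around for a large size, whereas "size <= limit && addr <= limit - size" cannot. A hypothetical, standalone demonstration of just the range part (the TASK_SIZE stand-in value is made up; the real check additionally tests REGION_OFFSET(addr) < RGN_MAP_LIMIT):

	#include <stdbool.h>
	#include <stdio.h>

	/* stand-in limit for illustration only */
	#define FAKE_TASK_SIZE	0xa000000000000000UL

	static bool range_ok(unsigned long addr, unsigned long size)
	{
		unsigned long limit = FAKE_TASK_SIZE;

		/* the naive "addr + size <= limit" may wrap; this form cannot */
		return size <= limit && addr <= limit - size;
	}

	int main(void)
	{
		/* addr + size wraps past zero; a naive check would accept this */
		printf("%d\n", range_ok(0xfffffffffffff000UL, 0x2000UL));	/* prints 0 */
		printf("%d\n", range_ok(0x1000UL, 0x2000UL));			/* prints 1 */
		return 0;
	}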
--- a/arch/ia64/kernel/unaligned.c
+++ b/arch/ia64/kernel/unaligned.c
@@ -749,9 +749,25 @@ emulate_load_updates (update_t type, load_store_t ld, struct pt_regs *regs, unsi
 	}
 }
 
+static int emulate_store(unsigned long ifa, void *val, int len, bool kernel_mode)
+{
+	if (kernel_mode)
+		return copy_to_kernel_nofault((void *)ifa, val, len);
+
+	return copy_to_user((void __user *)ifa, val, len);
+}
+
+static int emulate_load(void *val, unsigned long ifa, int len, bool kernel_mode)
+{
+	if (kernel_mode)
+		return copy_from_kernel_nofault(val, (void *)ifa, len);
+
+	return copy_from_user(val, (void __user *)ifa, len);
+}
+
 static int
-emulate_load_int (unsigned long ifa, load_store_t ld, struct pt_regs *regs)
+emulate_load_int (unsigned long ifa, load_store_t ld, struct pt_regs *regs,
+		  bool kernel_mode)
 {
 	unsigned int len = 1 << ld.x6_sz;
 	unsigned long val = 0;
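copy_to_kernel_nofault() and copy_from_kernel_nofault() are the kernel's existing helpers for touching kernel addresses without risking an unhandled fault; like the user-copy routines, they return nonzero on failure, so the two wrappers can be called uniformly by the emulation code. A hypothetical call site, mirroring how the converted routines below use the wrapper:

	/* sketch only: fetch an 8-byte value at the faulting address ifa;
	 * kernel_mode was decided earlier by the trap handler */
	unsigned long val = 0;

	if (emulate_load(&val, ifa, sizeof(val), kernel_mode))
		return -1;	/* address not accessible in the chosen mode */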
@@ -774,7 +790,7 @@ emulate_load_int (unsigned long ifa, load_store_t ld, struct pt_regs *regs)
 		return -1;
 	}
 	/* this assumes little-endian byte-order: */
-	if (copy_from_user(&val, (void __user *) ifa, len))
+	if (emulate_load(&val, ifa, len, kernel_mode))
 		return -1;
 	setreg(ld.r1, val, 0, regs);
@@ -872,7 +888,8 @@ emulate_load_int (unsigned long ifa, load_store_t ld, struct pt_regs *regs)
 }
 
 static int
-emulate_store_int (unsigned long ifa, load_store_t ld, struct pt_regs *regs)
+emulate_store_int (unsigned long ifa, load_store_t ld, struct pt_regs *regs,
+		   bool kernel_mode)
 {
 	unsigned long r2;
 	unsigned int len = 1 << ld.x6_sz;
@@ -901,7 +918,7 @@ emulate_store_int (unsigned long ifa, load_store_t ld, struct pt_regs *regs)
 	}
 
 	/* this assumes little-endian byte-order: */
-	if (copy_to_user((void __user *) ifa, &r2, len))
+	if (emulate_store(ifa, &r2, len, kernel_mode))
 		return -1;
 
 	/*
@@ -1021,7 +1038,7 @@ float2mem_double (struct ia64_fpreg *init, struct ia64_fpreg *final)
 }
 
 static int
-emulate_load_floatpair (unsigned long ifa, load_store_t ld, struct pt_regs *regs)
+emulate_load_floatpair (unsigned long ifa, load_store_t ld, struct pt_regs *regs, bool kernel_mode)
 {
 	struct ia64_fpreg fpr_init[2];
 	struct ia64_fpreg fpr_final[2];
@@ -1050,8 +1067,8 @@ emulate_load_floatpair (unsigned long ifa, load_store_t ld, struct pt_regs *regs
 	 * This assumes little-endian byte-order. Note that there is no "ldfpe"
 	 * instruction:
 	 */
-	if (copy_from_user(&fpr_init[0], (void __user *) ifa, len)
-	    || copy_from_user(&fpr_init[1], (void __user *) (ifa + len), len))
+	if (emulate_load(&fpr_init[0], ifa, len, kernel_mode)
+	    || emulate_load(&fpr_init[1], (ifa + len), len, kernel_mode))
 		return -1;
 
 	DPRINT("ld.r1=%d ld.imm=%d x6_sz=%d\n", ld.r1, ld.imm, ld.x6_sz);
@@ -1126,7 +1143,8 @@ emulate_load_floatpair (unsigned long ifa, load_store_t ld, struct pt_regs *regs
 
 static int
-emulate_load_float (unsigned long ifa, load_store_t ld, struct pt_regs *regs)
+emulate_load_float (unsigned long ifa, load_store_t ld, struct pt_regs *regs,
+		    bool kernel_mode)
 {
 	struct ia64_fpreg fpr_init;
 	struct ia64_fpreg fpr_final;
@@ -1152,7 +1170,7 @@ emulate_load_float (unsigned long ifa, load_store_t ld, struct pt_regs *regs)
 	 * See comments in ldX for descriptions on how the various loads are handled.
 	 */
 	if (ld.x6_op != 0x2) {
-		if (copy_from_user(&fpr_init, (void __user *) ifa, len))
+		if (emulate_load(&fpr_init, ifa, len, kernel_mode))
 			return -1;
 
 		DPRINT("ld.r1=%d x6_sz=%d\n", ld.r1, ld.x6_sz);
@@ -1202,7 +1220,8 @@ emulate_load_float (unsigned long ifa, load_store_t ld, struct pt_regs *regs)
 
 static int
-emulate_store_float (unsigned long ifa, load_store_t ld, struct pt_regs *regs)
+emulate_store_float (unsigned long ifa, load_store_t ld, struct pt_regs *regs,
+		     bool kernel_mode)
 {
 	struct ia64_fpreg fpr_init;
 	struct ia64_fpreg fpr_final;
@@ -1244,7 +1263,7 @@ emulate_store_float (unsigned long ifa, load_store_t ld, struct pt_regs *regs)
 	DDUMP("fpr_init =", &fpr_init, len);
 	DDUMP("fpr_final =", &fpr_final, len);
 
-	if (copy_to_user((void __user *) ifa, &fpr_final, len))
+	if (emulate_store(ifa, &fpr_final, len, kernel_mode))
 		return -1;
 
 	/*
@@ -1295,7 +1314,6 @@ void
 ia64_handle_unaligned (unsigned long ifa, struct pt_regs *regs)
 {
 	struct ia64_psr *ipsr = ia64_psr(regs);
-	mm_segment_t old_fs = get_fs();
 	unsigned long bundle[2];
 	unsigned long opcode;
 	const struct exception_table_entry *eh = NULL;
@@ -1304,6 +1322,7 @@ ia64_handle_unaligned (unsigned long ifa, struct pt_regs *regs)
 		load_store_t insn;
 	} u;
 	int ret = -1;
+	bool kernel_mode = false;
 
 	if (ia64_psr(regs)->be) {
 		/* we don't support big-endian accesses */
@@ -1367,13 +1386,13 @@ ia64_handle_unaligned (unsigned long ifa, struct pt_regs *regs)
 			if (unaligned_dump_stack)
 				dump_stack();
 		}
-		set_fs(KERNEL_DS);
+		kernel_mode = true;
 	}
 
 	DPRINT("iip=%lx ifa=%lx isr=%lx (ei=%d, sp=%d)\n",
 	       regs->cr_iip, ifa, regs->cr_ipsr, ipsr->ri, ipsr->it);
 
-	if (__copy_from_user(bundle, (void __user *) regs->cr_iip, 16))
+	if (emulate_load(bundle, regs->cr_iip, 16, kernel_mode))
 		goto failure;
 
 	/*
@@ -1467,7 +1486,7 @@ ia64_handle_unaligned (unsigned long ifa, struct pt_regs *regs)
 	      case LDCCLR_IMM_OP:
 	      case LDCNC_IMM_OP:
 	      case LDCCLRACQ_IMM_OP:
-		ret = emulate_load_int(ifa, u.insn, regs);
+		ret = emulate_load_int(ifa, u.insn, regs, kernel_mode);
 		break;
 
 	      case ST_OP:
@@ -1478,7 +1497,7 @@ ia64_handle_unaligned (unsigned long ifa, struct pt_regs *regs)
 		fallthrough;
 	      case ST_IMM_OP:
 	      case STREL_IMM_OP:
-		ret = emulate_store_int(ifa, u.insn, regs);
+		ret = emulate_store_int(ifa, u.insn, regs, kernel_mode);
 		break;
 
 	      case LDF_OP:
@@ -1486,21 +1505,21 @@ ia64_handle_unaligned (unsigned long ifa, struct pt_regs *regs)
 	      case LDFCCLR_OP:
 	      case LDFCNC_OP:
 		if (u.insn.x)
-			ret = emulate_load_floatpair(ifa, u.insn, regs);
+			ret = emulate_load_floatpair(ifa, u.insn, regs, kernel_mode);
 		else
-			ret = emulate_load_float(ifa, u.insn, regs);
+			ret = emulate_load_float(ifa, u.insn, regs, kernel_mode);
 		break;
 
 	      case LDF_IMM_OP:
 	      case LDFA_IMM_OP:
 	      case LDFCCLR_IMM_OP:
 	      case LDFCNC_IMM_OP:
-		ret = emulate_load_float(ifa, u.insn, regs);
+		ret = emulate_load_float(ifa, u.insn, regs, kernel_mode);
 		break;
 
 	      case STF_OP:
 	      case STF_IMM_OP:
-		ret = emulate_store_float(ifa, u.insn, regs);
+		ret = emulate_store_float(ifa, u.insn, regs, kernel_mode);
 		break;
 
 	      default:
@@ -1521,7 +1540,6 @@ ia64_handle_unaligned (unsigned long ifa, struct pt_regs *regs)
 	DPRINT("ipsr->ri=%d iip=%lx\n", ipsr->ri, regs->cr_iip);
 
   done:
-	set_fs(old_fs);		/* restore original address limit */
 	return;
 
   failure: