Commit a53d5182 authored by Paul Mackerras, committed by Michael Ellerman

powerpc: Separate out load/store emulation into its own function

This moves the parts of emulate_step() that deal with emulating
load and store instructions into a new function called
emulate_loadstore().  This is to make it possible to reuse this
code in the alignment handler.
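
The intended reuse pattern pairs analyse_instr() with the new function. As a rough illustration only (not part of this commit; the helper name and its error handling are assumptions about how the alignment handler could drive the new API, while the analyse_instr() and emulate_loadstore() signatures come from the diff below):

    /*
     * Illustrative sketch only -- not from this commit.  Decode the faulting
     * instruction with analyse_instr(), then let emulate_loadstore() perform
     * the access on behalf of the current process.
     */
    static int alignment_fixup_sketch(struct pt_regs *regs, unsigned int instr)
    {
            struct instruction_op op;

            if (analyse_instr(&op, regs, instr) != 0)
                    return -EINVAL;         /* not a plain storage access */

            /* Returns 0, or -EFAULT/-EACCES/-EINVAL as documented below. */
            return emulate_loadstore(regs, &op);
    }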
Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
parent d955189a
--- a/arch/powerpc/include/asm/sstep.h
+++ b/arch/powerpc/include/asm/sstep.h
@@ -152,6 +152,15 @@ void emulate_update_regs(struct pt_regs *reg, struct instruction_op *op);
  */
 extern int emulate_step(struct pt_regs *regs, unsigned int instr);
 
+/*
+ * Emulate a load or store instruction by reading/writing the
+ * memory of the current process.  FP/VMX/VSX registers are assumed
+ * to hold live values if the appropriate enable bit in regs->msr is
+ * set; otherwise this will use the saved values in the thread struct
+ * for user-mode accesses.
+ */
+extern int emulate_loadstore(struct pt_regs *regs, struct instruction_op *op);
+
 extern void emulate_vsx_load(struct instruction_op *op, union vsx_reg *reg,
 			     const void *mem, bool cross_endian);
 extern void emulate_vsx_store(struct instruction_op *op,
--- a/arch/powerpc/lib/sstep.c
+++ b/arch/powerpc/lib/sstep.c
@@ -2667,76 +2667,35 @@ void emulate_update_regs(struct pt_regs *regs, struct instruction_op *op)
 }
 
 /*
- * Emulate instructions that cause a transfer of control,
- * loads and stores, and a few other instructions.
- * Returns 1 if the step was emulated, 0 if not,
- * or -1 if the instruction is one that should not be stepped,
- * such as an rfid, or a mtmsrd that would clear MSR_RI.
+ * Emulate a previously-analysed load or store instruction.
+ * Return values are:
+ * 0 = instruction emulated successfully
+ * -EFAULT = address out of range or access faulted (regs->dar
+ *	     contains the faulting address)
+ * -EACCES = misaligned access, instruction requires alignment
+ * -EINVAL = unknown operation in *op
  */
-int emulate_step(struct pt_regs *regs, unsigned int instr)
+int emulate_loadstore(struct pt_regs *regs, struct instruction_op *op)
 {
-	struct instruction_op op;
-	int r, err, size, type;
-	unsigned long val;
-	unsigned int cr;
+	int err, size, type;
 	int i, rd, nb;
+	unsigned int cr;
+	unsigned long val;
 	unsigned long ea;
 	bool cross_endian;
 
-	r = analyse_instr(&op, regs, instr);
-	if (r < 0)
-		return r;
-	if (r > 0) {
-		emulate_update_regs(regs, &op);
-		return 1;
-	}
-
 	err = 0;
-	size = GETSIZE(op.type);
-	type = op.type & INSTR_TYPE_MASK;
+	size = GETSIZE(op->type);
+	type = op->type & INSTR_TYPE_MASK;
 	cross_endian = (regs->msr & MSR_LE) != (MSR_KERNEL & MSR_LE);
-
-	ea = op.ea;
-	if (OP_IS_LOAD_STORE(type) || type == CACHEOP)
-		ea = truncate_if_32bit(regs->msr, op.ea);
+	ea = truncate_if_32bit(regs->msr, op->ea);
 
 	switch (type) {
-	case CACHEOP:
-		if (!address_ok(regs, ea, 8))
-			return 0;
-		switch (op.type & CACHEOP_MASK) {
-		case DCBST:
-			__cacheop_user_asmx(ea, err, "dcbst");
-			break;
-		case DCBF:
-			__cacheop_user_asmx(ea, err, "dcbf");
-			break;
-		case DCBTST:
-			if (op.reg == 0)
-				prefetchw((void *) ea);
-			break;
-		case DCBT:
-			if (op.reg == 0)
-				prefetch((void *) ea);
-			break;
-		case ICBI:
-			__cacheop_user_asmx(ea, err, "icbi");
-			break;
-		case DCBZ:
-			err = emulate_dcbz(ea, regs);
-			break;
-		}
-		if (err) {
-			regs->dar = ea;
-			return 0;
-		}
-		goto instr_done;
-
 	case LARX:
 		if (ea & (size - 1))
-			break;		/* can't handle misaligned */
+			return -EACCES;	/* can't handle misaligned */
 		if (!address_ok(regs, ea, size))
-			return 0;
+			return -EFAULT;
 		err = 0;
 		switch (size) {
 #ifdef __powerpc64__
@@ -2755,49 +2714,49 @@ int emulate_step(struct pt_regs *regs, unsigned int instr)
 			__get_user_asmx(val, ea, err, "ldarx");
 			break;
 		case 16:
-			err = do_lqarx(ea, &regs->gpr[op.reg]);
+			err = do_lqarx(ea, &regs->gpr[op->reg]);
 			break;
 #endif
 		default:
-			return 0;
+			return -EINVAL;
 		}
 		if (err) {
 			regs->dar = ea;
-			return 0;
+			break;
 		}
 		if (size < 16)
-			regs->gpr[op.reg] = val;
-		goto ldst_done;
+			regs->gpr[op->reg] = val;
+		break;
 
 	case STCX:
 		if (ea & (size - 1))
-			break;		/* can't handle misaligned */
+			return -EACCES;	/* can't handle misaligned */
 		if (!address_ok(regs, ea, size))
-			return 0;
+			return -EFAULT;
 		err = 0;
 		switch (size) {
 #ifdef __powerpc64__
 		case 1:
-			__put_user_asmx(op.val, ea, err, "stbcx.", cr);
+			__put_user_asmx(op->val, ea, err, "stbcx.", cr);
 			break;
 		case 2:
-			__put_user_asmx(op.val, ea, err, "stbcx.", cr);
+			__put_user_asmx(op->val, ea, err, "stbcx.", cr);
 			break;
 #endif
 		case 4:
-			__put_user_asmx(op.val, ea, err, "stwcx.", cr);
+			__put_user_asmx(op->val, ea, err, "stwcx.", cr);
 			break;
 #ifdef __powerpc64__
 		case 8:
-			__put_user_asmx(op.val, ea, err, "stdcx.", cr);
+			__put_user_asmx(op->val, ea, err, "stdcx.", cr);
 			break;
 		case 16:
-			err = do_stqcx(ea, regs->gpr[op.reg],
-				       regs->gpr[op.reg + 1], &cr);
+			err = do_stqcx(ea, regs->gpr[op->reg],
+				       regs->gpr[op->reg + 1], &cr);
 			break;
 #endif
 		default:
-			return 0;
+			return -EINVAL;
 		}
 		if (!err)
 			regs->ccr = (regs->ccr & 0x0fffffff) |
@@ -2805,23 +2764,23 @@ int emulate_step(struct pt_regs *regs, unsigned int instr)
 				((regs->xer >> 3) & 0x10000000);
 		else
 			regs->dar = ea;
-		goto ldst_done;
+		break;
 
 	case LOAD:
 #ifdef __powerpc64__
 		if (size == 16) {
-			err = emulate_lq(regs, ea, op.reg, cross_endian);
-			goto ldst_done;
+			err = emulate_lq(regs, ea, op->reg, cross_endian);
+			break;
 		}
 #endif
-		err = read_mem(&regs->gpr[op.reg], ea, size, regs);
+		err = read_mem(&regs->gpr[op->reg], ea, size, regs);
 		if (!err) {
-			if (op.type & SIGNEXT)
-				do_signext(&regs->gpr[op.reg], size);
-			if ((op.type & BYTEREV) == (cross_endian ? 0 : BYTEREV))
-				do_byterev(&regs->gpr[op.reg], size);
+			if (op->type & SIGNEXT)
+				do_signext(&regs->gpr[op->reg], size);
+			if ((op->type & BYTEREV) == (cross_endian ? 0 : BYTEREV))
+				do_byterev(&regs->gpr[op->reg], size);
 		}
-		goto ldst_done;
+		break;
 
 #ifdef CONFIG_PPC_FPU
 	case LOAD_FP:
@@ -2833,15 +2792,15 @@ int emulate_step(struct pt_regs *regs, unsigned int instr)
 		 */
 		if (!(regs->msr & MSR_PR) && !(regs->msr & MSR_FP))
 			return 0;
-		err = do_fp_load(op.reg, ea, size, regs, cross_endian);
-		goto ldst_done;
+		err = do_fp_load(op->reg, ea, size, regs, cross_endian);
+		break;
 #endif
 #ifdef CONFIG_ALTIVEC
 	case LOAD_VMX:
 		if (!(regs->msr & MSR_PR) && !(regs->msr & MSR_VEC))
 			return 0;
-		err = do_vec_load(op.reg, ea, size, regs, cross_endian);
-		goto ldst_done;
+		err = do_vec_load(op->reg, ea, size, regs, cross_endian);
+		break;
 #endif
 #ifdef CONFIG_VSX
 	case LOAD_VSX: {
@@ -2851,18 +2810,18 @@ int emulate_step(struct pt_regs *regs, unsigned int instr)
 		 * Some VSX instructions check the MSR_VEC bit rather than MSR_VSX
 		 * when the target of the instruction is a vector register.
 		 */
-		if (op.reg >= 32 && (op.vsx_flags & VSX_CHECK_VEC))
+		if (op->reg >= 32 && (op->vsx_flags & VSX_CHECK_VEC))
 			msrbit = MSR_VEC;
 		if (!(regs->msr & MSR_PR) && !(regs->msr & msrbit))
 			return 0;
-		err = do_vsx_load(&op, ea, regs, cross_endian);
-		goto ldst_done;
+		err = do_vsx_load(op, ea, regs, cross_endian);
+		break;
 	}
 #endif
 	case LOAD_MULTI:
 		if (!address_ok(regs, ea, size))
 			return -EFAULT;
-		rd = op.reg;
+		rd = op->reg;
 		for (i = 0; i < size; i += 4) {
 			unsigned int v32 = 0;
 
@@ -2871,47 +2830,47 @@ int emulate_step(struct pt_regs *regs, unsigned int instr)
 				nb = 4;
 			err = copy_mem_in((u8 *) &v32, ea, nb, regs);
 			if (err)
-				return 0;
+				break;
 			if (unlikely(cross_endian))
 				v32 = byterev_4(v32);
 			regs->gpr[rd] = v32;
 			ea += 4;
 			++rd;
 		}
-		goto instr_done;
+		break;
 
 	case STORE:
 #ifdef __powerpc64__
 		if (size == 16) {
-			err = emulate_stq(regs, ea, op.reg, cross_endian);
-			goto ldst_done;
+			err = emulate_stq(regs, ea, op->reg, cross_endian);
+			break;
 		}
 #endif
-		if ((op.type & UPDATE) && size == sizeof(long) &&
-		    op.reg == 1 && op.update_reg == 1 &&
+		if ((op->type & UPDATE) && size == sizeof(long) &&
+		    op->reg == 1 && op->update_reg == 1 &&
 		    !(regs->msr & MSR_PR) &&
 		    ea >= regs->gpr[1] - STACK_INT_FRAME_SIZE) {
 			err = handle_stack_update(ea, regs);
-			goto ldst_done;
+			break;
 		}
 		if (unlikely(cross_endian))
-			do_byterev(&op.val, size);
-		err = write_mem(op.val, ea, size, regs);
-		goto ldst_done;
+			do_byterev(&op->val, size);
+		err = write_mem(op->val, ea, size, regs);
+		break;
 
 #ifdef CONFIG_PPC_FPU
 	case STORE_FP:
 		if (!(regs->msr & MSR_PR) && !(regs->msr & MSR_FP))
 			return 0;
-		err = do_fp_store(op.reg, ea, size, regs, cross_endian);
-		goto ldst_done;
+		err = do_fp_store(op->reg, ea, size, regs, cross_endian);
+		break;
 #endif
 #ifdef CONFIG_ALTIVEC
 	case STORE_VMX:
 		if (!(regs->msr & MSR_PR) && !(regs->msr & MSR_VEC))
 			return 0;
-		err = do_vec_store(op.reg, ea, size, regs, cross_endian);
-		goto ldst_done;
+		err = do_vec_store(op->reg, ea, size, regs, cross_endian);
+		break;
 #endif
 #ifdef CONFIG_VSX
 	case STORE_VSX: {
@@ -2921,18 +2880,18 @@ int emulate_step(struct pt_regs *regs, unsigned int instr)
 		 * Some VSX instructions check the MSR_VEC bit rather than MSR_VSX
 		 * when the target of the instruction is a vector register.
 		 */
-		if (op.reg >= 32 && (op.vsx_flags & VSX_CHECK_VEC))
+		if (op->reg >= 32 && (op->vsx_flags & VSX_CHECK_VEC))
 			msrbit = MSR_VEC;
 		if (!(regs->msr & MSR_PR) && !(regs->msr & msrbit))
 			return 0;
-		err = do_vsx_store(&op, ea, regs, cross_endian);
-		goto ldst_done;
+		err = do_vsx_store(op, ea, regs, cross_endian);
+		break;
 	}
 #endif
 	case STORE_MULTI:
 		if (!address_ok(regs, ea, size))
 			return -EFAULT;
-		rd = op.reg;
+		rd = op->reg;
 		for (i = 0; i < size; i += 4) {
 			unsigned int v32 = regs->gpr[rd];
 
@@ -2943,10 +2902,89 @@ int emulate_step(struct pt_regs *regs, unsigned int instr)
 				v32 = byterev_4(v32);
 			err = copy_mem_out((u8 *) &v32, ea, nb, regs);
 			if (err)
-				return 0;
+				break;
 			ea += 4;
 			++rd;
 		}
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	if (err)
+		return err;
+
+	if (op->type & UPDATE)
+		regs->gpr[op->update_reg] = op->ea;
+
+	return 0;
+}
+NOKPROBE_SYMBOL(emulate_loadstore);
+
+/*
+ * Emulate instructions that cause a transfer of control,
+ * loads and stores, and a few other instructions.
+ * Returns 1 if the step was emulated, 0 if not,
+ * or -1 if the instruction is one that should not be stepped,
+ * such as an rfid, or a mtmsrd that would clear MSR_RI.
+ */
+int emulate_step(struct pt_regs *regs, unsigned int instr)
+{
+	struct instruction_op op;
+	int r, err, type;
+	unsigned long val;
+	unsigned long ea;
+
+	r = analyse_instr(&op, regs, instr);
+	if (r < 0)
+		return r;
+	if (r > 0) {
+		emulate_update_regs(regs, &op);
+		return 1;
+	}
+
+	err = 0;
+	type = op.type & INSTR_TYPE_MASK;
+
+	if (OP_IS_LOAD_STORE(type)) {
+		err = emulate_loadstore(regs, &op);
+		if (err)
+			return 0;
+		goto instr_done;
+	}
+
+	switch (type) {
+	case CACHEOP:
+		ea = truncate_if_32bit(regs->msr, op.ea);
+		if (!address_ok(regs, ea, 8))
+			return 0;
+		switch (op.type & CACHEOP_MASK) {
+		case DCBST:
+			__cacheop_user_asmx(ea, err, "dcbst");
+			break;
+		case DCBF:
+			__cacheop_user_asmx(ea, err, "dcbf");
+			break;
+		case DCBTST:
+			if (op.reg == 0)
+				prefetchw((void *) ea);
+			break;
+		case DCBT:
+			if (op.reg == 0)
+				prefetch((void *) ea);
+			break;
+		case ICBI:
+			__cacheop_user_asmx(ea, err, "icbi");
+			break;
+		case DCBZ:
+			err = emulate_dcbz(ea, regs);
+			break;
+		}
+		if (err) {
+			regs->dar = ea;
+			return 0;
+		}
 		goto instr_done;
 
 	case MFMSR:
@@ -2989,12 +3027,6 @@ int emulate_step(struct pt_regs *regs, unsigned int instr)
 	}
 	return 0;
 
- ldst_done:
-	if (err)
-		return 0;
-	if (op.type & UPDATE)
-		regs->gpr[op.update_reg] = op.ea;
-
  instr_done:
 	regs->nip = truncate_if_32bit(regs->msr, regs->nip + 4);
 	return 1;
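
The new error-code convention leaves disposition to the caller. One plausible caller-side mapping, sketched under stated assumptions (the deliver_sig_sketch() helper and the particular signal choices are illustrative, not from this commit; only the return codes and the use of regs->dar come from the comment added above):

    /* Illustrative only: acting on the codes emulate_loadstore() returns. */
    static void handle_emulation_result_sketch(struct pt_regs *regs, int err)
    {
            switch (err) {
            case 0:                 /* emulated: step past the instruction */
                    regs->nip = truncate_if_32bit(regs->msr, regs->nip + 4);
                    break;
            case -EFAULT:           /* bad address; faulting address in regs->dar */
                    deliver_sig_sketch(SIGSEGV, regs->dar);
                    break;
            case -EACCES:           /* misaligned access the emulator rejects */
                    deliver_sig_sketch(SIGBUS, regs->dar);
                    break;
            default:                /* -EINVAL: unrecognised operation */
                    deliver_sig_sketch(SIGILL, regs->nip);
                    break;
            }
    }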