Commit 256c0960 authored by Mark Rutland, committed by Will Deacon

kvm/arm: use PSR_AA32 definitions

Some code inspects or manipulates the SPSR_ELx value for exceptions
taken from AArch32. That value is already in the SPSR_ELx format, not
in the AArch32 PSR format.

To separate these from cases where we care about the AArch32 PSR format,
migrate these cases to use the PSR_AA32_* definitions rather than
COMPAT_PSR_*.
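
For illustration only (not part of the patch), a minimal sketch of the new
spelling; the helper below is hypothetical, and the bit value is the arm64
one added by the parent commit:

#define PSR_AA32_T_BIT	0x00000020	/* AArch32 T bit, per the parent commit */

/* Hypothetical helper: test the T bit of an SPSR_ELx value saved for an
 * exception taken from AArch32. Before this patch, the same test was
 * spelled with COMPAT_PSR_T_BIT. */
static inline bool spsr_aa32_is_thumb(unsigned long spsr)
{
	return (spsr & PSR_AA32_T_BIT) != 0;
}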

There should be no functional change as a result of this patch.

Note that arm64 KVM does not support a compat KVM API, and always uses
the SPSR_ELx format, even for AArch32 guests.
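
As a hedged sketch of what that means in practice (the constants are the
arm64 values from the parent commit; the helper is hypothetical, not from
this patch): a 32-bit guest's pstate is held in SPSR_ELx form, so its mode
field is masked and compared with the PSR_AA32_MODE_* values, as the
set_core_reg() hunk below does.

#define PSR_AA32_MODE_MASK	0x0000001f
#define PSR_AA32_MODE_USR	0x00000010

/* Hypothetical standalone check in the style of set_core_reg() below. */
static bool aa32_pstate_is_usr(unsigned int pstate)
{
	return (pstate & PSR_AA32_MODE_MASK) == PSR_AA32_MODE_USR;
}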
Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Acked-by: Christoffer Dall <christoffer.dall@arm.com>
Acked-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
parent d64567f6
@@ -26,13 +26,13 @@
 #include <asm/cputype.h>
 
 /* arm64 compatibility macros */
-#define COMPAT_PSR_MODE_ABT	ABT_MODE
-#define COMPAT_PSR_MODE_UND	UND_MODE
-#define COMPAT_PSR_T_BIT	PSR_T_BIT
-#define COMPAT_PSR_I_BIT	PSR_I_BIT
-#define COMPAT_PSR_A_BIT	PSR_A_BIT
-#define COMPAT_PSR_E_BIT	PSR_E_BIT
-#define COMPAT_PSR_IT_MASK	PSR_IT_MASK
+#define PSR_AA32_MODE_ABT	ABT_MODE
+#define PSR_AA32_MODE_UND	UND_MODE
+#define PSR_AA32_T_BIT		PSR_T_BIT
+#define PSR_AA32_I_BIT		PSR_I_BIT
+#define PSR_AA32_A_BIT		PSR_A_BIT
+#define PSR_AA32_E_BIT		PSR_E_BIT
+#define PSR_AA32_IT_MASK	PSR_IT_MASK
 
 unsigned long *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num);
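
The hunk above is the 32-bit arm side of the rename: there the guest PSR
already is the native PSR, so each PSR_AA32_* name simply aliases the
corresponding PSR_* bit, and code shared between the arm and arm64 KVM
trees compiles unchanged. A minimal sketch of the pattern (abridged; the
example function is hypothetical):

#ifdef CONFIG_ARM
#define PSR_AA32_T_BIT	PSR_T_BIT	/* 32-bit arm: alias the native bit */
#else
#define PSR_AA32_T_BIT	0x00000020	/* arm64: value from the parent commit */
#endif

/* Shared code can then use a single spelling on both architectures. */
static inline void vcpu_set_thumb_example(struct kvm_vcpu *vcpu)
{
	*vcpu_cpsr(vcpu) |= PSR_AA32_T_BIT;
}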
@@ -140,7 +140,7 @@ static inline void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr)
 
 static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
 {
-	*vcpu_cpsr(vcpu) |= COMPAT_PSR_T_BIT;
+	*vcpu_cpsr(vcpu) |= PSR_AA32_T_BIT;
 }
 
 /*
@@ -190,8 +190,8 @@ static inline bool vcpu_mode_priv(const struct kvm_vcpu *vcpu)
 	u32 mode;
 
 	if (vcpu_mode_is_32bit(vcpu)) {
-		mode = *vcpu_cpsr(vcpu) & COMPAT_PSR_MODE_MASK;
-		return mode > COMPAT_PSR_MODE_USR;
+		mode = *vcpu_cpsr(vcpu) & PSR_AA32_MODE_MASK;
+		return mode > PSR_AA32_MODE_USR;
 	}
 
 	mode = *vcpu_cpsr(vcpu) & PSR_MODE_MASK;
@@ -329,7 +329,7 @@ static inline unsigned long kvm_vcpu_get_mpidr_aff(struct kvm_vcpu *vcpu)
 static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu)
 {
 	if (vcpu_mode_is_32bit(vcpu)) {
-		*vcpu_cpsr(vcpu) |= COMPAT_PSR_E_BIT;
+		*vcpu_cpsr(vcpu) |= PSR_AA32_E_BIT;
 	} else {
 		u64 sctlr = vcpu_read_sys_reg(vcpu, SCTLR_EL1);
 		sctlr |= (1 << 25);
@@ -340,7 +340,7 @@ static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu)
 static inline bool kvm_vcpu_is_be(struct kvm_vcpu *vcpu)
 {
 	if (vcpu_mode_is_32bit(vcpu))
-		return !!(*vcpu_cpsr(vcpu) & COMPAT_PSR_E_BIT);
+		return !!(*vcpu_cpsr(vcpu) & PSR_AA32_E_BIT);
 
 	return !!(vcpu_read_sys_reg(vcpu, SCTLR_EL1) & (1 << 25));
 }
@@ -107,14 +107,14 @@ static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
 	}
 
 	if (off == KVM_REG_ARM_CORE_REG(regs.pstate)) {
-		u32 mode = (*(u32 *)valp) & COMPAT_PSR_MODE_MASK;
+		u32 mode = (*(u32 *)valp) & PSR_AA32_MODE_MASK;
 		switch (mode) {
-		case COMPAT_PSR_MODE_USR:
-		case COMPAT_PSR_MODE_FIQ:
-		case COMPAT_PSR_MODE_IRQ:
-		case COMPAT_PSR_MODE_SVC:
-		case COMPAT_PSR_MODE_ABT:
-		case COMPAT_PSR_MODE_UND:
+		case PSR_AA32_MODE_USR:
+		case PSR_AA32_MODE_FIQ:
+		case PSR_AA32_MODE_IRQ:
+		case PSR_AA32_MODE_SVC:
+		case PSR_AA32_MODE_ABT:
+		case PSR_AA32_MODE_UND:
 		case PSR_MODE_EL0t:
 		case PSR_MODE_EL1t:
 		case PSR_MODE_EL1h:
@@ -27,7 +27,7 @@
 static bool __hyp_text __is_be(struct kvm_vcpu *vcpu)
 {
 	if (vcpu_mode_is_32bit(vcpu))
-		return !!(read_sysreg_el2(spsr) & COMPAT_PSR_E_BIT);
+		return !!(read_sysreg_el2(spsr) & PSR_AA32_E_BIT);
 
 	return !!(read_sysreg(SCTLR_EL1) & SCTLR_ELx_EE);
 }
@@ -112,22 +112,22 @@ static const unsigned long vcpu_reg_offsets[VCPU_NR_MODES][16] = {
 unsigned long *vcpu_reg32(const struct kvm_vcpu *vcpu, u8 reg_num)
 {
 	unsigned long *reg_array = (unsigned long *)&vcpu->arch.ctxt.gp_regs.regs;
-	unsigned long mode = *vcpu_cpsr(vcpu) & COMPAT_PSR_MODE_MASK;
+	unsigned long mode = *vcpu_cpsr(vcpu) & PSR_AA32_MODE_MASK;
 
 	switch (mode) {
-	case COMPAT_PSR_MODE_USR ... COMPAT_PSR_MODE_SVC:
+	case PSR_AA32_MODE_USR ... PSR_AA32_MODE_SVC:
 		mode &= ~PSR_MODE32_BIT; /* 0 ... 3 */
 		break;
 
-	case COMPAT_PSR_MODE_ABT:
+	case PSR_AA32_MODE_ABT:
 		mode = 4;
 		break;
 
-	case COMPAT_PSR_MODE_UND:
+	case PSR_AA32_MODE_UND:
 		mode = 5;
 		break;
 
-	case COMPAT_PSR_MODE_SYS:
+	case PSR_AA32_MODE_SYS:
 		mode = 0;	/* SYS maps to USR */
 		break;
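
To make the mapping in the vcpu_reg32() hunk above concrete: modes
USR..SVC (0x10..0x13) drop PSR_MODE32_BIT (0x10) to give rows 0..3 of
vcpu_reg_offsets[], ABT and UND take rows 4 and 5, and SYS shares USR's
register bank, so it maps back to row 0. A hedged standalone sketch (mode
values are the architectural AArch32 encodings; the function is
hypothetical):

#define PSR_MODE32_BIT		0x00000010
#define PSR_AA32_MODE_MASK	0x0000001f
#define PSR_AA32_MODE_USR	0x00000010
#define PSR_AA32_MODE_SVC	0x00000013
#define PSR_AA32_MODE_ABT	0x00000017
#define PSR_AA32_MODE_UND	0x0000001b
#define PSR_AA32_MODE_SYS	0x0000001f

static unsigned long aa32_mode_to_row(unsigned long cpsr)
{
	unsigned long mode = cpsr & PSR_AA32_MODE_MASK;

	switch (mode) {
	case PSR_AA32_MODE_USR ... PSR_AA32_MODE_SVC:
		return mode & ~PSR_MODE32_BIT;	/* USR..SVC -> 0..3 */
	case PSR_AA32_MODE_ABT:
		return 4;
	case PSR_AA32_MODE_UND:
		return 5;
	case PSR_AA32_MODE_SYS:
	default:
		return 0;	/* SYS shares the USR register bank */
	}
}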
@@ -143,13 +143,13 @@ unsigned long *vcpu_reg32(const struct kvm_vcpu *vcpu, u8 reg_num)
  */
 static int vcpu_spsr32_mode(const struct kvm_vcpu *vcpu)
 {
-	unsigned long mode = *vcpu_cpsr(vcpu) & COMPAT_PSR_MODE_MASK;
+	unsigned long mode = *vcpu_cpsr(vcpu) & PSR_AA32_MODE_MASK;
 	switch (mode) {
-	case COMPAT_PSR_MODE_SVC: return KVM_SPSR_SVC;
-	case COMPAT_PSR_MODE_ABT: return KVM_SPSR_ABT;
-	case COMPAT_PSR_MODE_UND: return KVM_SPSR_UND;
-	case COMPAT_PSR_MODE_IRQ: return KVM_SPSR_IRQ;
-	case COMPAT_PSR_MODE_FIQ: return KVM_SPSR_FIQ;
+	case PSR_AA32_MODE_SVC: return KVM_SPSR_SVC;
+	case PSR_AA32_MODE_ABT: return KVM_SPSR_ABT;
+	case PSR_AA32_MODE_UND: return KVM_SPSR_UND;
+	case PSR_AA32_MODE_IRQ: return KVM_SPSR_IRQ;
+	case PSR_AA32_MODE_FIQ: return KVM_SPSR_FIQ;
 	default: BUG();
 	}
 }
@@ -42,8 +42,8 @@ static const struct kvm_regs default_regs_reset = {
 };
 
 static const struct kvm_regs default_regs_reset32 = {
-	.regs.pstate = (COMPAT_PSR_MODE_SVC | COMPAT_PSR_A_BIT |
-			COMPAT_PSR_I_BIT | COMPAT_PSR_F_BIT),
+	.regs.pstate = (PSR_AA32_MODE_SVC | PSR_AA32_A_BIT |
+			PSR_AA32_I_BIT | PSR_AA32_F_BIT),
 };
 
 static bool cpu_has_32bit_el1(void)
@@ -108,9 +108,9 @@ static void __hyp_text kvm_adjust_itstate(struct kvm_vcpu *vcpu)
 {
 	unsigned long itbits, cond;
 	unsigned long cpsr = *vcpu_cpsr(vcpu);
-	bool is_arm = !(cpsr & COMPAT_PSR_T_BIT);
+	bool is_arm = !(cpsr & PSR_AA32_T_BIT);
 
-	if (is_arm || !(cpsr & COMPAT_PSR_IT_MASK))
+	if (is_arm || !(cpsr & PSR_AA32_IT_MASK))
 		return;
 
 	cond = (cpsr & 0xe000) >> 13;
@@ -123,7 +123,7 @@ static void __hyp_text kvm_adjust_itstate(struct kvm_vcpu *vcpu)
 	else
 		itbits = (itbits << 1) & 0x1f;
 
-	cpsr &= ~COMPAT_PSR_IT_MASK;
+	cpsr &= ~PSR_AA32_IT_MASK;
 	cpsr |= cond << 13;
 	cpsr |= (itbits & 0x1c) << (10 - 2);
 	cpsr |= (itbits & 0x3) << 25;
@@ -138,7 +138,7 @@ void __hyp_text kvm_skip_instr32(struct kvm_vcpu *vcpu, bool is_wide_instr)
 {
 	bool is_thumb;
 
-	is_thumb = !!(*vcpu_cpsr(vcpu) & COMPAT_PSR_T_BIT);
+	is_thumb = !!(*vcpu_cpsr(vcpu) & PSR_AA32_T_BIT);
 	if (is_thumb && !is_wide_instr)
 		*vcpu_pc(vcpu) += 2;
 	else
@@ -164,16 +164,16 @@ static void prepare_fault32(struct kvm_vcpu *vcpu, u32 mode, u32 vect_offset)
 {
 	unsigned long cpsr;
 	unsigned long new_spsr_value = *vcpu_cpsr(vcpu);
-	bool is_thumb = (new_spsr_value & COMPAT_PSR_T_BIT);
+	bool is_thumb = (new_spsr_value & PSR_AA32_T_BIT);
 	u32 return_offset = return_offsets[vect_offset >> 2][is_thumb];
 	u32 sctlr = vcpu_cp15(vcpu, c1_SCTLR);
 
-	cpsr = mode | COMPAT_PSR_I_BIT;
+	cpsr = mode | PSR_AA32_I_BIT;
 
 	if (sctlr & (1 << 30))
-		cpsr |= COMPAT_PSR_T_BIT;
+		cpsr |= PSR_AA32_T_BIT;
 	if (sctlr & (1 << 25))
-		cpsr |= COMPAT_PSR_E_BIT;
+		cpsr |= PSR_AA32_E_BIT;
 	*vcpu_cpsr(vcpu) = cpsr;
@@ -192,7 +192,7 @@ static void prepare_fault32(struct kvm_vcpu *vcpu, u32 mode, u32 vect_offset)
 void kvm_inject_undef32(struct kvm_vcpu *vcpu)
 {
-	prepare_fault32(vcpu, COMPAT_PSR_MODE_UND, 4);
+	prepare_fault32(vcpu, PSR_AA32_MODE_UND, 4);
 }
 
 /*
@@ -216,7 +216,7 @@ static void inject_abt32(struct kvm_vcpu *vcpu, bool is_pabt,
 		fsr = &vcpu_cp15(vcpu, c5_DFSR);
 	}
 
-	prepare_fault32(vcpu, COMPAT_PSR_MODE_ABT | COMPAT_PSR_A_BIT, vect_offset);
+	prepare_fault32(vcpu, PSR_AA32_MODE_ABT | PSR_AA32_A_BIT, vect_offset);
 
 	*far = addr;