Commit 1f3d8699 authored by Mark Rutland, committed by Will Deacon

arm64/kvm: use {read,write}_sysreg()

A while back we added {read,write}_sysreg accessors to handle accesses
to system registers, without the usual boilerplate asm volatile,
temporary variable, etc.

This patch makes use of these in the arm64 KVM code to make the code
shorter and clearer.

At the same time, a comment style violation next to a system register
access is fixed up in reset_pmcr, and comments describing whether
operations are reads or writes are removed as this is now painfully
obvious.

Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Marc Zyngier <marc.zyngier@arm.com>
Cc: Will Deacon <will.deacon@arm.com>
Acked-by: Christoffer Dall <christoffer.dall@linaro.org>
Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
parent d0a69d9f
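
For context, the {read,write}_sysreg() accessors referred to above live in <asm/sysreg.h>, which the hunks below start including. A minimal sketch of the pattern they implement (not the exact kernel definitions, whose constraints and helpers may differ in detail) looks roughly like this:

#include <linux/stringify.h>	/* for __stringify() */
#include <linux/types.h>	/* for u64 */

/* Sketch only: read system register 'r' via mrs into a temporary. */
#define read_sysreg(r) ({						\
	u64 __val;							\
	asm volatile("mrs %0, " __stringify(r) : "=r" (__val));	\
	__val;								\
})

/* Sketch only: write value 'v' to system register 'r' via msr. */
#define write_sysreg(v, r) do {						\
	u64 __val = (u64)(v);						\
	asm volatile("msr " __stringify(r) ", %x0"			\
		     : : "rZ" (__val));					\
} while (0)

With accessors of this shape, an open-coded sequence such as asm volatile("mrs %0, amair_el1" : "=r" (amair)) collapses to read_sysreg(amair_el1), which is the transformation the hunks below perform.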
@@ -46,6 +46,7 @@
 #include <asm/ptrace.h>
 #include <asm/sections.h>
+#include <asm/sysreg.h>
 
 /*
  * __boot_cpu_mode records what mode CPUs were booted in.
@@ -76,10 +77,7 @@ static inline bool is_hyp_mode_mismatched(void)
 
 static inline bool is_kernel_in_hyp_mode(void)
 {
-	u64 el;
-
-	asm("mrs %0, CurrentEL" : "=r" (el));
-	return el == CurrentEL_EL2;
+	return read_sysreg(CurrentEL) == CurrentEL_EL2;
 }
 
 #ifdef CONFIG_ARM64_VHE
@@ -36,6 +36,7 @@
 #include <asm/kvm_host.h>
 #include <asm/kvm_mmu.h>
 #include <asm/perf_event.h>
+#include <asm/sysreg.h>
 
 #include <trace/events/kvm.h>
@@ -67,11 +68,9 @@ static u32 get_ccsidr(u32 csselr)
 	/* Make sure noone else changes CSSELR during this! */
 	local_irq_disable();
-	/* Put value into CSSELR */
-	asm volatile("msr csselr_el1, %x0" : : "r" (csselr));
+	write_sysreg(csselr, csselr_el1);
 	isb();
-	/* Read result out of CCSIDR */
-	asm volatile("mrs %0, ccsidr_el1" : "=r" (ccsidr));
+	ccsidr = read_sysreg(ccsidr_el1);
 	local_irq_enable();
 
 	return ccsidr;
@@ -174,9 +173,7 @@ static bool trap_dbgauthstatus_el1(struct kvm_vcpu *vcpu,
 	if (p->is_write) {
 		return ignore_write(vcpu, p);
 	} else {
-		u32 val;
-		asm volatile("mrs %0, dbgauthstatus_el1" : "=r" (val));
-		p->regval = val;
+		p->regval = read_sysreg(dbgauthstatus_el1);
 		return true;
 	}
 }
@@ -429,10 +426,7 @@ static void reset_wcr(struct kvm_vcpu *vcpu,
 
 static void reset_amair_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
 {
-	u64 amair;
-
-	asm volatile("mrs %0, amair_el1\n" : "=r" (amair));
-	vcpu_sys_reg(vcpu, AMAIR_EL1) = amair;
+	vcpu_sys_reg(vcpu, AMAIR_EL1) = read_sysreg(amair_el1);
 }
 
 static void reset_mpidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
@@ -456,8 +450,9 @@ static void reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
 {
 	u64 pmcr, val;
 
-	asm volatile("mrs %0, pmcr_el0\n" : "=r" (pmcr));
-	/* Writable bits of PMCR_EL0 (ARMV8_PMU_PMCR_MASK) is reset to UNKNOWN
+	pmcr = read_sysreg(pmcr_el0);
+	/*
+	 * Writable bits of PMCR_EL0 (ARMV8_PMU_PMCR_MASK) are reset to UNKNOWN
 	 * except PMCR.E resetting to zero.
 	 */
 	val = ((pmcr & ~ARMV8_PMU_PMCR_MASK)
@@ -557,9 +552,9 @@ static bool access_pmceid(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 		return false;
 
 	if (!(p->Op2 & 1))
-		asm volatile("mrs %0, pmceid0_el0\n" : "=r" (pmceid));
+		pmceid = read_sysreg(pmceid0_el0);
 	else
-		asm volatile("mrs %0, pmceid1_el0\n" : "=r" (pmceid));
+		pmceid = read_sysreg(pmceid1_el0);
 
 	p->regval = pmceid;
@@ -1841,11 +1836,7 @@ static const struct sys_reg_desc *index_to_sys_reg_desc(struct kvm_vcpu *vcpu,
 static void get_##reg(struct kvm_vcpu *v,			\
 		      const struct sys_reg_desc *r)		\
 {								\
-	u64 val;						\
-								\
-	asm volatile("mrs %0, " __stringify(reg) "\n"		\
-		     : "=r" (val));				\
-	((struct sys_reg_desc *)r)->val = val;			\
+	((struct sys_reg_desc *)r)->val = read_sysreg(reg);	\
 }
 
 FUNCTION_INVARIANT(midr_el1)
@@ -26,6 +26,7 @@
 #include <asm/kvm_host.h>
 #include <asm/kvm_emulate.h>
 #include <asm/kvm_coproc.h>
+#include <asm/sysreg.h>
 #include <linux/init.h>
 
 #include "sys_regs.h"
@@ -43,10 +44,7 @@ static bool access_actlr(struct kvm_vcpu *vcpu,
 
 static void reset_actlr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
 {
-	u64 actlr;
-
-	asm volatile("mrs %0, actlr_el1\n" : "=r" (actlr));
-	vcpu_sys_reg(vcpu, ACTLR_EL1) = actlr;
+	vcpu_sys_reg(vcpu, ACTLR_EL1) = read_sysreg(actlr_el1);
 }
 
 /*