Commit 2a64bc67 authored by Jordan Niethe, committed by Michael Ellerman

KVM: PPC: Rename accessor generator macros

More "wrapper" style accessor generating macros will be introduced for
the nestedv2 guest support. Rename the existing macros with more
descriptive names now so there is a consistent naming convention.
Reviewed-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Jordan Niethe <jniethe5@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://msgid.link/20230914030600.16993-4-jniethe5@gmail.com
parent 52425a3b
...@@ -927,19 +927,19 @@ static inline bool kvmppc_shared_big_endian(struct kvm_vcpu *vcpu) ...@@ -927,19 +927,19 @@ static inline bool kvmppc_shared_big_endian(struct kvm_vcpu *vcpu)
#endif #endif
} }
#define SPRNG_WRAPPER_GET(reg, bookehv_spr) \ #define KVMPPC_BOOKE_HV_SPRNG_ACCESSOR_GET(reg, bookehv_spr) \
static inline ulong kvmppc_get_##reg(struct kvm_vcpu *vcpu) \ static inline ulong kvmppc_get_##reg(struct kvm_vcpu *vcpu) \
{ \ { \
return mfspr(bookehv_spr); \ return mfspr(bookehv_spr); \
} \ } \
#define SPRNG_WRAPPER_SET(reg, bookehv_spr) \ #define KVMPPC_BOOKE_HV_SPRNG_ACCESSOR_SET(reg, bookehv_spr) \
static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, ulong val) \ static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, ulong val) \
{ \ { \
mtspr(bookehv_spr, val); \ mtspr(bookehv_spr, val); \
} \ } \
#define SHARED_WRAPPER_GET(reg, size) \ #define KVMPPC_VCPU_SHARED_REGS_ACCESSOR_GET(reg, size) \
static inline u##size kvmppc_get_##reg(struct kvm_vcpu *vcpu) \ static inline u##size kvmppc_get_##reg(struct kvm_vcpu *vcpu) \
{ \ { \
if (kvmppc_shared_big_endian(vcpu)) \ if (kvmppc_shared_big_endian(vcpu)) \
...@@ -948,7 +948,7 @@ static inline u##size kvmppc_get_##reg(struct kvm_vcpu *vcpu) \ ...@@ -948,7 +948,7 @@ static inline u##size kvmppc_get_##reg(struct kvm_vcpu *vcpu) \
return le##size##_to_cpu(vcpu->arch.shared->reg); \ return le##size##_to_cpu(vcpu->arch.shared->reg); \
} \ } \
#define SHARED_WRAPPER_SET(reg, size) \ #define KVMPPC_VCPU_SHARED_REGS_ACCESSOR_SET(reg, size) \
static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, u##size val) \ static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, u##size val) \
{ \ { \
if (kvmppc_shared_big_endian(vcpu)) \ if (kvmppc_shared_big_endian(vcpu)) \
...@@ -957,36 +957,36 @@ static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, u##size val) \ ...@@ -957,36 +957,36 @@ static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, u##size val) \
vcpu->arch.shared->reg = cpu_to_le##size(val); \ vcpu->arch.shared->reg = cpu_to_le##size(val); \
} \ } \
#define SHARED_WRAPPER(reg, size) \ #define KVMPPC_VCPU_SHARED_REGS_ACCESSOR(reg, size) \
SHARED_WRAPPER_GET(reg, size) \ KVMPPC_VCPU_SHARED_REGS_ACCESSOR_GET(reg, size) \
SHARED_WRAPPER_SET(reg, size) \ KVMPPC_VCPU_SHARED_REGS_ACCESSOR_SET(reg, size) \
#define SPRNG_WRAPPER(reg, bookehv_spr) \ #define KVMPPC_BOOKE_HV_SPRNG_ACCESSOR(reg, bookehv_spr) \
SPRNG_WRAPPER_GET(reg, bookehv_spr) \ KVMPPC_BOOKE_HV_SPRNG_ACCESSOR_GET(reg, bookehv_spr) \
SPRNG_WRAPPER_SET(reg, bookehv_spr) \ KVMPPC_BOOKE_HV_SPRNG_ACCESSOR_SET(reg, bookehv_spr) \
#ifdef CONFIG_KVM_BOOKE_HV #ifdef CONFIG_KVM_BOOKE_HV
#define SHARED_SPRNG_WRAPPER(reg, size, bookehv_spr) \ #define KVMPPC_BOOKE_HV_SPRNG_OR_VCPU_SHARED_REGS_ACCESSOR(reg, size, bookehv_spr) \
SPRNG_WRAPPER(reg, bookehv_spr) \ KVMPPC_BOOKE_HV_SPRNG_ACCESSOR(reg, bookehv_spr) \
#else #else
#define SHARED_SPRNG_WRAPPER(reg, size, bookehv_spr) \ #define KVMPPC_BOOKE_HV_SPRNG_OR_VCPU_SHARED_REGS_ACCESSOR(reg, size, bookehv_spr) \
SHARED_WRAPPER(reg, size) \ KVMPPC_VCPU_SHARED_REGS_ACCESSOR(reg, size) \
#endif #endif
SHARED_WRAPPER(critical, 64) KVMPPC_VCPU_SHARED_REGS_ACCESSOR(critical, 64)
SHARED_SPRNG_WRAPPER(sprg0, 64, SPRN_GSPRG0) KVMPPC_BOOKE_HV_SPRNG_OR_VCPU_SHARED_REGS_ACCESSOR(sprg0, 64, SPRN_GSPRG0)
SHARED_SPRNG_WRAPPER(sprg1, 64, SPRN_GSPRG1) KVMPPC_BOOKE_HV_SPRNG_OR_VCPU_SHARED_REGS_ACCESSOR(sprg1, 64, SPRN_GSPRG1)
SHARED_SPRNG_WRAPPER(sprg2, 64, SPRN_GSPRG2) KVMPPC_BOOKE_HV_SPRNG_OR_VCPU_SHARED_REGS_ACCESSOR(sprg2, 64, SPRN_GSPRG2)
SHARED_SPRNG_WRAPPER(sprg3, 64, SPRN_GSPRG3) KVMPPC_BOOKE_HV_SPRNG_OR_VCPU_SHARED_REGS_ACCESSOR(sprg3, 64, SPRN_GSPRG3)
SHARED_SPRNG_WRAPPER(srr0, 64, SPRN_GSRR0) KVMPPC_BOOKE_HV_SPRNG_OR_VCPU_SHARED_REGS_ACCESSOR(srr0, 64, SPRN_GSRR0)
SHARED_SPRNG_WRAPPER(srr1, 64, SPRN_GSRR1) KVMPPC_BOOKE_HV_SPRNG_OR_VCPU_SHARED_REGS_ACCESSOR(srr1, 64, SPRN_GSRR1)
SHARED_SPRNG_WRAPPER(dar, 64, SPRN_GDEAR) KVMPPC_BOOKE_HV_SPRNG_OR_VCPU_SHARED_REGS_ACCESSOR(dar, 64, SPRN_GDEAR)
SHARED_SPRNG_WRAPPER(esr, 64, SPRN_GESR) KVMPPC_BOOKE_HV_SPRNG_OR_VCPU_SHARED_REGS_ACCESSOR(esr, 64, SPRN_GESR)
SHARED_WRAPPER_GET(msr, 64) KVMPPC_VCPU_SHARED_REGS_ACCESSOR_GET(msr, 64)
static inline void kvmppc_set_msr_fast(struct kvm_vcpu *vcpu, u64 val) static inline void kvmppc_set_msr_fast(struct kvm_vcpu *vcpu, u64 val)
{ {
if (kvmppc_shared_big_endian(vcpu)) if (kvmppc_shared_big_endian(vcpu))
...@@ -994,12 +994,12 @@ static inline void kvmppc_set_msr_fast(struct kvm_vcpu *vcpu, u64 val) ...@@ -994,12 +994,12 @@ static inline void kvmppc_set_msr_fast(struct kvm_vcpu *vcpu, u64 val)
else else
vcpu->arch.shared->msr = cpu_to_le64(val); vcpu->arch.shared->msr = cpu_to_le64(val);
} }
SHARED_WRAPPER(dsisr, 32) KVMPPC_VCPU_SHARED_REGS_ACCESSOR(dsisr, 32)
SHARED_WRAPPER(int_pending, 32) KVMPPC_VCPU_SHARED_REGS_ACCESSOR(int_pending, 32)
SHARED_WRAPPER(sprg4, 64) KVMPPC_VCPU_SHARED_REGS_ACCESSOR(sprg4, 64)
SHARED_WRAPPER(sprg5, 64) KVMPPC_VCPU_SHARED_REGS_ACCESSOR(sprg5, 64)
SHARED_WRAPPER(sprg6, 64) KVMPPC_VCPU_SHARED_REGS_ACCESSOR(sprg6, 64)
SHARED_WRAPPER(sprg7, 64) KVMPPC_VCPU_SHARED_REGS_ACCESSOR(sprg7, 64)
static inline u32 kvmppc_get_sr(struct kvm_vcpu *vcpu, int nr) static inline u32 kvmppc_get_sr(struct kvm_vcpu *vcpu, int nr)
{ {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment