Commit 7e814a20 authored by Fuad Tabba, committed by Oliver Upton

KVM: arm64: Tidying up PAuth code in KVM

Tidy up some of the PAuth trapping code to clear up some comments
and avoid clang/checkpatch warnings. Also, don't bother setting
PAuth HCR_EL2 bits in pKVM, since it's handled by the hypervisor.
Signed-off-by: Fuad Tabba <tabba@google.com>
Link: https://lore.kernel.org/r/20240722163311.1493879-1-tabba@google.com
Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
parent 01ab08ca
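
As a rough illustration of the behavioural change described above, here is a minimal, standalone C sketch of the new gating. The predicate and bit names mirror the kernel helpers, but everything in it is stubbed and simplified for illustration; it is a sketch of the idea, not the kernel code, which follows in the diff below.

/*
 * Standalone sketch, not kernel code: models the condition this patch
 * introduces, i.e. only configure the PAuth HCR_EL2 bits when the vCPU
 * has pointer authentication AND protected KVM (pKVM) is not enabled,
 * since in pKVM the EL2 hypervisor owns that setup.
 */
#include <stdbool.h>
#include <stdio.h>

#define HCR_API		(1UL << 41)	/* when set, PAuth instructions are not trapped */
#define HCR_APK		(1UL << 40)	/* when set, PAuth key registers are not trapped */

/* Stubbed stand-ins for the kernel helpers of the same names. */
static bool vcpu_has_ptrauth(void)		{ return true; }
static bool is_protected_kvm_enabled(void)	{ return false; }

static unsigned long vcpu_set_pauth_traps_sketch(unsigned long hcr_el2)
{
	/* Post-patch shape: pKVM skips this entirely. */
	if (vcpu_has_ptrauth() && !is_protected_kvm_enabled())
		hcr_el2 |= (HCR_API | HCR_APK);

	return hcr_el2;
}

int main(void)
{
	printf("HCR_EL2 = %#lx\n", vcpu_set_pauth_traps_sketch(0));
	return 0;
}

In the real code the nested-virt (L2 guest) case also factors into how API/APK are chosen, as the first hunk below shows.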
@@ -522,10 +522,10 @@ void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
 static void vcpu_set_pauth_traps(struct kvm_vcpu *vcpu)
 {
-	if (vcpu_has_ptrauth(vcpu)) {
+	if (vcpu_has_ptrauth(vcpu) && !is_protected_kvm_enabled()) {
 		/*
-		 * Either we're running running an L2 guest, and the API/APK
-		 * bits come from L1's HCR_EL2, or API/APK are both set.
+		 * Either we're running an L2 guest, and the API/APK bits come
+		 * from L1's HCR_EL2, or API/APK are both set.
 		 */
 		if (unlikely(vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu))) {
 			u64 val;
 
@@ -542,16 +542,10 @@ static void vcpu_set_pauth_traps(struct kvm_vcpu *vcpu)
 		 * Save the host keys if there is any chance for the guest
 		 * to use pauth, as the entry code will reload the guest
 		 * keys in that case.
-		 * Protected mode is the exception to that rule, as the
-		 * entry into the EL2 code eagerly switch back and forth
-		 * between host and hyp keys (and kvm_hyp_ctxt is out of
-		 * reach anyway).
 		 */
-		if (is_protected_kvm_enabled())
-			return;
-
 		if (vcpu->arch.hcr_el2 & (HCR_API | HCR_APK)) {
 			struct kvm_cpu_context *ctxt;
+
 			ctxt = this_cpu_ptr_hyp_sym(kvm_hyp_ctxt);
 			ptrauth_save_keys(ctxt);
 		}
@@ -27,7 +27,6 @@
 #include <asm/kvm_hyp.h>
 #include <asm/kvm_mmu.h>
 #include <asm/kvm_nested.h>
-#include <asm/kvm_ptrauth.h>
 #include <asm/fpsimd.h>
 #include <asm/debug-monitors.h>
 #include <asm/processor.h>
@@ -173,9 +173,8 @@ static void __pmu_switch_to_host(struct kvm_vcpu *vcpu)
 static bool kvm_handle_pvm_sys64(struct kvm_vcpu *vcpu, u64 *exit_code)
 {
 	/*
-	 * Make sure we handle the exit for workarounds and ptrauth
-	 * before the pKVM handling, as the latter could decide to
-	 * UNDEF.
+	 * Make sure we handle the exit for workarounds before the pKVM
+	 * handling, as the latter could decide to UNDEF.
 	 */
 	return (kvm_hyp_handle_sysreg(vcpu, exit_code) ||
 		kvm_handle_pvm_sysreg(vcpu, exit_code));