Commit cfbdc546 authored by Fuad Tabba, committed by Marc Zyngier

KVM: arm64: Rename __tlb_switch_to_{guest,host}() in VHE

Rename __tlb_switch_to_{guest,host}() to
{enter,exit}_vmid_context() in VHE code to maintain symmetry
between the nVHE and VHE TLB invalidations.

No functional change intended.
Suggested-by: Oliver Upton <oliver.upton@linux.dev>
Signed-off-by: Fuad Tabba <tabba@google.com>
Acked-by: Oliver Upton <oliver.upton@linux.dev>
Link: https://lore.kernel.org/r/20240423150538.2103045-11-tabba@google.com
Signed-off-by: Marc Zyngier <maz@kernel.org>
parent 58f3b0fc
@@ -17,8 +17,8 @@ struct tlb_inv_context {
 	u64 sctlr;
 };
 
-static void __tlb_switch_to_guest(struct kvm_s2_mmu *mmu,
-				  struct tlb_inv_context *cxt)
+static void enter_vmid_context(struct kvm_s2_mmu *mmu,
+			       struct tlb_inv_context *cxt)
 {
 	struct kvm_vcpu *vcpu = kvm_get_running_vcpu();
 	u64 val;
@@ -67,7 +67,7 @@ static void __tlb_switch_to_guest(struct kvm_s2_mmu *mmu,
 	isb();
 }
 
-static void __tlb_switch_to_host(struct tlb_inv_context *cxt)
+static void exit_vmid_context(struct tlb_inv_context *cxt)
 {
 	/*
 	 * We're done with the TLB operation, let's restore the host's
@@ -97,7 +97,7 @@ void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu,
 	dsb(ishst);
 
 	/* Switch to requested VMID */
-	__tlb_switch_to_guest(mmu, &cxt);
+	enter_vmid_context(mmu, &cxt);
 
 	/*
 	 * We could do so much better if we had the VA as well.
@@ -118,7 +118,7 @@ void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu,
 	dsb(ish);
 	isb();
 
-	__tlb_switch_to_host(&cxt);
+	exit_vmid_context(&cxt);
 }
 
 void __kvm_tlb_flush_vmid_ipa_nsh(struct kvm_s2_mmu *mmu,
@@ -129,7 +129,7 @@ void __kvm_tlb_flush_vmid_ipa_nsh(struct kvm_s2_mmu *mmu,
 	dsb(nshst);
 
 	/* Switch to requested VMID */
-	__tlb_switch_to_guest(mmu, &cxt);
+	enter_vmid_context(mmu, &cxt);
 
 	/*
 	 * We could do so much better if we had the VA as well.
@@ -150,7 +150,7 @@ void __kvm_tlb_flush_vmid_ipa_nsh(struct kvm_s2_mmu *mmu,
 	dsb(nsh);
 	isb();
 
-	__tlb_switch_to_host(&cxt);
+	exit_vmid_context(&cxt);
 }
 
 void __kvm_tlb_flush_vmid_range(struct kvm_s2_mmu *mmu,
@@ -169,7 +169,7 @@ void __kvm_tlb_flush_vmid_range(struct kvm_s2_mmu *mmu,
 	dsb(ishst);
 
 	/* Switch to requested VMID */
-	__tlb_switch_to_guest(mmu, &cxt);
+	enter_vmid_context(mmu, &cxt);
 
 	__flush_s2_tlb_range_op(ipas2e1is, start, pages, stride, 0);
 
@@ -178,7 +178,7 @@ void __kvm_tlb_flush_vmid_range(struct kvm_s2_mmu *mmu,
 	dsb(ish);
 	isb();
 
-	__tlb_switch_to_host(&cxt);
+	exit_vmid_context(&cxt);
 }
 
 void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu)
@@ -188,13 +188,13 @@ void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu)
 	dsb(ishst);
 
 	/* Switch to requested VMID */
-	__tlb_switch_to_guest(mmu, &cxt);
+	enter_vmid_context(mmu, &cxt);
 
 	__tlbi(vmalls12e1is);
 	dsb(ish);
 	isb();
 
-	__tlb_switch_to_host(&cxt);
+	exit_vmid_context(&cxt);
 }
 
 void __kvm_flush_cpu_context(struct kvm_s2_mmu *mmu)
@@ -202,14 +202,14 @@ void __kvm_flush_cpu_context(struct kvm_s2_mmu *mmu)
 	struct tlb_inv_context cxt;
 
 	/* Switch to requested VMID */
-	__tlb_switch_to_guest(mmu, &cxt);
+	enter_vmid_context(mmu, &cxt);
 
 	__tlbi(vmalle1);
 	asm volatile("ic iallu");
 	dsb(nsh);
 	isb();
 
-	__tlb_switch_to_host(&cxt);
+	exit_vmid_context(&cxt);
 }
 
 void __kvm_flush_vm_context(void)
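Taken together, the hunks leave every stage-2 flush helper in this file with the same bracket structure around its TLBI sequence. A condensed sketch of that pattern, reconstructed from the hunks above; the helper name some_stage2_flush and the particular TLBI/barrier choice are illustrative, not part of the patch:

	/*
	 * Sketch of the post-rename calling pattern. "some_stage2_flush"
	 * is a hypothetical name; the body mirrors __kvm_tlb_flush_vmid().
	 */
	static void some_stage2_flush(struct kvm_s2_mmu *mmu)
	{
		struct tlb_inv_context cxt;

		/* Switch to the requested VMID. */
		enter_vmid_context(mmu, &cxt);

		/* TLBI instruction(s), then the required barriers. */
		__tlbi(vmalls12e1is);
		dsb(ish);
		isb();

		/* Restore the host's context. */
		exit_vmid_context(&cxt);
	}

The symmetric enter/exit names make it clear that every invalidation runs between a context switch into the target VMID and a matching restore of the host context, in both the nVHE and VHE variants.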