// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/tlbflush.h>

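/*
 * Stashed host state for a TLB invalidation: only TCR_EL1 needs saving,
 * as the speculative-AT workaround below temporarily sets its EPD bits
 * while the guest's VMID is loaded.
 */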
struct tlb_inv_context {
	u64		tcr;
};

static void __tlb_switch_to_guest(struct kvm_s2_mmu *mmu,
				  struct tlb_inv_context *cxt)
{
	if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
		u64 val;

		/*
		 * For CPUs that are affected by ARM erratum 1319367, we need to
		 * avoid a host Stage-1 walk while we have the guest's
		 * VMID set in the VTTBR in order to invalidate TLBs.
		 * We're guaranteed that the S1 MMU is enabled, so we can
		 * simply set the EPD bits to avoid any further TLB fill.
		 */
		val = cxt->tcr = read_sysreg_el1(SYS_TCR);
		val |= TCR_EPD1_MASK | TCR_EPD0_MASK;
		write_sysreg_el1(val, SYS_TCR);
		isb();
	}

	/*
	 * __load_guest_stage2() includes an ISB only when the AT
	 * workaround is applied. Take care of the opposite condition,
	 * ensuring that we always have an ISB, but not two ISBs back
	 * to back.
	 */
	__load_guest_stage2(mmu);
	asm(ALTERNATIVE("isb", "nop", ARM64_WORKAROUND_SPECULATIVE_AT));
}

static void __tlb_switch_to_host(struct tlb_inv_context *cxt)
{
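	/*
	 * VMID 0 is reserved for the host, so a zero VTTBR_EL2 takes us
	 * back out of the guest's TLB context.
	 */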
	write_sysreg(0, vttbr_el2);

	if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
		/* Ensure write of the host VMID */
		isb();
		/* Restore the host's TCR_EL1 */
		write_sysreg_el1(cxt->tcr, SYS_TCR);
	}
}

void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu,
			      phys_addr_t ipa, int level)
{
	struct tlb_inv_context cxt;

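	/* Ensure the page-table update is visible before invalidating */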
	dsb(ishst);

	/* Switch to requested VMID */
	__tlb_switch_to_guest(mmu, &cxt);

	/*
	 * We could do so much better if we had the VA as well.
	 * Instead, we invalidate Stage-2 for this IPA, and the
	 * whole of Stage-1. Weep...
	 */
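	/*
	 * The address argument to TLBI is always expressed in units of
	 * 4kB (IPA[51:12]), whatever the translation granule, hence the
	 * fixed shift. __tlbi_level() additionally encodes @level as a
	 * TTL hint on CPUs implementing FEAT_TTL.
	 */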
	ipa >>= 12;
	__tlbi_level(ipas2e1is, ipa, level);

	/*
	 * We have to ensure completion of the invalidation at Stage-2,
	 * since a table walk on another CPU could refill a TLB with a
	 * complete (S1 + S2) walk based on the old Stage-2 mapping if
	 * the Stage-1 invalidation happened first.
	 */
	dsb(ish);
	__tlbi(vmalle1is);
	dsb(ish);
	isb();

	/*
	 * If the host is running at EL1 and we have a VPIPT I-cache,
	 * then we must perform I-cache maintenance at EL2 in order for
	 * it to have an effect on the guest. Since the guest cannot hit
	 * I-cache lines allocated with a different VMID, we don't need
	 * to worry about junk out of guest reset (we nuke the I-cache on
	 * VMID rollover), but we do need to be careful when remapping
	 * executable pages for the same guest. This can happen when KSM
	 * takes a CoW fault on an executable page, copies the page into
	 * a page that was previously mapped in the guest and then needs
	 * to invalidate the guest view of the I-cache for that page
	 * from EL1. To solve this, we invalidate the entire I-cache when
	 * unmapping a page from a guest if we have a VPIPT I-cache but
	 * the host is running at EL1. As above, we could do better if
	 * we had the VA.
	 *
	 * The moral of this story is: if you have a VPIPT I-cache, then
	 * you should be running with VHE enabled.
	 */
	if (icache_is_vpipt())
		__flush_icache_all();

	__tlb_switch_to_host(&cxt);
}

void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu)
{
	struct tlb_inv_context cxt;

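	/* Ensure the page-table updates are visible before invalidating */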
	dsb(ishst);

	/* Switch to requested VMID */
	__tlb_switch_to_guest(mmu, &cxt);

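	/* Invalidate all combined Stage-1 and Stage-2 entries for this VMID */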
	__tlbi(vmalls12e1is);
	dsb(ish);
	isb();

	__tlb_switch_to_host(&cxt);
}

void __kvm_flush_cpu_context(struct kvm_s2_mmu *mmu)
{
	struct tlb_inv_context cxt;

	/* Switch to requested VMID */
	__tlb_switch_to_guest(mmu, &cxt);

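	/*
	 * Local, non-broadcast invalidation of this CPU's TLB and
	 * I-cache for the current VMID, hence the non-shareable
	 * barrier below.
	 */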
	__tlbi(vmalle1);
	asm volatile("ic iallu");
	dsb(nsh);
	isb();

	__tlb_switch_to_host(&cxt);
}

void __kvm_flush_vm_context(void)
{
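	/* Ensure pending page-table updates are visible before invalidating */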
	dsb(ishst);
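	/* Invalidate Stage-1 and Stage-2 entries for all VMIDs, Inner Shareable */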
	__tlbi(alle1is);

	/*
	 * VIPT and PIPT caches are not affected by VMID, so no maintenance
	 * is necessary across a VMID rollover.
	 *
	 * VPIPT caches constrain lookup and maintenance to the active VMID,
	 * so we need to invalidate lines with a stale VMID to avoid an ABA
	 * race after multiple rollovers.
	 */
	if (icache_is_vpipt())
		asm volatile("ic ialluis");

	dsb(ish);
}