Commit 69bbe136 authored by Linus Torvalds

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm

Pull kvm fixes from Paolo Bonzini:
 "Fixes for ARM and aarch64.

  This pull request is coming a bit later than I would have preferred,
  because Gleb and I happened to have holidays around the same weeks of
  August... sorry about that"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
  KVM: ARM: Squash len warning
  arm64: KVM: use 'int' instead of 'u32' for variable 'target' in kvm_host.h.
  arm64: KVM: add missing dsb before invalidating Stage-2 TLBs
  arm64: KVM: perform save/restore of PAR_EL1
  arm64: KVM: fix 2-level page tables unmapping
  ARM: KVM: Fix unaligned unmap_range leak
  ARM: KVM: Fix 64-bit coprocessor handling
parents da2ad2a2 c566ccfc
arch/arm/kvm/coproc.c
@@ -146,7 +146,11 @@ static bool pm_fake(struct kvm_vcpu *vcpu,
 #define access_pmintenclr pm_fake
 
 /* Architected CP15 registers.
- * Important: Must be sorted ascending by CRn, CRM, Op1, Op2
+ * CRn denotes the primary register number, but is copied to the CRm in the
+ * user space API for 64-bit register access in line with the terminology used
+ * in the ARM ARM.
+ * Important: Must be sorted ascending by CRn, CRM, Op1, Op2 and with 64-bit
+ * registers preceding 32-bit ones.
  */
 static const struct coproc_reg cp15_regs[] = {
 	/* CSSELR: swapped by interrupt.S. */
@@ -154,8 +158,8 @@ static const struct coproc_reg cp15_regs[] = {
 			NULL, reset_unknown, c0_CSSELR },
 	/* TTBR0/TTBR1: swapped by interrupt.S. */
-	{ CRm( 2), Op1( 0), is64, NULL, reset_unknown64, c2_TTBR0 },
-	{ CRm( 2), Op1( 1), is64, NULL, reset_unknown64, c2_TTBR1 },
+	{ CRm64( 2), Op1( 0), is64, NULL, reset_unknown64, c2_TTBR0 },
+	{ CRm64( 2), Op1( 1), is64, NULL, reset_unknown64, c2_TTBR1 },
 	/* TTBCR: swapped by interrupt.S. */
 	{ CRn( 2), CRm( 0), Op1( 0), Op2( 2), is32,
@@ -182,7 +186,7 @@ static const struct coproc_reg cp15_regs[] = {
 			NULL, reset_unknown, c6_IFAR },
 	/* PAR swapped by interrupt.S */
-	{ CRn( 7), Op1( 0), is64, NULL, reset_unknown64, c7_PAR },
+	{ CRm64( 7), Op1( 0), is64, NULL, reset_unknown64, c7_PAR },
 	/*
 	 * DC{C,I,CI}SW operations:
@@ -399,12 +403,13 @@ static bool index_to_params(u64 id, struct coproc_params *params)
 			  | KVM_REG_ARM_OPC1_MASK))
 			return false;
 		params->is_64bit = true;
-		params->CRm = ((id & KVM_REG_ARM_CRM_MASK)
+		/* CRm to CRn: see cp15_to_index for details */
+		params->CRn = ((id & KVM_REG_ARM_CRM_MASK)
 			       >> KVM_REG_ARM_CRM_SHIFT);
 		params->Op1 = ((id & KVM_REG_ARM_OPC1_MASK)
 			       >> KVM_REG_ARM_OPC1_SHIFT);
 		params->Op2 = 0;
-		params->CRn = 0;
+		params->CRm = 0;
 		return true;
 	default:
 		return false;
@@ -898,7 +903,14 @@ static u64 cp15_to_index(const struct coproc_reg *reg)
 	if (reg->is_64) {
 		val |= KVM_REG_SIZE_U64;
 		val |= (reg->Op1 << KVM_REG_ARM_OPC1_SHIFT);
-		val |= (reg->CRm << KVM_REG_ARM_CRM_SHIFT);
+		/*
+		 * CRn always denotes the primary coproc. reg. nr. for the
+		 * in-kernel representation, but the user space API uses the
+		 * CRm for the encoding, because it is modelled after the
+		 * MRRC/MCRR instructions: see the ARM ARM rev. c page
+		 * B3-1445
+		 */
+		val |= (reg->CRn << KVM_REG_ARM_CRM_SHIFT);
 	} else {
 		val |= KVM_REG_SIZE_U32;
 		val |= (reg->Op1 << KVM_REG_ARM_OPC1_SHIFT);
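Note on the coproc.c change above: the standalone user space sketch below (not kernel code) round-trips a 64-bit register through the index encoding to show how the primary register number (CRn) now travels in the index's CRm field. The KVM_REG_* values are reproduced here from the 3.11-era arch/arm/include/uapi/asm/kvm.h so the demo is self-contained; treat them and the simplified struct as illustrative assumptions.

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Constants mirroring the UAPI header (assumed 3.11-era values). */
#define KVM_REG_SIZE_U64	0x0030000000000000ULL
#define KVM_REG_ARM_OPC1_SHIFT	3
#define KVM_REG_ARM_CRM_SHIFT	7
#define KVM_REG_ARM_CRM_MASK	0x0000000000000780ULL

struct coproc_reg { unsigned CRn, CRm, Op1, Op2; bool is_64; };

/* Mirrors the new cp15_to_index() branch: CRn goes out in the CRm field. */
static uint64_t encode64(const struct coproc_reg *r)
{
	return KVM_REG_SIZE_U64
	     | ((uint64_t)r->Op1 << KVM_REG_ARM_OPC1_SHIFT)
	     | ((uint64_t)r->CRn << KVM_REG_ARM_CRM_SHIFT);
}

/* Mirrors the new index_to_params() branch: the CRm field comes back as CRn. */
static unsigned decode64_crn(uint64_t id)
{
	return (id & KVM_REG_ARM_CRM_MASK) >> KVM_REG_ARM_CRM_SHIFT;
}

int main(void)
{
	/* TTBR0 as declared with CRm64( 2): primary register number 2 */
	struct coproc_reg ttbr0 = { .CRn = 2, .CRm = 0, .Op1 = 0, .is_64 = true };
	uint64_t id = encode64(&ttbr0);

	assert(decode64_crn(id) == 2);	/* round-trip preserves the number */
	printf("index %#llx -> CRn %u\n", (unsigned long long)id, decode64_crn(id));
	return 0;
}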
arch/arm/kvm/coproc.h
@@ -135,6 +135,8 @@ static inline int cmp_reg(const struct coproc_reg *i1,
 		return -1;
 	if (i1->CRn != i2->CRn)
 		return i1->CRn - i2->CRn;
+	if (i1->is_64 != i2->is_64)
+		return i2->is_64 - i1->is_64;
 	if (i1->CRm != i2->CRm)
 		return i1->CRm - i2->CRm;
 	if (i1->Op1 != i2->Op1)
@@ -145,6 +147,7 @@ static inline int cmp_reg(const struct coproc_reg *i1,
 #define CRn(_x)		.CRn = _x
 #define CRm(_x)		.CRm = _x
+#define CRm64(_x)	.CRn = _x, .CRm = 0
 #define Op1(_x)		.Op1 = _x
 #define Op2(_x)		.Op2 = _x
 #define is64		.is_64 = true
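Note on cmp_reg(): the new is_64 comparison is what enforces the "64-bit registers preceding 32-bit ones" rule added to the table comments. A minimal user space sketch (simplified stand-in struct, sample values taken from the tables above) shows a case where the rule actually changes the order: TTBR1 (64-bit, Op1 1) now sorts before TTBCR (32-bit, Op1 0) at the same primary register number.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct reg { const char *name; int CRn, CRm, Op1, Op2; bool is_64; };

/* Same field-by-field logic as the patched cmp_reg() */
static int cmp_reg(const void *a, const void *b)
{
	const struct reg *i1 = a, *i2 = b;
	if (i1->CRn != i2->CRn)
		return i1->CRn - i2->CRn;
	if (i1->is_64 != i2->is_64)
		return i2->is_64 - i1->is_64;	/* 64-bit entries first */
	if (i1->CRm != i2->CRm)
		return i1->CRm - i2->CRm;
	if (i1->Op1 != i2->Op1)
		return i1->Op1 - i2->Op1;
	return i1->Op2 - i2->Op2;
}

int main(void)
{
	struct reg regs[] = {
		{ "TTBCR (32-bit)",           2, 0, 0, 2, false },
		{ "TTBR1 (64-bit, CRm64(2))", 2, 0, 1, 0, true  },
	};

	qsort(regs, 2, sizeof(regs[0]), cmp_reg);
	/* Prints TTBR1 first: without the is_64 test, Op1 would have
	 * sorted TTBCR (Op1 0) ahead of TTBR1 (Op1 1). */
	for (int i = 0; i < 2; i++)
		printf("%s\n", regs[i].name);
	return 0;
}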
arch/arm/kvm/coproc_a15.c
@@ -114,7 +114,11 @@ static bool access_l2ectlr(struct kvm_vcpu *vcpu,
 /*
  * A15-specific CP15 registers.
- * Important: Must be sorted ascending by CRn, CRM, Op1, Op2
+ * CRn denotes the primary register number, but is copied to the CRm in the
+ * user space API for 64-bit register access in line with the terminology used
+ * in the ARM ARM.
+ * Important: Must be sorted ascending by CRn, CRM, Op1, Op2 and with 64-bit
+ * registers preceding 32-bit ones.
  */
 static const struct coproc_reg a15_regs[] = {
 	/* MPIDR: we use VMPIDR for guest access. */
arch/arm/kvm/mmio.c
@@ -63,7 +63,8 @@ int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
 static int decode_hsr(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 		      struct kvm_exit_mmio *mmio)
 {
-	unsigned long rt, len;
+	unsigned long rt;
+	int len;
 	bool is_write, sign_extend;
 
 	if (kvm_vcpu_dabt_isextabt(vcpu)) {
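Note on the decode_hsr() change: splitting `len` out as a signed int matters because a size accessor can fail with a negative error code, and an unsigned `len` silently defeats any `len < 0` check (compilers flag the comparison as always-false, which is the warning being squashed). A hedged standalone illustration of the pitfall, using a hypothetical stand-in helper rather than the kernel's accessor:

#include <stdio.h>

static int get_access_size(int sas)	/* stand-in: returns size or -1 */
{
	return (sas <= 2) ? (1 << sas) : -1;
}

int main(void)
{
	unsigned long len_u = get_access_size(3);	/* -1 wraps to a huge value */
	int len_s = get_access_size(3);

	if (len_u < 0)				/* always false: compilers warn */
		puts("unsigned: caught error");	/* never reached */
	if (len_s < 0)
		puts("signed: caught error");	/* works as intended */
	return 0;
}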
arch/arm/kvm/mmu.c
@@ -85,6 +85,12 @@ static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
 	return p;
 }
 
+static bool page_empty(void *ptr)
+{
+	struct page *ptr_page = virt_to_page(ptr);
+	return page_count(ptr_page) == 1;
+}
+
 static void clear_pud_entry(struct kvm *kvm, pud_t *pud, phys_addr_t addr)
 {
 	pmd_t *pmd_table = pmd_offset(pud, 0);
@@ -103,12 +109,6 @@ static void clear_pmd_entry(struct kvm *kvm, pmd_t *pmd, phys_addr_t addr)
 	put_page(virt_to_page(pmd));
 }
 
-static bool pmd_empty(pmd_t *pmd)
-{
-	struct page *pmd_page = virt_to_page(pmd);
-	return page_count(pmd_page) == 1;
-}
-
 static void clear_pte_entry(struct kvm *kvm, pte_t *pte, phys_addr_t addr)
 {
 	if (pte_present(*pte)) {
@@ -118,12 +118,6 @@ static void clear_pte_entry(struct kvm *kvm, pte_t *pte, phys_addr_t addr)
 	}
 }
 
-static bool pte_empty(pte_t *pte)
-{
-	struct page *pte_page = virt_to_page(pte);
-	return page_count(pte_page) == 1;
-}
-
 static void unmap_range(struct kvm *kvm, pgd_t *pgdp,
 			unsigned long long start, u64 size)
 {
@@ -132,37 +126,37 @@ static void unmap_range(struct kvm *kvm, pgd_t *pgdp,
 	pmd_t *pmd;
 	pte_t *pte;
 	unsigned long long addr = start, end = start + size;
-	u64 range;
+	u64 next;
 
 	while (addr < end) {
 		pgd = pgdp + pgd_index(addr);
 		pud = pud_offset(pgd, addr);
 		if (pud_none(*pud)) {
-			addr += PUD_SIZE;
+			addr = pud_addr_end(addr, end);
 			continue;
 		}
 
 		pmd = pmd_offset(pud, addr);
 		if (pmd_none(*pmd)) {
-			addr += PMD_SIZE;
+			addr = pmd_addr_end(addr, end);
 			continue;
 		}
 
 		pte = pte_offset_kernel(pmd, addr);
 		clear_pte_entry(kvm, pte, addr);
-		range = PAGE_SIZE;
+		next = addr + PAGE_SIZE;
 
 		/* If we emptied the pte, walk back up the ladder */
-		if (pte_empty(pte)) {
+		if (page_empty(pte)) {
 			clear_pmd_entry(kvm, pmd, addr);
-			range = PMD_SIZE;
-			if (pmd_empty(pmd)) {
+			next = pmd_addr_end(addr, end);
+			if (page_empty(pmd) && !page_empty(pud)) {
 				clear_pud_entry(kvm, pud, addr);
-				range = PUD_SIZE;
+				next = pud_addr_end(addr, end);
 			}
 		}
-		addr += range;
+		addr = next;
 	}
 }
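Note on the unmap_range() rework: the old fixed strides (`addr += PUD_SIZE` and friends) are only correct when `addr` is aligned to that stride; from an unaligned start they jump past the next table boundary, which is how ranges were left partially unmapped. The pXd_addr_end() pattern instead advances to the next boundary, clamped at `end`. A small user space sketch of that semantic, with an illustrative 1 GiB PUD size (the kernel macro also guards against address wrap-around, omitted here):

#include <stdio.h>

#define PUD_SIZE	(1UL << 30)
#define PUD_MASK	(~(PUD_SIZE - 1))

/* simplified pud_addr_end(): next PUD boundary, clamped to end */
static unsigned long pud_addr_end(unsigned long addr, unsigned long end)
{
	unsigned long boundary = (addr + PUD_SIZE) & PUD_MASK;
	return boundary < end ? boundary : end;
}

int main(void)
{
	unsigned long start = (1UL << 30) - 4096;	/* 4 KiB below a 1 GiB boundary */
	unsigned long end   = start + 2 * 4096;

	printf("fixed stride : next = %#lx (overshoots end %#lx)\n",
	       start + PUD_SIZE, end);
	printf("addr_end     : next = %#lx (stops at the boundary)\n",
	       pud_addr_end(start, end));
	return 0;
}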
arch/arm64/include/asm/kvm_asm.h
@@ -42,14 +42,15 @@
 #define	TPIDR_EL1	18	/* Thread ID, Privileged */
 #define	AMAIR_EL1	19	/* Aux Memory Attribute Indirection Register */
 #define	CNTKCTL_EL1	20	/* Timer Control Register (EL1) */
+#define	PAR_EL1		21	/* Physical Address Register */
 /* 32bit specific registers. Keep them at the end of the range */
-#define	DACR32_EL2	21	/* Domain Access Control Register */
-#define	IFSR32_EL2	22	/* Instruction Fault Status Register */
-#define	FPEXC32_EL2	23	/* Floating-Point Exception Control Register */
-#define	DBGVCR32_EL2	24	/* Debug Vector Catch Register */
-#define	TEECR32_EL1	25	/* ThumbEE Configuration Register */
-#define	TEEHBR32_EL1	26	/* ThumbEE Handler Base Register */
-#define	NR_SYS_REGS	27
+#define	DACR32_EL2	22	/* Domain Access Control Register */
+#define	IFSR32_EL2	23	/* Instruction Fault Status Register */
+#define	FPEXC32_EL2	24	/* Floating-Point Exception Control Register */
+#define	DBGVCR32_EL2	25	/* Debug Vector Catch Register */
+#define	TEECR32_EL1	26	/* ThumbEE Configuration Register */
+#define	TEEHBR32_EL1	27	/* ThumbEE Handler Base Register */
+#define	NR_SYS_REGS	28
 
 /* 32bit mapping */
 #define c0_MPIDR	(MPIDR_EL1 * 2)	/* MultiProcessor ID Register */
@@ -69,6 +70,8 @@
 #define c5_AIFSR	(AFSR1_EL1 * 2)	/* Auxiliary Instr Fault Status R */
 #define c6_DFAR		(FAR_EL1 * 2)	/* Data Fault Address Register */
 #define c6_IFAR		(c6_DFAR + 1)	/* Instruction Fault Address Register */
+#define c7_PAR		(PAR_EL1 * 2)	/* Physical Address Register */
+#define c7_PAR_high	(c7_PAR + 1)	/* PAR top 32 bits */
 #define c10_PRRR	(MAIR_EL1 * 2)	/* Primary Region Remap Register */
 #define c10_NMRR	(c10_PRRR + 1)	/* Normal Memory Remap Register */
 #define c12_VBAR	(VBAR_EL1 * 2)	/* Vector Base Address Register */
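Note on the new c7_PAR/c7_PAR_high indices: the (index * 2) scheme lets the 32-bit guest view address each 64-bit sysreg as a pair of 32-bit words, so PAR_EL1 at slot 21 yields c7_PAR = 42 and c7_PAR_high = 43. A sketch of the aliasing this implies, assuming the little-endian layout the scheme relies on (the union is a simplified stand-in, not the kernel's context struct):

#include <stdint.h>
#include <stdio.h>

#define PAR_EL1		21
#define NR_SYS_REGS	28
#define c7_PAR		(PAR_EL1 * 2)
#define c7_PAR_high	(c7_PAR + 1)

int main(void)
{
	union {
		uint64_t sys_regs[NR_SYS_REGS];
		uint32_t cp15[NR_SYS_REGS * 2];
	} ctx = { { 0 } };

	ctx.sys_regs[PAR_EL1] = 0xdeadbeef00c0ffeeULL;

	/* little-endian: low word first, high word second */
	printf("c7_PAR      = %#x\n", (unsigned)ctx.cp15[c7_PAR]);	/* 0xc0ffee */
	printf("c7_PAR_high = %#x\n", (unsigned)ctx.cp15[c7_PAR_high]);	/* 0xdeadbeef */
	return 0;
}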
arch/arm64/include/asm/kvm_host.h
@@ -129,7 +129,7 @@ struct kvm_vcpu_arch {
 	struct kvm_mmu_memory_cache mmu_page_cache;
 
 	/* Target CPU and feature flags */
-	u32 target;
+	int target;
 	DECLARE_BITMAP(features, KVM_VCPU_MAX_FEATURES);
 
 	/* Detect first run of a vcpu */
arch/arm64/kvm/hyp.S
@@ -214,6 +214,7 @@ __kvm_hyp_code_start:
 	mrs	x21,	tpidr_el1
 	mrs	x22,	amair_el1
 	mrs	x23,	cntkctl_el1
+	mrs	x24,	par_el1
 
 	stp	x4, x5, [x3]
 	stp	x6, x7, [x3, #16]
@@ -225,6 +226,7 @@ __kvm_hyp_code_start:
 	stp	x18, x19, [x3, #112]
 	stp	x20, x21, [x3, #128]
 	stp	x22, x23, [x3, #144]
+	str	x24, [x3, #160]
 .endm
 
 .macro restore_sysregs
@@ -243,6 +245,7 @@ __kvm_hyp_code_start:
 	ldp	x18, x19, [x3, #112]
 	ldp	x20, x21, [x3, #128]
 	ldp	x22, x23, [x3, #144]
+	ldr	x24, [x3, #160]
 
 	msr	vmpidr_el2,	x4
 	msr	csselr_el1,	x5
@@ -264,6 +267,7 @@ __kvm_hyp_code_start:
 	msr	tpidr_el1,	x21
 	msr	amair_el1,	x22
 	msr	cntkctl_el1,	x23
+	msr	par_el1,	x24
 .endm
 
 .macro skip_32bit_state tmp, target
@@ -600,6 +604,8 @@ END(__kvm_vcpu_run)
 // void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
 ENTRY(__kvm_tlb_flush_vmid_ipa)
+	dsb	ishst
+
 	kern_hyp_va	x0
 	ldr	x2, [x0, #KVM_VTTBR]
 	msr	vttbr_el2, x2
@@ -621,6 +627,7 @@ ENTRY(__kvm_tlb_flush_vmid_ipa)
 ENDPROC(__kvm_tlb_flush_vmid_ipa)
 
 ENTRY(__kvm_flush_vm_context)
+	dsb	ishst
 	tlbi	alle1is
 	ic	ialluis
 	dsb	sy
@@ -753,6 +760,10 @@ el1_trap:
 	 */
 	tbnz	x1, #7, 1f	// S1PTW is set
 
+	/* Preserve PAR_EL1 */
+	mrs	x3, par_el1
+	push	x3, xzr
+
 	/*
 	 * Permission fault, HPFAR_EL2 is invalid.
 	 * Resolve the IPA the hard way using the guest VA.
@@ -766,6 +777,8 @@ el1_trap:
 	/* Read result */
 	mrs	x3, par_el1
+	pop	x0, xzr		// Restore PAR_EL1 from the stack
+	msr	par_el1, x0
 	tbnz	x3, #0, 3f	// Bail out if we failed the translation
 	ubfx	x3, x3, #12, #36	// Extract IPA
 	lsl	x3, x3, #4	// and present it like HPFAR
arch/arm64/kvm/sys_regs.c
@@ -211,6 +211,9 @@ static const struct sys_reg_desc sys_reg_descs[] = {
 	/* FAR_EL1 */
 	{ Op0(0b11), Op1(0b000), CRn(0b0110), CRm(0b0000), Op2(0b000),
 	  NULL, reset_unknown, FAR_EL1 },
+	/* PAR_EL1 */
+	{ Op0(0b11), Op1(0b000), CRn(0b0111), CRm(0b0100), Op2(0b000),
+	  NULL, reset_unknown, PAR_EL1 },
 	/* PMINTENSET_EL1 */
 	{ Op0(0b11), Op1(0b000), CRn(0b1001), CRm(0b1110), Op2(0b001),