Commit 7b6871f6 authored by Marc Zyngier

Merge branch kvm-arm64/pkvm-cleanups-5.17 into kvmarm-master/next

* kvm-arm64/pkvm-cleanups-5.17:
  : .
  : pKVM cleanups from Quentin Perret:
  :
  : This series is a collection of various fixes and cleanups for KVM/arm64
  : when running in nVHE protected mode. The first two patches are real
  : fixes/improvements, the following two are minor cleanups, and the last
  : two help satisfy my paranoia so they're certainly optional.
  : .
  KVM: arm64: pkvm: Make kvm_host_owns_hyp_mappings() robust to VHE
  KVM: arm64: pkvm: Stub io map functions
  KVM: arm64: Make __io_map_base static
  KVM: arm64: Make the hyp memory pool static
  KVM: arm64: pkvm: Disable GICv2 support
  KVM: arm64: pkvm: Fix hyp_pool max order
Signed-off-by: Marc Zyngier <maz@kernel.org>
parents 94b4a6d5 64a1fbda
arch/arm64/kvm/hyp/include/nvhe/mm.h
@@ -12,8 +12,6 @@
 extern struct kvm_pgtable pkvm_pgtable;
 extern hyp_spinlock_t pkvm_pgd_lock;
-extern struct hyp_pool hpool;
-extern u64 __io_map_base;
 
 int hyp_create_idmap(u32 hyp_va_bits);
 int hyp_map_vectors(void);
arch/arm64/kvm/hyp/nvhe/mm.c
@@ -19,11 +19,12 @@
 struct kvm_pgtable pkvm_pgtable;
 hyp_spinlock_t pkvm_pgd_lock;
-u64 __io_map_base;
 
 struct memblock_region hyp_memory[HYP_MEMBLOCK_REGIONS];
 unsigned int hyp_memblock_nr;
 
+static u64 __io_map_base;
+
 static int __pkvm_create_mappings(unsigned long start, unsigned long size,
				  unsigned long phys, enum kvm_pgtable_prot prot)
 {
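Making __io_map_base static means every private-VA allocation at EL2 now has to go through this file's helpers. As an illustration of the pattern those helpers implement, here is a minimal sketch of a bump allocator over a private VA cursor, written against kernel macros (ALIGN, PAGE_ALIGN); the helper name and the simplified overflow handling are assumptions for illustration, not the kernel's exact API:

/*
 * Sketch only: a simplified bump allocator over a private VA
 * cursor, in the style of the nVHE mm code. Names are made up.
 */
static u64 io_map_base_sketch;

static unsigned long alloc_private_va_sketch(size_t size)
{
	unsigned long addr = ALIGN(io_map_base_sketch, PAGE_SIZE);
	unsigned long base = addr + PAGE_ALIGN(size);

	/* Reject wrap-around of the private VA range. */
	if (base < addr)
		return 0;

	io_map_base_sketch = base;
	return addr;
}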
arch/arm64/kvm/hyp/nvhe/page_alloc.c
@@ -241,7 +241,7 @@ int hyp_pool_init(struct hyp_pool *pool, u64 pfn, unsigned int nr_pages,
 	int i;
 
 	hyp_spin_lock_init(&pool->lock);
-	pool->max_order = min(MAX_ORDER, get_order(nr_pages << PAGE_SHIFT));
+	pool->max_order = min(MAX_ORDER, get_order((nr_pages + 1) << PAGE_SHIFT));
 	for (i = 0; i < pool->max_order; i++)
		INIT_LIST_HEAD(&pool->free_area[i]);
 	pool->range_start = phys;
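The max-order fix addresses an off-by-one: free_area[] is only initialized for orders 0..max_order-1, so max_order is an exclusive bound and the largest block the pool can ever hold has order max_order - 1. For a pool of exactly 2^n pages the old code computed max_order = n, making the single block that covers the whole pool unreachable; sizing the computation by one extra page yields n + 1. A standalone worked example, where get_order_sketch mimics the kernel's get_order() for 4KiB pages and the program itself is illustrative:

#include <stdio.h>

/* Same contract as the kernel helper: smallest 'order' such that
 * (1 << order) pages cover 'size' bytes. 4KiB pages assumed. */
static int get_order_sketch(unsigned long size)
{
	int order = 0;

	size = (size + 4095) >> 12;	/* bytes -> pages, rounded up */
	while ((1UL << order) < size)
		order++;
	return order;
}

int main(void)
{
	unsigned long nr_pages = 16;

	/* Old: 4, but only orders 0..3 usable -> max block is 8 pages. */
	printf("old max_order = %d\n", get_order_sketch(nr_pages << 12));
	/* New: 5, orders 0..4 usable -> a 16-page block now fits. */
	printf("new max_order = %d\n", get_order_sketch((nr_pages + 1) << 12));
	return 0;
}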
arch/arm64/kvm/hyp/nvhe/setup.c
@@ -18,7 +18,6 @@
 #include <nvhe/mm.h>
 #include <nvhe/trap_handler.h>
 
-struct hyp_pool hpool;
 unsigned long hyp_nr_cpus;
 
 #define hyp_percpu_size ((unsigned long)__per_cpu_end - \
@@ -28,6 +27,7 @@ static void *vmemmap_base;
 static void *hyp_pgt_base;
 static void *host_s2_pgt_base;
 static struct kvm_pgtable_mm_ops pkvm_pgtable_mm_ops;
+static struct hyp_pool hpool;
 
 static int divide_memory_pool(void *virt, unsigned long size)
 {
arch/arm64/kvm/mmu.c
@@ -239,6 +239,9 @@ void free_hyp_pgds(void)
 static bool kvm_host_owns_hyp_mappings(void)
 {
+	if (is_kernel_in_hyp_mode())
+		return false;
+
 	if (static_branch_likely(&kvm_protected_mode_initialized))
		return false;
@@ -407,6 +410,9 @@ int create_hyp_io_mappings(phys_addr_t phys_addr, size_t size,
 	unsigned long addr;
 	int ret;
 
+	if (is_protected_kvm_enabled())
+		return -EPERM;
+
 	*kaddr = ioremap(phys_addr, size);
 	if (!*kaddr)
		return -ENOMEM;
@@ -650,6 +656,9 @@ int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
				     KVM_PGTABLE_PROT_R |
				     (writable ? KVM_PGTABLE_PROT_W : 0);
 
+	if (is_protected_kvm_enabled())
+		return -EPERM;
+
 	size += offset_in_page(guest_ipa);
 	guest_ipa &= PAGE_MASK;
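Taken together, the mmu.c hunks make kvm_host_owns_hyp_mappings() return false on VHE, where the kernel itself runs at EL2 and there is no separate set of hyp mappings for the host to own, and make the io mapping paths fail fast with -EPERM under pKVM instead of creating mappings the hypervisor would not honour. The resulting decision logic, condensed into one sketch (a paraphrase of the function after this merge, not a verbatim copy):

static bool kvm_host_owns_hyp_mappings_sketch(void)
{
	/* VHE: the kernel is the hypervisor, no separate hyp VA space. */
	if (is_kernel_in_hyp_mode())
		return false;

	/* Protected mode up and running: EL2 owns its own page-tables. */
	if (static_branch_likely(&kvm_protected_mode_initialized))
		return false;

	/* Classic nVHE: the host kernel manages the hyp mappings. */
	return true;
}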
arch/arm64/kvm/vgic/vgic-v2.c
@@ -345,6 +345,11 @@ int vgic_v2_probe(const struct gic_kvm_info *info)
 	int ret;
 	u32 vtr;
 
+	if (is_protected_kvm_enabled()) {
+		kvm_err("GICv2 not supported in protected mode\n");
+		return -ENXIO;
+	}
+
 	if (!info->vctrl.start) {
		kvm_err("GICH not present in the firmware table\n");
		return -ENXIO;
arch/arm64/kvm/vgic/vgic-v3.c
@@ -651,7 +651,7 @@ int vgic_v3_probe(const struct gic_kvm_info *info)
 	} else if (!PAGE_ALIGNED(info->vcpu.start)) {
		pr_warn("GICV physical address 0x%llx not page aligned\n",
			(unsigned long long)info->vcpu.start);
-	} else {
+	} else if (kvm_get_mode() != KVM_MODE_PROTECTED) {
		kvm_vgic_global_state.vcpu_base = info->vcpu.start;
		kvm_vgic_global_state.can_emulate_gicv2 = true;
		ret = kvm_register_vgic_device(KVM_DEV_TYPE_ARM_VGIC_V2);
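The two vgic changes close the same hole from both ends: vgic_v2_probe() now refuses to run in protected mode outright, and on GICv3 hardware vgic_v3_probe() no longer advertises GICv2 emulation (can_emulate_gicv2) when kvm_get_mode() is KVM_MODE_PROTECTED. Both guards exist because GICv2 compatibility requires mapping the GICV/vcpu MMIO region into guests, which the io map stubs above now reject. A compact view of the resulting gate (an illustrative helper, not a function in the kernel):

/* Illustrative only: when GICv2 emulation remains available
 * after this merge. */
static bool gicv2_emulation_allowed_sketch(const struct gic_kvm_info *info)
{
	if (kvm_get_mode() == KVM_MODE_PROTECTED)
		return false;	/* pKVM cannot map GICV into guests */

	return info->vcpu.start && PAGE_ALIGNED(info->vcpu.start);
}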