Commit e3f019b3 authored by Marc Zyngier

KVM: arm/arm64: Move HYP IO VAs to the "idmap" range

We so far mapped our HYP IO (which is essentially the GICv2 control
registers) using the same method as for memory. It recently appeared
that this is a bit unsafe:

We compute the HYP VA using the kern_hyp_va helper, but that helper
is only designed to deal with kernel VAs coming from the linear map,
and not from the vmalloc region... This could in turn cause some bad
aliasing between the two, amplified by the upcoming VA randomisation.

A solution is to come up with our very own basic VA allocator for
MMIO. Since half of the HYP address space only contains a single
page (the idmap), we have plenty to borrow from. Let's use the idmap
as a base, and allocate downwards from it. GICv2 now lives on the
other side of the great VA barrier.
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
parent 3ddd4556
...@@ -49,6 +49,9 @@ ...@@ -49,6 +49,9 @@
#include <asm/pgalloc.h> #include <asm/pgalloc.h>
#include <asm/stage2_pgtable.h> #include <asm/stage2_pgtable.h>
/* Ensure compatibility with arm64 */
#define VA_BITS 32
int create_hyp_mappings(void *from, void *to, pgprot_t prot); int create_hyp_mappings(void *from, void *to, pgprot_t prot);
int create_hyp_io_mappings(phys_addr_t phys_addr, size_t size, int create_hyp_io_mappings(phys_addr_t phys_addr, size_t size,
void __iomem **kaddr, void __iomem **kaddr,
......
...@@ -43,6 +43,8 @@ static unsigned long hyp_idmap_start; ...@@ -43,6 +43,8 @@ static unsigned long hyp_idmap_start;
static unsigned long hyp_idmap_end; static unsigned long hyp_idmap_end;
static phys_addr_t hyp_idmap_vector; static phys_addr_t hyp_idmap_vector;
static unsigned long io_map_base;
#define S2_PGD_SIZE (PTRS_PER_S2_PGD * sizeof(pgd_t)) #define S2_PGD_SIZE (PTRS_PER_S2_PGD * sizeof(pgd_t))
#define hyp_pgd_order get_order(PTRS_PER_PGD * sizeof(pgd_t)) #define hyp_pgd_order get_order(PTRS_PER_PGD * sizeof(pgd_t))
...@@ -518,27 +520,35 @@ static void unmap_hyp_idmap_range(pgd_t *pgdp, phys_addr_t start, u64 size) ...@@ -518,27 +520,35 @@ static void unmap_hyp_idmap_range(pgd_t *pgdp, phys_addr_t start, u64 size)
* *
* Assumes hyp_pgd is a page table used strictly in Hyp-mode and * Assumes hyp_pgd is a page table used strictly in Hyp-mode and
* therefore contains either mappings in the kernel memory area (above * therefore contains either mappings in the kernel memory area (above
* PAGE_OFFSET), or device mappings in the vmalloc range (from * PAGE_OFFSET), or device mappings in the idmap range.
* VMALLOC_START to VMALLOC_END).
* *
* boot_hyp_pgd should only map two pages for the init code. * boot_hyp_pgd should only map the idmap range, and is only used in
* the extended idmap case.
*/ */
void free_hyp_pgds(void) void free_hyp_pgds(void)
{ {
pgd_t *id_pgd;
mutex_lock(&kvm_hyp_pgd_mutex); mutex_lock(&kvm_hyp_pgd_mutex);
id_pgd = boot_hyp_pgd ? boot_hyp_pgd : hyp_pgd;
if (id_pgd) {
/* In case we never called hyp_mmu_init() */
if (!io_map_base)
io_map_base = hyp_idmap_start;
unmap_hyp_idmap_range(id_pgd, io_map_base,
hyp_idmap_start + PAGE_SIZE - io_map_base);
}
if (boot_hyp_pgd) { if (boot_hyp_pgd) {
unmap_hyp_idmap_range(boot_hyp_pgd, hyp_idmap_start, PAGE_SIZE);
free_pages((unsigned long)boot_hyp_pgd, hyp_pgd_order); free_pages((unsigned long)boot_hyp_pgd, hyp_pgd_order);
boot_hyp_pgd = NULL; boot_hyp_pgd = NULL;
} }
if (hyp_pgd) { if (hyp_pgd) {
unmap_hyp_idmap_range(hyp_pgd, hyp_idmap_start, PAGE_SIZE);
unmap_hyp_range(hyp_pgd, kern_hyp_va(PAGE_OFFSET), unmap_hyp_range(hyp_pgd, kern_hyp_va(PAGE_OFFSET),
(uintptr_t)high_memory - PAGE_OFFSET); (uintptr_t)high_memory - PAGE_OFFSET);
unmap_hyp_range(hyp_pgd, kern_hyp_va(VMALLOC_START),
VMALLOC_END - VMALLOC_START);
free_pages((unsigned long)hyp_pgd, hyp_pgd_order); free_pages((unsigned long)hyp_pgd, hyp_pgd_order);
hyp_pgd = NULL; hyp_pgd = NULL;
...@@ -735,8 +745,9 @@ int create_hyp_io_mappings(phys_addr_t phys_addr, size_t size, ...@@ -735,8 +745,9 @@ int create_hyp_io_mappings(phys_addr_t phys_addr, size_t size,
void __iomem **kaddr, void __iomem **kaddr,
void __iomem **haddr) void __iomem **haddr)
{ {
unsigned long start, end; pgd_t *pgd = hyp_pgd;
int ret; unsigned long base;
int ret = 0;
*kaddr = ioremap(phys_addr, size); *kaddr = ioremap(phys_addr, size);
if (!*kaddr) if (!*kaddr)
...@@ -747,19 +758,52 @@ int create_hyp_io_mappings(phys_addr_t phys_addr, size_t size, ...@@ -747,19 +758,52 @@ int create_hyp_io_mappings(phys_addr_t phys_addr, size_t size,
return 0; return 0;
} }
mutex_lock(&kvm_hyp_pgd_mutex);
start = kern_hyp_va((unsigned long)*kaddr); /*
end = kern_hyp_va((unsigned long)*kaddr + size); * This assumes that we have enough space below the idmap
ret = __create_hyp_mappings(hyp_pgd, PTRS_PER_PGD, start, end, * page to allocate our VAs. If not, the check below will
__phys_to_pfn(phys_addr), PAGE_HYP_DEVICE); * kick. A potential alternative would be to detect that
* overflow and switch to an allocation above the idmap.
*
* The allocated size is always a multiple of PAGE_SIZE.
*/
size = PAGE_ALIGN(size + offset_in_page(phys_addr));
base = io_map_base - size;
/*
* Verify that BIT(VA_BITS - 1) hasn't been flipped by
* allocating the new area, as it would indicate we've
* overflowed the idmap/IO address range.
*/
if ((base ^ io_map_base) & BIT(VA_BITS - 1))
ret = -ENOMEM;
else
io_map_base = base;
mutex_unlock(&kvm_hyp_pgd_mutex);
if (ret)
goto out;
if (__kvm_cpu_uses_extended_idmap())
pgd = boot_hyp_pgd;
ret = __create_hyp_mappings(pgd, __kvm_idmap_ptrs_per_pgd(),
base, base + size,
__phys_to_pfn(phys_addr), PAGE_HYP_DEVICE);
if (ret)
goto out;
*haddr = (void __iomem *)base + offset_in_page(phys_addr);
out:
if (ret) { if (ret) {
iounmap(*kaddr); iounmap(*kaddr);
*kaddr = NULL; *kaddr = NULL;
return ret; return ret;
} }
*haddr = (void __iomem *)start;
return 0; return 0;
} }
...@@ -1892,6 +1936,7 @@ int kvm_mmu_init(void) ...@@ -1892,6 +1936,7 @@ int kvm_mmu_init(void)
goto out; goto out;
} }
io_map_base = hyp_idmap_start;
return 0; return 0;
out: out:
free_hyp_pgds(); free_hyp_pgds();
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment