Commit 6c41a413 authored by Marc Zyngier, committed by Christoffer Dall

arm/arm64: Get rid of KERN_TO_HYP

We have both KERN_TO_HYP and kern_hyp_va, which do the exact same
thing. Let's standardize on the latter.
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>
parent eac378a9
...@@ -25,8 +25,6 @@ ...@@ -25,8 +25,6 @@
#define __hyp_text __section(.hyp.text) notrace #define __hyp_text __section(.hyp.text) notrace
#define kern_hyp_va(v) (v)
#define __ACCESS_CP15(CRn, Op1, CRm, Op2) \ #define __ACCESS_CP15(CRn, Op1, CRm, Op2) \
"mrc", "mcr", __stringify(p15, Op1, %0, CRn, CRm, Op2), u32 "mrc", "mcr", __stringify(p15, Op1, %0, CRn, CRm, Op2), u32
#define __ACCESS_CP15_64(Op1, CRm) \ #define __ACCESS_CP15_64(Op1, CRm) \
......
...@@ -26,7 +26,7 @@ ...@@ -26,7 +26,7 @@
* We directly use the kernel VA for the HYP, as we can directly share * We directly use the kernel VA for the HYP, as we can directly share
* the mapping (HTTBR "covers" TTBR1). * the mapping (HTTBR "covers" TTBR1).
*/ */
#define KERN_TO_HYP(kva) (kva) #define kern_hyp_va(kva) (kva)
/* /*
* KVM_MMU_CACHE_MIN_PAGES is the number of stage2 page table translation levels. * KVM_MMU_CACHE_MIN_PAGES is the number of stage2 page table translation levels.
......
...@@ -506,9 +506,9 @@ void free_hyp_pgds(void) ...@@ -506,9 +506,9 @@ void free_hyp_pgds(void)
if (hyp_pgd) { if (hyp_pgd) {
unmap_hyp_range(hyp_pgd, hyp_idmap_start, PAGE_SIZE); unmap_hyp_range(hyp_pgd, hyp_idmap_start, PAGE_SIZE);
for (addr = PAGE_OFFSET; virt_addr_valid(addr); addr += PGDIR_SIZE) for (addr = PAGE_OFFSET; virt_addr_valid(addr); addr += PGDIR_SIZE)
unmap_hyp_range(hyp_pgd, KERN_TO_HYP(addr), PGDIR_SIZE); unmap_hyp_range(hyp_pgd, kern_hyp_va(addr), PGDIR_SIZE);
for (addr = VMALLOC_START; is_vmalloc_addr((void*)addr); addr += PGDIR_SIZE) for (addr = VMALLOC_START; is_vmalloc_addr((void*)addr); addr += PGDIR_SIZE)
unmap_hyp_range(hyp_pgd, KERN_TO_HYP(addr), PGDIR_SIZE); unmap_hyp_range(hyp_pgd, kern_hyp_va(addr), PGDIR_SIZE);
free_pages((unsigned long)hyp_pgd, hyp_pgd_order); free_pages((unsigned long)hyp_pgd, hyp_pgd_order);
hyp_pgd = NULL; hyp_pgd = NULL;
...@@ -670,8 +670,8 @@ int create_hyp_mappings(void *from, void *to, pgprot_t prot) ...@@ -670,8 +670,8 @@ int create_hyp_mappings(void *from, void *to, pgprot_t prot)
{ {
phys_addr_t phys_addr; phys_addr_t phys_addr;
unsigned long virt_addr; unsigned long virt_addr;
unsigned long start = KERN_TO_HYP((unsigned long)from); unsigned long start = kern_hyp_va((unsigned long)from);
unsigned long end = KERN_TO_HYP((unsigned long)to); unsigned long end = kern_hyp_va((unsigned long)to);
if (is_kernel_in_hyp_mode()) if (is_kernel_in_hyp_mode())
return 0; return 0;
...@@ -705,8 +705,8 @@ int create_hyp_mappings(void *from, void *to, pgprot_t prot) ...@@ -705,8 +705,8 @@ int create_hyp_mappings(void *from, void *to, pgprot_t prot)
*/ */
int create_hyp_io_mappings(void *from, void *to, phys_addr_t phys_addr) int create_hyp_io_mappings(void *from, void *to, phys_addr_t phys_addr)
{ {
unsigned long start = KERN_TO_HYP((unsigned long)from); unsigned long start = kern_hyp_va((unsigned long)from);
unsigned long end = KERN_TO_HYP((unsigned long)to); unsigned long end = kern_hyp_va((unsigned long)to);
if (is_kernel_in_hyp_mode()) if (is_kernel_in_hyp_mode())
return 0; return 0;
...@@ -1711,10 +1711,10 @@ int kvm_mmu_init(void) ...@@ -1711,10 +1711,10 @@ int kvm_mmu_init(void)
kvm_info("IDMAP page: %lx\n", hyp_idmap_start); kvm_info("IDMAP page: %lx\n", hyp_idmap_start);
kvm_info("HYP VA range: %lx:%lx\n", kvm_info("HYP VA range: %lx:%lx\n",
KERN_TO_HYP(PAGE_OFFSET), KERN_TO_HYP(~0UL)); kern_hyp_va(PAGE_OFFSET), kern_hyp_va(~0UL));
if (hyp_idmap_start >= KERN_TO_HYP(PAGE_OFFSET) && if (hyp_idmap_start >= kern_hyp_va(PAGE_OFFSET) &&
hyp_idmap_start < KERN_TO_HYP(~0UL)) { hyp_idmap_start < kern_hyp_va(~0UL)) {
/* /*
* The idmap page is intersecting with the VA space, * The idmap page is intersecting with the VA space,
* it is not safe to continue further. * it is not safe to continue further.
......
...@@ -133,7 +133,6 @@ static inline unsigned long __kern_hyp_va(unsigned long v) ...@@ -133,7 +133,6 @@ static inline unsigned long __kern_hyp_va(unsigned long v)
} }
#define kern_hyp_va(v) (typeof(v))(__kern_hyp_va((unsigned long)(v))) #define kern_hyp_va(v) (typeof(v))(__kern_hyp_va((unsigned long)(v)))
#define KERN_TO_HYP(v) kern_hyp_va(v)
/* /*
* We currently only support a 40bit IPA. * We currently only support a 40bit IPA.
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment