Commit 658e2c51 authored by Alexandre Ghiti, committed by Palmer Dabbelt

riscv: Introduce structure that groups all variables regarding kernel mapping

We have a lot of variables that hold kernel mapping addresses, offsets
between the physical and virtual mappings, and some others used for XIP
kernels. They are all defined in different places in mm/init.c, so group
them into a single structure and, for some of them, give them more
explicit and concise names.
Signed-off-by: Alexandre Ghiti <alex@ghiti.fr>
Signed-off-by: Palmer Dabbelt <palmerdabbelt@google.com>
parent 01112e5e
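Not part of the patch: a minimal user-space sketch of how the grouped fields relate, assuming the 64-bit non-XIP layout. The main() harness, the page_offset constant, and all example addresses are made up for illustration; only the struct fields come from the patch. va_pa_offset translates a load physical address into the linear mapping, va_kernel_pa_offset into the kernel mapping, mirroring the page.h macros below.

#include <stdint.h>
#include <stdio.h>

/* Trimmed copy of the new struct: 64-bit, non-XIP fields only. */
struct kernel_mapping {
	unsigned long virt_addr;		/* kernel mapping base (KERNEL_LINK_ADDR) */
	uintptr_t phys_addr;			/* kernel load physical address */
	uintptr_t size;				/* kernel image size */
	unsigned long va_pa_offset;		/* linear mapping VA - load PA */
	unsigned long va_kernel_pa_offset;	/* kernel mapping VA - load PA */
};

int main(void)
{
	/* Made-up example values; the real ones are computed in setup_vm(). */
	const unsigned long page_offset = 0xffffffe000000000UL;	/* stands in for PAGE_OFFSET */
	struct kernel_mapping km = {
		.virt_addr = 0xffffffff80000000UL,	/* stands in for KERNEL_LINK_ADDR */
		.phys_addr = 0x80200000UL,
		.size      = 16UL << 20,
	};

	km.va_pa_offset        = page_offset - km.phys_addr;
	km.va_kernel_pa_offset = km.virt_addr - km.phys_addr;

	/* A physical address inside the image translates two ways: */
	uintptr_t pa = km.phys_addr + 0x1000;
	printf("linear mapping VA: 0x%lx\n",
	       (unsigned long)pa + km.va_pa_offset);		/* as linear_mapping_pa_to_va() */
	printf("kernel mapping VA: 0x%lx\n",
	       (unsigned long)pa + km.va_kernel_pa_offset);	/* kernel image VA */
	return 0;
}

The XIP configuration additionally carries xiprom, xiprom_sz and va_kernel_xip_pa_offset for the flash-resident part of the image, as the page.h hunk below shows.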
@@ -79,46 +79,52 @@ typedef struct page *pgtable_t;
 #endif
 
 #ifdef CONFIG_MMU
-extern unsigned long va_pa_offset;
-#ifdef CONFIG_64BIT
-extern unsigned long va_kernel_pa_offset;
-#endif
-extern unsigned long va_kernel_xip_pa_offset;
 extern unsigned long pfn_base;
-extern uintptr_t load_sz;
 #define ARCH_PFN_OFFSET		(pfn_base)
 #else
-#define va_pa_offset		0
-#ifdef CONFIG_64BIT
-#define va_kernel_pa_offset	0
-#endif
-#define va_kernel_xip_pa_offset	0
 #define ARCH_PFN_OFFSET		(PAGE_OFFSET >> PAGE_SHIFT)
 #endif /* CONFIG_MMU */
 
-extern unsigned long kernel_virt_addr;
+struct kernel_mapping {
+	unsigned long virt_addr;
+	uintptr_t phys_addr;
+	uintptr_t size;
+	/* Offset between linear mapping virtual address and kernel load address */
+	unsigned long va_pa_offset;
+#ifdef CONFIG_64BIT
+	/* Offset between kernel mapping virtual address and kernel load address */
+	unsigned long va_kernel_pa_offset;
+#endif
+	unsigned long va_kernel_xip_pa_offset;
+#ifdef CONFIG_XIP_KERNEL
+	uintptr_t xiprom;
+	uintptr_t xiprom_sz;
+#endif
+};
+
+extern struct kernel_mapping kernel_map;
 
 #ifdef CONFIG_64BIT
 #define is_kernel_mapping(x)	\
-	((x) >= kernel_virt_addr && (x) < (kernel_virt_addr + load_sz))
+	((x) >= kernel_map.virt_addr && (x) < (kernel_map.virt_addr + kernel_map.size))
 #define is_linear_mapping(x)	\
-	((x) >= PAGE_OFFSET && (x) < kernel_virt_addr)
+	((x) >= PAGE_OFFSET && (x) < kernel_map.virt_addr)
 
-#define linear_mapping_pa_to_va(x)	((void *)((unsigned long)(x) + va_pa_offset))
+#define linear_mapping_pa_to_va(x)	((void *)((unsigned long)(x) + kernel_map.va_pa_offset))
 #define kernel_mapping_pa_to_va(y)	({						\
 	unsigned long _y = y;								\
 	(_y >= CONFIG_PHYS_RAM_BASE) ?							\
-		(void *)((unsigned long)(_y) + va_kernel_pa_offset + XIP_OFFSET) :	\
-		(void *)((unsigned long)(_y) + va_kernel_xip_pa_offset);		\
+		(void *)((unsigned long)(_y) + kernel_map.va_kernel_pa_offset + XIP_OFFSET) :	\
+		(void *)((unsigned long)(_y) + kernel_map.va_kernel_xip_pa_offset);	\
 	})
 #define __pa_to_va_nodebug(x)		linear_mapping_pa_to_va(x)
 
-#define linear_mapping_va_to_pa(x)	((unsigned long)(x) - va_pa_offset)
+#define linear_mapping_va_to_pa(x)	((unsigned long)(x) - kernel_map.va_pa_offset)
 #define kernel_mapping_va_to_pa(y) ({						\
 	unsigned long _y = y;							\
-	(_y < kernel_virt_addr + XIP_OFFSET) ?					\
-		((unsigned long)(_y) - va_kernel_xip_pa_offset) :		\
-		((unsigned long)(_y) - va_kernel_pa_offset - XIP_OFFSET);	\
+	(_y < kernel_map.virt_addr + XIP_OFFSET) ?				\
+		((unsigned long)(_y) - kernel_map.va_kernel_xip_pa_offset) :	\
+		((unsigned long)(_y) - kernel_map.va_kernel_pa_offset - XIP_OFFSET);	\
 	})
 
 #define __va_to_pa_nodebug(x)	({	\
@@ -128,12 +134,12 @@ extern unsigned long kernel_virt_addr;
 	})
 #else
 #define is_kernel_mapping(x)	\
-	((x) >= kernel_virt_addr && (x) < (kernel_virt_addr + load_sz))
+	((x) >= kernel_map.virt_addr && (x) < (kernel_map.virt_addr + kernel_map.size))
 #define is_linear_mapping(x)	\
 	((x) >= PAGE_OFFSET)
 
-#define __pa_to_va_nodebug(x)	((void *)((unsigned long) (x) + va_pa_offset))
-#define __va_to_pa_nodebug(x)	((unsigned long)(x) - va_pa_offset)
+#define __pa_to_va_nodebug(x)	((void *)((unsigned long) (x) + kernel_map.va_pa_offset))
+#define __va_to_pa_nodebug(x)	((unsigned long)(x) - kernel_map.va_pa_offset)
 #endif /* CONFIG_64BIT */
 
 #ifdef CONFIG_DEBUG_VIRTUAL
...
@@ -311,4 +311,6 @@ void asm_offsets(void)
 	 * ensures the alignment is sane.
 	 */
 	DEFINE(PT_SIZE_ON_STACK, ALIGN(sizeof(struct pt_regs), STACK_ALIGN));
+
+	OFFSET(KERNEL_MAP_VIRT_ADDR, kernel_mapping, virt_addr);
 }
@@ -81,9 +81,9 @@ pe_head_start:
 #ifdef CONFIG_MMU
 relocate:
 	/* Relocate return address */
-	la a1, kernel_virt_addr
+	la a1, kernel_map
 	XIP_FIXUP_OFFSET a1
-	REG_L a1, 0(a1)
+	REG_L a1, KERNEL_MAP_VIRT_ADDR(a1)
 	la a2, _start
 	sub a1, a1, a2
 	add ra, ra, a1
...
@@ -20,7 +20,7 @@ SYM_CODE_START(riscv_kexec_relocate)
 	 * s4: Pointer to the destination address for the relocation
 	 * s5: (const) Number of words per page
 	 * s6: (const) 1, used for subtraction
-	 * s7: (const) va_pa_offset, used when switching MMU off
+	 * s7: (const) kernel_map.va_pa_offset, used when switching MMU off
 	 * s8: (const) Physical address of the main loop
 	 * s9: (debug) indirection page counter
 	 * s10: (debug) entry counter
@@ -159,7 +159,7 @@ SYM_CODE_START(riscv_kexec_norelocate)
 	 * s0: (const) Phys address to jump to
 	 * s1: (const) Phys address of the FDT image
 	 * s2: (const) The hartid of the current hart
-	 * s3: (const) va_pa_offset, used when switching MMU off
+	 * s3: (const) kernel_map.va_pa_offset, used when switching MMU off
 	 */
 	mv	s0, a1
 	mv	s1, a2
...
@@ -188,6 +188,6 @@ machine_kexec(struct kimage *image)
 	/* Jump to the relocation code */
 	pr_notice("Bye...\n");
 	kexec_method(first_ind_entry, jump_addr, fdt_addr,
-		     this_hart_id, va_pa_offset);
+		     this_hart_id, kernel_map.va_pa_offset);
 	unreachable();
 }
@@ -30,10 +30,13 @@
 #include "../kernel/head.h"
 
-unsigned long kernel_virt_addr = KERNEL_LINK_ADDR;
-EXPORT_SYMBOL(kernel_virt_addr);
+struct kernel_mapping kernel_map __ro_after_init;
+EXPORT_SYMBOL(kernel_map);
+
+#ifdef CONFIG_XIP_KERNEL
+#define kernel_map	(*(struct kernel_mapping *)XIP_FIXUP(&kernel_map))
+#endif
 
 #ifdef CONFIG_XIP_KERNEL
-#define kernel_virt_addr	(*((unsigned long *)XIP_FIXUP(&kernel_virt_addr)))
 extern char _xiprom[], _exiprom[];
 #endif
@@ -211,25 +214,6 @@ static struct pt_alloc_ops _pt_ops __initdata;
 #define pt_ops _pt_ops
 #endif
 
-/* Offset between linear mapping virtual address and kernel load address */
-unsigned long va_pa_offset __ro_after_init;
-EXPORT_SYMBOL(va_pa_offset);
-#ifdef CONFIG_XIP_KERNEL
-#define va_pa_offset	(*((unsigned long *)XIP_FIXUP(&va_pa_offset)))
-#endif
-/* Offset between kernel mapping virtual address and kernel load address */
-#ifdef CONFIG_64BIT
-unsigned long va_kernel_pa_offset __ro_after_init;
-EXPORT_SYMBOL(va_kernel_pa_offset);
-#endif
-#ifdef CONFIG_XIP_KERNEL
-#define va_kernel_pa_offset	(*((unsigned long *)XIP_FIXUP(&va_kernel_pa_offset)))
-#endif
-unsigned long va_kernel_xip_pa_offset __ro_after_init;
-EXPORT_SYMBOL(va_kernel_xip_pa_offset);
-#ifdef CONFIG_XIP_KERNEL
-#define va_kernel_xip_pa_offset	(*((unsigned long *)XIP_FIXUP(&va_kernel_xip_pa_offset)))
-#endif
 unsigned long pfn_base __ro_after_init;
 EXPORT_SYMBOL(pfn_base);
@@ -345,7 +329,7 @@ static pmd_t *__init get_pmd_virt_late(phys_addr_t pa)
 static phys_addr_t __init alloc_pmd_early(uintptr_t va)
 {
-	BUG_ON((va - kernel_virt_addr) >> PGDIR_SHIFT);
+	BUG_ON((va - kernel_map.virt_addr) >> PGDIR_SHIFT);
 
 	return (uintptr_t)early_pmd;
 }
@@ -510,36 +494,24 @@ static __init pgprot_t pgprot_from_va(uintptr_t va)
 #error "setup_vm() is called from head.S before relocate so it should not use absolute addressing."
 #endif
 
-static uintptr_t load_pa __initdata;
-uintptr_t load_sz;
-#ifdef CONFIG_XIP_KERNEL
-#define load_pa	(*((uintptr_t *)XIP_FIXUP(&load_pa)))
-#define load_sz	(*((uintptr_t *)XIP_FIXUP(&load_sz)))
-#endif
-
 #ifdef CONFIG_XIP_KERNEL
-static uintptr_t xiprom __initdata;
-static uintptr_t xiprom_sz __initdata;
-#define xiprom_sz	(*((uintptr_t *)XIP_FIXUP(&xiprom_sz)))
-#define xiprom	(*((uintptr_t *)XIP_FIXUP(&xiprom)))
-
 static void __init create_kernel_page_table(pgd_t *pgdir, uintptr_t map_size,
 					    __always_unused bool early)
 {
 	uintptr_t va, end_va;
 
 	/* Map the flash resident part */
-	end_va = kernel_virt_addr + xiprom_sz;
-	for (va = kernel_virt_addr; va < end_va; va += map_size)
+	end_va = kernel_map.virt_addr + kernel_map.xiprom_sz;
+	for (va = kernel_map.virt_addr; va < end_va; va += map_size)
 		create_pgd_mapping(pgdir, va,
-				   xiprom + (va - kernel_virt_addr),
+				   kernel_map.xiprom + (va - kernel_map.virt_addr),
 				   map_size, PAGE_KERNEL_EXEC);
 
 	/* Map the data in RAM */
-	end_va = kernel_virt_addr + XIP_OFFSET + load_sz;
-	for (va = kernel_virt_addr + XIP_OFFSET; va < end_va; va += map_size)
+	end_va = kernel_map.virt_addr + XIP_OFFSET + kernel_map.size;
+	for (va = kernel_map.virt_addr + XIP_OFFSET; va < end_va; va += map_size)
 		create_pgd_mapping(pgdir, va,
-				   load_pa + (va - (kernel_virt_addr + XIP_OFFSET)),
+				   kernel_map.phys_addr + (va - (kernel_map.virt_addr + XIP_OFFSET)),
 				   map_size, PAGE_KERNEL);
 }
 #else
@@ -548,10 +520,10 @@ static void __init create_kernel_page_table(pgd_t *pgdir, uintptr_t map_size,
 {
 	uintptr_t va, end_va;
 
-	end_va = kernel_virt_addr + load_sz;
-	for (va = kernel_virt_addr; va < end_va; va += map_size)
+	end_va = kernel_map.virt_addr + kernel_map.size;
+	for (va = kernel_map.virt_addr; va < end_va; va += map_size)
 		create_pgd_mapping(pgdir, va,
-				   load_pa + (va - kernel_virt_addr),
+				   kernel_map.phys_addr + (va - kernel_map.virt_addr),
 				   map_size,
 				   early ?
 					PAGE_KERNEL_EXEC : pgprot_from_va(va));
@@ -566,25 +538,27 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
 	pmd_t fix_bmap_spmd, fix_bmap_epmd;
 #endif
 
+	kernel_map.virt_addr = KERNEL_LINK_ADDR;
+
 #ifdef CONFIG_XIP_KERNEL
-	xiprom = (uintptr_t)CONFIG_XIP_PHYS_ADDR;
-	xiprom_sz = (uintptr_t)(&_exiprom) - (uintptr_t)(&_xiprom);
+	kernel_map.xiprom = (uintptr_t)CONFIG_XIP_PHYS_ADDR;
+	kernel_map.xiprom_sz = (uintptr_t)(&_exiprom) - (uintptr_t)(&_xiprom);
 
-	load_pa = (uintptr_t)CONFIG_PHYS_RAM_BASE;
-	load_sz = (uintptr_t)(&_end) - (uintptr_t)(&_sdata);
+	kernel_map.phys_addr = (uintptr_t)CONFIG_PHYS_RAM_BASE;
+	kernel_map.size = (uintptr_t)(&_end) - (uintptr_t)(&_sdata);
 
-	va_kernel_xip_pa_offset = kernel_virt_addr - xiprom;
+	kernel_map.va_kernel_xip_pa_offset = kernel_map.virt_addr - kernel_map.xiprom;
 #else
-	load_pa = (uintptr_t)(&_start);
-	load_sz = (uintptr_t)(&_end) - load_pa;
+	kernel_map.phys_addr = (uintptr_t)(&_start);
+	kernel_map.size = (uintptr_t)(&_end) - kernel_map.phys_addr;
 #endif
 
-	va_pa_offset = PAGE_OFFSET - load_pa;
+	kernel_map.va_pa_offset = PAGE_OFFSET - kernel_map.phys_addr;
 #ifdef CONFIG_64BIT
-	va_kernel_pa_offset = kernel_virt_addr - load_pa;
+	kernel_map.va_kernel_pa_offset = kernel_map.virt_addr - kernel_map.phys_addr;
 #endif
 
-	pfn_base = PFN_DOWN(load_pa);
+	pfn_base = PFN_DOWN(kernel_map.phys_addr);
 
 	/*
 	 * Enforce boot alignment requirements of RV32 and
@@ -594,7 +568,7 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
 	/* Sanity check alignment and size */
 	BUG_ON((PAGE_OFFSET % PGDIR_SIZE) != 0);
-	BUG_ON((load_pa % map_size) != 0);
+	BUG_ON((kernel_map.phys_addr % map_size) != 0);
 
 	pt_ops.alloc_pte = alloc_pte_early;
 	pt_ops.get_pte_virt = get_pte_virt_early;
@@ -611,19 +585,19 @@
 	create_pmd_mapping(fixmap_pmd, FIXADDR_START,
 			   (uintptr_t)fixmap_pte, PMD_SIZE, PAGE_TABLE);
 	/* Setup trampoline PGD and PMD */
-	create_pgd_mapping(trampoline_pg_dir, kernel_virt_addr,
+	create_pgd_mapping(trampoline_pg_dir, kernel_map.virt_addr,
 			   (uintptr_t)trampoline_pmd, PGDIR_SIZE, PAGE_TABLE);
 #ifdef CONFIG_XIP_KERNEL
-	create_pmd_mapping(trampoline_pmd, kernel_virt_addr,
-			   xiprom, PMD_SIZE, PAGE_KERNEL_EXEC);
+	create_pmd_mapping(trampoline_pmd, kernel_map.virt_addr,
+			   kernel_map.xiprom, PMD_SIZE, PAGE_KERNEL_EXEC);
 #else
-	create_pmd_mapping(trampoline_pmd, kernel_virt_addr,
-			   load_pa, PMD_SIZE, PAGE_KERNEL_EXEC);
+	create_pmd_mapping(trampoline_pmd, kernel_map.virt_addr,
+			   kernel_map.phys_addr, PMD_SIZE, PAGE_KERNEL_EXEC);
 #endif
 #else
 	/* Setup trampoline PGD */
-	create_pgd_mapping(trampoline_pg_dir, kernel_virt_addr,
-			   load_pa, PGDIR_SIZE, PAGE_KERNEL_EXEC);
+	create_pgd_mapping(trampoline_pg_dir, kernel_map.virt_addr,
+			   kernel_map.phys_addr, PGDIR_SIZE, PAGE_KERNEL_EXEC);
 #endif
 
 	/*
...
@@ -23,7 +23,7 @@ EXPORT_SYMBOL(__virt_to_phys);
 phys_addr_t __phys_addr_symbol(unsigned long x)
 {
-	unsigned long kernel_start = (unsigned long)kernel_virt_addr;
+	unsigned long kernel_start = kernel_map.virt_addr;
 	unsigned long kernel_end = (unsigned long)_end;
 
 	/*
...
@@ -379,7 +379,7 @@ static int __init ptdump_init(void)
 	address_markers[PAGE_OFFSET_NR].start_address = PAGE_OFFSET;
 #ifdef CONFIG_64BIT
 	address_markers[MODULES_MAPPING_NR].start_address = MODULES_VADDR;
-	address_markers[KERNEL_MAPPING_NR].start_address = kernel_virt_addr;
+	address_markers[KERNEL_MAPPING_NR].start_address = kernel_map.virt_addr;
 #endif
 
 	kernel_ptd_info.base_addr = KERN_VIRT_START;
...