Commit 8fbdccd2 authored by Qinglin Pan, committed by Palmer Dabbelt

riscv: mm: Support kasan for sv57

This patchset adds kasan_populate and kasan_shallow_populate for sv57, and
has been tested on both qemu and unmatched with CONFIG_KASAN and
CONFIG_KASAN_VMALLOC enabled.
Signed-off-by: Qinglin Pan <panqinglin2020@iscas.ac.cn>
Signed-off-by: Palmer Dabbelt <palmer@rivosinc.com>
parent 011f09d1
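
Background for the diff below: sv57 extends RISC-V virtual addresses to 57 bits by inserting a fifth translation level, the p4d, between the pgd and the pud, so the KASAN shadow walkers gain matching kasan_populate_p4d / kasan_shallow_populate_p4d steps. A standalone sketch (not part of the patch) of how an sv57 virtual address decomposes into five 9-bit table indices plus a 12-bit page offset:

/* Illustrative only: decompose an sv57 virtual address into its five
 * 9-bit VPN fields (VPN[4] indexes the pgd, VPN[3] the p4d, and so on
 * down to VPN[0] for the pte) plus the 12-bit page offset. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t va = 0xffcfffa0deadb000ULL;	/* arbitrary example address */

	for (int lvl = 4; lvl >= 0; lvl--)
		printf("VPN[%d] = %llu\n", lvl,
		       (unsigned long long)((va >> (12 + 9 * lvl)) & 0x1ff));
	printf("offset = %llu\n", (unsigned long long)(va & 0xfff));
	return 0;
}
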
@@ -111,6 +111,8 @@ static void __init kasan_populate_pud(pgd_t *pgd,
 		 * pt_ops facility.
 		 */
 		base_pud = pt_ops.get_pud_virt(pfn_to_phys(_pgd_pfn(*pgd)));
+	} else if (pgd_none(*pgd)) {
+		base_pud = memblock_alloc(PTRS_PER_PUD * sizeof(pud_t), PAGE_SIZE);
 	} else {
 		base_pud = (pud_t *)pgd_page_vaddr(*pgd);
 		if (base_pud == lm_alias(kasan_early_shadow_pud))
@@ -149,13 +151,72 @@ static void __init kasan_populate_pud(pgd_t *pgd,
 		set_pgd(pgd, pfn_pgd(PFN_DOWN(__pa(base_pud)), PAGE_TABLE));
 }
 
-#define kasan_early_shadow_pgd_next			(pgtable_l4_enabled ?	\
+static void __init kasan_populate_p4d(pgd_t *pgd,
+				      unsigned long vaddr, unsigned long end,
+				      bool early)
+{
+	phys_addr_t phys_addr;
+	p4d_t *p4dp, *base_p4d;
+	unsigned long next;
+
+	if (early) {
+		/*
+		 * We can't use pgd_page_vaddr here as it would return a linear
+		 * mapping address but it is not mapped yet, but when populating
+		 * early_pg_dir, we need the physical address and when populating
+		 * swapper_pg_dir, we need the kernel virtual address so use
+		 * pt_ops facility.
+		 */
+		base_p4d = pt_ops.get_p4d_virt(pfn_to_phys(_pgd_pfn(*pgd)));
+	} else {
+		base_p4d = (p4d_t *)pgd_page_vaddr(*pgd);
+		if (base_p4d == lm_alias(kasan_early_shadow_p4d))
+			base_p4d = memblock_alloc(PTRS_PER_PUD * sizeof(p4d_t), PAGE_SIZE);
+	}
+
+	p4dp = base_p4d + p4d_index(vaddr);
+
+	do {
+		next = p4d_addr_end(vaddr, end);
+
+		if (p4d_none(*p4dp) && IS_ALIGNED(vaddr, P4D_SIZE) && (next - vaddr) >= P4D_SIZE) {
+			if (early) {
+				phys_addr = __pa(((uintptr_t)kasan_early_shadow_pud));
+				set_p4d(p4dp, pfn_p4d(PFN_DOWN(phys_addr), PAGE_TABLE));
+				continue;
+			} else {
+				phys_addr = memblock_phys_alloc(P4D_SIZE, P4D_SIZE);
+				if (phys_addr) {
+					set_p4d(p4dp, pfn_p4d(PFN_DOWN(phys_addr), PAGE_KERNEL));
+					continue;
+				}
+			}
+		}
+
+		kasan_populate_pud((pgd_t *)p4dp, vaddr, next, early);
+	} while (p4dp++, vaddr = next, vaddr != end);
+
+	/*
+	 * Wait for the whole P4D to be populated before setting the P4D in
+	 * the page table, otherwise, if we did set the P4D before populating
+	 * it entirely, memblock could allocate a page at a physical address
+	 * where KASAN is not populated yet and then we'd get a page fault.
+	 */
+	if (!early)
+		set_pgd(pgd, pfn_pgd(PFN_DOWN(__pa(base_p4d)), PAGE_TABLE));
+}
+
+#define kasan_early_shadow_pgd_next			(pgtable_l5_enabled ?	\
+				(uintptr_t)kasan_early_shadow_p4d :	\
+				(pgtable_l4_enabled ?	\
 				(uintptr_t)kasan_early_shadow_pud :	\
-				(uintptr_t)kasan_early_shadow_pmd)
-#define kasan_populate_pgd_next(pgdp, vaddr, next, early)	(pgtable_l4_enabled ?	\
+				(uintptr_t)kasan_early_shadow_pmd))
+#define kasan_populate_pgd_next(pgdp, vaddr, next, early)	(pgtable_l5_enabled ?	\
+		kasan_populate_p4d(pgdp, vaddr, next, early) :	\
+		(pgtable_l4_enabled ?	\
 		kasan_populate_pud(pgdp, vaddr, next, early) :	\
-		kasan_populate_pmd((pud_t *)pgdp, vaddr, next))
+		kasan_populate_pmd((pud_t *)pgdp, vaddr, next)))
 
 static void __init kasan_populate_pgd(pgd_t *pgdp,
 				      unsigned long vaddr, unsigned long end,
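
The rewritten macros above pick the deepest enabled paging level at run time rather than compile time, since a kernel built for sv57 may still boot with sv48 or sv39 paging. A minimal, self-contained model of that fold-to-enabled-level dispatch, using illustrative names rather than the kernel API:

/* Toy model of kasan_populate_pgd_next: a ternary macro chain that
 * descends through p4d, pud, or pmd depending on which page-table
 * levels the booted hardware actually enables. */
#include <stdio.h>
#include <stdbool.h>

static bool pgtable_l5_enabled, pgtable_l4_enabled;

static void populate_p4d(void) { puts("descend via p4d (sv57)"); }
static void populate_pud(void) { puts("descend via pud (sv48)"); }
static void populate_pmd(void) { puts("descend via pmd (sv39)"); }

#define populate_pgd_next()			\
	(pgtable_l5_enabled ? populate_p4d() :	\
	(pgtable_l4_enabled ? populate_pud() :	\
	 populate_pmd()))

int main(void)
{
	pgtable_l5_enabled = true;  pgtable_l4_enabled = true;
	populate_pgd_next();		/* sv57 path */
	pgtable_l5_enabled = false;
	populate_pgd_next();		/* sv48 path */
	pgtable_l4_enabled = false;
	populate_pgd_next();		/* sv39 path */
	return 0;
}
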
@@ -219,6 +280,14 @@ asmlinkage void __init kasan_early_init(void)
 					PAGE_TABLE));
 	}
 
+	if (pgtable_l5_enabled) {
+		for (i = 0; i < PTRS_PER_P4D; ++i)
+			set_p4d(kasan_early_shadow_p4d + i,
+				pfn_p4d(PFN_DOWN
+					(__pa(((uintptr_t)kasan_early_shadow_pud))),
+					PAGE_TABLE));
+	}
+
 	kasan_populate_pgd(early_pg_dir + pgd_index(KASAN_SHADOW_START),
 			   KASAN_SHADOW_START, KASAN_SHADOW_END, true);
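
This early-init addition mirrors what the sv48 code already does one level down: every slot of kasan_early_shadow_p4d is pointed at the single shared kasan_early_shadow_pud, so the whole shadow region initially resolves through one zeroed hierarchy. A toy illustration of that aliasing, with plain pointers as stand-ins for page-table entries:

/* Illustrative stand-ins, not kernel code: point every entry of a new
 * top-level table at one shared lower-level table, so any index reaches
 * the same (zero-filled) shadow until real pages are populated. */
#include <stdio.h>

#define PTRS_PER_P4D 512

static long shared_shadow_pud;			/* stands in for kasan_early_shadow_pud */
static long *early_shadow_p4d[PTRS_PER_P4D];	/* stands in for kasan_early_shadow_p4d */

int main(void)
{
	for (int i = 0; i < PTRS_PER_P4D; ++i)
		early_shadow_p4d[i] = &shared_shadow_pud;

	printf("entries 0 and 511 alias the same pud: %d\n",
	       early_shadow_p4d[0] == early_shadow_p4d[511]);
	return 0;
}
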
@@ -244,9 +313,27 @@ static void __init kasan_populate(void *start, void *end)
 	memset(start, KASAN_SHADOW_INIT, end - start);
 }
 
+static void __init kasan_shallow_populate_pmd(pgd_t *pgdp,
+					      unsigned long vaddr, unsigned long end)
+{
+	unsigned long next;
+	pmd_t *pmdp, *base_pmd;
+	bool is_kasan_pte;
+
+	base_pmd = (pmd_t *)pgd_page_vaddr(*pgdp);
+	pmdp = base_pmd + pmd_index(vaddr);
+
+	do {
+		next = pmd_addr_end(vaddr, end);
+		is_kasan_pte = (pmd_pgtable(*pmdp) == lm_alias(kasan_early_shadow_pte));
+
+		if (is_kasan_pte)
+			pmd_clear(pmdp);
+	} while (pmdp++, vaddr = next, vaddr != end);
+}
+
 static void __init kasan_shallow_populate_pud(pgd_t *pgdp,
-					      unsigned long vaddr, unsigned long end,
-					      bool kasan_populate)
+					      unsigned long vaddr, unsigned long end)
 {
 	unsigned long next;
 	pud_t *pudp, *base_pud;
@@ -256,21 +343,60 @@ static void __init kasan_shallow_populate_pud(pgd_t *pgdp,
 	base_pud = (pud_t *)pgd_page_vaddr(*pgdp);
 	pudp = base_pud + pud_index(vaddr);
 
-	if (kasan_populate)
-		memcpy(base_pud, (void *)kasan_early_shadow_pgd_next,
-		       sizeof(pud_t) * PTRS_PER_PUD);
-
 	do {
 		next = pud_addr_end(vaddr, end);
 
 		is_kasan_pmd = (pud_pgtable(*pudp) == lm_alias(kasan_early_shadow_pmd));
 
-		if (is_kasan_pmd) {
-			base_pmd = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
-			set_pud(pudp, pfn_pud(PFN_DOWN(__pa(base_pmd)), PAGE_TABLE));
-		}
+		if (!is_kasan_pmd)
+			continue;
+
+		base_pmd = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
+		set_pud(pudp, pfn_pud(PFN_DOWN(__pa(base_pmd)), PAGE_TABLE));
 
 		if (IS_ALIGNED(vaddr, PUD_SIZE) && (next - vaddr) >= PUD_SIZE)
 			continue;
 
 		memcpy(base_pmd, (void *)kasan_early_shadow_pmd, PAGE_SIZE);
+		kasan_shallow_populate_pmd((pgd_t *)pudp, vaddr, next);
 	} while (pudp++, vaddr = next, vaddr != end);
 }
 
+static void __init kasan_shallow_populate_p4d(pgd_t *pgdp,
+					      unsigned long vaddr, unsigned long end)
+{
+	unsigned long next;
+	p4d_t *p4dp, *base_p4d;
+	pud_t *base_pud;
+	bool is_kasan_pud;
+
+	base_p4d = (p4d_t *)pgd_page_vaddr(*pgdp);
+	p4dp = base_p4d + p4d_index(vaddr);
+
+	do {
+		next = p4d_addr_end(vaddr, end);
+
+		is_kasan_pud = (p4d_pgtable(*p4dp) == lm_alias(kasan_early_shadow_pud));
+
+		if (!is_kasan_pud)
+			continue;
+
+		base_pud = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
+		set_p4d(p4dp, pfn_p4d(PFN_DOWN(__pa(base_pud)), PAGE_TABLE));
+
+		if (IS_ALIGNED(vaddr, P4D_SIZE) && (next - vaddr) >= P4D_SIZE)
+			continue;
+
+		memcpy(base_pud, (void *)kasan_early_shadow_pud, PAGE_SIZE);
+		kasan_shallow_populate_pud((pgd_t *)p4dp, vaddr, next);
+	} while (p4dp++, vaddr = next, vaddr != end);
+}
+
+#define kasan_shallow_populate_pgd_next(pgdp, vaddr, next)	\
+	(pgtable_l5_enabled ?					\
+	kasan_shallow_populate_p4d(pgdp, vaddr, next) :		\
+	(pgtable_l4_enabled ?					\
+	kasan_shallow_populate_pud(pgdp, vaddr, next) :		\
+	kasan_shallow_populate_pmd(pgdp, vaddr, next)))
+
 static void __init kasan_shallow_populate_pgd(unsigned long vaddr, unsigned long end)
 {
 	unsigned long next;
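
All of the walkers in this patch share one iteration idiom: compute the next naturally aligned boundary of the current level, clamp it to end, and handle exactly one table entry per pass. A standalone sketch of that idiom, assuming the sv57 P4D span of 2^39 bytes:

/* Sketch of the p4d_addr_end()-style walk: step through [vaddr, end)
 * one p4d entry at a time, with the last step clamped to 'end'. */
#include <stdio.h>
#include <stdint.h>

#define P4D_SIZE (1ULL << 39)	/* span covered by one p4d entry under sv57 */

static uint64_t p4d_addr_end(uint64_t addr, uint64_t end)
{
	uint64_t boundary = (addr + P4D_SIZE) & ~(P4D_SIZE - 1);
	return boundary < end ? boundary : end;
}

int main(void)
{
	uint64_t vaddr = 0x100000000ULL, end = 0x18000000000ULL, next;

	do {
		next = p4d_addr_end(vaddr, end);
		printf("entry covers [%#llx, %#llx)\n",
		       (unsigned long long)vaddr, (unsigned long long)next);
	} while (vaddr = next, vaddr != end);
	return 0;
}
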
@@ -291,7 +417,8 @@ static void __init kasan_shallow_populate_pgd(unsigned long vaddr, unsigned long end)
 		if (IS_ALIGNED(vaddr, PGDIR_SIZE) && (next - vaddr) >= PGDIR_SIZE)
 			continue;
 
-		kasan_shallow_populate_pud(pgd_k, vaddr, next, is_kasan_pgd_next);
+		memcpy(p, (void *)kasan_early_shadow_pgd_next, PAGE_SIZE);
+		kasan_shallow_populate_pgd_next(pgd_k, vaddr, next);
 	} while (pgd_k++, vaddr = next, vaddr != end);
 }
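
A subtlety worth noting from the comment in kasan_populate_p4d() above: the new table is fully populated before set_pgd() publishes it, because memblock could otherwise return memory whose shadow is not yet mapped, and the next access would fault. A compressed, illustrative model of that populate-then-publish ordering (stand-in names, not kernel code):

/* Illustrative only: fill a new table completely, and only then make it
 * reachable from the slot a page-table walk would consult. */
#include <stdio.h>
#include <string.h>

#define ENTRIES 512

static long *live_pgd_slot;		/* what a lookup would walk through */

static void publish(long *table)	/* stands in for set_pgd() */
{
	live_pgd_slot = table;
}

int main(void)
{
	static long table[ENTRIES];

	memset(table, 0xAA, sizeof(table));	/* populate every entry first */
	publish(table);				/* only then make it reachable */
	printf("published; first entry = %#lx\n", live_pgd_slot[0]);
	return 0;
}
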