Commit 8b4bb0ad authored by Christophe Leroy, committed by Michael Ellerman

powerpc/code-patching: Speed up page mapping/unmapping

Since commit 591b4b26 ("powerpc/code-patching: Pre-map patch area")
the patch area is premapped so intermediate page tables are already
allocated.

Use __set_pte_at() directly instead of the heavy map_kernel_page(),
and for unmapping just do a pte_clear() followed by a flush.

__set_pte_at() can be used directly without the filters in
set_pte_at() because we are mapping a normal, non-executable page.

Make sure gcc knows text_poke_area is page aligned in order to
optimise the flush.

This change reduces the time needed to activate ftrace on
an 8xx by 66% (588000 tb ticks instead of 1744000).
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
[mpe: Add ptesync needed on radix to avoid spurious fault]
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20220815114840.1468656-1-mpe@ellerman.id.au
parent fd20b60a
@@ -94,17 +94,20 @@ void __init poking_init(void)
 	static_branch_enable(&poking_init_done);
 }
 
+static unsigned long get_patch_pfn(void *addr)
+{
+	if (IS_ENABLED(CONFIG_MODULES) && is_vmalloc_or_module_addr(addr))
+		return vmalloc_to_pfn(addr);
+	else
+		return __pa_symbol(addr) >> PAGE_SHIFT;
+}
+
 /*
  * This can be called for kernel text or a module.
  */
 static int map_patch_area(void *addr, unsigned long text_poke_addr)
 {
-	unsigned long pfn;
-
-	if (IS_ENABLED(CONFIG_MODULES) && is_vmalloc_or_module_addr(addr))
-		pfn = vmalloc_to_pfn(addr);
-	else
-		pfn = __pa_symbol(addr) >> PAGE_SHIFT;
+	unsigned long pfn = get_patch_pfn(addr);
 
 	return map_kernel_page(text_poke_addr, (pfn << PAGE_SHIFT), PAGE_KERNEL);
 }
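
The hunk above factors the pfn lookup out of map_patch_area() into get_patch_pfn() so that the fast path in the next hunk can reuse it. Below is a minimal sketch of the resulting split; the two example_* wrappers are hypothetical and not part of the patch, only get_patch_pfn(), map_patch_area(), map_kernel_page(), pfn_pte() and __set_pte_at() come from the diff.

/* Illustrative sketch only: how the shared helper feeds both paths. */

/* One-time setup path: the heavy call that may still allocate
 * intermediate page tables for the poke area. */
static int example_premap(void *addr, unsigned long text_poke_addr)
{
	return map_patch_area(addr, text_poke_addr);	/* -> map_kernel_page() */
}

/* Per-patch fast path: the page tables already exist, so a single
 * PTE store built from get_patch_pfn() is enough. */
static void example_map_fast(void *addr, unsigned long text_poke_addr, pte_t *pte)
{
	unsigned long pfn = get_patch_pfn(addr);

	__set_pte_at(&init_mm, text_poke_addr, pte, pfn_pte(pfn, PAGE_KERNEL), 0);
}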
@@ -149,17 +152,22 @@ static int __do_patch_instruction(u32 *addr, ppc_inst_t instr)
 	int err;
 	u32 *patch_addr;
 	unsigned long text_poke_addr;
+	pte_t *pte;
+	unsigned long pfn = get_patch_pfn(addr);
 
-	text_poke_addr = (unsigned long)__this_cpu_read(text_poke_area)->addr;
+	text_poke_addr = (unsigned long)__this_cpu_read(text_poke_area)->addr & PAGE_MASK;
 	patch_addr = (u32 *)(text_poke_addr + offset_in_page(addr));
 
-	err = map_patch_area(addr, text_poke_addr);
-	if (err)
-		return err;
+	pte = virt_to_kpte(text_poke_addr);
+	__set_pte_at(&init_mm, text_poke_addr, pte, pfn_pte(pfn, PAGE_KERNEL), 0);
+	/* See ptesync comment in radix__set_pte_at() */
+	if (radix_enabled())
+		asm volatile("ptesync": : :"memory");
 
 	err = __patch_instruction(addr, instr, patch_addr);
 
-	unmap_patch_area(text_poke_addr);
+	pte_clear(&init_mm, text_poke_addr, pte);
+	flush_tlb_kernel_range(text_poke_addr, text_poke_addr + PAGE_SIZE);
 
 	return err;
 }
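
Read together, the two hunks reduce the per-patch work to a handful of cheap steps. The following is a comment-annotated condensation of the new __do_patch_instruction() body above, a sketch rather than a drop-in replacement: the locals (pte, pfn, text_poke_addr, addr, instr, patch_addr, err) come from the function itself, and the ~66% ftrace-activation speedup on 8xx quoted in the commit message is attributed to the change as a whole.

	/* 1. Map: the poke area was premapped at init (commit 591b4b26),
	 *    so virt_to_kpte() finds an existing PTE slot and a single
	 *    store installs the target page, with no map_kernel_page()
	 *    table walk or allocation. */
	pte = virt_to_kpte(text_poke_addr);
	__set_pte_at(&init_mm, text_poke_addr, pte, pfn_pte(pfn, PAGE_KERNEL), 0);

	/* 2. Order: on radix a ptesync is needed so the new PTE is visible
	 *    before use and no spurious fault is taken (mpe's addition). */
	if (radix_enabled())
		asm volatile("ptesync": : :"memory");

	/* 3. Patch the instruction through the temporary mapping. */
	err = __patch_instruction(addr, instr, patch_addr);

	/* 4. Unmap: clear the PTE and flush exactly one page. Because
	 *    text_poke_addr was masked with PAGE_MASK, gcc can prove the
	 *    range is a single aligned page and optimise the flush. */
	pte_clear(&init_mm, text_poke_addr, pte);
	flush_tlb_kernel_range(text_poke_addr, text_poke_addr + PAGE_SIZE);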