Commit 78cb0945 authored by Christophe Leroy, committed by Michael Ellerman

powerpc: Handle error in mark_rodata_ro() and mark_initmem_nx()

mark_rodata_ro() and mark_initmem_nx() use functions that can
fail like set_memory_nx() and set_memory_ro(), leading to a not
protected kernel.

In case of failure, panic.

Link: https://github.com/KSPP/linux/issues/7
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://msgid.link/b16329611deb89e1af505d43f0e2a91310584d26.1710587887.git.christophe.leroy@csgroup.eu
parent 66a27aba
...@@ -193,7 +193,7 @@ static bool is_module_segment(unsigned long addr) ...@@ -193,7 +193,7 @@ static bool is_module_segment(unsigned long addr)
return true; return true;
} }
void mmu_mark_initmem_nx(void) int mmu_mark_initmem_nx(void)
{ {
int nb = mmu_has_feature(MMU_FTR_USE_HIGH_BATS) ? 8 : 4; int nb = mmu_has_feature(MMU_FTR_USE_HIGH_BATS) ? 8 : 4;
int i; int i;
...@@ -230,9 +230,10 @@ void mmu_mark_initmem_nx(void) ...@@ -230,9 +230,10 @@ void mmu_mark_initmem_nx(void)
mtsr(mfsr(i << 28) | 0x10000000, i << 28); mtsr(mfsr(i << 28) | 0x10000000, i << 28);
} }
return 0;
} }
void mmu_mark_rodata_ro(void) int mmu_mark_rodata_ro(void)
{ {
int nb = mmu_has_feature(MMU_FTR_USE_HIGH_BATS) ? 8 : 4; int nb = mmu_has_feature(MMU_FTR_USE_HIGH_BATS) ? 8 : 4;
int i; int i;
...@@ -245,6 +246,8 @@ void mmu_mark_rodata_ro(void) ...@@ -245,6 +246,8 @@ void mmu_mark_rodata_ro(void)
} }
update_bats(); update_bats();
return 0;
} }
/* /*
......
...@@ -160,11 +160,11 @@ static inline unsigned long p_block_mapped(phys_addr_t pa) { return 0; } ...@@ -160,11 +160,11 @@ static inline unsigned long p_block_mapped(phys_addr_t pa) { return 0; }
#endif #endif
#if defined(CONFIG_PPC_BOOK3S_32) || defined(CONFIG_PPC_8xx) || defined(CONFIG_PPC_E500)
/*
 * Per-MMU hooks used by mark_initmem_nx()/mark_rodata_ro().
 * Return 0 on success, negative errno on failure so callers can panic
 * instead of silently running with an unprotected kernel.
 */
int mmu_mark_initmem_nx(void);
int mmu_mark_rodata_ro(void);
#else
/* No block-mapping support: nothing to do, always succeeds. */
static inline int mmu_mark_initmem_nx(void) { return 0; }
static inline int mmu_mark_rodata_ro(void) { return 0; }
#endif
#ifdef CONFIG_PPC_8xx #ifdef CONFIG_PPC_8xx
......
...@@ -119,23 +119,26 @@ void __init mmu_mapin_immr(void) ...@@ -119,23 +119,26 @@ void __init mmu_mapin_immr(void)
PAGE_KERNEL_NCG, MMU_PAGE_512K, true); PAGE_KERNEL_NCG, MMU_PAGE_512K, true);
} }
/*
 * Map [offset, top) of RAM at PAGE_OFFSET + offset with the given protection,
 * using 8M hugepages where alignment allows and 512K pages at the edges.
 * Stops at the first mapping failure and returns its error code (0 on success).
 */
static int mmu_mapin_ram_chunk(unsigned long offset, unsigned long top,
			       pgprot_t prot, bool new)
{
	unsigned long v = PAGE_OFFSET + offset;
	unsigned long p = offset;
	int err = 0;

	WARN_ON(!IS_ALIGNED(offset, SZ_512K) || !IS_ALIGNED(top, SZ_512K));

	/* 512K pages up to the first 8M boundary. */
	for (; p < ALIGN(p, SZ_8M) && p < top && !err; p += SZ_512K, v += SZ_512K)
		err = __early_map_kernel_hugepage(v, p, prot, MMU_PAGE_512K, new);
	/* 8M hugepages for the bulk of the range. */
	for (; p < ALIGN_DOWN(top, SZ_8M) && p < top && !err; p += SZ_8M, v += SZ_8M)
		err = __early_map_kernel_hugepage(v, p, prot, MMU_PAGE_8M, new);
	/* 512K pages for the tail below top. */
	for (; p < ALIGN_DOWN(top, SZ_512K) && p < top && !err; p += SZ_512K, v += SZ_512K)
		err = __early_map_kernel_hugepage(v, p, prot, MMU_PAGE_512K, new);

	/* Existing mappings were changed: flush stale TLB entries. */
	if (!new)
		flush_tlb_kernel_range(PAGE_OFFSET + v, PAGE_OFFSET + top);

	return err;
}
unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top) unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top)
...@@ -166,27 +169,33 @@ unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top) ...@@ -166,27 +169,33 @@ unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top)
return top; return top;
} }
void mmu_mark_initmem_nx(void) int mmu_mark_initmem_nx(void)
{ {
unsigned long etext8 = ALIGN(__pa(_etext), SZ_8M); unsigned long etext8 = ALIGN(__pa(_etext), SZ_8M);
unsigned long sinittext = __pa(_sinittext); unsigned long sinittext = __pa(_sinittext);
unsigned long boundary = strict_kernel_rwx_enabled() ? sinittext : etext8; unsigned long boundary = strict_kernel_rwx_enabled() ? sinittext : etext8;
unsigned long einittext8 = ALIGN(__pa(_einittext), SZ_8M); unsigned long einittext8 = ALIGN(__pa(_einittext), SZ_8M);
int err = 0;
if (!debug_pagealloc_enabled_or_kfence()) if (!debug_pagealloc_enabled_or_kfence())
mmu_mapin_ram_chunk(boundary, einittext8, PAGE_KERNEL, false); err = mmu_mapin_ram_chunk(boundary, einittext8, PAGE_KERNEL, false);
mmu_pin_tlb(block_mapped_ram, false); mmu_pin_tlb(block_mapped_ram, false);
return err;
} }
#ifdef CONFIG_STRICT_KERNEL_RWX
/*
 * Remap kernel text/rodata read-only+executable up to _sinittext.
 * Returns 0 on success or the error from mmu_mapin_ram_chunk().
 */
int mmu_mark_rodata_ro(void)
{
	unsigned long sinittext = __pa(_sinittext);
	int err;

	err = mmu_mapin_ram_chunk(0, sinittext, PAGE_KERNEL_ROX, false);
	if (IS_ENABLED(CONFIG_PIN_TLB_DATA))
		mmu_pin_tlb(block_mapped_ram, true);

	return err;
}
#endif
......
...@@ -285,19 +285,23 @@ void __init adjust_total_lowmem(void) ...@@ -285,19 +285,23 @@ void __init adjust_total_lowmem(void)
} }
#ifdef CONFIG_STRICT_KERNEL_RWX
/*
 * Re-create the CAM entries read-only. Fails (-EINVAL) if the remap
 * does not cover all of low memory, so the caller can panic rather
 * than run with partially writable kernel mappings.
 */
int mmu_mark_rodata_ro(void)
{
	unsigned long remapped;

	remapped = map_mem_in_cams(__max_low_memory, CONFIG_LOWMEM_CAM_NUM, false, false);

	if (WARN_ON(__max_low_memory != remapped))
		return -EINVAL;

	return 0;
}
#endif
int mmu_mark_initmem_nx(void)
{
	/* Everything is done in mmu_mark_rodata_ro() */
	return 0;
}
void setup_initial_memory_limit(phys_addr_t first_memblock_base, void setup_initial_memory_limit(phys_addr_t first_memblock_base,
......
...@@ -130,31 +130,41 @@ void __init mapin_ram(void) ...@@ -130,31 +130,41 @@ void __init mapin_ram(void)
} }
} }
void mark_initmem_nx(void) static int __mark_initmem_nx(void)
{ {
unsigned long numpages = PFN_UP((unsigned long)_einittext) - unsigned long numpages = PFN_UP((unsigned long)_einittext) -
PFN_DOWN((unsigned long)_sinittext); PFN_DOWN((unsigned long)_sinittext);
int err;
mmu_mark_initmem_nx(); err = mmu_mark_initmem_nx();
if (!v_block_mapped((unsigned long)_sinittext)) { if (!v_block_mapped((unsigned long)_sinittext)) {
set_memory_nx((unsigned long)_sinittext, numpages); err = set_memory_nx((unsigned long)_sinittext, numpages);
set_memory_rw((unsigned long)_sinittext, numpages); if (err)
return err;
err = set_memory_rw((unsigned long)_sinittext, numpages);
} }
return err;
}
/*
 * Public entry point: a failure here would leave init memory executable,
 * so treat any error as fatal.
 */
void mark_initmem_nx(void)
{
	int err = __mark_initmem_nx();

	if (err)
		panic("%s() failed, err = %d\n", __func__, err);
}
#ifdef CONFIG_STRICT_KERNEL_RWX #ifdef CONFIG_STRICT_KERNEL_RWX
void mark_rodata_ro(void) static int __mark_rodata_ro(void)
{ {
unsigned long numpages; unsigned long numpages;
if (IS_ENABLED(CONFIG_STRICT_MODULE_RWX) && mmu_has_feature(MMU_FTR_HPTE_TABLE)) if (IS_ENABLED(CONFIG_STRICT_MODULE_RWX) && mmu_has_feature(MMU_FTR_HPTE_TABLE))
pr_warn("This platform has HASH MMU, STRICT_MODULE_RWX won't work\n"); pr_warn("This platform has HASH MMU, STRICT_MODULE_RWX won't work\n");
if (v_block_mapped((unsigned long)_stext + 1)) { if (v_block_mapped((unsigned long)_stext + 1))
mmu_mark_rodata_ro(); return mmu_mark_rodata_ro();
return;
}
/* /*
* mark text and rodata as read only. __end_rodata is set by * mark text and rodata as read only. __end_rodata is set by
...@@ -164,6 +174,14 @@ void mark_rodata_ro(void) ...@@ -164,6 +174,14 @@ void mark_rodata_ro(void)
numpages = PFN_UP((unsigned long)__end_rodata) - numpages = PFN_UP((unsigned long)__end_rodata) -
PFN_DOWN((unsigned long)_stext); PFN_DOWN((unsigned long)_stext);
set_memory_ro((unsigned long)_stext, numpages); return set_memory_ro((unsigned long)_stext, numpages);
}
void mark_rodata_ro(void)
{
int err = __mark_rodata_ro();
if (err)
panic("%s() failed, err = %d\n", __func__, err);
} }
#endif #endif
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment