Commit f5780555 authored by Palmer Dabbelt

Merge patch series "riscv: Introduce KASLR"

Alexandre Ghiti <alexghiti@rivosinc.com> says:

The following KASLR implementation allows the kernel mapping to be randomized:

- virtually: we expect the bootloader to provide a seed in the device-tree
  (see the bootloader-side sketch after this list)
- physically: implemented only in the EFI stub, it relies on the firmware to
  provide a seed using EFI_RNG_PROTOCOL. arm64 has a similar implementation,
  so patch 3 factors the KASLR-related functions out into common code that
  riscv can take advantage of.
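
The series itself contains no bootloader code; as a minimal sketch of the
bootloader side (hypothetical helper, assuming a writable FDT blob and a random
value from some hardware RNG; only the /chosen/kaslr-seed property name and the
libfdt calls are taken from the series), the seed could be installed like this:

#include <stdint.h>
#include <libfdt.h>

/* Hypothetical bootloader helper: store a random u64 in /chosen/kaslr-seed. */
int install_kaslr_seed(void *fdt, uint64_t seed)
{
	int node = fdt_path_offset(fdt, "/chosen");

	if (node < 0)
		node = fdt_add_subnode(fdt, 0, "chosen");	/* 0 = root node */
	if (node < 0)
		return node;

	/* fdt_setprop_u64() stores the value big-endian, as the kernel expects. */
	return fdt_setprop_u64(fdt, node, "kaslr-seed", seed);
}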

The new virtual kernel location is limited by the early page table, which only
has one PUD, and by the PMD alignment constraint, so the kernel can only take
fewer than 512 positions.
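
To make that limit concrete, here is a back-of-the-envelope sketch (assuming
the usual RV64 values PUD_SIZE = 1 GiB and PMD_SIZE = 2 MiB and an illustrative
32 MiB kernel image; the real computation lives in setup_vm() further down):

#include <stdio.h>

#define PUD_SIZE (1UL << 30)	/* what the single early PUD covers: 1 GiB */
#define PMD_SIZE (1UL << 21)	/* alignment of the kernel mapping: 2 MiB */

int main(void)
{
	unsigned long kernel_size = 32UL << 20;		/* assumed image size */
	unsigned long seed = 0x123456789abcdefUL;	/* illustrative seed */
	unsigned long nr_pos = (PUD_SIZE - kernel_size) / PMD_SIZE;
	unsigned long virt_offset = (seed % nr_pos) * PMD_SIZE;

	/* 496 candidate slots, 2 MiB apart, hence "< 512 positions" */
	printf("nr_pos=%lu virt_offset=0x%lx\n", nr_pos, virt_offset);
	return 0;
}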

* b4-shazam-merge:
  riscv: libstub: Implement KASLR by using generic functions
  libstub: Fix compilation warning for rv32
  arm64: libstub: Move KASLR handling functions to kaslr.c
  riscv: Dump out kernel offset information on panic
  riscv: Introduce virtual kernel mapping KASLR

Link: https://lore.kernel.org/r/20230722123850.634544-1-alexghiti@rivosinc.com
Signed-off-by: Palmer Dabbelt <palmer@rivosinc.com>
parents f0936363 b7ac4b8e
@@ -156,4 +156,6 @@ static inline void efi_capsule_flush_cache_range(void *addr, int size)
efi_status_t efi_handle_corrupted_x18(efi_status_t s, const char *f);
void efi_icache_sync(unsigned long start, unsigned long end);
#endif /* _ASM_EFI_H */
@@ -720,6 +720,25 @@ config RELOCATABLE
If unsure, say N.
config RANDOMIZE_BASE
bool "Randomize the address of the kernel image"
select RELOCATABLE
depends on MMU && 64BIT && !XIP_KERNEL
help
Randomizes the virtual address at which the kernel image is
loaded, as a security feature that deters exploit attempts
relying on knowledge of the location of kernel internals.
It is the bootloader's job to provide entropy, by passing a
random u64 value in /chosen/kaslr-seed at kernel entry.
When booting via the UEFI stub, it will invoke the firmware's
EFI_RNG_PROTOCOL implementation (if available) to supply entropy
to the kernel proper. In addition, it will randomise the physical
location of the kernel Image as well.
If unsure, say N.
endmenu # "Kernel features"
menu "Boot options"
@@ -45,4 +45,6 @@ void arch_efi_call_virt_teardown(void);
unsigned long stext_offset(void);
void efi_icache_sync(unsigned long start, unsigned long end);
#endif /* _ASM_EFI_H */
@@ -106,6 +106,7 @@ typedef struct page *pgtable_t;
struct kernel_mapping {
unsigned long page_offset;
unsigned long virt_addr;
unsigned long virt_offset;
uintptr_t phys_addr;
uintptr_t size;
/* Offset between linear mapping virtual address and kernel load address */
@@ -185,6 +186,8 @@ extern phys_addr_t __phys_addr_symbol(unsigned long x);
#define sym_to_pfn(x) __phys_to_pfn(__pa_symbol(x))
unsigned long kaslr_offset(void);
#endif /* __ASSEMBLY__ */
#define virt_addr_valid(vaddr) ({ \
@@ -27,6 +27,7 @@ __efistub__start = _start;
__efistub__start_kernel = _start_kernel;
__efistub__end = _end;
__efistub__edata = _edata;
__efistub___init_text_end = __init_text_end;
__efistub_screen_info = screen_info;
#endif
@@ -35,5 +35,5 @@ $(obj)/string.o: $(srctree)/lib/string.c FORCE
$(obj)/ctype.o: $(srctree)/lib/ctype.c FORCE
$(call if_changed_rule,cc_o_c)
obj-y := cmdline_early.pi.o string.pi.o ctype.pi.o lib-fdt.pi.o lib-fdt_ro.pi.o
obj-y := cmdline_early.pi.o fdt_early.pi.o string.pi.o ctype.pi.o lib-fdt.pi.o lib-fdt_ro.pi.o
extra-y := $(patsubst %.pi.o,%.o,$(obj-y))
@@ -14,6 +14,7 @@ static char early_cmdline[COMMAND_LINE_SIZE];
* LLVM complain because the function is actually unused in this file).
*/
u64 set_satp_mode_from_cmdline(uintptr_t dtb_pa);
bool set_nokaslr_from_cmdline(uintptr_t dtb_pa);
static char *get_early_cmdline(uintptr_t dtb_pa)
{
@@ -60,3 +61,15 @@ u64 set_satp_mode_from_cmdline(uintptr_t dtb_pa)
return match_noXlvl(cmdline);
}
static bool match_nokaslr(char *cmdline)
{
return strstr(cmdline, "nokaslr");
}
bool set_nokaslr_from_cmdline(uintptr_t dtb_pa)
{
char *cmdline = get_early_cmdline(dtb_pa);
return match_nokaslr(cmdline);
}
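
Since the match above is a plain strstr() against the early command line,
virtual KASLR can be disabled from the bootloader by putting "nokaslr"
anywhere on the kernel command line, for example (illustrative arguments only):

console=ttyS0 root=/dev/vda rw nokaslr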
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/types.h>
#include <linux/init.h>
#include <linux/libfdt.h>
/*
* Declare the functions that are exported (but prefixed) here so that LLVM
* does not complain it lacks the 'static' keyword (which, if added, makes
* LLVM complain because the function is actually unused in this file).
*/
u64 get_kaslr_seed(uintptr_t dtb_pa);
u64 get_kaslr_seed(uintptr_t dtb_pa)
{
int node, len;
fdt64_t *prop;
u64 ret;
node = fdt_path_offset((void *)dtb_pa, "/chosen");
if (node < 0)
return 0;
prop = fdt_getprop_w((void *)dtb_pa, node, "kaslr-seed", &len);
if (!prop || len != sizeof(u64))
return 0;
ret = fdt64_to_cpu(*prop);
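/* Wipe the seed from the DT so it cannot be recovered after boot. */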
*prop = 0;
return ret;
}
@@ -21,6 +21,7 @@
#include <linux/smp.h>
#include <linux/efi.h>
#include <linux/crash_dump.h>
#include <linux/panic_notifier.h>
#include <asm/acpi.h>
#include <asm/alternative.h>
@@ -347,3 +348,27 @@ void free_initmem(void)
free_initmem_default(POISON_FREE_INITMEM);
}
static int dump_kernel_offset(struct notifier_block *self,
unsigned long v, void *p)
{
pr_emerg("Kernel Offset: 0x%lx from 0x%lx\n",
kernel_map.virt_offset,
KERNEL_LINK_ADDR);
return 0;
}
static struct notifier_block kernel_offset_notifier = {
.notifier_call = dump_kernel_offset
};
static int __init register_kernel_offset_dumper(void)
{
if (IS_ENABLED(CONFIG_RANDOMIZE_BASE))
atomic_notifier_chain_register(&panic_notifier_list,
&kernel_offset_notifier);
return 0;
}
device_initcall(register_kernel_offset_dumper);
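
With CONFIG_RANDOMIZE_BASE enabled, a panic then carries a line of the
following form (the offset value is made up for illustration; the link address
shown is the usual RV64 KERNEL_LINK_ADDR):

Kernel Offset: 0x1a200000 from 0xffffffff80000000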
@@ -1014,11 +1014,45 @@ static void __init pt_ops_set_late(void)
#endif
}
#ifdef CONFIG_RANDOMIZE_BASE
extern bool __init __pi_set_nokaslr_from_cmdline(uintptr_t dtb_pa);
extern u64 __init __pi_get_kaslr_seed(uintptr_t dtb_pa);
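/*
 * "nokaslr" is acted upon in the __pi_ early code (called from setup_vm()
 * below, before early parameter parsing runs); this early_param handler
 * only logs that KASLR was disabled.
 */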
static int __init print_nokaslr(char *p)
{
pr_info("Disabled KASLR");
return 0;
}
early_param("nokaslr", print_nokaslr);
unsigned long kaslr_offset(void)
{
return kernel_map.virt_offset;
}
#endif
asmlinkage void __init setup_vm(uintptr_t dtb_pa)
{
pmd_t __maybe_unused fix_bmap_spmd, fix_bmap_epmd;
kernel_map.virt_addr = KERNEL_LINK_ADDR;
#ifdef CONFIG_RANDOMIZE_BASE
if (!__pi_set_nokaslr_from_cmdline(dtb_pa)) {
u64 kaslr_seed = __pi_get_kaslr_seed(dtb_pa);
u32 kernel_size = (uintptr_t)(&_end) - (uintptr_t)(&_start);
u32 nr_pos;
/*
* Compute the number of positions available: we are limited
* by the early page table that only has one PUD and we must
* be aligned on PMD_SIZE.
*/
nr_pos = (PUD_SIZE - kernel_size) / PMD_SIZE;
kernel_map.virt_offset = (kaslr_seed % nr_pos) * PMD_SIZE;
}
#endif
kernel_map.virt_addr = KERNEL_LINK_ADDR + kernel_map.virt_offset;
kernel_map.page_offset = _AC(CONFIG_PAGE_OFFSET, UL);
#ifdef CONFIG_XIP_KERNEL
@@ -86,10 +86,10 @@ lib-$(CONFIG_EFI_GENERIC_STUB) += efi-stub.o string.o intrinsics.o systable.o \
screen_info.o efi-stub-entry.o
lib-$(CONFIG_ARM) += arm32-stub.o
lib-$(CONFIG_ARM64) += arm64.o arm64-stub.o smbios.o
lib-$(CONFIG_ARM64) += kaslr.o arm64.o arm64-stub.o smbios.o
lib-$(CONFIG_X86) += x86-stub.o
lib-$(CONFIG_X86_64) += x86-5lvl.o
lib-$(CONFIG_RISCV) += riscv.o riscv-stub.o
lib-$(CONFIG_RISCV) += kaslr.o riscv.o riscv-stub.o
lib-$(CONFIG_LOONGARCH) += loongarch.o loongarch-stub.o
CFLAGS_arm32-stub.o := -DTEXT_OFFSET=$(TEXT_OFFSET)
@@ -14,42 +14,6 @@
#include "efistub.h"
/*
* Distro versions of GRUB may ignore the BSS allocation entirely (i.e., fail
* to provide space, and fail to zero it). Check for this condition by double
* checking that the first and the last byte of the image are covered by the
* same EFI memory map entry.
*/
static bool check_image_region(u64 base, u64 size)
{
struct efi_boot_memmap *map;
efi_status_t status;
bool ret = false;
int map_offset;
status = efi_get_memory_map(&map, false);
if (status != EFI_SUCCESS)
return false;
for (map_offset = 0; map_offset < map->map_size; map_offset += map->desc_size) {
efi_memory_desc_t *md = (void *)map->map + map_offset;
u64 end = md->phys_addr + md->num_pages * EFI_PAGE_SIZE;
/*
* Find the region that covers base, and return whether
* it covers base+size bytes.
*/
if (base >= md->phys_addr && base < end) {
ret = (base + size) <= end;
break;
}
}
efi_bs_call(free_pool, map);
return ret;
}
efi_status_t handle_kernel_image(unsigned long *image_addr,
unsigned long *image_size,
unsigned long *reserve_addr,
@@ -59,31 +23,6 @@ efi_status_t handle_kernel_image(unsigned long *image_addr,
{
efi_status_t status;
unsigned long kernel_size, kernel_codesize, kernel_memsize;
u32 phys_seed = 0;
u64 min_kimg_align = efi_get_kimg_min_align();
if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
efi_guid_t li_fixed_proto = LINUX_EFI_LOADED_IMAGE_FIXED_GUID;
void *p;
if (efi_nokaslr) {
efi_info("KASLR disabled on kernel command line\n");
} else if (efi_bs_call(handle_protocol, image_handle,
&li_fixed_proto, &p) == EFI_SUCCESS) {
efi_info("Image placement fixed by loader\n");
} else {
status = efi_get_random_bytes(sizeof(phys_seed),
(u8 *)&phys_seed);
if (status == EFI_NOT_FOUND) {
efi_info("EFI_RNG_PROTOCOL unavailable\n");
efi_nokaslr = true;
} else if (status != EFI_SUCCESS) {
efi_err("efi_get_random_bytes() failed (0x%lx)\n",
status);
efi_nokaslr = true;
}
}
}
if (image->image_base != _text) {
efi_err("FIRMWARE BUG: efi_loaded_image_t::image_base has bogus value\n");
@@ -98,50 +37,15 @@ efi_status_t handle_kernel_image(unsigned long *image_addr,
kernel_codesize = __inittext_end - _text;
kernel_memsize = kernel_size + (_end - _edata);
*reserve_size = kernel_memsize;
*image_addr = (unsigned long)_text;
if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && phys_seed != 0) {
/*
* If KASLR is enabled, and we have some randomness available,
* locate the kernel at a randomized offset in physical memory.
*/
status = efi_random_alloc(*reserve_size, min_kimg_align,
reserve_addr, phys_seed,
EFI_LOADER_CODE, EFI_ALLOC_LIMIT);
if (status != EFI_SUCCESS)
efi_warn("efi_random_alloc() failed: 0x%lx\n", status);
} else {
status = EFI_OUT_OF_RESOURCES;
}
if (status != EFI_SUCCESS) {
if (!check_image_region((u64)_text, kernel_memsize)) {
efi_err("FIRMWARE BUG: Image BSS overlaps adjacent EFI memory region\n");
} else if (IS_ALIGNED((u64)_text, min_kimg_align) &&
(u64)_end < EFI_ALLOC_LIMIT) {
/*
* Just execute from wherever we were loaded by the
* UEFI PE/COFF loader if the placement is suitable.
*/
*image_addr = (u64)_text;
*reserve_size = 0;
return EFI_SUCCESS;
}
status = efi_allocate_pages_aligned(*reserve_size, reserve_addr,
ULONG_MAX, min_kimg_align,
EFI_LOADER_CODE);
if (status != EFI_SUCCESS) {
efi_err("Failed to relocate kernel\n");
*reserve_size = 0;
return status;
}
}
*image_addr = *reserve_addr;
memcpy((void *)*image_addr, _text, kernel_size);
caches_clean_inval_pou(*image_addr, *image_addr + kernel_codesize);
efi_remap_image(*image_addr, *reserve_size, kernel_codesize);
status = efi_kaslr_relocate_kernel(image_addr,
reserve_addr, reserve_size,
kernel_size, kernel_codesize,
kernel_memsize,
efi_kaslr_get_phys_seed(image_handle));
if (status != EFI_SUCCESS)
return status;
return EFI_SUCCESS;
}
@@ -159,3 +63,8 @@ unsigned long primary_entry_offset(void)
*/
return (char *)primary_entry - _text;
}
void efi_icache_sync(unsigned long start, unsigned long end)
{
caches_clean_inval_pou(start, end);
}
@@ -1133,6 +1133,14 @@ const u8 *__efi_get_smbios_string(const struct efi_smbios_record *record,
void efi_remap_image(unsigned long image_base, unsigned alloc_size,
unsigned long code_size);
efi_status_t efi_kaslr_relocate_kernel(unsigned long *image_addr,
unsigned long *reserve_addr,
unsigned long *reserve_size,
unsigned long kernel_size,
unsigned long kernel_codesize,
unsigned long kernel_memsize,
u32 phys_seed);
u32 efi_kaslr_get_phys_seed(efi_handle_t image_handle);
asmlinkage efi_status_t __efiapi
efi_zboot_entry(efi_handle_t handle, efi_system_table_t *systab);
// SPDX-License-Identifier: GPL-2.0
/*
* Helper functions used by the EFI stub on multiple
* architectures to deal with physical address space randomization.
*/
#include <linux/efi.h>
#include "efistub.h"
/**
* efi_kaslr_get_phys_seed() - Get random seed for physical kernel KASLR
* @image_handle: Handle to the image
*
* If KASLR is not disabled, obtain a random seed using EFI_RNG_PROTOCOL
* that will be used to move the kernel physical mapping.
*
* Return: the random seed
*/
u32 efi_kaslr_get_phys_seed(efi_handle_t image_handle)
{
efi_status_t status;
u32 phys_seed;
efi_guid_t li_fixed_proto = LINUX_EFI_LOADED_IMAGE_FIXED_GUID;
void *p;
if (!IS_ENABLED(CONFIG_RANDOMIZE_BASE))
return 0;
if (efi_nokaslr) {
efi_info("KASLR disabled on kernel command line\n");
} else if (efi_bs_call(handle_protocol, image_handle,
&li_fixed_proto, &p) == EFI_SUCCESS) {
efi_info("Image placement fixed by loader\n");
} else {
status = efi_get_random_bytes(sizeof(phys_seed),
(u8 *)&phys_seed);
if (status == EFI_SUCCESS) {
return phys_seed;
} else if (status == EFI_NOT_FOUND) {
efi_info("EFI_RNG_PROTOCOL unavailable\n");
efi_nokaslr = true;
} else if (status != EFI_SUCCESS) {
efi_err("efi_get_random_bytes() failed (0x%lx)\n",
status);
efi_nokaslr = true;
}
}
return 0;
}
/*
* Distro versions of GRUB may ignore the BSS allocation entirely (i.e., fail
* to provide space, and fail to zero it). Check for this condition by double
* checking that the first and the last byte of the image are covered by the
* same EFI memory map entry.
*/
static bool check_image_region(u64 base, u64 size)
{
struct efi_boot_memmap *map;
efi_status_t status;
bool ret = false;
int map_offset;
status = efi_get_memory_map(&map, false);
if (status != EFI_SUCCESS)
return false;
for (map_offset = 0; map_offset < map->map_size; map_offset += map->desc_size) {
efi_memory_desc_t *md = (void *)map->map + map_offset;
u64 end = md->phys_addr + md->num_pages * EFI_PAGE_SIZE;
/*
* Find the region that covers base, and return whether
* it covers base+size bytes.
*/
if (base >= md->phys_addr && base < end) {
ret = (base + size) <= end;
break;
}
}
efi_bs_call(free_pool, map);
return ret;
}
/**
* efi_kaslr_relocate_kernel() - Relocate the kernel (random if KASLR enabled)
* @image_addr: Pointer to the current kernel location
* @reserve_addr: Pointer to the relocated kernel location
* @reserve_size: Size of the relocated kernel
* @kernel_size: Size of the text + data
* @kernel_codesize: Size of the text
* @kernel_memsize: Size of the text + data + bss
* @phys_seed: Random seed used for the relocation
*
* If KASLR is not enabled, this function relocates the kernel to a fixed
* address (or leaves it at its current location). If KASLR is enabled, the
* kernel's physical location is randomized using the provided seed.
*
* Return: status code, EFI_SUCCESS if relocation is successful
*/
efi_status_t efi_kaslr_relocate_kernel(unsigned long *image_addr,
unsigned long *reserve_addr,
unsigned long *reserve_size,
unsigned long kernel_size,
unsigned long kernel_codesize,
unsigned long kernel_memsize,
u32 phys_seed)
{
efi_status_t status;
u64 min_kimg_align = efi_get_kimg_min_align();
if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && phys_seed != 0) {
/*
* If KASLR is enabled, and we have some randomness available,
* locate the kernel at a randomized offset in physical memory.
*/
status = efi_random_alloc(*reserve_size, min_kimg_align,
reserve_addr, phys_seed,
EFI_LOADER_CODE, EFI_ALLOC_LIMIT);
if (status != EFI_SUCCESS)
efi_warn("efi_random_alloc() failed: 0x%lx\n", status);
} else {
status = EFI_OUT_OF_RESOURCES;
}
if (status != EFI_SUCCESS) {
if (!check_image_region(*image_addr, kernel_memsize)) {
efi_err("FIRMWARE BUG: Image BSS overlaps adjacent EFI memory region\n");
} else if (IS_ALIGNED(*image_addr, min_kimg_align) &&
(unsigned long)_end < EFI_ALLOC_LIMIT) {
/*
* Just execute from wherever we were loaded by the
* UEFI PE/COFF loader if the placement is suitable.
*/
*reserve_size = 0;
return EFI_SUCCESS;
}
status = efi_allocate_pages_aligned(*reserve_size, reserve_addr,
ULONG_MAX, min_kimg_align,
EFI_LOADER_CODE);
if (status != EFI_SUCCESS) {
efi_err("Failed to relocate kernel\n");
*reserve_size = 0;
return status;
}
}
memcpy((void *)*reserve_addr, (void *)*image_addr, kernel_size);
*image_addr = *reserve_addr;
efi_icache_sync(*image_addr, *image_addr + kernel_codesize);
efi_remap_image(*image_addr, *reserve_size, kernel_codesize);
return status;
}
@@ -30,32 +30,29 @@ efi_status_t handle_kernel_image(unsigned long *image_addr,
efi_loaded_image_t *image,
efi_handle_t image_handle)
{
unsigned long kernel_size = 0;
unsigned long preferred_addr;
unsigned long kernel_size, kernel_codesize, kernel_memsize;
efi_status_t status;
kernel_size = _edata - _start;
kernel_codesize = __init_text_end - _start;
kernel_memsize = kernel_size + (_end - _edata);
*image_addr = (unsigned long)_start;
*image_size = kernel_size + (_end - _edata);
/*
* RISC-V kernel maps PAGE_OFFSET virtual address to the same physical
* address where kernel is booted. That's why kernel should boot from
* as low as possible to avoid wastage of memory. Currently, dram_base
* is occupied by the firmware. So the preferred address for kernel to
* boot is next aligned address. If preferred address is not available,
* relocate_kernel will fall back to efi_low_alloc_above to allocate
* lowest possible memory region as long as the address and size meets
* the alignment constraints.
*/
preferred_addr = EFI_KIMG_PREFERRED_ADDRESS;
status = efi_relocate_kernel(image_addr, kernel_size, *image_size,
preferred_addr, efi_get_kimg_min_align(),
0x0);
*image_size = kernel_memsize;
*reserve_size = *image_size;
status = efi_kaslr_relocate_kernel(image_addr,
reserve_addr, reserve_size,
kernel_size, kernel_codesize, kernel_memsize,
efi_kaslr_get_phys_seed(image_handle));
if (status != EFI_SUCCESS) {
efi_err("Failed to relocate kernel\n");
*image_size = 0;
}
return status;
}
void efi_icache_sync(unsigned long start, unsigned long end)
{
asm volatile ("fence.i" ::: "memory");
}