Commit 021182e5 authored by Thomas Garnier, committed by Ingo Molnar

x86/mm: Enable KASLR for physical mapping memory regions

Add the physical memory mapping to the list of randomized memory regions.

The physical memory mapping holds most allocations from the boot and heap
allocators. Knowing its base address and the physical memory size, an attacker
can deduce the virtual address of the PDE mapping the vDSO page. This attack
was demonstrated at CanSecWest 2016 in the following presentation:

  "Getting Physical: Extreme Abuse of Intel Based Paged Systems":
  https://github.com/n3k/CansecWest2016_Getting_Physical_Extreme_Abuse_of_Intel_Based_Paging_Systems/blob/master/Presentation/CanSec2016_Presentation.pdf

(See the second part of the presentation.)

The exploits used against Linux worked against 4.6+ kernels but fail once
KASLR memory randomization is enabled:

  https://github.com/n3k/CansecWest2016_Getting_Physical_Extreme_Abuse_of_Intel_Based_Paging_Systems/tree/master/Demos/Linux/exploits

Similar research was done at Google, leading to this patch proposal.

Variants exist that overwrite the ACLs of /proc or /sys objects, leading to
privilege escalation. These variants were tested against 4.6+.

The page offset used by the compressed kernel retains the static value
since it is not yet randomized during this boot stage.
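
As a rough illustration (not part of this patch; the helper below is made up
for the example): the x86-64 physical memory mapping places every physical
page at a constant virtual offset, so with a static __PAGE_OFFSET an attacker
who learns a physical address, e.g. of a page-table page, can compute its
direct-mapping virtual address with a single addition:

  /*
   * Illustrative userspace sketch only -- not kernel code.
   * With a static direct-map base, phys -> virt is a fixed offset.
   */
  #include <stdint.h>
  #include <stdio.h>

  #define STATIC_PAGE_OFFSET 0xffff880000000000ULL  /* the old fixed __PAGE_OFFSET */

  static uint64_t directmap_virt(uint64_t phys, uint64_t page_offset)
  {
          return page_offset + phys;      /* the whole "attack math" */
  }

  int main(void)
  {
          uint64_t pte_phys = 0x1234000;  /* hypothetical page-table page address */

          /* Before this patch: computable with no information leak at all. */
          printf("%#llx\n",
                 (unsigned long long)directmap_virt(pte_phys, STATIC_PAGE_OFFSET));

          /*
           * With CONFIG_RANDOMIZE_MEMORY, page_offset_base is chosen at boot,
           * so the same computation first requires leaking the randomized base.
           */
          return 0;
  }

Randomizing page_offset_base turns that constant offset into a per-boot secret.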
Signed-off-by: Thomas Garnier <thgarnie@google.com>
Signed-off-by: Kees Cook <keescook@chromium.org>
Cc: Alexander Kuleshov <kuleshovmail@gmail.com>
Cc: Alexander Popov <alpopov@ptsecurity.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Cc: Baoquan He <bhe@redhat.com>
Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Borislav Petkov <bp@suse.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Christian Borntraeger <borntraeger@de.ibm.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Dave Young <dyoung@redhat.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Jan Beulich <JBeulich@suse.com>
Cc: Joerg Roedel <jroedel@suse.de>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Juergen Gross <jgross@suse.com>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Lv Zheng <lv.zheng@intel.com>
Cc: Mark Salter <msalter@redhat.com>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Matt Fleming <matt@codeblueprint.co.uk>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephen Smalley <sds@tycho.nsa.gov>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Toshi Kani <toshi.kani@hpe.com>
Cc: Xiao Guangrong <guangrong.xiao@linux.intel.com>
Cc: Yinghai Lu <yinghai@kernel.org>
Cc: kernel-hardening@lists.openwall.com
Cc: linux-doc@vger.kernel.org
Link: http://lkml.kernel.org/r/1466556426-32664-7-git-send-email-keescook@chromium.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 0483e1fa
@@ -20,6 +20,9 @@
 /* These actually do the work of building the kernel identity maps. */
 #include <asm/init.h>
 #include <asm/pgtable.h>
+/* Use the static base for this part of the boot process */
+#undef __PAGE_OFFSET
+#define __PAGE_OFFSET __PAGE_OFFSET_BASE
 #include "../../mm/ident_map.c"

 /* Used by pgtable.h asm code to force instruction serialization. */
...
@@ -4,6 +4,8 @@
 unsigned long kaslr_get_random_long(const char *purpose);

 #ifdef CONFIG_RANDOMIZE_MEMORY
+extern unsigned long page_offset_base;
+
 void kernel_randomize_memory(void);
 #else
 static inline void kernel_randomize_memory(void) { }
...
 #ifndef _ASM_X86_PAGE_64_DEFS_H
 #define _ASM_X86_PAGE_64_DEFS_H

+#ifndef __ASSEMBLY__
+#include <asm/kaslr.h>
+#endif
+
 #ifdef CONFIG_KASAN
 #define KASAN_STACK_ORDER 1
 #else
@@ -32,7 +36,12 @@
  * hypervisor to fit. Choosing 16 slots here is arbitrary, but it's
  * what Xen requires.
  */
-#define __PAGE_OFFSET          _AC(0xffff880000000000, UL)
+#define __PAGE_OFFSET_BASE     _AC(0xffff880000000000, UL)
+#ifdef CONFIG_RANDOMIZE_MEMORY
+#define __PAGE_OFFSET          page_offset_base
+#else
+#define __PAGE_OFFSET          __PAGE_OFFSET_BASE
+#endif /* CONFIG_RANDOMIZE_MEMORY */

 #define __START_KERNEL_map     _AC(0xffffffff80000000, UL)
...
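
For context, a minimal sketch of what the __PAGE_OFFSET hunk above changes for
users of the direct mapping: with CONFIG_RANDOMIZE_MEMORY, __PAGE_OFFSET
expands to the page_offset_base variable, so phys/virt conversions read a
boot-time value instead of a build-time constant. The macro below mirrors the
arch/x86 __va() definition (essentially phys + PAGE_OFFSET); the surrounding
harness and the example function are hypothetical, and a 64-bit build is
assumed:

/*
 * Minimal sketch (hypothetical harness, 64-bit build assumed): how
 * __PAGE_OFFSET now behaves under CONFIG_RANDOMIZE_MEMORY.
 */
unsigned long page_offset_base = 0xffff880000000000UL;  /* set at boot by KASLR */

#define PAGE_OFFSET  page_offset_base                   /* randomized case */
#define __va(x)      ((void *)((unsigned long)(x) + PAGE_OFFSET))

/* Any caller of __va() now transparently picks up the randomized base. */
void *example_phys_to_virt(unsigned long phys)
{
        return __va(phys);
}
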
...@@ -38,7 +38,7 @@ ...@@ -38,7 +38,7 @@
#define pud_index(x) (((x) >> PUD_SHIFT) & (PTRS_PER_PUD-1)) #define pud_index(x) (((x) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET) L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET_BASE)
L4_START_KERNEL = pgd_index(__START_KERNEL_map) L4_START_KERNEL = pgd_index(__START_KERNEL_map)
L3_START_KERNEL = pud_index(__START_KERNEL_map) L3_START_KERNEL = pud_index(__START_KERNEL_map)
......
@@ -43,8 +43,12 @@
  * before. You also need to add a BUILD_BUG_ON in kernel_randomize_memory to
  * ensure that this order is correct and won't be changed.
  */
-static const unsigned long vaddr_start;
-static const unsigned long vaddr_end;
+static const unsigned long vaddr_start = __PAGE_OFFSET_BASE;
+static const unsigned long vaddr_end = VMALLOC_START;
+
+/* Default values */
+unsigned long page_offset_base = __PAGE_OFFSET_BASE;
+EXPORT_SYMBOL(page_offset_base);

 /*
  * Memory regions randomized by KASLR (except modules that use a separate logic
@@ -55,6 +59,7 @@ static __initdata struct kaslr_memory_region {
        unsigned long *base;
        unsigned long size_tb;
 } kaslr_regions[] = {
+       { &page_offset_base, 64/* Maximum */ },
 };

 /* Get size in bytes used by the memory region */
@@ -77,13 +82,20 @@ void __init kernel_randomize_memory(void)
 {
        size_t i;
        unsigned long vaddr = vaddr_start;
-       unsigned long rand;
+       unsigned long rand, memory_tb;
        struct rnd_state rand_state;
        unsigned long remain_entropy;

        if (!kaslr_memory_enabled())
                return;

+       BUG_ON(kaslr_regions[0].base != &page_offset_base);
+       memory_tb = ((max_pfn << PAGE_SHIFT) >> TB_SHIFT);
+
+       /* Adapt physical memory region size based on available memory */
+       if (memory_tb < kaslr_regions[0].size_tb)
+               kaslr_regions[0].size_tb = memory_tb;
+
        /* Calculate entropy available between regions */
        remain_entropy = vaddr_end - vaddr_start;
        for (i = 0; i < ARRAY_SIZE(kaslr_regions); i++)
...
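
The hunk above stops right at the entropy loop, which this excerpt truncates.
Below is a self-contained, userspace-style sketch of the general placement
scheme such a loop implements (paraphrased, not the kernel code; the region
table, the address constants and the weak stand-in RNG are illustrative
assumptions): each region gets a PUD-aligned random shift taken from its share
of the remaining virtual-address slack, and the next region starts past the
shifted region.

/*
 * Illustrative sketch only -- paraphrases the placement scheme, does not
 * reproduce kernel_randomize_memory() verbatim.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define PUD_SHIFT 30
#define PUD_SIZE  (1ULL << PUD_SHIFT)
#define PUD_MASK  (~(PUD_SIZE - 1))
#define TB_SHIFT  40

struct region { const char *name; uint64_t base; uint64_t size_tb; };

int main(void)
{
        struct region regions[] = {
                /* clamped to the actual RAM size in the hunk above */
                { "physical mapping", 0, 64 },
        };
        uint64_t vaddr     = 0xffff880000000000ULL; /* __PAGE_OFFSET_BASE */
        uint64_t vaddr_end = 0xffffc90000000000ULL; /* illustrative VMALLOC_START */
        uint64_t remain = vaddr_end - vaddr;
        size_t n = sizeof(regions) / sizeof(regions[0]);

        for (size_t i = 0; i < n; i++) {
                /* Equal share of the leftover slack, PUD-aligned random shift. */
                uint64_t share = remain / (n - i);
                uint64_t entropy = ((uint64_t)rand() % (share + 1)) & PUD_MASK;

                vaddr += entropy;
                regions[i].base = vaddr;

                /* Step past the region, then advance to the next PUD boundary. */
                vaddr += regions[i].size_tb << TB_SHIFT;
                vaddr = (vaddr + PUD_SIZE) & PUD_MASK;
                remain -= entropy;

                printf("%s base: %#llx\n", regions[i].name,
                       (unsigned long long)regions[i].base);
        }
        return 0;
}
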