Commit dc6a7eff authored by Greg Kroah-Hartman

Merge tag 'lkdtm-next' of https://git.kernel.org/pub/scm/linux/kernel/git/kees/linux into char-misc-next

Kees writes:

lkdtm updates for -next

- Test for new usercopy memory regions
- Avoid GCC 12 warnings
- Update expected CONFIGs for selftests

* tag 'lkdtm-next' of https://git.kernel.org/pub/scm/linux/kernel/git/kees/linux:
  lkdtm/heap: Hide allocation size from -Warray-bounds
  selftests/lkdtm: Add configs for stackleak and "after free" tests
  lkdtm/usercopy: Check vmalloc and >0-order folios
  lkdtm/usercopy: Rename "heap" to "slab"
  lkdtm: cfi: Fix type width for masking PAC bits
parents bab6ffa2 f260fd59
@@ -56,6 +56,7 @@ static void lkdtm_SLAB_LINEAR_OVERFLOW(void)
 		return;

 	pr_info("Attempting slab linear overflow ...\n");
+	OPTIMIZER_HIDE_VAR(data);
 	data[1024 / sizeof(u32)] = 0x12345678;
 	kfree(data);
 }
...
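The OPTIMIZER_HIDE_VAR() line added above keeps GCC 12 from seeing through the kmalloc(1024) and flagging (or folding away) the deliberate out-of-bounds store with -Warray-bounds. A minimal userspace sketch of the same idiom, using a simplified stand-in for the kernel macro (the real definition lives in include/linux/compiler.h):

#include <stdint.h>
#include <stdlib.h>

/*
 * Simplified stand-in for the kernel's OPTIMIZER_HIDE_VAR(): the empty asm
 * makes the compiler forget what it knows about the pointer, including the
 * size of the allocation behind it.
 */
#define OPTIMIZER_HIDE_VAR(var)	__asm__ volatile("" : "+r" (var))

int main(void)
{
	uint32_t *data = malloc(1024);

	if (!data)
		return 1;
	OPTIMIZER_HIDE_VAR(data);	/* hide allocation size from -Warray-bounds */
	data[1024 / sizeof(uint32_t)] = 0x12345678;	/* intentional linear overflow */
	free(data);
	return 0;
}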
@@ -5,6 +5,7 @@
  */
 #include "lkdtm.h"
 #include <linux/slab.h>
+#include <linux/highmem.h>
 #include <linux/vmalloc.h>
 #include <linux/sched/task_stack.h>
 #include <linux/mman.h>
@@ -130,7 +131,7 @@ static noinline void do_usercopy_stack(bool to_user, bool bad_frame)
  * This checks for whole-object size validation with hardened usercopy,
  * with or without usercopy whitelisting.
  */
-static void do_usercopy_heap_size(bool to_user)
+static void do_usercopy_slab_size(bool to_user)
 {
 	unsigned long user_addr;
 	unsigned char *one, *two;
@@ -196,9 +197,9 @@ static void do_usercopy_heap_size(bool to_user)
 /*
  * This checks for the specific whitelist window within an object. If this
- * test passes, then do_usercopy_heap_size() tests will pass too.
+ * test passes, then do_usercopy_slab_size() tests will pass too.
  */
-static void do_usercopy_heap_whitelist(bool to_user)
+static void do_usercopy_slab_whitelist(bool to_user)
 {
 	unsigned long user_alloc;
 	unsigned char *buf = NULL;
@@ -272,24 +273,24 @@ static void do_usercopy_heap_whitelist(bool to_user)
 }

 /* Callable tests. */
-static void lkdtm_USERCOPY_HEAP_SIZE_TO(void)
+static void lkdtm_USERCOPY_SLAB_SIZE_TO(void)
 {
-	do_usercopy_heap_size(true);
+	do_usercopy_slab_size(true);
 }

-static void lkdtm_USERCOPY_HEAP_SIZE_FROM(void)
+static void lkdtm_USERCOPY_SLAB_SIZE_FROM(void)
 {
-	do_usercopy_heap_size(false);
+	do_usercopy_slab_size(false);
 }

-static void lkdtm_USERCOPY_HEAP_WHITELIST_TO(void)
+static void lkdtm_USERCOPY_SLAB_WHITELIST_TO(void)
 {
-	do_usercopy_heap_whitelist(true);
+	do_usercopy_slab_whitelist(true);
 }

-static void lkdtm_USERCOPY_HEAP_WHITELIST_FROM(void)
+static void lkdtm_USERCOPY_SLAB_WHITELIST_FROM(void)
 {
-	do_usercopy_heap_whitelist(false);
+	do_usercopy_slab_whitelist(false);
 }

 static void lkdtm_USERCOPY_STACK_FRAME_TO(void)
@@ -341,6 +342,86 @@ static void lkdtm_USERCOPY_KERNEL(void)
 	vm_munmap(user_addr, PAGE_SIZE);
 }

+/*
+ * This expects "kaddr" to point to a PAGE_SIZE allocation, which means
+ * a more complete test that would include copy_from_user() would risk
+ * memory corruption. Just test copy_to_user() here, as that exercises
+ * almost exactly the same code paths.
+ */
+static void do_usercopy_page_span(const char *name, void *kaddr)
+{
+	unsigned long uaddr;
+
+	uaddr = vm_mmap(NULL, 0, PAGE_SIZE, PROT_READ | PROT_WRITE,
+			MAP_ANONYMOUS | MAP_PRIVATE, 0);
+	if (uaddr >= TASK_SIZE) {
+		pr_warn("Failed to allocate user memory\n");
+		return;
+	}
+
+	/* Initialize contents. */
+	memset(kaddr, 0xAA, PAGE_SIZE);
+
+	/* Bump the kaddr forward to detect a page-spanning overflow. */
+	kaddr += PAGE_SIZE / 2;
+
+	pr_info("attempting good copy_to_user() from kernel %s: %px\n",
+		name, kaddr);
+	if (copy_to_user((void __user *)uaddr, kaddr,
+			 unconst + (PAGE_SIZE / 2))) {
+		pr_err("copy_to_user() failed unexpectedly?!\n");
+		goto free_user;
+	}
+
+	pr_info("attempting bad copy_to_user() from kernel %s: %px\n",
+		name, kaddr);
+	if (copy_to_user((void __user *)uaddr, kaddr, unconst + PAGE_SIZE)) {
+		pr_warn("Good, copy_to_user() failed, but lacked Oops(?!)\n");
+		goto free_user;
+	}
+
+	pr_err("FAIL: bad copy_to_user() not detected!\n");
+	pr_expected_config_param(CONFIG_HARDENED_USERCOPY, "hardened_usercopy");
+
+free_user:
+	vm_munmap(uaddr, PAGE_SIZE);
+}
+
+static void lkdtm_USERCOPY_VMALLOC(void)
+{
+	void *addr;
+
+	addr = vmalloc(PAGE_SIZE);
+	if (!addr) {
+		pr_err("vmalloc() failed!?\n");
+		return;
+	}
+	do_usercopy_page_span("vmalloc", addr);
+	vfree(addr);
+}
+
+static void lkdtm_USERCOPY_FOLIO(void)
+{
+	struct folio *folio;
+	void *addr;
+
+	/*
+	 * FIXME: Folio checking currently misses 0-order allocations, so
+	 * allocate and bump forward to the last page.
+	 */
+	folio = folio_alloc(GFP_KERNEL | __GFP_ZERO, 1);
+	if (!folio) {
+		pr_err("folio_alloc() failed!?\n");
+		return;
+	}
+	addr = folio_address(folio);
+	if (addr)
+		do_usercopy_page_span("folio", addr + PAGE_SIZE);
+	else
+		pr_err("folio_address() failed?!\n");
+	folio_put(folio);
+}
+
 void __init lkdtm_usercopy_init(void)
 {
 	/* Prepare cache that lacks SLAB_USERCOPY flag. */
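For reference, the "bad" copy above starts PAGE_SIZE/2 into a PAGE_SIZE allocation and asks for PAGE_SIZE bytes, so it runs half a page past the end of the object. A minimal userspace model of the bounds condition that CONFIG_HARDENED_USERCOPY enforces here (an illustration only, not the kernel's actual __check_object_size() code):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL

/* The copied region must lie entirely within the backing object. */
static bool usercopy_span_ok(uintptr_t obj, size_t obj_size,
			     uintptr_t copy, size_t copy_size)
{
	return copy >= obj && copy + copy_size <= obj + obj_size;
}

int main(void)
{
	uintptr_t obj = 0x1000;	/* pretend allocation base */

	/* Mirrors the "good" then "bad" copies in do_usercopy_page_span(). */
	printf("good: %d\n", usercopy_span_ok(obj, PAGE_SIZE,
					      obj + PAGE_SIZE / 2, PAGE_SIZE / 2));
	printf("bad:  %d\n", usercopy_span_ok(obj, PAGE_SIZE,
					      obj + PAGE_SIZE / 2, PAGE_SIZE));
	return 0;
}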
@@ -358,13 +439,15 @@ void __exit lkdtm_usercopy_exit(void)
 }

 static struct crashtype crashtypes[] = {
-	CRASHTYPE(USERCOPY_HEAP_SIZE_TO),
-	CRASHTYPE(USERCOPY_HEAP_SIZE_FROM),
-	CRASHTYPE(USERCOPY_HEAP_WHITELIST_TO),
-	CRASHTYPE(USERCOPY_HEAP_WHITELIST_FROM),
+	CRASHTYPE(USERCOPY_SLAB_SIZE_TO),
+	CRASHTYPE(USERCOPY_SLAB_SIZE_FROM),
+	CRASHTYPE(USERCOPY_SLAB_WHITELIST_TO),
+	CRASHTYPE(USERCOPY_SLAB_WHITELIST_FROM),
 	CRASHTYPE(USERCOPY_STACK_FRAME_TO),
 	CRASHTYPE(USERCOPY_STACK_FRAME_FROM),
 	CRASHTYPE(USERCOPY_STACK_BEYOND),
+	CRASHTYPE(USERCOPY_VMALLOC),
+	CRASHTYPE(USERCOPY_FOLIO),
 	CRASHTYPE(USERCOPY_KERNEL),
 };
...
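The new USERCOPY_VMALLOC and USERCOPY_FOLIO entries are reachable like any other LKDTM crash type, by writing the test name to LKDTM's debugfs trigger file (typically /sys/kernel/debug/provoke-crash/DIRECT). A minimal userspace sketch, assuming CONFIG_LKDTM=y and debugfs mounted at /sys/kernel/debug; a successful hardened-usercopy detection ends in a kernel Oops, so run it only in a throwaway VM:

#include <stdio.h>

int main(void)
{
	const char *path = "/sys/kernel/debug/provoke-crash/DIRECT";
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);
		return 1;
	}
	/* Any registered crash type works, e.g. USERCOPY_FOLIO or USERCOPY_SLAB_SIZE_TO. */
	fputs("USERCOPY_VMALLOC", f);
	fclose(f);
	return 0;
}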
@@ -2,8 +2,10 @@ CONFIG_LKDTM=y
 CONFIG_DEBUG_LIST=y
 CONFIG_SLAB_FREELIST_HARDENED=y
 CONFIG_FORTIFY_SOURCE=y
+CONFIG_GCC_PLUGIN_STACKLEAK=y
 CONFIG_HARDENED_USERCOPY=y
 CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT=y
+CONFIG_INIT_ON_FREE_DEFAULT_ON=y
 CONFIG_INIT_ON_ALLOC_DEFAULT_ON=y
 CONFIG_UBSAN=y
 CONFIG_UBSAN_BOUNDS=y
...
@@ -64,10 +64,10 @@ REFCOUNT_DEC_AND_TEST_SATURATED Saturation detected: still saturated
 REFCOUNT_SUB_AND_TEST_SATURATED Saturation detected: still saturated
 #REFCOUNT_TIMING timing only
 #ATOMIC_TIMING timing only
-USERCOPY_HEAP_SIZE_TO
-USERCOPY_HEAP_SIZE_FROM
-USERCOPY_HEAP_WHITELIST_TO
-USERCOPY_HEAP_WHITELIST_FROM
+USERCOPY_SLAB_SIZE_TO
+USERCOPY_SLAB_SIZE_FROM
+USERCOPY_SLAB_WHITELIST_TO
+USERCOPY_SLAB_WHITELIST_FROM
 USERCOPY_STACK_FRAME_TO
 USERCOPY_STACK_FRAME_FROM
 USERCOPY_STACK_BEYOND
...