Commit 97a9474a authored by Thomas Gleixner


Merge branch 'kcsan-for-tip' of git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu into locking/kcsan

Pull KCSAN updates from Paul McKenney.
parents 3b02a051 50a19ad4
This diff is collapsed.
@@ -326,9 +326,9 @@ unsigned long read_word_at_a_time(const void *addr)
 #define data_race(expr)						\
 	({							\
 		typeof(({ expr; })) __val;			\
-		kcsan_nestable_atomic_begin();			\
+		kcsan_disable_current();			\
 		__val = ({ expr; });				\
-		kcsan_nestable_atomic_end();			\
+		kcsan_enable_current();				\
 		__val;						\
 	})
 #else
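Note the semantics this relies on: the kernel-doc removed further below states that kcsan_disable_current() and kcsan_enable_current() support nesting, so data_race() stays correct even inside a region where KCSAN is already disabled for the current context. A minimal sketch of that pattern, assuming a hypothetical shared_counter that is updated concurrently elsewhere:

	static long shared_counter;	/* hypothetical; written concurrently */

	void report_stats(void)
	{
		long snapshot;

		kcsan_disable_current();		/* outer disable */
		/* Best-effort diagnostic read; a race here is acceptable. */
		snapshot = data_race(shared_counter);	/* nested disable/enable */
		kcsan_enable_current();			/* restores the outer state */

		pr_info("counter snapshot: %ld\n", snapshot);
	}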
This diff is collapsed.
@@ -40,6 +40,9 @@ struct kcsan_ctx {
 	 * Access mask for all accesses if non-zero.
 	 */
 	unsigned long access_mask;
+
+	/* List of scoped accesses. */
+	struct list_head scoped_accesses;
 };

 /**
@@ -47,25 +50,9 @@
  */
 void kcsan_init(void);

-/**
- * kcsan_disable_current - disable KCSAN for the current context
- *
- * Supports nesting.
- */
-void kcsan_disable_current(void);
-
-/**
- * kcsan_enable_current - re-enable KCSAN for the current context
- *
- * Supports nesting.
- */
-void kcsan_enable_current(void);
-
 #else /* CONFIG_KCSAN */

 static inline void kcsan_init(void) { }

-static inline void kcsan_disable_current(void) { }
-static inline void kcsan_enable_current(void) { }
-
 #endif /* CONFIG_KCSAN */
@@ -169,6 +169,7 @@ struct task_struct init_task
 		.atomic_nest_count = 0,
 		.in_flat_atomic = false,
 		.access_mask = 0,
+		.scoped_accesses = {LIST_POISON1, NULL},
 	},
 #endif
 #ifdef CONFIG_TRACE_IRQFLAGS
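The {LIST_POISON1, NULL} initializer is deliberate: leaving .prev as NULL gives a cheap "no scoped accesses are active" indicator, which the debugfs test below checks with its BUG_ON(current->kcsan_ctx.scoped_accesses.prev) assertions. A sketch of that idiom (the helper name is hypothetical, not part of this diff):

	/* Sketch: .prev == NULL means the scoped-accesses list is unused. */
	static bool ctx_has_scoped_accesses(struct kcsan_ctx *ctx)
	{
		return ctx->scoped_accesses.prev != NULL;
	}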
@@ -4,24 +4,17 @@
 #define _KERNEL_KCSAN_ATOMIC_H

 #include <linux/jiffies.h>
+#include <linux/sched.h>

 /*
- * Helper that returns true if access to @ptr should be considered an atomic
- * access, even though it is not explicitly atomic.
- *
- * List all volatile globals that have been observed in races, to suppress
- * data race reports between accesses to these variables.
- *
- * For now, we assume that volatile accesses of globals are as strong as atomic
- * accesses (READ_ONCE, WRITE_ONCE cast to volatile). The situation is still not
- * entirely clear, as on some architectures (Alpha) READ_ONCE/WRITE_ONCE do more
- * than cast to volatile. Eventually, we hope to be able to remove this
- * function.
+ * Special rules for certain memory where concurrent conflicting accesses are
+ * common, however, the current convention is to not mark them; returns true if
+ * access to @ptr should be considered atomic. Called from slow-path.
  */
-static __always_inline bool kcsan_is_atomic(const volatile void *ptr)
+static bool kcsan_is_atomic_special(const volatile void *ptr)
 {
-	/* only jiffies for now */
-	return ptr == &jiffies;
+	/* volatile globals that have been observed in data races. */
+	return ptr == &jiffies || ptr == &current->state;
 }

 #endif /* _KERNEL_KCSAN_ATOMIC_H */
This diff is collapsed.
@@ -74,25 +74,34 @@ void kcsan_counter_dec(enum kcsan_counter_id id)
  */
 static noinline void microbenchmark(unsigned long iters)
 {
+	const struct kcsan_ctx ctx_save = current->kcsan_ctx;
+	const bool was_enabled = READ_ONCE(kcsan_enabled);
 	cycles_t cycles;

+	/* We may have been called from an atomic region; reset context. */
+	memset(&current->kcsan_ctx, 0, sizeof(current->kcsan_ctx));
+	/*
+	 * Disable to benchmark fast-path for all accesses, and (expected
+	 * negligible) call into slow-path, but never set up watchpoints.
+	 */
+	WRITE_ONCE(kcsan_enabled, false);
+
 	pr_info("KCSAN: %s begin | iters: %lu\n", __func__, iters);

 	cycles = get_cycles();
 	while (iters--) {
-		/*
-		 * We can run this benchmark from multiple tasks; this address
-		 * calculation increases likelyhood of some accesses
-		 * overlapping. Make the access type an atomic read, to never
-		 * set up watchpoints and test the fast-path only.
-		 */
-		unsigned long addr =
-			iters % (CONFIG_KCSAN_NUM_WATCHPOINTS * PAGE_SIZE);
-		__kcsan_check_access((void *)addr, sizeof(long), KCSAN_ACCESS_ATOMIC);
+		unsigned long addr = iters & ((PAGE_SIZE << 8) - 1);
+		int type = !(iters & 0x7f) ? KCSAN_ACCESS_ATOMIC :
+				(!(iters & 0xf) ? KCSAN_ACCESS_WRITE : 0);
+		__kcsan_check_access((void *)addr, sizeof(long), type);
 	}
 	cycles = get_cycles() - cycles;

 	pr_info("KCSAN: %s end | cycles: %llu\n", __func__, cycles);
+
+	WRITE_ONCE(kcsan_enabled, was_enabled);
+	/* restore context */
+	current->kcsan_ctx = ctx_save;
 }
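The new loop body replaces the uniform atomic reads with a deterministic mix of access types: !(iters & 0x7f) selects KCSAN_ACCESS_ATOMIC once every 128 iterations, !(iters & 0xf) selects KCSAN_ACCESS_WRITE for the remaining multiples of 16 (7 of every 128), and the other 120 of every 128 are plain reads. The selection logic, extracted into a standalone sketch for illustration:

	/* Sketch: the benchmark's deterministic access-type mix. */
	static int pick_access_type(unsigned long iters)
	{
		if (!(iters & 0x7f))
			return KCSAN_ACCESS_ATOMIC;	/* 1 in 128 */
		if (!(iters & 0xf))
			return KCSAN_ACCESS_WRITE;	/* 7 in 128 */
		return 0;				/* plain read: 120 in 128 */
	}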
 /*
@@ -101,6 +110,7 @@ static noinline void microbenchmark(unsigned long iters)
  */
 static long test_dummy;
 static long test_flags;
+static long test_scoped;

 static noinline void test_thread(unsigned long iters)
 {
 	const long CHANGE_BITS = 0xff00ff00ff00ff00L;
@@ -111,7 +121,8 @@ static noinline void test_thread(unsigned long iters)
 	memset(&current->kcsan_ctx, 0, sizeof(current->kcsan_ctx));

 	pr_info("KCSAN: %s begin | iters: %lu\n", __func__, iters);
-	pr_info("test_dummy@%px, test_flags@%px\n", &test_dummy, &test_flags);
+	pr_info("test_dummy@%px, test_flags@%px, test_scoped@%px,\n",
+		&test_dummy, &test_flags, &test_scoped);

 	cycles = get_cycles();
 	while (iters--) {
@@ -132,6 +143,18 @@ static noinline void test_thread(unsigned long iters)
 		test_flags ^= CHANGE_BITS; /* generate value-change */
 		__kcsan_check_write(&test_flags, sizeof(test_flags));
+
+		BUG_ON(current->kcsan_ctx.scoped_accesses.prev);
+		{
+			/* Should generate reports anywhere in this block. */
+			ASSERT_EXCLUSIVE_WRITER_SCOPED(test_scoped);
+			ASSERT_EXCLUSIVE_ACCESS_SCOPED(test_scoped);
+			BUG_ON(!current->kcsan_ctx.scoped_accesses.prev);
+			/* Unrelated accesses. */
+			__kcsan_check_access(&cycles, sizeof(cycles), 0);
+			__kcsan_check_access(&cycles, sizeof(cycles), KCSAN_ACCESS_ATOMIC);
+		}
+		BUG_ON(current->kcsan_ctx.scoped_accesses.prev);
 	}

 	cycles = get_cycles() - cycles;
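Outside this self-test, the point of the _SCOPED assertion variants is that the exclusivity claim covers the whole enclosing scope rather than a single instrumented access. A hedged sketch with a hypothetical object and lock (not part of this diff):

	struct my_obj {			/* hypothetical */
		spinlock_t lock;
		unsigned long flags;
	};

	static void update_flags(struct my_obj *obj)
	{
		lockdep_assert_held(&obj->lock);
		/* Claim: no other writer to obj->flags until function exit. */
		ASSERT_EXCLUSIVE_WRITER_SCOPED(obj->flags);

		obj->flags |= 0x1;
		/* ... later writes in this scope are covered as well ... */
	}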
@@ -207,7 +230,7 @@ static ssize_t insert_report_filterlist(const char *func)
 		/* initial allocation */
 		report_filterlist.addrs =
 			kmalloc_array(report_filterlist.size,
-				      sizeof(unsigned long), GFP_KERNEL);
+				      sizeof(unsigned long), GFP_ATOMIC);
 		if (report_filterlist.addrs == NULL) {
 			ret = -ENOMEM;
 			goto out;
@@ -217,7 +240,7 @@ static ssize_t insert_report_filterlist(const char *func)
 		size_t new_size = report_filterlist.size * 2;
 		unsigned long *new_addrs =
 			krealloc(report_filterlist.addrs,
-				 new_size * sizeof(unsigned long), GFP_KERNEL);
+				 new_size * sizeof(unsigned long), GFP_ATOMIC);
 		if (new_addrs == NULL) {
 			/* leave filterlist itself untouched */
@@ -12,6 +12,10 @@
 /* The number of adjacent watchpoints to check. */
 #define KCSAN_CHECK_ADJACENT 1
+#define NUM_SLOTS (1 + 2*KCSAN_CHECK_ADJACENT)
+
+extern unsigned int kcsan_udelay_task;
+extern unsigned int kcsan_udelay_interrupt;

 /*
  * Globally enable and disable KCSAN.
@@ -132,7 +136,7 @@ enum kcsan_report_type {
  * Print a race report from thread that encountered the race.
  */
 extern void kcsan_report(const volatile void *ptr, size_t size, int access_type,
-			 enum kcsan_value_change value_change, int cpu_id,
-			 enum kcsan_report_type type);
+			 enum kcsan_value_change value_change,
+			 enum kcsan_report_type type, int watchpoint_idx);

 #endif /* _KERNEL_KCSAN_KCSAN_H */
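With KCSAN_CHECK_ADJACENT defined as 1 above, NUM_SLOTS evaluates to 1 + 2*1 = 3: one slot for the access itself plus one adjacent slot on either side.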
This diff is collapsed.
@@ -4,22 +4,36 @@ config HAVE_ARCH_KCSAN
 	bool

 menuconfig KCSAN
-	bool "KCSAN: dynamic race detector"
+	bool "KCSAN: dynamic data race detector"
 	depends on HAVE_ARCH_KCSAN && DEBUG_KERNEL && !KASAN
 	select STACKTRACE
 	help
-	  The Kernel Concurrency Sanitizer (KCSAN) is a dynamic race detector,
-	  which relies on compile-time instrumentation, and uses a
-	  watchpoint-based sampling approach to detect races.
+	  The Kernel Concurrency Sanitizer (KCSAN) is a dynamic
+	  data-race detector that relies on compile-time instrumentation.
+	  KCSAN uses a watchpoint-based sampling approach to detect races.

-	  KCSAN's primary purpose is to detect data races. KCSAN can also be
-	  used to check properties, with the help of provided assertions, of
-	  concurrent code where bugs do not manifest as data races.
+	  While KCSAN's primary purpose is to detect data races, it
+	  also provides assertions to check data access constraints.
+	  These assertions can expose bugs that do not manifest as
+	  data races.

 	  See <file:Documentation/dev-tools/kcsan.rst> for more details.

 if KCSAN

+config KCSAN_VERBOSE
+	bool "Show verbose reports with more information about system state"
+	depends on PROVE_LOCKING
+	help
+	  If enabled, reports show more information about the system state that
+	  may help better analyze and debug races. This includes held locks and
+	  IRQ trace events.
+
+	  While this option should generally be benign, we call into more
+	  external functions on report generation; if a race report is
+	  generated from any one of them, system stability may suffer due to
+	  deadlocks or recursion. If in doubt, say N.
+
 config KCSAN_DEBUG
 	bool "Debugging of KCSAN internals"
@@ -88,6 +102,17 @@ config KCSAN_SKIP_WATCH_RANDOMIZE
 	  KCSAN_WATCH_SKIP. If false, the chosen value is always
 	  KCSAN_WATCH_SKIP.

+config KCSAN_INTERRUPT_WATCHER
+	bool "Interruptible watchers"
+	help
+	  If enabled, a task that set up a watchpoint may be interrupted while
+	  delayed. This option will allow KCSAN to detect races between
+	  interrupted tasks and other threads of execution on the same CPU.
+
+	  Currently disabled by default, because not all safe per-CPU access
+	  primitives and patterns may be accounted for, and therefore could
+	  result in false positives.
+
 config KCSAN_REPORT_ONCE_IN_MS
 	int "Duration in milliseconds, in which any given race is only reported once"
 	default 3000
@@ -5890,6 +5890,14 @@ sub process {
 		}
 	}

+# check for data_race without a comment.
+		if ($line =~ /\bdata_race\s*\(/) {
+			if (!ctx_has_comment($first_line, $linenr)) {
+				WARN("DATA_RACE",
+				     "data_race without comment\n" . $herecurr);
+			}
+		}
+
 # check for smp_read_barrier_depends and read_barrier_depends
 		if (!$file && $line =~ /\b(smp_|)read_barrier_depends\s*\(/) {
 			WARN("READ_BARRIER_DEPENDS",
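For reference, the new rule warns whenever data_race() appears without a comment in the surrounding context; a sketch of the difference (queue->nr_pending is a hypothetical field):

	/* OK: a comment in context documents why the race is acceptable. */
	/* Racy read is fine; only used as a load-balancing heuristic. */
	nr = data_race(queue->nr_pending);

	/* Triggers "data_race without comment": no comment in context. */
	nr = data_race(queue->nr_pending);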
@@ -478,8 +478,12 @@ static const char *uaccess_safe_builtin[] = {
 	"__asan_report_store8_noabort",
 	"__asan_report_store16_noabort",
 	/* KCSAN */
+	"__kcsan_check_access",
 	"kcsan_found_watchpoint",
 	"kcsan_setup_watchpoint",
+	"kcsan_check_scoped_accesses",
+	"kcsan_disable_current",
+	"kcsan_enable_current_nowarn",
 	/* KCSAN/TSAN */
 	"__tsan_func_entry",
 	"__tsan_func_exit",