Commit ca2ef2d9 authored by Linus Torvalds

Merge tag 'kcsan.2021.11.11a' of git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu

Pull KCSAN updates from Paul McKenney:
 "This contains initialization fixups, testing improvements, addition of
  instruction pointer to data-race reports, and scoped data-race checks"

* tag 'kcsan.2021.11.11a' of git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu:
  kcsan: selftest: Cleanup and add missing __init
  kcsan: Move ctx to start of argument list
  kcsan: Support reporting scoped read-write access type
  kcsan: Start stack trace with explicit location if provided
  kcsan: Save instruction pointer for scoped accesses
  kcsan: Add ability to pass instruction pointer of access to reporting
  kcsan: test: Fix flaky test case
  kcsan: test: Use kunit_skip() to skip tests
  kcsan: test: Defer kcsan_test_init() after kunit initialization
parents 5593a733 ac20e39e
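
The most visible change in this series is that KCSAN now records the instruction pointer at which a scoped access was set up and starts the report's stack trace there. A minimal sketch of what that means for users; the function, struct, and field names below are invented for illustration, while ASSERT_EXCLUSIVE_WRITER_SCOPED() (from <linux/kcsan-checks.h>) is the real KCSAN macro:

/*
 * Hypothetical example: the assertion registers a scoped access covering
 * the rest of the function. With this series, a report for a racing
 * writer anywhere in the scope begins its stack trace at the assertion
 * line, because that instruction pointer is saved when the scope is set up.
 */
struct stats { long count; };	/* hypothetical */

static void update_stats(struct stats *s)
{
	ASSERT_EXCLUSIVE_WRITER_SCOPED(s->count);

	s->count++;	/* a racing writer is now reported against the assert line above */
}
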
include/linux/kcsan.h
@@ -100,9 +100,12 @@ void kcsan_set_access_mask(unsigned long mask);
/* Scoped access information. */
struct kcsan_scoped_access {
struct list_head list;
/* Access information. */
const volatile void *ptr;
size_t size;
int type;
/* Location where scoped access was set up. */
unsigned long ip;
};
/*
* Automatically call kcsan_end_scoped_access() when kcsan_scoped_access goes
kernel/kcsan/core.c
@@ -202,6 +202,9 @@ static __always_inline struct kcsan_ctx *get_ctx(void)
return in_task() ? &current->kcsan_ctx : raw_cpu_ptr(&kcsan_cpu_ctx);
}
static __always_inline void
check_access(const volatile void *ptr, size_t size, int type, unsigned long ip);
/* Check scoped accesses; never inline because this is a slow-path! */
static noinline void kcsan_check_scoped_accesses(void)
{
@@ -210,14 +213,16 @@ static noinline void kcsan_check_scoped_accesses(void)
struct kcsan_scoped_access *scoped_access;
ctx->scoped_accesses.prev = NULL; /* Avoid recursion. */
list_for_each_entry(scoped_access, &ctx->scoped_accesses, list)
__kcsan_check_access(scoped_access->ptr, scoped_access->size, scoped_access->type);
list_for_each_entry(scoped_access, &ctx->scoped_accesses, list) {
check_access(scoped_access->ptr, scoped_access->size,
scoped_access->type, scoped_access->ip);
}
ctx->scoped_accesses.prev = prev_save;
}
/* Rules for generic atomic accesses. Called from fast-path. */
static __always_inline bool
is_atomic(const volatile void *ptr, size_t size, int type, struct kcsan_ctx *ctx)
is_atomic(struct kcsan_ctx *ctx, const volatile void *ptr, size_t size, int type)
{
if (type & KCSAN_ACCESS_ATOMIC)
return true;
@@ -254,7 +259,7 @@ is_atomic(const volatile void *ptr, size_t size, int type, struct kcsan_ctx *ctx
}
static __always_inline bool
should_watch(const volatile void *ptr, size_t size, int type, struct kcsan_ctx *ctx)
should_watch(struct kcsan_ctx *ctx, const volatile void *ptr, size_t size, int type)
{
/*
* Never set up watchpoints when memory operations are atomic.
@@ -263,7 +268,7 @@ should_watch(const volatile void *ptr, size_t size, int type, struct kcsan_ctx *
* should not count towards skipped instructions, and (2) to actually
* decrement kcsan_atomic_next for consecutive instruction stream.
*/
if (is_atomic(ptr, size, type, ctx))
if (is_atomic(ctx, ptr, size, type))
return false;
if (this_cpu_dec_return(kcsan_skip) >= 0)
@@ -350,6 +355,7 @@ void kcsan_restore_irqtrace(struct task_struct *task)
static noinline void kcsan_found_watchpoint(const volatile void *ptr,
size_t size,
int type,
unsigned long ip,
atomic_long_t *watchpoint,
long encoded_watchpoint)
{
@@ -396,7 +402,7 @@ static noinline void kcsan_found_watchpoint(const volatile void *ptr,
if (consumed) {
kcsan_save_irqtrace(current);
kcsan_report_set_info(ptr, size, type, watchpoint - watchpoints);
kcsan_report_set_info(ptr, size, type, ip, watchpoint - watchpoints);
kcsan_restore_irqtrace(current);
} else {
/*
@@ -416,7 +422,7 @@ static noinline void kcsan_found_watchpoint(const volatile void *ptr,
}
static noinline void
kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type)
kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type, unsigned long ip)
{
const bool is_write = (type & KCSAN_ACCESS_WRITE) != 0;
const bool is_assert = (type & KCSAN_ACCESS_ASSERT) != 0;
@@ -568,8 +574,8 @@ kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type)
if (is_assert && value_change == KCSAN_VALUE_CHANGE_TRUE)
atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_ASSERT_FAILURES]);
kcsan_report_known_origin(ptr, size, type, value_change,
watchpoint - watchpoints,
kcsan_report_known_origin(ptr, size, type, ip,
value_change, watchpoint - watchpoints,
old, new, access_mask);
} else if (value_change == KCSAN_VALUE_CHANGE_TRUE) {
/* Inferring a race, since the value should not have changed. */
@@ -578,8 +584,10 @@ kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type)
if (is_assert)
atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_ASSERT_FAILURES]);
if (IS_ENABLED(CONFIG_KCSAN_REPORT_RACE_UNKNOWN_ORIGIN) || is_assert)
kcsan_report_unknown_origin(ptr, size, type, old, new, access_mask);
if (IS_ENABLED(CONFIG_KCSAN_REPORT_RACE_UNKNOWN_ORIGIN) || is_assert) {
kcsan_report_unknown_origin(ptr, size, type, ip,
old, new, access_mask);
}
}
/*
@@ -596,8 +604,8 @@ kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type)
user_access_restore(ua_flags);
}
static __always_inline void check_access(const volatile void *ptr, size_t size,
int type)
static __always_inline void
check_access(const volatile void *ptr, size_t size, int type, unsigned long ip)
{
const bool is_write = (type & KCSAN_ACCESS_WRITE) != 0;
atomic_long_t *watchpoint;
@@ -625,13 +633,12 @@ static __always_inline void check_access(const volatile void *ptr, size_t size,
*/
if (unlikely(watchpoint != NULL))
kcsan_found_watchpoint(ptr, size, type, watchpoint,
encoded_watchpoint);
kcsan_found_watchpoint(ptr, size, type, ip, watchpoint, encoded_watchpoint);
else {
struct kcsan_ctx *ctx = get_ctx(); /* Call only once in fast-path. */
if (unlikely(should_watch(ptr, size, type, ctx)))
kcsan_setup_watchpoint(ptr, size, type);
if (unlikely(should_watch(ctx, ptr, size, type)))
kcsan_setup_watchpoint(ptr, size, type, ip);
else if (unlikely(ctx->scoped_accesses.prev))
kcsan_check_scoped_accesses();
}
@@ -757,7 +764,7 @@ kcsan_begin_scoped_access(const volatile void *ptr, size_t size, int type,
{
struct kcsan_ctx *ctx = get_ctx();
__kcsan_check_access(ptr, size, type);
check_access(ptr, size, type, _RET_IP_);
ctx->disable_count++; /* Disable KCSAN, in case list debugging is on. */
@@ -765,6 +772,7 @@ kcsan_begin_scoped_access(const volatile void *ptr, size_t size, int type,
sa->ptr = ptr;
sa->size = size;
sa->type = type;
sa->ip = _RET_IP_;
if (!ctx->scoped_accesses.prev) /* Lazy initialize list head. */
INIT_LIST_HEAD(&ctx->scoped_accesses);
@@ -796,13 +804,13 @@ void kcsan_end_scoped_access(struct kcsan_scoped_access *sa)
ctx->disable_count--;
__kcsan_check_access(sa->ptr, sa->size, sa->type);
check_access(sa->ptr, sa->size, sa->type, sa->ip);
}
EXPORT_SYMBOL(kcsan_end_scoped_access);
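
The lower-level API benefits the same way: kcsan_begin_scoped_access() now records _RET_IP_ in sa->ip, so every later re-check of the scope, and the final check in kcsan_end_scoped_access(), reports against the call site that opened the window. A hedged usage sketch; struct obj and frob() are invented, the KCSAN calls and KCSAN_ACCESS_ASSERT are real:

/* Sketch only; the object and its field are placeholders. */
struct obj { int state; };	/* hypothetical */

static void frob(struct obj *o)
{
	struct kcsan_scoped_access sa;

	/* sa.ip is set to this call site via _RET_IP_. */
	kcsan_begin_scoped_access(&o->state, sizeof(o->state),
				  KCSAN_ACCESS_ASSERT, &sa);
	/*
	 * ... work; a race with o->state anywhere in this window is
	 * reported against the kcsan_begin_scoped_access() call above ...
	 */
	kcsan_end_scoped_access(&sa);
}
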
void __kcsan_check_access(const volatile void *ptr, size_t size, int type)
{
check_access(ptr, size, type);
check_access(ptr, size, type, _RET_IP_);
}
EXPORT_SYMBOL(__kcsan_check_access);
@@ -823,7 +831,7 @@ EXPORT_SYMBOL(__kcsan_check_access);
void __tsan_read##size(void *ptr); \
void __tsan_read##size(void *ptr) \
{ \
check_access(ptr, size, 0); \
check_access(ptr, size, 0, _RET_IP_); \
} \
EXPORT_SYMBOL(__tsan_read##size); \
void __tsan_unaligned_read##size(void *ptr) \
@@ -832,7 +840,7 @@ EXPORT_SYMBOL(__kcsan_check_access);
void __tsan_write##size(void *ptr); \
void __tsan_write##size(void *ptr) \
{ \
check_access(ptr, size, KCSAN_ACCESS_WRITE); \
check_access(ptr, size, KCSAN_ACCESS_WRITE, _RET_IP_); \
} \
EXPORT_SYMBOL(__tsan_write##size); \
void __tsan_unaligned_write##size(void *ptr) \
@@ -842,7 +850,8 @@ EXPORT_SYMBOL(__kcsan_check_access);
void __tsan_read_write##size(void *ptr) \
{ \
check_access(ptr, size, \
KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE); \
KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE, \
_RET_IP_); \
} \
EXPORT_SYMBOL(__tsan_read_write##size); \
void __tsan_unaligned_read_write##size(void *ptr) \
@@ -858,14 +867,14 @@ DEFINE_TSAN_READ_WRITE(16);
void __tsan_read_range(void *ptr, size_t size);
void __tsan_read_range(void *ptr, size_t size)
{
check_access(ptr, size, 0);
check_access(ptr, size, 0, _RET_IP_);
}
EXPORT_SYMBOL(__tsan_read_range);
void __tsan_write_range(void *ptr, size_t size);
void __tsan_write_range(void *ptr, size_t size)
{
check_access(ptr, size, KCSAN_ACCESS_WRITE);
check_access(ptr, size, KCSAN_ACCESS_WRITE, _RET_IP_);
}
EXPORT_SYMBOL(__tsan_write_range);
@@ -886,7 +895,8 @@ EXPORT_SYMBOL(__tsan_write_range);
IS_ALIGNED((unsigned long)ptr, size); \
if (IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS) && is_atomic) \
return; \
check_access(ptr, size, is_atomic ? KCSAN_ACCESS_ATOMIC : 0); \
check_access(ptr, size, is_atomic ? KCSAN_ACCESS_ATOMIC : 0, \
_RET_IP_); \
} \
EXPORT_SYMBOL(__tsan_volatile_read##size); \
void __tsan_unaligned_volatile_read##size(void *ptr) \
@@ -901,7 +911,8 @@ EXPORT_SYMBOL(__tsan_write_range);
return; \
check_access(ptr, size, \
KCSAN_ACCESS_WRITE | \
(is_atomic ? KCSAN_ACCESS_ATOMIC : 0)); \
(is_atomic ? KCSAN_ACCESS_ATOMIC : 0), \
_RET_IP_); \
} \
EXPORT_SYMBOL(__tsan_volatile_write##size); \
void __tsan_unaligned_volatile_write##size(void *ptr) \
@@ -955,7 +966,7 @@ EXPORT_SYMBOL(__tsan_init);
u##bits __tsan_atomic##bits##_load(const u##bits *ptr, int memorder) \
{ \
if (!IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS)) { \
check_access(ptr, bits / BITS_PER_BYTE, KCSAN_ACCESS_ATOMIC); \
check_access(ptr, bits / BITS_PER_BYTE, KCSAN_ACCESS_ATOMIC, _RET_IP_); \
} \
return __atomic_load_n(ptr, memorder); \
} \
@@ -965,7 +976,7 @@ EXPORT_SYMBOL(__tsan_init);
{ \
if (!IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS)) { \
check_access(ptr, bits / BITS_PER_BYTE, \
KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ATOMIC); \
KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ATOMIC, _RET_IP_); \
} \
__atomic_store_n(ptr, v, memorder); \
} \
@@ -978,7 +989,7 @@ EXPORT_SYMBOL(__tsan_init);
if (!IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS)) { \
check_access(ptr, bits / BITS_PER_BYTE, \
KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE | \
KCSAN_ACCESS_ATOMIC); \
KCSAN_ACCESS_ATOMIC, _RET_IP_); \
} \
return __atomic_##op##suffix(ptr, v, memorder); \
} \
@@ -1010,7 +1021,7 @@ EXPORT_SYMBOL(__tsan_init);
if (!IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS)) { \
check_access(ptr, bits / BITS_PER_BYTE, \
KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE | \
KCSAN_ACCESS_ATOMIC); \
KCSAN_ACCESS_ATOMIC, _RET_IP_); \
} \
return __atomic_compare_exchange_n(ptr, exp, val, weak, mo, fail_mo); \
} \
@@ -1025,7 +1036,7 @@ EXPORT_SYMBOL(__tsan_init);
if (!IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS)) { \
check_access(ptr, bits / BITS_PER_BYTE, \
KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE | \
KCSAN_ACCESS_ATOMIC); \
KCSAN_ACCESS_ATOMIC, _RET_IP_); \
} \
__atomic_compare_exchange_n(ptr, &exp, val, 0, mo, fail_mo); \
return exp; \
kernel/kcsan/kcsan.h
@@ -121,7 +121,7 @@ enum kcsan_value_change {
* to be consumed by the reporting thread. No report is printed yet.
*/
void kcsan_report_set_info(const volatile void *ptr, size_t size, int access_type,
int watchpoint_idx);
unsigned long ip, int watchpoint_idx);
/*
* The calling thread observed that the watchpoint it set up was hit and
@@ -129,14 +129,14 @@ void kcsan_report_set_info(const volatile void *ptr, size_t size, int access_typ
* thread.
*/
void kcsan_report_known_origin(const volatile void *ptr, size_t size, int access_type,
enum kcsan_value_change value_change, int watchpoint_idx,
u64 old, u64 new, u64 mask);
unsigned long ip, enum kcsan_value_change value_change,
int watchpoint_idx, u64 old, u64 new, u64 mask);
/*
* No other thread was observed to race with the access, but the data value
* before and after the stall differs. Reports a race of "unknown origin".
*/
void kcsan_report_unknown_origin(const volatile void *ptr, size_t size, int access_type,
u64 old, u64 new, u64 mask);
unsigned long ip, u64 old, u64 new, u64 mask);
#endif /* _KERNEL_KCSAN_KCSAN_H */
kernel/kcsan/kcsan_test.c
@@ -29,6 +29,11 @@
#include <linux/types.h>
#include <trace/events/printk.h>
#define KCSAN_TEST_REQUIRES(test, cond) do { \
if (!(cond)) \
kunit_skip((test), "Test requires: " #cond); \
} while (0)
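
KCSAN_TEST_REQUIRES() wraps kunit_skip(), so a test whose precondition fails is reported as skipped rather than silently passing; the conversions further down in this file use it exactly like this sketch (the test name here is invented):

static void example_test(struct kunit *test)	/* hypothetical */
{
	KCSAN_TEST_REQUIRES(test, !IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS));
	/* test body runs only when the requirement holds */
}
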
#ifdef CONFIG_CC_HAS_TSAN_COMPOUND_READ_BEFORE_WRITE
#define __KCSAN_ACCESS_RW(alt) (KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE)
#else
@@ -205,10 +210,12 @@ static bool report_matches(const struct expect_report *r)
"read-write" :
"write") :
"read");
const bool is_atomic = (ty & KCSAN_ACCESS_ATOMIC);
const bool is_scoped = (ty & KCSAN_ACCESS_SCOPED);
const char *const access_type_aux =
(ty & KCSAN_ACCESS_ATOMIC) ?
" (marked)" :
((ty & KCSAN_ACCESS_SCOPED) ? " (scoped)" : "");
(is_atomic && is_scoped) ? " (marked, scoped)"
: (is_atomic ? " (marked)"
: (is_scoped ? " (scoped)" : ""));
if (i == 1) {
/* Access 2 */
@@ -333,7 +340,10 @@ static noinline void test_kernel_assert_bits_nochange(void)
ASSERT_EXCLUSIVE_BITS(test_var, ~TEST_CHANGE_BITS);
}
/* To check that scoped assertions do trigger anywhere in scope. */
/*
* Scoped assertions do trigger anywhere in scope. However, the report should
* still only point at the start of the scope.
*/
static noinline void test_enter_scope(void)
{
int x = 0;
@@ -488,17 +498,24 @@ static void test_concurrent_races(struct kunit *test)
__no_kcsan
static void test_novalue_change(struct kunit *test)
{
const struct expect_report expect = {
const struct expect_report expect_rw = {
.access = {
{ test_kernel_write_nochange, &test_var, sizeof(test_var), KCSAN_ACCESS_WRITE },
{ test_kernel_read, &test_var, sizeof(test_var), 0 },
},
};
const struct expect_report expect_ww = {
.access = {
{ test_kernel_write_nochange, &test_var, sizeof(test_var), KCSAN_ACCESS_WRITE },
{ test_kernel_write_nochange, &test_var, sizeof(test_var), KCSAN_ACCESS_WRITE },
},
};
bool match_expect = false;
test_kernel_write_nochange(); /* Reset value. */
begin_test_checks(test_kernel_write_nochange, test_kernel_read);
do {
match_expect = report_matches(&expect);
match_expect = report_matches(&expect_rw) || report_matches(&expect_ww);
} while (!end_test_checks(match_expect));
if (IS_ENABLED(CONFIG_KCSAN_REPORT_VALUE_CHANGE_ONLY))
KUNIT_EXPECT_FALSE(test, match_expect);
@@ -513,17 +530,24 @@ static void test_novalue_change(struct kunit *test)
__no_kcsan
static void test_novalue_change_exception(struct kunit *test)
{
const struct expect_report expect = {
const struct expect_report expect_rw = {
.access = {
{ test_kernel_write_nochange_rcu, &test_var, sizeof(test_var), KCSAN_ACCESS_WRITE },
{ test_kernel_read, &test_var, sizeof(test_var), 0 },
},
};
const struct expect_report expect_ww = {
.access = {
{ test_kernel_write_nochange_rcu, &test_var, sizeof(test_var), KCSAN_ACCESS_WRITE },
{ test_kernel_write_nochange_rcu, &test_var, sizeof(test_var), KCSAN_ACCESS_WRITE },
},
};
bool match_expect = false;
test_kernel_write_nochange_rcu(); /* Reset value. */
begin_test_checks(test_kernel_write_nochange_rcu, test_kernel_read);
do {
match_expect = report_matches(&expect);
match_expect = report_matches(&expect_rw) || report_matches(&expect_ww);
} while (!end_test_checks(match_expect));
KUNIT_EXPECT_TRUE(test, match_expect);
}
@@ -642,8 +666,7 @@ static void test_read_plain_atomic_write(struct kunit *test)
};
bool match_expect = false;
if (IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS))
return;
KCSAN_TEST_REQUIRES(test, !IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS));
begin_test_checks(test_kernel_read, test_kernel_write_atomic);
do {
@@ -665,8 +688,7 @@ static void test_read_plain_atomic_rmw(struct kunit *test)
};
bool match_expect = false;
if (IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS))
return;
KCSAN_TEST_REQUIRES(test, !IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS));
begin_test_checks(test_kernel_read, test_kernel_atomic_rmw);
do {
@@ -828,22 +850,22 @@ static void test_assert_exclusive_writer_scoped(struct kunit *test)
{ test_kernel_write_nochange, &test_var, sizeof(test_var), KCSAN_ACCESS_WRITE },
},
};
const struct expect_report expect_anywhere = {
const struct expect_report expect_inscope = {
.access = {
{ test_enter_scope, &test_var, sizeof(test_var), KCSAN_ACCESS_ASSERT | KCSAN_ACCESS_SCOPED },
{ test_kernel_write_nochange, &test_var, sizeof(test_var), KCSAN_ACCESS_WRITE },
},
};
bool match_expect_start = false;
bool match_expect_anywhere = false;
bool match_expect_inscope = false;
begin_test_checks(test_kernel_assert_writer_scoped, test_kernel_write_nochange);
do {
match_expect_start |= report_matches(&expect_start);
match_expect_anywhere |= report_matches(&expect_anywhere);
} while (!end_test_checks(match_expect_start && match_expect_anywhere));
match_expect_inscope |= report_matches(&expect_inscope);
} while (!end_test_checks(match_expect_inscope));
KUNIT_EXPECT_TRUE(test, match_expect_start);
KUNIT_EXPECT_TRUE(test, match_expect_anywhere);
KUNIT_EXPECT_FALSE(test, match_expect_inscope);
}
__no_kcsan
@@ -872,9 +894,9 @@ static void test_assert_exclusive_access_scoped(struct kunit *test)
do {
match_expect_start |= report_matches(&expect_start1) || report_matches(&expect_start2);
match_expect_inscope |= report_matches(&expect_inscope);
} while (!end_test_checks(match_expect_start && match_expect_inscope));
} while (!end_test_checks(match_expect_inscope));
KUNIT_EXPECT_TRUE(test, match_expect_start);
KUNIT_EXPECT_TRUE(test, match_expect_inscope);
KUNIT_EXPECT_FALSE(test, match_expect_inscope);
}
/*
@@ -1224,7 +1246,7 @@ static void kcsan_test_exit(void)
tracepoint_synchronize_unregister();
}
late_initcall(kcsan_test_init);
late_initcall_sync(kcsan_test_init);
module_exit(kcsan_test_exit);
MODULE_LICENSE("GPL v2");
kernel/kcsan/report.c
@@ -8,6 +8,7 @@
#include <linux/debug_locks.h>
#include <linux/delay.h>
#include <linux/jiffies.h>
#include <linux/kallsyms.h>
#include <linux/kernel.h>
#include <linux/lockdep.h>
#include <linux/preempt.h>
@@ -31,6 +32,7 @@ struct access_info {
int access_type;
int task_pid;
int cpu_id;
unsigned long ip;
};
/*
@@ -245,6 +247,10 @@ static const char *get_access_type(int type)
return "write (scoped)";
case KCSAN_ACCESS_SCOPED | KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ATOMIC:
return "write (marked, scoped)";
case KCSAN_ACCESS_SCOPED | KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE:
return "read-write (scoped)";
case KCSAN_ACCESS_SCOPED | KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ATOMIC:
return "read-write (marked, scoped)";
default:
BUG();
}
@@ -300,6 +306,48 @@ static int get_stack_skipnr(const unsigned long stack_entries[], int num_entries
return skip;
}
/*
* Skips to the first entry that matches the function of @ip, and then replaces
* that entry with @ip, returning the entries to skip.
*/
static int
replace_stack_entry(unsigned long stack_entries[], int num_entries, unsigned long ip)
{
unsigned long symbolsize, offset;
unsigned long target_func;
int skip;
if (kallsyms_lookup_size_offset(ip, &symbolsize, &offset))
target_func = ip - offset;
else
goto fallback;
for (skip = 0; skip < num_entries; ++skip) {
unsigned long func = stack_entries[skip];
if (!kallsyms_lookup_size_offset(func, &symbolsize, &offset))
goto fallback;
func -= offset;
if (func == target_func) {
stack_entries[skip] = ip;
return skip;
}
}
fallback:
/* Should not happen; the resulting stack trace is likely misleading. */
WARN_ONCE(1, "Cannot find frame for %pS in stack trace", (void *)ip);
return get_stack_skipnr(stack_entries, num_entries);
}
static int
sanitize_stack_entries(unsigned long stack_entries[], int num_entries, unsigned long ip)
{
return ip ? replace_stack_entry(stack_entries, num_entries, ip) :
get_stack_skipnr(stack_entries, num_entries);
}
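
To make the replacement concrete: if a scoped access was set up in one function but the watchpoint fired while re-checking the scope elsewhere, the saved ip is spliced into the trace at the first frame belonging to ip's function, and everything above that frame is skipped. A standalone userspace sketch of that search, using a toy symbol table in place of kallsyms_lookup_size_offset(); all names and addresses here are illustrative:

#include <stdio.h>

struct sym { unsigned long start, end; };	/* toy "kallsyms" table */

/* Return the index of the function containing addr, or -1. */
static int func_of(const struct sym *tab, int n, unsigned long addr)
{
	for (int i = 0; i < n; i++)
		if (addr >= tab[i].start && addr < tab[i].end)
			return i;
	return -1;
}

/* Mirror of the kernel logic: skip to ip's function, substitute ip. */
static int replace_entry(unsigned long *trace, int n,
			 const struct sym *tab, int nsym, unsigned long ip)
{
	int target = func_of(tab, nsym, ip);

	for (int skip = 0; skip < n; skip++) {
		if (func_of(tab, nsym, trace[skip]) == target) {
			trace[skip] = ip;	/* report points at the scope setup */
			return skip;
		}
	}
	return 0;	/* fallback: report the raw trace */
}

int main(void)
{
	const struct sym tab[] = { { 0x100, 0x200 }, { 0x200, 0x300 } };
	unsigned long trace[] = { 0x210, 0x140, 0x2f0 };	/* helper, setup, caller */
	int skip = replace_entry(trace, 3, tab, 2, 0x180);	/* ip inside tab[0] */

	printf("skip=%d, first frame=%#lx\n", skip, trace[skip]);	/* skip=1, 0x180 */
	return 0;
}
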
/* Compares symbolized strings of addr1 and addr2. */
static int sym_strcmp(void *addr1, void *addr2)
{
@@ -327,12 +375,12 @@ static void print_verbose_info(struct task_struct *task)
static void print_report(enum kcsan_value_change value_change,
const struct access_info *ai,
const struct other_info *other_info,
struct other_info *other_info,
u64 old, u64 new, u64 mask)
{
unsigned long stack_entries[NUM_STACK_ENTRIES] = { 0 };
int num_stack_entries = stack_trace_save(stack_entries, NUM_STACK_ENTRIES, 1);
int skipnr = get_stack_skipnr(stack_entries, num_stack_entries);
int skipnr = sanitize_stack_entries(stack_entries, num_stack_entries, ai->ip);
unsigned long this_frame = stack_entries[skipnr];
unsigned long other_frame = 0;
int other_skipnr = 0; /* silence uninit warnings */
@@ -344,8 +392,9 @@ static void print_report(enum kcsan_value_change value_change,
return;
if (other_info) {
other_skipnr = get_stack_skipnr(other_info->stack_entries,
other_info->num_stack_entries);
other_skipnr = sanitize_stack_entries(other_info->stack_entries,
other_info->num_stack_entries,
other_info->ai.ip);
other_frame = other_info->stack_entries[other_skipnr];
/* @value_change is only known for the other thread */
@@ -576,21 +625,23 @@ static bool prepare_report_consumer(unsigned long *flags,
}
static struct access_info prepare_access_info(const volatile void *ptr, size_t size,
int access_type)
int access_type, unsigned long ip)
{
return (struct access_info) {
.ptr = ptr,
.size = size,
.access_type = access_type,
.task_pid = in_task() ? task_pid_nr(current) : -1,
.cpu_id = raw_smp_processor_id()
.cpu_id = raw_smp_processor_id(),
/* Only replace stack entry with @ip if scoped access. */
.ip = (access_type & KCSAN_ACCESS_SCOPED) ? ip : 0,
};
}
void kcsan_report_set_info(const volatile void *ptr, size_t size, int access_type,
int watchpoint_idx)
unsigned long ip, int watchpoint_idx)
{
const struct access_info ai = prepare_access_info(ptr, size, access_type);
const struct access_info ai = prepare_access_info(ptr, size, access_type, ip);
unsigned long flags;
kcsan_disable_current();
@@ -603,10 +654,10 @@ void kcsan_report_set_info(const volatile void *ptr, size_t size, int access_typ
}
void kcsan_report_known_origin(const volatile void *ptr, size_t size, int access_type,
enum kcsan_value_change value_change, int watchpoint_idx,
u64 old, u64 new, u64 mask)
unsigned long ip, enum kcsan_value_change value_change,
int watchpoint_idx, u64 old, u64 new, u64 mask)
{
const struct access_info ai = prepare_access_info(ptr, size, access_type);
const struct access_info ai = prepare_access_info(ptr, size, access_type, ip);
struct other_info *other_info = &other_infos[watchpoint_idx];
unsigned long flags = 0;
@@ -637,9 +688,9 @@ void kcsan_report_known_origin(const volatile void *ptr, size_t size, int access
}
void kcsan_report_unknown_origin(const volatile void *ptr, size_t size, int access_type,
u64 old, u64 new, u64 mask)
unsigned long ip, u64 old, u64 new, u64 mask)
{
const struct access_info ai = prepare_access_info(ptr, size, access_type);
const struct access_info ai = prepare_access_info(ptr, size, access_type, ip);
unsigned long flags;
kcsan_disable_current();
kernel/kcsan/selftest.c
@@ -18,7 +18,7 @@
#define ITERS_PER_TEST 2000
/* Test requirements. */
static bool test_requires(void)
static bool __init test_requires(void)
{
/* random should be initialized for the below tests */
return prandom_u32() + prandom_u32() != 0;
@@ -28,14 +28,18 @@ static bool test_requires(void)
* Test watchpoint encode and decode: check that encoding some access's info,
* and then subsequent decode preserves the access's info.
*/
static bool test_encode_decode(void)
static bool __init test_encode_decode(void)
{
int i;
for (i = 0; i < ITERS_PER_TEST; ++i) {
size_t size = prandom_u32_max(MAX_ENCODABLE_SIZE) + 1;
bool is_write = !!prandom_u32_max(2);
unsigned long verif_masked_addr;
long encoded_watchpoint;
bool verif_is_write;
unsigned long addr;
size_t verif_size;
prandom_bytes(&addr, sizeof(addr));
if (addr < PAGE_SIZE)
@@ -44,31 +48,18 @@ static bool test_encode_decode(void)
if (WARN_ON(!check_encodable(addr, size)))
return false;
/* Encode and decode */
{
const long encoded_watchpoint =
encode_watchpoint(addr, size, is_write);
unsigned long verif_masked_addr;
size_t verif_size;
bool verif_is_write;
encoded_watchpoint = encode_watchpoint(addr, size, is_write);
/* Check special watchpoints */
if (WARN_ON(decode_watchpoint(
INVALID_WATCHPOINT, &verif_masked_addr,
&verif_size, &verif_is_write)))
if (WARN_ON(decode_watchpoint(INVALID_WATCHPOINT, &verif_masked_addr, &verif_size, &verif_is_write)))
return false;
if (WARN_ON(decode_watchpoint(
CONSUMED_WATCHPOINT, &verif_masked_addr,
&verif_size, &verif_is_write)))
if (WARN_ON(decode_watchpoint(CONSUMED_WATCHPOINT, &verif_masked_addr, &verif_size, &verif_is_write)))
return false;
/* Check decoding watchpoint returns same data */
if (WARN_ON(!decode_watchpoint(
encoded_watchpoint, &verif_masked_addr,
&verif_size, &verif_is_write)))
if (WARN_ON(!decode_watchpoint(encoded_watchpoint, &verif_masked_addr, &verif_size, &verif_is_write)))
return false;
if (WARN_ON(verif_masked_addr !=
(addr & WATCHPOINT_ADDR_MASK)))
if (WARN_ON(verif_masked_addr != (addr & WATCHPOINT_ADDR_MASK)))
goto fail;
if (WARN_ON(verif_size != size))
goto fail;
@@ -78,19 +69,16 @@ static bool test_encode_decode(void)
continue;
fail:
pr_err("%s fail: %s %zu bytes @ %lx -> encoded: %lx -> %s %zu bytes @ %lx\n",
__func__, is_write ? "write" : "read", size,
addr, encoded_watchpoint,
verif_is_write ? "write" : "read", verif_size,
verif_masked_addr);
__func__, is_write ? "write" : "read", size, addr, encoded_watchpoint,
verif_is_write ? "write" : "read", verif_size, verif_masked_addr);
return false;
}
}
return true;
}
/* Test access matching function. */
static bool test_matching_access(void)
static bool __init test_matching_access(void)
{
if (WARN_ON(!matching_access(10, 1, 10, 1)))
return false;