Commit 1f76a755 authored by Linus Torvalds

Merge branch 'locking-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull locking fixes from Ingo Molnar:
 "Misc fixes:

   - Fix a S390 boot hang that was caused by the lock-break logic.
     Remove lock-break to begin with, as review suggested it was
     unreasonably fragile and our confidence in its continued good
     health is lower than our confidence in its removal.

   - Remove the lockdep cross-release checking code for now, because of
     unresolved false positive warnings. This should make lockdep work
     well everywhere again.

   - Get rid of the final (and single) ACCESS_ONCE() straggler and
     remove the API from v4.15.

   - Fix a liblockdep build warning"

* 'locking-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  tools/lib/lockdep: Add missing declaration of 'pr_cont()'
  checkpatch: Remove ACCESS_ONCE() warning
  compiler.h: Remove ACCESS_ONCE()
  tools/include: Remove ACCESS_ONCE()
  tools/perf: Convert ACCESS_ONCE() to READ_ONCE()
  locking/lockdep: Remove the cross-release locking checks
  locking/core: Remove break_lock field when CONFIG_GENERIC_LOCKBREAK=y
  locking/core: Fix deadlock during boot on systems with GENERIC_LOCKBREAK
parents a58653cc 92ccc262
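
Note on the ACCESS_ONCE() removal mentioned above: the conversion this series completes is purely mechanical. A minimal sketch, using a made-up 'shared_flag' variable that is not part of the patch:

/* Illustration only -- hypothetical variable, not from this commit. */
static int shared_flag;

static void old_style(void)
{
	int busy = ACCESS_ONCE(shared_flag);	/* old API: scalar-only */

	if (!busy)
		ACCESS_ONCE(shared_flag) = 1;	/* old write form */
}

static void new_style(void)
{
	int busy = READ_ONCE(shared_flag);	/* preferred read */

	if (!busy)
		WRITE_ONCE(shared_flag, 1);	/* preferred write */
}

checkpatch.pl used to suggest exactly this rewrite; with the last in-tree user converted below, both the macro and the warning go away.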
@@ -220,17 +220,17 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
 /*
  * Prevent the compiler from merging or refetching reads or writes. The
  * compiler is also forbidden from reordering successive instances of
- * READ_ONCE, WRITE_ONCE and ACCESS_ONCE (see below), but only when the
- * compiler is aware of some particular ordering. One way to make the
- * compiler aware of ordering is to put the two invocations of READ_ONCE,
- * WRITE_ONCE or ACCESS_ONCE() in different C statements.
+ * READ_ONCE and WRITE_ONCE, but only when the compiler is aware of some
+ * particular ordering. One way to make the compiler aware of ordering is to
+ * put the two invocations of READ_ONCE or WRITE_ONCE in different C
+ * statements.
  *
- * In contrast to ACCESS_ONCE these two macros will also work on aggregate
- * data types like structs or unions. If the size of the accessed data
- * type exceeds the word size of the machine (e.g., 32 bits or 64 bits)
- * READ_ONCE() and WRITE_ONCE() will fall back to memcpy(). There's at
- * least two memcpy()s: one for the __builtin_memcpy() and then one for
- * the macro doing the copy of variable - '__u' allocated on the stack.
+ * These two macros will also work on aggregate data types like structs or
+ * unions. If the size of the accessed data type exceeds the word size of
+ * the machine (e.g., 32 bits or 64 bits) READ_ONCE() and WRITE_ONCE() will
+ * fall back to memcpy(). There's at least two memcpy()s: one for the
+ * __builtin_memcpy() and then one for the macro doing the copy of variable
+ * - '__u' allocated on the stack.
  *
  * Their two major use cases are: (1) Mediating communication between
  * process-level code and irq/NMI handlers, all running on the same CPU,
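
A short illustration of the aggregate behaviour described in the comment above (a sketch only, with made-up names; on a 64-bit build this two-word struct exceeds the machine word, so both accessors take the memcpy() fallback):

/* Illustration only -- hypothetical struct, not part of the patch. */
struct sample_pair {
	u64 seq;
	u64 val;
};

static struct sample_pair shared_pair;

static void pair_store(u64 seq, u64 val)
{
	struct sample_pair tmp = { .seq = seq, .val = val };

	WRITE_ONCE(shared_pair, tmp);	/* wider than a word: memcpy() fallback */
}

static struct sample_pair pair_load(void)
{
	return READ_ONCE(shared_pair);	/* same fallback on the read side */
}
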
@@ -327,29 +327,4 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
 	compiletime_assert(__native_word(t),				\
 		"Need native word sized stores/loads for atomicity.")
 
-/*
- * Prevent the compiler from merging or refetching accesses. The compiler
- * is also forbidden from reordering successive instances of ACCESS_ONCE(),
- * but only when the compiler is aware of some particular ordering. One way
- * to make the compiler aware of ordering is to put the two invocations of
- * ACCESS_ONCE() in different C statements.
- *
- * ACCESS_ONCE will only work on scalar types. For union types, ACCESS_ONCE
- * on a union member will work as long as the size of the member matches the
- * size of the union and the size is smaller than word size.
- *
- * The major use cases of ACCESS_ONCE used to be (1) Mediating communication
- * between process-level code and irq/NMI handlers, all running on the same CPU,
- * and (2) Ensuring that the compiler does not fold, spindle, or otherwise
- * mutilate accesses that either do not require ordering or that interact
- * with an explicit memory barrier or atomic instruction that provides the
- * required ordering.
- *
- * If possible use READ_ONCE()/WRITE_ONCE() instead.
- */
-#define __ACCESS_ONCE(x) ({ \
-	 __maybe_unused typeof(x) __var = (__force typeof(x)) 0; \
-	(volatile typeof(x) *)&(x); })
-#define ACCESS_ONCE(x) (*__ACCESS_ONCE(x))
 #endif /* __LINUX_COMPILER_H */
@@ -10,9 +10,6 @@
  */
 
 #include <linux/wait.h>
-#ifdef CONFIG_LOCKDEP_COMPLETIONS
-#include <linux/lockdep.h>
-#endif
 
 /*
  * struct completion - structure used to maintain state for a "completion"
@@ -29,58 +26,16 @@
 struct completion {
 	unsigned int done;
 	wait_queue_head_t wait;
-#ifdef CONFIG_LOCKDEP_COMPLETIONS
-	struct lockdep_map_cross map;
-#endif
 };
 
-#ifdef CONFIG_LOCKDEP_COMPLETIONS
-static inline void complete_acquire(struct completion *x)
-{
-	lock_acquire_exclusive((struct lockdep_map *)&x->map, 0, 0, NULL, _RET_IP_);
-}
-
-static inline void complete_release(struct completion *x)
-{
-	lock_release((struct lockdep_map *)&x->map, 0, _RET_IP_);
-}
-
-static inline void complete_release_commit(struct completion *x)
-{
-	lock_commit_crosslock((struct lockdep_map *)&x->map);
-}
-
-#define init_completion_map(x, m)					\
-do {									\
-	lockdep_init_map_crosslock((struct lockdep_map *)&(x)->map,	\
-			(m)->name, (m)->key, 0);			\
-	__init_completion(x);						\
-} while (0)
-
-#define init_completion(x)						\
-do {									\
-	static struct lock_class_key __key;				\
-	lockdep_init_map_crosslock((struct lockdep_map *)&(x)->map,	\
-			"(completion)" #x,				\
-			&__key, 0);					\
-	__init_completion(x);						\
-} while (0)
-#else
 #define init_completion_map(x, m) __init_completion(x)
 #define init_completion(x) __init_completion(x)
 static inline void complete_acquire(struct completion *x) {}
 static inline void complete_release(struct completion *x) {}
 static inline void complete_release_commit(struct completion *x) {}
-#endif
 
-#ifdef CONFIG_LOCKDEP_COMPLETIONS
-#define COMPLETION_INITIALIZER(work) \
-	{ 0, __WAIT_QUEUE_HEAD_INITIALIZER((work).wait), \
-	STATIC_CROSS_LOCKDEP_MAP_INIT("(completion)" #work, &(work)) }
-#else
 #define COMPLETION_INITIALIZER(work) \
 	{ 0, __WAIT_QUEUE_HEAD_INITIALIZER((work).wait) }
-#endif
 
 #define COMPLETION_INITIALIZER_ONSTACK_MAP(work, map) \
 	(*({ init_completion_map(&(work), &(map)); &(work); }))
......
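
For reference, the completion API whose cross-release instrumentation is removed above is used roughly like this (a sketch with hypothetical names; LOCKDEP_COMPLETIONS hooked complete_acquire()/complete_release() around exactly this wait_for_completion()/complete() pairing):

/* Illustration only -- hypothetical names, not from this commit. */
static DECLARE_COMPLETION(my_work_done);

static int worker_fn(void *arg)
{
	/* ... produce the result ... */
	complete(&my_work_done);		/* wake up the waiter */
	return 0;
}

static void submit_and_wait(void)
{
	/* Blocks until worker_fn() signals completion. */
	wait_for_completion(&my_work_done);
}
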
@@ -158,12 +158,6 @@ struct lockdep_map {
 	int cpu;
 	unsigned long ip;
 #endif
-#ifdef CONFIG_LOCKDEP_CROSSRELEASE
-	/*
-	 * Whether it's a crosslock.
-	 */
-	int cross;
-#endif
 };
 
 static inline void lockdep_copy_map(struct lockdep_map *to,
@@ -267,95 +261,8 @@ struct held_lock {
 	unsigned int hardirqs_off:1;
 	unsigned int references:12; /* 32 bits */
 	unsigned int pin_count;
-#ifdef CONFIG_LOCKDEP_CROSSRELEASE
-	/*
-	 * Generation id.
-	 *
-	 * A value of cross_gen_id will be stored when holding this,
-	 * which is globally increased whenever each crosslock is held.
-	 */
-	unsigned int gen_id;
-#endif
-};
-
-#ifdef CONFIG_LOCKDEP_CROSSRELEASE
-#define MAX_XHLOCK_TRACE_ENTRIES 5
-
-/*
- * This is for keeping locks waiting for commit so that true dependencies
- * can be added at commit step.
- */
-struct hist_lock {
-	/*
-	 * Id for each entry in the ring buffer. This is used to
-	 * decide whether the ring buffer was overwritten or not.
-	 *
-	 * For example,
-	 *
-	 * |<----------- hist_lock ring buffer size ------->|
-	 * pppppppppppppppppppppiiiiiiiiiiiiiiiiiiiiiiiiiiiii
-	 * wrapped > iiiiiiiiiiiiiiiiiiiiiiiiiii.......................
-	 *
-	 * where 'p' represents an acquisition in process
-	 * context, 'i' represents an acquisition in irq
-	 * context.
-	 *
-	 * In this example, the ring buffer was overwritten by
-	 * acquisitions in irq context, that should be detected on
-	 * rollback or commit.
-	 */
-	unsigned int hist_id;
-
-	/*
-	 * Seperate stack_trace data. This will be used at commit step.
-	 */
-	struct stack_trace trace;
-	unsigned long trace_entries[MAX_XHLOCK_TRACE_ENTRIES];
-
-	/*
-	 * Seperate hlock instance. This will be used at commit step.
-	 *
-	 * TODO: Use a smaller data structure containing only necessary
-	 * data. However, we should make lockdep code able to handle the
-	 * smaller one first.
-	 */
-	struct held_lock hlock;
 };
 
-/*
- * To initialize a lock as crosslock, lockdep_init_map_crosslock() should
- * be called instead of lockdep_init_map().
- */
-struct cross_lock {
-	/*
-	 * When more than one acquisition of crosslocks are overlapped,
-	 * we have to perform commit for them based on cross_gen_id of
-	 * the first acquisition, which allows us to add more true
-	 * dependencies.
-	 *
-	 * Moreover, when no acquisition of a crosslock is in progress,
-	 * we should not perform commit because the lock might not exist
-	 * any more, which might cause incorrect memory access. So we
-	 * have to track the number of acquisitions of a crosslock.
-	 */
-	int nr_acquire;
-
-	/*
-	 * Seperate hlock instance. This will be used at commit step.
-	 *
-	 * TODO: Use a smaller data structure containing only necessary
-	 * data. However, we should make lockdep code able to handle the
-	 * smaller one first.
-	 */
-	struct held_lock hlock;
-};
-
-struct lockdep_map_cross {
-	struct lockdep_map map;
-	struct cross_lock xlock;
-};
-#endif
-
 /*
  * Initialization, self-test and debugging-output methods:
  */
@@ -560,37 +467,6 @@ enum xhlock_context_t {
 	XHLOCK_CTX_NR,
 };
 
-#ifdef CONFIG_LOCKDEP_CROSSRELEASE
-extern void lockdep_init_map_crosslock(struct lockdep_map *lock,
-				       const char *name,
-				       struct lock_class_key *key,
-				       int subclass);
-extern void lock_commit_crosslock(struct lockdep_map *lock);
-
-/*
- * What we essencially have to initialize is 'nr_acquire'. Other members
- * will be initialized in add_xlock().
- */
-#define STATIC_CROSS_LOCK_INIT() \
-	{ .nr_acquire = 0,}
-
-#define STATIC_CROSS_LOCKDEP_MAP_INIT(_name, _key) \
-	{ .map.name = (_name), .map.key = (void *)(_key), \
-	  .map.cross = 1, .xlock = STATIC_CROSS_LOCK_INIT(), }
-
-/*
- * To initialize a lockdep_map statically use this macro.
- * Note that _name must not be NULL.
- */
-#define STATIC_LOCKDEP_MAP_INIT(_name, _key) \
-	{ .name = (_name), .key = (void *)(_key), .cross = 0, }
-
-extern void crossrelease_hist_start(enum xhlock_context_t c);
-extern void crossrelease_hist_end(enum xhlock_context_t c);
-extern void lockdep_invariant_state(bool force);
-extern void lockdep_init_task(struct task_struct *task);
-extern void lockdep_free_task(struct task_struct *task);
-#else /* !CROSSRELEASE */
 #define lockdep_init_map_crosslock(m, n, k, s) do {} while (0)
 /*
  * To initialize a lockdep_map statically use this macro.
@@ -604,7 +480,6 @@ static inline void crossrelease_hist_end(enum xhlock_context_t c) {}
 static inline void lockdep_invariant_state(bool force) {}
 static inline void lockdep_init_task(struct task_struct *task) {}
 static inline void lockdep_free_task(struct task_struct *task) {}
-#endif /* CROSSRELEASE */
 
 #ifdef CONFIG_LOCK_STAT
......
@@ -10,9 +10,6 @@
  */
 typedef struct {
 	arch_rwlock_t raw_lock;
-#ifdef CONFIG_GENERIC_LOCKBREAK
-	unsigned int break_lock;
-#endif
 #ifdef CONFIG_DEBUG_SPINLOCK
 	unsigned int magic, owner_cpu;
 	void *owner;
......
@@ -849,17 +849,6 @@ struct task_struct {
 	struct held_lock held_locks[MAX_LOCK_DEPTH];
 #endif
 
-#ifdef CONFIG_LOCKDEP_CROSSRELEASE
-#define MAX_XHLOCKS_NR 64UL
-	struct hist_lock *xhlocks; /* Crossrelease history locks */
-	unsigned int xhlock_idx;
-	/* For restoring at history boundaries */
-	unsigned int xhlock_idx_hist[XHLOCK_CTX_NR];
-	unsigned int hist_id;
-	/* For overwrite check at each context exit */
-	unsigned int hist_id_save[XHLOCK_CTX_NR];
-#endif
-
 #ifdef CONFIG_UBSAN
 	unsigned int in_ubsan;
 #endif
......
@@ -107,16 +107,11 @@ do { \
 #define raw_spin_is_locked(lock) arch_spin_is_locked(&(lock)->raw_lock)
 
-#ifdef CONFIG_GENERIC_LOCKBREAK
-#define raw_spin_is_contended(lock) ((lock)->break_lock)
-#else
-
 #ifdef arch_spin_is_contended
 #define raw_spin_is_contended(lock) arch_spin_is_contended(&(lock)->raw_lock)
 #else
 #define raw_spin_is_contended(lock) (((void)(lock), 0))
 #endif /*arch_spin_is_contended*/
-#endif
 
 /*
  * This barrier must provide two things:
......
@@ -19,9 +19,6 @@
 typedef struct raw_spinlock {
 	arch_spinlock_t raw_lock;
-#ifdef CONFIG_GENERIC_LOCKBREAK
-	unsigned int break_lock;
-#endif
 #ifdef CONFIG_DEBUG_SPINLOCK
 	unsigned int magic, owner_cpu;
 	void *owner;
......
@@ -66,12 +66,8 @@ void __lockfunc __raw_##op##_lock(locktype##_t *lock) \
 			break;					\
 		preempt_enable();				\
 								\
-		if (!(lock)->break_lock)			\
-			(lock)->break_lock = 1;			\
-		while ((lock)->break_lock)			\
 		arch_##op##_relax(&lock->raw_lock);		\
 	}							\
-	(lock)->break_lock = 0;					\
 }								\
 								\
 unsigned long __lockfunc __raw_##op##_lock_irqsave(locktype##_t *lock) \
@@ -86,12 +82,9 @@ unsigned long __lockfunc __raw_##op##_lock_irqsave(locktype##_t *lock) \
 		local_irq_restore(flags);			\
 		preempt_enable();				\
 								\
-		if (!(lock)->break_lock)			\
-			(lock)->break_lock = 1;			\
-		while ((lock)->break_lock)			\
 		arch_##op##_relax(&lock->raw_lock);		\
 	}							\
-	(lock)->break_lock = 0;					\
+								\
 	return flags;						\
 }								\
 								\
......
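
With the break_lock handshake gone, the generated slow path above reduces to a plain trylock-and-relax loop. A hand-expanded sketch for the spin case, reconstructed from the visible context (treat it as an approximation, not the literal post-patch code):

/* Sketch of the BUILD_LOCK_OPS() expansion for 'spin' after this change. */
void __lockfunc __raw_spin_lock(raw_spinlock_t *lock)
{
	for (;;) {
		preempt_disable();
		if (likely(do_raw_spin_trylock(lock)))
			break;				/* got the lock */
		preempt_enable();

		/* No break_lock flag any more: just relax and retry. */
		arch_spin_relax(&lock->raw_lock);
	}
}
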
@@ -1099,8 +1099,6 @@ config PROVE_LOCKING
 	select DEBUG_MUTEXES
 	select DEBUG_RT_MUTEXES if RT_MUTEXES
 	select DEBUG_LOCK_ALLOC
-	select LOCKDEP_CROSSRELEASE
-	select LOCKDEP_COMPLETIONS
 	select TRACE_IRQFLAGS
 	default n
 	help
@@ -1170,37 +1168,6 @@ config LOCK_STAT
 	 CONFIG_LOCK_STAT defines "contended" and "acquired" lock events.
 	 (CONFIG_LOCKDEP defines "acquire" and "release" events.)
 
-config LOCKDEP_CROSSRELEASE
-	bool
-	help
-	 This makes lockdep work for crosslock which is a lock allowed to
-	 be released in a different context from the acquisition context.
-	 Normally a lock must be released in the context acquiring the lock.
-	 However, relexing this constraint helps synchronization primitives
-	 such as page locks or completions can use the lock correctness
-	 detector, lockdep.
-
-config LOCKDEP_COMPLETIONS
-	bool
-	help
-	 A deadlock caused by wait_for_completion() and complete() can be
-	 detected by lockdep using crossrelease feature.
-
-config BOOTPARAM_LOCKDEP_CROSSRELEASE_FULLSTACK
-	bool "Enable the boot parameter, crossrelease_fullstack"
-	depends on LOCKDEP_CROSSRELEASE
-	default n
-	help
-	 The lockdep "cross-release" feature needs to record stack traces
-	 (of calling functions) for all acquisitions, for eventual later
-	 use during analysis. By default only a single caller is recorded,
-	 because the unwind operation can be very expensive with deeper
-	 stack chains.
-
-	 However a boot parameter, crossrelease_fullstack, was
-	 introduced since sometimes deeper traces are required for full
-	 analysis. This option turns on the boot parameter.
-
 config DEBUG_LOCKDEP
 	bool "Lock dependency engine debugging"
 	depends on DEBUG_KERNEL && LOCKDEP
......
@@ -6233,28 +6233,6 @@ sub process {
 			}
 		}
 
-# whine about ACCESS_ONCE
-		if ($^V && $^V ge 5.10.0 &&
-		    $line =~ /\bACCESS_ONCE\s*$balanced_parens\s*(=(?!=))?\s*($FuncArg)?/) {
-			my $par = $1;
-			my $eq = $2;
-			my $fun = $3;
-			$par =~ s/^\(\s*(.*)\s*\)$/$1/;
-			if (defined($eq)) {
-				if (WARN("PREFER_WRITE_ONCE",
-					 "Prefer WRITE_ONCE(<FOO>, <BAR>) over ACCESS_ONCE(<FOO>) = <BAR>\n" . $herecurr) &&
-				    $fix) {
-					$fixed[$fixlinenr] =~ s/\bACCESS_ONCE\s*\(\s*\Q$par\E\s*\)\s*$eq\s*\Q$fun\E/WRITE_ONCE($par, $fun)/;
-				}
-			} else {
-				if (WARN("PREFER_READ_ONCE",
-					 "Prefer READ_ONCE(<FOO>) over ACCESS_ONCE(<FOO>)\n" . $herecurr) &&
-				    $fix) {
-					$fixed[$fixlinenr] =~ s/\bACCESS_ONCE\s*\(\s*\Q$par\E\s*\)/READ_ONCE($par)/;
-				}
-			}
-		}
-
 # check for mutex_trylock_recursive usage
 		if ($line =~ /mutex_trylock_recursive/) {
 			ERROR("LOCKING",
......
@@ -84,8 +84,6 @@
 #define uninitialized_var(x) x = *(&(x))
 
-#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
-
 #include <linux/types.h>
 
 /*
@@ -135,16 +133,15 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
 /*
  * Prevent the compiler from merging or refetching reads or writes. The
  * compiler is also forbidden from reordering successive instances of
- * READ_ONCE, WRITE_ONCE and ACCESS_ONCE (see below), but only when the
- * compiler is aware of some particular ordering. One way to make the
- * compiler aware of ordering is to put the two invocations of READ_ONCE,
- * WRITE_ONCE or ACCESS_ONCE() in different C statements.
+ * READ_ONCE and WRITE_ONCE, but only when the compiler is aware of some
+ * particular ordering. One way to make the compiler aware of ordering is to
+ * put the two invocations of READ_ONCE or WRITE_ONCE in different C
+ * statements.
  *
- * In contrast to ACCESS_ONCE these two macros will also work on aggregate
- * data types like structs or unions. If the size of the accessed data
- * type exceeds the word size of the machine (e.g., 32 bits or 64 bits)
- * READ_ONCE() and WRITE_ONCE() will fall back to memcpy and print a
- * compile-time warning.
+ * These two macros will also work on aggregate data types like structs or
+ * unions. If the size of the accessed data type exceeds the word size of
+ * the machine (e.g., 32 bits or 64 bits) READ_ONCE() and WRITE_ONCE() will
+ * fall back to memcpy and print a compile-time warning.
  *
  * Their two major use cases are: (1) Mediating communication between
  * process-level code and irq/NMI handlers, all running on the same CPU,
......
@@ -48,6 +48,7 @@ static inline int debug_locks_off(void)
 #define printk(...) dprintf(STDOUT_FILENO, __VA_ARGS__)
 #define pr_err(format, ...) fprintf (stderr, format, ## __VA_ARGS__)
 #define pr_warn pr_err
+#define pr_cont pr_err
 
 #define list_del_rcu list_del
......
@@ -70,7 +70,7 @@ void perf_mmap__read_catchup(struct perf_mmap *md);
 static inline u64 perf_mmap__read_head(struct perf_mmap *mm)
 {
 	struct perf_event_mmap_page *pc = mm->base;
-	u64 head = ACCESS_ONCE(pc->data_head);
+	u64 head = READ_ONCE(pc->data_head);
 	rmb();
 	return head;
 }
......
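
The converted helper above is the read side of the perf ring buffer. For context, a simplified sketch of how such a head read is usually paired with the matching tail update (names other than data_head/data_tail are illustrative, and the barrier pairing follows the usual perf mmap convention rather than this exact file):

/* Illustration only -- simplified consumer-side helpers. */
static inline u64 ring_read_head(struct perf_event_mmap_page *pc)
{
	u64 head = READ_ONCE(pc->data_head);	/* single, non-torn load */

	rmb();		/* order the head load before reading event data */
	return head;
}

static inline void ring_write_tail(struct perf_event_mmap_page *pc, u64 tail)
{
	mb();		/* finish reading event data before publishing */
	WRITE_ONCE(pc->data_tail, tail);	/* tell the kernel it is consumed */
}
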