Commit 253e1e98 authored by Linus Torvalds

Merge tag 'lkmm.2024.07.12a' of git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu

Pull memory model updates from Paul McKenney:
 "lkmm: Fix corner-case locking bug and improve documentation

  A simple but odd single-process litmus test acquires and immediately
  releases a lock, then calls spin_is_locked(). LKMM acts as if this
  were a deadlock, due to an assumption that spin_is_locked() will
  follow either a spin_lock() or some other process's spin_unlock().
  This litmus test manages to violate the assumption because its
  spin_is_locked() follows the same process's spin_unlock().

  This series fixes this bug, reorganizes and optimizes the lock.cat
  model, and updates documentation"
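
For concreteness, the corner case can be written as a litmus test. The
following is an illustrative sketch (the test name and register are chosen
here and are not necessarily the exact test from the series):

	C unlock-then-is-locked

	{}

	P0(spinlock_t *s)
	{
		int r0;

		spin_lock(s);
		spin_unlock(s);
		r0 = spin_is_locked(s);
	}

	exists (0:r0=1)

With the fixed lock.cat, the spin_is_locked() call may read from the same
process's spin_unlock(), so the test is no longer misclassified as a
deadlock; the exists clause can never be satisfied, because the lock is
free at that point.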

* tag 'lkmm.2024.07.12a' of git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu:
  tools/memory-model: Code reorganization in lock.cat
  tools/memory-model: Fix bug in lock.cat
  tools/memory-model: Add access-marking.txt to README
  tools/memory-model: Add KCSAN LF mentorship session citation
parents c4b729b0 ea6ee1ba
tools/memory-model/README
@@ -47,6 +47,10 @@ DESCRIPTION OF FILES
 README
 	This file.
 
+access-marking.txt
+	Guidelines for marking intentionally concurrent accesses to
+	shared memory.
+
 cheatsheet.txt
 	Quick-reference guide to the Linux-kernel memory model.
tools/memory-model/Documentation/access-marking.txt
@@ -6,7 +6,8 @@
 normal accesses to shared memory, that is "normal" as in accesses that do
 not use read-modify-write atomic operations.  It also describes how to
 document these accesses, both with comments and with special assertions
 processed by the Kernel Concurrency Sanitizer (KCSAN).  This discussion
-builds on an earlier LWN article [1].
+builds on an earlier LWN article [1] and Linux Foundation mentorship
+session [2].
 
 ACCESS-MARKING OPTIONS
@@ -31,7 +32,7 @@ example:
 	WRITE_ONCE(a, b + data_race(c + d) + READ_ONCE(e));
 
 Neither plain C-language accesses nor data_race() (#1 and #2 above) place
-any sort of constraint on the compiler's choice of optimizations [2].
+any sort of constraint on the compiler's choice of optimizations [3].
 In contrast, READ_ONCE() and WRITE_ONCE() (#3 and #4 above) restrict the
 compiler's use of code-motion and common-subexpression optimizations.
 Therefore, if a given access is involved in an intentional data race,
@@ -594,5 +595,8 @@ REFERENCES
 [1] "Concurrency bugs should fear the big bad data-race detector (part 2)"
     https://lwn.net/Articles/816854/
 
-[2] "Who's afraid of a big bad optimizing compiler?"
+[2] "The Kernel Concurrency Sanitizer"
+    https://www.linuxfoundation.org/webinars/the-kernel-concurrency-sanitizer
+
+[3] "Who's afraid of a big bad optimizing compiler?"
     https://lwn.net/Articles/793253/
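
The marking options contrasted in the second hunk above can be illustrated
with a short kernel-style sketch (all names here are hypothetical, not from
the patch): a shared variable updated under a lock but read locklessly.

	static DEFINE_SPINLOCK(counter_lock);	/* hypothetical lock */
	static int counter;			/* hypothetical shared variable */

	void counter_inc(void)
	{
		/*
		 * Updates are serialized by counter_lock; the marked
		 * write (#4) pairs with the lockless readers below.
		 */
		spin_lock(&counter_lock);
		WRITE_ONCE(counter, counter + 1);
		spin_unlock(&counter_lock);
	}

	int counter_approx(void)
	{
		/*
		 * Diagnostic read (#2): the data race is intentional and
		 * any recent value will do, so the compiler remains
		 * unconstrained and KCSAN stays silent.
		 */
		return data_race(counter);
	}

	int counter_read(void)
	{
		/*
		 * Lockless read whose exact value matters (#3): READ_ONCE()
		 * marks the race and also restricts code-motion and
		 * common-subexpression optimizations.
		 */
		return READ_ONCE(counter);
	}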
tools/memory-model/lock.cat
@@ -54,6 +54,12 @@ flag ~empty LKR \ domain(lk-rmw) as unpaired-LKR
  *)
 empty ([LKW] ; po-loc ; [LKR]) \ (po-loc ; [UL] ; po-loc) as lock-nest
 
+(*
+ * In the same way, spin_is_locked() inside a critical section must always
+ * return True (no RU events can be in a critical section for the same lock).
+ *)
+empty ([LKW] ; po-loc ; [RU]) \ (po-loc ; [UL] ; po-loc) as nested-is-locked
+
 (* The final value of a spinlock should not be tested *)
 flag ~empty [FW] ; loc ; [ALL-LOCKS] as lock-final
@@ -79,42 +85,50 @@ empty ([UNMATCHED-LKW] ; loc ; [UNMATCHED-LKW]) \ id as unmatched-locks
 (* rfi for LF events: link each LKW to the LF events in its critical section *)
 let rfi-lf = ([LKW] ; po-loc ; [LF]) \ ([LKW] ; po-loc ; [UL] ; po-loc)
 
-(* rfe for LF events *)
+(* Utility macro to convert a single pair to a single-edge relation *)
+let pair-to-relation p = p ++ 0
+
+(*
+ * If a given LF event e is outside a critical section, it cannot read
+ * internally but it may read from an LKW event in another thread.
+ * Compute the relation containing these possible edges.
+ *)
+let possible-rfe-noncrit-lf e = (LKW * {e}) & loc & ext
+
+(* Compute set of sets of possible rfe edges for LF events *)
 let all-possible-rfe-lf =
 	(*
-	 * Given an LF event r, compute the possible rfe edges for that event
-	 * (all those starting from LKW events in other threads),
-	 * and then convert that relation to a set of single-edge relations.
+	 * Convert the possible-rfe-noncrit-lf relation for e
+	 * to a set of single edges
 	 *)
-	let possible-rfe-lf r =
-		let pair-to-relation p = p ++ 0
-		in map pair-to-relation ((LKW * {r}) & loc & ext)
-	(* Do this for each LF event r that isn't in rfi-lf *)
-	in map possible-rfe-lf (LF \ range(rfi-lf))
+	let set-of-singleton-rfe-lf e =
+		map pair-to-relation (possible-rfe-noncrit-lf e)
+	(* Do this for each LF event e that isn't in rfi-lf *)
+	in map set-of-singleton-rfe-lf (LF \ range(rfi-lf))
 
 (* Generate all rf relations for LF events *)
 with rfe-lf from cross(all-possible-rfe-lf)
 let rf-lf = rfe-lf | rfi-lf
 
 (*
- * RU, i.e., spin_is_locked() returning False, is slightly different.
- * We rely on the memory model to rule out cases where spin_is_locked()
- * within one of the lock's critical sections returns False.
+ * A given RU event e may read internally from the last po-previous UL,
+ * or it may read from a UL event in another thread or the initial write.
+ * Compute the relation containing these possible edges.
  *)
-
-(* rfi for RU events: an RU may read from the last po-previous UL *)
-let rfi-ru = ([UL] ; po-loc ; [RU]) \ ([UL] ; po-loc ; [LKW] ; po-loc)
-
-(* rfe for RU events: an RU may read from an external UL or the initial write *)
-let all-possible-rfe-ru =
-	let possible-rfe-ru r =
-		let pair-to-relation p = p ++ 0
-		in map pair-to-relation (((UL | IW) * {r}) & loc & ext)
-	in map possible-rfe-ru RU
+let possible-rf-ru e = (((UL * {e}) & po-loc) \
+			([UL] ; po-loc ; [UL] ; po-loc)) |
+		(((UL | IW) * {e}) & loc & ext)
+
+(* Compute set of sets of possible rf edges for RU events *)
+let all-possible-rf-ru =
+	(* Convert the possible-rf-ru relation for e to a set of single edges *)
+	let set-of-singleton-rf-ru e =
+		map pair-to-relation (possible-rf-ru e)
+	(* Do this for each RU event e *)
+	in map set-of-singleton-rf-ru RU
 
 (* Generate all rf relations for RU events *)
-with rfe-ru from cross(all-possible-rfe-ru)
-let rf-ru = rfe-ru | rfi-ru
+with rf-ru from cross(all-possible-rf-ru)
 
 (* Final rf relation *)
 let rf = rf | rf-lf | rf-ru
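
To check a litmus test such as the sketch above against the updated model,
the usual invocation documented in the tools/memory-model README applies
(the file name here is assumed):

	$ herd7 -conf linux-kernel.cfg unlock-then-is-locked.litmus

With the reorganized lock.cat, herd7 should enumerate the execution in which
the RU event reads from the process's own spin_unlock(), rather than
discarding it and reporting no executions at all.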