Commit 3b680865 authored by Linus Torvalds

Merge tag 'sched_urgent_for_v6.9_rc5' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull scheduler fix from Borislav Petkov:

 - Add a missing memory barrier in the concurrency ID mm switching

* tag 'sched_urgent_for_v6.9_rc5' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  sched: Add missing memory barrier in switch_mm_cid
parents d07a0b86 fe90f396
arch/x86/include/asm/barrier.h
@@ -79,6 +79,9 @@ do { \
 #define __smp_mb__before_atomic()	do { } while (0)
 #define __smp_mb__after_atomic()	do { } while (0)

+/* Writing to CR3 provides a full memory barrier in switch_mm(). */
+#define smp_mb__after_switch_mm()	do { } while (0)
+
 #include <asm-generic/barrier.h>

 #endif /* _ASM_X86_BARRIER_H */
include/asm-generic/barrier.h
@@ -294,5 +294,13 @@ do { \
 #define io_stop_wc() do { } while (0)
 #endif

+/*
+ * Architectures that guarantee an implicit smp_mb() in switch_mm()
+ * can override smp_mb__after_switch_mm.
+ */
+#ifndef smp_mb__after_switch_mm
+# define smp_mb__after_switch_mm()	smp_mb()
+#endif
+
 #endif /* !__ASSEMBLY__ */
 #endif /* __ASM_GENERIC_BARRIER_H */
kernel/sched/sched.h
@@ -79,6 +79,8 @@
 # include <asm/paravirt_api_clock.h>
 #endif

+#include <asm/barrier.h>
+
 #include "cpupri.h"
 #include "cpudeadline.h"
@@ -3445,13 +3447,19 @@ static inline void switch_mm_cid(struct rq *rq,
 		 * between rq->curr store and load of {prev,next}->mm->pcpu_cid[cpu].
 		 * Provide it here.
 		 */
-		if (!prev->mm)				// from kernel
+		if (!prev->mm) {			// from kernel
 			smp_mb();
-		/*
-		 * user -> user transition guarantees a memory barrier through
-		 * switch_mm() when current->mm changes. If current->mm is
-		 * unchanged, no barrier is needed.
-		 */
+		} else {				// from user
+			/*
+			 * user->user transition relies on an implicit
+			 * memory barrier in switch_mm() when
+			 * current->mm changes. If the architecture
+			 * switch_mm() does not have an implicit memory
+			 * barrier, it is emitted here. If current->mm
+			 * is unchanged, no barrier is needed.
+			 */
+			smp_mb__after_switch_mm();
+		}
 	}
 	if (prev->mm_cid_active) {
 		mm_cid_snapshot_time(rq, prev->mm);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment