Commit af7f588d authored by Mathieu Desnoyers, committed by Peter Zijlstra

sched: Introduce per-memory-map concurrency ID

This feature allows the scheduler to expose a per-memory-map concurrency
ID to user-space. This concurrency ID is within the possible cpus range,
and is temporarily (and uniquely) assigned while threads are actively
running within a memory map. If a memory map has fewer threads than
cores, or is restricted to running on only a few cores concurrently
through sched affinity or cgroup cpusets, the concurrency IDs will be
values close to 0, thus allowing efficient use of user-space memory for
per-cpu data structures.

This feature is meant to be exposed by a new rseq thread area field.

The primary purpose of this feature is to do the heavy lifting needed
by memory allocators to allow them to use per-cpu data structures
efficiently in the following situations (an illustrative user-space
sketch follows this list):

- Single-threaded applications,
- Multi-threaded applications on large systems (many cores) with limited
  cpu affinity mask,
- Multi-threaded applications on large systems (many cores) with
  restricted cgroup cpuset per container.
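
As an illustration of the intended user-space usage, the sketch below
indexes a per-concurrency-ID pool array the way an allocator might. It
is not part of this patch, and the ID accessor is a placeholder (the
rseq thread area field carrying the ID is exposed separately), so it
falls back to sched_getcpu(), mirroring the kernel-side task_mm_cid()
fallback used when CONFIG_SCHED_MM_CID is disabled:

  #define _GNU_SOURCE
  #include <sched.h>
  #include <stdio.h>

  #define MAX_POSSIBLE_CPUS 512	/* sized from the possible cpus range */

  struct cid_pool {
  	void *freelist;			/* per-concurrency-ID free list */
  	char pad[64 - sizeof(void *)];	/* avoid false sharing */
  };

  static struct cid_pool pools[MAX_POSSIBLE_CPUS];

  static int current_concurrency_id(void)
  {
  	/*
  	 * Placeholder: the real accessor would read the concurrency ID
  	 * from the new rseq thread area field. sched_getcpu() keeps this
  	 * sketch self-contained and mirrors the kernel's task_mm_cid()
  	 * fallback when CONFIG_SCHED_MM_CID is disabled.
  	 */
  	return sched_getcpu();
  }

  int main(void)
  {
  	struct cid_pool *pool = &pools[current_concurrency_id()];

  	/*
  	 * With mm_cid, a process restricted to N concurrent threads/CPUs
  	 * only ever touches pools[0..N-1], no matter how many CPUs the
  	 * machine has; that is the memory saving this feature targets.
  	 */
  	printf("using pool %td\n", pool - pools);
  	return 0;
  }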

One of the key concerns from scheduler maintainers is the overhead
associated with additional spin locks or atomic operations in the
scheduler fast path. This is why the following optimization is
implemented.

On context switch between threads belonging to the same memory map,
transfer the mm_cid from prev to next without any atomic operations.
This takes care of use-cases involving frequent context switches between
threads belonging to the same memory map.
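
For reference, this handoff is what the switch_mm_cid() helper added by
this patch (see the sched.h hunk below) boils down to; a condensed copy
with the lock-free and locked paths annotated:

  static inline void switch_mm_cid(struct task_struct *prev, struct task_struct *next)
  {
  	if (prev->mm_cid_active) {
  		if (next->mm_cid_active && next->mm == prev->mm) {
  			/*
  			 * Same mm: hand the cid straight from prev to next,
  			 * no atomics and no mm->cid_lock on this path.
  			 */
  			next->mm_cid = prev->mm_cid;
  			prev->mm_cid = -1;
  			return;
  		}
  		/* Different mm: release prev's cid under mm->cid_lock. */
  		mm_cid_put(prev->mm, prev->mm_cid);
  		prev->mm_cid = -1;
  	}
  	if (next->mm_cid_active)
  		next->mm_cid = mm_cid_get(next->mm);	/* takes mm->cid_lock */
  }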

Additional optimizations can be done if the spin locks added when
context switching between threads belonging to different memory maps end
up being a performance bottleneck. Those are left out of this patch,
though: a performance impact would have to be clearly demonstrated to
justify the added complexity.

Credit goes to Paul Turner (Google) for the original virtual cpu id
idea. This feature is implemented based on discussions with Paul
Turner and Peter Oskolkov (Google), but I took the liberty to implement
scheduler fast-path optimizations and my own NUMA-awareness scheme.
Rumor has it that Google has been running an rseq vcpu_id extension
internally in production for a year. The tcmalloc source code indeed has
comments hinting at a vcpu_id prototype extension to the rseq system
call [1].

The following benchmarks do not show any significant overhead added to
the scheduler context switch by this feature:

* perf bench sched messaging (process)

Baseline:                    86.5±0.3 ms
With mm_cid:                 86.7±2.6 ms

* perf bench sched messaging (threaded)

Baseline:                    84.3±3.0 ms
With mm_cid:                 84.7±2.6 ms

* hackbench (process)

Baseline:                    82.9±2.7 ms
With mm_cid:                 82.9±2.9 ms

* hackbench (threaded)

Baseline:                    85.2±2.6 ms
With mm_cid:                 84.4±2.9 ms

[1] https://github.com/google/tcmalloc/blob/master/tcmalloc/internal/linux_syscall_support.h#L26

Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20221122203932.231377-8-mathieu.desnoyers@efficios.com
parent 99babd04
@@ -1010,6 +1010,7 @@ static int exec_mmap(struct mm_struct *mm)
 	active_mm = tsk->active_mm;
 	tsk->active_mm = mm;
 	tsk->mm = mm;
+	mm_init_cid(mm);
 	/*
 	 * This prevents preemption while active_mm is being loaded and
 	 * it and mm are being updated, which could cause problems for
@@ -1822,6 +1823,7 @@ static int bprm_execve(struct linux_binprm *bprm,
 	 */
 	check_unsafe_exec(bprm);
 	current->in_execve = 1;
+	sched_mm_cid_before_execve(current);
 	file = do_open_execat(fd, filename, flags);
 	retval = PTR_ERR(file);
@@ -1852,6 +1854,7 @@ static int bprm_execve(struct linux_binprm *bprm,
 	if (retval < 0)
 		goto out;
+	sched_mm_cid_after_execve(current);
 	/* execve succeeded */
 	current->fs->in_exec = 0;
 	current->in_execve = 0;
@@ -1871,6 +1874,7 @@ static int bprm_execve(struct linux_binprm *bprm,
 		force_fatal_sig(SIGSEGV);
 out_unmark:
+	sched_mm_cid_after_execve(current);
 	current->fs->in_exec = 0;
 	current->in_execve = 0;
...
@@ -1976,6 +1976,31 @@ struct zap_details {
 /* Set in unmap_vmas() to indicate a final unmap call. Only used by hugetlb */
 #define ZAP_FLAG_UNMAP ((__force zap_flags_t) BIT(1))
+
+#ifdef CONFIG_SCHED_MM_CID
+void sched_mm_cid_before_execve(struct task_struct *t);
+void sched_mm_cid_after_execve(struct task_struct *t);
+void sched_mm_cid_fork(struct task_struct *t);
+void sched_mm_cid_exit_signals(struct task_struct *t);
+static inline int task_mm_cid(struct task_struct *t)
+{
+	return t->mm_cid;
+}
+#else
+static inline void sched_mm_cid_before_execve(struct task_struct *t) { }
+static inline void sched_mm_cid_after_execve(struct task_struct *t) { }
+static inline void sched_mm_cid_fork(struct task_struct *t) { }
+static inline void sched_mm_cid_exit_signals(struct task_struct *t) { }
+static inline int task_mm_cid(struct task_struct *t)
+{
+	/*
+	 * Use the processor id as a fall-back when the mm cid feature is
+	 * disabled. This provides functional per-cpu data structure accesses
+	 * in user-space, although it won't provide the memory usage benefits.
+	 */
+	return raw_smp_processor_id();
+}
+#endif
 #ifdef CONFIG_MMU
 extern bool can_do_mlock(void);
 #else
...
@@ -645,7 +645,18 @@ struct mm_struct {
 		 * &struct mm_struct is freed.
 		 */
 		atomic_t mm_count;
+#ifdef CONFIG_SCHED_MM_CID
+		/**
+		 * @cid_lock: Protect cid bitmap updates vs lookups.
+		 *
+		 * Prevent situations where updates to the cid bitmap happen
+		 * concurrently with lookups. Those can lead to situations
+		 * where a lookup cannot find a free bit simply because it was
+		 * unlucky enough to load, non-atomically, bitmap words as they
+		 * were being concurrently updated by the updaters.
+		 */
+		raw_spinlock_t cid_lock;
+#endif
 #ifdef CONFIG_MMU
 		atomic_long_t pgtables_bytes;	/* PTE page table pages */
 #endif
@@ -909,6 +920,36 @@ static inline void vma_iter_init(struct vma_iterator *vmi,
 	vmi->mas.node = MAS_START;
 }
+
+#ifdef CONFIG_SCHED_MM_CID
+/* Accessor for struct mm_struct's cidmask. */
+static inline cpumask_t *mm_cidmask(struct mm_struct *mm)
+{
+	unsigned long cid_bitmap = (unsigned long)mm;
+
+	cid_bitmap += offsetof(struct mm_struct, cpu_bitmap);
+	/* Skip cpu_bitmap */
+	cid_bitmap += cpumask_size();
+	return (struct cpumask *)cid_bitmap;
+}
+
+static inline void mm_init_cid(struct mm_struct *mm)
+{
+	raw_spin_lock_init(&mm->cid_lock);
+	cpumask_clear(mm_cidmask(mm));
+}
+
+static inline unsigned int mm_cid_size(void)
+{
+	return cpumask_size();
+}
+#else /* CONFIG_SCHED_MM_CID */
+static inline void mm_init_cid(struct mm_struct *mm) { }
+static inline unsigned int mm_cid_size(void)
+{
+	return 0;
+}
+#endif /* CONFIG_SCHED_MM_CID */
+
 struct mmu_gather;
 extern void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm);
 extern void tlb_gather_mmu_fullmm(struct mmu_gather *tlb, struct mm_struct *mm);
...
@@ -1311,6 +1311,11 @@ struct task_struct {
 	unsigned long rseq_event_mask;
 #endif
+#ifdef CONFIG_SCHED_MM_CID
+	int				mm_cid;		/* Current cid in mm */
+	int				mm_cid_active;	/* Whether cid bitmap is active */
+#endif
+
 	struct tlbflush_unmap_batch	tlb_ubc;
 	union {
...
@@ -1041,6 +1041,10 @@ config RT_GROUP_SCHED
 endif #CGROUP_SCHED
+config SCHED_MM_CID
+	def_bool y
+	depends on SMP && RSEQ
+
 config UCLAMP_TASK_GROUP
 	bool "Utilization clamping per group of tasks"
 	depends on CGROUP_SCHED
...
@@ -1060,6 +1060,10 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
 	tsk->reported_split_lock = 0;
 #endif
+#ifdef CONFIG_SCHED_MM_CID
+	tsk->mm_cid = -1;
+	tsk->mm_cid_active = 0;
+#endif
 	return tsk;
 free_stack:
@@ -1169,6 +1173,7 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,
 	mm->user_ns = get_user_ns(user_ns);
 	lru_gen_init_mm(mm);
+	mm_init_cid(mm);
 	return mm;
 fail_pcpu:
@@ -1601,6 +1606,7 @@ static int copy_mm(unsigned long clone_flags, struct task_struct *tsk)
 	tsk->mm = mm;
 	tsk->active_mm = mm;
+	sched_mm_cid_fork(tsk);
 	return 0;
 }
@@ -3034,7 +3040,7 @@ void __init mm_cache_init(void)
 	 * dynamically sized based on the maximum CPU number this system
 	 * can have, taking hotplug into account (nr_cpu_ids).
 	 */
-	mm_size = sizeof(struct mm_struct) + cpumask_size();
+	mm_size = sizeof(struct mm_struct) + cpumask_size() + mm_cid_size();
 	mm_cachep = kmem_cache_create_usercopy("mm_struct",
 			mm_size, ARCH_MIN_MMSTRUCT_ALIGN,
...
@@ -5052,6 +5052,7 @@ prepare_task_switch(struct rq *rq, struct task_struct *prev,
 	sched_info_switch(rq, prev, next);
 	perf_event_task_sched_out(prev, next);
 	rseq_preempt(prev);
+	switch_mm_cid(prev, next);
 	fire_sched_out_preempt_notifiers(prev, next);
 	kmap_local_sched_out();
 	prepare_task(next);
@@ -11305,3 +11306,53 @@ void call_trace_sched_update_nr_running(struct rq *rq, int count)
 {
 	trace_sched_update_nr_running_tp(rq, count);
 }
+
+#ifdef CONFIG_SCHED_MM_CID
+void sched_mm_cid_exit_signals(struct task_struct *t)
+{
+	struct mm_struct *mm = t->mm;
+	unsigned long flags;
+
+	if (!mm)
+		return;
+	local_irq_save(flags);
+	mm_cid_put(mm, t->mm_cid);
+	t->mm_cid = -1;
+	t->mm_cid_active = 0;
+	local_irq_restore(flags);
+}
+
+void sched_mm_cid_before_execve(struct task_struct *t)
+{
+	struct mm_struct *mm = t->mm;
+	unsigned long flags;
+
+	if (!mm)
+		return;
+	local_irq_save(flags);
+	mm_cid_put(mm, t->mm_cid);
+	t->mm_cid = -1;
+	t->mm_cid_active = 0;
+	local_irq_restore(flags);
+}
+
+void sched_mm_cid_after_execve(struct task_struct *t)
+{
+	struct mm_struct *mm = t->mm;
+	unsigned long flags;
+
+	WARN_ON_ONCE((t->flags & PF_KTHREAD) || !t->mm);
+
+	local_irq_save(flags);
+	t->mm_cid = mm_cid_get(mm);
+	t->mm_cid_active = 1;
+	local_irq_restore(flags);
+	rseq_set_notify_resume(t);
+}
+
+void sched_mm_cid_fork(struct task_struct *t)
+{
+	WARN_ON_ONCE((t->flags & PF_KTHREAD) || !t->mm || t->mm_cid != -1);
+	t->mm_cid_active = 1;
+}
+#endif
@@ -3269,4 +3269,62 @@ static inline void update_current_exec_runtime(struct task_struct *curr,
 	cgroup_account_cputime(curr, delta_exec);
 }
+
+#ifdef CONFIG_SCHED_MM_CID
+static inline int __mm_cid_get(struct mm_struct *mm)
+{
+	struct cpumask *cpumask;
+	int cid;
+
+	cpumask = mm_cidmask(mm);
+	cid = cpumask_first_zero(cpumask);
+	if (cid >= nr_cpu_ids)
+		return -1;
+	__cpumask_set_cpu(cid, cpumask);
+	return cid;
+}
+
+static inline void mm_cid_put(struct mm_struct *mm, int cid)
+{
+	lockdep_assert_irqs_disabled();
+	if (cid < 0)
+		return;
+	raw_spin_lock(&mm->cid_lock);
+	__cpumask_clear_cpu(cid, mm_cidmask(mm));
+	raw_spin_unlock(&mm->cid_lock);
+}
+
+static inline int mm_cid_get(struct mm_struct *mm)
+{
+	int ret;
+
+	lockdep_assert_irqs_disabled();
+	raw_spin_lock(&mm->cid_lock);
+	ret = __mm_cid_get(mm);
+	raw_spin_unlock(&mm->cid_lock);
+	return ret;
+}
+
+static inline void switch_mm_cid(struct task_struct *prev, struct task_struct *next)
+{
+	if (prev->mm_cid_active) {
+		if (next->mm_cid_active && next->mm == prev->mm) {
+			/*
+			 * Context switch between threads in same mm, hand over
+			 * the mm_cid from prev to next.
+			 */
+			next->mm_cid = prev->mm_cid;
+			prev->mm_cid = -1;
+			return;
+		}
+		mm_cid_put(prev->mm, prev->mm_cid);
+		prev->mm_cid = -1;
+	}
+	if (next->mm_cid_active)
+		next->mm_cid = mm_cid_get(next->mm);
+}
+#else
+static inline void switch_mm_cid(struct task_struct *prev, struct task_struct *next) { }
+#endif
+
 #endif /* _KERNEL_SCHED_SCHED_H */
@@ -2951,6 +2951,7 @@ void exit_signals(struct task_struct *tsk)
 	cgroup_threadgroup_change_begin(tsk);
 	if (thread_group_empty(tsk) || (tsk->signal->flags & SIGNAL_GROUP_EXIT)) {
+		sched_mm_cid_exit_signals(tsk);
 		tsk->flags |= PF_EXITING;
 		cgroup_threadgroup_change_end(tsk);
 		return;
@@ -2961,6 +2962,7 @@ void exit_signals(struct task_struct *tsk)
 	 * From now this task is not visible for group-wide signals,
 	 * see wants_signal(), do_signal_stop().
 	 */
+	sched_mm_cid_exit_signals(tsk);
 	tsk->flags |= PF_EXITING;
 	cgroup_threadgroup_change_end(tsk);
...