Commit acb5002d authored by Peter Zijlstra, committed by Stefan Bader

UBUNTU: SAUCE: sched/smt: Introduce sched_smt_{active,present}

Introduce the scheduler's 'sched_smt_present' static key and provide
the query function 'sched_smt_active()' to be used in subsequent x86
speculation code.

Loosely based on the following upstream commits:
  - 321a874a ("sched/smt: Expose sched_smt_present static key")
  - c5511d03 ("sched/smt: Make sched_smt_present track topology")
  - ba2591a5 ("sched/smt: Update sched_smt_present at runtime")
  - 1b568f0a ("sched/core: Optimize SCHED_SMT")

CVE-2018-12126
CVE-2018-12127
CVE-2018-12130
Signed-off-by: Juerg Haefliger <juergh@canonical.com>
Acked-by: Tyler Hicks <tyhicks@canonical.com>
Acked-by: Stefan Bader <stefan.bader@canonical.com>
Signed-off-by: Stefan Bader <stefan.bader@canonical.com>
parent 9c1043b2
include/linux/sched/smt.h (new file):

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SCHED_SMT_H
#define _LINUX_SCHED_SMT_H

#include <linux/static_key.h>

#ifdef CONFIG_SCHED_SMT
extern struct static_key_false sched_smt_present;

static __always_inline bool sched_smt_active(void)
{
	return static_branch_likely(&sched_smt_present);
}
#else
static inline bool sched_smt_active(void) { return false; }
#endif

#endif
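The header gives callers a zero-overhead query: with CONFIG_SCHED_SMT, sched_smt_active() compiles to a patched jump on the sched_smt_present static key; without it, it is constant false and the compiler drops the dependent code. A minimal sketch of the kind of caller the subsequent x86 speculation patches add (the function name and message below are illustrative, not part of this commit):

/* Illustrative caller; not part of this commit. */
#include <linux/kernel.h>
#include <linux/sched/smt.h>

static void example_smt_mitigation_update(void)
{
	/* Patched jump with CONFIG_SCHED_SMT, constant false without it. */
	if (sched_smt_active())
		pr_warn_once("SMT active: mitigation may be incomplete\n");
}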
kernel/sched/core.c:

@@ -5621,6 +5621,14 @@ static int sched_cpu_active(struct notifier_block *nfb,
 		return NOTIFY_OK;
 
 	case CPU_ONLINE:
+#ifdef CONFIG_SCHED_SMT
+		/*
+		 * When going up, increment the number of cores with SMT
+		 * present.
+		 */
+		if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
+			static_branch_inc(&sched_smt_present);
+#endif
 		/*
 		 * At this point a starting CPU has marked itself as online via
 		 * set_cpu_online(). But it might not yet have marked itself
@@ -5631,6 +5639,14 @@ static int sched_cpu_active(struct notifier_block *nfb,
 		return NOTIFY_OK;
 
 	case CPU_DOWN_FAILED:
+#ifdef CONFIG_SCHED_SMT
+		/*
+		 * Downing the CPU failed but we already decremented the number
+		 * of cores with SMT present, so we need to increment it again.
+		 */
+		if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
+			static_branch_inc(&sched_smt_present);
+#endif
 		set_cpu_active(cpu, true);
 		return NOTIFY_OK;
 
@@ -5642,9 +5658,19 @@ static int sched_cpu_active(struct notifier_block *nfb,
 static int sched_cpu_inactive(struct notifier_block *nfb,
 				unsigned long action, void *hcpu)
 {
+	int cpu = (long)hcpu;
+
 	switch (action & ~CPU_TASKS_FROZEN) {
 	case CPU_DOWN_PREPARE:
-		set_cpu_active((long)hcpu, false);
+#ifdef CONFIG_SCHED_SMT
+		/*
+		 * When going down, decrement the number of cores with SMT
+		 * present.
+		 */
+		if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
+			static_branch_dec(&sched_smt_present);
+#endif
+		set_cpu_active(cpu, false);
 		return NOTIFY_OK;
 	default:
 		return NOTIFY_DONE;
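Two properties make this bookkeeping correct. First, cpumask_weight(cpu_smt_mask(cpu)) == 2 is true exactly when the hotplugged CPU is the second online sibling of its core, so each core contributes at most one reference. Second, static keys are reference counted: static_branch_inc() and static_branch_dec() nest, and the branch stays enabled while the count is non-zero. A worked hotplug sequence for one two-sibling core (illustrative, not part of this commit):

/*
 * Illustrative sequence for one core with siblings A and B:
 *
 *   A CPU_ONLINE        weight(smt_mask) == 1 -> no change (count 0)
 *   B CPU_ONLINE        weight(smt_mask) == 2 -> inc       (count 1, key on)
 *   B CPU_DOWN_PREPARE  weight(smt_mask) == 2 -> dec       (count 0, key off)
 *   B CPU_DOWN_FAILED   weight(smt_mask) == 2 -> inc again (count 1, key on)
 */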
kernel/sched/fair.c:

@@ -4922,6 +4922,10 @@ find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
 	return shallowest_idle_cpu != -1 ? shallowest_idle_cpu : least_loaded_cpu;
 }
 
+#ifdef CONFIG_SCHED_SMT
+DEFINE_STATIC_KEY_FALSE(sched_smt_present);
+#endif
+
 /*
  * Try and locate an idle CPU in the sched_domain.
  */
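With the key defined next to the idle-placement code, scheduler fast paths can branch on sched_smt_present directly via static_branch_likely(), the pattern introduced by upstream 1b568f0a. A condensed, hypothetical sketch of such a fast path (this helper is not part of this commit):

/* Hypothetical sketch of the SMT fast-path pattern; not part of this commit. */
static int example_find_idle_smt_sibling(int target)
{
	int cpu;

	/* The SMT scan is skipped via a patched branch when no core has SMT. */
	if (!static_branch_likely(&sched_smt_present))
		return -1;

	for_each_cpu(cpu, cpu_smt_mask(target))
		if (cpu != target && idle_cpu(cpu))
			return cpu;

	return -1;
}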
kernel/sched/sched.h:

 #include <linux/sched.h>
+#include <linux/sched/smt.h>
 #include <linux/sched/sysctl.h>
 #include <linux/sched/rt.h>
 #include <linux/sched/deadline.h>