Commit 77222b0d authored by Qais Yousef, committed by Ingo Molnar

sched/topology: Export asym_cap_list

So that we can use it to iterate through the available capacities in
the system. Sort asym_cap_list in descending order, as users are
expected to be interested in the highest capacity first.

Make the list RCU protected to allow for cheap access in hot paths.
Signed-off-by: Qais Yousef <qyousef@layalina.io>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Reviewed-by: Vincent Guittot <vincent.guittot@linaro.org>
Link: https://lore.kernel.org/r/20240324004552.999936-2-qyousef@layalina.io
parent f4566a1e
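
Why RCU: readers in hot paths can now walk the exported list without taking a lock. A minimal sketch of the expected read-side usage follows (illustrative only, not part of this patch; the helper name and the use of p->cpus_ptr are assumptions):

	/* Illustrative sketch: find the highest capacity reachable by task @p. */
	static unsigned long highest_allowed_capacity(struct task_struct *p)
	{
		struct asym_cap_data *entry;
		unsigned long capacity = 0;

		rcu_read_lock();
		list_for_each_entry_rcu(entry, &asym_cap_list, link) {
			/*
			 * The list is sorted in descending capacity order, so
			 * the first entry intersecting @p's affinity wins.
			 */
			if (cpumask_intersects(p->cpus_ptr, cpu_capacity_span(entry))) {
				capacity = entry->capacity;
				break;
			}
		}
		rcu_read_unlock();

		return capacity;
	}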
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -109,6 +109,20 @@ extern int sysctl_sched_rt_period;
 extern int sysctl_sched_rt_runtime;
 extern int sched_rr_timeslice;
 
+/*
+ * Asymmetric CPU capacity bits
+ */
+struct asym_cap_data {
+	struct list_head link;
+	struct rcu_head rcu;
+	unsigned long capacity;
+	unsigned long cpus[];
+};
+
+extern struct list_head asym_cap_list;
+
+#define cpu_capacity_span(asym_data) to_cpumask((asym_data)->cpus)
+
 /*
  * Helpers for converting nanosecond timing to jiffy resolution
  */
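
The cpus[] flexible array stores a cpumask directly at the tail of each entry, and cpu_capacity_span() turns it back into a struct cpumask pointer. A minimal sketch of the idiom (illustrative; it mirrors the allocation this patch performs in asym_cpu_capacity_update_data()):

	struct asym_cap_data *entry;

	/* A single allocation covers the struct plus the trailing cpumask. */
	entry = kzalloc(sizeof(*entry) + cpumask_size(), GFP_KERNEL);
	if (entry)
		__cpumask_set_cpu(raw_smp_processor_id(), cpu_capacity_span(entry));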
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -1329,24 +1329,13 @@ static void init_sched_groups_capacity(int cpu, struct sched_domain *sd)
 	update_group_capacity(sd, cpu);
 }
 
-/*
- * Asymmetric CPU capacity bits
- */
-struct asym_cap_data {
-	struct list_head link;
-	unsigned long capacity;
-	unsigned long cpus[];
-};
-
 /*
  * Set of available CPUs grouped by their corresponding capacities
  * Each list entry contains a CPU mask reflecting CPUs that share the same
  * capacity.
  * The lifespan of data is unlimited.
  */
-static LIST_HEAD(asym_cap_list);
-
-#define cpu_capacity_span(asym_data) to_cpumask((asym_data)->cpus)
+LIST_HEAD(asym_cap_list);
 
 /*
  * Verify whether there is any CPU capacity asymmetry in a given sched domain.
@@ -1386,21 +1375,39 @@ asym_cpu_capacity_classify(const struct cpumask *sd_span,
 }
 
+static void free_asym_cap_entry(struct rcu_head *head)
+{
+	struct asym_cap_data *entry = container_of(head, struct asym_cap_data, rcu);
+	kfree(entry);
+}
+
 static inline void asym_cpu_capacity_update_data(int cpu)
 {
 	unsigned long capacity = arch_scale_cpu_capacity(cpu);
-	struct asym_cap_data *entry = NULL;
+	struct asym_cap_data *insert_entry = NULL;
+	struct asym_cap_data *entry;
 
+	/*
+	 * Search if capacity already exists. If not, track the entry after
+	 * which we should insert to keep the list sorted in descending order.
+	 */
 	list_for_each_entry(entry, &asym_cap_list, link) {
 		if (capacity == entry->capacity)
 			goto done;
+		else if (!insert_entry && capacity > entry->capacity)
+			insert_entry = list_prev_entry(entry, link);
 	}
 
 	entry = kzalloc(sizeof(*entry) + cpumask_size(), GFP_KERNEL);
 	if (WARN_ONCE(!entry, "Failed to allocate memory for asymmetry data\n"))
 		return;
 	entry->capacity = capacity;
+
+	/* If NULL then the new capacity is the smallest, add last. */
+	if (!insert_entry)
+		list_add_tail_rcu(&entry->link, &asym_cap_list);
+	else
+		list_add_rcu(&entry->link, &insert_entry->link);
+
 done:
 	__cpumask_set_cpu(cpu, cpu_capacity_span(entry));
 }
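
Two details of the insertion above are worth spelling out. First, a worked trace, assuming the list already holds capacities {1024, 512} and a CPU with capacity 768 is scanned:

	entry = 1024: 768 != 1024, and 768 > 1024 is false
	entry =  512: 768 >  512, so insert_entry = list_prev_entry() = the 1024 entry
	after loop:   list_add_rcu() inserts after 1024, giving 1024 -> 768 -> 512

Second, when the new capacity is larger than every existing one, insert_entry is derived from the first entry, whose link.prev is the list head itself; container_of() then yields a bogus entry pointer, but taking &insert_entry->link recovers exactly &asym_cap_list, so list_add_rcu() correctly inserts at the front.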
@@ -1423,8 +1430,8 @@ static void asym_cpu_capacity_scan(void)
 	list_for_each_entry_safe(entry, next, &asym_cap_list, link) {
 		if (cpumask_empty(cpu_capacity_span(entry))) {
-			list_del(&entry->link);
-			kfree(entry);
+			list_del_rcu(&entry->link);
+			call_rcu(&entry->rcu, free_asym_cap_entry);
 		}
 	}
@@ -1434,8 +1441,8 @@ static void asym_cpu_capacity_scan(void)
 	 */
 	if (list_is_singular(&asym_cap_list)) {
 		entry = list_first_entry(&asym_cap_list, typeof(*entry), link);
-		list_del(&entry->link);
-		kfree(entry);
+		list_del_rcu(&entry->link);
+		call_rcu(&entry->rcu, free_asym_cap_entry);
 	}
 }
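
The list_del_rcu()/call_rcu() pairing above is the standard RCU removal pattern: unlinking stops new readers from finding the entry, while the kfree() in free_asym_cap_entry() is deferred until a grace period guarantees that any reader still holding a reference has left its read-side critical section. This is why the struct gained the rcu_head member; writers need no extra locking here, since the scan runs from the serialized sched-domain rebuild path. A compressed timeline (illustrative):

	/*
	 * writer: list_del_rcu(&e->link); call_rcu(&e->rcu, free_asym_cap_entry);
	 * reader: rcu_read_lock(); ...may still dereference e... rcu_read_unlock();
	 * later:  grace period elapses -> free_asym_cap_entry() -> kfree(e);
	 */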