Commit bb814518 authored by Rafael J. Wysocki

Merge tag 'opp-updates-6.5' of git://git.kernel.org/pub/scm/linux/kernel/git/vireshk/pm

Pull OPP (Operating Performance Points) updates for 6.5 from Viresh
Kumar:

"- Simplify performance state related logic in the OPP core (Viresh
   Kumar).

 - Fix use-after-free and improve locking around lazy_opp_tables (Viresh
   Kumar and Stephan Gerhold).

 - Minor cleanups - using dev_err_probe() and rate-limiting debug
   messages (Andrew Halaney and Adrián Larumbe)."
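
The locking item above boils down to one rule: an OPP table that puts itself on the global lazy_opp_tables list must also take itself off that list, under opp_table_lock, before it is freed, otherwise a deferred probe leaves a dangling pointer behind. The stand-alone user-space sketch below illustrates that add-under-lock / delete-under-lock-before-free pattern; it is only an illustration, not kernel code: lazy_tables and table_lock merely mirror lazy_opp_tables and opp_table_lock, and the hand-rolled list stands in for the kernel's list and mutex primitives.

    /*
     * Minimal sketch (user space, pthreads): every node added to the shared
     * list is removed from it, under the same lock, before it is freed.
     */
    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct table {
            struct table *next;
    };

    static struct table *lazy_tables;   /* shared list head (cf. lazy_opp_tables) */
    static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

    static void lazy_add(struct table *t)
    {
            pthread_mutex_lock(&table_lock);
            t->next = lazy_tables;
            lazy_tables = t;
            pthread_mutex_unlock(&table_lock);
    }

    static void lazy_del(struct table *t)
    {
            struct table **p;

            pthread_mutex_lock(&table_lock);
            for (p = &lazy_tables; *p; p = &(*p)->next) {
                    if (*p == t) {
                            *p = t->next;   /* unlink before the memory goes away */
                            break;
                    }
            }
            pthread_mutex_unlock(&table_lock);
    }

    static void table_free(struct table *t)
    {
            lazy_del(t);    /* without this, the list keeps a dangling pointer */
            free(t);
    }

    int main(void)
    {
            struct table *t = calloc(1, sizeof(*t));

            if (!t)
                    return 1;
            lazy_add(t);
            table_free(t);  /* e.g. the probe-deferral path */
            printf("lazy list empty: %s\n", lazy_tables ? "no" : "yes");
            return 0;
    }

In the series itself the same rule appears as the mutex_lock(&opp_table_lock)/mutex_unlock() pairs around list_add() and list_del() of opp_table->lazy in the of.c hunks below; the dev_err_probe() and dev_dbg_ratelimited() changes are independent cleanups visible in the icc-path and _set_opp() hunks.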

* tag 'opp-updates-6.5' of git://git.kernel.org/pub/scm/linux/kernel/git/vireshk/pm:
  OPP: Properly propagate error along when failing to get icc_path
  OPP: Use dev_err_probe() when failing to get icc_path
  OPP: Simplify the over-designed pstate <-> level dance
  OPP: pstate is only valid for genpd OPP tables
  OPP: don't drop performance constraint on OPP table removal
  OPP: Protect `lazy_opp_tables` list with `opp_table_lock`
  OPP: Staticize `lazy_opp_tables` in of.c
  opp: Fix use-after-free in lazy_opp_tables after probe deferral
  OPP: rate-limit debug messages when no change in OPP is required
parents 40e8e98f 5fb2864c
drivers/opp/core.c
@@ -29,9 +29,6 @@
  */
 LIST_HEAD(opp_tables);

-/* OPP tables with uninitialized required OPPs */
-LIST_HEAD(lazy_opp_tables);
-
 /* Lock to allow exclusive modification to the device and opp lists */
 DEFINE_MUTEX(opp_table_lock);
 /* Flag indicating that opp_tables list is being updated at the moment */
@@ -230,17 +227,25 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_get_level);
 unsigned int dev_pm_opp_get_required_pstate(struct dev_pm_opp *opp,
                                             unsigned int index)
 {
+        struct opp_table *opp_table = opp->opp_table;
+
         if (IS_ERR_OR_NULL(opp) || !opp->available ||
-            index >= opp->opp_table->required_opp_count) {
+            index >= opp_table->required_opp_count) {
                 pr_err("%s: Invalid parameters\n", __func__);
                 return 0;
         }

         /* required-opps not fully initialized yet */
-        if (lazy_linking_pending(opp->opp_table))
+        if (lazy_linking_pending(opp_table))
                 return 0;

-        return opp->required_opps[index]->pstate;
+        /* The required OPP table must belong to a genpd */
+        if (unlikely(!opp_table->required_opp_tables[index]->is_genpd)) {
+                pr_err("%s: Performance state is only valid for genpds.\n", __func__);
+                return 0;
+        }
+
+        return opp->required_opps[index]->level;
 }
 EXPORT_SYMBOL_GPL(dev_pm_opp_get_required_pstate);
@@ -938,7 +943,7 @@ static int _set_opp_bw(const struct opp_table *opp_table,
 static int _set_performance_state(struct device *dev, struct device *pd_dev,
                                   struct dev_pm_opp *opp, int i)
 {
-        unsigned int pstate = likely(opp) ? opp->required_opps[i]->pstate : 0;
+        unsigned int pstate = likely(opp) ? opp->required_opps[i]->level : 0;
         int ret;

         if (!pd_dev)
@@ -1091,7 +1096,7 @@ static int _set_opp(struct device *dev, struct opp_table *opp_table,
         /* Return early if nothing to do */
         if (!forced && old_opp == opp && opp_table->enabled) {
-                dev_dbg(dev, "%s: OPPs are same, nothing to do\n", __func__);
+                dev_dbg_ratelimited(dev, "%s: OPPs are same, nothing to do\n", __func__);
                 return 0;
         }
@@ -1358,7 +1363,10 @@ static struct opp_table *_allocate_opp_table(struct device *dev, int index)
         return opp_table;

 remove_opp_dev:
+        _of_clear_opp_table(opp_table);
         _remove_opp_dev(opp_dev, opp_table);
+        mutex_destroy(&opp_table->genpd_virt_dev_lock);
+        mutex_destroy(&opp_table->lock);
 err:
         kfree(opp_table);
         return ERR_PTR(ret);
@@ -1522,16 +1530,8 @@ static void _opp_table_kref_release(struct kref *kref)
         WARN_ON(!list_empty(&opp_table->opp_list));

-        list_for_each_entry_safe(opp_dev, temp, &opp_table->dev_list, node) {
-                /*
-                 * The OPP table is getting removed, drop the performance state
-                 * constraints.
-                 */
-                if (opp_table->genpd_performance_state)
-                        dev_pm_genpd_set_performance_state((struct device *)(opp_dev->dev), 0);
-
+        list_for_each_entry_safe(opp_dev, temp, &opp_table->dev_list, node)
                 _remove_opp_dev(opp_dev, opp_table);
-        }

         mutex_destroy(&opp_table->genpd_virt_dev_lock);
         mutex_destroy(&opp_table->lock);
@@ -2704,6 +2704,12 @@ int dev_pm_opp_xlate_performance_state(struct opp_table *src_table,
         if (!src_table || !src_table->required_opp_count)
                 return pstate;

+        /* Both OPP tables must belong to genpds */
+        if (unlikely(!src_table->is_genpd || !dst_table->is_genpd)) {
+                pr_err("%s: Performance state is only valid for genpds.\n", __func__);
+                return -EINVAL;
+        }
+
         /* required-opps not fully initialized yet */
         if (lazy_linking_pending(src_table))
                 return -EBUSY;
@@ -2722,8 +2728,8 @@ int dev_pm_opp_xlate_performance_state(struct opp_table *src_table,
         mutex_lock(&src_table->lock);

         list_for_each_entry(opp, &src_table->opp_list, node) {
-                if (opp->pstate == pstate) {
-                        dest_pstate = opp->required_opps[i]->pstate;
+                if (opp->level == pstate) {
+                        dest_pstate = opp->required_opps[i]->level;
                         goto unlock;
                 }
         }
drivers/opp/debugfs.c
@@ -152,7 +152,6 @@ void opp_debug_create_one(struct dev_pm_opp *opp, struct opp_table *opp_table)
         debugfs_create_bool("dynamic", S_IRUGO, d, &opp->dynamic);
         debugfs_create_bool("turbo", S_IRUGO, d, &opp->turbo);
         debugfs_create_bool("suspend", S_IRUGO, d, &opp->suspend);
-        debugfs_create_u32("performance_state", S_IRUGO, d, &opp->pstate);
         debugfs_create_u32("level", S_IRUGO, d, &opp->level);
         debugfs_create_ulong("clock_latency_ns", S_IRUGO, d,
                              &opp->clock_latency_ns);
drivers/opp/of.c
@@ -21,6 +21,9 @@
 #include "opp.h"

+/* OPP tables with uninitialized required OPPs, protected by opp_table_lock */
+static LIST_HEAD(lazy_opp_tables);
+
 /*
  * Returns opp descriptor node for a device node, caller must
  * do of_node_put().
@@ -145,7 +148,10 @@ static void _opp_table_free_required_tables(struct opp_table *opp_table)
         opp_table->required_opp_count = 0;
         opp_table->required_opp_tables = NULL;

+        mutex_lock(&opp_table_lock);
+        list_del(&opp_table->lazy);
+        mutex_unlock(&opp_table_lock);
 }

 /*
@@ -194,8 +200,15 @@ static void _opp_table_alloc_required_tables(struct opp_table *opp_table,
         }

         /* Let's do the linking later on */
-        if (lazy)
+        if (lazy) {
+                /*
+                 * The OPP table is not held while allocating the table, take it
+                 * now to avoid corruption to the lazy_opp_tables list.
+                 */
+                mutex_lock(&opp_table_lock);
                 list_add(&opp_table->lazy, &lazy_opp_tables);
+                mutex_unlock(&opp_table_lock);
+        }
         else
                 _update_set_required_opps(opp_table);
@@ -500,11 +513,7 @@ int dev_pm_opp_of_find_icc_paths(struct device *dev,
         for (i = 0; i < num_paths; i++) {
                 paths[i] = of_icc_get_by_index(dev, i);
                 if (IS_ERR(paths[i])) {
-                        ret = PTR_ERR(paths[i]);
-                        if (ret != -EPROBE_DEFER) {
-                                dev_err(dev, "%s: Unable to get path%d: %d\n",
-                                        __func__, i, ret);
-                        }
+                        ret = dev_err_probe(dev, PTR_ERR(paths[i]), "%s: Unable to get path%d\n", __func__, i);
                         goto err;
                 }
         }
@@ -932,9 +941,6 @@ static struct dev_pm_opp *_opp_add_static_v2(struct opp_table *opp_table,
         if (ret)
                 goto free_required_opps;

-        if (opp_table->is_genpd)
-                new_opp->pstate = pm_genpd_opp_to_performance_state(dev, new_opp);
-
         ret = _opp_add(dev, new_opp, opp_table);
         if (ret) {
                 /* Don't return error for duplicate OPPs */
@@ -1021,14 +1027,6 @@ static int _of_add_opp_table_v2(struct device *dev, struct opp_table *opp_table)
                 goto remove_static_opp;
         }

-        list_for_each_entry(opp, &opp_table->opp_list, node) {
-                /* Any non-zero performance state would enable the feature */
-                if (opp->pstate) {
-                        opp_table->genpd_performance_state = true;
-                        break;
-                }
-        }
-
         lazy_link_required_opp_table(opp_table);

         return 0;
@@ -1387,9 +1385,15 @@ int of_get_required_opp_performance_state(struct device_node *np, int index)
                 goto put_required_np;
         }

+        /* The OPP tables must belong to a genpd */
+        if (unlikely(!opp_table->is_genpd)) {
+                pr_err("%s: Performance state is only valid for genpds.\n", __func__);
+                goto put_required_np;
+        }
+
         opp = _find_opp_of_np(opp_table, required_np);
         if (opp) {
-                pstate = opp->pstate;
+                pstate = opp->level;
                 dev_pm_opp_put(opp);
         }
drivers/opp/opp.h
@@ -26,7 +26,7 @@ struct regulator;
 /* Lock to allow exclusive modification to the device and opp lists */
 extern struct mutex opp_table_lock;

-extern struct list_head opp_tables, lazy_opp_tables;
+extern struct list_head opp_tables;

 /* OPP Config flags */
 #define OPP_CONFIG_CLK                  BIT(0)
@@ -78,7 +78,6 @@ struct opp_config_data {
  * @turbo: true if turbo (boost) OPP
  * @suspend: true if suspend OPP
  * @removed: flag indicating that OPP's reference is dropped by OPP core.
- * @pstate: Device's power domain's performance state.
  * @rates: Frequencies in hertz
  * @level: Performance level
  * @supplies: Power supplies voltage/current values
@@ -101,7 +100,6 @@ struct dev_pm_opp {
         bool turbo;
         bool suspend;
         bool removed;
-        unsigned int pstate;
         unsigned long *rates;
         unsigned int level;
@@ -182,7 +180,6 @@ enum opp_table_access {
  * @paths: Interconnect path handles
  * @path_count: Number of interconnect paths
  * @enabled: Set to true if the device's resources are enabled/configured.
- * @genpd_performance_state: Device's power domain support performance state.
  * @is_genpd: Marks if the OPP table belongs to a genpd.
  * @set_required_opps: Helper responsible to set required OPPs.
  * @dentry: debugfs dentry pointer of the real device directory (not links).
@@ -233,7 +230,6 @@ struct opp_table {
         struct icc_path **paths;
         unsigned int path_count;
         bool enabled;
-        bool genpd_performance_state;
         bool is_genpd;

         int (*set_required_opps)(struct device *dev,
                         struct opp_table *opp_table, struct dev_pm_opp *opp, bool scaling_down);